accumulo-commits mailing list archives

From ctubb...@apache.org
Subject [39/48] accumulo git commit: ACCUMULO-4514 Remove unnecessary code
Date Fri, 04 Nov 2016 22:17:16 GMT
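For context, the pattern applied throughout this commit replaces explicit generic type arguments on the right-hand side of constructor calls with the Java 7 diamond operator, letting the compiler infer the type arguments from the declaration. A minimal sketch of the before/after shape (the class and field names here are illustrative, not taken from the commit):

    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    public class DiamondExample {
      // Before: type arguments repeated on both sides of the assignment.
      private Map<String,List<Long>> before = new HashMap<String,List<Long>>();

      // After: the diamond operator lets the compiler infer the arguments;
      // this appears to be the pattern applied in the hunks below.
      private Map<String,List<Long>> after = new HashMap<>();
    }
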
http://git-wip-us.apache.org/repos/asf/accumulo/blob/717febbf/fate/src/test/java/org/apache/accumulo/fate/SimpleStore.java
----------------------------------------------------------------------
diff --git a/fate/src/test/java/org/apache/accumulo/fate/SimpleStore.java b/fate/src/test/java/org/apache/accumulo/fate/SimpleStore.java
index f0bac88..0987ac9 100644
--- a/fate/src/test/java/org/apache/accumulo/fate/SimpleStore.java
+++ b/fate/src/test/java/org/apache/accumulo/fate/SimpleStore.java
@@ -33,8 +33,8 @@ import org.apache.commons.lang.NotImplementedException;
 public class SimpleStore<T> implements TStore<T> {
 
   private long nextId = 1;
-  private Map<Long,TStatus> statuses = new HashMap<Long,TStore.TStatus>();
-  private Set<Long> reserved = new HashSet<Long>();
+  private Map<Long,TStatus> statuses = new HashMap<>();
+  private Set<Long> reserved = new HashSet<>();
 
   @Override
   public long create() {
@@ -120,7 +120,7 @@ public class SimpleStore<T> implements TStore<T> {
 
   @Override
   public List<Long> list() {
-    return new ArrayList<Long>(statuses.keySet());
+    return new ArrayList<>(statuses.keySet());
   }
 
 }

http://git-wip-us.apache.org/repos/asf/accumulo/blob/717febbf/fate/src/test/java/org/apache/accumulo/fate/zookeeper/DistributedReadWriteLockTest.java
----------------------------------------------------------------------
diff --git a/fate/src/test/java/org/apache/accumulo/fate/zookeeper/DistributedReadWriteLockTest.java b/fate/src/test/java/org/apache/accumulo/fate/zookeeper/DistributedReadWriteLockTest.java
index 0b04bd1..486a4c6 100644
--- a/fate/src/test/java/org/apache/accumulo/fate/zookeeper/DistributedReadWriteLockTest.java
+++ b/fate/src/test/java/org/apache/accumulo/fate/zookeeper/DistributedReadWriteLockTest.java
@@ -34,11 +34,11 @@ public class DistributedReadWriteLockTest {
   public static class MockQueueLock implements QueueLock {
 
     long next = 0L;
-    final SortedMap<Long,byte[]> locks = new TreeMap<Long,byte[]>();
+    final SortedMap<Long,byte[]> locks = new TreeMap<>();
 
     @Override
     synchronized public SortedMap<Long,byte[]> getEarlierEntries(long entry) {
-      SortedMap<Long,byte[]> result = new TreeMap<Long,byte[]>();
+      SortedMap<Long,byte[]> result = new TreeMap<>();
       result.putAll(locks.headMap(entry + 1));
       return result;
     }

http://git-wip-us.apache.org/repos/asf/accumulo/blob/717febbf/fate/src/test/java/org/apache/accumulo/fate/zookeeper/TransactionWatcherTest.java
----------------------------------------------------------------------
diff --git a/fate/src/test/java/org/apache/accumulo/fate/zookeeper/TransactionWatcherTest.java b/fate/src/test/java/org/apache/accumulo/fate/zookeeper/TransactionWatcherTest.java
index 0e4e329..72745b9 100644
--- a/fate/src/test/java/org/apache/accumulo/fate/zookeeper/TransactionWatcherTest.java
+++ b/fate/src/test/java/org/apache/accumulo/fate/zookeeper/TransactionWatcherTest.java
@@ -28,13 +28,13 @@ import org.junit.Test;
 public class TransactionWatcherTest {
 
   static class SimpleArbitrator implements TransactionWatcher.Arbitrator {
-    Map<String,List<Long>> started = new HashMap<String,List<Long>>();
-    Map<String,List<Long>> cleanedUp = new HashMap<String,List<Long>>();
+    Map<String,List<Long>> started = new HashMap<>();
+    Map<String,List<Long>> cleanedUp = new HashMap<>();
 
     public synchronized void start(String txType, Long txid) throws Exception {
       List<Long> txids = started.get(txType);
       if (txids == null)
-        txids = new ArrayList<Long>();
+        txids = new ArrayList<>();
       if (txids.contains(txid))
         throw new Exception("transaction already started");
       txids.add(txid);
@@ -42,7 +42,7 @@ public class TransactionWatcherTest {
 
       txids = cleanedUp.get(txType);
       if (txids == null)
-        txids = new ArrayList<Long>();
+        txids = new ArrayList<>();
       if (txids.contains(txid))
         throw new IllegalStateException("transaction was started but not cleaned up");
       txids.add(txid);

http://git-wip-us.apache.org/repos/asf/accumulo/blob/717febbf/fate/src/test/java/org/apache/accumulo/fate/zookeeper/ZooCacheTest.java
----------------------------------------------------------------------
diff --git a/fate/src/test/java/org/apache/accumulo/fate/zookeeper/ZooCacheTest.java b/fate/src/test/java/org/apache/accumulo/fate/zookeeper/ZooCacheTest.java
index 5dd6f61..8e1fbd2 100644
--- a/fate/src/test/java/org/apache/accumulo/fate/zookeeper/ZooCacheTest.java
+++ b/fate/src/test/java/org/apache/accumulo/fate/zookeeper/ZooCacheTest.java
@@ -266,7 +266,7 @@ public class ZooCacheTest {
   }
 
   private Watcher watchData(byte[] initialData) throws Exception {
-    Capture<Watcher> cw = new Capture<Watcher>();
+    Capture<Watcher> cw = new Capture<>();
     Stat existsStat = new Stat();
     if (initialData != null) {
       expect(zk.exists(eq(ZPATH), capture(cw))).andReturn(existsStat);
@@ -335,7 +335,7 @@ public class ZooCacheTest {
   }
 
   private Watcher watchChildren(List<String> initialChildren) throws Exception {
-    Capture<Watcher> cw = new Capture<Watcher>();
+    Capture<Watcher> cw = new Capture<>();
     expect(zk.getChildren(eq(ZPATH), capture(cw))).andReturn(initialChildren);
     replay(zk);
     zc.getChildren(ZPATH);

http://git-wip-us.apache.org/repos/asf/accumulo/blob/717febbf/maven-plugin/src/main/java/org/apache/accumulo/maven/plugin/AbstractAccumuloMojo.java
----------------------------------------------------------------------
diff --git a/maven-plugin/src/main/java/org/apache/accumulo/maven/plugin/AbstractAccumuloMojo.java b/maven-plugin/src/main/java/org/apache/accumulo/maven/plugin/AbstractAccumuloMojo.java
index 2028a2e..37eeb4d 100644
--- a/maven-plugin/src/main/java/org/apache/accumulo/maven/plugin/AbstractAccumuloMojo.java
+++ b/maven-plugin/src/main/java/org/apache/accumulo/maven/plugin/AbstractAccumuloMojo.java
@@ -43,7 +43,7 @@ public abstract class AbstractAccumuloMojo extends AbstractMojo {
   }
 
   void configureMiniClasspath(MiniAccumuloConfigImpl macConfig, String miniClasspath) throws MalformedURLException {
-    ArrayList<String> classpathItems = new ArrayList<String>();
+    ArrayList<String> classpathItems = new ArrayList<>();
     if (miniClasspath == null && project != null) {
       classpathItems.add(project.getBuild().getOutputDirectory());
       classpathItems.add(project.getBuild().getTestOutputDirectory());

http://git-wip-us.apache.org/repos/asf/accumulo/blob/717febbf/minicluster/src/main/java/org/apache/accumulo/cluster/standalone/StandaloneClusterControl.java
----------------------------------------------------------------------
diff --git a/minicluster/src/main/java/org/apache/accumulo/cluster/standalone/StandaloneClusterControl.java b/minicluster/src/main/java/org/apache/accumulo/cluster/standalone/StandaloneClusterControl.java
index 3edf60d..febc94c 100644
--- a/minicluster/src/main/java/org/apache/accumulo/cluster/standalone/StandaloneClusterControl.java
+++ b/minicluster/src/main/java/org/apache/accumulo/cluster/standalone/StandaloneClusterControl.java
@@ -377,7 +377,7 @@ public class StandaloneClusterControl implements ClusterControl {
   protected List<String> getHosts(File f) throws IOException {
     BufferedReader reader = new BufferedReader(new FileReader(f));
     try {
-      List<String> hosts = new ArrayList<String>();
+      List<String> hosts = new ArrayList<>();
       String line = null;
       while ((line = reader.readLine()) != null) {
         line = line.trim();

http://git-wip-us.apache.org/repos/asf/accumulo/blob/717febbf/minicluster/src/main/java/org/apache/accumulo/minicluster/MiniAccumuloRunner.java
----------------------------------------------------------------------
diff --git a/minicluster/src/main/java/org/apache/accumulo/minicluster/MiniAccumuloRunner.java b/minicluster/src/main/java/org/apache/accumulo/minicluster/MiniAccumuloRunner.java
index 13a75b5..e88248c 100644
--- a/minicluster/src/main/java/org/apache/accumulo/minicluster/MiniAccumuloRunner.java
+++ b/minicluster/src/main/java/org/apache/accumulo/minicluster/MiniAccumuloRunner.java
@@ -180,7 +180,7 @@ public class MiniAccumuloRunner {
     if (opts.prop.containsKey(SHUTDOWN_PORT_PROP))
       shutdownPort = Integer.parseInt(opts.prop.getProperty(SHUTDOWN_PORT_PROP));
 
-    Map<String,String> siteConfig = new HashMap<String,String>();
+    Map<String,String> siteConfig = new HashMap<>();
     for (Map.Entry<Object,Object> entry : opts.prop.entrySet()) {
       String key = (String) entry.getKey();
       if (key.startsWith("site."))

http://git-wip-us.apache.org/repos/asf/accumulo/blob/717febbf/minicluster/src/main/java/org/apache/accumulo/minicluster/impl/MiniAccumuloClusterControl.java
----------------------------------------------------------------------
diff --git a/minicluster/src/main/java/org/apache/accumulo/minicluster/impl/MiniAccumuloClusterControl.java b/minicluster/src/main/java/org/apache/accumulo/minicluster/impl/MiniAccumuloClusterControl.java
index 80c4edc..8cc7950 100644
--- a/minicluster/src/main/java/org/apache/accumulo/minicluster/impl/MiniAccumuloClusterControl.java
+++ b/minicluster/src/main/java/org/apache/accumulo/minicluster/impl/MiniAccumuloClusterControl.java
@@ -56,7 +56,7 @@ public class MiniAccumuloClusterControl implements ClusterControl {
   Process gcProcess = null;
   Process monitor = null;
   Process tracer = null;
-  final List<Process> tabletServerProcesses = new ArrayList<Process>();
+  final List<Process> tabletServerProcesses = new ArrayList<>();
 
   public MiniAccumuloClusterControl(MiniAccumuloClusterImpl cluster) {
     requireNonNull(cluster);

http://git-wip-us.apache.org/repos/asf/accumulo/blob/717febbf/minicluster/src/main/java/org/apache/accumulo/minicluster/impl/MiniAccumuloClusterImpl.java
----------------------------------------------------------------------
diff --git a/minicluster/src/main/java/org/apache/accumulo/minicluster/impl/MiniAccumuloClusterImpl.java b/minicluster/src/main/java/org/apache/accumulo/minicluster/impl/MiniAccumuloClusterImpl.java
index 653c600..f2e5c7c 100644
--- a/minicluster/src/main/java/org/apache/accumulo/minicluster/impl/MiniAccumuloClusterImpl.java
+++ b/minicluster/src/main/java/org/apache/accumulo/minicluster/impl/MiniAccumuloClusterImpl.java
@@ -168,7 +168,7 @@ public class MiniAccumuloClusterImpl implements AccumuloCluster {
 
   private boolean initialized = false;
 
-  private Set<Pair<ServerType,Integer>> debugPorts = new HashSet<Pair<ServerType,Integer>>();
+  private Set<Pair<ServerType,Integer>> debugPorts = new HashSet<>();
 
   private File zooCfgFile;
   private String dfsUri;
@@ -177,11 +177,11 @@ public class MiniAccumuloClusterImpl implements AccumuloCluster {
     return logWriters;
   }
 
-  private List<LogWriter> logWriters = new ArrayList<MiniAccumuloClusterImpl.LogWriter>();
+  private List<LogWriter> logWriters = new ArrayList<>();
 
   private MiniAccumuloConfigImpl config;
   private MiniDFSCluster miniDFS = null;
-  private List<Process> cleanup = new ArrayList<Process>();
+  private List<Process> cleanup = new ArrayList<>();
 
   private ExecutorService executor;
 
@@ -196,7 +196,7 @@ public class MiniAccumuloClusterImpl implements AccumuloCluster {
   }
 
   public Process exec(Class<?> clazz, List<String> jvmArgs, String... args) throws IOException {
-    ArrayList<String> jvmArgs2 = new ArrayList<String>(1 + (jvmArgs == null ? 0 : jvmArgs.size()));
+    ArrayList<String> jvmArgs2 = new ArrayList<>(1 + (jvmArgs == null ? 0 : jvmArgs.size()));
     jvmArgs2.add("-Xmx" + config.getDefaultMemory());
     if (jvmArgs != null)
       jvmArgs2.addAll(jvmArgs);
@@ -228,7 +228,7 @@ public class MiniAccumuloClusterImpl implements AccumuloCluster {
   private String getClasspath() throws IOException {
 
     try {
-      ArrayList<ClassLoader> classloaders = new ArrayList<ClassLoader>();
+      ArrayList<ClassLoader> classloaders = new ArrayList<>();
 
       ClassLoader cl = this.getClass().getClassLoader();
 
@@ -286,7 +286,7 @@ public class MiniAccumuloClusterImpl implements AccumuloCluster {
 
     String className = clazz.getName();
 
-    ArrayList<String> argList = new ArrayList<String>();
+    ArrayList<String> argList = new ArrayList<>();
     argList.addAll(Arrays.asList(javaBin, "-Dproc=" + clazz.getSimpleName(), "-cp", classpath));
     argList.addAll(extraJvmOpts);
     for (Entry<String,String> sysProp : config.getSystemProperties().entrySet()) {
@@ -343,13 +343,13 @@ public class MiniAccumuloClusterImpl implements AccumuloCluster {
 
   Process _exec(Class<?> clazz, ServerType serverType, String... args) throws IOException {
 
-    List<String> jvmOpts = new ArrayList<String>();
+    List<String> jvmOpts = new ArrayList<>();
     jvmOpts.add("-Xmx" + config.getMemory(serverType));
 
     if (config.isJDWPEnabled()) {
       Integer port = PortUtils.getRandomFreePort();
       jvmOpts.addAll(buildRemoteDebugParams(port));
-      debugPorts.add(new Pair<ServerType,Integer>(serverType, port));
+      debugPorts.add(new Pair<>(serverType, port));
     }
     return _exec(clazz, jvmOpts, args);
   }
@@ -642,7 +642,7 @@ public class MiniAccumuloClusterImpl implements AccumuloCluster {
   }
 
   List<ProcessReference> references(Process... procs) {
-    List<ProcessReference> result = new ArrayList<ProcessReference>();
+    List<ProcessReference> result = new ArrayList<>();
     for (Process proc : procs) {
       result.add(new ProcessReference(proc));
     }
@@ -650,7 +650,7 @@ public class MiniAccumuloClusterImpl implements AccumuloCluster {
   }
 
   public Map<ServerType,Collection<ProcessReference>> getProcesses() {
-    Map<ServerType,Collection<ProcessReference>> result = new HashMap<ServerType,Collection<ProcessReference>>();
+    Map<ServerType,Collection<ProcessReference>> result = new HashMap<>();
     MiniAccumuloClusterControl control = getClusterControl();
     result.put(ServerType.MASTER, references(control.masterProcess));
     result.put(ServerType.TABLET_SERVER, references(control.tabletServerProcesses.toArray(new Process[0])));
@@ -762,7 +762,7 @@ public class MiniAccumuloClusterImpl implements AccumuloCluster {
   }
 
   int stopProcessWithTimeout(final Process proc, long timeout, TimeUnit unit) throws InterruptedException, ExecutionException, TimeoutException {
-    FutureTask<Integer> future = new FutureTask<Integer>(new Callable<Integer>() {
+    FutureTask<Integer> future = new FutureTask<>(new Callable<Integer>() {
       @Override
       public Integer call() throws InterruptedException {
         proc.destroy();

http://git-wip-us.apache.org/repos/asf/accumulo/blob/717febbf/minicluster/src/main/java/org/apache/accumulo/minicluster/impl/MiniAccumuloConfigImpl.java
----------------------------------------------------------------------
diff --git a/minicluster/src/main/java/org/apache/accumulo/minicluster/impl/MiniAccumuloConfigImpl.java b/minicluster/src/main/java/org/apache/accumulo/minicluster/impl/MiniAccumuloConfigImpl.java
index c8f65d2..ac5ebc1 100644
--- a/minicluster/src/main/java/org/apache/accumulo/minicluster/impl/MiniAccumuloConfigImpl.java
+++ b/minicluster/src/main/java/org/apache/accumulo/minicluster/impl/MiniAccumuloConfigImpl.java
@@ -47,12 +47,12 @@ public class MiniAccumuloConfigImpl {
 
   private File dir = null;
   private String rootPassword = null;
-  private Map<String,String> siteConfig = new HashMap<String,String>();
-  private Map<String,String> configuredSiteConig = new HashMap<String,String>();
+  private Map<String,String> siteConfig = new HashMap<>();
+  private Map<String,String> configuredSiteConig = new HashMap<>();
   private int numTservers = 2;
-  private Map<ServerType,Long> memoryConfig = new HashMap<ServerType,Long>();
+  private Map<ServerType,Long> memoryConfig = new HashMap<>();
   private boolean jdwpEnabled = false;
-  private Map<String,String> systemProperties = new HashMap<String,String>();
+  private Map<String,String> systemProperties = new HashMap<>();
 
   private String instanceName = "miniInstance";
   private String rootUserName = "root";
@@ -276,8 +276,8 @@ public class MiniAccumuloConfigImpl {
   }
 
   private MiniAccumuloConfigImpl _setSiteConfig(Map<String,String> siteConfig) {
-    this.siteConfig = new HashMap<String,String>(siteConfig);
-    this.configuredSiteConig = new HashMap<String,String>(siteConfig);
+    this.siteConfig = new HashMap<>(siteConfig);
+    this.configuredSiteConig = new HashMap<>(siteConfig);
     return this;
   }
 
@@ -357,11 +357,11 @@ public class MiniAccumuloConfigImpl {
    * @return a copy of the site config
    */
   public Map<String,String> getSiteConfig() {
-    return new HashMap<String,String>(siteConfig);
+    return new HashMap<>(siteConfig);
   }
 
   public Map<String,String> getConfiguredSiteConfig() {
-    return new HashMap<String,String>(configuredSiteConig);
+    return new HashMap<>(configuredSiteConig);
   }
 
   /**
@@ -518,7 +518,7 @@ public class MiniAccumuloConfigImpl {
    * @since 1.6.0
    */
   public void setSystemProperties(Map<String,String> systemProperties) {
-    this.systemProperties = new HashMap<String,String>(systemProperties);
+    this.systemProperties = new HashMap<>(systemProperties);
   }
 
   /**
@@ -527,7 +527,7 @@ public class MiniAccumuloConfigImpl {
    * @since 1.6.0
    */
   public Map<String,String> getSystemProperties() {
-    return new HashMap<String,String>(systemProperties);
+    return new HashMap<>(systemProperties);
   }
 
   /**
@@ -633,7 +633,7 @@ public class MiniAccumuloConfigImpl {
       throw e1;
     }
 
-    Map<String,String> siteConfigMap = new HashMap<String,String>();
+    Map<String,String> siteConfigMap = new HashMap<>();
     for (Entry<String,String> e : accumuloConf) {
       siteConfigMap.put(e.getKey(), e.getValue());
     }

http://git-wip-us.apache.org/repos/asf/accumulo/blob/717febbf/minicluster/src/test/java/org/apache/accumulo/minicluster/MiniAccumuloClusterTest.java
----------------------------------------------------------------------
diff --git a/minicluster/src/test/java/org/apache/accumulo/minicluster/MiniAccumuloClusterTest.java b/minicluster/src/test/java/org/apache/accumulo/minicluster/MiniAccumuloClusterTest.java
index 7c62384..f691bf6 100644
--- a/minicluster/src/test/java/org/apache/accumulo/minicluster/MiniAccumuloClusterTest.java
+++ b/minicluster/src/test/java/org/apache/accumulo/minicluster/MiniAccumuloClusterTest.java
@@ -69,7 +69,7 @@ public class MiniAccumuloClusterTest {
 
     MiniAccumuloConfig config = new MiniAccumuloConfig(testDir, "superSecret").setJDWPEnabled(true);
     config.setZooKeeperPort(0);
-    HashMap<String,String> site = new HashMap<String,String>();
+    HashMap<String,String> site = new HashMap<>();
     site.put(Property.TSERV_WORKQ_THREADS.getKey(), "2");
     config.setSiteConfig(site);
     accumulo = new MiniAccumuloCluster(config);
@@ -216,7 +216,7 @@ public class MiniAccumuloClusterTest {
   public void testConfig() {
     // ensure what user passed in is what comes back
     Assert.assertEquals(0, accumulo.getConfig().getZooKeeperPort());
-    HashMap<String,String> site = new HashMap<String,String>();
+    HashMap<String,String> site = new HashMap<>();
     site.put(Property.TSERV_WORKQ_THREADS.getKey(), "2");
     Assert.assertEquals(site, accumulo.getConfig().getSiteConfig());
   }

http://git-wip-us.apache.org/repos/asf/accumulo/blob/717febbf/minicluster/src/test/java/org/apache/accumulo/minicluster/impl/MiniAccumuloConfigImplTest.java
----------------------------------------------------------------------
diff --git a/minicluster/src/test/java/org/apache/accumulo/minicluster/impl/MiniAccumuloConfigImplTest.java b/minicluster/src/test/java/org/apache/accumulo/minicluster/impl/MiniAccumuloConfigImplTest.java
index dc616df..ba12f53 100644
--- a/minicluster/src/test/java/org/apache/accumulo/minicluster/impl/MiniAccumuloConfigImplTest.java
+++ b/minicluster/src/test/java/org/apache/accumulo/minicluster/impl/MiniAccumuloConfigImplTest.java
@@ -66,7 +66,7 @@ public class MiniAccumuloConfigImplTest {
   public void testSiteConfig() {
 
     // constructor site config overrides default props
-    Map<String,String> siteConfig = new HashMap<String,String>();
+    Map<String,String> siteConfig = new HashMap<>();
     siteConfig.put(Property.INSTANCE_DFS_URI.getKey(), "hdfs://");
     MiniAccumuloConfigImpl config = new MiniAccumuloConfigImpl(tempFolder.getRoot(), "password").setSiteConfig(siteConfig).initialize();
     assertEquals("hdfs://", config.getSiteConfig().get(Property.INSTANCE_DFS_URI.getKey()));

http://git-wip-us.apache.org/repos/asf/accumulo/blob/717febbf/proxy/src/main/java/org/apache/accumulo/proxy/Proxy.java
----------------------------------------------------------------------
diff --git a/proxy/src/main/java/org/apache/accumulo/proxy/Proxy.java b/proxy/src/main/java/org/apache/accumulo/proxy/Proxy.java
index 87e2c58..39cb28e 100644
--- a/proxy/src/main/java/org/apache/accumulo/proxy/Proxy.java
+++ b/proxy/src/main/java/org/apache/accumulo/proxy/Proxy.java
@@ -204,7 +204,7 @@ public class Proxy implements KeywordExecutable {
     AccumuloProxy.Iface wrappedImpl = RpcWrapper.service(impl, new AccumuloProxy.Processor<AccumuloProxy.Iface>(impl));
 
     // Create the processor from the implementation
-    TProcessor processor = new AccumuloProxy.Processor<AccumuloProxy.Iface>(wrappedImpl);
+    TProcessor processor = new AccumuloProxy.Processor<>(wrappedImpl);
 
     // Get the type of thrift server to instantiate
     final String serverTypeStr = properties.getProperty(THRIFT_SERVER_TYPE, THRIFT_SERVER_TYPE_DEFAULT);

http://git-wip-us.apache.org/repos/asf/accumulo/blob/717febbf/proxy/src/main/java/org/apache/accumulo/proxy/ProxyServer.java
----------------------------------------------------------------------
diff --git a/proxy/src/main/java/org/apache/accumulo/proxy/ProxyServer.java b/proxy/src/main/java/org/apache/accumulo/proxy/ProxyServer.java
index d8b678a..f4310e8 100644
--- a/proxy/src/main/java/org/apache/accumulo/proxy/ProxyServer.java
+++ b/proxy/src/main/java/org/apache/accumulo/proxy/ProxyServer.java
@@ -339,7 +339,7 @@ public class ProxyServer implements AccumuloProxy.Iface {
       org.apache.accumulo.proxy.thrift.AccumuloSecurityException, org.apache.accumulo.proxy.thrift.TableNotFoundException, TException {
 
     try {
-      SortedSet<Text> sorted = new TreeSet<Text>();
+      SortedSet<Text> sorted = new TreeSet<>();
       for (ByteBuffer split : splits) {
         sorted.add(ByteBufferUtil.toText(split));
       }
@@ -395,7 +395,7 @@ public class ProxyServer implements AccumuloProxy.Iface {
   }
 
   private List<IteratorSetting> getIteratorSettings(List<org.apache.accumulo.proxy.thrift.IteratorSetting> iterators) {
-    List<IteratorSetting> result = new ArrayList<IteratorSetting>();
+    List<IteratorSetting> result = new ArrayList<>();
     if (iterators != null) {
       for (org.apache.accumulo.proxy.thrift.IteratorSetting is : iterators) {
         result.add(getIteratorSetting(is));
@@ -468,9 +468,9 @@ public class ProxyServer implements AccumuloProxy.Iface {
       org.apache.accumulo.proxy.thrift.AccumuloSecurityException, org.apache.accumulo.proxy.thrift.TableNotFoundException, TException {
     try {
       Map<String,Set<Text>> groups = getConnector(login).tableOperations().getLocalityGroups(tableName);
-      Map<String,Set<String>> ret = new HashMap<String,Set<String>>();
+      Map<String,Set<String>> ret = new HashMap<>();
       for (Entry<String,Set<Text>> entry : groups.entrySet()) {
-        Set<String> value = new HashSet<String>();
+        Set<String> value = new HashSet<>();
         ret.put(entry.getKey(), value);
         for (Text val : entry.getValue()) {
           value.add(val.toString());
@@ -509,7 +509,7 @@ public class ProxyServer implements AccumuloProxy.Iface {
   public Map<String,String> getTableProperties(ByteBuffer login, String tableName) throws org.apache.accumulo.proxy.thrift.AccumuloException,
       org.apache.accumulo.proxy.thrift.AccumuloSecurityException, org.apache.accumulo.proxy.thrift.TableNotFoundException, TException {
     try {
-      Map<String,String> ret = new HashMap<String,String>();
+      Map<String,String> ret = new HashMap<>();
 
       for (Map.Entry<String,String> entry : getConnector(login).tableOperations().getProperties(tableName)) {
         ret.put(entry.getKey(), entry.getValue());
@@ -526,7 +526,7 @@ public class ProxyServer implements AccumuloProxy.Iface {
       org.apache.accumulo.proxy.thrift.AccumuloSecurityException, org.apache.accumulo.proxy.thrift.TableNotFoundException, TException {
     try {
       Collection<Text> splits = getConnector(login).tableOperations().listSplits(tableName, maxSplits);
-      List<ByteBuffer> ret = new ArrayList<ByteBuffer>();
+      List<ByteBuffer> ret = new ArrayList<>();
       for (Text split : splits) {
         ret.add(TextUtil.getByteBuffer(split));
       }
@@ -626,7 +626,7 @@ public class ProxyServer implements AccumuloProxy.Iface {
       throws org.apache.accumulo.proxy.thrift.AccumuloException, org.apache.accumulo.proxy.thrift.AccumuloSecurityException,
       org.apache.accumulo.proxy.thrift.TableNotFoundException, TException {
     try {
-      Map<String,Set<Text>> groups = new HashMap<String,Set<Text>>();
+      Map<String,Set<Text>> groups = new HashMap<>();
       for (Entry<String,Set<String>> groupEntry : groupStrings.entrySet()) {
         groups.put(groupEntry.getKey(), new HashSet<Text>());
         for (String val : groupEntry.getValue()) {
@@ -663,10 +663,10 @@ public class ProxyServer implements AccumuloProxy.Iface {
       org.apache.accumulo.proxy.thrift.AccumuloSecurityException, org.apache.accumulo.proxy.thrift.TableNotFoundException, TException {
     try {
       List<org.apache.accumulo.core.client.admin.DiskUsage> diskUsages = getConnector(login).tableOperations().getDiskUsage(tables);
-      List<DiskUsage> retUsages = new ArrayList<DiskUsage>();
+      List<DiskUsage> retUsages = new ArrayList<>();
       for (org.apache.accumulo.core.client.admin.DiskUsage diskUsage : diskUsages) {
         DiskUsage usage = new DiskUsage();
-        usage.setTables(new ArrayList<String>(diskUsage.getTables()));
+        usage.setTables(new ArrayList<>(diskUsage.getTables()));
         usage.setUsage(diskUsage.getUsage());
         retUsages.add(usage);
       }
@@ -711,7 +711,7 @@ public class ProxyServer implements AccumuloProxy.Iface {
   @Override
   public List<org.apache.accumulo.proxy.thrift.ActiveScan> getActiveScans(ByteBuffer login, String tserver)
       throws org.apache.accumulo.proxy.thrift.AccumuloException, org.apache.accumulo.proxy.thrift.AccumuloSecurityException, TException {
-    List<org.apache.accumulo.proxy.thrift.ActiveScan> result = new ArrayList<org.apache.accumulo.proxy.thrift.ActiveScan>();
+    List<org.apache.accumulo.proxy.thrift.ActiveScan> result = new ArrayList<>();
     try {
       List<ActiveScan> activeScans = getConnector(login).instanceOperations().getActiveScans(tserver);
       for (ActiveScan scan : activeScans) {
@@ -726,7 +726,7 @@ public class ProxyServer implements AccumuloProxy.Iface {
         TabletId e = scan.getTablet();
         pscan.extent = new org.apache.accumulo.proxy.thrift.KeyExtent(e.getTableId().toString(), TextUtil.getByteBuffer(e.getEndRow()),
             TextUtil.getByteBuffer(e.getPrevEndRow()));
-        pscan.columns = new ArrayList<org.apache.accumulo.proxy.thrift.Column>();
+        pscan.columns = new ArrayList<>();
         if (scan.getColumns() != null) {
           for (Column c : scan.getColumns()) {
             org.apache.accumulo.proxy.thrift.Column column = new org.apache.accumulo.proxy.thrift.Column();
@@ -736,7 +736,7 @@ public class ProxyServer implements AccumuloProxy.Iface {
             pscan.columns.add(column);
           }
         }
-        pscan.iterators = new ArrayList<org.apache.accumulo.proxy.thrift.IteratorSetting>();
+        pscan.iterators = new ArrayList<>();
         for (String iteratorString : scan.getSsiList()) {
           String[] parts = iteratorString.split("[=,]");
           if (parts.length == 3) {
@@ -748,7 +748,7 @@ public class ProxyServer implements AccumuloProxy.Iface {
             pscan.iterators.add(settings);
           }
         }
-        pscan.authorizations = new ArrayList<ByteBuffer>();
+        pscan.authorizations = new ArrayList<>();
         if (scan.getAuthorizations() != null) {
           for (byte[] a : scan.getAuthorizations()) {
             pscan.authorizations.add(ByteBuffer.wrap(a));
@@ -768,7 +768,7 @@ public class ProxyServer implements AccumuloProxy.Iface {
       throws org.apache.accumulo.proxy.thrift.AccumuloException, org.apache.accumulo.proxy.thrift.AccumuloSecurityException, TException {
 
     try {
-      List<org.apache.accumulo.proxy.thrift.ActiveCompaction> result = new ArrayList<org.apache.accumulo.proxy.thrift.ActiveCompaction>();
+      List<org.apache.accumulo.proxy.thrift.ActiveCompaction> result = new ArrayList<>();
       List<ActiveCompaction> active = getConnector(login).instanceOperations().getActiveCompactions(tserver);
       for (ActiveCompaction comp : active) {
         org.apache.accumulo.proxy.thrift.ActiveCompaction pcomp = new org.apache.accumulo.proxy.thrift.ActiveCompaction();
@@ -778,7 +778,7 @@ public class ProxyServer implements AccumuloProxy.Iface {
         TabletId e = comp.getTablet();
         pcomp.extent = new org.apache.accumulo.proxy.thrift.KeyExtent(e.getTableId().toString(), TextUtil.getByteBuffer(e.getEndRow()),
             TextUtil.getByteBuffer(e.getPrevEndRow()));
-        pcomp.inputFiles = new ArrayList<String>();
+        pcomp.inputFiles = new ArrayList<>();
         if (comp.getInputFiles() != null) {
           pcomp.inputFiles.addAll(comp.getInputFiles());
         }
@@ -787,7 +787,7 @@ public class ProxyServer implements AccumuloProxy.Iface {
         pcomp.reason = CompactionReason.valueOf(comp.getReason().toString());
         pcomp.type = CompactionType.valueOf(comp.getType().toString());
 
-        pcomp.iterators = new ArrayList<org.apache.accumulo.proxy.thrift.IteratorSetting>();
+        pcomp.iterators = new ArrayList<>();
         if (comp.getIterators() != null) {
           for (IteratorSetting setting : comp.getIterators()) {
             org.apache.accumulo.proxy.thrift.IteratorSetting psetting = new org.apache.accumulo.proxy.thrift.IteratorSetting(setting.getPriority(),
@@ -850,7 +850,7 @@ public class ProxyServer implements AccumuloProxy.Iface {
   public void changeUserAuthorizations(ByteBuffer login, String user, Set<ByteBuffer> authorizations)
       throws org.apache.accumulo.proxy.thrift.AccumuloException, org.apache.accumulo.proxy.thrift.AccumuloSecurityException, TException {
     try {
-      Set<String> auths = new HashSet<String>();
+      Set<String> auths = new HashSet<>();
       for (ByteBuffer auth : authorizations) {
         auths.add(ByteBufferUtil.toString(auth));
       }
@@ -978,7 +978,7 @@ public class ProxyServer implements AccumuloProxy.Iface {
   }
 
   private Authorizations getAuthorizations(Set<ByteBuffer> authorizations) {
-    List<String> auths = new ArrayList<String>();
+    List<String> auths = new ArrayList<>();
     for (ByteBuffer bbauth : authorizations) {
       auths.add(ByteBufferUtil.toString(bbauth));
     }
@@ -1060,7 +1060,7 @@ public class ProxyServer implements AccumuloProxy.Iface {
           }
         }
 
-        ArrayList<Range> ranges = new ArrayList<Range>();
+        ArrayList<Range> ranges = new ArrayList<>();
 
         if (opts.ranges == null) {
           ranges.add(new Range());
@@ -1208,7 +1208,7 @@ public class ProxyServer implements AccumuloProxy.Iface {
     if (bwpe.exception != null)
       return;
 
-    HashMap<Text,ColumnVisibility> vizMap = new HashMap<Text,ColumnVisibility>();
+    HashMap<Text,ColumnVisibility> vizMap = new HashMap<>();
 
     for (Map.Entry<ByteBuffer,List<ColumnUpdate>> entry : cells.entrySet()) {
       Mutation m = new Mutation(ByteBufferUtil.toBytes(entry.getKey()));
@@ -1467,7 +1467,7 @@ public class ProxyServer implements AccumuloProxy.Iface {
       org.apache.accumulo.proxy.thrift.TableNotFoundException, TException {
     try {
       Map<String,EnumSet<IteratorScope>> iterMap = getConnector(login).tableOperations().listIterators(tableName);
-      Map<String,Set<org.apache.accumulo.proxy.thrift.IteratorScope>> result = new HashMap<String,Set<org.apache.accumulo.proxy.thrift.IteratorScope>>();
+      Map<String,Set<org.apache.accumulo.proxy.thrift.IteratorScope>> result = new HashMap<>();
       for (Map.Entry<String,EnumSet<IteratorScope>> entry : iterMap.entrySet()) {
         result.put(entry.getKey(), getProxyIteratorScopes(entry.getValue()));
       }
@@ -1495,7 +1495,7 @@ public class ProxyServer implements AccumuloProxy.Iface {
       org.apache.accumulo.proxy.thrift.TableNotFoundException, TException {
     try {
       Set<Range> ranges = getConnector(login).tableOperations().splitRangeByTablets(tableName, getRange(range), maxSplits);
-      Set<org.apache.accumulo.proxy.thrift.Range> result = new HashSet<org.apache.accumulo.proxy.thrift.Range>();
+      Set<org.apache.accumulo.proxy.thrift.Range> result = new HashSet<>();
       for (Range r : ranges) {
         result.add(getRange(r));
       }
@@ -1650,9 +1650,9 @@ public class ProxyServer implements AccumuloProxy.Iface {
     }
 
     try {
-      HashMap<Text,ColumnVisibility> vizMap = new HashMap<Text,ColumnVisibility>();
+      HashMap<Text,ColumnVisibility> vizMap = new HashMap<>();
 
-      ArrayList<ConditionalMutation> cmuts = new ArrayList<ConditionalMutation>(updates.size());
+      ArrayList<ConditionalMutation> cmuts = new ArrayList<>(updates.size());
       for (Entry<ByteBuffer,ConditionalUpdates> cu : updates.entrySet()) {
         ConditionalMutation cmut = new ConditionalMutation(ByteBufferUtil.toBytes(cu.getKey()));
 
@@ -1684,7 +1684,7 @@ public class ProxyServer implements AccumuloProxy.Iface {
 
       Iterator<Result> results = cw.write(cmuts.iterator());
 
-      HashMap<ByteBuffer,ConditionalStatus> resultMap = new HashMap<ByteBuffer,ConditionalStatus>();
+      HashMap<ByteBuffer,ConditionalStatus> resultMap = new HashMap<>();
 
       while (results.hasNext()) {
         Result result = results.next();

http://git-wip-us.apache.org/repos/asf/accumulo/blob/717febbf/proxy/src/main/java/org/apache/accumulo/proxy/TestProxyClient.java
----------------------------------------------------------------------
diff --git a/proxy/src/main/java/org/apache/accumulo/proxy/TestProxyClient.java b/proxy/src/main/java/org/apache/accumulo/proxy/TestProxyClient.java
index 4894f13..9aeada0 100644
--- a/proxy/src/main/java/org/apache/accumulo/proxy/TestProxyClient.java
+++ b/proxy/src/main/java/org/apache/accumulo/proxy/TestProxyClient.java
@@ -95,7 +95,7 @@ public class TestProxyClient {
 
     TestProxyClient tpc = new TestProxyClient("localhost", 42424);
     String principal = "root";
-    Map<String,String> props = new TreeMap<String,String>();
+    Map<String,String> props = new TreeMap<>();
     props.put("password", "secret");
 
     System.out.println("Logging in");
@@ -126,7 +126,7 @@ public class TestProxyClient {
     Date then = new Date();
     int maxInserts = 1000000;
     String format = "%1$05d";
-    Map<ByteBuffer,List<ColumnUpdate>> mutations = new HashMap<ByteBuffer,List<ColumnUpdate>>();
+    Map<ByteBuffer,List<ColumnUpdate>> mutations = new HashMap<>();
     for (int i = 0; i < maxInserts; i++) {
       String result = String.format(format, i);
       ColumnUpdate update = new ColumnUpdate(ByteBuffer.wrap(("cf" + i).getBytes(UTF_8)), ByteBuffer.wrap(("cq" + i).getBytes(UTF_8)));

http://git-wip-us.apache.org/repos/asf/accumulo/blob/717febbf/proxy/src/test/java/org/apache/accumulo/proxy/ProxyServerTest.java
----------------------------------------------------------------------
diff --git a/proxy/src/test/java/org/apache/accumulo/proxy/ProxyServerTest.java b/proxy/src/test/java/org/apache/accumulo/proxy/ProxyServerTest.java
index 07fdc45..2f64445 100644
--- a/proxy/src/test/java/org/apache/accumulo/proxy/ProxyServerTest.java
+++ b/proxy/src/test/java/org/apache/accumulo/proxy/ProxyServerTest.java
@@ -48,7 +48,7 @@ public class ProxyServerTest {
 
     final ByteBuffer login = ByteBuffer.wrap("my_login".getBytes(UTF_8));
     final String tableName = "table1";
-    final Map<ByteBuffer,List<ColumnUpdate>> cells = new HashMap<ByteBuffer,List<ColumnUpdate>>();
+    final Map<ByteBuffer,List<ColumnUpdate>> cells = new HashMap<>();
 
     EasyMock.expect(server.getWriter(login, tableName, null)).andReturn(bwpe);
     server.addCellsToWriter(cells, bwpe);
@@ -83,7 +83,7 @@ public class ProxyServerTest {
 
     final ByteBuffer login = ByteBuffer.wrap("my_login".getBytes(UTF_8));
     final String tableName = "table1";
-    final Map<ByteBuffer,List<ColumnUpdate>> cells = new HashMap<ByteBuffer,List<ColumnUpdate>>();
+    final Map<ByteBuffer,List<ColumnUpdate>> cells = new HashMap<>();
 
     EasyMock.expect(server.getWriter(login, tableName, null)).andReturn(bwpe);
     server.addCellsToWriter(cells, bwpe);

http://git-wip-us.apache.org/repos/asf/accumulo/blob/717febbf/server/base/src/main/java/org/apache/accumulo/server/Accumulo.java
----------------------------------------------------------------------
diff --git a/server/base/src/main/java/org/apache/accumulo/server/Accumulo.java b/server/base/src/main/java/org/apache/accumulo/server/Accumulo.java
index c888be5..1589e9d 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/Accumulo.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/Accumulo.java
@@ -180,7 +180,7 @@ public class Accumulo {
       throw new RuntimeException("This version of accumulo (" + codeVersion + ") is not compatible with files stored using data version " + dataVersion);
     }
 
-    TreeMap<String,String> sortedProps = new TreeMap<String,String>();
+    TreeMap<String,String> sortedProps = new TreeMap<>();
     for (Entry<String,String> entry : conf)
       sortedProps.put(entry.getKey(), entry.getValue());
 
@@ -311,8 +311,8 @@ public class Accumulo {
    */
   public static void abortIfFateTransactions() {
     try {
-      final ReadOnlyTStore<Accumulo> fate = new ReadOnlyStore<Accumulo>(new ZooStore<Accumulo>(
-          ZooUtil.getRoot(HdfsZooInstance.getInstance()) + Constants.ZFATE, ZooReaderWriter.getInstance()));
+      final ReadOnlyTStore<Accumulo> fate = new ReadOnlyStore<>(new ZooStore<Accumulo>(ZooUtil.getRoot(HdfsZooInstance.getInstance()) + Constants.ZFATE,
+          ZooReaderWriter.getInstance()));
       if (!(fate.list().isEmpty())) {
         throw new AccumuloException("Aborting upgrade because there are outstanding FATE transactions from a previous Accumulo version. "
             + "Please see the README document for instructions on what to do under your previous version.");

http://git-wip-us.apache.org/repos/asf/accumulo/blob/717febbf/server/base/src/main/java/org/apache/accumulo/server/GarbageCollectionLogger.java
----------------------------------------------------------------------
diff --git a/server/base/src/main/java/org/apache/accumulo/server/GarbageCollectionLogger.java b/server/base/src/main/java/org/apache/accumulo/server/GarbageCollectionLogger.java
index 389a544..0ec159e 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/GarbageCollectionLogger.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/GarbageCollectionLogger.java
@@ -30,7 +30,7 @@ import org.slf4j.LoggerFactory;
 public class GarbageCollectionLogger {
   private static final Logger log = LoggerFactory.getLogger(GarbageCollectionLogger.class);
 
-  private final HashMap<String,Long> prevGcTime = new HashMap<String,Long>();
+  private final HashMap<String,Long> prevGcTime = new HashMap<>();
   private long lastMemorySize = 0;
   private long gcTimeIncreasedCount = 0;
   private static long lastMemoryCheckTime = 0;

http://git-wip-us.apache.org/repos/asf/accumulo/blob/717febbf/server/base/src/main/java/org/apache/accumulo/server/ServerConstants.java
----------------------------------------------------------------------
diff --git a/server/base/src/main/java/org/apache/accumulo/server/ServerConstants.java b/server/base/src/main/java/org/apache/accumulo/server/ServerConstants.java
index c268e83..8c64d50 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/ServerConstants.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/ServerConstants.java
@@ -96,7 +96,7 @@ public class ServerConstants {
     String firstDir = null;
     String firstIid = null;
     Integer firstVersion = null;
-    ArrayList<String> baseDirsList = new ArrayList<String>();
+    ArrayList<String> baseDirsList = new ArrayList<>();
     for (String baseDir : configuredBaseDirs) {
       Path path = new Path(baseDir, INSTANCE_ID_DIR);
       String currentIid;
@@ -177,7 +177,7 @@ public class ServerConstants {
         return Collections.emptyList();
 
       String[] pairs = replacements.split(",");
-      List<Pair<Path,Path>> ret = new ArrayList<Pair<Path,Path>>();
+      List<Pair<Path,Path>> ret = new ArrayList<>();
 
       for (String pair : pairs) {
 
@@ -203,10 +203,10 @@ public class ServerConstants {
           throw new IllegalArgumentException(Property.INSTANCE_VOLUMES_REPLACEMENTS.getKey() + " contains " + uris[1] + " which has a syntax error", e);
         }
 
-        ret.add(new Pair<Path,Path>(p1, p2));
+        ret.add(new Pair<>(p1, p2));
       }
 
-      HashSet<Path> baseDirs = new HashSet<Path>();
+      HashSet<Path> baseDirs = new HashSet<>();
       for (String baseDir : getBaseUris()) {
         // normalize using path
         baseDirs.add(new Path(baseDir));

http://git-wip-us.apache.org/repos/asf/accumulo/blob/717febbf/server/base/src/main/java/org/apache/accumulo/server/client/BulkImporter.java
----------------------------------------------------------------------
diff --git a/server/base/src/main/java/org/apache/accumulo/server/client/BulkImporter.java b/server/base/src/main/java/org/apache/accumulo/server/client/BulkImporter.java
index 37bb041..32ee373 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/client/BulkImporter.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/client/BulkImporter.java
@@ -81,7 +81,7 @@ public class BulkImporter {
   public static List<String> bulkLoad(ClientContext context, long tid, String tableId, List<String> files, String errorDir, boolean setTime)
       throws IOException, AccumuloException, AccumuloSecurityException, ThriftTableOperationException {
     AssignmentStats stats = new BulkImporter(context, tid, tableId, setTime).importFiles(files, new Path(errorDir));
-    List<String> result = new ArrayList<String>();
+    List<String> result = new ArrayList<>();
     for (Path p : stats.completeFailures.keySet()) {
       result.add(p.toString());
     }
@@ -112,14 +112,14 @@ public class BulkImporter {
     int numThreads = context.getConfiguration().getCount(Property.TSERV_BULK_PROCESS_THREADS);
     int numAssignThreads = context.getConfiguration().getCount(Property.TSERV_BULK_ASSIGNMENT_THREADS);
 
-    timer = new StopWatch<Timers>(Timers.class);
+    timer = new StopWatch<>(Timers.class);
     timer.start(Timers.TOTAL);
 
     Configuration conf = CachedConfiguration.getInstance();
     VolumeManagerImpl.get(context.getConfiguration());
     final VolumeManager fs = VolumeManagerImpl.get(context.getConfiguration());
 
-    Set<Path> paths = new HashSet<Path>();
+    Set<Path> paths = new HashSet<>();
     for (String file : files) {
       paths.add(new Path(file));
     }
@@ -171,7 +171,7 @@ public class BulkImporter {
       Map<Path,List<KeyExtent>> assignmentFailures = assignMapFiles(context, conf, fs, tableId, assignments, paths, numAssignThreads, numThreads);
       assignmentStats.assignmentsFailed(assignmentFailures);
 
-      Map<Path,Integer> failureCount = new TreeMap<Path,Integer>();
+      Map<Path,Integer> failureCount = new TreeMap<>();
 
       for (Entry<Path,List<KeyExtent>> entry : assignmentFailures.entrySet())
         failureCount.put(entry.getKey(), 1);
@@ -198,7 +198,7 @@ public class BulkImporter {
         for (Entry<Path,List<KeyExtent>> entry : assignmentFailures.entrySet()) {
           Iterator<KeyExtent> keListIter = entry.getValue().iterator();
 
-          List<TabletLocation> tabletsToAssignMapFileTo = new ArrayList<TabletLocation>();
+          List<TabletLocation> tabletsToAssignMapFileTo = new ArrayList<>();
 
           while (keListIter.hasNext()) {
             KeyExtent ke = keListIter.next();
@@ -272,7 +272,7 @@ public class BulkImporter {
 
       totalTime += timer.get(t);
     }
-    List<String> files = new ArrayList<String>();
+    List<String> files = new ArrayList<>();
     for (Path path : paths) {
       files.add(path.getName());
     }
@@ -325,7 +325,7 @@ public class BulkImporter {
   }
 
   private static List<KeyExtent> extentsOf(List<TabletLocation> locations) {
-    List<KeyExtent> result = new ArrayList<KeyExtent>(locations.size());
+    List<KeyExtent> result = new ArrayList<>(locations.size());
     for (TabletLocation tl : locations)
       result.add(tl.tablet_extent);
     return result;
@@ -335,7 +335,7 @@ public class BulkImporter {
       Map<Path,List<TabletLocation>> assignments, Collection<Path> paths, int numThreads) {
 
     long t1 = System.currentTimeMillis();
-    final Map<Path,Long> mapFileSizes = new TreeMap<Path,Long>();
+    final Map<Path,Long> mapFileSizes = new TreeMap<>();
 
     try {
       for (Path path : paths) {
@@ -375,13 +375,13 @@ public class BulkImporter {
 
           if (estimatedSizes == null) {
             // estimation failed, do a simple estimation
-            estimatedSizes = new TreeMap<KeyExtent,Long>();
+            estimatedSizes = new TreeMap<>();
             long estSize = (long) (mapFileSizes.get(entry.getKey()) / (double) entry.getValue().size());
             for (TabletLocation tl : entry.getValue())
               estimatedSizes.put(tl.tablet_extent, estSize);
           }
 
-          List<AssignmentInfo> assignmentInfoList = new ArrayList<AssignmentInfo>(estimatedSizes.size());
+          List<AssignmentInfo> assignmentInfoList = new ArrayList<>(estimatedSizes.size());
 
           for (Entry<KeyExtent,Long> entry2 : estimatedSizes.entrySet())
             assignmentInfoList.add(new AssignmentInfo(entry2.getKey(), entry2.getValue()));
@@ -412,7 +412,7 @@ public class BulkImporter {
   }
 
   private static Map<KeyExtent,String> locationsOf(Map<Path,List<TabletLocation>> assignments) {
-    Map<KeyExtent,String> result = new HashMap<KeyExtent,String>();
+    Map<KeyExtent,String> result = new HashMap<>();
     for (List<TabletLocation> entry : assignments.values()) {
       for (TabletLocation tl : entry) {
         result.put(tl.tablet_extent, tl.tablet_location);
@@ -454,7 +454,7 @@ public class BulkImporter {
           for (PathSize pathSize : mapFiles) {
             List<KeyExtent> existingFailures = assignmentFailures.get(pathSize.path);
             if (existingFailures == null) {
-              existingFailures = new ArrayList<KeyExtent>();
+              existingFailures = new ArrayList<>();
               assignmentFailures.put(pathSize.path, existingFailures);
             }
 
@@ -468,7 +468,7 @@ public class BulkImporter {
 
     @Override
     public void run() {
-      HashSet<Path> uniqMapFiles = new HashSet<Path>();
+      HashSet<Path> uniqMapFiles = new HashSet<>();
       for (List<PathSize> mapFiles : assignmentsPerTablet.values())
         for (PathSize ps : mapFiles)
           uniqMapFiles.add(ps.path);
@@ -505,7 +505,7 @@ public class BulkImporter {
   private Map<Path,List<KeyExtent>> assignMapFiles(String tableName, Map<Path,List<AssignmentInfo>> assignments, Map<KeyExtent,String> locations, int numThreads) {
 
     // group assignments by tablet
-    Map<KeyExtent,List<PathSize>> assignmentsPerTablet = new TreeMap<KeyExtent,List<PathSize>>();
+    Map<KeyExtent,List<PathSize>> assignmentsPerTablet = new TreeMap<>();
     for (Entry<Path,List<AssignmentInfo>> entry : assignments.entrySet()) {
       Path mapFile = entry.getKey();
       List<AssignmentInfo> tabletsToAssignMapFileTo = entry.getValue();
@@ -513,7 +513,7 @@ public class BulkImporter {
       for (AssignmentInfo ai : tabletsToAssignMapFileTo) {
         List<PathSize> mapFiles = assignmentsPerTablet.get(ai.ke);
         if (mapFiles == null) {
-          mapFiles = new ArrayList<PathSize>();
+          mapFiles = new ArrayList<>();
           assignmentsPerTablet.put(ai.ke, mapFiles);
         }
 
@@ -525,7 +525,7 @@ public class BulkImporter {
 
     Map<Path,List<KeyExtent>> assignmentFailures = Collections.synchronizedMap(new TreeMap<Path,List<KeyExtent>>());
 
-    TreeMap<String,Map<KeyExtent,List<PathSize>>> assignmentsPerTabletServer = new TreeMap<String,Map<KeyExtent,List<PathSize>>>();
+    TreeMap<String,Map<KeyExtent,List<PathSize>>> assignmentsPerTabletServer = new TreeMap<>();
 
     for (Entry<KeyExtent,List<PathSize>> entry : assignmentsPerTablet.entrySet()) {
       KeyExtent ke = entry.getKey();
@@ -536,7 +536,7 @@ public class BulkImporter {
           synchronized (assignmentFailures) {
             List<KeyExtent> failures = assignmentFailures.get(pathSize.path);
             if (failures == null) {
-              failures = new ArrayList<KeyExtent>();
+              failures = new ArrayList<>();
               assignmentFailures.put(pathSize.path, failures);
             }
 
@@ -551,7 +551,7 @@ public class BulkImporter {
 
       Map<KeyExtent,List<PathSize>> apt = assignmentsPerTabletServer.get(location);
       if (apt == null) {
-        apt = new TreeMap<KeyExtent,List<PathSize>>();
+        apt = new TreeMap<>();
         assignmentsPerTabletServer.put(location, apt);
       }
 
@@ -585,9 +585,9 @@ public class BulkImporter {
       long timeInMillis = context.getConfiguration().getTimeInMillis(Property.TSERV_BULK_TIMEOUT);
       TabletClientService.Iface client = ThriftUtil.getTServerClient(location, context, timeInMillis);
       try {
-        HashMap<KeyExtent,Map<String,org.apache.accumulo.core.data.thrift.MapFileInfo>> files = new HashMap<KeyExtent,Map<String,org.apache.accumulo.core.data.thrift.MapFileInfo>>();
+        HashMap<KeyExtent,Map<String,org.apache.accumulo.core.data.thrift.MapFileInfo>> files = new HashMap<>();
         for (Entry<KeyExtent,List<PathSize>> entry : assignmentsPerTablet.entrySet()) {
-          HashMap<String,org.apache.accumulo.core.data.thrift.MapFileInfo> tabletFiles = new HashMap<String,org.apache.accumulo.core.data.thrift.MapFileInfo>();
+          HashMap<String,org.apache.accumulo.core.data.thrift.MapFileInfo> tabletFiles = new HashMap<>();
           files.put(entry.getKey(), tabletFiles);
 
           for (PathSize pathSize : entry.getValue()) {
@@ -636,7 +636,7 @@ public class BulkImporter {
 
   public static List<TabletLocation> findOverlappingTablets(ClientContext context, VolumeManager vm, TabletLocator locator, Path file, Text startRow,
       Text endRow) throws Exception {
-    List<TabletLocation> result = new ArrayList<TabletLocation>();
+    List<TabletLocation> result = new ArrayList<>();
     Collection<ByteSequence> columnFamilies = Collections.emptyList();
     String filename = file.toString();
     // log.debug(filename + " finding overlapping tablets " + startRow + " -> " + endRow);
@@ -678,7 +678,7 @@ public class BulkImporter {
     private Set<Path> failedFailures = null;
 
     AssignmentStats(int fileCount) {
-      counts = new HashMap<KeyExtent,Integer>();
+      counts = new HashMap<>();
       numUniqueMapFiles = fileCount;
     }
 
@@ -749,7 +749,7 @@ public class BulkImporter {
       stddev = stddev / counts.size();
       stddev = Math.sqrt(stddev);
 
-      Set<KeyExtent> failedTablets = new HashSet<KeyExtent>();
+      Set<KeyExtent> failedTablets = new HashSet<>();
       for (List<KeyExtent> ft : completeFailures.values())
         failedTablets.addAll(ft);
 

http://git-wip-us.apache.org/repos/asf/accumulo/blob/717febbf/server/base/src/main/java/org/apache/accumulo/server/client/ClientServiceHandler.java
----------------------------------------------------------------------
diff --git a/server/base/src/main/java/org/apache/accumulo/server/client/ClientServiceHandler.java b/server/base/src/main/java/org/apache/accumulo/server/client/ClientServiceHandler.java
index 588e3e0..fbfb492 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/client/ClientServiceHandler.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/client/ClientServiceHandler.java
@@ -258,7 +258,7 @@ public class ClientServiceHandler implements ClientService.Iface {
     security.authenticateUser(credentials, credentials);
     conf.invalidateCache();
 
-    Map<String,String> result = new HashMap<String,String>();
+    Map<String,String> result = new HashMap<>();
     for (Entry<String,String> entry : conf) {
       String key = entry.getKey();
       if (!Property.isSensitive(key))
@@ -411,7 +411,7 @@ public class ClientServiceHandler implements ClientService.Iface {
   @Override
   public List<TDiskUsage> getDiskUsage(Set<String> tables, TCredentials credentials) throws ThriftTableOperationException, ThriftSecurityException, TException {
     try {
-      HashSet<String> tableIds = new HashSet<String>();
+      HashSet<String> tableIds = new HashSet<>();
 
       for (String table : tables) {
         // ensure that table table exists
@@ -425,9 +425,9 @@ public class ClientServiceHandler implements ClientService.Iface {
       // use the same set of tableIds that were validated above to avoid race conditions
       Map<TreeSet<String>,Long> diskUsage = TableDiskUsage.getDiskUsage(context.getServerConfigurationFactory().getConfiguration(), tableIds, fs,
           context.getConnector());
-      List<TDiskUsage> retUsages = new ArrayList<TDiskUsage>();
+      List<TDiskUsage> retUsages = new ArrayList<>();
       for (Map.Entry<TreeSet<String>,Long> usageItem : diskUsage.entrySet()) {
-        retUsages.add(new TDiskUsage(new ArrayList<String>(usageItem.getKey()), usageItem.getValue()));
+        retUsages.add(new TDiskUsage(new ArrayList<>(usageItem.getKey()), usageItem.getValue()));
       }
       return retUsages;
 

http://git-wip-us.apache.org/repos/asf/accumulo/blob/717febbf/server/base/src/main/java/org/apache/accumulo/server/conf/NamespaceConfiguration.java
----------------------------------------------------------------------
diff --git a/server/base/src/main/java/org/apache/accumulo/server/conf/NamespaceConfiguration.java b/server/base/src/main/java/org/apache/accumulo/server/conf/NamespaceConfiguration.java
index 3d19723..1ca083e 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/conf/NamespaceConfiguration.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/conf/NamespaceConfiguration.java
@@ -38,7 +38,7 @@ import com.google.common.base.Predicate;
 public class NamespaceConfiguration extends ObservableConfiguration {
   private static final Logger log = LoggerFactory.getLogger(NamespaceConfiguration.class);
 
-  private static final Map<PropCacheKey,ZooCache> propCaches = new java.util.HashMap<PropCacheKey,ZooCache>();
+  private static final Map<PropCacheKey,ZooCache> propCaches = new java.util.HashMap<>();
 
   private final AccumuloConfiguration parent;
   private ZooCachePropertyAccessor propCacheAccessor = null;

http://git-wip-us.apache.org/repos/asf/accumulo/blob/717febbf/server/base/src/main/java/org/apache/accumulo/server/conf/ServerConfigurationFactory.java
----------------------------------------------------------------------
diff --git a/server/base/src/main/java/org/apache/accumulo/server/conf/ServerConfigurationFactory.java b/server/base/src/main/java/org/apache/accumulo/server/conf/ServerConfigurationFactory.java
index 6976bab..2995272 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/conf/ServerConfigurationFactory.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/conf/ServerConfigurationFactory.java
@@ -34,9 +34,9 @@ import org.apache.accumulo.fate.zookeeper.ZooCacheFactory;
  */
 public class ServerConfigurationFactory extends ServerConfiguration {
 
-  private static final Map<String,Map<String,TableConfiguration>> tableConfigs = new HashMap<String,Map<String,TableConfiguration>>(1);
-  private static final Map<String,Map<String,NamespaceConfiguration>> namespaceConfigs = new HashMap<String,Map<String,NamespaceConfiguration>>(1);
-  private static final Map<String,Map<String,NamespaceConfiguration>> tableParentConfigs = new HashMap<String,Map<String,NamespaceConfiguration>>(1);
+  private static final Map<String,Map<String,TableConfiguration>> tableConfigs = new HashMap<>(1);
+  private static final Map<String,Map<String,NamespaceConfiguration>> namespaceConfigs = new HashMap<>(1);
+  private static final Map<String,Map<String,NamespaceConfiguration>> tableParentConfigs = new HashMap<>(1);
 
   private static void addInstanceToCaches(String iid) {
     synchronized (tableConfigs) {

http://git-wip-us.apache.org/repos/asf/accumulo/blob/717febbf/server/base/src/main/java/org/apache/accumulo/server/conf/TableConfiguration.java
----------------------------------------------------------------------
diff --git a/server/base/src/main/java/org/apache/accumulo/server/conf/TableConfiguration.java b/server/base/src/main/java/org/apache/accumulo/server/conf/TableConfiguration.java
index 915122b..9d87623 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/conf/TableConfiguration.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/conf/TableConfiguration.java
@@ -35,7 +35,7 @@ import com.google.common.base.Predicate;
 public class TableConfiguration extends ObservableConfiguration {
   private static final Logger log = LoggerFactory.getLogger(TableConfiguration.class);
 
-  private static final Map<PropCacheKey,ZooCache> propCaches = new java.util.HashMap<PropCacheKey,ZooCache>();
+  private static final Map<PropCacheKey,ZooCache> propCaches = new java.util.HashMap<>();
 
   private ZooCachePropertyAccessor propCacheAccessor = null;
   private final Instance instance;

http://git-wip-us.apache.org/repos/asf/accumulo/blob/717febbf/server/base/src/main/java/org/apache/accumulo/server/conf/ZooConfigurationFactory.java
----------------------------------------------------------------------
diff --git a/server/base/src/main/java/org/apache/accumulo/server/conf/ZooConfigurationFactory.java b/server/base/src/main/java/org/apache/accumulo/server/conf/ZooConfigurationFactory.java
index 6c8ceca..0cfbff8 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/conf/ZooConfigurationFactory.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/conf/ZooConfigurationFactory.java
@@ -35,7 +35,7 @@ import org.apache.hadoop.fs.Path;
  * A factory for {@link ZooConfiguration} objects.
  */
 class ZooConfigurationFactory {
-  private static final Map<String,ZooConfiguration> instances = new HashMap<String,ZooConfiguration>();
+  private static final Map<String,ZooConfiguration> instances = new HashMap<>();
 
   /**
    * Gets a configuration object for the given instance with the given parent. Repeated calls will return the same object.

http://git-wip-us.apache.org/repos/asf/accumulo/blob/717febbf/server/base/src/main/java/org/apache/accumulo/server/constraints/MetadataConstraints.java
----------------------------------------------------------------------
diff --git a/server/base/src/main/java/org/apache/accumulo/server/constraints/MetadataConstraints.java b/server/base/src/main/java/org/apache/accumulo/server/constraints/MetadataConstraints.java
index fd5af14..7815a3d 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/constraints/MetadataConstraints.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/constraints/MetadataConstraints.java
@@ -64,12 +64,12 @@ public class MetadataConstraints implements Constraint {
     }
   }
 
-  private static final HashSet<ColumnFQ> validColumnQuals = new HashSet<ColumnFQ>(Arrays.asList(TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN,
+  private static final HashSet<ColumnFQ> validColumnQuals = new HashSet<>(Arrays.asList(TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN,
       TabletsSection.TabletColumnFamily.OLD_PREV_ROW_COLUMN, TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN,
       TabletsSection.TabletColumnFamily.SPLIT_RATIO_COLUMN, TabletsSection.ServerColumnFamily.TIME_COLUMN, TabletsSection.ServerColumnFamily.LOCK_COLUMN,
       TabletsSection.ServerColumnFamily.FLUSH_COLUMN, TabletsSection.ServerColumnFamily.COMPACT_COLUMN));
 
-  private static final HashSet<Text> validColumnFams = new HashSet<Text>(Arrays.asList(TabletsSection.BulkFileColumnFamily.NAME, LogColumnFamily.NAME,
+  private static final HashSet<Text> validColumnFams = new HashSet<>(Arrays.asList(TabletsSection.BulkFileColumnFamily.NAME, LogColumnFamily.NAME,
       ScanFileColumnFamily.NAME, DataFileColumnFamily.NAME, TabletsSection.CurrentLocationColumnFamily.NAME, TabletsSection.LastLocationColumnFamily.NAME,
       TabletsSection.FutureLocationColumnFamily.NAME, ChoppedColumnFamily.NAME, ClonedColumnFamily.NAME));
 
@@ -86,7 +86,7 @@ public class MetadataConstraints implements Constraint {
 
   static private ArrayList<Short> addViolation(ArrayList<Short> lst, int violation) {
     if (lst == null)
-      lst = new ArrayList<Short>();
+      lst = new ArrayList<>();
     lst.add((short) violation);
     return lst;
   }
@@ -194,8 +194,8 @@ public class MetadataConstraints implements Constraint {
           // See ACCUMULO-1230.
           boolean isLocationMutation = false;
 
-          HashSet<Text> dataFiles = new HashSet<Text>();
-          HashSet<Text> loadedFiles = new HashSet<Text>();
+          HashSet<Text> dataFiles = new HashSet<>();
+          HashSet<Text> loadedFiles = new HashSet<>();
 
           String tidString = new String(columnUpdate.getValue(), UTF_8);
           int otherTidCount = 0;

http://git-wip-us.apache.org/repos/asf/accumulo/blob/717febbf/server/base/src/main/java/org/apache/accumulo/server/fs/PerTableVolumeChooser.java
----------------------------------------------------------------------
diff --git a/server/base/src/main/java/org/apache/accumulo/server/fs/PerTableVolumeChooser.java b/server/base/src/main/java/org/apache/accumulo/server/fs/PerTableVolumeChooser.java
index e51df03..594a0a2 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/fs/PerTableVolumeChooser.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/fs/PerTableVolumeChooser.java
@@ -32,7 +32,7 @@ public class PerTableVolumeChooser implements VolumeChooser {
   private final VolumeChooser fallbackVolumeChooser = new RandomVolumeChooser();
   // TODO Add hint of expected size to construction, see ACCUMULO-3410
   /* Track VolumeChooser instances so they can keep state. */
-  private final ConcurrentHashMap<String,VolumeChooser> tableSpecificChooser = new ConcurrentHashMap<String,VolumeChooser>();
+  private final ConcurrentHashMap<String,VolumeChooser> tableSpecificChooser = new ConcurrentHashMap<>();
   // TODO has to be lazily initialized currently because of the reliance on HdfsZooInstance. see ACCUMULO-3411
   private volatile ServerConfigurationFactory serverConfs;
 

http://git-wip-us.apache.org/repos/asf/accumulo/blob/717febbf/server/base/src/main/java/org/apache/accumulo/server/fs/PreferredVolumeChooser.java
----------------------------------------------------------------------
diff --git a/server/base/src/main/java/org/apache/accumulo/server/fs/PreferredVolumeChooser.java b/server/base/src/main/java/org/apache/accumulo/server/fs/PreferredVolumeChooser.java
index 6bc225f..6a69d75 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/fs/PreferredVolumeChooser.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/fs/PreferredVolumeChooser.java
@@ -77,7 +77,7 @@ public class PreferredVolumeChooser extends RandomVolumeChooser implements Volum
       serverConfs = localConf;
     }
     TableConfiguration tableConf = localConf.getTableConfiguration(env.getTableId());
-    final Map<String,String> props = new HashMap<String,String>();
+    final Map<String,String> props = new HashMap<>();
     tableConf.getProperties(props, PREFERRED_VOLUMES_FILTER);
     if (props.isEmpty()) {
       log.warn("No preferred volumes specified. Defaulting to randomly choosing from instance volumes");
@@ -93,12 +93,12 @@ public class PreferredVolumeChooser extends RandomVolumeChooser implements Volum
     // If the preferred volumes property was specified, split the returned string on the comma and use it to filter the given options.
     Set<String> preferred = parsedPreferredVolumes.get(volumes);
     if (preferred == null) {
-      preferred = new HashSet<String>(Arrays.asList(StringUtils.split(volumes, ',')));
+      preferred = new HashSet<>(Arrays.asList(StringUtils.split(volumes, ',')));
       parsedPreferredVolumes.put(volumes, preferred);
     }
 
     // Only keep the options that are in the preferred set
-    final ArrayList<String> filteredOptions = new ArrayList<String>(Arrays.asList(options));
+    final ArrayList<String> filteredOptions = new ArrayList<>(Arrays.asList(options));
     filteredOptions.retainAll(preferred);
 
     // If there are no preferred volumes left, then warn the user and choose randomly from the instance volumes

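The PreferredVolumeChooser hunk above parses the comma-separated preferred-volumes property, caches the parsed set, and then retains only those candidate volumes that appear in it. A minimal sketch of just the filtering step, under the assumption of a plain String[] of candidates and using String.split in place of commons-lang StringUtils.split (all names below are illustrative):

import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

class PreferredVolumeFilterSketch {

  // Keep only the candidate volumes named in the comma-separated preference list.
  static List<String> filterPreferred(String[] options, String preferredCsv) {
    Set<String> preferred = new HashSet<>(Arrays.asList(preferredCsv.split(",")));
    List<String> filtered = new ArrayList<>(Arrays.asList(options));
    filtered.retainAll(preferred);
    return filtered;
  }

  public static void main(String[] args) {
    String[] options = {"hdfs://nn1/accumulo", "hdfs://nn2/accumulo", "file:///tmp/accumulo"};
    // Only the volume named in the preference string survives the filter.
    System.out.println(filterPreferred(options, "hdfs://nn2/accumulo"));
  }
}
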
http://git-wip-us.apache.org/repos/asf/accumulo/blob/717febbf/server/base/src/main/java/org/apache/accumulo/server/fs/VolumeManagerImpl.java
----------------------------------------------------------------------
diff --git a/server/base/src/main/java/org/apache/accumulo/server/fs/VolumeManagerImpl.java b/server/base/src/main/java/org/apache/accumulo/server/fs/VolumeManagerImpl.java
index a496fa1..37fcb0d 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/fs/VolumeManagerImpl.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/fs/VolumeManagerImpl.java
@@ -65,7 +65,7 @@ public class VolumeManagerImpl implements VolumeManager {
 
   private static final Logger log = LoggerFactory.getLogger(VolumeManagerImpl.class);
 
-  private static final HashSet<String> WARNED_ABOUT_SYNCONCLOSE = new HashSet<String>();
+  private static final HashSet<String> WARNED_ABOUT_SYNCONCLOSE = new HashSet<>();
 
   Map<String,Volume> volumesByName;
   Multimap<URI,Volume> volumesByFileSystemUri;
@@ -322,7 +322,7 @@ public class VolumeManagerImpl implements VolumeManager {
   }
 
   public static VolumeManager get(AccumuloConfiguration conf, final Configuration hadoopConf) throws IOException {
-    final Map<String,Volume> volumes = new HashMap<String,Volume>();
+    final Map<String,Volume> volumes = new HashMap<>();
 
     // The "default" Volume for Accumulo (in case no volumes are specified)
     for (String volumeUriOrDir : VolumeConfiguration.getVolumeUris(conf, hadoopConf)) {

http://git-wip-us.apache.org/repos/asf/accumulo/blob/717febbf/server/base/src/main/java/org/apache/accumulo/server/fs/VolumeUtil.java
----------------------------------------------------------------------
diff --git a/server/base/src/main/java/org/apache/accumulo/server/fs/VolumeUtil.java b/server/base/src/main/java/org/apache/accumulo/server/fs/VolumeUtil.java
index c3595cd..1aded1e 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/fs/VolumeUtil.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/fs/VolumeUtil.java
@@ -127,7 +127,7 @@ public class VolumeUtil {
     else
       switchedPath = le.filename;
 
-    ArrayList<String> switchedLogs = new ArrayList<String>();
+    ArrayList<String> switchedLogs = new ArrayList<>();
     for (String log : le.logSet) {
       String switchedLog = switchVolume(le.filename, FileType.WAL, replacements);
       if (switchedLog != null) {
@@ -159,8 +159,8 @@ public class VolumeUtil {
     public SortedMap<FileRef,DataFileValue> datafiles;
 
     public TabletFiles() {
-      logEntries = new ArrayList<LogEntry>();
-      datafiles = new TreeMap<FileRef,DataFileValue>();
+      logEntries = new ArrayList<>();
+      datafiles = new TreeMap<>();
     }
 
     public TabletFiles(String dir, List<LogEntry> logEntries, SortedMap<FileRef,DataFileValue> datafiles) {
@@ -191,11 +191,11 @@ public class VolumeUtil {
     List<Pair<Path,Path>> replacements = ServerConstants.getVolumeReplacements();
     log.trace("Using volume replacements: " + replacements);
 
-    List<LogEntry> logsToRemove = new ArrayList<LogEntry>();
-    List<LogEntry> logsToAdd = new ArrayList<LogEntry>();
+    List<LogEntry> logsToRemove = new ArrayList<>();
+    List<LogEntry> logsToAdd = new ArrayList<>();
 
-    List<FileRef> filesToRemove = new ArrayList<FileRef>();
-    SortedMap<FileRef,DataFileValue> filesToAdd = new TreeMap<FileRef,DataFileValue>();
+    List<FileRef> filesToRemove = new ArrayList<>();
+    SortedMap<FileRef,DataFileValue> filesToAdd = new TreeMap<>();
 
     TabletFiles ret = new TabletFiles();
 
@@ -347,7 +347,7 @@ public class VolumeUtil {
   }
 
   private static HashSet<String> getFileNames(FileStatus[] filesStatuses) {
-    HashSet<String> names = new HashSet<String>();
+    HashSet<String> names = new HashSet<>();
     for (FileStatus fileStatus : filesStatuses)
       if (fileStatus.isDirectory())
         throw new IllegalArgumentException("expected " + fileStatus.getPath() + " to be a file");

http://git-wip-us.apache.org/repos/asf/accumulo/blob/717febbf/server/base/src/main/java/org/apache/accumulo/server/init/Initialize.java
----------------------------------------------------------------------
diff --git a/server/base/src/main/java/org/apache/accumulo/server/init/Initialize.java b/server/base/src/main/java/org/apache/accumulo/server/init/Initialize.java
index 4e5864e..4886f5b 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/init/Initialize.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/init/Initialize.java
@@ -154,9 +154,9 @@ public class Initialize implements KeywordExecutable {
     return zoo;
   }
 
-  private static HashMap<String,String> initialMetadataConf = new HashMap<String,String>();
-  private static HashMap<String,String> initialMetadataCombinerConf = new HashMap<String,String>();
-  private static HashMap<String,String> initialReplicationTableConf = new HashMap<String,String>();
+  private static HashMap<String,String> initialMetadataConf = new HashMap<>();
+  private static HashMap<String,String> initialMetadataCombinerConf = new HashMap<>();
+  private static HashMap<String,String> initialReplicationTableConf = new HashMap<>();
 
   static {
     initialMetadataConf.put(Property.TABLE_FILE_COMPRESSED_BLOCK_SIZE.getKey(), "32K");
@@ -692,10 +692,10 @@ public class Initialize implements KeywordExecutable {
 
     String[] volumeURIs = VolumeConfiguration.getVolumeUris(SiteConfiguration.getInstance());
 
-    HashSet<String> initializedDirs = new HashSet<String>();
+    HashSet<String> initializedDirs = new HashSet<>();
     initializedDirs.addAll(Arrays.asList(ServerConstants.checkBaseUris(volumeURIs, true)));
 
-    HashSet<String> uinitializedDirs = new HashSet<String>();
+    HashSet<String> uinitializedDirs = new HashSet<>();
     uinitializedDirs.addAll(Arrays.asList(volumeURIs));
     uinitializedDirs.removeAll(initializedDirs);
 

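The Initialize hunk above determines which configured volumes still need initialization as a plain set difference: all configured volume URIs minus those already initialized. A compact sketch of that step with made-up URIs (the method and variable names are illustrative, not the real Initialize API):

import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;

class UninitializedVolumesSketch {

  // Configured volumes that have not been initialized yet still need init.
  static Set<String> uninitialized(String[] configured, Set<String> alreadyInitialized) {
    Set<String> remaining = new HashSet<>(Arrays.asList(configured));
    remaining.removeAll(alreadyInitialized);
    return remaining;
  }

  public static void main(String[] args) {
    String[] configured = {"hdfs://nn1/accumulo", "hdfs://nn2/accumulo"};
    Set<String> done = new HashSet<>(Arrays.asList("hdfs://nn1/accumulo"));
    System.out.println(uninitialized(configured, done)); // only the nn2 volume remains
  }
}
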
http://git-wip-us.apache.org/repos/asf/accumulo/blob/717febbf/server/base/src/main/java/org/apache/accumulo/server/iterators/MetadataBulkLoadFilter.java
----------------------------------------------------------------------
diff --git a/server/base/src/main/java/org/apache/accumulo/server/iterators/MetadataBulkLoadFilter.java b/server/base/src/main/java/org/apache/accumulo/server/iterators/MetadataBulkLoadFilter.java
index 772be32..3a76442 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/iterators/MetadataBulkLoadFilter.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/iterators/MetadataBulkLoadFilter.java
@@ -82,7 +82,7 @@ public class MetadataBulkLoadFilter extends Filter {
       throw new IOException("This iterator not intended for use at scan time");
     }
 
-    bulkTxStatusCache = new HashMap<Long,MetadataBulkLoadFilter.Status>();
+    bulkTxStatusCache = new HashMap<>();
     arbitrator = getArbitrator();
   }
 

http://git-wip-us.apache.org/repos/asf/accumulo/blob/717febbf/server/base/src/main/java/org/apache/accumulo/server/master/LiveTServerSet.java
----------------------------------------------------------------------
diff --git a/server/base/src/main/java/org/apache/accumulo/server/master/LiveTServerSet.java b/server/base/src/main/java/org/apache/accumulo/server/master/LiveTServerSet.java
index 0c0cceb..76ae39e 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/master/LiveTServerSet.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/master/LiveTServerSet.java
@@ -208,12 +208,12 @@ public class LiveTServerSet implements Watcher {
   };
 
   // The set of active tservers with locks, indexed by their name in zookeeper
-  private Map<String,TServerInfo> current = new HashMap<String,TServerInfo>();
+  private Map<String,TServerInfo> current = new HashMap<>();
   // as above, indexed by TServerInstance
-  private Map<TServerInstance,TServerInfo> currentInstances = new HashMap<TServerInstance,TServerInfo>();
+  private Map<TServerInstance,TServerInfo> currentInstances = new HashMap<>();
 
   // The set of entries in zookeeper without locks, and the first time each was noticed
-  private Map<String,Long> locklessServers = new HashMap<String,Long>();
+  private Map<String,Long> locklessServers = new HashMap<>();
 
   public LiveTServerSet(ClientContext context, Listener cback) {
     this.cback = cback;
@@ -238,12 +238,12 @@ public class LiveTServerSet implements Watcher {
 
   public synchronized void scanServers() {
     try {
-      final Set<TServerInstance> updates = new HashSet<TServerInstance>();
-      final Set<TServerInstance> doomed = new HashSet<TServerInstance>();
+      final Set<TServerInstance> updates = new HashSet<>();
+      final Set<TServerInstance> doomed = new HashSet<>();
 
       final String path = ZooUtil.getRoot(context.getInstance()) + Constants.ZTSERVERS;
 
-      HashSet<String> all = new HashSet<String>(current.keySet());
+      HashSet<String> all = new HashSet<>(current.keySet());
       all.addAll(getZooCache().getChildren(path));
 
       locklessServers.keySet().retainAll(all);
@@ -332,8 +332,8 @@ public class LiveTServerSet implements Watcher {
 
           String server = event.getPath().substring(pos + 1);
 
-          final Set<TServerInstance> updates = new HashSet<TServerInstance>();
-          final Set<TServerInstance> doomed = new HashSet<TServerInstance>();
+          final Set<TServerInstance> updates = new HashSet<>();
+          final Set<TServerInstance> doomed = new HashSet<>();
 
           final String path = ZooUtil.getRoot(context.getInstance()) + Constants.ZTSERVERS;
 
@@ -359,7 +359,7 @@ public class LiveTServerSet implements Watcher {
   }
 
   public synchronized Set<TServerInstance> getCurrentServers() {
-    return new HashSet<TServerInstance>(currentInstances.keySet());
+    return new HashSet<>(currentInstances.keySet());
   }
 
   public synchronized int size() {

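The getCurrentServers hunk above returns a defensive copy of the key set rather than the live view, so callers get a stable snapshot they can hold or iterate without the lock. A tiny sketch of that pattern (the class below is a stand-in, not LiveTServerSet itself):

import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

class DefensiveCopySketch {
  private final Map<String,Integer> current = new HashMap<>();

  synchronized void add(String server) {
    current.put(server, current.size());
  }

  // Returning a copy means later changes to 'current' do not affect the caller's set.
  synchronized Set<String> currentServers() {
    return new HashSet<>(current.keySet());
  }

  public static void main(String[] args) {
    DefensiveCopySketch s = new DefensiveCopySketch();
    s.add("tserver1:9997");
    Set<String> snapshot = s.currentServers();
    s.add("tserver2:9997");
    System.out.println(snapshot.size() + " vs " + s.currentServers().size()); // 1 vs 2
  }
}
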
http://git-wip-us.apache.org/repos/asf/accumulo/blob/717febbf/server/base/src/main/java/org/apache/accumulo/server/master/balancer/ChaoticLoadBalancer.java
----------------------------------------------------------------------
diff --git a/server/base/src/main/java/org/apache/accumulo/server/master/balancer/ChaoticLoadBalancer.java b/server/base/src/main/java/org/apache/accumulo/server/master/balancer/ChaoticLoadBalancer.java
index fd3dfd8..112fcc1 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/master/balancer/ChaoticLoadBalancer.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/master/balancer/ChaoticLoadBalancer.java
@@ -69,8 +69,8 @@ public class ChaoticLoadBalancer extends TabletBalancer {
       Map<KeyExtent,TServerInstance> assignments) {
     long total = assignments.size() + unassigned.size();
     long avg = (long) Math.ceil(((double) total) / current.size());
-    Map<TServerInstance,Long> toAssign = new HashMap<TServerInstance,Long>();
-    List<TServerInstance> tServerArray = new ArrayList<TServerInstance>();
+    Map<TServerInstance,Long> toAssign = new HashMap<>();
+    List<TServerInstance> tServerArray = new ArrayList<>();
     for (Entry<TServerInstance,TabletServerStatus> e : current.entrySet()) {
       long numTablets = 0;
       for (TableInfo ti : e.getValue().getTableMap().values()) {
@@ -105,8 +105,8 @@ public class ChaoticLoadBalancer extends TabletBalancer {
 
   @Override
   public long balance(SortedMap<TServerInstance,TabletServerStatus> current, Set<KeyExtent> migrations, List<TabletMigration> migrationsOut) {
-    Map<TServerInstance,Long> numTablets = new HashMap<TServerInstance,Long>();
-    List<TServerInstance> underCapacityTServer = new ArrayList<TServerInstance>();
+    Map<TServerInstance,Long> numTablets = new HashMap<>();
+    List<TServerInstance> underCapacityTServer = new ArrayList<>();
 
     if (!migrations.isEmpty()) {
       outstandingMigrations.migrations = migrations;

http://git-wip-us.apache.org/repos/asf/accumulo/blob/717febbf/server/base/src/main/java/org/apache/accumulo/server/master/balancer/DefaultLoadBalancer.java
----------------------------------------------------------------------
diff --git a/server/base/src/main/java/org/apache/accumulo/server/master/balancer/DefaultLoadBalancer.java b/server/base/src/main/java/org/apache/accumulo/server/master/balancer/DefaultLoadBalancer.java
index 56b3839..c31eb37 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/master/balancer/DefaultLoadBalancer.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/master/balancer/DefaultLoadBalancer.java
@@ -53,7 +53,7 @@ public class DefaultLoadBalancer extends TabletBalancer {
   }
 
   List<TServerInstance> randomize(Set<TServerInstance> locations) {
-    List<TServerInstance> result = new ArrayList<TServerInstance>(locations);
+    List<TServerInstance> result = new ArrayList<>(locations);
     Collections.shuffle(result);
     return result;
   }
@@ -123,11 +123,11 @@ public class DefaultLoadBalancer extends TabletBalancer {
       if (current.size() < 2) {
         return false;
       }
-      final Map<String,Map<KeyExtent,TabletStats>> donerTabletStats = new HashMap<String,Map<KeyExtent,TabletStats>>();
+      final Map<String,Map<KeyExtent,TabletStats>> donerTabletStats = new HashMap<>();
 
       // Sort by total number of online tablets, per server
       int total = 0;
-      ArrayList<ServerCounts> totals = new ArrayList<ServerCounts>();
+      ArrayList<ServerCounts> totals = new ArrayList<>();
       for (Entry<TServerInstance,TabletServerStatus> entry : current.entrySet()) {
         int serverTotal = 0;
         if (entry.getValue() != null && entry.getValue().tableMap != null) {
@@ -197,7 +197,7 @@ public class DefaultLoadBalancer extends TabletBalancer {
    */
   List<TabletMigration> move(ServerCounts tooMuch, ServerCounts tooLittle, int count, Map<String,Map<KeyExtent,TabletStats>> donerTabletStats) {
 
-    List<TabletMigration> result = new ArrayList<TabletMigration>();
+    List<TabletMigration> result = new ArrayList<>();
     if (count == 0)
       return result;
 
@@ -235,7 +235,7 @@ public class DefaultLoadBalancer extends TabletBalancer {
       Map<KeyExtent,TabletStats> onlineTabletsForTable = donerTabletStats.get(table);
       try {
         if (onlineTabletsForTable == null) {
-          onlineTabletsForTable = new HashMap<KeyExtent,TabletStats>();
+          onlineTabletsForTable = new HashMap<>();
           List<TabletStats> stats = getOnlineTabletsForTable(tooMuch.server, table);
           if (null == stats) {
             log.warn("Unable to find tablets to move");
@@ -271,7 +271,7 @@ public class DefaultLoadBalancer extends TabletBalancer {
   }
 
   static Map<String,Integer> tabletCountsPerTable(TabletServerStatus status) {
-    Map<String,Integer> result = new HashMap<String,Integer>();
+    Map<String,Integer> result = new HashMap<>();
     if (status != null && status.tableMap != null) {
       Map<String,TableInfo> tableMap = status.tableMap;
       for (Entry<String,TableInfo> entry : tableMap.entrySet()) {

http://git-wip-us.apache.org/repos/asf/accumulo/blob/717febbf/server/base/src/main/java/org/apache/accumulo/server/master/balancer/GroupBalancer.java
----------------------------------------------------------------------
diff --git a/server/base/src/main/java/org/apache/accumulo/server/master/balancer/GroupBalancer.java b/server/base/src/main/java/org/apache/accumulo/server/master/balancer/GroupBalancer.java
index 2ea8e71..23cb76a 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/master/balancer/GroupBalancer.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/master/balancer/GroupBalancer.java
@@ -148,7 +148,7 @@ public abstract class GroupBalancer extends TabletBalancer {
         }
       }
 
-      tabletsByGroup.add(new ComparablePair<String,KeyExtent>(partitioner.apply(entry.getKey()), entry.getKey()));
+      tabletsByGroup.add(new ComparablePair<>(partitioner.apply(entry.getKey()), entry.getKey()));
     }
 
     Collections.sort(tabletsByGroup);
@@ -531,7 +531,7 @@ public abstract class GroupBalancer extends TabletBalancer {
             if (srcTgi.getExtras().size() <= maxExtraGroups) {
               serversToRemove.add(srcTgi.getTserverInstance());
             } else {
-              serversGroupsToRemove.add(new Pair<String,TServerInstance>(group, srcTgi.getTserverInstance()));
+              serversGroupsToRemove.add(new Pair<>(group, srcTgi.getTserverInstance()));
             }
 
             if (destTgi.getExtras().size() >= maxExtraGroups || moves.size() >= getMaxMigrations()) {
@@ -596,7 +596,7 @@ public abstract class GroupBalancer extends TabletBalancer {
             moves.move(group, 1, srcTgi, destTgi);
 
             if (num == 2) {
-              serversToRemove.add(new Pair<String,TserverGroupInfo>(group, srcTgi));
+              serversToRemove.add(new Pair<>(group, srcTgi));
             }
 
             if (destTgi.getExtras().size() >= maxExtraGroups || moves.size() >= getMaxMigrations()) {
@@ -658,7 +658,7 @@ public abstract class GroupBalancer extends TabletBalancer {
             if (srcTgi.getExtras().size() <= expectedExtra) {
               emptyServers.add(srcTgi.getTserverInstance());
             } else if (srcTgi.getExtras().get(group) == null) {
-              emptyServerGroups.add(new Pair<String,TServerInstance>(group, srcTgi.getTserverInstance()));
+              emptyServerGroups.add(new Pair<>(group, srcTgi.getTserverInstance()));
             }
 
             if (destTgi.getExtras().size() >= expectedExtra || moves.size() >= getMaxMigrations()) {
@@ -764,7 +764,7 @@ public abstract class GroupBalancer extends TabletBalancer {
         }
       }
 
-      return new Pair<KeyExtent,Location>(extent, loc);
+      return new Pair<>(extent, loc);
     }
 
   }

http://git-wip-us.apache.org/repos/asf/accumulo/blob/717febbf/server/base/src/main/java/org/apache/accumulo/server/master/balancer/HostRegexTableLoadBalancer.java
----------------------------------------------------------------------
diff --git a/server/base/src/main/java/org/apache/accumulo/server/master/balancer/HostRegexTableLoadBalancer.java b/server/base/src/main/java/org/apache/accumulo/server/master/balancer/HostRegexTableLoadBalancer.java
index d19ab82..31d8c81 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/master/balancer/HostRegexTableLoadBalancer.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/master/balancer/HostRegexTableLoadBalancer.java
@@ -89,7 +89,7 @@ public class HostRegexTableLoadBalancer extends TableLoadBalancer implements Con
   private volatile long lastOOBCheck = System.currentTimeMillis();
   private volatile long lastPoolRecheck = 0;
   private boolean isIpBasedRegex = false;
-  private Map<String,SortedMap<TServerInstance,TabletServerStatus>> pools = new HashMap<String,SortedMap<TServerInstance,TabletServerStatus>>();
+  private Map<String,SortedMap<TServerInstance,TabletServerStatus>> pools = new HashMap<>();
   private int maxTServerMigrations = HOST_BALANCER_REGEX_MAX_MIGRATIONS_DEFAULT;
 
   /**
@@ -103,13 +103,13 @@ public class HostRegexTableLoadBalancer extends TableLoadBalancer implements Con
   protected synchronized Map<String,SortedMap<TServerInstance,TabletServerStatus>> splitCurrentByRegex(SortedMap<TServerInstance,TabletServerStatus> current) {
     if ((System.currentTimeMillis() - lastPoolRecheck) > poolRecheckMillis) {
       LOG.debug("Performing pool recheck - regrouping tablet servers based on regular expressions");
-      Map<String,SortedMap<TServerInstance,TabletServerStatus>> newPools = new HashMap<String,SortedMap<TServerInstance,TabletServerStatus>>();
+      Map<String,SortedMap<TServerInstance,TabletServerStatus>> newPools = new HashMap<>();
       for (Entry<TServerInstance,TabletServerStatus> e : current.entrySet()) {
         List<String> poolNames = getPoolNamesForHost(e.getKey().host());
         for (String pool : poolNames) {
           SortedMap<TServerInstance,TabletServerStatus> np = newPools.get(pool);
           if (null == np) {
-            np = new TreeMap<TServerInstance,TabletServerStatus>(current.comparator());
+            np = new TreeMap<>(current.comparator());
             newPools.put(pool, np);
           }
           np.put(e.getKey(), e.getValue());
@@ -265,18 +265,18 @@ public class HostRegexTableLoadBalancer extends TableLoadBalancer implements Con
 
     Map<String,SortedMap<TServerInstance,TabletServerStatus>> pools = splitCurrentByRegex(current);
     // group the unassigned into tables
-    Map<String,Map<KeyExtent,TServerInstance>> groupedUnassigned = new HashMap<String,Map<KeyExtent,TServerInstance>>();
+    Map<String,Map<KeyExtent,TServerInstance>> groupedUnassigned = new HashMap<>();
     for (Entry<KeyExtent,TServerInstance> e : unassigned.entrySet()) {
       Map<KeyExtent,TServerInstance> tableUnassigned = groupedUnassigned.get(e.getKey().getTableId().toString());
       if (tableUnassigned == null) {
-        tableUnassigned = new HashMap<KeyExtent,TServerInstance>();
+        tableUnassigned = new HashMap<>();
         groupedUnassigned.put(e.getKey().getTableId().toString(), tableUnassigned);
       }
       tableUnassigned.put(e.getKey(), e.getValue());
     }
     // Send a view of the current servers to the table's tablet balancer
     for (Entry<String,Map<KeyExtent,TServerInstance>> e : groupedUnassigned.entrySet()) {
-      Map<KeyExtent,TServerInstance> newAssignments = new HashMap<KeyExtent,TServerInstance>();
+      Map<KeyExtent,TServerInstance> newAssignments = new HashMap<>();
       String tableName = tableIdToTableName.get(e.getKey());
       String poolName = getPoolNameForTable(tableName);
       SortedMap<TServerInstance,TabletServerStatus> currentView = pools.get(poolName);
@@ -377,7 +377,7 @@ public class HostRegexTableLoadBalancer extends TableLoadBalancer implements Con
             this.poolRecheckMillis);
         continue;
       }
-      ArrayList<TabletMigration> newMigrations = new ArrayList<TabletMigration>();
+      ArrayList<TabletMigration> newMigrations = new ArrayList<>();
       long tableBalanceTime = getBalancerForTable(s).balance(currentView, migrations, newMigrations);
       if (tableBalanceTime < minBalanceTime) {
         minBalanceTime = tableBalanceTime;

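The groupedUnassigned hunk above follows a common group-by idiom: look up the per-key container, create it lazily on first use, then add the entry. A generic sketch of the same shape, grouping words by first letter rather than KeyExtent assignments by table id (types and names are illustrative only):

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

class GroupBySketch {

  public static void main(String[] args) {
    String[] words = {"alpha", "apple", "bravo", "beta", "carrot"};
    Map<Character,List<String>> grouped = new HashMap<>();
    for (String word : words) {
      char key = word.charAt(0);
      // Same shape as the balancer code: fetch the bucket, lazily create it, then add.
      List<String> bucket = grouped.get(key);
      if (bucket == null) {
        bucket = new ArrayList<>();
        grouped.put(key, bucket);
      }
      bucket.add(word);
    }
    System.out.println(grouped); // e.g. {a=[alpha, apple], b=[bravo, beta], c=[carrot]}
  }
}
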
