accumulo-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From ctubb...@apache.org
Subject accumulo git commit: ACCUMULO-3299 Ensure tests run properly scoped
Date Sat, 22 Nov 2014 01:15:34 GMT
Repository: accumulo
Updated Branches:
  refs/heads/master 2ab616a64 -> ccc9a6788


ACCUMULO-3299 Ensure tests run properly scoped

  Ensure some tests which were not running, because of their name are running
  during the build. Made some MiniAccumuloCluster-based tests ITs, and attempted
  to prevent tests from writing to /tmp (use target/ instead).


Project: http://git-wip-us.apache.org/repos/asf/accumulo/repo
Commit: http://git-wip-us.apache.org/repos/asf/accumulo/commit/ccc9a678
Tree: http://git-wip-us.apache.org/repos/asf/accumulo/tree/ccc9a678
Diff: http://git-wip-us.apache.org/repos/asf/accumulo/diff/ccc9a678

Branch: refs/heads/master
Commit: ccc9a6788664b4ddce7ca207f0c24cf28b7b9c8b
Parents: 2ab616a
Author: Christopher Tubbs <ctubbsii@apache.org>
Authored: Fri Nov 21 20:13:24 2014 -0500
Committer: Christopher Tubbs <ctubbsii@apache.org>
Committed: Fri Nov 21 20:13:24 2014 -0500

----------------------------------------------------------------------
 .../accumulo/cluster/AccumuloClustersTest.java  |  10 +-
 .../minicluster/impl/CleanShutdownMacTest.java  |  11 +-
 pom.xml                                         |  12 ++
 .../accumulo/server/util/FileUtilTest.java      |  66 ++++-------
 .../accumulo/test/KeyValueEqualityIT.java       |  75 ++++++++++++
 .../accumulo/test/KeyValueEqualityTest.java     |  75 ------------
 .../test/functional/MetadataMaxFiles.java       | 112 ------------------
 .../test/functional/MetadataMaxFilesIT.java     | 112 ++++++++++++++++++
 .../test/replication/StatusCombinerMacIT.java   | 114 +++++++++++++++++++
 .../test/replication/StatusCombinerMacTest.java | 114 -------------------
 10 files changed, 351 insertions(+), 350 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/accumulo/blob/ccc9a678/minicluster/src/test/java/org/apache/accumulo/cluster/AccumuloClustersTest.java
----------------------------------------------------------------------
diff --git a/minicluster/src/test/java/org/apache/accumulo/cluster/AccumuloClustersTest.java
b/minicluster/src/test/java/org/apache/accumulo/cluster/AccumuloClustersTest.java
index e368240..177a60f 100644
--- a/minicluster/src/test/java/org/apache/accumulo/cluster/AccumuloClustersTest.java
+++ b/minicluster/src/test/java/org/apache/accumulo/cluster/AccumuloClustersTest.java
@@ -23,16 +23,15 @@ import static org.easymock.EasyMock.replay;
 import java.io.File;
 import java.io.IOException;
 
-import org.apache.accumulo.cluster.AccumuloClusters;
 import org.apache.accumulo.minicluster.MiniAccumuloCluster;
 import org.apache.accumulo.minicluster.MiniAccumuloConfig;
 import org.apache.accumulo.minicluster.impl.MiniAccumuloClusterImpl;
 import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
 import org.apache.commons.io.FileUtils;
 import org.junit.Assert;
+import org.junit.Rule;
 import org.junit.Test;
-
-import com.google.common.io.Files;
+import org.junit.rules.TemporaryFolder;
 
 public class AccumuloClustersTest {
 
@@ -46,9 +45,12 @@ public class AccumuloClustersTest {
     cfg.build();
   }
 
+  @Rule
+  public TemporaryFolder tmpDir = new TemporaryFolder(new File(System.getProperty("user.dir") + "/target"));
+
   @Test
   public void testFactoryReturn() throws IOException {
-    File dir = Files.createTempDir();
+    File dir = tmpDir.newFolder();
     try {
       MiniAccumuloConfig cfg = new MiniAccumuloConfig(dir, "foo");
       Assert.assertEquals(MiniAccumuloCluster.class, AccumuloClusters.createMiniCluster(cfg).getClass());

http://git-wip-us.apache.org/repos/asf/accumulo/blob/ccc9a678/minicluster/src/test/java/org/apache/accumulo/minicluster/impl/CleanShutdownMacTest.java
----------------------------------------------------------------------
diff --git a/minicluster/src/test/java/org/apache/accumulo/minicluster/impl/CleanShutdownMacTest.java
b/minicluster/src/test/java/org/apache/accumulo/minicluster/impl/CleanShutdownMacTest.java
index 2ac8d6f..2c32948 100644
--- a/minicluster/src/test/java/org/apache/accumulo/minicluster/impl/CleanShutdownMacTest.java
+++ b/minicluster/src/test/java/org/apache/accumulo/minicluster/impl/CleanShutdownMacTest.java
@@ -23,19 +23,22 @@ import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Future;
 
 import org.easymock.EasyMock;
+import org.junit.Rule;
 import org.junit.Test;
-
-import com.google.common.io.Files;
+import org.junit.rules.TemporaryFolder;
 
 /**
- * 
+ *
  */
 public class CleanShutdownMacTest {
 
+  @Rule
+  public TemporaryFolder tmpDir = new TemporaryFolder(new File(System.getProperty("user.dir") + "/target"));
+
   @SuppressWarnings("unchecked")
   @Test
   public void testExecutorServiceShutdown() throws Exception {
-    File tmp = Files.createTempDir();
+    File tmp = tmpDir.newFolder();
     MiniAccumuloClusterImpl cluster = new MiniAccumuloClusterImpl(tmp, "foo");
 
     ExecutorService mockService = EasyMock.createMock(ExecutorService.class);

http://git-wip-us.apache.org/repos/asf/accumulo/blob/ccc9a678/pom.xml
----------------------------------------------------------------------
diff --git a/pom.xml b/pom.xml
index 9e62389..31601a1 100644
--- a/pom.xml
+++ b/pom.xml
@@ -648,10 +648,22 @@
           <groupId>org.apache.maven.plugins</groupId>
           <artifactId>maven-surefire-plugin</artifactId>
           <configuration>
+            <systemPropertyVariables>
+              <java.io.tmpdir>${project.build.directory}</java.io.tmpdir>
+            </systemPropertyVariables>
             <argLine>-Xmx1G</argLine>
           </configuration>
         </plugin>
         <plugin>
+          <groupId>org.apache.maven.plugins</groupId>
+          <artifactId>maven-failsafe-plugin</artifactId>
+          <configuration>
+            <systemPropertyVariables>
+              <java.io.tmpdir>${project.build.directory}</java.io.tmpdir>
+            </systemPropertyVariables>
+          </configuration>
+        </plugin>
+        <plugin>
           <groupId>org.asciidoctor</groupId>
           <artifactId>asciidoctor-maven-plugin</artifactId>
           <version>0.1.4</version>

http://git-wip-us.apache.org/repos/asf/accumulo/blob/ccc9a678/server/base/src/test/java/org/apache/accumulo/server/util/FileUtilTest.java
----------------------------------------------------------------------
diff --git a/server/base/src/test/java/org/apache/accumulo/server/util/FileUtilTest.java b/server/base/src/test/java/org/apache/accumulo/server/util/FileUtilTest.java
index 8924738..90c6300 100644
--- a/server/base/src/test/java/org/apache/accumulo/server/util/FileUtilTest.java
+++ b/server/base/src/test/java/org/apache/accumulo/server/util/FileUtilTest.java
@@ -16,6 +16,10 @@
  */
 package org.apache.accumulo.server.util;
 
+import static org.easymock.EasyMock.createMock;
+import static org.easymock.EasyMock.expect;
+import static org.easymock.EasyMock.replay;
+
 import java.io.File;
 import java.io.IOException;
 import java.util.ArrayList;
@@ -31,22 +35,32 @@ import org.apache.accumulo.core.file.FileSKVIterator;
 import org.apache.accumulo.server.fs.FileRef;
 import org.apache.accumulo.server.fs.VolumeManager;
 import org.apache.accumulo.server.fs.VolumeManagerImpl;
-import org.apache.commons.io.FileUtils;
 import org.apache.hadoop.fs.Path;
 import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Rule;
 import org.junit.Test;
-
-import com.google.common.io.Files;
-
-import static org.easymock.EasyMock.createMock;
-import static org.easymock.EasyMock.expect;
-import static org.easymock.EasyMock.replay;
+import org.junit.rules.TemporaryFolder;
+import org.junit.rules.TestName;
 
 /**
- * 
+ *
  */
 public class FileUtilTest {
 
+  @Rule
+  public TemporaryFolder tmpDir = new TemporaryFolder(new File(System.getProperty("user.dir") + "/target"));
+
+  @Rule
+  public TestName testName = new TestName();
+
+  private File accumuloDir;
+
+  @Before
+  public void createTmpDir() throws IOException {
+    accumuloDir = tmpDir.newFolder(testName.getMethodName());
+  }
+
   @Test
   public void testToPathStrings() {
     Collection<FileRef> c = new java.util.ArrayList<FileRef>();
@@ -69,33 +83,24 @@ public class FileUtilTest {
   @SuppressWarnings("deprecation")
   @Test
   public void testCleanupIndexOpWithDfsDir() throws IOException {
-    File dfsDir = Files.createTempDir();
-
-    try {
       // And a "unique" tmp directory for each volume
-      File tmp1 = new File(dfsDir, "tmp");
+    File tmp1 = new File(accumuloDir, "tmp");
       tmp1.mkdirs();
       Path tmpPath1 = new Path(tmp1.toURI());
 
       HashMap<Property,String> testProps = new HashMap<Property,String>();
-      testProps.put(Property.INSTANCE_DFS_DIR, dfsDir.getAbsolutePath());
+    testProps.put(Property.INSTANCE_DFS_DIR, accumuloDir.getAbsolutePath());
 
       AccumuloConfiguration testConf = new FileUtilTestConfiguration(testProps);
-      VolumeManager fs = VolumeManagerImpl.getLocal(dfsDir.getAbsolutePath());
+    VolumeManager fs = VolumeManagerImpl.getLocal(accumuloDir.getAbsolutePath());
 
       FileUtil.cleanupIndexOp(testConf, tmpPath1, fs, new ArrayList<FileSKVIterator>());
 
       Assert.assertFalse("Expected " + tmp1 + " to be cleaned up but it wasn't", tmp1.exists());
-    } finally {
-      FileUtils.deleteQuietly(dfsDir);
-    }
   }
 
   @Test
   public void testCleanupIndexOpWithCommonParentVolume() throws IOException {
-    File accumuloDir = Files.createTempDir();
-
-    try {
       File volumeDir = new File(accumuloDir, "volumes");
       volumeDir.mkdirs();
 
@@ -123,16 +128,10 @@ public class FileUtilTest {
       FileUtil.cleanupIndexOp(testConf, tmpPath2, fs, new ArrayList<FileSKVIterator>());
 
       Assert.assertFalse("Expected " + tmp2 + " to be cleaned up but it wasn't", tmp2.exists());
-    } finally {
-      FileUtils.deleteQuietly(accumuloDir);
-    }
   }
 
   @Test
   public void testCleanupIndexOpWithCommonParentVolumeWithDepth() throws IOException {
-    File accumuloDir = Files.createTempDir();
-
-    try {
       File volumeDir = new File(accumuloDir, "volumes");
       volumeDir.mkdirs();
 
@@ -161,16 +160,10 @@ public class FileUtilTest {
       FileUtil.cleanupIndexOp(testConf, tmpPath2, fs, new ArrayList<FileSKVIterator>());
 
       Assert.assertFalse("Expected " + tmp2 + " to be cleaned up but it wasn't", tmp2.exists());
-    } finally {
-      FileUtils.deleteQuietly(accumuloDir);
-    }
   }
 
   @Test
   public void testCleanupIndexOpWithoutCommonParentVolume() throws IOException {
-    File accumuloDir = Files.createTempDir();
-
-    try {
       // Make some directories to simulate multiple volumes
       File v1 = new File(accumuloDir, "v1"), v2 = new File(accumuloDir, "v2");
       v1.mkdirs();
@@ -195,16 +188,10 @@ public class FileUtilTest {
       FileUtil.cleanupIndexOp(testConf, tmpPath2, fs, new ArrayList<FileSKVIterator>());
 
       Assert.assertFalse("Expected " + tmp2 + " to be cleaned up but it wasn't", tmp2.exists());
-    } finally {
-      FileUtils.deleteQuietly(accumuloDir);
-    }
   }
 
   @Test
   public void testCleanupIndexOpWithoutCommonParentVolumeWithDepth() throws IOException {
-    File accumuloDir = Files.createTempDir();
-
-    try {
       // Make some directories to simulate multiple volumes
       File v1 = new File(accumuloDir, "v1"), v2 = new File(accumuloDir, "v2");
       v1.mkdirs();
@@ -230,9 +217,6 @@ public class FileUtilTest {
       FileUtil.cleanupIndexOp(testConf, tmpPath2, fs, new ArrayList<FileSKVIterator>());
 
       Assert.assertFalse("Expected " + tmp2 + " to be cleaned up but it wasn't", tmp2.exists());
-    } finally {
-      FileUtils.deleteQuietly(accumuloDir);
-    }
   }
 
   private static class FileUtilTestConfiguration extends AccumuloConfiguration {

http://git-wip-us.apache.org/repos/asf/accumulo/blob/ccc9a678/test/src/test/java/org/apache/accumulo/test/KeyValueEqualityIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/KeyValueEqualityIT.java b/test/src/test/java/org/apache/accumulo/test/KeyValueEqualityIT.java
new file mode 100644
index 0000000..b1e5364
--- /dev/null
+++ b/test/src/test/java/org/apache/accumulo/test/KeyValueEqualityIT.java
@@ -0,0 +1,75 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test;
+
+import java.util.Iterator;
+import java.util.Map.Entry;
+
+import org.apache.accumulo.core.client.BatchWriter;
+import org.apache.accumulo.core.client.BatchWriterConfig;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.admin.TableOperations;
+import org.apache.accumulo.core.data.Key;
+import org.apache.accumulo.core.data.Mutation;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.test.functional.SimpleMacIT;
+import org.junit.Assert;
+import org.junit.Test;
+
+public class KeyValueEqualityIT extends SimpleMacIT {
+
+  @Override
+  public int defaultTimeoutSeconds() {
+    return 60;
+  }
+
+  @Test
+  public void testEquality() throws Exception {
+    Connector conn = this.getConnector();
+    final BatchWriterConfig config = new BatchWriterConfig();
+
+    final String table1 = "table1", table2 = "table2";
+    final TableOperations tops = conn.tableOperations();
+    tops.create(table1);
+    tops.create(table2);
+
+    final BatchWriter bw1 = conn.createBatchWriter(table1, config), bw2 = conn.createBatchWriter(table2, config);
+
+    for (int row = 0; row < 100; row++) {
+      Mutation m = new Mutation(Integer.toString(row));
+      for (int col = 0; col < 10; col++) {
+        m.put(Integer.toString(col), "", System.currentTimeMillis(), Integer.toString(col * 2));
+      }
+      bw1.addMutation(m);
+      bw2.addMutation(m);
+    }
+
+    bw1.close();
+    bw2.close();
+
+    Iterator<Entry<Key,Value>> t1 = conn.createScanner(table1, Authorizations.EMPTY).iterator(), t2 = conn.createScanner(table2, Authorizations.EMPTY).iterator();
+    while (t1.hasNext() && t2.hasNext()) {
+      // KeyValue, the implementation of Entry<Key,Value>, should support equality and hashCode properly
+      Entry<Key,Value> e1 = t1.next(), e2 = t2.next();
+      Assert.assertEquals(e1, e2);
+      Assert.assertEquals(e1.hashCode(), e2.hashCode());
+    }
+    Assert.assertFalse("table1 had more data to read", t1.hasNext());
+    Assert.assertFalse("table2 had more data to read", t2.hasNext());
+  }
+}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/ccc9a678/test/src/test/java/org/apache/accumulo/test/KeyValueEqualityTest.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/KeyValueEqualityTest.java b/test/src/test/java/org/apache/accumulo/test/KeyValueEqualityTest.java
deleted file mode 100644
index 1302b23..0000000
--- a/test/src/test/java/org/apache/accumulo/test/KeyValueEqualityTest.java
+++ /dev/null
@@ -1,75 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test;
-
-import java.util.Iterator;
-import java.util.Map.Entry;
-
-import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.BatchWriterConfig;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.admin.TableOperations;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.test.functional.SimpleMacIT;
-import org.junit.Assert;
-import org.junit.Test;
-
-public class KeyValueEqualityTest extends SimpleMacIT {
-
-  @Override
-  public int defaultTimeoutSeconds() {
-    return 60;
-  }
-
-  @Test
-  public void testEquality() throws Exception {
-    Connector conn = this.getConnector();
-    final BatchWriterConfig config = new BatchWriterConfig();
-
-    final String table1 = "table1", table2 = "table2";
-    final TableOperations tops = conn.tableOperations();
-    tops.create(table1);
-    tops.create(table2);
-
-    final BatchWriter bw1 = conn.createBatchWriter(table1, config), bw2 = conn.createBatchWriter(table2, config);
-
-    for (int row = 0; row < 100; row++) {
-      Mutation m = new Mutation(Integer.toString(row));
-      for (int col = 0; col < 10; col++) {
-        m.put(Integer.toString(col), "", System.currentTimeMillis(), Integer.toString(col * 2));
-      }
-      bw1.addMutation(m);
-      bw2.addMutation(m);
-    }
-
-    bw1.close();
-    bw2.close();
-
-    Iterator<Entry<Key,Value>> t1 = conn.createScanner(table1, Authorizations.EMPTY).iterator(), t2 = conn.createScanner(table2, Authorizations.EMPTY).iterator();
-    while (t1.hasNext() && t2.hasNext()) {
-      // KeyValue, the implementation of Entry<Key,Value>, should support equality and hashCode properly
-      Entry<Key,Value> e1 = t1.next(), e2 = t2.next();
-      Assert.assertEquals(e1, e2);
-      Assert.assertEquals(e1.hashCode(), e2.hashCode());
-    }
-    Assert.assertFalse("table1 had more data to read", t1.hasNext());
-    Assert.assertFalse("table2 had more data to read", t2.hasNext());
-  }
-}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/ccc9a678/test/src/test/java/org/apache/accumulo/test/functional/MetadataMaxFiles.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/MetadataMaxFiles.java
b/test/src/test/java/org/apache/accumulo/test/functional/MetadataMaxFiles.java
deleted file mode 100644
index 98adbf6..0000000
--- a/test/src/test/java/org/apache/accumulo/test/functional/MetadataMaxFiles.java
+++ /dev/null
@@ -1,112 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.functional;
-
-import static org.junit.Assert.assertEquals;
-
-import java.util.HashMap;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.SortedSet;
-import java.util.TreeSet;
-
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.impl.MasterClient;
-import org.apache.accumulo.core.client.security.tokens.PasswordToken;
-import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.core.master.thrift.MasterClientService.Client;
-import org.apache.accumulo.core.master.thrift.MasterMonitorInfo;
-import org.apache.accumulo.core.master.thrift.TableInfo;
-import org.apache.accumulo.core.master.thrift.TabletServerStatus;
-import org.apache.accumulo.core.metadata.MetadataTable;
-import org.apache.accumulo.core.metadata.RootTable;
-import org.apache.accumulo.core.security.Credentials;
-import org.apache.accumulo.core.trace.Tracer;
-import org.apache.accumulo.core.util.UtilWaitThread;
-import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
-import org.apache.accumulo.server.util.Admin;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.io.Text;
-import org.junit.Test;
-
-public class MetadataMaxFiles extends ConfigurableMacIT {
-  
-  @Override
-  public void configure(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
-    Map<String,String> siteConfig = new HashMap<String,String>();
-    siteConfig.put(Property.TSERV_MAJC_DELAY.getKey(), "1");
-    siteConfig.put(Property.TSERV_SCAN_MAX_OPENFILES.getKey(), "10");
-    cfg.setSiteConfig(siteConfig);
-  }
-  
-  @Override
-  protected int defaultTimeoutSeconds() {
-    return 4 * 60;
-  }
-
-  @Test
-  public void test() throws Exception {
-    Connector c = getConnector();
-    SortedSet<Text> splits = new TreeSet<Text>();
-    for (int i = 0; i < 1000; i++) {
-      splits.add(new Text(String.format("%03d", i)));
-    }
-    c.tableOperations().setProperty(MetadataTable.NAME, Property.TABLE_SPLIT_THRESHOLD.getKey(), "10000");
-    for (int i = 0; i < 5; i++) {
-      String tableName = "table" + i;
-      log.info("Creating " + tableName);
-      c.tableOperations().create(tableName);
-      log.info("adding splits");
-      c.tableOperations().addSplits(tableName, splits);
-      log.info("flushing");
-      c.tableOperations().flush(MetadataTable.NAME, null, null, true);
-      c.tableOperations().flush(RootTable.NAME, null, null, true);
-    }
-    UtilWaitThread.sleep(20 * 1000);
-    log.info("shutting down");
-    assertEquals(0, cluster.exec(Admin.class, "stopAll").waitFor());
-    cluster.stop();
-    log.info("starting up");
-    cluster.start();
-    
-    UtilWaitThread.sleep(30 * 1000);
-    
-    while (true) {
-      MasterMonitorInfo stats = null;
-      Credentials creds = new Credentials("root", new PasswordToken(ROOT_PASSWORD));
-      Client client = null;
-      try {
-        client = MasterClient.getConnectionWithRetry(c.getInstance());
-        stats = client.getMasterStats(Tracer.traceInfo(), creds.toThrift(c.getInstance()));
-      } finally {
-        if (client != null)
-          MasterClient.close(client);
-      }
-      int tablets = 0;
-      for (TabletServerStatus tserver : stats.tServerInfo) {
-        for (Entry<String,TableInfo> entry : tserver.tableMap.entrySet()) {
-          if (entry.getKey().startsWith("!"))
-            continue;
-          tablets += entry.getValue().onlineTablets;
-        }
-      }
-      if (tablets == 5005)
-        break;
-      UtilWaitThread.sleep(1000);
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/ccc9a678/test/src/test/java/org/apache/accumulo/test/functional/MetadataMaxFilesIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/MetadataMaxFilesIT.java
b/test/src/test/java/org/apache/accumulo/test/functional/MetadataMaxFilesIT.java
new file mode 100644
index 0000000..1ea5652
--- /dev/null
+++ b/test/src/test/java/org/apache/accumulo/test/functional/MetadataMaxFilesIT.java
@@ -0,0 +1,112 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.functional;
+
+import static org.junit.Assert.assertEquals;
+
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.SortedSet;
+import java.util.TreeSet;
+
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.impl.MasterClient;
+import org.apache.accumulo.core.client.security.tokens.PasswordToken;
+import org.apache.accumulo.core.conf.Property;
+import org.apache.accumulo.core.master.thrift.MasterClientService.Client;
+import org.apache.accumulo.core.master.thrift.MasterMonitorInfo;
+import org.apache.accumulo.core.master.thrift.TableInfo;
+import org.apache.accumulo.core.master.thrift.TabletServerStatus;
+import org.apache.accumulo.core.metadata.MetadataTable;
+import org.apache.accumulo.core.metadata.RootTable;
+import org.apache.accumulo.core.security.Credentials;
+import org.apache.accumulo.core.trace.Tracer;
+import org.apache.accumulo.core.util.UtilWaitThread;
+import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
+import org.apache.accumulo.server.util.Admin;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.io.Text;
+import org.junit.Test;
+
+public class MetadataMaxFilesIT extends ConfigurableMacIT {
+  
+  @Override
+  public void configure(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
+    Map<String,String> siteConfig = new HashMap<String,String>();
+    siteConfig.put(Property.TSERV_MAJC_DELAY.getKey(), "1");
+    siteConfig.put(Property.TSERV_SCAN_MAX_OPENFILES.getKey(), "10");
+    cfg.setSiteConfig(siteConfig);
+  }
+  
+  @Override
+  protected int defaultTimeoutSeconds() {
+    return 4 * 60;
+  }
+
+  @Test
+  public void test() throws Exception {
+    Connector c = getConnector();
+    SortedSet<Text> splits = new TreeSet<Text>();
+    for (int i = 0; i < 1000; i++) {
+      splits.add(new Text(String.format("%03d", i)));
+    }
+    c.tableOperations().setProperty(MetadataTable.NAME, Property.TABLE_SPLIT_THRESHOLD.getKey(), "10000");
+    for (int i = 0; i < 5; i++) {
+      String tableName = "table" + i;
+      log.info("Creating " + tableName);
+      c.tableOperations().create(tableName);
+      log.info("adding splits");
+      c.tableOperations().addSplits(tableName, splits);
+      log.info("flushing");
+      c.tableOperations().flush(MetadataTable.NAME, null, null, true);
+      c.tableOperations().flush(RootTable.NAME, null, null, true);
+    }
+    UtilWaitThread.sleep(20 * 1000);
+    log.info("shutting down");
+    assertEquals(0, cluster.exec(Admin.class, "stopAll").waitFor());
+    cluster.stop();
+    log.info("starting up");
+    cluster.start();
+    
+    UtilWaitThread.sleep(30 * 1000);
+    
+    while (true) {
+      MasterMonitorInfo stats = null;
+      Credentials creds = new Credentials("root", new PasswordToken(ROOT_PASSWORD));
+      Client client = null;
+      try {
+        client = MasterClient.getConnectionWithRetry(c.getInstance());
+        stats = client.getMasterStats(Tracer.traceInfo(), creds.toThrift(c.getInstance()));
+      } finally {
+        if (client != null)
+          MasterClient.close(client);
+      }
+      int tablets = 0;
+      for (TabletServerStatus tserver : stats.tServerInfo) {
+        for (Entry<String,TableInfo> entry : tserver.tableMap.entrySet()) {
+          if (entry.getKey().startsWith("!") || entry.getKey().startsWith("+"))
+            continue;
+          tablets += entry.getValue().onlineTablets;
+        }
+      }
+      if (tablets == 5005)
+        break;
+      UtilWaitThread.sleep(1000);
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/ccc9a678/test/src/test/java/org/apache/accumulo/test/replication/StatusCombinerMacIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/replication/StatusCombinerMacIT.java
b/test/src/test/java/org/apache/accumulo/test/replication/StatusCombinerMacIT.java
new file mode 100644
index 0000000..a667c37
--- /dev/null
+++ b/test/src/test/java/org/apache/accumulo/test/replication/StatusCombinerMacIT.java
@@ -0,0 +1,114 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.replication;
+
+import java.util.EnumSet;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Map.Entry;
+
+import org.apache.accumulo.core.client.BatchWriter;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.Scanner;
+import org.apache.accumulo.core.client.admin.TableOperations;
+import org.apache.accumulo.core.conf.Property;
+import org.apache.accumulo.core.data.Key;
+import org.apache.accumulo.core.data.Mutation;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.iterators.IteratorUtil.IteratorScope;
+import org.apache.accumulo.core.metadata.MetadataTable;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema;
+import org.apache.accumulo.core.protobuf.ProtobufUtil;
+import org.apache.accumulo.core.replication.ReplicationSchema.StatusSection;
+import org.apache.accumulo.core.replication.ReplicationTable;
+import org.apache.accumulo.core.replication.StatusUtil;
+import org.apache.accumulo.core.replication.proto.Replication.Status;
+import org.apache.accumulo.core.security.TablePermission;
+import org.apache.accumulo.server.util.ReplicationTableUtil;
+import org.apache.accumulo.test.functional.SimpleMacIT;
+import org.apache.hadoop.io.Text;
+import org.junit.Assert;
+import org.junit.Test;
+
+import com.google.common.collect.Iterables;
+
+/**
+ *
+ */
+public class StatusCombinerMacIT extends SimpleMacIT {
+
+  @Test
+  public void testCombinerSetOnMetadata() throws Exception {
+    TableOperations tops = getConnector().tableOperations();
+    Map<String,EnumSet<IteratorScope>> iterators = tops.listIterators(MetadataTable.NAME);
+
+    Assert.assertTrue(iterators.containsKey(ReplicationTableUtil.COMBINER_NAME));
+    EnumSet<IteratorScope> scopes = iterators.get(ReplicationTableUtil.COMBINER_NAME);
+    Assert.assertEquals(3, scopes.size());
+    Assert.assertTrue(scopes.contains(IteratorScope.scan));
+    Assert.assertTrue(scopes.contains(IteratorScope.minc));
+    Assert.assertTrue(scopes.contains(IteratorScope.majc));
+
+    Iterable<Entry<String,String>> propIter = tops.getProperties(MetadataTable.NAME);
+    HashMap<String,String> properties = new HashMap<String,String>();
+    for (Entry<String,String> entry : propIter) {
+      properties.put(entry.getKey(), entry.getValue());
+    }
+
+    for (IteratorScope scope : scopes) {
+      String key = Property.TABLE_ITERATOR_PREFIX.getKey() + scope.name() + "." + ReplicationTableUtil.COMBINER_NAME
+ ".opt.columns";
+      Assert.assertTrue("Properties did not contain key : " + key, properties.containsKey(key));
+      Assert.assertEquals(MetadataSchema.ReplicationSection.COLF.toString(), properties.get(key));
+    }
+  }
+
+  @Test
+  public void test() throws Exception {
+    Connector conn = getConnector();
+
+    ReplicationTable.setOnline(conn);
+    conn.securityOperations().grantTablePermission("root", ReplicationTable.NAME, TablePermission.WRITE);
+    BatchWriter bw = ReplicationTable.getBatchWriter(conn);
+    long createTime = System.currentTimeMillis();
+    try {
+      Mutation m = new Mutation("file:/accumulo/wal/HW10447.local+56808/93cdc17e-7521-44fa-87b5-37f45bcb92d3");
+      StatusSection.add(m, new Text("1"), StatusUtil.fileCreatedValue(createTime));
+      bw.addMutation(m);
+    } finally {
+      bw.close();
+    }
+
+    Scanner s = ReplicationTable.getScanner(conn);
+    Entry<Key,Value> entry = Iterables.getOnlyElement(s);
+    Assert.assertEquals(StatusUtil.fileCreatedValue(createTime), entry.getValue());
+
+    bw = ReplicationTable.getBatchWriter(conn);
+    try {
+      Mutation m = new Mutation("file:/accumulo/wal/HW10447.local+56808/93cdc17e-7521-44fa-87b5-37f45bcb92d3");
+      StatusSection.add(m, new Text("1"), ProtobufUtil.toValue(StatusUtil.replicated(Long.MAX_VALUE)));
+      bw.addMutation(m);
+    } finally {
+      bw.close();
+    }
+
+    s = ReplicationTable.getScanner(conn);
+    entry = Iterables.getOnlyElement(s);
+    Status stat = Status.parseFrom(entry.getValue().get());
+    Assert.assertEquals(Long.MAX_VALUE, stat.getBegin());
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/ccc9a678/test/src/test/java/org/apache/accumulo/test/replication/StatusCombinerMacTest.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/replication/StatusCombinerMacTest.java b/test/src/test/java/org/apache/accumulo/test/replication/StatusCombinerMacTest.java
deleted file mode 100644
index 98d7676..0000000
--- a/test/src/test/java/org/apache/accumulo/test/replication/StatusCombinerMacTest.java
+++ /dev/null
@@ -1,114 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.replication;
-
-import java.util.EnumSet;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.Map.Entry;
-
-import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.client.admin.TableOperations;
-import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.iterators.IteratorUtil.IteratorScope;
-import org.apache.accumulo.core.metadata.MetadataTable;
-import org.apache.accumulo.core.metadata.schema.MetadataSchema;
-import org.apache.accumulo.core.protobuf.ProtobufUtil;
-import org.apache.accumulo.core.replication.ReplicationSchema.StatusSection;
-import org.apache.accumulo.core.replication.ReplicationTable;
-import org.apache.accumulo.core.replication.StatusUtil;
-import org.apache.accumulo.core.replication.proto.Replication.Status;
-import org.apache.accumulo.core.security.TablePermission;
-import org.apache.accumulo.server.util.ReplicationTableUtil;
-import org.apache.accumulo.test.functional.SimpleMacIT;
-import org.apache.hadoop.io.Text;
-import org.junit.Assert;
-import org.junit.Test;
-
-import com.google.common.collect.Iterables;
-
-/**
- *
- */
-public class StatusCombinerMacTest extends SimpleMacIT {
-
-  @Test
-  public void testCombinerSetOnMetadata() throws Exception {
-    TableOperations tops = getConnector().tableOperations();
-    Map<String,EnumSet<IteratorScope>> iterators = tops.listIterators(MetadataTable.NAME);
-
-    Assert.assertTrue(iterators.containsKey(ReplicationTableUtil.COMBINER_NAME));
-    EnumSet<IteratorScope> scopes = iterators.get(ReplicationTableUtil.COMBINER_NAME);
-    Assert.assertEquals(3, scopes.size());
-    Assert.assertTrue(scopes.contains(IteratorScope.scan));
-    Assert.assertTrue(scopes.contains(IteratorScope.minc));
-    Assert.assertTrue(scopes.contains(IteratorScope.majc));
-
-    Iterable<Entry<String,String>> propIter = tops.getProperties(MetadataTable.NAME);
-    HashMap<String,String> properties = new HashMap<String,String>();
-    for (Entry<String,String> entry : propIter) {
-      properties.put(entry.getKey(), entry.getValue());
-    }
-
-    for (IteratorScope scope : scopes) {
-      String key = Property.TABLE_ITERATOR_PREFIX.getKey() + scope.name() + "." + ReplicationTableUtil.COMBINER_NAME + ".opt.columns";
-      Assert.assertTrue("Properties did not contain key : " + key, properties.containsKey(key));
-      Assert.assertEquals(MetadataSchema.ReplicationSection.COLF.toString(), properties.get(key));
-    }
-  }
-
-  @Test
-  public void test() throws Exception {
-    Connector conn = getConnector();
-
-    ReplicationTable.setOnline(conn);
-    conn.securityOperations().grantTablePermission("root", ReplicationTable.NAME, TablePermission.WRITE);
-    BatchWriter bw = ReplicationTable.getBatchWriter(conn);
-    long createTime = System.currentTimeMillis();
-    try {
-      Mutation m = new Mutation("file:/accumulo/wal/HW10447.local+56808/93cdc17e-7521-44fa-87b5-37f45bcb92d3");
-      StatusSection.add(m, new Text("1"), StatusUtil.fileCreatedValue(createTime));
-      bw.addMutation(m);
-    } finally {
-      bw.close();
-    }
-
-    Scanner s = ReplicationTable.getScanner(conn);
-    Entry<Key,Value> entry = Iterables.getOnlyElement(s);
-    Assert.assertEquals(StatusUtil.fileCreatedValue(createTime), entry.getValue());
-
-    bw = ReplicationTable.getBatchWriter(conn);
-    try {
-      Mutation m = new Mutation("file:/accumulo/wal/HW10447.local+56808/93cdc17e-7521-44fa-87b5-37f45bcb92d3");
-      StatusSection.add(m, new Text("1"), ProtobufUtil.toValue(StatusUtil.replicated(Long.MAX_VALUE)));
-      bw.addMutation(m);
-    } finally {
-      bw.close();
-    }
-
-    s = ReplicationTable.getScanner(conn);
-    entry = Iterables.getOnlyElement(s);
-    Status stat = Status.parseFrom(entry.getValue().get());
-    Assert.assertEquals(Long.MAX_VALUE, stat.getBegin());
-  }
-
-}


Mime
View raw message