accumulo-commits mailing list archives

From els...@apache.org
Subject [01/17] git commit: ACCUMULO-3010 renamed test to conform to integration test pattern
Date Sun, 10 Aug 2014 06:20:54 GMT
Repository: accumulo
Updated Branches:
  refs/heads/1.5.2-SNAPSHOT fc1a4ff07 -> 17d3fd3ce
  refs/heads/1.6.1-SNAPSHOT c2be73ab1 -> 0277521da
  refs/heads/master 3c7b3da04 -> 19de9683c


ACCUMULO-3010 renamed test to conform to integration test pattern


Project: http://git-wip-us.apache.org/repos/asf/accumulo/repo
Commit: http://git-wip-us.apache.org/repos/asf/accumulo/commit/c2be73ab
Tree: http://git-wip-us.apache.org/repos/asf/accumulo/tree/c2be73ab
Diff: http://git-wip-us.apache.org/repos/asf/accumulo/diff/c2be73ab

Branch: refs/heads/master
Commit: c2be73ab1826958168e55683e5e7f4568c4bbe87
Parents: 6023dba
Author: Eric C. Newton <eric.newton@gmail.com>
Authored: Fri Aug 8 10:56:38 2014 -0400
Committer: Eric C. Newton <eric.newton@gmail.com>
Committed: Fri Aug 8 14:26:43 2014 -0400

----------------------------------------------------------------------
 .../org/apache/accumulo/test/Accumulo3010.java  | 91 --------------------
 .../apache/accumulo/test/Accumulo3010IT.java    | 91 ++++++++++++++++++++
 2 files changed, 91 insertions(+), 91 deletions(-)
----------------------------------------------------------------------
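For context on the pattern the commit message refers to: Accumulo's integration tests extend ConfigurableMacIT and carry the "IT" suffix, so the build's integration-test phase (presumably the Maven Failsafe plugin, whose default includes match *IT.java) picks them up, while the plain unit-test run does not. Below is a minimal, hypothetical sketch of a class that follows the same convention; ExampleIT is an invented name, and it uses only the base-class hooks that appear in the diff (configure, getUniqueNames, getConnector).

/*
 * Hypothetical sketch, not part of this commit: an integration test that follows
 * the *IT naming pattern. The "IT" suffix is what marks the class for the
 * integration-test run; the body just exercises the mini cluster end to end.
 */
package org.apache.accumulo.test;

import org.apache.accumulo.core.client.Connector;
import org.apache.accumulo.core.conf.Property;
import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
import org.apache.accumulo.test.functional.ConfigurableMacIT;
import org.apache.hadoop.conf.Configuration;
import org.junit.Test;

public class ExampleIT extends ConfigurableMacIT {

  @Override
  public void configure(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
    // keep the mini cluster small; one tablet server is enough here
    cfg.setNumTservers(1);
    cfg.setProperty(Property.INSTANCE_ZK_TIMEOUT, "5s");
  }

  @Test(timeout = 60 * 1000)
  public void createTable() throws Exception {
    // create a uniquely named table against the running mini cluster
    String tableName = getUniqueNames(1)[0];
    Connector c = getConnector();
    c.tableOperations().create(tableName);
  }
}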


http://git-wip-us.apache.org/repos/asf/accumulo/blob/c2be73ab/test/src/test/java/org/apache/accumulo/test/Accumulo3010.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/Accumulo3010.java b/test/src/test/java/org/apache/accumulo/test/Accumulo3010.java
deleted file mode 100644
index 2f6bb6f..0000000
--- a/test/src/test/java/org/apache/accumulo/test/Accumulo3010.java
+++ /dev/null
@@ -1,91 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test;
-
-import java.util.Map.Entry;
-
-import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.BatchWriterConfig;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.metadata.MetadataTable;
-import org.apache.accumulo.core.metadata.schema.MetadataSchema;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.minicluster.ServerType;
-import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
-import org.apache.accumulo.minicluster.impl.ProcessReference;
-import org.apache.accumulo.test.functional.ConfigurableMacIT;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.RawLocalFileSystem;
-import org.junit.Assert;
-import org.junit.Test;
-
-public class Accumulo3010 extends ConfigurableMacIT {
-
-  @Override
-  public void configure(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
-    cfg.setNumTservers(1);
-    cfg.setProperty(Property.INSTANCE_ZK_TIMEOUT, "5s");
-    // file system supports recovery
-    hadoopCoreSite.set("fs.file.impl", RawLocalFileSystem.class.getName());
-  }
-  
-  @Test(timeout = 60 * 1000)
-  public void test() throws Exception {
-    // create a table
-    String tableName = getUniqueNames(1)[0];
-    Connector c = getConnector();
-    c.tableOperations().create(tableName);
-    c.tableOperations().setProperty(tableName, Property.TABLE_MAJC_RATIO.getKey(), "100");
-    c.tableOperations().setProperty(tableName, Property.TABLE_FILE_MAX.getKey(), "3");
-    // create 3 flush files
-    BatchWriter bw = c.createBatchWriter(tableName, new BatchWriterConfig());
-    Mutation m = new Mutation("a");
-    m.put("b", "c", new Value("v".getBytes()));
-    for (int i = 0; i < 3; i++) {
-      bw.addMutation(m);
-      bw.flush();
-      c.tableOperations().flush(tableName, null, null, true);
-    }
-    // create an unsaved mutation
-    bw.addMutation(m);
-    bw.close();
-    // kill the tablet server
-    for (ProcessReference p : cluster.getProcesses().get(ServerType.TABLET_SERVER)) {
-      cluster.killProcess(ServerType.TABLET_SERVER, p);
-    }
-    // recover
-    cluster.start();
-    // ensure the table is readable
-    for (@SuppressWarnings("unused") Entry<Key,Value> entry : c.createScanner(tableName, Authorizations.EMPTY)) {
-    }
-    // ensure that the recovery was not a merging minor compaction
-    Scanner s = c.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
-    s.fetchColumnFamily(MetadataSchema.TabletsSection.DataFileColumnFamily.NAME);
-    for (Entry<Key, Value> entry : s) {
-      String filename = entry.getKey().getColumnQualifier().toString();
-      String parts[] = filename.split("/");
-      Assert.assertFalse(parts[parts.length-1].startsWith("M"));
-    }
-  }
-  
-  
-}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/c2be73ab/test/src/test/java/org/apache/accumulo/test/Accumulo3010IT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/Accumulo3010IT.java b/test/src/test/java/org/apache/accumulo/test/Accumulo3010IT.java
new file mode 100644
index 0000000..1a0bfa2
--- /dev/null
+++ b/test/src/test/java/org/apache/accumulo/test/Accumulo3010IT.java
@@ -0,0 +1,91 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test;
+
+import java.util.Map.Entry;
+
+import org.apache.accumulo.core.client.BatchWriter;
+import org.apache.accumulo.core.client.BatchWriterConfig;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.Scanner;
+import org.apache.accumulo.core.conf.Property;
+import org.apache.accumulo.core.data.Key;
+import org.apache.accumulo.core.data.Mutation;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.metadata.MetadataTable;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema;
+import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.minicluster.ServerType;
+import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
+import org.apache.accumulo.minicluster.impl.ProcessReference;
+import org.apache.accumulo.test.functional.ConfigurableMacIT;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.RawLocalFileSystem;
+import org.junit.Assert;
+import org.junit.Test;
+
+public class Accumulo3010IT extends ConfigurableMacIT {
+
+  @Override
+  public void configure(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
+    cfg.setNumTservers(1);
+    cfg.setProperty(Property.INSTANCE_ZK_TIMEOUT, "5s");
+    // file system supports recovery
+    hadoopCoreSite.set("fs.file.impl", RawLocalFileSystem.class.getName());
+  }
+  
+  @Test(timeout = 60 * 1000)
+  public void test() throws Exception {
+    // create a table
+    String tableName = getUniqueNames(1)[0];
+    Connector c = getConnector();
+    c.tableOperations().create(tableName);
+    c.tableOperations().setProperty(tableName, Property.TABLE_MAJC_RATIO.getKey(), "100");
+    c.tableOperations().setProperty(tableName, Property.TABLE_FILE_MAX.getKey(), "3");
+    // create 3 flush files
+    BatchWriter bw = c.createBatchWriter(tableName, new BatchWriterConfig());
+    Mutation m = new Mutation("a");
+    m.put("b", "c", new Value("v".getBytes()));
+    for (int i = 0; i < 3; i++) {
+      bw.addMutation(m);
+      bw.flush();
+      c.tableOperations().flush(tableName, null, null, true);
+    }
+    // create an unsaved mutation
+    bw.addMutation(m);
+    bw.close();
+    // kill the tablet server
+    for (ProcessReference p : cluster.getProcesses().get(ServerType.TABLET_SERVER)) {
+      cluster.killProcess(ServerType.TABLET_SERVER, p);
+    }
+    // recover
+    cluster.start();
+    // ensure the table is readable
+    for (@SuppressWarnings("unused") Entry<Key,Value> entry : c.createScanner(tableName, Authorizations.EMPTY)) {
+    }
+    // ensure that the recovery was not a merging minor compaction
+    Scanner s = c.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
+    s.fetchColumnFamily(MetadataSchema.TabletsSection.DataFileColumnFamily.NAME);
+    for (Entry<Key, Value> entry : s) {
+      String filename = entry.getKey().getColumnQualifier().toString();
+      String parts[] = filename.split("/");
+      Assert.assertFalse(parts[parts.length-1].startsWith("M"));
+    }
+  }
+  
+  
+}

