accumulo-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From els...@apache.org
Subject [28/50] [abbrv] git commit: ACCUMULO-2819 Fix up some now broken tests.
Date Wed, 21 May 2014 01:59:47 GMT
ACCUMULO-2819 Fix up some now broken tests.


Project: http://git-wip-us.apache.org/repos/asf/accumulo/repo
Commit: http://git-wip-us.apache.org/repos/asf/accumulo/commit/72265f5f
Tree: http://git-wip-us.apache.org/repos/asf/accumulo/tree/72265f5f
Diff: http://git-wip-us.apache.org/repos/asf/accumulo/diff/72265f5f

Branch: refs/heads/ACCUMULO-378
Commit: 72265f5f4999a7a449ab4222049e755431a02469
Parents: 5124fa3
Author: Josh Elser <elserj@apache.org>
Authored: Sun May 18 14:27:15 2014 -0400
Committer: Josh Elser <elserj@apache.org>
Committed: Sun May 18 14:27:15 2014 -0400

----------------------------------------------------------------------
 .../ReplicationOperationsImplTest.java          | 41 +++++++++++++++++---
 .../test/replication/ReplicationTest.java       |  6 ++-
 .../replication/ReplicationWithMakerTest.java   | 12 +++---
 3 files changed, 46 insertions(+), 13 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/accumulo/blob/72265f5f/core/src/test/java/org/apache/accumulo/core/replication/ReplicationOperationsImplTest.java
----------------------------------------------------------------------
diff --git a/core/src/test/java/org/apache/accumulo/core/replication/ReplicationOperationsImplTest.java
b/core/src/test/java/org/apache/accumulo/core/replication/ReplicationOperationsImplTest.java
index 0a8cd92..f0670ee 100644
--- a/core/src/test/java/org/apache/accumulo/core/replication/ReplicationOperationsImplTest.java
+++ b/core/src/test/java/org/apache/accumulo/core/replication/ReplicationOperationsImplTest.java
@@ -17,6 +17,7 @@
 package org.apache.accumulo.core.replication;
 
 import java.util.Arrays;
+import java.util.Map.Entry;
 import java.util.UUID;
 import java.util.concurrent.atomic.AtomicBoolean;
 
@@ -27,14 +28,16 @@ import org.apache.accumulo.core.client.impl.ReplicationOperationsImpl;
 import org.apache.accumulo.core.client.mock.MockInstance;
 import org.apache.accumulo.core.client.replication.ReplicationTable;
 import org.apache.accumulo.core.client.security.tokens.PasswordToken;
+import org.apache.accumulo.core.data.Key;
 import org.apache.accumulo.core.data.KeyExtent;
 import org.apache.accumulo.core.data.Mutation;
+import org.apache.accumulo.core.data.Value;
 import org.apache.accumulo.core.metadata.MetadataTable;
 import org.apache.accumulo.core.metadata.schema.MetadataSchema.ReplicationSection;
-import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.LogColumnFamily;
 import org.apache.accumulo.core.protobuf.ProtobufUtil;
 import org.apache.accumulo.core.replication.ReplicationSchema.StatusSection;
 import org.apache.accumulo.core.replication.proto.Replication.Status;
+import org.apache.accumulo.core.security.Authorizations;
 import org.apache.accumulo.core.security.Credentials;
 import org.apache.accumulo.core.tabletserver.log.LogEntry;
 import org.apache.hadoop.io.Text;
@@ -340,14 +343,11 @@ public class ReplicationOperationsImplTest {
     Status stat = Status.newBuilder().setBegin(0).setEnd(10000).setInfiniteEnd(false).setClosed(false).build();
 
     BatchWriter bw = conn.createBatchWriter(ReplicationTable.NAME, new BatchWriterConfig());
-
     Mutation m = new Mutation(file1);
     StatusSection.add(m, tableId1, ProtobufUtil.toValue(stat));
     bw.addMutation(m);
     bw.close();
 
-    // We create a file which is only the replication record, but without a corresponding
log entry
-    // We can do this to fake a WAL that was "added" after we first called drain()
     bw = conn.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
     m = new Mutation(ReplicationSection.getRowPrefix() + file1);
     m.put(ReplicationSection.COLF, tableId1, ProtobufUtil.toValue(stat));
@@ -355,7 +355,12 @@ public class ReplicationOperationsImplTest {
 
     bw.close();
 
-    final AtomicBoolean done = new AtomicBoolean(false);
+    System.out.println("Reading metadata first time");
+    for (Entry<Key,Value> e : conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY))
{
+      System.out.println(e.getKey());
+    }
+
+    final AtomicBoolean done = new AtomicBoolean(false), firstRunComplete = new AtomicBoolean(false);
     final AtomicBoolean exception = new AtomicBoolean(false);
     final ReplicationOperationsImpl roi = new ReplicationOperationsImpl(inst, new Credentials("root",
new PasswordToken("")));
     Thread t = new Thread(new Runnable() {
@@ -373,13 +378,37 @@ public class ReplicationOperationsImplTest {
 
     t.start();
 
+    // We need to wait long enough for the table to read once
+    Thread.sleep(2000);
+
+    // Write another file, but also delete the old files
+    bw = conn.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
+    m = new Mutation(ReplicationSection.getRowPrefix() + "/accumulo/wals/tserver+port/" +
UUID.randomUUID());
+    m.put(ReplicationSection.COLF, tableId1, ProtobufUtil.toValue(stat));
+    bw.addMutation(m);
+    m = new Mutation(ReplicationSection.getRowPrefix() + file1);
+    m.putDelete(ReplicationSection.COLF, tableId1);
+    bw.addMutation(m);
+    bw.close();
+
+    System.out.println("Reading metadata second time");
+    for (Entry<Key,Value> e : conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY))
{
+      System.out.println(e.getKey());
+    }
+
+    bw = conn.createBatchWriter(ReplicationTable.NAME, new BatchWriterConfig());
+    m = new Mutation(file1);
+    m.putDelete(StatusSection.NAME, tableId1);
+    bw.addMutation(m);
+    bw.close();
+
     try {
       t.join(5000);
     } catch (InterruptedException e) {
      Assert.fail("ReplicationOperations.drain did not complete");
     }
 
-    // We should pass immediately
+    // We should pass immediately because we aren't waiting on both files to be deleted (just
the one that we did)
     Assert.assertTrue(done.get());
   }
 

http://git-wip-us.apache.org/repos/asf/accumulo/blob/72265f5f/test/src/test/java/org/apache/accumulo/test/replication/ReplicationTest.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/replication/ReplicationTest.java
b/test/src/test/java/org/apache/accumulo/test/replication/ReplicationTest.java
index 1207400..90bfabf 100644
--- a/test/src/test/java/org/apache/accumulo/test/replication/ReplicationTest.java
+++ b/test/src/test/java/org/apache/accumulo/test/replication/ReplicationTest.java
@@ -63,7 +63,7 @@ public class ReplicationTest extends ConfigurableMacIT {
   @Override
   public void configure(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
     hadoopCoreSite.set("fs.file.impl", RawLocalFileSystem.class.getName());
-    // Run the master work maker infrequently
 +    // Run the master replication loop frequently
     cfg.setProperty(Property.MASTER_REPLICATION_SCAN_INTERVAL, "0");
     cfg.setNumTservers(1);
   }
@@ -240,6 +240,7 @@ public class ReplicationTest extends ConfigurableMacIT {
 
     // Verify that we found a single replication record that's for table1
     Scanner s = ReplicationTable.getScanner(conn, new Authorizations());
+    StatusSection.limit(s);
     Iterator<Entry<Key,Value>> iter = s.iterator();
     attempts = 5;
     while (attempts > 0) {
@@ -255,8 +256,8 @@ public class ReplicationTest extends ConfigurableMacIT {
     }
     Assert.assertTrue(iter.hasNext());
     Entry<Key,Value> entry = iter.next();
+    // We should at least find one status record for this table, we might find a second if
another log was started from ingesting the data
     Assert.assertEquals("Expected to find replication entry for " + table1, conn.tableOperations().tableIdMap().get(table1),
entry.getKey().getColumnQualifier().toString());
-    Assert.assertFalse(iter.hasNext());
     s.close();
 
     // Enable replication on table2
@@ -287,6 +288,7 @@ public class ReplicationTest extends ConfigurableMacIT {
 
     // Verify that we found two replication records: one for table1 and one for table2
     s = ReplicationTable.getScanner(conn, new Authorizations());
+    StatusSection.limit(s);
     iter = s.iterator();
     Assert.assertTrue("Found no records in replication table", iter.hasNext());
     entry = iter.next();

http://git-wip-us.apache.org/repos/asf/accumulo/blob/72265f5f/test/src/test/java/org/apache/accumulo/test/replication/ReplicationWithMakerTest.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/replication/ReplicationWithMakerTest.java
b/test/src/test/java/org/apache/accumulo/test/replication/ReplicationWithMakerTest.java
index 1e8d9a9..aee8a1e 100644
--- a/test/src/test/java/org/apache/accumulo/test/replication/ReplicationWithMakerTest.java
+++ b/test/src/test/java/org/apache/accumulo/test/replication/ReplicationWithMakerTest.java
@@ -27,10 +27,10 @@ import org.apache.accumulo.core.conf.Property;
 import org.apache.accumulo.core.data.Key;
 import org.apache.accumulo.core.data.Mutation;
 import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.protobuf.ProtobufUtil;
 import org.apache.accumulo.core.replication.ReplicationSchema.StatusSection;
 import org.apache.accumulo.core.replication.ReplicationSchema.WorkSection;
 import org.apache.accumulo.core.replication.ReplicationTarget;
-import org.apache.accumulo.core.replication.StatusUtil;
 import org.apache.accumulo.core.replication.proto.Replication.Status;
 import org.apache.accumulo.core.security.TablePermission;
 import org.apache.accumulo.core.util.UtilWaitThread;
@@ -74,7 +74,7 @@ public class ReplicationWithMakerTest extends ConfigurableMacIT {
     for (ProcessReference proc : cluster.getProcesses().get(ServerType.GARBAGE_COLLECTOR))
{
       cluster.killProcess(ServerType.GARBAGE_COLLECTOR, proc);
     }
-    
+
     Connector conn = getConnector();
     String table1 = "table1";
 
@@ -144,11 +144,11 @@ public class ReplicationWithMakerTest extends ConfigurableMacIT {
     Entry<Key,Value> entry = null;
     attempts = 5;
     // This record will move from new to new with infinite length because of the minc (flush)
-    Status expectedStatus = StatusUtil.openWithUnknownLength();
     while (null == entry && attempts > 0) {
       try {
         entry = Iterables.getOnlyElement(s);
-        if (!expectedStatus.equals(Status.parseFrom(entry.getValue().get()))) {
+        Status actualStatus = Status.parseFrom(entry.getValue().get());
+        if (!actualStatus.hasClosedTime() || !actualStatus.getClosed()) {
           entry = null;
           // the master process didn't yet fire and write the new mutation, wait for it to
do
           // so and try to read it again
@@ -171,7 +171,9 @@ public class ReplicationWithMakerTest extends ConfigurableMacIT {
     }
 
     Assert.assertNotNull("Could not find expected entry in replication table", entry);
-    Assert.assertEquals("Expected to find a replication entry that is open with infinite
length", expectedStatus, Status.parseFrom(entry.getValue().get()));
+    Status actualStatus = Status.parseFrom(entry.getValue().get());
+    Assert.assertTrue("Expected to find a replication entry that is closed with infinite
length: " + ProtobufUtil.toString(actualStatus),
+        actualStatus.getClosed() && actualStatus.hasClosedTime());
 
     // Try a couple of times to watch for the work record to be created
     boolean notFound = true;


Mime
View raw message