accumulo-commits mailing list archives

From e..@apache.org
Subject git commit: ACCUMULO-1803 remove classes unneeded for functional test, found SplitRecoveryIT was not working as intended
Date Fri, 25 Oct 2013 22:38:11 GMT
Updated Branches:
  refs/heads/master c7b810b55 -> 9e32d86ec


ACCUMULO-1803 remove classes unneeded for functional test, found SplitRecoveryIT was not working as intended


Project: http://git-wip-us.apache.org/repos/asf/accumulo/repo
Commit: http://git-wip-us.apache.org/repos/asf/accumulo/commit/9e32d86e
Tree: http://git-wip-us.apache.org/repos/asf/accumulo/tree/9e32d86e
Diff: http://git-wip-us.apache.org/repos/asf/accumulo/diff/9e32d86e

Branch: refs/heads/master
Commit: 9e32d86ecb374bdcd5ec554488afda04e23f8958
Parents: c7b810b
Author: Eric Newton <eric.newton@gmail.com>
Authored: Fri Oct 25 18:38:17 2013 -0400
Committer: Eric Newton <eric.newton@gmail.com>
Committed: Fri Oct 25 18:38:28 2013 -0400

----------------------------------------------------------------------
 .../test/functional/FunctionalTest.java         | 295 -------------------
 .../test/functional/SplitRecoveryTest.java      | 270 -----------------
 .../test/functional/FunctionalTestUtils.java    |   1 +
 .../test/functional/SplitRecoveryIT.java        | 239 ++++++++++++++-
 4 files changed, 239 insertions(+), 566 deletions(-)
----------------------------------------------------------------------
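For orientation before the diff bodies: the removed FunctionalTest harness (first file below) was driven externally, one phase per JVM invocation, through its main() and the required --classname/--opt flags defined in its Opts class. Condensed from the main() shown below (not new behavior, just the shape of the old harness; printConfig stands in for the inline printing loop):

    // Old harness dispatch: one JVM invocation per phase.
    if (opts.opt.equals("getConfig"))    printConfig(fTest.getInitialConfig());
    else if (opts.opt.equals("setup"))   fTest.setup();
    else if (opts.opt.equals("run"))     fTest.run();
    else if (opts.opt.equals("cleanup")) fTest.cleanup();
    else printHelpAndExit("Unknown option: " + opts.opt);

This commit deletes that harness; SplitRecoveryIT now carries the scenario itself and supplies its own main() (last file in the diff).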


http://git-wip-us.apache.org/repos/asf/accumulo/blob/9e32d86e/test/src/main/java/org/apache/accumulo/test/functional/FunctionalTest.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/FunctionalTest.java b/test/src/main/java/org/apache/accumulo/test/functional/FunctionalTest.java
deleted file mode 100644
index c9581bb..0000000
--- a/test/src/main/java/org/apache/accumulo/test/functional/FunctionalTest.java
+++ /dev/null
@@ -1,295 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.functional;
-
-import java.util.Collection;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.Set;
-import java.util.SortedSet;
-import java.util.TreeMap;
-import java.util.TreeSet;
-
-import org.apache.accumulo.core.client.AccumuloException;
-import org.apache.accumulo.core.client.AccumuloSecurityException;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.Instance;
-import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.client.ZooKeeperInstance;
-import org.apache.accumulo.core.client.impl.Tables;
-import org.apache.accumulo.core.client.security.tokens.AuthenticationToken;
-import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.Range;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.metadata.MetadataTable;
-import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection;
-import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.DataFileColumnFamily;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.server.cli.ClientOpts;
-import org.apache.accumulo.server.conf.ServerConfiguration;
-import org.apache.accumulo.start.classloader.vfs.AccumuloVFSClassLoader;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.io.Text;
-
-import com.beust.jcommander.JCommander;
-import com.beust.jcommander.Parameter;
-
-public abstract class FunctionalTest {
-  
-  public static Map<String,String> parseConfig(String... perTableConfigs) {
-    
-    TreeMap<String,String> config = new TreeMap<String,String>();
-    
-    for (String line : perTableConfigs) {
-      String[] splitLine = line.split("=");
-      if (splitLine.length == 1 && line.endsWith("="))
-        config.put(splitLine[0], "");
-      else
-        config.put(splitLine[0], splitLine[1]);
-    }
-    
-    return config;
-    
-  }
-  
-  public static class TableSetup {
-    private String tableName;
-    private Map<String,String> perTableConfigs;
-    private SortedSet<Text> splitPoints;
-    
-    public TableSetup(String tableName) {
-      this.tableName = tableName;
-    }
-    
-    public TableSetup(String tableName, Map<String,String> perTableConfigs) {
-      this.tableName = tableName;
-      this.perTableConfigs = perTableConfigs;
-    }
-    
-    public TableSetup(String tableName, Map<String,String> perTableConfigs, SortedSet<Text> splitPoints) {
-      this.tableName = tableName;
-      this.perTableConfigs = perTableConfigs;
-      this.splitPoints = splitPoints;
-    }
-    
-    public TableSetup(String tableName, SortedSet<Text> splitPoints) {
-      this.tableName = tableName;
-      this.splitPoints = splitPoints;
-    }
-    
-    public TableSetup(String tableName, String... splitPoints) {
-      this.tableName = tableName;
-      
-      this.splitPoints = new TreeSet<Text>();
-      for (String split : splitPoints) {
-        this.splitPoints.add(new Text(split));
-      }
-    }
-    
-  }
-  
-  private AuthenticationToken token = null;
-  private String instanceName = "";
-  private String principal = "";
-  
-  protected void setPrincipal(String princ) {
-    this.principal = princ;
-  }
-  
-  protected String getPrincipal() {
-    return principal;
-  }
-  
-  protected void setToken(AuthenticationToken token) {
-    this.token = token;
-  }
-  
-  protected AuthenticationToken getToken() {
-    return token;
-  }
-  
-  protected Connector getConnector() throws AccumuloException, AccumuloSecurityException {
-    return getInstance().getConnector(getPrincipal(), getToken());
-  }
-  
-  protected Instance getInstance() {
-    return new ZooKeeperInstance(getInstanceName(), ServerConfiguration.getSiteConfiguration().get(Property.INSTANCE_ZK_HOST));
-  }
-  
-  protected void setInstanceName(String instanceName) {
-    this.instanceName = instanceName;
-  }
-  
-  private String getInstanceName() {
-    return instanceName;
-  }
-  
-  public abstract Map<String,String> getInitialConfig();
-  
-  public abstract List<TableSetup> getTablesToCreate();
-  
-  public abstract void run() throws Exception;
-  
-  public abstract void cleanup() throws Exception;
-  
-  public void setup() throws Exception {
-    Connector conn = getConnector();
-    
-    List<TableSetup> ttcl = getTablesToCreate();
-    
-    for (TableSetup tableSetup : ttcl) {
-      if (tableSetup.splitPoints != null) {
-        conn.tableOperations().create(tableSetup.tableName);
-        conn.tableOperations().addSplits(tableSetup.tableName, tableSetup.splitPoints);
-      } else {
-        conn.tableOperations().create(tableSetup.tableName);
-      }
-      
-      if (tableSetup.perTableConfigs != null) {
-        for (Entry<String,String> entry : tableSetup.perTableConfigs.entrySet()) {
-          conn.tableOperations().setProperty(tableSetup.tableName, entry.getKey(), entry.getValue());
-        }
-      }
-    }
-  }
-  
-  /**
-   * A utility method for use by functional tests that ensures a table has between min and max split points, inclusive. If not, an exception is thrown.
-   * 
-   */
-  
-  protected void checkSplits(String table, int min, int max) throws Exception {
-    Collection<Text> splits = getConnector().tableOperations().listSplits(table);
-    if (splits.size() < min || splits.size() > max) {
-      throw new Exception("# of table splits points out of range, #splits=" + splits.size()
+ " table=" + table + " min=" + min + " max=" + max);
-    }
-  }
-  
-  /**
-   * A utility function that checks that each tablet has an expected number of rfiles.
-   * 
-   */
-  
-  protected void checkRFiles(String tableName, int minTablets, int maxTablets, int minRFiles, int maxRFiles) throws Exception {
-    Scanner scanner = getConnector().createScanner(MetadataTable.NAME, Authorizations.EMPTY);
-    String tableId = Tables.getNameToIdMap(getInstance()).get(tableName);
-    scanner.setRange(new Range(new Text(tableId + ";"), true, new Text(tableId + "<"), true));
-    scanner.fetchColumnFamily(DataFileColumnFamily.NAME);
-    TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.fetch(scanner);
-    
-    HashMap<Text,Integer> tabletFileCounts = new HashMap<Text,Integer>();
-    
-    for (Entry<Key,Value> entry : scanner) {
-      
-      Text row = entry.getKey().getRow();
-      
-      Integer count = tabletFileCounts.get(row);
-      if (count == null)
-        count = 0;
-      if (entry.getKey().getColumnFamily().equals(DataFileColumnFamily.NAME)) {
-        count = count + 1;
-      }
-      
-      tabletFileCounts.put(row, count);
-    }
-    
-    if (tabletFileCounts.size() < minTablets || tabletFileCounts.size() > maxTablets) {
-      throw new Exception("Did not find expected number of tablets " + tabletFileCounts.size());
-    }
-    
-    Set<Entry<Text,Integer>> es = tabletFileCounts.entrySet();
-    for (Entry<Text,Integer> entry : es) {
-      if (entry.getValue() > maxRFiles || entry.getValue() < minRFiles) {
-        throw new Exception("tablet " + entry.getKey() + " has " + entry.getValue() + " map
files");
-      }
-    }
-  }
-  
-  protected void bulkImport(FileSystem fs, String table, String dir) throws Exception {
-    String failDir = dir + "_failures";
-    Path failPath = new Path(failDir);
-    fs.delete(failPath, true);
-    fs.mkdirs(failPath);
-    
-    getConnector().tableOperations().importDirectory(table, dir, failDir, false);
-    
-    if (fs.listStatus(failPath).length > 0) {
-      throw new Exception("Some files failed to bulk import");
-    }
-    
-  }
-  
-  static class Opts extends ClientOpts {
-    @Parameter(names = "--classname", required = true, description = "name of the class under
test")
-    String classname = null;
-    
-    @Parameter(names = "--opt", required = true, description = "the options for test")
-    String opt = null;
-  }
-  
-  public static void main(String[] args) throws Exception {
-    Opts opts = new Opts();
-    opts.parseArgs(FunctionalTest.class.getName(), args);
-    
-    Class<? extends FunctionalTest> testClass = AccumuloVFSClassLoader.loadClass(opts.classname, FunctionalTest.class);
-    FunctionalTest fTest = testClass.newInstance();
-    
-    // fTest.setMaster(master);
-    fTest.setInstanceName(opts.instance);
-    fTest.setPrincipal(opts.principal);
-    fTest.setToken(opts.getToken());
-    
-    if (opts.opt.equals("getConfig")) {
-      Map<String,String> iconfig = fTest.getInitialConfig();
-      System.out.println("{");
-      for (Entry<String,String> entry : iconfig.entrySet()) {
-        System.out.println("'" + entry.getKey() + "':'" + entry.getValue() + "',");
-      }
-      System.out.println("}");
-    } else if (opts.opt.equals("setup")) {
-      fTest.setup();
-    } else if (opts.opt.equals("run")) {
-      fTest.run();
-    } else if (opts.opt.equals("cleanup")) {
-      fTest.cleanup();
-    } else {
-      printHelpAndExit("Unknown option: " + opts.opt);
-    }
-    
-  }
-  
-  static void printHelpAndExit(String message) {
-    System.out.println(message);
-    new JCommander(new Opts()).usage();
-    System.exit(1);
-  }
-  
-  static Mutation nm(String row, String cf, String cq, Value value) {
-    Mutation m = new Mutation(new Text(row));
-    m.put(new Text(cf), new Text(cq), value);
-    return m;
-  }
-  
-  static Mutation nm(String row, String cf, String cq, String value) {
-    return nm(row, cf, cq, new Value(value.getBytes()));
-  }
-}

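Aside on a helper just removed: FunctionalTest.parseConfig split "key=value" strings into a TreeMap, treating a trailing '=' as an empty value. A minimal usage sketch (property names made up for illustration):

    // Hypothetical call against the removed helper:
    Map<String,String> cfg = FunctionalTest.parseConfig(
        "table.split.threshold=100K",  // ordinary key=value pair
        "table.custom.marker=");       // trailing '=' stores an empty string
    // cfg -> {table.custom.marker=, table.split.threshold=100K}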
http://git-wip-us.apache.org/repos/asf/accumulo/blob/9e32d86e/test/src/main/java/org/apache/accumulo/test/functional/SplitRecoveryTest.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/SplitRecoveryTest.java b/test/src/main/java/org/apache/accumulo/test/functional/SplitRecoveryTest.java
deleted file mode 100644
index 5eed38f..0000000
--- a/test/src/main/java/org/apache/accumulo/test/functional/SplitRecoveryTest.java
+++ /dev/null
@@ -1,270 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.functional;
-
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.SortedMap;
-import java.util.TreeMap;
-
-import org.apache.accumulo.core.Constants;
-import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.client.impl.ScannerImpl;
-import org.apache.accumulo.core.client.impl.Writer;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.KeyExtent;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.file.rfile.RFile;
-import org.apache.accumulo.core.metadata.MetadataTable;
-import org.apache.accumulo.core.metadata.schema.DataFileValue;
-import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection;
-import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.DataFileColumnFamily;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.core.util.ColumnFQ;
-import org.apache.accumulo.core.zookeeper.ZooUtil;
-import org.apache.accumulo.fate.zookeeper.IZooReaderWriter;
-import org.apache.accumulo.fate.zookeeper.ZooLock.LockLossReason;
-import org.apache.accumulo.fate.zookeeper.ZooLock.LockWatcher;
-import org.apache.accumulo.fate.zookeeper.ZooUtil.NodeExistsPolicy;
-import org.apache.accumulo.server.ServerConstants;
-import org.apache.accumulo.server.client.HdfsZooInstance;
-import org.apache.accumulo.server.fs.FileRef;
-import org.apache.accumulo.server.master.state.Assignment;
-import org.apache.accumulo.server.master.state.TServerInstance;
-import org.apache.accumulo.server.security.SystemCredentials;
-import org.apache.accumulo.server.tabletserver.TabletServer;
-import org.apache.accumulo.server.tabletserver.TabletTime;
-import org.apache.accumulo.server.util.FileUtil;
-import org.apache.accumulo.server.util.MetadataTableUtil;
-import org.apache.accumulo.server.zookeeper.TransactionWatcher;
-import org.apache.accumulo.server.zookeeper.ZooLock;
-import org.apache.accumulo.server.zookeeper.ZooReaderWriter;
-import org.apache.hadoop.io.Text;
-
-public class SplitRecoveryTest extends FunctionalTest {
-  
-  @Override
-  public void cleanup() throws Exception {
-    
-  }
-  
-  @Override
-  public Map<String,String> getInitialConfig() {
-    return Collections.emptyMap();
-  }
-  
-  @Override
-  public List<TableSetup> getTablesToCreate() {
-    return Collections.emptyList();
-  }
-  
-  private KeyExtent nke(String table, String endRow, String prevEndRow) {
-    return new KeyExtent(new Text(table), endRow == null ? null : new Text(endRow), prevEndRow == null ? null : new Text(prevEndRow));
-  }
-  
-  @Override
-  public void run() throws Exception {
-    String zPath = ZooUtil.getRoot(HdfsZooInstance.getInstance()) + "/testLock";
-    IZooReaderWriter zoo = ZooReaderWriter.getInstance();
-    zoo.putPersistentData(zPath, "".getBytes(), NodeExistsPolicy.OVERWRITE);
-    ZooLock zl = new ZooLock(zPath);
-    boolean gotLock = zl.tryLock(new LockWatcher() {
-      
-      @Override
-      public void lostLock(LockLossReason reason) {
-        System.exit(-1);
-        
-      }
-      
-      @Override
-      public void unableToMonitorLockNode(Throwable e) {
-        System.exit(-1);
-      }
-    }, "foo".getBytes());
-    
-    if (!gotLock) {
-      System.err.println("Failed to get lock " + zPath);
-    }
-    
-    // run test for a table with one tablet
-    runSplitRecoveryTest(0, "sp", 0, zl, nke("foo0", null, null));
-    runSplitRecoveryTest(1, "sp", 0, zl, nke("foo1", null, null));
-    
-    // run test for tables with two tablets, run test on first and last tablet
-    runSplitRecoveryTest(0, "k", 0, zl, nke("foo2", "m", null), nke("foo2", null, "m"));
-    runSplitRecoveryTest(1, "k", 0, zl, nke("foo3", "m", null), nke("foo3", null, "m"));
-    runSplitRecoveryTest(0, "o", 1, zl, nke("foo4", "m", null), nke("foo4", null, "m"));
-    runSplitRecoveryTest(1, "o", 1, zl, nke("foo5", "m", null), nke("foo5", null, "m"));
-    
-    // run test for table w/ three tablets, run test on middle tablet
-    runSplitRecoveryTest(0, "o", 1, zl, nke("foo6", "m", null), nke("foo6", "r", "m"), nke("foo6",
null, "r"));
-    runSplitRecoveryTest(1, "o", 1, zl, nke("foo7", "m", null), nke("foo7", "r", "m"), nke("foo7",
null, "r"));
-    
-    // run test for table w/ three tablets, run test on first
-    runSplitRecoveryTest(0, "g", 0, zl, nke("foo8", "m", null), nke("foo8", "r", "m"), nke("foo8",
null, "r"));
-    runSplitRecoveryTest(1, "g", 0, zl, nke("foo9", "m", null), nke("foo9", "r", "m"), nke("foo9",
null, "r"));
-    
-    // run test for table w/ three tablets, run test on last tablet
-    runSplitRecoveryTest(0, "w", 2, zl, nke("fooa", "m", null), nke("fooa", "r", "m"), nke("fooa",
null, "r"));
-    runSplitRecoveryTest(1, "w", 2, zl, nke("foob", "m", null), nke("foob", "r", "m"), nke("foob",
null, "r"));
-  }
-  
-  private void runSplitRecoveryTest(int failPoint, String mr, int extentToSplit, ZooLock zl, KeyExtent... extents) throws Exception {
-    
-    Text midRow = new Text(mr);
-    
-    SortedMap<FileRef,DataFileValue> splitMapFiles = null;
-    
-    for (int i = 0; i < extents.length; i++) {
-      KeyExtent extent = extents[i];
-      
-      String tdir = ServerConstants.getTablesDirs()[0] + "/" + extent.getTableId().toString() + "/dir_" + i;
-      MetadataTableUtil.addTablet(extent, tdir, SystemCredentials.get(), TabletTime.LOGICAL_TIME_ID, zl);
-      SortedMap<FileRef,DataFileValue> mapFiles = new TreeMap<FileRef,DataFileValue>();
-      mapFiles.put(new FileRef(tdir + "/" + RFile.EXTENSION + "_000_000"), new DataFileValue(1000017 + i, 10000 + i));
-      
-      if (i == extentToSplit) {
-        splitMapFiles = mapFiles;
-      }
-      int tid = 0;
-      TransactionWatcher.ZooArbitrator.start(Constants.BULK_ARBITRATOR_TYPE, tid);
-      MetadataTableUtil.updateTabletDataFile(tid, extent, mapFiles, "L0", SystemCredentials.get(), zl);
-    }
-    
-    KeyExtent extent = extents[extentToSplit];
-    
-    KeyExtent high = new KeyExtent(extent.getTableId(), extent.getEndRow(), midRow);
-    KeyExtent low = new KeyExtent(extent.getTableId(), midRow, extent.getPrevEndRow());
-    
-    splitPartiallyAndRecover(extent, high, low, .4, splitMapFiles, midRow, "localhost:1234", failPoint, zl);
-  }
-  
-  private void splitPartiallyAndRecover(KeyExtent extent, KeyExtent high, KeyExtent low, double splitRatio, SortedMap<FileRef,DataFileValue> mapFiles,
-      Text midRow, String location, int steps, ZooLock zl) throws Exception {
-    
-    SortedMap<FileRef,DataFileValue> lowDatafileSizes = new TreeMap<FileRef,DataFileValue>();
-    SortedMap<FileRef,DataFileValue> highDatafileSizes = new TreeMap<FileRef,DataFileValue>();
-    List<FileRef> highDatafilesToRemove = new ArrayList<FileRef>();
-    
-    MetadataTableUtil.splitDatafiles(extent.getTableId(), midRow, splitRatio, new HashMap<FileRef,FileUtil.FileInfo>(), mapFiles, lowDatafileSizes,
-        highDatafileSizes, highDatafilesToRemove);
-    
-    MetadataTableUtil.splitTablet(high, extent.getPrevEndRow(), splitRatio, SystemCredentials.get(), zl);
-    TServerInstance instance = new TServerInstance(location, zl.getSessionId());
-    Writer writer = new Writer(HdfsZooInstance.getInstance(), SystemCredentials.get(), MetadataTable.ID);
-    Assignment assignment = new Assignment(high, instance);
-    Mutation m = new Mutation(assignment.tablet.getMetadataEntry());
-    m.put(TabletsSection.FutureLocationColumnFamily.NAME, assignment.server.asColumnQualifier(), assignment.server.asMutationValue());
-    writer.update(m);
-    
-    if (steps >= 1) {
-      Map<FileRef,Long> bulkFiles = MetadataTableUtil.getBulkFilesLoaded(SystemCredentials.get(), extent);
-      MetadataTableUtil.addNewTablet(low, "/lowDir", instance, lowDatafileSizes, bulkFiles, SystemCredentials.get(), TabletTime.LOGICAL_TIME_ID + "0", -1l,
-          -1l, zl);
-    }
-    if (steps >= 2)
-      MetadataTableUtil.finishSplit(high, highDatafileSizes, highDatafilesToRemove, SystemCredentials.get(), zl);
-    
-    TabletServer.verifyTabletInformation(high, instance, null, "127.0.0.1:0", zl);
-    
-    if (steps >= 1) {
-      ensureTabletHasNoUnexpectedMetadataEntries(low, lowDatafileSizes);
-      ensureTabletHasNoUnexpectedMetadataEntries(high, highDatafileSizes);
-      
-      Map<FileRef,Long> lowBulkFiles = MetadataTableUtil.getBulkFilesLoaded(SystemCredentials.get(), low);
-      Map<FileRef,Long> highBulkFiles = MetadataTableUtil.getBulkFilesLoaded(SystemCredentials.get(), high);
-      
-      if (!lowBulkFiles.equals(highBulkFiles)) {
-        throw new Exception(" " + lowBulkFiles + " != " + highBulkFiles + " " + low + " "
+ high);
-      }
-      
-      if (lowBulkFiles.size() == 0) {
-        throw new Exception(" no bulk files " + low);
-      }
-    } else {
-      ensureTabletHasNoUnexpectedMetadataEntries(extent, mapFiles);
-    }
-  }
-  
-  private void ensureTabletHasNoUnexpectedMetadataEntries(KeyExtent extent, SortedMap<FileRef,DataFileValue> expectedMapFiles) throws Exception {
-    Scanner scanner = new ScannerImpl(HdfsZooInstance.getInstance(), SystemCredentials.get(), MetadataTable.ID, Authorizations.EMPTY);
-    scanner.setRange(extent.toMetadataRange());
-    
-    HashSet<ColumnFQ> expectedColumns = new HashSet<ColumnFQ>();
-    expectedColumns.add(TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN);
-    expectedColumns.add(TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN);
-    expectedColumns.add(TabletsSection.ServerColumnFamily.TIME_COLUMN);
-    expectedColumns.add(TabletsSection.ServerColumnFamily.LOCK_COLUMN);
-    
-    HashSet<Text> expectedColumnFamilies = new HashSet<Text>();
-    expectedColumnFamilies.add(DataFileColumnFamily.NAME);
-    expectedColumnFamilies.add(TabletsSection.FutureLocationColumnFamily.NAME);
-    expectedColumnFamilies.add(TabletsSection.CurrentLocationColumnFamily.NAME);
-    expectedColumnFamilies.add(TabletsSection.LastLocationColumnFamily.NAME);
-    expectedColumnFamilies.add(TabletsSection.BulkFileColumnFamily.NAME);
-    
-    Iterator<Entry<Key,Value>> iter = scanner.iterator();
-    while (iter.hasNext()) {
-      Key key = iter.next().getKey();
-      
-      if (!key.getRow().equals(extent.getMetadataEntry())) {
-        throw new Exception("Tablet " + extent + " contained unexpected " + MetadataTable.NAME
+ " entry " + key);
-      }
-      
-      if (expectedColumnFamilies.contains(key.getColumnFamily())) {
-        continue;
-      }
-      
-      if (expectedColumns.remove(new ColumnFQ(key))) {
-        continue;
-      }
-      
-      throw new Exception("Tablet " + extent + " contained unexpected " + MetadataTable.NAME
+ " entry " + key);
-    }
-    System.out.println("expectedColumns " + expectedColumns);
-    if (expectedColumns.size() > 1 || (expectedColumns.size() == 1)) {
-      throw new Exception("Not all expected columns seen " + extent + " " + expectedColumns);
-    }
-    
-    SortedMap<FileRef,DataFileValue> fixedMapFiles = MetadataTableUtil.getDataFileSizes(extent, SystemCredentials.get());
-    verifySame(expectedMapFiles, fixedMapFiles);
-  }
-  
-  private void verifySame(SortedMap<FileRef,DataFileValue> datafileSizes, SortedMap<FileRef,DataFileValue> fixedDatafileSizes) throws Exception {
-    
-    if (!datafileSizes.keySet().containsAll(fixedDatafileSizes.keySet()) || !fixedDatafileSizes.keySet().containsAll(datafileSizes.keySet())) {
-      throw new Exception("Key sets not the same " + datafileSizes.keySet() + " !=  " + fixedDatafileSizes.keySet());
-    }
-    
-    for (Entry<FileRef,DataFileValue> entry : datafileSizes.entrySet()) {
-      DataFileValue dfv = entry.getValue();
-      DataFileValue otherDfv = fixedDatafileSizes.get(entry.getKey());
-      
-      if (!dfv.equals(otherDfv)) {
-        throw new Exception(entry.getKey() + " dfv not equal  " + dfv + "  " + otherDfv);
-      }
-    }
-  }
-  
-}

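Before the remaining two files: the scenario exercised by the deleted SplitRecoveryTest above, and by the rewritten SplitRecoveryIT below, is a tablet split interrupted partway through its metadata updates. An annotated outline of what the steps parameter of splitPartiallyAndRecover controls (my reading of the code, not text from the commit):

    // steps == 0: the tablet's metadata was split (splitTablet) and a future
    //             location written, but the low child was never added.
    // steps >= 1: the low child was added via MetadataTableUtil.addNewTablet.
    // steps >= 2: the split completed via MetadataTableUtil.finishSplit.
    // In all cases TabletServer.verifyTabletInformation(...) then drives
    // recovery, and ensureTabletHasNoUnexpectedMetadataEntries(...) checks
    // that the metadata table ended up consistent.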
http://git-wip-us.apache.org/repos/asf/accumulo/blob/9e32d86e/test/src/test/java/org/apache/accumulo/test/functional/FunctionalTestUtils.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/FunctionalTestUtils.java b/test/src/test/java/org/apache/accumulo/test/functional/FunctionalTestUtils.java
index 8fd50f5..8644440 100644
--- a/test/src/test/java/org/apache/accumulo/test/functional/FunctionalTestUtils.java
+++ b/test/src/test/java/org/apache/accumulo/test/functional/FunctionalTestUtils.java
@@ -50,6 +50,7 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.io.Text;
 
 public class FunctionalTestUtils {
+  
   static void checkRFiles(Connector c, String tableName, int minTablets, int maxTablets, int minRFiles, int maxRFiles) throws Exception {
     Scanner scanner = c.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
     String tableId = c.tableOperations().tableIdMap().get(tableName);

http://git-wip-us.apache.org/repos/asf/accumulo/blob/9e32d86e/test/src/test/java/org/apache/accumulo/test/functional/SplitRecoveryIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/SplitRecoveryIT.java b/test/src/test/java/org/apache/accumulo/test/functional/SplitRecoveryIT.java
index 4d97886..e976493 100644
--- a/test/src/test/java/org/apache/accumulo/test/functional/SplitRecoveryIT.java
+++ b/test/src/test/java/org/apache/accumulo/test/functional/SplitRecoveryIT.java
@@ -18,13 +18,250 @@ package org.apache.accumulo.test.functional;
 
 import static org.junit.Assert.assertEquals;
 
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.SortedMap;
+import java.util.TreeMap;
+
+import org.apache.accumulo.core.Constants;
+import org.apache.accumulo.core.client.Scanner;
+import org.apache.accumulo.core.client.impl.ScannerImpl;
+import org.apache.accumulo.core.client.impl.Writer;
+import org.apache.accumulo.core.data.Key;
+import org.apache.accumulo.core.data.KeyExtent;
+import org.apache.accumulo.core.data.Mutation;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.file.rfile.RFile;
+import org.apache.accumulo.core.metadata.MetadataTable;
+import org.apache.accumulo.core.metadata.schema.DataFileValue;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.DataFileColumnFamily;
+import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.core.util.ColumnFQ;
+import org.apache.accumulo.core.zookeeper.ZooUtil;
+import org.apache.accumulo.fate.zookeeper.IZooReaderWriter;
+import org.apache.accumulo.fate.zookeeper.ZooLock.LockLossReason;
+import org.apache.accumulo.fate.zookeeper.ZooLock.LockWatcher;
+import org.apache.accumulo.fate.zookeeper.ZooUtil.NodeExistsPolicy;
+import org.apache.accumulo.server.ServerConstants;
+import org.apache.accumulo.server.client.HdfsZooInstance;
+import org.apache.accumulo.server.fs.FileRef;
+import org.apache.accumulo.server.master.state.Assignment;
+import org.apache.accumulo.server.master.state.TServerInstance;
+import org.apache.accumulo.server.security.SystemCredentials;
+import org.apache.accumulo.server.tabletserver.TabletServer;
+import org.apache.accumulo.server.tabletserver.TabletTime;
+import org.apache.accumulo.server.util.FileUtil;
+import org.apache.accumulo.server.util.MetadataTableUtil;
+import org.apache.accumulo.server.zookeeper.TransactionWatcher;
+import org.apache.accumulo.server.zookeeper.ZooLock;
+import org.apache.accumulo.server.zookeeper.ZooReaderWriter;
+import org.apache.hadoop.io.Text;
 import org.junit.Test;
 
 public class SplitRecoveryIT extends ConfigurableMacIT {
+  
+  
+  private KeyExtent nke(String table, String endRow, String prevEndRow) {
+    return new KeyExtent(new Text(table), endRow == null ? null : new Text(endRow), prevEndRow == null ? null : new Text(prevEndRow));
+  }
+  
+  private void run() throws Exception {
+    String zPath = ZooUtil.getRoot(HdfsZooInstance.getInstance()) + "/testLock";
+    IZooReaderWriter zoo = ZooReaderWriter.getInstance();
+    zoo.putPersistentData(zPath, "".getBytes(), NodeExistsPolicy.OVERWRITE);
+    ZooLock zl = new ZooLock(zPath);
+    boolean gotLock = zl.tryLock(new LockWatcher() {
+      
+      @Override
+      public void lostLock(LockLossReason reason) {
+        System.exit(-1);
+        
+      }
+      
+      @Override
+      public void unableToMonitorLockNode(Throwable e) {
+        System.exit(-1);
+      }
+    }, "foo".getBytes());
+    
+    if (!gotLock) {
+      System.err.println("Failed to get lock " + zPath);
+    }
+    
+    // run test for a table with one tablet
+    runSplitRecoveryTest(0, "sp", 0, zl, nke("foo0", null, null));
+    runSplitRecoveryTest(1, "sp", 0, zl, nke("foo1", null, null));
+    
+    // run test for tables with two tablets, run test on first and last tablet
+    runSplitRecoveryTest(0, "k", 0, zl, nke("foo2", "m", null), nke("foo2", null, "m"));
+    runSplitRecoveryTest(1, "k", 0, zl, nke("foo3", "m", null), nke("foo3", null, "m"));
+    runSplitRecoveryTest(0, "o", 1, zl, nke("foo4", "m", null), nke("foo4", null, "m"));
+    runSplitRecoveryTest(1, "o", 1, zl, nke("foo5", "m", null), nke("foo5", null, "m"));
+    
+    // run test for table w/ three tablets, run test on middle tablet
+    runSplitRecoveryTest(0, "o", 1, zl, nke("foo6", "m", null), nke("foo6", "r", "m"), nke("foo6",
null, "r"));
+    runSplitRecoveryTest(1, "o", 1, zl, nke("foo7", "m", null), nke("foo7", "r", "m"), nke("foo7",
null, "r"));
+    
+    // run test for table w/ three tablets, run test on first
+    runSplitRecoveryTest(0, "g", 0, zl, nke("foo8", "m", null), nke("foo8", "r", "m"), nke("foo8",
null, "r"));
+    runSplitRecoveryTest(1, "g", 0, zl, nke("foo9", "m", null), nke("foo9", "r", "m"), nke("foo9",
null, "r"));
+    
+    // run test for table w/ three tablets, run test on last tablet
+    runSplitRecoveryTest(0, "w", 2, zl, nke("fooa", "m", null), nke("fooa", "r", "m"), nke("fooa",
null, "r"));
+    runSplitRecoveryTest(1, "w", 2, zl, nke("foob", "m", null), nke("foob", "r", "m"), nke("foob",
null, "r"));
+  }
+  
+  private void runSplitRecoveryTest(int failPoint, String mr, int extentToSplit, ZooLock zl, KeyExtent... extents) throws Exception {
+    
+    Text midRow = new Text(mr);
+    
+    SortedMap<FileRef,DataFileValue> splitMapFiles = null;
+    
+    for (int i = 0; i < extents.length; i++) {
+      KeyExtent extent = extents[i];
+      
+      String tdir = ServerConstants.getTablesDirs()[0] + "/" + extent.getTableId().toString() + "/dir_" + i;
+      MetadataTableUtil.addTablet(extent, tdir, SystemCredentials.get(), TabletTime.LOGICAL_TIME_ID, zl);
+      SortedMap<FileRef,DataFileValue> mapFiles = new TreeMap<FileRef,DataFileValue>();
+      mapFiles.put(new FileRef(tdir + "/" + RFile.EXTENSION + "_000_000"), new DataFileValue(1000017 + i, 10000 + i));
+      
+      if (i == extentToSplit) {
+        splitMapFiles = mapFiles;
+      }
+      int tid = 0;
+      TransactionWatcher.ZooArbitrator.start(Constants.BULK_ARBITRATOR_TYPE, tid);
+      MetadataTableUtil.updateTabletDataFile(tid, extent, mapFiles, "L0", SystemCredentials.get(), zl);
+    }
+    
+    KeyExtent extent = extents[extentToSplit];
+    
+    KeyExtent high = new KeyExtent(extent.getTableId(), extent.getEndRow(), midRow);
+    KeyExtent low = new KeyExtent(extent.getTableId(), midRow, extent.getPrevEndRow());
+    
+    splitPartiallyAndRecover(extent, high, low, .4, splitMapFiles, midRow, "localhost:1234", failPoint, zl);
+  }
+  
+  private void splitPartiallyAndRecover(KeyExtent extent, KeyExtent high, KeyExtent low, double splitRatio, SortedMap<FileRef,DataFileValue> mapFiles,
+      Text midRow, String location, int steps, ZooLock zl) throws Exception {
+    
+    SortedMap<FileRef,DataFileValue> lowDatafileSizes = new TreeMap<FileRef,DataFileValue>();
+    SortedMap<FileRef,DataFileValue> highDatafileSizes = new TreeMap<FileRef,DataFileValue>();
+    List<FileRef> highDatafilesToRemove = new ArrayList<FileRef>();
+    
+    MetadataTableUtil.splitDatafiles(extent.getTableId(), midRow, splitRatio, new HashMap<FileRef,FileUtil.FileInfo>(), mapFiles, lowDatafileSizes,
+        highDatafileSizes, highDatafilesToRemove);
+    
+    MetadataTableUtil.splitTablet(high, extent.getPrevEndRow(), splitRatio, SystemCredentials.get(), zl);
+    TServerInstance instance = new TServerInstance(location, zl.getSessionId());
+    Writer writer = new Writer(HdfsZooInstance.getInstance(), SystemCredentials.get(), MetadataTable.ID);
+    Assignment assignment = new Assignment(high, instance);
+    Mutation m = new Mutation(assignment.tablet.getMetadataEntry());
+    m.put(TabletsSection.FutureLocationColumnFamily.NAME, assignment.server.asColumnQualifier(), assignment.server.asMutationValue());
+    writer.update(m);
+    
+    if (steps >= 1) {
+      Map<FileRef,Long> bulkFiles = MetadataTableUtil.getBulkFilesLoaded(SystemCredentials.get(), extent);
+      MetadataTableUtil.addNewTablet(low, "/lowDir", instance, lowDatafileSizes, bulkFiles, SystemCredentials.get(), TabletTime.LOGICAL_TIME_ID + "0", -1l,
+          -1l, zl);
+    }
+    if (steps >= 2)
+      MetadataTableUtil.finishSplit(high, highDatafileSizes, highDatafilesToRemove, SystemCredentials.get(), zl);
+    
+    TabletServer.verifyTabletInformation(high, instance, null, "127.0.0.1:0", zl);
+    
+    if (steps >= 1) {
+      ensureTabletHasNoUnexpectedMetadataEntries(low, lowDatafileSizes);
+      ensureTabletHasNoUnexpectedMetadataEntries(high, highDatafileSizes);
+      
+      Map<FileRef,Long> lowBulkFiles = MetadataTableUtil.getBulkFilesLoaded(SystemCredentials.get(), low);
+      Map<FileRef,Long> highBulkFiles = MetadataTableUtil.getBulkFilesLoaded(SystemCredentials.get(), high);
+      
+      if (!lowBulkFiles.equals(highBulkFiles)) {
+        throw new Exception(" " + lowBulkFiles + " != " + highBulkFiles + " " + low + " "
+ high);
+      }
+      
+      if (lowBulkFiles.size() == 0) {
+        throw new Exception(" no bulk files " + low);
+      }
+    } else {
+      ensureTabletHasNoUnexpectedMetadataEntries(extent, mapFiles);
+    }
+  }
+  
+  private void ensureTabletHasNoUnexpectedMetadataEntries(KeyExtent extent, SortedMap<FileRef,DataFileValue> expectedMapFiles) throws Exception {
+    Scanner scanner = new ScannerImpl(HdfsZooInstance.getInstance(), SystemCredentials.get(), MetadataTable.ID, Authorizations.EMPTY);
+    scanner.setRange(extent.toMetadataRange());
+    
+    HashSet<ColumnFQ> expectedColumns = new HashSet<ColumnFQ>();
+    expectedColumns.add(TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN);
+    expectedColumns.add(TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN);
+    expectedColumns.add(TabletsSection.ServerColumnFamily.TIME_COLUMN);
+    expectedColumns.add(TabletsSection.ServerColumnFamily.LOCK_COLUMN);
+    
+    HashSet<Text> expectedColumnFamilies = new HashSet<Text>();
+    expectedColumnFamilies.add(DataFileColumnFamily.NAME);
+    expectedColumnFamilies.add(TabletsSection.FutureLocationColumnFamily.NAME);
+    expectedColumnFamilies.add(TabletsSection.CurrentLocationColumnFamily.NAME);
+    expectedColumnFamilies.add(TabletsSection.LastLocationColumnFamily.NAME);
+    expectedColumnFamilies.add(TabletsSection.BulkFileColumnFamily.NAME);
+    
+    Iterator<Entry<Key,Value>> iter = scanner.iterator();
+    while (iter.hasNext()) {
+      Key key = iter.next().getKey();
+      
+      if (!key.getRow().equals(extent.getMetadataEntry())) {
+        throw new Exception("Tablet " + extent + " contained unexpected " + MetadataTable.NAME
+ " entry " + key);
+      }
+      
+      if (expectedColumnFamilies.contains(key.getColumnFamily())) {
+        continue;
+      }
+      
+      if (expectedColumns.remove(new ColumnFQ(key))) {
+        continue;
+      }
+      
+      throw new Exception("Tablet " + extent + " contained unexpected " + MetadataTable.NAME
+ " entry " + key);
+    }
+    System.out.println("expectedColumns " + expectedColumns);
+    if (expectedColumns.size() > 1 || (expectedColumns.size() == 1)) {
+      throw new Exception("Not all expected columns seen " + extent + " " + expectedColumns);
+    }
+    
+    SortedMap<FileRef,DataFileValue> fixedMapFiles = MetadataTableUtil.getDataFileSizes(extent, SystemCredentials.get());
+    verifySame(expectedMapFiles, fixedMapFiles);
+  }
+  
+  private void verifySame(SortedMap<FileRef,DataFileValue> datafileSizes, SortedMap<FileRef,DataFileValue> fixedDatafileSizes) throws Exception {
+    
+    if (!datafileSizes.keySet().containsAll(fixedDatafileSizes.keySet()) || !fixedDatafileSizes.keySet().containsAll(datafileSizes.keySet())) {
+      throw new Exception("Key sets not the same " + datafileSizes.keySet() + " !=  " + fixedDatafileSizes.keySet());
+    }
+    
+    for (Entry<FileRef,DataFileValue> entry : datafileSizes.entrySet()) {
+      DataFileValue dfv = entry.getValue();
+      DataFileValue otherDfv = fixedDatafileSizes.get(entry.getKey());
+      
+      if (!dfv.equals(otherDfv)) {
+        throw new Exception(entry.getKey() + " dfv not equal  " + dfv + "  " + otherDfv);
+      }
+    }
+  }
 
+  
+  public static void main(String[] args) throws Exception {
+    new SplitRecoveryIT().run();
+  }
+  
   @Test(timeout = 10 * 1000)
   public void test() throws Exception {
-    assertEquals(0, exec(SplitRecoveryTest.class).waitFor());
+    assertEquals(0, exec(SplitRecoveryIT.class).waitFor());
   }
 
 }


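A closing note on why the old wiring likely "was not working as intended" (my inference; the commit message does not elaborate): SplitRecoveryTest had no main() of its own, so exec(SplitRecoveryTest.class) landed in the inherited FunctionalTest.main, which required --classname and --opt arguments and so never reached the split-recovery scenario. The rewritten IT closes that gap with the self-exec pattern visible above:

    // Child JVM: enters main() and runs the scenario directly.
    public static void main(String[] args) throws Exception {
      new SplitRecoveryIT().run();
    }

    // Parent JUnit test: forks the child and asserts its exit code.
    @Test(timeout = 10 * 1000)
    public void test() throws Exception {
      assertEquals(0, exec(SplitRecoveryIT.class).waitFor());
    }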