accumulo-commits mailing list archives

From ctubb...@apache.org
Subject [27/53] [abbrv] ACCUMULO-658 Move tests and resources to correct modules
Date Fri, 06 Sep 2013 18:22:55 GMT
http://git-wip-us.apache.org/repos/asf/accumulo/blob/4ba20cf3/server/server/src/main/java/org/apache/accumulo/server/metanalysis/PrintEvents.java
----------------------------------------------------------------------
diff --git a/server/server/src/main/java/org/apache/accumulo/server/metanalysis/PrintEvents.java b/server/server/src/main/java/org/apache/accumulo/server/metanalysis/PrintEvents.java
deleted file mode 100644
index 499b6bd..0000000
--- a/server/server/src/main/java/org/apache/accumulo/server/metanalysis/PrintEvents.java
+++ /dev/null
@@ -1,99 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.server.metanalysis;
-
-import java.io.ByteArrayInputStream;
-import java.io.DataInputStream;
-import java.util.Collections;
-import java.util.List;
-import java.util.Map.Entry;
-
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.data.ColumnUpdate;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.PartialKey;
-import org.apache.accumulo.core.data.Range;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.server.cli.ClientOpts;
-import org.apache.accumulo.tserver.logger.LogFileValue;
-import org.apache.hadoop.io.Text;
-
-import com.beust.jcommander.Parameter;
-
-/**
- * Looks up and prints mutations indexed by IndexMeta
- */
-public class PrintEvents {
-  
-  static class Opts extends ClientOpts {
-    @Parameter(names = {"-t", "--tableId"}, description = "table id", required = true)
-    String tableId;
-    @Parameter(names = {"-e", "--endRow"}, description = "end row")
-    String endRow;
-    @Parameter(names = {"-t", "--time"}, description = "time, in milliseconds", required = true)
-    long time;
-  }
-  
-  public static void main(String[] args) throws Exception {
-    Opts opts = new Opts();
-    opts.parseArgs(PrintEvents.class.getName(), args);
-    
-    Connector conn = opts.getConnector();
-    
-    printEvents(conn, opts.tableId, opts.endRow, opts.time);
-  }
-  
-  private static void printEvents(Connector conn, String tableId, String endRow, Long time) throws Exception {
-    Scanner scanner = conn.createScanner("tabletEvents", new Authorizations());
-    String metaRow = tableId + (endRow == null ? "<" : ";" + endRow);
-    scanner.setRange(new Range(new Key(metaRow, String.format("%020d", time)), true, new Key(metaRow).followingKey(PartialKey.ROW), false));
-    int count = 0;
-    
-    String lastLog = null;
-    
-    loop1: for (Entry<Key,Value> entry : scanner) {
-      if (entry.getKey().getColumnQualifier().toString().equals("log")) {
-        if (lastLog == null || !lastLog.equals(entry.getValue().toString()))
-          System.out.println("Log : " + entry.getValue());
-        lastLog = entry.getValue().toString();
-      } else if (entry.getKey().getColumnQualifier().toString().equals("mut")) {
-        DataInputStream dis = new DataInputStream(new ByteArrayInputStream(entry.getValue().get()));
-        Mutation m = new Mutation();
-        m.readFields(dis);
-        
-        LogFileValue lfv = new LogFileValue();
-        lfv.mutations = Collections.singletonList(m);
-        
-        System.out.println(LogFileValue.format(lfv, 1));
-        
-        List<ColumnUpdate> columnsUpdates = m.getUpdates();
-        for (ColumnUpdate cu : columnsUpdates) {
-          if (TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.equals(new Text(cu.getColumnFamily()), new Text(cu.getColumnQualifier())) && count > 0) {
-            System.out.println("Saw change to prevrow, stopping printing events.");
-            break loop1;
-          }
-        }
-        count++;
-      }
-    }
-    
-  }
-}
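
The deleted PrintEvents class above reads the "tabletEvents" index table (populated, per its javadoc, by IndexMeta) using a metadata-style row key plus a zero-padded timestamp. A minimal, self-contained sketch of that encoding follows; it is not part of the commit, and the class name EventRowKeySketch and the sample values are made up for illustration.

    // Sketch of the row/time encoding PrintEvents relies on; illustrative values only.
    public class EventRowKeySketch {
      // The default (last) tablet of a table is addressed as "<tableId><";
      // any other tablet is addressed as "<tableId>;<endRow>".
      static String metaRow(String tableId, String endRow) {
        return tableId + (endRow == null ? "<" : ";" + endRow);
      }

      // Times are zero-padded to 20 digits so lexicographic key order matches numeric order.
      static String timeQualifier(long timeMillis) {
        return String.format("%020d", timeMillis);
      }

      public static void main(String[] args) {
        System.out.println(metaRow("2", null));             // 2<
        System.out.println(metaRow("2", "row_0005"));       // 2;row_0005
        System.out.println(timeQualifier(1378490575000L));  // 00000001378490575000
      }
    }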

http://git-wip-us.apache.org/repos/asf/accumulo/blob/4ba20cf3/server/server/src/main/java/org/apache/accumulo/server/metanalysis/package-info.java
----------------------------------------------------------------------
diff --git a/server/server/src/main/java/org/apache/accumulo/server/metanalysis/package-info.java b/server/server/src/main/java/org/apache/accumulo/server/metanalysis/package-info.java
deleted file mode 100644
index 4d404ed..0000000
--- a/server/server/src/main/java/org/apache/accumulo/server/metanalysis/package-info.java
+++ /dev/null
@@ -1,34 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/**
- * Provides programs to analyze metadata mutations written to write ahead logs.  
- * 
- * <p>
- * These programs can be used when write ahead logs are archived.   The best way to find
- * which write ahead logs contain metadata mutations is to grep the tablet server logs.  
- * Grep for events where walogs were added to metadata tablets, then take the unique set 
- * of walogs.
- *
- * <p>
- * To use these programs, use IndexMeta to index the metadata mutations in walogs into 
- * Accumulo tables.  Then use FindTable and PrintEvents to analyze those indexes.  
- * FilterMeta allows filtering walogs down to just metadata events.  This is useful for the
- * case where the walogs need to be exported from the cluster for analysis.
- *
- * @since 1.5
- */
-package org.apache.accumulo.server.metanalysis;

http://git-wip-us.apache.org/repos/asf/accumulo/blob/4ba20cf3/server/server/src/main/java/org/apache/accumulo/server/util/FindOfflineTablets.java
----------------------------------------------------------------------
diff --git a/server/server/src/main/java/org/apache/accumulo/server/util/FindOfflineTablets.java b/server/server/src/main/java/org/apache/accumulo/server/util/FindOfflineTablets.java
deleted file mode 100644
index d7837b8..0000000
--- a/server/server/src/main/java/org/apache/accumulo/server/util/FindOfflineTablets.java
+++ /dev/null
@@ -1,132 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.server.util;
-
-import java.util.Iterator;
-import java.util.Set;
-import java.util.concurrent.atomic.AtomicBoolean;
-
-import org.apache.accumulo.core.client.AccumuloException;
-import org.apache.accumulo.core.client.Instance;
-import org.apache.accumulo.core.client.TableNotFoundException;
-import org.apache.accumulo.core.client.impl.Tables;
-import org.apache.accumulo.core.conf.DefaultConfiguration;
-import org.apache.accumulo.core.data.KeyExtent;
-import org.apache.accumulo.core.data.Range;
-import org.apache.accumulo.core.master.state.tables.TableState;
-import org.apache.accumulo.core.metadata.MetadataTable;
-import org.apache.accumulo.core.metadata.RootTable;
-import org.apache.accumulo.core.metadata.schema.MetadataSchema;
-import org.apache.accumulo.core.security.Credentials;
-import org.apache.accumulo.master.LiveTServerSet;
-import org.apache.accumulo.master.LiveTServerSet.Listener;
-import org.apache.accumulo.master.state.DistributedStoreException;
-import org.apache.accumulo.master.state.MetaDataTableScanner;
-import org.apache.accumulo.master.state.TServerInstance;
-import org.apache.accumulo.master.state.TabletLocationState;
-import org.apache.accumulo.master.state.TabletState;
-import org.apache.accumulo.master.state.ZooTabletStateStore;
-import org.apache.accumulo.server.cli.ClientOpts;
-import org.apache.accumulo.server.security.SystemCredentials;
-import org.apache.accumulo.server.tables.TableManager;
-import org.apache.hadoop.io.Text;
-import org.apache.log4j.Logger;
-
-public class FindOfflineTablets {
-  private static final Logger log = Logger.getLogger(FindOfflineTablets.class);
-
-  /**
-   * @param args
-   */
-  public static void main(String[] args) throws Exception {
-    ClientOpts opts = new ClientOpts();
-    opts.parseArgs(FindOfflineTablets.class.getName(), args);
-    Instance instance = opts.getInstance();
-    SystemCredentials creds = SystemCredentials.get();
-
-    findOffline(instance, creds, null);
-  }
-
-  static int findOffline(Instance instance, Credentials creds, String tableName) throws AccumuloException, TableNotFoundException {
-
-    final AtomicBoolean scanning = new AtomicBoolean(false);
-
-    LiveTServerSet tservers = new LiveTServerSet(instance, DefaultConfiguration.getDefaultConfiguration(), new Listener() {
-      @Override
-      public void update(LiveTServerSet current, Set<TServerInstance> deleted, Set<TServerInstance> added) {
-        if (!deleted.isEmpty() && scanning.get())
-          log.warn("Tablet servers deleted while scanning: " + deleted);
-        if (!added.isEmpty() && scanning.get())
-          log.warn("Tablet servers added while scanning: " + added);
-      }
-    });
-    tservers.startListeningForTabletServerChanges();
-    scanning.set(true);
-
-    Iterator<TabletLocationState> zooScanner;
-    try {
-      zooScanner = new ZooTabletStateStore().iterator();
-    } catch (DistributedStoreException e) {
-      throw new AccumuloException(e);
-    }
-
-    int offline = 0;
-
-    System.out.println("Scanning zookeeper");
-    if ((offline = checkTablets(zooScanner, tservers)) > 0)
-      return offline;
-
-    if (RootTable.NAME.equals(tableName))
-      return 0;
-
-    System.out.println("Scanning " + RootTable.NAME);
-    Iterator<TabletLocationState> rootScanner = new MetaDataTableScanner(instance, creds, MetadataSchema.TabletsSection.getRange(), RootTable.NAME);
-    if ((offline = checkTablets(rootScanner, tservers)) > 0)
-      return offline;
-
-    if (MetadataTable.NAME.equals(tableName))
-      return 0;
-
-    System.out.println("Scanning " + MetadataTable.NAME);
-
-    Range range = MetadataSchema.TabletsSection.getRange();
-    if (tableName != null) {
-      String tableId = Tables.getTableId(instance, tableName);
-      range = new KeyExtent(new Text(tableId), null, null).toMetadataRange();
-    }
-
-    Iterator<TabletLocationState> metaScanner = new MetaDataTableScanner(instance, creds, range, MetadataTable.NAME);
-    return checkTablets(metaScanner, tservers);
-  }
-
-  private static int checkTablets(Iterator<TabletLocationState> scanner, LiveTServerSet tservers) {
-    int offline = 0;
-
-    while (scanner.hasNext()) {
-      TabletLocationState locationState = scanner.next();
-      TabletState state = locationState.getState(tservers.getCurrentServers());
-      if (state != null && state != TabletState.HOSTED
-          && TableManager.getInstance().getTableState(locationState.extent.getTableId().toString()) != TableState.OFFLINE) {
-        System.out.println(locationState + " is " + state + "  #walogs:" + locationState.walogs.size());
-        offline++;
-      }
-    }
-
-    return offline;
-  }
-
-}
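
The counting rule inside the deleted checkTablets() reduces to a single predicate: a tablet is reported when its state is known, that state is not HOSTED, and its table has not been deliberately taken OFFLINE. A small restatement of that condition, using the same TabletState and TableState enums the deleted class imports (the helper name is made up; this is shown only to make the rule explicit, not as new behavior):

    // Restates the condition applied in checkTablets() above.
    static boolean unexpectedlyOffline(TabletState tabletState, TableState tableState) {
      return tabletState != null
          && tabletState != TabletState.HOSTED
          && tableState != TableState.OFFLINE;
    }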

http://git-wip-us.apache.org/repos/asf/accumulo/blob/4ba20cf3/server/server/src/main/java/org/apache/accumulo/server/util/Initialize.java
----------------------------------------------------------------------
diff --git a/server/server/src/main/java/org/apache/accumulo/server/util/Initialize.java b/server/server/src/main/java/org/apache/accumulo/server/util/Initialize.java
deleted file mode 100644
index 0eb6d36..0000000
--- a/server/server/src/main/java/org/apache/accumulo/server/util/Initialize.java
+++ /dev/null
@@ -1,522 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.server.util;
-
-import java.io.FileNotFoundException;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Locale;
-import java.util.Map.Entry;
-import java.util.UUID;
-
-import jline.console.ConsoleReader;
-
-import org.apache.accumulo.core.Constants;
-import org.apache.accumulo.core.cli.Help;
-import org.apache.accumulo.core.client.AccumuloSecurityException;
-import org.apache.accumulo.core.client.impl.thrift.ThriftSecurityException;
-import org.apache.accumulo.core.conf.AccumuloConfiguration;
-import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.core.conf.SiteConfiguration;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.KeyExtent;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.file.FileOperations;
-import org.apache.accumulo.core.file.FileSKVWriter;
-import org.apache.accumulo.core.iterators.user.VersioningIterator;
-import org.apache.accumulo.core.master.state.tables.TableState;
-import org.apache.accumulo.core.master.thrift.MasterGoalState;
-import org.apache.accumulo.core.metadata.MetadataTable;
-import org.apache.accumulo.core.metadata.RootTable;
-import org.apache.accumulo.core.metadata.schema.MetadataSchema;
-import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection;
-import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.DataFileColumnFamily;
-import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.LogColumnFamily;
-import org.apache.accumulo.core.security.SecurityUtil;
-import org.apache.accumulo.core.util.CachedConfiguration;
-import org.apache.accumulo.core.zookeeper.ZooUtil;
-import org.apache.accumulo.fate.zookeeper.IZooReaderWriter;
-import org.apache.accumulo.fate.zookeeper.ZooUtil.NodeExistsPolicy;
-import org.apache.accumulo.fate.zookeeper.ZooUtil.NodeMissingPolicy;
-import org.apache.accumulo.server.ServerConstants;
-import org.apache.accumulo.server.client.HdfsZooInstance;
-import org.apache.accumulo.server.conf.ServerConfiguration;
-import org.apache.accumulo.server.fs.VolumeManager;
-import org.apache.accumulo.server.fs.VolumeManagerImpl;
-import org.apache.accumulo.server.security.AuditedSecurityOperation;
-import org.apache.accumulo.server.security.SystemCredentials;
-import org.apache.accumulo.server.tables.TableManager;
-import org.apache.accumulo.server.tabletserver.TabletTime;
-import org.apache.accumulo.server.zookeeper.ZooReaderWriter;
-import org.apache.accumulo.tserver.constraints.MetadataConstraints;
-import org.apache.accumulo.tserver.iterators.MetadataBulkLoadFilter;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.io.Text;
-import org.apache.log4j.Logger;
-import org.apache.zookeeper.KeeperException;
-import org.apache.zookeeper.ZooDefs.Ids;
-
-import com.beust.jcommander.Parameter;
-
-/**
- * This class is used to set up the directory structure and the root tablet to get an instance started
- * 
- */
-public class Initialize {
-  private static final Logger log = Logger.getLogger(Initialize.class);
-  private static final String DEFAULT_ROOT_USER = "root";
-  public static final String TABLE_TABLETS_TABLET_DIR = "/table_info";
-  
-  private static ConsoleReader reader = null;
-  
-  private static ConsoleReader getConsoleReader() throws IOException {
-    if (reader == null)
-      reader = new ConsoleReader();
-    return reader;
-  }
-  
-  private static HashMap<String,String> initialMetadataConf = new HashMap<String,String>();
-  static {
-    initialMetadataConf.put(Property.TABLE_FILE_COMPRESSED_BLOCK_SIZE.getKey(), "32K");
-    initialMetadataConf.put(Property.TABLE_FILE_REPLICATION.getKey(), "5");
-    initialMetadataConf.put(Property.TABLE_WALOG_ENABLED.getKey(), "true");
-    initialMetadataConf.put(Property.TABLE_MAJC_RATIO.getKey(), "1");
-    initialMetadataConf.put(Property.TABLE_SPLIT_THRESHOLD.getKey(), "64M");
-    initialMetadataConf.put(Property.TABLE_CONSTRAINT_PREFIX.getKey() + "1", MetadataConstraints.class.getName());
-    initialMetadataConf.put(Property.TABLE_ITERATOR_PREFIX.getKey() + "scan.vers", "10," + VersioningIterator.class.getName());
-    initialMetadataConf.put(Property.TABLE_ITERATOR_PREFIX.getKey() + "scan.vers.opt.maxVersions", "1");
-    initialMetadataConf.put(Property.TABLE_ITERATOR_PREFIX.getKey() + "minc.vers", "10," + VersioningIterator.class.getName());
-    initialMetadataConf.put(Property.TABLE_ITERATOR_PREFIX.getKey() + "minc.vers.opt.maxVersions", "1");
-    initialMetadataConf.put(Property.TABLE_ITERATOR_PREFIX.getKey() + "majc.vers", "10," + VersioningIterator.class.getName());
-    initialMetadataConf.put(Property.TABLE_ITERATOR_PREFIX.getKey() + "majc.vers.opt.maxVersions", "1");
-    initialMetadataConf.put(Property.TABLE_ITERATOR_PREFIX.getKey() + "majc.bulkLoadFilter", "20," + MetadataBulkLoadFilter.class.getName());
-    initialMetadataConf.put(Property.TABLE_FAILURES_IGNORE.getKey(), "false");
-    initialMetadataConf.put(Property.TABLE_LOCALITY_GROUP_PREFIX.getKey() + "tablet",
-        String.format("%s,%s", TabletsSection.TabletColumnFamily.NAME, TabletsSection.CurrentLocationColumnFamily.NAME));
-    initialMetadataConf.put(Property.TABLE_LOCALITY_GROUP_PREFIX.getKey() + "server", String.format("%s,%s,%s,%s", DataFileColumnFamily.NAME,
-        LogColumnFamily.NAME, TabletsSection.ServerColumnFamily.NAME, TabletsSection.FutureLocationColumnFamily.NAME));
-    initialMetadataConf.put(Property.TABLE_LOCALITY_GROUPS.getKey(), "tablet,server");
-    initialMetadataConf.put(Property.TABLE_DEFAULT_SCANTIME_VISIBILITY.getKey(), "");
-    initialMetadataConf.put(Property.TABLE_INDEXCACHE_ENABLED.getKey(), "true");
-    initialMetadataConf.put(Property.TABLE_BLOCKCACHE_ENABLED.getKey(), "true");
-  }
-  
-  public static boolean doInit(Opts opts, Configuration conf, VolumeManager fs) throws IOException {
-    if (!ServerConfiguration.getSiteConfiguration().get(Property.INSTANCE_DFS_URI).equals(""))
-      log.info("Hadoop Filesystem is " + ServerConfiguration.getSiteConfiguration().get(Property.INSTANCE_DFS_URI));
-    else
-      log.info("Hadoop Filesystem is " + FileSystem.getDefaultUri(conf));
-    
-    log.info("Accumulo data dirs are " + Arrays.asList(ServerConstants.getBaseDirs()));
-    log.info("Zookeeper server is " + ServerConfiguration.getSiteConfiguration().get(Property.INSTANCE_ZK_HOST));
-    log.info("Checking if Zookeeper is available. If this hangs, then you need to make sure zookeeper is running");
-    if (!zookeeperAvailable()) {
-      log.fatal("Zookeeper needs to be up and running in order to init. Exiting ...");
-      return false;
-    }
-    if (ServerConfiguration.getSiteConfiguration().get(Property.INSTANCE_SECRET).equals(Property.INSTANCE_SECRET.getDefaultValue())) {
-      ConsoleReader c = getConsoleReader();
-      c.beep();
-      c.println();
-      c.println();
-      c.println("Warning!!! Your instance secret is still set to the default, this is not secure. We highly recommend you change it.");
-      c.println();
-      c.println();
-      c.println("You can change the instance secret in accumulo by using:");
-      c.println("   bin/accumulo " + org.apache.accumulo.server.util.ChangeSecret.class.getName() + " oldPassword newPassword.");
-      c.println("You will also need to edit your secret in your configuration file by adding the property instance.secret to your conf/accumulo-site.xml. Without this accumulo will not operate correctly");
-    }
-    
-    try {
-      if (isInitialized(fs)) {
-        log.fatal("It appears this location was previously initialized, exiting ... ");
-        return false;
-      }
-    } catch (IOException e) {
-      throw new RuntimeException(e);
-    }
-    
-    // prompt user for instance name and root password early, in case they
-    // abort, we don't leave an inconsistent HDFS/ZooKeeper structure
-    String instanceNamePath;
-    try {
-      instanceNamePath = getInstanceNamePath(opts);
-    } catch (Exception e) {
-      log.fatal("Failed to talk to zookeeper", e);
-      return false;
-    }
-    opts.rootpass = getRootPassword(opts);
-    return initialize(opts, instanceNamePath, fs);
-  }
-  
-  public static boolean initialize(Opts opts, String instanceNamePath, VolumeManager fs) {
-    
-    UUID uuid = UUID.randomUUID();
-    try {
-      initZooKeeper(opts, uuid.toString(), instanceNamePath);
-    } catch (Exception e) {
-      log.fatal("Failed to initialize zookeeper", e);
-      return false;
-    }
-    
-    try {
-      initFileSystem(opts, fs, uuid);
-    } catch (Exception e) {
-      log.fatal("Failed to initialize filesystem", e);
-      return false;
-    }
-    
-    try {
-      initSecurity(opts, uuid.toString());
-    } catch (Exception e) {
-      log.fatal("Failed to initialize security", e);
-      return false;
-    }
-    return true;
-  }
-  
-  private static boolean zookeeperAvailable() {
-    IZooReaderWriter zoo = ZooReaderWriter.getInstance();
-    try {
-      return zoo.exists("/");
-    } catch (KeeperException e) {
-      return false;
-    } catch (InterruptedException e) {
-      return false;
-    }
-  }
-  
-  private static Path[] paths(String[] paths) {
-    Path[] result = new Path[paths.length];
-    for (int i = 0; i < paths.length; i++) {
-      result[i] = new Path(paths[i]);
-    }
-    return result;
-  }
-  
-  private static <T> T[] concat(T[] a, T[] b) {
-    List<T> result = new ArrayList<T>(a.length + b.length);
-    for (int i = 0; i < a.length; i++) {
-      result.add(a[i]);
-    }
-    for (int i = 0; i < b.length; i++) {
-      result.add(b[i]);
-    }
-    return result.toArray(a);
-  }
-  
-  private static void initFileSystem(Opts opts, VolumeManager fs, UUID uuid) throws IOException {
-    FileStatus fstat;
-    
-    // the actual disk locations of the root table and tablets
-    final Path rootTablet = new Path(ServerConstants.getRootTabletDir());
-    
-    // the actual disk locations of the metadata table and tablets
-    final Path[] metadataTableDirs = paths(ServerConstants.getMetadataTableDirs());
-    final Path[] tableMetadataTabletDirs = paths(ServerConstants.prefix(ServerConstants.getMetadataTableDirs(), TABLE_TABLETS_TABLET_DIR));
-    final Path[] defaultMetadataTabletDirs = paths(ServerConstants.prefix(ServerConstants.getMetadataTableDirs(), Constants.DEFAULT_TABLET_LOCATION));
-    
-    fs.mkdirs(new Path(ServerConstants.getDataVersionLocation(), "" + ServerConstants.DATA_VERSION));
-    
-    // create an instance id
-    fs.mkdirs(ServerConstants.getInstanceIdLocation());
-    fs.createNewFile(new Path(ServerConstants.getInstanceIdLocation(), uuid.toString()));
-    
-    // initialize initial metadata config in zookeeper
-    initMetadataConfig();
-    
-    // create metadata table
-    for (Path mtd : metadataTableDirs) {
-      try {
-        fstat = fs.getFileStatus(mtd);
-        if (!fstat.isDir()) {
-          log.fatal("location " + mtd.toString() + " exists but is not a directory");
-          return;
-        }
-      } catch (FileNotFoundException fnfe) {
-        if (!fs.mkdirs(mtd)) {
-          log.fatal("unable to create directory " + mtd.toString());
-          return;
-        }
-      }
-    }
-    
-    // create root table and tablet
-    try {
-      fstat = fs.getFileStatus(rootTablet);
-      if (!fstat.isDir()) {
-        log.fatal("location " + rootTablet.toString() + " exists but is not a directory");
-        return;
-      }
-    } catch (FileNotFoundException fnfe) {
-      if (!fs.mkdirs(rootTablet)) {
-        log.fatal("unable to create directory " + rootTablet.toString());
-        return;
-      }
-    }
-    
-    // populate the root tablet with info about the default tablet
-    // the root tablet contains the key extent and locations of all the
-    // metadata tablets
-    String initRootTabFile = rootTablet + "/00000_00000." + FileOperations.getNewFileExtension(AccumuloConfiguration.getDefaultConfiguration());
-    FileSystem ns = fs.getFileSystemByPath(new Path(initRootTabFile));
-    FileSKVWriter mfw = FileOperations.getInstance().openWriter(initRootTabFile, ns, ns.getConf(), AccumuloConfiguration.getDefaultConfiguration());
-    mfw.startDefaultLocalityGroup();
-    
-    Text tableExtent = new Text(KeyExtent.getMetadataEntry(new Text(MetadataTable.ID), MetadataSchema.TabletsSection.getRange().getEndKey().getRow()));
-    
-    // table tablet's directory
-    Key tableDirKey = new Key(tableExtent, TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.getColumnFamily(),
-        TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.getColumnQualifier(), 0);
-    mfw.append(tableDirKey, new Value(TABLE_TABLETS_TABLET_DIR.getBytes()));
-    
-    // table tablet time
-    Key tableTimeKey = new Key(tableExtent, TabletsSection.ServerColumnFamily.TIME_COLUMN.getColumnFamily(),
-        TabletsSection.ServerColumnFamily.TIME_COLUMN.getColumnQualifier(), 0);
-    mfw.append(tableTimeKey, new Value((TabletTime.LOGICAL_TIME_ID + "0").getBytes()));
-    
-    // table tablet's prevrow
-    Key tablePrevRowKey = new Key(tableExtent, TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.getColumnFamily(),
-        TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.getColumnQualifier(), 0);
-    mfw.append(tablePrevRowKey, KeyExtent.encodePrevEndRow(null));
-    
-    // ----------] default tablet info
-    Text defaultExtent = new Text(KeyExtent.getMetadataEntry(new Text(MetadataTable.ID), null));
-    
-    // default's directory
-    Key defaultDirKey = new Key(defaultExtent, TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.getColumnFamily(),
-        TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.getColumnQualifier(), 0);
-    mfw.append(defaultDirKey, new Value(Constants.DEFAULT_TABLET_LOCATION.getBytes()));
-    
-    // default's time
-    Key defaultTimeKey = new Key(defaultExtent, TabletsSection.ServerColumnFamily.TIME_COLUMN.getColumnFamily(),
-        TabletsSection.ServerColumnFamily.TIME_COLUMN.getColumnQualifier(), 0);
-    mfw.append(defaultTimeKey, new Value((TabletTime.LOGICAL_TIME_ID + "0").getBytes()));
-    
-    // default's prevrow
-    Key defaultPrevRowKey = new Key(defaultExtent, TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.getColumnFamily(),
-        TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.getColumnQualifier(), 0);
-    mfw.append(defaultPrevRowKey, KeyExtent.encodePrevEndRow(MetadataSchema.TabletsSection.getRange().getEndKey().getRow()));
-    
-    mfw.close();
-    
-    // create table and default tablets directories
-    for (Path dir : concat(defaultMetadataTabletDirs, tableMetadataTabletDirs)) {
-      try {
-        fstat = fs.getFileStatus(dir);
-        if (!fstat.isDir()) {
-          log.fatal("location " + dir.toString() + " exists but is not a directory");
-          return;
-        }
-      } catch (FileNotFoundException fnfe) {
-        try {
-          fstat = fs.getFileStatus(dir);
-          if (!fstat.isDir()) {
-            log.fatal("location " + dir.toString() + " exists but is not a directory");
-            return;
-          }
-        } catch (FileNotFoundException fnfe2) {
-          // create table info dir
-          if (!fs.mkdirs(dir)) {
-            log.fatal("unable to create directory " + dir.toString());
-            return;
-          }
-        }
-        
-        // create default dir
-        if (!fs.mkdirs(dir)) {
-          log.fatal("unable to create directory " + dir.toString());
-          return;
-        }
-      }
-    }
-  }
-  
-  private static void initZooKeeper(Opts opts, String uuid, String instanceNamePath) throws KeeperException, InterruptedException {
-    // setup basic data in zookeeper
-    IZooReaderWriter zoo = ZooReaderWriter.getInstance();
-    ZooUtil.putPersistentData(zoo.getZooKeeper(), Constants.ZROOT, new byte[0], -1, NodeExistsPolicy.SKIP, Ids.OPEN_ACL_UNSAFE);
-    ZooUtil.putPersistentData(zoo.getZooKeeper(), Constants.ZROOT + Constants.ZINSTANCES, new byte[0], -1, NodeExistsPolicy.SKIP, Ids.OPEN_ACL_UNSAFE);
-    
-    // setup instance name
-    if (opts.clearInstanceName)
-      zoo.recursiveDelete(instanceNamePath, NodeMissingPolicy.SKIP);
-    zoo.putPersistentData(instanceNamePath, uuid.getBytes(), NodeExistsPolicy.FAIL);
-    
-    // setup the instance
-    String zkInstanceRoot = Constants.ZROOT + "/" + uuid;
-    zoo.putPersistentData(zkInstanceRoot, new byte[0], NodeExistsPolicy.FAIL);
-    zoo.putPersistentData(zkInstanceRoot + Constants.ZTABLES, Constants.ZTABLES_INITIAL_ID, NodeExistsPolicy.FAIL);
-    TableManager.prepareNewTableState(uuid, RootTable.ID, RootTable.NAME, TableState.ONLINE, NodeExistsPolicy.FAIL);
-    TableManager.prepareNewTableState(uuid, MetadataTable.ID, MetadataTable.NAME, TableState.ONLINE, NodeExistsPolicy.FAIL);
-    zoo.putPersistentData(zkInstanceRoot + Constants.ZTSERVERS, new byte[0], NodeExistsPolicy.FAIL);
-    zoo.putPersistentData(zkInstanceRoot + Constants.ZPROBLEMS, new byte[0], NodeExistsPolicy.FAIL);
-    zoo.putPersistentData(zkInstanceRoot + RootTable.ZROOT_TABLET, new byte[0], NodeExistsPolicy.FAIL);
-    zoo.putPersistentData(zkInstanceRoot + RootTable.ZROOT_TABLET_WALOGS, new byte[0], NodeExistsPolicy.FAIL);
-    zoo.putPersistentData(zkInstanceRoot + Constants.ZTRACERS, new byte[0], NodeExistsPolicy.FAIL);
-    zoo.putPersistentData(zkInstanceRoot + Constants.ZMASTERS, new byte[0], NodeExistsPolicy.FAIL);
-    zoo.putPersistentData(zkInstanceRoot + Constants.ZMASTER_LOCK, new byte[0], NodeExistsPolicy.FAIL);
-    zoo.putPersistentData(zkInstanceRoot + Constants.ZMASTER_GOAL_STATE, MasterGoalState.NORMAL.toString().getBytes(), NodeExistsPolicy.FAIL);
-    zoo.putPersistentData(zkInstanceRoot + Constants.ZGC, new byte[0], NodeExistsPolicy.FAIL);
-    zoo.putPersistentData(zkInstanceRoot + Constants.ZGC_LOCK, new byte[0], NodeExistsPolicy.FAIL);
-    zoo.putPersistentData(zkInstanceRoot + Constants.ZCONFIG, new byte[0], NodeExistsPolicy.FAIL);
-    zoo.putPersistentData(zkInstanceRoot + Constants.ZTABLE_LOCKS, new byte[0], NodeExistsPolicy.FAIL);
-    zoo.putPersistentData(zkInstanceRoot + Constants.ZHDFS_RESERVATIONS, new byte[0], NodeExistsPolicy.FAIL);
-    zoo.putPersistentData(zkInstanceRoot + Constants.ZNEXT_FILE, new byte[] {'0'}, NodeExistsPolicy.FAIL);
-    zoo.putPersistentData(zkInstanceRoot + Constants.ZRECOVERY, new byte[] {'0'}, NodeExistsPolicy.FAIL);
-  }
-  
-  private static String getInstanceNamePath(Opts opts) throws IOException, KeeperException, InterruptedException {
-    // setup the instance name
-    String instanceName, instanceNamePath = null;
-    boolean exists = true;
-    do {
-      if (opts.cliInstanceName == null) {
-        instanceName = getConsoleReader().readLine("Instance name : ");
-      } else {
-        instanceName = opts.cliInstanceName;
-      }
-      if (instanceName == null)
-        System.exit(0);
-      instanceName = instanceName.trim();
-      if (instanceName.length() == 0)
-        continue;
-      instanceNamePath = Constants.ZROOT + Constants.ZINSTANCES + "/" + instanceName;
-      if (opts.clearInstanceName) {
-        exists = false;
-        break;
-      } else if (exists = ZooReaderWriter.getInstance().exists(instanceNamePath)) {
-        String decision = getConsoleReader().readLine("Instance name \"" + instanceName + "\" exists. Delete existing entry from zookeeper? [Y/N] : ");
-        if (decision == null)
-          System.exit(0);
-        if (decision.length() == 1 && decision.toLowerCase(Locale.ENGLISH).charAt(0) == 'y') {
-          opts.clearInstanceName = true;
-          exists = false;
-        }
-      }
-    } while (exists);
-    return instanceNamePath;
-  }
-  
-  private static byte[] getRootPassword(Opts opts) throws IOException {
-    if (opts.cliPassword != null) {
-      return opts.cliPassword.getBytes();
-    }
-    String rootpass;
-    String confirmpass;
-    do {
-      rootpass = getConsoleReader()
-          .readLine("Enter initial password for " + DEFAULT_ROOT_USER + " (this may not be applicable for your security setup): ", '*');
-      if (rootpass == null)
-        System.exit(0);
-      confirmpass = getConsoleReader().readLine("Confirm initial password for " + DEFAULT_ROOT_USER + ": ", '*');
-      if (confirmpass == null)
-        System.exit(0);
-      if (!rootpass.equals(confirmpass))
-        log.error("Passwords do not match");
-    } while (!rootpass.equals(confirmpass));
-    return rootpass.getBytes();
-  }
-  
-  private static void initSecurity(Opts opts, String iid) throws AccumuloSecurityException, ThriftSecurityException {
-    AuditedSecurityOperation.getInstance(iid, true).initializeSecurity(SystemCredentials.get().toThrift(HdfsZooInstance.getInstance()), DEFAULT_ROOT_USER,
-        opts.rootpass);
-  }
-  
-  protected static void initMetadataConfig() throws IOException {
-    try {
-      Configuration conf = CachedConfiguration.getInstance();
-      int max = conf.getInt("dfs.replication.max", 512);
-      // Hadoop 0.23 switched the min value configuration name
-      int min = Math.max(conf.getInt("dfs.replication.min", 1), conf.getInt("dfs.namenode.replication.min", 1));
-      if (max < 5)
-        setMetadataReplication(max, "max");
-      if (min > 5)
-        setMetadataReplication(min, "min");
-      for (Entry<String,String> entry : initialMetadataConf.entrySet()) {
-        if (!TablePropUtil.setTableProperty(RootTable.ID, entry.getKey(), entry.getValue()))
-          throw new IOException("Cannot create per-table property " + entry.getKey());
-        if (!TablePropUtil.setTableProperty(MetadataTable.ID, entry.getKey(), entry.getValue()))
-          throw new IOException("Cannot create per-table property " + entry.getKey());
-      }
-    } catch (Exception e) {
-      log.fatal("error talking to zookeeper", e);
-      throw new IOException(e);
-    }
-  }
-  
-  private static void setMetadataReplication(int replication, String reason) throws IOException {
-    String rep = getConsoleReader().readLine(
-        "Your HDFS replication " + reason + " is not compatible with our default " + MetadataTable.NAME + " replication of 5. What do you want to set your "
-            + MetadataTable.NAME + " replication to? (" + replication + ") ");
-    if (rep == null || rep.length() == 0)
-      rep = Integer.toString(replication);
-    else
-      // Lets make sure it's a number
-      Integer.parseInt(rep);
-    initialMetadataConf.put(Property.TABLE_FILE_REPLICATION.getKey(), rep);
-  }
-  
-  public static boolean isInitialized(VolumeManager fs) throws IOException {
-    return (fs.exists(ServerConstants.getInstanceIdLocation()) || fs.exists(ServerConstants.getDataVersionLocation()));
-  }
-  
-  static class Opts extends Help {
-    @Parameter(names = "--reset-security", description = "just update the security information")
-    boolean resetSecurity = false;
-    @Parameter(names = "--clear-instance-name", description = "delete any existing instance name without prompting")
-    boolean clearInstanceName = false;
-    @Parameter(names = "--instance-name", description = "the instance name, if not provided, will prompt")
-    String cliInstanceName;
-    @Parameter(names = "--password", description = "set the password on the command line")
-    String cliPassword;
-    
-    byte[] rootpass = null;
-  }
-  
-  public static void main(String[] args) {
-    Opts opts = new Opts();
-    opts.parseArgs(Initialize.class.getName(), args);
-    
-    try {
-      SecurityUtil.serverLogin();
-      Configuration conf = CachedConfiguration.getInstance();
-      
-      @SuppressWarnings("deprecation")
-      VolumeManager fs = VolumeManagerImpl.get(SiteConfiguration.getSiteConfiguration());
-      
-      if (opts.resetSecurity) {
-        if (isInitialized(fs)) {
-          opts.rootpass = getRootPassword(opts);
-          initSecurity(opts, HdfsZooInstance.getInstance().getInstanceID());
-        } else {
-          log.fatal("Attempted to reset security on accumulo before it was initialized");
-        }
-      } else if (!doInit(opts, conf, fs))
-        System.exit(-1);
-    } catch (Exception e) {
-      log.fatal(e, e);
-      throw new RuntimeException(e);
-    }
-  }
-}
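
A detail in the deleted Initialize.initMetadataConfig() that is easy to miss: the default metadata replication of 5 survives only if it fits within the HDFS limits read from dfs.replication.max and dfs.replication.min / dfs.namenode.replication.min; otherwise the user is prompted with the nearest limit as the suggested value. A minimal restatement of that clamping (illustrative only; the method name is made up, and the real code prompts through ConsoleReader rather than returning a value):

    // Illustrative restatement of the clamp in initMetadataConfig() above.
    static int suggestedMetadataReplication(int hdfsMin, int hdfsMax) {
      int desired = 5;        // Accumulo's default replication for the metadata table
      if (hdfsMax < desired)
        return hdfsMax;       // corresponds to setMetadataReplication(max, "max")
      if (hdfsMin > desired)
        return hdfsMin;       // corresponds to setMetadataReplication(min, "min")
      return desired;
    }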

http://git-wip-us.apache.org/repos/asf/accumulo/blob/4ba20cf3/server/server/src/main/resources/randomwalk/Basic.xml
----------------------------------------------------------------------
diff --git a/server/server/src/main/resources/randomwalk/Basic.xml b/server/server/src/main/resources/randomwalk/Basic.xml
deleted file mode 100644
index 2dead02..0000000
--- a/server/server/src/main/resources/randomwalk/Basic.xml
+++ /dev/null
@@ -1,37 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-  Licensed to the Apache Software Foundation (ASF) under one or more
-  contributor license agreements.  See the NOTICE file distributed with
-  this work for additional information regarding copyright ownership.
-  The ASF licenses this file to You under the Apache License, Version 2.0
-  (the "License"); you may not use this file except in compliance with
-  the License.  You may obtain a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
-<module>
-
-<package prefix="test" value="org.apache.accumulo.test.randomwalk.unit"/>
-
-<init id="test.CreateTable"/>
-
-<node id="test.CreateTable">
-  <edge id="unit/Simple.xml" weight="1"/>
-</node>
-
-<node id="unit/Simple.xml">
-  <edge id="unit/Simple.xml" weight="3"/>
-  <edge id="test.DeleteTable" weight="1"/>
-</node>
-
-<node id="test.DeleteTable">
-  <edge id="END" weight="1"/>
-</node>
-
-</module>

http://git-wip-us.apache.org/repos/asf/accumulo/blob/4ba20cf3/server/server/src/main/resources/randomwalk/Simple.xml
----------------------------------------------------------------------
diff --git a/server/server/src/main/resources/randomwalk/Simple.xml b/server/server/src/main/resources/randomwalk/Simple.xml
deleted file mode 100644
index cad940e..0000000
--- a/server/server/src/main/resources/randomwalk/Simple.xml
+++ /dev/null
@@ -1,43 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-  Licensed to the Apache Software Foundation (ASF) under one or more
-  contributor license agreements.  See the NOTICE file distributed with
-  this work for additional information regarding copyright ownership.
-  The ASF licenses this file to You under the Apache License, Version 2.0
-  (the "License"); you may not use this file except in compliance with
-  the License.  You may obtain a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
-<module>
-
-<package prefix="test" value="org.apache.accumulo.test.randomwalk.unit"/>
-
-<init id="dummy.all"/>
-
-<node id="dummy.all">
-  <edge id="test.Ingest" weight="1"/>
-  <edge id="test.Verify" weight="1"/>
-  <edge id="test.Scan" weight="1"/>
-  <edge id="END" weight="1"/>
-</node>
-
-<node id="test.Ingest">
-  <edge id="dummy.all" weight="1"/>
-</node>
-
-<node id="test.Verify">
-  <edge id="dummy.all" weight="1"/>
-</node>
-
-<node id="test.Scan">
-  <edge id="dummy.all" weight="1"/>
-</node>
-
-</module>

http://git-wip-us.apache.org/repos/asf/accumulo/blob/4ba20cf3/server/server/src/main/resources/randomwalk/module.xsd
----------------------------------------------------------------------
diff --git a/server/server/src/main/resources/randomwalk/module.xsd b/server/server/src/main/resources/randomwalk/module.xsd
deleted file mode 100644
index bcdaaae0..0000000
--- a/server/server/src/main/resources/randomwalk/module.xsd
+++ /dev/null
@@ -1,69 +0,0 @@
-<xsd:schema xmlns:xsd="http://www.w3.org/2001/XMLSchema">
-<!--
-  Licensed to the Apache Software Foundation (ASF) under one or more
-  contributor license agreements.  See the NOTICE file distributed with
-  this work for additional information regarding copyright ownership.
-  The ASF licenses this file to You under the Apache License, Version 2.0
-  (the "License"); you may not use this file except in compliance with
-  the License.  You may obtain a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
-
-  <xsd:element name="module" type="ModuleType"/>
-
-  <xsd:complexType name="ModuleType">
-    <xsd:sequence>
-      <xsd:element name="package" type="PrefixType" minOccurs="0" maxOccurs="unbounded"/>
-      <xsd:element name="fixture" type="InitType" minOccurs="0" maxOccurs="1"/>
-      <xsd:element name="init" type="InitType"/>
-      <xsd:element name="node" type="NodeType" minOccurs="1" maxOccurs="unbounded"/>
-   </xsd:sequence>
-  </xsd:complexType>
-
-  <xsd:complexType name="PrefixType">
-    <xsd:attribute name="prefix" type="xsd:string"/>
-    <xsd:attribute name="value" type="xsd:string"/>
-  </xsd:complexType>
-
-  <xsd:complexType name="InitType">
-    <xsd:attribute name="id" type="xsd:string"/>
-    <xsd:attribute name="maxHops" type="xsd:nonNegativeInteger"/>
-    <xsd:attribute name="maxSec" type="xsd:nonNegativeInteger"/>
-    <xsd:attribute name="teardown" type="xsd:boolean"/>
-  </xsd:complexType>
-
-  <xsd:complexType name="NodeType">
-    <xsd:sequence>
-      <xsd:element name="alias" type="AliasType" minOccurs="0" maxOccurs="unbounded"/>
-      <xsd:element name="property" type="PropertyType" minOccurs="0" maxOccurs="unbounded"/>
-      <xsd:element name="edge" type="EdgeType" minOccurs="1" maxOccurs="unbounded"/>
-    </xsd:sequence>
-    <xsd:attribute name="id" type="xsd:string"/>
-    <xsd:attribute name="src" type="xsd:string"/>
-    <xsd:attribute name="maxHops" type="xsd:nonNegativeInteger"/>
-    <xsd:attribute name="maxSec" type="xsd:nonNegativeInteger"/>
-    <xsd:attribute name="teardown" type="xsd:boolean"/>
-  </xsd:complexType>
-
-  <xsd:complexType name="EdgeType">
-    <xsd:attribute name="id" type="xsd:string"/>
-    <xsd:attribute name="weight" type="xsd:positiveInteger"/>
-  </xsd:complexType>
-
-  <xsd:complexType name="AliasType">
-    <xsd:attribute name="name" type="xsd:string"/>
-  </xsd:complexType>
-  
-  <xsd:complexType name="PropertyType">
-    <xsd:attribute name="key" type="xsd:string"/>
-    <xsd:attribute name="value" type="xsd:string"/>
-  </xsd:complexType>
-
-</xsd:schema>

http://git-wip-us.apache.org/repos/asf/accumulo/blob/4ba20cf3/server/server/src/test/java/org/apache/accumulo/server/client/BulkImporterTest.java
----------------------------------------------------------------------
diff --git a/server/server/src/test/java/org/apache/accumulo/server/client/BulkImporterTest.java b/server/server/src/test/java/org/apache/accumulo/server/client/BulkImporterTest.java
deleted file mode 100644
index fb4a3dc..0000000
--- a/server/server/src/test/java/org/apache/accumulo/server/client/BulkImporterTest.java
+++ /dev/null
@@ -1,152 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.server.client;
-
-import java.util.Collection;
-import java.util.Collections;
-import java.util.List;
-import java.util.Map;
-import java.util.SortedSet;
-import java.util.TreeSet;
-
-import org.apache.accumulo.core.client.AccumuloException;
-import org.apache.accumulo.core.client.AccumuloSecurityException;
-import org.apache.accumulo.core.client.TableNotFoundException;
-import org.apache.accumulo.core.client.impl.TabletLocator;
-import org.apache.accumulo.core.client.impl.TabletLocator.TabletLocation;
-import org.apache.accumulo.core.conf.AccumuloConfiguration;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.KeyExtent;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.Range;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.file.FileOperations;
-import org.apache.accumulo.core.file.FileSKVWriter;
-import org.apache.accumulo.core.security.Credentials;
-import org.apache.accumulo.core.util.CachedConfiguration;
-import org.apache.commons.lang.NotImplementedException;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.io.Text;
-import org.junit.Assert;
-import org.junit.Test;
-
-public class BulkImporterTest {
-  
-  static final SortedSet<KeyExtent> fakeMetaData = new TreeSet<KeyExtent>();
-  static final Text tableId = new Text("1");
-  static {
-    fakeMetaData.add(new KeyExtent(tableId, new Text("a"), null));
-    for (String part : new String[] {"b", "bm", "c", "cm", "d", "dm", "e", "em", "f", "g", "h", "i", "j", "k", "l"}) {
-      fakeMetaData.add(new KeyExtent(tableId, new Text(part), fakeMetaData.last().getEndRow()));
-    }
-    fakeMetaData.add(new KeyExtent(tableId, null, fakeMetaData.last().getEndRow()));
-  }
-  
-  class MockTabletLocator extends TabletLocator {
-    int invalidated = 0;
-    
-    @Override
-    public TabletLocation locateTablet(Credentials credentials, Text row, boolean skipRow, boolean retry) throws AccumuloException, AccumuloSecurityException,
-        TableNotFoundException {
-      return new TabletLocation(fakeMetaData.tailSet(new KeyExtent(tableId, row, null)).first(), "localhost");
-    }
-    
-    @Override
-    public <T extends Mutation> void binMutations(Credentials credentials, List<T> mutations, Map<String,TabletServerMutations<T>> binnedMutations, List<T> failures)
-        throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
-      throw new NotImplementedException();
-    }
-    
-    @Override
-    public List<Range> binRanges(Credentials credentials, List<Range> ranges, Map<String,Map<KeyExtent,List<Range>>> binnedRanges) throws AccumuloException,
-        AccumuloSecurityException, TableNotFoundException {
-      throw new NotImplementedException();
-    }
-    
-    @Override
-    public void invalidateCache(KeyExtent failedExtent) {
-      invalidated++;
-    }
-    
-    @Override
-    public void invalidateCache(Collection<KeyExtent> keySet) {
-      throw new NotImplementedException();
-    }
-    
-    @Override
-    public void invalidateCache() {
-      throw new NotImplementedException();
-    }
-    
-    @Override
-    public void invalidateCache(String server) {
-      throw new NotImplementedException();
-    }
-  }
-  
-  @Test
-  public void testFindOverlappingTablets() throws Exception {
-    Credentials credentials = null;
-    MockTabletLocator locator = new MockTabletLocator();
-    FileSystem fs = FileSystem.getLocal(CachedConfiguration.getInstance());
-    AccumuloConfiguration acuConf = AccumuloConfiguration.getDefaultConfiguration();
-    String file = "target/testFile.rf";
-    fs.delete(new Path(file), true);
-    FileSKVWriter writer = FileOperations.getInstance().openWriter(file, fs, fs.getConf(), acuConf);
-    writer.startDefaultLocalityGroup();
-    Value empty = new Value(new byte[] {});
-    writer.append(new Key("a", "cf", "cq"), empty);
-    writer.append(new Key("a", "cf", "cq1"), empty);
-    writer.append(new Key("a", "cf", "cq2"), empty);
-    writer.append(new Key("a", "cf", "cq3"), empty);
-    writer.append(new Key("a", "cf", "cq4"), empty);
-    writer.append(new Key("a", "cf", "cq5"), empty);
-    writer.append(new Key("d", "cf", "cq"), empty);
-    writer.append(new Key("d", "cf", "cq1"), empty);
-    writer.append(new Key("d", "cf", "cq2"), empty);
-    writer.append(new Key("d", "cf", "cq3"), empty);
-    writer.append(new Key("d", "cf", "cq4"), empty);
-    writer.append(new Key("d", "cf", "cq5"), empty);
-    writer.append(new Key("dd", "cf", "cq1"), empty);
-    writer.append(new Key("ichabod", "cf", "cq"), empty);
-    writer.append(new Key("icky", "cf", "cq1"), empty);
-    writer.append(new Key("iffy", "cf", "cq2"), empty);
-    writer.append(new Key("internal", "cf", "cq3"), empty);
-    writer.append(new Key("is", "cf", "cq4"), empty);
-    writer.append(new Key("iterator", "cf", "cq5"), empty);
-    writer.append(new Key("xyzzy", "cf", "cq"), empty);
-    writer.close();
-    List<TabletLocation> overlaps = BulkImporter.findOverlappingTablets(acuConf, fs, locator, new Path(file), credentials);
-    Assert.assertEquals(5, overlaps.size());
-    Collections.sort(overlaps);
-    Assert.assertEquals(new KeyExtent(tableId, new Text("a"), null), overlaps.get(0).tablet_extent);
-    Assert.assertEquals(new KeyExtent(tableId, new Text("d"), new Text("cm")), overlaps.get(1).tablet_extent);
-    Assert.assertEquals(new KeyExtent(tableId, new Text("dm"), new Text("d")), overlaps.get(2).tablet_extent);
-    Assert.assertEquals(new KeyExtent(tableId, new Text("j"), new Text("i")), overlaps.get(3).tablet_extent);
-    Assert.assertEquals(new KeyExtent(tableId, null, new Text("l")), overlaps.get(4).tablet_extent);
-    
-    List<TabletLocation> overlaps2 = BulkImporter.findOverlappingTablets(acuConf, fs, locator, new Path(file), new KeyExtent(tableId, new Text("h"), new Text(
-        "b")), credentials);
-    Assert.assertEquals(3, overlaps2.size());
-    Assert.assertEquals(new KeyExtent(tableId, new Text("d"), new Text("cm")), overlaps2.get(0).tablet_extent);
-    Assert.assertEquals(new KeyExtent(tableId, new Text("dm"), new Text("d")), overlaps2.get(1).tablet_extent);
-    Assert.assertEquals(new KeyExtent(tableId, new Text("j"), new Text("i")), overlaps2.get(2).tablet_extent);
-    Assert.assertEquals(locator.invalidated, 1);
-  }
-  
-}
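
A quick gloss on the first set of assertions in testFindOverlappingTablets() above: given the fake split points a, b, bm, c, cm, d, dm, e, em, f, g, h, i, j, k, l, the rows written to the test file fall into exactly five of the fake tablets, which is why five overlaps are expected:

    row "a"                        -> (-inf, a]
    row "d"                        -> (cm, d]
    row "dd"                       -> (d, dm]
    rows "ichabod" .. "iterator"   -> (i, j]
    row "xyzzy"                    -> (l, +inf)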

http://git-wip-us.apache.org/repos/asf/accumulo/blob/4ba20cf3/server/server/src/test/java/org/apache/accumulo/server/constraints/MetadataConstraintsTest.java
----------------------------------------------------------------------
diff --git a/server/server/src/test/java/org/apache/accumulo/server/constraints/MetadataConstraintsTest.java b/server/server/src/test/java/org/apache/accumulo/server/constraints/MetadataConstraintsTest.java
deleted file mode 100644
index fbae24c..0000000
--- a/server/server/src/test/java/org/apache/accumulo/server/constraints/MetadataConstraintsTest.java
+++ /dev/null
@@ -1,240 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.server.constraints;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertNull;
-
-import java.util.List;
-
-import org.apache.accumulo.core.conf.AccumuloConfiguration;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection;
-import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.DataFileColumnFamily;
-import org.apache.accumulo.fate.zookeeper.TransactionWatcher.Arbitrator;
-import org.apache.accumulo.tserver.constraints.MetadataConstraints;
-import org.apache.hadoop.io.Text;
-import org.apache.log4j.Level;
-import org.apache.log4j.Logger;
-import org.junit.Test;
-
-public class MetadataConstraintsTest {
-  
-  static class TestMetadataConstraints extends MetadataConstraints {
-    @Override
-    protected Arbitrator getArbitrator() {
-      return new Arbitrator() {
-        
-        @Override
-        public boolean transactionAlive(String type, long tid) throws Exception {
-          if (tid == 9)
-            throw new RuntimeException("txid 9 reserved for future use");
-          return tid == 5 || tid == 7;
-        }
-        
-        @Override
-        public boolean transactionComplete(String type, long tid) throws Exception {
-          return tid != 5 && tid != 7;
-        }
-      };
-    }
-  }
-  
-  @Test
-  public void testCheck() {
-    Logger.getLogger(AccumuloConfiguration.class).setLevel(Level.ERROR);
-    Mutation m = new Mutation(new Text("0;foo"));
-    TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.put(m, new Value("1foo".getBytes()));
-    
-    MetadataConstraints mc = new MetadataConstraints();
-    
-    List<Short> violations = mc.check(null, m);
-    
-    assertNotNull(violations);
-    assertEquals(1, violations.size());
-    assertEquals(Short.valueOf((short) 3), violations.get(0));
-    
-    m = new Mutation(new Text("0:foo"));
-    TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.put(m, new Value("1poo".getBytes()));
-    
-    violations = mc.check(null, m);
-    
-    assertNotNull(violations);
-    assertEquals(1, violations.size());
-    assertEquals(Short.valueOf((short) 4), violations.get(0));
-    
-    m = new Mutation(new Text("0;foo"));
-    m.put(new Text("bad_column_name"), new Text(""), new Value("e".getBytes()));
-    
-    violations = mc.check(null, m);
-    
-    assertNotNull(violations);
-    assertEquals(1, violations.size());
-    assertEquals(Short.valueOf((short) 2), violations.get(0));
-    
-    m = new Mutation(new Text("!!<"));
-    TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.put(m, new Value("1poo".getBytes()));
-    
-    violations = mc.check(null, m);
-    
-    assertNotNull(violations);
-    assertEquals(2, violations.size());
-    assertEquals(Short.valueOf((short) 4), violations.get(0));
-    assertEquals(Short.valueOf((short) 5), violations.get(1));
-    
-    m = new Mutation(new Text("0;foo"));
-    TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.put(m, new Value("".getBytes()));
-    
-    violations = mc.check(null, m);
-    
-    assertNotNull(violations);
-    assertEquals(1, violations.size());
-    assertEquals(Short.valueOf((short) 6), violations.get(0));
-    
-    m = new Mutation(new Text("0;foo"));
-    TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.put(m, new Value("bar".getBytes()));
-    
-    violations = mc.check(null, m);
-    
-    assertEquals(null, violations);
-    
-    m = new Mutation(new Text("!0<"));
-    TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.put(m, new Value("bar".getBytes()));
-    
-    violations = mc.check(null, m);
-    
-    assertEquals(null, violations);
-    
-    m = new Mutation(new Text("!1<"));
-    TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.put(m, new Value("bar".getBytes()));
-    
-    violations = mc.check(null, m);
-    
-    assertNotNull(violations);
-    assertEquals(1, violations.size());
-    assertEquals(Short.valueOf((short) 4), violations.get(0));
-    
-  }
-  
-  @Test
-  public void testBulkFileCheck() {
-    MetadataConstraints mc = new TestMetadataConstraints();
-    Mutation m;
-    List<Short> violations;
-    
-    // inactive txid
-    m = new Mutation(new Text("0;foo"));
-    m.put(TabletsSection.BulkFileColumnFamily.NAME, new Text("/someFile"), new Value("12345".getBytes()));
-    m.put(DataFileColumnFamily.NAME, new Text("/someFile"), new Value("1,1".getBytes()));
-    violations = mc.check(null, m);
-    assertNotNull(violations);
-    assertEquals(1, violations.size());
-    assertEquals(Short.valueOf((short) 8), violations.get(0));
-    
-    // txid that throws exception
-    m = new Mutation(new Text("0;foo"));
-    m.put(TabletsSection.BulkFileColumnFamily.NAME, new Text("/someFile"), new Value("9".getBytes()));
-    m.put(DataFileColumnFamily.NAME, new Text("/someFile"), new Value("1,1".getBytes()));
-    violations = mc.check(null, m);
-    assertNotNull(violations);
-    assertEquals(1, violations.size());
-    assertEquals(Short.valueOf((short) 8), violations.get(0));
-    
-    // active txid w/ file
-    m = new Mutation(new Text("0;foo"));
-    m.put(TabletsSection.BulkFileColumnFamily.NAME, new Text("/someFile"), new Value("5".getBytes()));
-    m.put(DataFileColumnFamily.NAME, new Text("/someFile"), new Value("1,1".getBytes()));
-    violations = mc.check(null, m);
-    assertNull(violations);
-    
-    // active txid w/o file
-    m = new Mutation(new Text("0;foo"));
-    m.put(TabletsSection.BulkFileColumnFamily.NAME, new Text("/someFile"), new Value("5".getBytes()));
-    violations = mc.check(null, m);
-    assertNotNull(violations);
-    assertEquals(1, violations.size());
-    assertEquals(Short.valueOf((short) 8), violations.get(0));
-    
-    // two active txids w/ files
-    m = new Mutation(new Text("0;foo"));
-    m.put(TabletsSection.BulkFileColumnFamily.NAME, new Text("/someFile"), new Value("5".getBytes()));
-    m.put(DataFileColumnFamily.NAME, new Text("/someFile"), new Value("1,1".getBytes()));
-    m.put(TabletsSection.BulkFileColumnFamily.NAME, new Text("/someFile2"), new Value("7".getBytes()));
-    m.put(DataFileColumnFamily.NAME, new Text("/someFile2"), new Value("1,1".getBytes()));
-    violations = mc.check(null, m);
-    assertNotNull(violations);
-    assertEquals(1, violations.size());
-    assertEquals(Short.valueOf((short) 8), violations.get(0));
-    
-    // two files w/ one active txid
-    m = new Mutation(new Text("0;foo"));
-    m.put(TabletsSection.BulkFileColumnFamily.NAME, new Text("/someFile"), new Value("5".getBytes()));
-    m.put(DataFileColumnFamily.NAME, new Text("/someFile"), new Value("1,1".getBytes()));
-    m.put(TabletsSection.BulkFileColumnFamily.NAME, new Text("/someFile2"), new Value("5".getBytes()));
-    m.put(DataFileColumnFamily.NAME, new Text("/someFile2"), new Value("1,1".getBytes()));
-    violations = mc.check(null, m);
-    assertNull(violations);
-    
-    // two loaded w/ one active txid and one file
-    m = new Mutation(new Text("0;foo"));
-    m.put(TabletsSection.BulkFileColumnFamily.NAME, new Text("/someFile"), new Value("5".getBytes()));
-    m.put(DataFileColumnFamily.NAME, new Text("/someFile"), new Value("1,1".getBytes()));
-    m.put(TabletsSection.BulkFileColumnFamily.NAME, new Text("/someFile2"), new Value("5".getBytes()));
-    violations = mc.check(null, m);
-    assertNotNull(violations);
-    assertEquals(1, violations.size());
-    assertEquals(Short.valueOf((short) 8), violations.get(0));
-    
-    // active txid, mutation that looks like split
-    m = new Mutation(new Text("0;foo"));
-    m.put(TabletsSection.BulkFileColumnFamily.NAME, new Text("/someFile"), new Value("5".getBytes()));
-    TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.put(m, new Value("/t1".getBytes()));
-    violations = mc.check(null, m);
-    assertNull(violations);
-    
-    // inactive txid, mutation that looks like split
-    m = new Mutation(new Text("0;foo"));
-    m.put(TabletsSection.BulkFileColumnFamily.NAME, new Text("/someFile"), new Value("12345".getBytes()));
-    TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.put(m, new Value("/t1".getBytes()));
-    violations = mc.check(null, m);
-    assertNull(violations);
-    
-    // active txid, mutation that looks like a load
-    m = new Mutation(new Text("0;foo"));
-    m.put(TabletsSection.BulkFileColumnFamily.NAME, new Text("/someFile"), new Value("5".getBytes()));
-    m.put(TabletsSection.CurrentLocationColumnFamily.NAME, new Text("789"), new Value("127.0.0.1:9997".getBytes()));
-    violations = mc.check(null, m);
-    assertNull(violations);
-    
-    // inactive txid, mutation that looks like a load
-    m = new Mutation(new Text("0;foo"));
-    m.put(TabletsSection.BulkFileColumnFamily.NAME, new Text("/someFile"), new Value("12345".getBytes()));
-    m.put(TabletsSection.CurrentLocationColumnFamily.NAME, new Text("789"), new Value("127.0.0.1:9997".getBytes()));
-    violations = mc.check(null, m);
-    assertNull(violations);
-    
-    // deleting a load flag
-    m = new Mutation(new Text("0;foo"));
-    m.putDelete(TabletsSection.BulkFileColumnFamily.NAME, new Text("/someFile"));
-    violations = mc.check(null, m);
-    assertNull(violations);
-    
-  }
-  
-}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/4ba20cf3/server/server/src/test/java/org/apache/accumulo/server/data/ServerMutationTest.java
----------------------------------------------------------------------
diff --git a/server/server/src/test/java/org/apache/accumulo/server/data/ServerMutationTest.java b/server/server/src/test/java/org/apache/accumulo/server/data/ServerMutationTest.java
deleted file mode 100644
index 0df27f1..0000000
--- a/server/server/src/test/java/org/apache/accumulo/server/data/ServerMutationTest.java
+++ /dev/null
@@ -1,79 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.server.data;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-
-import java.util.List;
-
-import org.apache.accumulo.core.data.ColumnUpdate;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.util.CachedConfiguration;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.util.ReflectionUtils;
-import org.junit.Test;
-
-public class ServerMutationTest {
-  
-  @Test
-  public void test() throws Exception {
-    ServerMutation m = new ServerMutation(new Text("r1"));
-    m.put(new Text("cf1"), new Text("cq1"), new Value("v1".getBytes()));
-    m.put(new Text("cf2"), new Text("cq2"), 56, new Value("v2".getBytes()));
-    m.setSystemTimestamp(42);
-    
-    List<ColumnUpdate> updates = m.getUpdates();
-    
-    assertEquals(2, updates.size());
-    
-    assertEquals("r1", new String(m.getRow()));
-    ColumnUpdate cu = updates.get(0);
-    
-    assertEquals("cf1", new String(cu.getColumnFamily()));
-    assertEquals("cq1", new String(cu.getColumnQualifier()));
-    assertEquals("", new String(cu.getColumnVisibility()));
-    assertFalse(cu.hasTimestamp());
-    assertEquals(42l, cu.getTimestamp());
-    
-    ServerMutation m2 = new ServerMutation();
-    ReflectionUtils.copy(CachedConfiguration.getInstance(), m, m2);
-    
-    updates = m2.getUpdates();
-    
-    assertEquals(2, updates.size());
-    assertEquals("r1", new String(m2.getRow()));
-    
-    cu = updates.get(0);
-    assertEquals("cf1", new String(cu.getColumnFamily()));
-    assertEquals("cq1", new String(cu.getColumnQualifier()));
-    assertFalse(cu.hasTimestamp());
-    assertEquals(42l, cu.getTimestamp());
-    
-    cu = updates.get(1);
-    
-    assertEquals("r1", new String(m2.getRow()));
-    assertEquals("cf2", new String(cu.getColumnFamily()));
-    assertEquals("cq2", new String(cu.getColumnQualifier()));
-    assertTrue(cu.hasTimestamp());
-    assertEquals(56, cu.getTimestamp());
-    
-    
-  }
-  
-}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/4ba20cf3/server/server/src/test/java/org/apache/accumulo/server/iterators/MetadataBulkLoadFilterTest.java
----------------------------------------------------------------------
diff --git a/server/server/src/test/java/org/apache/accumulo/server/iterators/MetadataBulkLoadFilterTest.java b/server/server/src/test/java/org/apache/accumulo/server/iterators/MetadataBulkLoadFilterTest.java
deleted file mode 100644
index 1b95531..0000000
--- a/server/server/src/test/java/org/apache/accumulo/server/iterators/MetadataBulkLoadFilterTest.java
+++ /dev/null
@@ -1,144 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.server.iterators;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.TreeMap;
-
-import org.apache.accumulo.core.conf.AccumuloConfiguration;
-import org.apache.accumulo.core.data.ByteSequence;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Range;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.iterators.IteratorEnvironment;
-import org.apache.accumulo.core.iterators.IteratorUtil.IteratorScope;
-import org.apache.accumulo.core.iterators.SortedKeyValueIterator;
-import org.apache.accumulo.core.iterators.SortedMapIterator;
-import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection;
-import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.DataFileColumnFamily;
-import org.apache.accumulo.core.util.ColumnFQ;
-import org.apache.accumulo.fate.zookeeper.TransactionWatcher.Arbitrator;
-import org.apache.accumulo.tserver.iterators.MetadataBulkLoadFilter;
-import org.apache.hadoop.io.Text;
-import org.junit.Assert;
-import org.junit.Test;
-
-/**
- * 
- */
-public class MetadataBulkLoadFilterTest {
-  static class TestArbitrator implements Arbitrator {
-    @Override
-    public boolean transactionAlive(String type, long tid) throws Exception {
-      return tid == 5;
-    }
-    
-    @Override
-    public boolean transactionComplete(String type, long tid) throws Exception {
-      if (tid == 9)
-        throw new RuntimeException();
-      return tid != 5 && tid != 7;
-    }
-  }
-  
-  static class TestMetadataBulkLoadFilter extends MetadataBulkLoadFilter {
-    @Override
-    protected Arbitrator getArbitrator() {
-      return new TestArbitrator();
-    }
-  }
-  
-  private static void put(TreeMap<Key,Value> tm, String row, ColumnFQ cfq, String val) {
-    Key k = new Key(new Text(row), cfq.getColumnFamily(), cfq.getColumnQualifier());
-    tm.put(k, new Value(val.getBytes()));
-  }
-  
-  private static void put(TreeMap<Key,Value> tm, String row, Text cf, String cq, String val) {
-    Key k = new Key(new Text(row), cf, new Text(cq));
-    if (val == null) {
-      k.setDeleted(true);
-      tm.put(k, new Value("".getBytes()));
-    } else
-      tm.put(k, new Value(val.getBytes()));
-  }
-  
-  @Test
-  public void testBasic() throws IOException {
-    TreeMap<Key,Value> tm1 = new TreeMap<Key,Value>();
-    TreeMap<Key,Value> expected = new TreeMap<Key,Value>();
-    
-    // following should not be deleted by filter
-    put(tm1, "2;m", TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN, "/t1");
-    put(tm1, "2;m", DataFileColumnFamily.NAME, "/t1/file1", "1,1");
-    put(tm1, "2;m", TabletsSection.BulkFileColumnFamily.NAME, "/t1/file1", "5");
-    put(tm1, "2;m", TabletsSection.BulkFileColumnFamily.NAME, "/t1/file3", "7");
-    put(tm1, "2;m", TabletsSection.BulkFileColumnFamily.NAME, "/t1/file4", "9");
-    put(tm1, "2<", TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN, "/t2");
-    put(tm1, "2<", DataFileColumnFamily.NAME, "/t2/file2", "1,1");
-    put(tm1, "2<", TabletsSection.BulkFileColumnFamily.NAME, "/t2/file6", "5");
-    put(tm1, "2<", TabletsSection.BulkFileColumnFamily.NAME, "/t2/file7", "7");
-    put(tm1, "2<", TabletsSection.BulkFileColumnFamily.NAME, "/t2/file8", "9");
-    put(tm1, "2<", TabletsSection.BulkFileColumnFamily.NAME, "/t2/fileC", null);
-    
-    expected.putAll(tm1);
-    
-    // the following should be deleted by filter
-    put(tm1, "2;m", TabletsSection.BulkFileColumnFamily.NAME, "/t1/file5", "8");
-    put(tm1, "2<", TabletsSection.BulkFileColumnFamily.NAME, "/t2/file9", "8");
-    put(tm1, "2<", TabletsSection.BulkFileColumnFamily.NAME, "/t2/fileA", "2");
-    
-    TestMetadataBulkLoadFilter iter = new TestMetadataBulkLoadFilter();
-    iter.init(new SortedMapIterator(tm1), new HashMap<String,String>(), new IteratorEnvironment() {
-      
-      @Override
-      public SortedKeyValueIterator<Key,Value> reserveMapFileReader(String mapFileName) throws IOException {
-        return null;
-      }
-      
-      @Override
-      public void registerSideChannel(SortedKeyValueIterator<Key,Value> iter) {}
-      
-      @Override
-      public boolean isFullMajorCompaction() {
-        return false;
-      }
-      
-      @Override
-      public IteratorScope getIteratorScope() {
-        return IteratorScope.majc;
-      }
-      
-      @Override
-      public AccumuloConfiguration getConfig() {
-        return null;
-      }
-    });
-    
-    iter.seek(new Range(), new ArrayList<ByteSequence>(), false);
-    
-    TreeMap<Key,Value> actual = new TreeMap<Key,Value>();
-    
-    while (iter.hasTop()) {
-      actual.put(iter.getTopKey(), iter.getTopValue());
-      iter.next();
-    }
-    
-    Assert.assertEquals(expected, actual);
-  }
-}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/4ba20cf3/server/server/src/test/java/org/apache/accumulo/server/security/SystemCredentialsTest.java
----------------------------------------------------------------------
diff --git a/server/server/src/test/java/org/apache/accumulo/server/security/SystemCredentialsTest.java b/server/server/src/test/java/org/apache/accumulo/server/security/SystemCredentialsTest.java
deleted file mode 100644
index f29fb27..0000000
--- a/server/server/src/test/java/org/apache/accumulo/server/security/SystemCredentialsTest.java
+++ /dev/null
@@ -1,65 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.server.security;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-
-import java.io.File;
-import java.io.IOException;
-import java.util.UUID;
-
-import org.apache.accumulo.core.client.Instance;
-import org.apache.accumulo.core.client.impl.ConnectorImpl;
-import org.apache.accumulo.core.security.Credentials;
-import org.apache.accumulo.server.security.SystemCredentials.SystemToken;
-import org.junit.BeforeClass;
-import org.junit.Test;
-
-/**
- * 
- */
-public class SystemCredentialsTest {
-  
-  @BeforeClass
-  public static void setUp() throws IOException {
-    File testInstanceId = new File(new File(new File(new File("target"), "instanceTest"), "instance_id"), UUID.fromString(
-        "00000000-0000-0000-0000-000000000000").toString());
-    if (!testInstanceId.exists()) {
-      testInstanceId.getParentFile().mkdirs();
-      testInstanceId.createNewFile();
-    }
-  }
-  
-  /**
-   * This is a test to ensure the string literal in {@link ConnectorImpl#ConnectorImpl(Instance, Credentials)} is kept up-to-date if we move the
-   * {@link SystemToken}<br/>
-   * This check will not be needed after ACCUMULO-1578
-   */
-  @Test
-  public void testSystemToken() {
-    assertEquals("org.apache.accumulo.server.security.SystemCredentials$SystemToken", SystemToken.class.getName());
-    assertEquals(SystemCredentials.get().getToken().getClass(), SystemToken.class);
-  }
-  
-  @Test
-  public void testSystemCredentials() {
-    Credentials a = SystemCredentials.get();
-    Credentials b = SystemCredentials.get();
-    assertTrue(a == b);
-  }
-}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/4ba20cf3/server/server/src/test/java/org/apache/accumulo/server/security/handler/ZKAuthenticatorTest.java
----------------------------------------------------------------------
diff --git a/server/server/src/test/java/org/apache/accumulo/server/security/handler/ZKAuthenticatorTest.java b/server/server/src/test/java/org/apache/accumulo/server/security/handler/ZKAuthenticatorTest.java
deleted file mode 100644
index 9700c8a..0000000
--- a/server/server/src/test/java/org/apache/accumulo/server/security/handler/ZKAuthenticatorTest.java
+++ /dev/null
@@ -1,87 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.server.security.handler;
-
-import java.util.Set;
-import java.util.TreeSet;
-
-import org.apache.accumulo.core.client.AccumuloException;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.core.security.SystemPermission;
-import org.apache.accumulo.core.security.TablePermission;
-import org.apache.accumulo.core.util.ByteArraySet;
-import org.apache.accumulo.server.security.handler.ZKSecurityTool;
-
-import junit.framework.TestCase;
-
-public class ZKAuthenticatorTest extends TestCase {
-  public void testPermissionIdConversions() {
-    for (SystemPermission s : SystemPermission.values())
-      assertTrue(s.equals(SystemPermission.getPermissionById(s.getId())));
-    
-    for (TablePermission s : TablePermission.values())
-      assertTrue(s.equals(TablePermission.getPermissionById(s.getId())));
-  }
-  
-  public void testAuthorizationConversion() {
-    ByteArraySet auths = new ByteArraySet();
-    for (int i = 0; i < 300; i += 3)
-      auths.add(Integer.toString(i).getBytes());
-    
-    Authorizations converted = new Authorizations(auths);
-    byte[] test = ZKSecurityTool.convertAuthorizations(converted);
-    Authorizations test2 = ZKSecurityTool.convertAuthorizations(test);
-    assertTrue(auths.size() == test2.size());
-    for (byte[] s : auths) {
-      assertTrue(test2.contains(s));
-    }
-  }
-  
-  public void testSystemConversion() {
-    Set<SystemPermission> perms = new TreeSet<SystemPermission>();
-    for (SystemPermission s : SystemPermission.values())
-      perms.add(s);
-    
-    Set<SystemPermission> converted = ZKSecurityTool.convertSystemPermissions(ZKSecurityTool.convertSystemPermissions(perms));
-    assertTrue(perms.size() == converted.size());
-    for (SystemPermission s : perms)
-      assertTrue(converted.contains(s));
-  }
-  
-  public void testTableConversion() {
-    Set<TablePermission> perms = new TreeSet<TablePermission>();
-    for (TablePermission s : TablePermission.values())
-      perms.add(s);
-    
-    Set<TablePermission> converted = ZKSecurityTool.convertTablePermissions(ZKSecurityTool.convertTablePermissions(perms));
-    assertTrue(perms.size() == converted.size());
-    for (TablePermission s : perms)
-      assertTrue(converted.contains(s));
-  }
-  
-  public void testEncryption() {
-    byte[] rawPass = "myPassword".getBytes();
-    byte[] storedBytes;
-    try {
-      storedBytes = ZKSecurityTool.createPass(rawPass);
-      assertTrue(ZKSecurityTool.checkPass(rawPass, storedBytes));
-    } catch (AccumuloException e) {
-      e.printStackTrace();
-      assertTrue(false);
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/4ba20cf3/server/server/src/test/java/org/apache/accumulo/server/tabletserver/CheckTabletMetadataTest.java
----------------------------------------------------------------------
diff --git a/server/server/src/test/java/org/apache/accumulo/server/tabletserver/CheckTabletMetadataTest.java b/server/server/src/test/java/org/apache/accumulo/server/tabletserver/CheckTabletMetadataTest.java
deleted file mode 100644
index aa7e7a4..0000000
--- a/server/server/src/test/java/org/apache/accumulo/server/tabletserver/CheckTabletMetadataTest.java
+++ /dev/null
@@ -1,122 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.accumulo.server.tabletserver;
-
-import java.util.TreeMap;
-
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.KeyExtent;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection;
-import org.apache.accumulo.core.util.ColumnFQ;
-import org.apache.accumulo.master.state.TServerInstance;
-import org.apache.hadoop.io.Text;
-import org.junit.Assert;
-import org.junit.Test;
-
-public class CheckTabletMetadataTest {
-  
-  private static Key nk(String row, ColumnFQ cfq) {
-    return new Key(new Text(row), cfq.getColumnFamily(), cfq.getColumnQualifier());
-  }
-  
-  private static Key nk(String row, Text cf, String cq) {
-    return new Key(row, cf.toString(), cq);
-  }
-  
-  private static void put(TreeMap<Key,Value> tabletMeta, String row, ColumnFQ cfq, byte[] val) {
-    Key k = new Key(new Text(row), cfq.getColumnFamily(), cfq.getColumnQualifier());
-    tabletMeta.put(k, new Value(val));
-  }
-  
-  private static void put(TreeMap<Key,Value> tabletMeta, String row, Text cf, String cq, String val) {
-    Key k = new Key(new Text(row), cf, new Text(cq));
-    tabletMeta.put(k, new Value(val.getBytes()));
-  }
-  
-  private static void assertFail(TreeMap<Key,Value> tabletMeta, KeyExtent ke, TServerInstance tsi) {
-    try {
-      Assert.assertNull(TabletServer.checkTabletMetadata(ke, tsi, tabletMeta, ke.getMetadataEntry()));
-    } catch (Exception e) {
-      
-    }
-  }
-  
-  private static void assertFail(TreeMap<Key,Value> tabletMeta, KeyExtent ke, TServerInstance tsi, Key keyToDelete) {
-    TreeMap<Key,Value> copy = new TreeMap<Key,Value>(tabletMeta);
-    Assert.assertNotNull(copy.remove(keyToDelete));
-    try {
-      Assert.assertNull(TabletServer.checkTabletMetadata(ke, tsi, copy, ke.getMetadataEntry()));
-    } catch (Exception e) {
-      
-    }
-  }
-  
-  @Test
-  public void testBadTabletMetadata() throws Exception {
-    
-    KeyExtent ke = new KeyExtent(new Text("1"), null, null);
-    
-    TreeMap<Key,Value> tabletMeta = new TreeMap<Key,Value>();
-    
-    put(tabletMeta, "1<", TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN, KeyExtent.encodePrevEndRow(null).get());
-    put(tabletMeta, "1<", TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN, "/t1".getBytes());
-    put(tabletMeta, "1<", TabletsSection.ServerColumnFamily.TIME_COLUMN, "M0".getBytes());
-    put(tabletMeta, "1<", TabletsSection.FutureLocationColumnFamily.NAME, "4", "127.0.0.1:9997");
-    
-    TServerInstance tsi = new TServerInstance("127.0.0.1:9997", 4);
-    
-    Assert.assertNotNull(TabletServer.checkTabletMetadata(ke, tsi, tabletMeta, ke.getMetadataEntry()));
-    
-    assertFail(tabletMeta, ke, new TServerInstance("127.0.0.1:9998", 4));
-    assertFail(tabletMeta, ke, new TServerInstance("127.0.0.1:9998", 5));
-    assertFail(tabletMeta, ke, new TServerInstance("127.0.0.1:9997", 5));
-    assertFail(tabletMeta, ke, new TServerInstance("127.0.0.2:9997", 4));
-    assertFail(tabletMeta, ke, new TServerInstance("127.0.0.2:9997", 5));
-    
-    assertFail(tabletMeta, new KeyExtent(new Text("1"), null, new Text("m")), tsi);
-    
-    assertFail(tabletMeta, new KeyExtent(new Text("1"), new Text("r"), new Text("m")), tsi);
-    
-    assertFail(tabletMeta, ke, tsi, nk("1<", TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN));
-    
-    assertFail(tabletMeta, ke, tsi, nk("1<", TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN));
-    
-    assertFail(tabletMeta, ke, tsi, nk("1<", TabletsSection.ServerColumnFamily.TIME_COLUMN));
-    
-    assertFail(tabletMeta, ke, tsi, nk("1<", TabletsSection.FutureLocationColumnFamily.NAME, "4"));
-    
-    TreeMap<Key,Value> copy = new TreeMap<Key,Value>(tabletMeta);
-    put(copy, "1<", TabletsSection.CurrentLocationColumnFamily.NAME, "4", "127.0.0.1:9997");
-    assertFail(copy, ke, tsi);
-    assertFail(copy, ke, tsi, nk("1<", TabletsSection.FutureLocationColumnFamily.NAME, "4"));
-    
-    copy = new TreeMap<Key,Value>(tabletMeta);
-    put(copy, "1<", TabletsSection.CurrentLocationColumnFamily.NAME, "5", "127.0.0.1:9998");
-    assertFail(copy, ke, tsi);
-    put(copy, "1<", TabletsSection.CurrentLocationColumnFamily.NAME, "6", "127.0.0.1:9999");
-    assertFail(copy, ke, tsi);
-    
-    copy = new TreeMap<Key,Value>(tabletMeta);
-    put(copy, "1<", TabletsSection.FutureLocationColumnFamily.NAME, "5", "127.0.0.1:9998");
-    assertFail(copy, ke, tsi);
-    
-    assertFail(new TreeMap<Key,Value>(), ke, tsi);
-    
-  }
-}
