accumulo-commits mailing list archives

From: ctubb...@apache.org
Subject: [08/59] [abbrv] ACCUMULO-658 Move master to its own module
Date: Sat, 07 Sep 2013 03:28:11 GMT
http://git-wip-us.apache.org/repos/asf/accumulo/blob/88079cc3/server/master/src/main/java/org/apache/accumulo/master/tableOps/Utils.java
----------------------------------------------------------------------
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/Utils.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/Utils.java
new file mode 100644
index 0000000..fa14f43
--- /dev/null
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/Utils.java
@@ -0,0 +1,132 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.master.tableOps;
+
+import java.math.BigInteger;
+import java.util.concurrent.locks.Lock;
+import java.util.concurrent.locks.ReentrantLock;
+
+import org.apache.accumulo.core.Constants;
+import org.apache.accumulo.core.client.Instance;
+import org.apache.accumulo.core.client.impl.Tables;
+import org.apache.accumulo.core.client.impl.thrift.TableOperation;
+import org.apache.accumulo.core.client.impl.thrift.TableOperationExceptionType;
+import org.apache.accumulo.core.client.impl.thrift.ThriftTableOperationException;
+import org.apache.accumulo.core.zookeeper.ZooUtil;
+import org.apache.accumulo.fate.zookeeper.DistributedReadWriteLock;
+import org.apache.accumulo.fate.zookeeper.IZooReaderWriter;
+import org.apache.accumulo.fate.zookeeper.IZooReaderWriter.Mutator;
+import org.apache.accumulo.fate.zookeeper.ZooReservation;
+import org.apache.accumulo.server.client.HdfsZooInstance;
+import org.apache.accumulo.server.zookeeper.ZooQueueLock;
+import org.apache.accumulo.server.zookeeper.ZooReaderWriter;
+import org.apache.commons.codec.binary.Base64;
+import org.apache.log4j.Logger;
+import org.apache.zookeeper.KeeperException;
+
+public class Utils {
+  
+  static void checkTableDoesNotExist(Instance instance, String tableName, String tableId, TableOperation operation) throws ThriftTableOperationException {
+    
+    String id = Tables.getNameToIdMap(instance).get(tableName);
+    
+    if (id != null && !id.equals(tableId))
+      throw new ThriftTableOperationException(null, tableName, operation, TableOperationExceptionType.EXISTS, null);
+  }
+  
+  static String getNextTableId(String tableName, Instance instance) throws ThriftTableOperationException {
+    
+    String tableId = null;
+    try {
+      IZooReaderWriter zoo = ZooReaderWriter.getRetryingInstance();
+      final String ntp = ZooUtil.getRoot(instance) + Constants.ZTABLES;
+      byte[] nid = zoo.mutate(ntp, "0".getBytes(), ZooUtil.PUBLIC, new Mutator() {
+        @Override
+        public byte[] mutate(byte[] currentValue) throws Exception {
+          BigInteger nextId = new BigInteger(new String(currentValue), Character.MAX_RADIX);
+          nextId = nextId.add(BigInteger.ONE);
+          return nextId.toString(Character.MAX_RADIX).getBytes();
+        }
+      });
+      return new String(nid);
+    } catch (Exception e1) {
+      Logger.getLogger(CreateTable.class).error("Failed to assign tableId to " + tableName, e1);
+      throw new ThriftTableOperationException(tableId, tableName, TableOperation.CREATE, TableOperationExceptionType.OTHER, e1.getMessage());
+    }
+  }
+  
+  static final Lock tableNameLock = new ReentrantLock();
+  static final Lock idLock = new ReentrantLock();
+  private static final Logger log = Logger.getLogger(Utils.class);
+  
+  public static long reserveTable(String tableId, long tid, boolean writeLock, boolean tableMustExist, TableOperation op) throws Exception {
+    if (getLock(tableId, tid, writeLock).tryLock()) {
+      if (tableMustExist) {
+        Instance instance = HdfsZooInstance.getInstance();
+        IZooReaderWriter zk = ZooReaderWriter.getRetryingInstance();
+        if (!zk.exists(ZooUtil.getRoot(instance) + Constants.ZTABLES + "/" + tableId))
+          throw new ThriftTableOperationException(tableId, "", op, TableOperationExceptionType.NOTFOUND, "Table does not exist");
+      }
+      log.info("table " + tableId + " (" + Long.toHexString(tid) + ") locked for " + (writeLock ? "write" : "read") + " operation: " + op);
+      return 0;
+    } else
+      return 100;
+  }
+  
+  public static void unreserveTable(String tableId, long tid, boolean writeLock) throws Exception {
+    getLock(tableId, tid, writeLock).unlock();
+    log.info("table " + tableId + " (" + Long.toHexString(tid) + ") unlocked for " + (writeLock ? "write" : "read"));
+  }
+  
+  public static long reserveHdfsDirectory(String directory, long tid) throws KeeperException, InterruptedException {
+    Instance instance = HdfsZooInstance.getInstance();
+    
+    String resvPath = ZooUtil.getRoot(instance) + Constants.ZHDFS_RESERVATIONS + "/" + new String(Base64.encodeBase64(directory.getBytes()));
+    
+    IZooReaderWriter zk = ZooReaderWriter.getRetryingInstance();
+    
+    if (ZooReservation.attempt(zk, resvPath, String.format("%016x", tid), "")) {
+      return 0;
+    } else
+      return 50;
+  }
+  
+  public static void unreserveHdfsDirectory(String directory, long tid) throws KeeperException, InterruptedException {
+    Instance instance = HdfsZooInstance.getInstance();
+    String resvPath = ZooUtil.getRoot(instance) + Constants.ZHDFS_RESERVATIONS + "/" + new String(Base64.encodeBase64(directory.getBytes()));
+    ZooReservation.release(ZooReaderWriter.getRetryingInstance(), resvPath, String.format("%016x", tid));
+  }
+  
+  private static Lock getLock(String tableId, long tid, boolean writeLock) throws Exception {
+    byte[] lockData = String.format("%016x", tid).getBytes();
+    ZooQueueLock qlock = new ZooQueueLock(ZooUtil.getRoot(HdfsZooInstance.getInstance()) + Constants.ZTABLE_LOCKS + "/" + tableId, false);
+    Lock lock = DistributedReadWriteLock.recoverLock(qlock, lockData);
+    if (lock == null) {
+      DistributedReadWriteLock locker = new DistributedReadWriteLock(qlock, lockData);
+      if (writeLock)
+        lock = locker.writeLock();
+      else
+        lock = locker.readLock();
+    }
+    return lock;
+  }
+  
+  public static Lock getReadLock(String tableId, long tid) throws Exception {
+    return Utils.getLock(tableId, tid, false);
+  }
+  
+}
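
Note on the id scheme above: the mutator in getNextTableId treats the value stored under Constants.ZTABLES as a base-36 counter (Character.MAX_RADIX is 36). A minimal standalone sketch of that increment, with illustrative names that are not part of this patch:

    import java.math.BigInteger;

    public class TableIdIncrementSketch {
      // Same arithmetic as the Mutator above: parse the current id as base 36,
      // add one, and render it back in base 36 ("9" -> "a", "z" -> "10").
      static String next(String currentId) {
        BigInteger next = new BigInteger(currentId, Character.MAX_RADIX).add(BigInteger.ONE);
        return next.toString(Character.MAX_RADIX);
      }

      public static void main(String[] args) {
        System.out.println(next("0")); // 1
        System.out.println(next("9")); // a
        System.out.println(next("z")); // 10
      }
    }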

http://git-wip-us.apache.org/repos/asf/accumulo/blob/88079cc3/server/master/src/main/java/org/apache/accumulo/master/tserverOps/ShutdownTServer.java
----------------------------------------------------------------------
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tserverOps/ShutdownTServer.java b/server/master/src/main/java/org/apache/accumulo/master/tserverOps/ShutdownTServer.java
new file mode 100644
index 0000000..765b83a
--- /dev/null
+++ b/server/master/src/main/java/org/apache/accumulo/master/tserverOps/ShutdownTServer.java
@@ -0,0 +1,94 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.master.tserverOps;
+
+import org.apache.accumulo.core.Constants;
+import org.apache.accumulo.core.master.thrift.TabletServerStatus;
+import org.apache.accumulo.core.util.AddressUtil;
+import org.apache.accumulo.core.zookeeper.ZooUtil;
+import org.apache.accumulo.fate.Repo;
+import org.apache.accumulo.fate.zookeeper.IZooReaderWriter;
+import org.apache.accumulo.fate.zookeeper.ZooUtil.NodeExistsPolicy;
+import org.apache.accumulo.master.Master;
+import org.apache.accumulo.master.EventCoordinator.Listener;
+import org.apache.accumulo.master.LiveTServerSet.TServerConnection;
+import org.apache.accumulo.master.state.TServerInstance;
+import org.apache.accumulo.master.tableOps.MasterRepo;
+import org.apache.accumulo.server.zookeeper.ZooLock;
+import org.apache.accumulo.server.zookeeper.ZooReaderWriter;
+import org.apache.log4j.Logger;
+import org.apache.thrift.transport.TTransportException;
+
+public class ShutdownTServer extends MasterRepo {
+  
+  private static final long serialVersionUID = 1L;
+  private static final Logger log = Logger.getLogger(ShutdownTServer.class);
+  private TServerInstance server;
+  private boolean force;
+  
+  public ShutdownTServer(TServerInstance server, boolean force) {
+    this.server = server;
+    this.force = force;
+  }
+  
+  @Override
+  public long isReady(long tid, Master environment) throws Exception {
+    return 0;
+  }
+  
+  @Override
+  public Repo<Master> call(long tid, Master master) throws Exception {
+    // suppress assignment of tablets to the server
+    if (force) {
+      String tserver = AddressUtil.toString(server.getLocation());
+      String path = ZooUtil.getRoot(master.getInstance()) + Constants.ZTSERVERS + "/" + tserver;
+      ZooLock.deleteLock(path);
+      path = ZooUtil.getRoot(master.getInstance()) + Constants.ZDEADTSERVERS + "/" + tserver;
+      IZooReaderWriter zoo = ZooReaderWriter.getInstance();
+      zoo.putPersistentData(path, "forced down".getBytes(), NodeExistsPolicy.OVERWRITE);
+      return null;
+    }
+    
+    // TODO move this to isReady() and drop while loop? - ACCUMULO-1259
+    Listener listener = master.getEventCoordinator().getListener();
+    master.shutdownTServer(server);
+    while (master.onlineTabletServers().contains(server)) {
+      TServerConnection connection = master.getConnection(server);
+      if (connection != null) {
+        try {
+          TabletServerStatus status = connection.getTableMap(false);
+          if (status.tableMap != null && status.tableMap.isEmpty()) {
+            log.info("tablet server hosts no tablets " + server);
+            connection.halt(master.getMasterLock());
+            log.info("tablet server asked to halt " + server);
+            break;
+          }
+        } catch (TTransportException ex) {
+          // expected
+        } catch (Exception ex) {
+          log.error("Error talking to tablet server " + server + ": " + ex);
+        }
+      }
+      listener.waitForEvents(1000);
+    }
+    
+    return null;
+  }
+  
+  @Override
+  public void undo(long tid, Master m) throws Exception {}
+}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/88079cc3/server/master/src/main/java/org/apache/accumulo/master/util/FateAdmin.java
----------------------------------------------------------------------
diff --git a/server/master/src/main/java/org/apache/accumulo/master/util/FateAdmin.java b/server/master/src/main/java/org/apache/accumulo/master/util/FateAdmin.java
new file mode 100644
index 0000000..f794112
--- /dev/null
+++ b/server/master/src/main/java/org/apache/accumulo/master/util/FateAdmin.java
@@ -0,0 +1,88 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.master.util;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.accumulo.core.Constants;
+import org.apache.accumulo.core.cli.Help;
+import org.apache.accumulo.core.client.Instance;
+import org.apache.accumulo.core.zookeeper.ZooUtil;
+import org.apache.accumulo.fate.AdminUtil;
+import org.apache.accumulo.fate.ZooStore;
+import org.apache.accumulo.fate.zookeeper.IZooReaderWriter;
+import org.apache.accumulo.master.Master;
+import org.apache.accumulo.server.client.HdfsZooInstance;
+import org.apache.accumulo.server.zookeeper.ZooReaderWriter;
+
+import com.beust.jcommander.JCommander;
+import com.beust.jcommander.Parameter;
+import com.beust.jcommander.Parameters;
+
+/**
+ * A utility to administer FATE operations
+ */
+public class FateAdmin {
+  
+  static class TxOpts {
+    @Parameter(description = "<txid>", required = true)
+    List<String> args = new ArrayList<String>();
+  }
+  
+  @Parameters(commandDescription = "Stop an existing FATE by transaction id")
+  static class FailOpts extends TxOpts {}
+  
+  @Parameters(commandDescription = "Delete an existing FATE by transaction id")
+  static class DeleteOpts extends TxOpts {}
+  
+  @Parameters(commandDescription = "List the existing FATE transactions")
+  static class PrintOpts {}
+  
+  public static void main(String[] args) throws Exception {
+    Help opts = new Help();
+    JCommander jc = new JCommander(opts);
+    jc.setProgramName(FateAdmin.class.getName());
+    jc.addCommand("fail", new FailOpts());
+    jc.addCommand("delete", new DeleteOpts());
+    jc.addCommand("print", new PrintOpts());
+    jc.parse(args);
+    if (opts.help || jc.getParsedCommand() == null) {
+      jc.usage();
+      System.exit(1);
+    }
+    
+    System.err.printf("This tool has been deprecated%nFATE administration now available within 'accumulo shell'%n$ fate fail <txid>... | delete <txid>... | print [<txid>...]%n%n");
+    
+    AdminUtil<Master> admin = new AdminUtil<Master>();
+    
+    Instance instance = HdfsZooInstance.getInstance();
+    String path = ZooUtil.getRoot(instance) + Constants.ZFATE;
+    String masterPath = ZooUtil.getRoot(instance) + Constants.ZMASTER_LOCK;
+    IZooReaderWriter zk = ZooReaderWriter.getRetryingInstance();
+    ZooStore<Master> zs = new ZooStore<Master>(path, zk);
+    
+    if (jc.getParsedCommand().equals("fail")) {
+      admin.prepFail(zs, zk, masterPath, args[1]);
+    } else if (jc.getParsedCommand().equals("delete")) {
+      admin.prepDelete(zs, zk, masterPath, args[1]);
+      admin.deleteLocks(zs, zk, ZooUtil.getRoot(instance) + Constants.ZTABLE_LOCKS, args[1]);
+    } else if (jc.getParsedCommand().equals("print")) {
+      admin.print(zs, zk, ZooUtil.getRoot(instance) + Constants.ZTABLE_LOCKS);
+    }
+  }
+}
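
For reference, FateAdmin reads the transaction id for 'fail' and 'delete' from the raw argument array (args[1]) rather than from the parsed TxOpts, so the id is expected immediately after the subcommand. A minimal sketch of driving the class through its main method (argument values are illustrative, and a running instance with ZooKeeper is assumed):

    public class FateAdminUsageSketch {
      public static void main(String[] unused) throws Exception {
        // list all FATE transactions
        org.apache.accumulo.master.util.FateAdmin.main(new String[] {"print"});
        // fail or delete a specific transaction by its id (hypothetical txid)
        // org.apache.accumulo.master.util.FateAdmin.main(new String[] {"fail", "4a5b6c7d8e9f0a1b"});
      }
    }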

http://git-wip-us.apache.org/repos/asf/accumulo/blob/88079cc3/server/master/src/test/java/org/apache/accumulo/master/DefaultMapTest.java
----------------------------------------------------------------------
diff --git a/server/master/src/test/java/org/apache/accumulo/master/DefaultMapTest.java b/server/master/src/test/java/org/apache/accumulo/master/DefaultMapTest.java
new file mode 100644
index 0000000..3389aa3
--- /dev/null
+++ b/server/master/src/test/java/org/apache/accumulo/master/DefaultMapTest.java
@@ -0,0 +1,36 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.master;
+
+import org.apache.accumulo.server.util.DefaultMap;
+import org.junit.Test;
+
+import static org.junit.Assert.*;
+
+public class DefaultMapTest {
+  
+  @Test
+  public void testDefaultMap() {
+    DefaultMap<String,String> map = new DefaultMap<String,String>("");
+    map.put("key", "value");
+    String empty = map.get("otherKey");
+    assertEquals(map.get("key"), "value");
+    assertEquals(empty, "");
+    assertTrue(empty == map.get("otherKey"));
+  }
+  
+}
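
The test above depends on two behaviors of DefaultMap: lookups for absent keys return the configured default (here the empty string), and repeated lookups return the same instance, which is what the final reference-equality assertion checks. A minimal sketch of a map with those semantics, written only to illustrate what the test exercises and not the server-side implementation:

    import java.util.HashMap;

    // Illustrative only: a HashMap that hands back a fixed default value for missing keys.
    public class DefaultMapSketch<K,V> extends HashMap<K,V> {
      private static final long serialVersionUID = 1L;
      private final V defaultValue;

      public DefaultMapSketch(V defaultValue) {
        this.defaultValue = defaultValue;
      }

      @Override
      public V get(Object key) {
        V value = super.get(key);
        return value == null ? defaultValue : value; // same default instance on every miss
      }
    }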

http://git-wip-us.apache.org/repos/asf/accumulo/blob/88079cc3/server/master/src/test/java/org/apache/accumulo/master/TestMergeState.java
----------------------------------------------------------------------
diff --git a/server/master/src/test/java/org/apache/accumulo/master/TestMergeState.java b/server/master/src/test/java/org/apache/accumulo/master/TestMergeState.java
new file mode 100644
index 0000000..20e4291
--- /dev/null
+++ b/server/master/src/test/java/org/apache/accumulo/master/TestMergeState.java
@@ -0,0 +1,195 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.master;
+
+import java.net.InetSocketAddress;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.Set;
+
+import org.apache.accumulo.core.client.BatchDeleter;
+import org.apache.accumulo.core.client.BatchWriter;
+import org.apache.accumulo.core.client.BatchWriterConfig;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.Instance;
+import org.apache.accumulo.core.client.MutationsRejectedException;
+import org.apache.accumulo.core.client.TableNotFoundException;
+import org.apache.accumulo.core.client.mock.MockInstance;
+import org.apache.accumulo.core.client.security.tokens.PasswordToken;
+import org.apache.accumulo.core.data.KeyExtent;
+import org.apache.accumulo.core.data.Mutation;
+import org.apache.accumulo.core.data.Range;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.metadata.MetadataTable;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.ChoppedColumnFamily;
+import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.core.security.Credentials;
+import org.apache.accumulo.master.state.Assignment;
+import org.apache.accumulo.master.state.CurrentState;
+import org.apache.accumulo.master.state.MergeInfo;
+import org.apache.accumulo.master.state.MergeState;
+import org.apache.accumulo.master.state.MergeStats;
+import org.apache.accumulo.master.state.MetaDataStateStore;
+import org.apache.accumulo.master.state.TServerInstance;
+import org.apache.accumulo.master.state.TabletLocationState;
+import org.apache.accumulo.master.state.TabletState;
+import org.apache.hadoop.io.Text;
+import org.junit.Assert;
+import org.junit.Test;
+
+/**
+ * Tests merge-state transitions computed from a mocked !METADATA table.
+ */
+public class TestMergeState {
+  
+  class MockCurrentState implements CurrentState {
+    
+    TServerInstance someTServer = new TServerInstance(new InetSocketAddress("127.0.0.1", 1234), 0x123456);
+    MergeInfo mergeInfo;
+    
+    MockCurrentState(MergeInfo info) {
+      this.mergeInfo = info;
+    }
+    
+    @Override
+    public Set<String> onlineTables() {
+      return Collections.singleton("t");
+    }
+    
+    @Override
+    public Set<TServerInstance> onlineTabletServers() {
+      return Collections.singleton(someTServer);
+    }
+    
+    @Override
+    public Collection<MergeInfo> merges() {
+      return Collections.singleton(mergeInfo);
+    }
+  }
+  
+  private static void update(Connector c, Mutation m) throws TableNotFoundException, MutationsRejectedException {
+    BatchWriter bw = c.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
+    bw.addMutation(m);
+    bw.close();
+  }
+  
+  @Test
+  public void test() throws Exception {
+    Instance instance = new MockInstance();
+    Connector connector = instance.getConnector("root", new PasswordToken(""));
+    BatchWriter bw = connector.createBatchWriter("!METADATA", new BatchWriterConfig());
+    
+    // Create a fake METADATA table with these splits
+    String splits[] = {"a", "e", "j", "o", "t", "z"};
+    // create metadata for a table "t" with the splits above
+    Text tableId = new Text("t");
+    Text pr = null;
+    for (String s : splits) {
+      Text split = new Text(s);
+      Mutation prevRow = KeyExtent.getPrevRowUpdateMutation(new KeyExtent(tableId, split, pr));
+      prevRow.put(TabletsSection.CurrentLocationColumnFamily.NAME, new Text("123456"), new Value("127.0.0.1:1234".getBytes()));
+      ChoppedColumnFamily.CHOPPED_COLUMN.put(prevRow, new Value("junk".getBytes()));
+      bw.addMutation(prevRow);
+      pr = split;
+    }
+    // Add the default tablet
+    Mutation defaultTablet = KeyExtent.getPrevRowUpdateMutation(new KeyExtent(tableId, null, pr));
+    defaultTablet.put(TabletsSection.CurrentLocationColumnFamily.NAME, new Text("123456"), new Value("127.0.0.1:1234".getBytes()));
+    bw.addMutation(defaultTablet);
+    bw.close();
+    
+    // Read out the TabletLocationStates
+    MockCurrentState state = new MockCurrentState(new MergeInfo(new KeyExtent(tableId, new Text("p"), new Text("e")), MergeInfo.Operation.MERGE));
+    Credentials credentials = new Credentials("root", new PasswordToken(new byte[0]));
+    
+    // Verify the tablet state: hosted, and count
+    MetaDataStateStore metaDataStateStore = new MetaDataStateStore(instance, credentials, state);
+    int count = 0;
+    for (TabletLocationState tss : metaDataStateStore) {
+      Assert.assertEquals(TabletState.HOSTED, tss.getState(state.onlineTabletServers()));
+      count++;
+    }
+    Assert.assertEquals(splits.length + 1, count);
+    
+    // Create the hole
+    // Split the tablet at one end of the range
+    Mutation m = new KeyExtent(tableId, new Text("t"), new Text("p")).getPrevRowUpdateMutation();
+    TabletsSection.TabletColumnFamily.SPLIT_RATIO_COLUMN.put(m, new Value("0.5".getBytes()));
+    TabletsSection.TabletColumnFamily.OLD_PREV_ROW_COLUMN.put(m, KeyExtent.encodePrevEndRow(new Text("o")));
+    update(connector, m);
+    
+    // do the state check
+    MergeStats stats = scan(state, metaDataStateStore);
+    MergeState newState = stats.nextMergeState(connector, state);
+    Assert.assertEquals(MergeState.WAITING_FOR_OFFLINE, newState);
+    
+    // unassign the tablets
+    BatchDeleter deleter = connector.createBatchDeleter("!METADATA", Authorizations.EMPTY, 1000, new BatchWriterConfig());
+    deleter.fetchColumnFamily(TabletsSection.CurrentLocationColumnFamily.NAME);
+    deleter.setRanges(Collections.singletonList(new Range()));
+    deleter.delete();
+    
+    // now we should be ready to merge, but we have an inconsistent !METADATA table
+    stats = scan(state, metaDataStateStore);
+    Assert.assertEquals(MergeState.WAITING_FOR_OFFLINE, stats.nextMergeState(connector, state));
+    
+    // finish the split
+    KeyExtent tablet = new KeyExtent(tableId, new Text("p"), new Text("o"));
+    m = tablet.getPrevRowUpdateMutation();
+    TabletsSection.TabletColumnFamily.SPLIT_RATIO_COLUMN.put(m, new Value("0.5".getBytes()));
+    update(connector, m);
+    metaDataStateStore.setLocations(Collections.singletonList(new Assignment(tablet, state.someTServer)));
+    
+    // oh no... there's a new tablet online
+    stats = scan(state, metaDataStateStore);
+    Assert.assertEquals(MergeState.WAITING_FOR_CHOPPED, stats.nextMergeState(connector, state));
+    
+    // chop it
+    m = tablet.getPrevRowUpdateMutation();
+    ChoppedColumnFamily.CHOPPED_COLUMN.put(m, new Value("junk".getBytes()));
+    update(connector, m);
+    
+    stats = scan(state, metaDataStateStore);
+    Assert.assertEquals(MergeState.WAITING_FOR_OFFLINE, stats.nextMergeState(connector, state));
+    
+    // take it offline
+    m = tablet.getPrevRowUpdateMutation();
+    Collection<Collection<String>> walogs = Collections.emptyList();
+    metaDataStateStore.unassign(Collections.singletonList(new TabletLocationState(tablet, null, state.someTServer, null, walogs, false)));
+    
+    // now we can merge
+    stats = scan(state, metaDataStateStore);
+    Assert.assertEquals(MergeState.MERGING, stats.nextMergeState(connector, state));
+    
+  }
+  
+  /**
+   * Rescans the state store and recomputes merge statistics.
+   *
+   * @param state the mocked current master state
+   * @param metaDataStateStore the store holding tablet location states
+   */
+  private MergeStats scan(MockCurrentState state, MetaDataStateStore metaDataStateStore) {
+    MergeStats stats = new MergeStats(state.mergeInfo);
+    stats.getMergeInfo().setState(MergeState.WAITING_FOR_OFFLINE);
+    for (TabletLocationState tss : metaDataStateStore) {
+      stats.update(tss.extent, tss.getState(state.onlineTabletServers()), tss.chopped, false);
+    }
+    return stats;
+  }
+}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/88079cc3/server/master/src/test/java/org/apache/accumulo/master/balancer/ChaoticLoadBalancerTest.java
----------------------------------------------------------------------
diff --git a/server/master/src/test/java/org/apache/accumulo/master/balancer/ChaoticLoadBalancerTest.java b/server/master/src/test/java/org/apache/accumulo/master/balancer/ChaoticLoadBalancerTest.java
new file mode 100644
index 0000000..7f4b306
--- /dev/null
+++ b/server/master/src/test/java/org/apache/accumulo/master/balancer/ChaoticLoadBalancerTest.java
@@ -0,0 +1,167 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.master.balancer;
+
+import static org.junit.Assert.assertEquals;
+
+import java.net.InetSocketAddress;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Set;
+import java.util.SortedMap;
+import java.util.TreeMap;
+
+import org.apache.accumulo.core.client.impl.thrift.ThriftSecurityException;
+import org.apache.accumulo.core.data.KeyExtent;
+import org.apache.accumulo.core.master.thrift.TableInfo;
+import org.apache.accumulo.core.master.thrift.TabletServerStatus;
+import org.apache.accumulo.core.tabletserver.thrift.TabletStats;
+import org.apache.accumulo.core.util.AddressUtil;
+import org.apache.accumulo.master.balancer.ChaoticLoadBalancer;
+import org.apache.accumulo.master.state.TServerInstance;
+import org.apache.accumulo.master.state.TabletMigration;
+import org.apache.hadoop.io.Text;
+import org.apache.thrift.TException;
+import org.junit.Test;
+
+public class ChaoticLoadBalancerTest {
+  
+  class FakeTServer {
+    List<KeyExtent> extents = new ArrayList<KeyExtent>();
+    
+    TabletServerStatus getStatus(TServerInstance server) {
+      TabletServerStatus result = new TabletServerStatus();
+      result.tableMap = new HashMap<String,TableInfo>();
+      for (KeyExtent extent : extents) {
+        String table = extent.getTableId().toString();
+        TableInfo info = result.tableMap.get(table);
+        if (info == null)
+          result.tableMap.put(table, info = new TableInfo());
+        info.onlineTablets++;
+        info.recs = info.onlineTablets;
+        info.ingestRate = 123.;
+        info.queryRate = 456.;
+      }
+      return result;
+    }
+  }
+  
+  Map<TServerInstance,FakeTServer> servers = new HashMap<TServerInstance,FakeTServer>();
+  
+  class TestChaoticLoadBalancer extends ChaoticLoadBalancer {
+    
+    @Override
+    public List<TabletStats> getOnlineTabletsForTable(TServerInstance tserver, String table) throws ThriftSecurityException, TException {
+      List<TabletStats> result = new ArrayList<TabletStats>();
+      for (KeyExtent extent : servers.get(tserver).extents) {
+        if (extent.getTableId().toString().equals(table)) {
+          result.add(new TabletStats(extent.toThrift(), null, null, null, 0l, 0., 0., 0));
+        }
+      }
+      return result;
+    }
+  }
+  
+  @Test
+  public void testAssignMigrations() {
+    servers.clear();
+    servers.put(new TServerInstance(AddressUtil.parseAddress("127.0.0.1", 1234), "a"), new FakeTServer());
+    servers.put(new TServerInstance(AddressUtil.parseAddress("127.0.0.1", 1235), "b"), new FakeTServer());
+    servers.put(new TServerInstance(AddressUtil.parseAddress("127.0.0.1", 1236), "c"), new FakeTServer());
+    Map<KeyExtent,TServerInstance> metadataTable = new TreeMap<KeyExtent,TServerInstance>();
+    String table = "t1";
+    metadataTable.put(makeExtent(table, null, null), null);
+    table = "t2";
+    metadataTable.put(makeExtent(table, "a", null), null);
+    metadataTable.put(makeExtent(table, null, "a"), null);
+    table = "t3";
+    metadataTable.put(makeExtent(table, "a", null), null);
+    metadataTable.put(makeExtent(table, "b", "a"), null);
+    metadataTable.put(makeExtent(table, "c", "b"), null);
+    metadataTable.put(makeExtent(table, "d", "c"), null);
+    metadataTable.put(makeExtent(table, "e", "d"), null);
+    metadataTable.put(makeExtent(table, null, "e"), null);
+    
+    TestChaoticLoadBalancer balancer = new TestChaoticLoadBalancer();
+    
+    SortedMap<TServerInstance,TabletServerStatus> current = new TreeMap<TServerInstance,TabletServerStatus>();
+    for (Entry<TServerInstance,FakeTServer> entry : servers.entrySet()) {
+      current.put(entry.getKey(), entry.getValue().getStatus(entry.getKey()));
+    }
+    
+    Map<KeyExtent,TServerInstance> assignments = new HashMap<KeyExtent,TServerInstance>();
+    balancer.getAssignments(getAssignments(servers), metadataTable, assignments);
+    
+    assertEquals(assignments.size(), metadataTable.size());
+  }
+  
+  SortedMap<TServerInstance,TabletServerStatus> getAssignments(Map<TServerInstance,FakeTServer> servers) {
+    SortedMap<TServerInstance,TabletServerStatus> result = new TreeMap<TServerInstance,TabletServerStatus>();
+    for (Entry<TServerInstance,FakeTServer> entry : servers.entrySet()) {
+      result.put(entry.getKey(), entry.getValue().getStatus(entry.getKey()));
+    }
+    return result;
+  }
+  
+  @Test
+  public void testUnevenAssignment() {
+    servers.clear();
+    for (char c : "abcdefghijklmnopqrstuvwxyz".toCharArray()) {
+      String cString = Character.toString(c);
+      InetSocketAddress fakeAddress = AddressUtil.parseAddress("127.0.0.1", c);
+      String fakeInstance = cString;
+      TServerInstance tsi = new TServerInstance(fakeAddress, fakeInstance);
+      FakeTServer fakeTServer = new FakeTServer();
+      servers.put(tsi, fakeTServer);
+      fakeTServer.extents.add(makeExtent(cString, null, null));
+    }
+    // Put more tablets on one server, but not more than the number of servers
+    Entry<TServerInstance,FakeTServer> first = servers.entrySet().iterator().next();
+    first.getValue().extents.add(makeExtent("newTable", "a", null));
+    first.getValue().extents.add(makeExtent("newTable", "b", "a"));
+    first.getValue().extents.add(makeExtent("newTable", "c", "b"));
+    first.getValue().extents.add(makeExtent("newTable", "d", "c"));
+    first.getValue().extents.add(makeExtent("newTable", "e", "d"));
+    first.getValue().extents.add(makeExtent("newTable", "f", "e"));
+    first.getValue().extents.add(makeExtent("newTable", "g", "f"));
+    first.getValue().extents.add(makeExtent("newTable", "h", "g"));
+    first.getValue().extents.add(makeExtent("newTable", "i", null));
+    TestChaoticLoadBalancer balancer = new TestChaoticLoadBalancer();
+    Set<KeyExtent> migrations = Collections.emptySet();
+    
+    // Just want to make sure it gets some migrations; randomness prevents a guarantee of a defined, or even expected, amount
+    List<TabletMigration> migrationsOut = new ArrayList<TabletMigration>();
+    while (migrationsOut.size() == 0) {
+      balancer.balance(getAssignments(servers), migrations, migrationsOut);
+    }
+  }
+  
+  private static KeyExtent makeExtent(String table, String end, String prev) {
+    return new KeyExtent(new Text(table), toText(end), toText(prev));
+  }
+  
+  private static Text toText(String value) {
+    if (value != null)
+      return new Text(value);
+    return null;
+  }
+  
+}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/88079cc3/server/master/src/test/java/org/apache/accumulo/master/balancer/DefaultLoadBalancerTest.java
----------------------------------------------------------------------
diff --git a/server/master/src/test/java/org/apache/accumulo/master/balancer/DefaultLoadBalancerTest.java b/server/master/src/test/java/org/apache/accumulo/master/balancer/DefaultLoadBalancerTest.java
new file mode 100644
index 0000000..d005436
--- /dev/null
+++ b/server/master/src/test/java/org/apache/accumulo/master/balancer/DefaultLoadBalancerTest.java
@@ -0,0 +1,281 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.master.balancer;
+
+import static org.junit.Assert.*;
+
+import java.net.InetSocketAddress;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.SortedMap;
+import java.util.TreeMap;
+import java.util.Map.Entry;
+
+import org.apache.accumulo.core.client.impl.thrift.ThriftSecurityException;
+import org.apache.accumulo.core.data.KeyExtent;
+import org.apache.accumulo.core.master.thrift.TableInfo;
+import org.apache.accumulo.core.master.thrift.TabletServerStatus;
+import org.apache.accumulo.core.tabletserver.thrift.TabletStats;
+import org.apache.accumulo.core.util.AddressUtil;
+import org.apache.accumulo.master.balancer.DefaultLoadBalancer;
+import org.apache.accumulo.master.state.TServerInstance;
+import org.apache.accumulo.master.state.TabletMigration;
+import org.apache.hadoop.io.Text;
+import org.apache.thrift.TException;
+import org.junit.Test;
+
+public class DefaultLoadBalancerTest {
+  
+  class FakeTServer {
+    List<KeyExtent> extents = new ArrayList<KeyExtent>();
+    
+    TabletServerStatus getStatus(TServerInstance server) {
+      TabletServerStatus result = new TabletServerStatus();
+      result.tableMap = new HashMap<String,TableInfo>();
+      for (KeyExtent extent : extents) {
+        String table = extent.getTableId().toString();
+        TableInfo info = result.tableMap.get(table);
+        if (info == null)
+          result.tableMap.put(table, info = new TableInfo());
+        info.onlineTablets++;
+        info.recs = info.onlineTablets;
+        info.ingestRate = 123.;
+        info.queryRate = 456.;
+      }
+      return result;
+    }
+  }
+  
+  Map<TServerInstance,FakeTServer> servers = new HashMap<TServerInstance,FakeTServer>();
+  
+  class TestDefaultLoadBalancer extends DefaultLoadBalancer {
+    
+    @Override
+    public List<TabletStats> getOnlineTabletsForTable(TServerInstance tserver, String table) throws ThriftSecurityException, TException {
+      List<TabletStats> result = new ArrayList<TabletStats>();
+      for (KeyExtent extent : servers.get(tserver).extents) {
+        if (extent.getTableId().toString().equals(table)) {
+          result.add(new TabletStats(extent.toThrift(), null, null, null, 0l, 0., 0., 0));
+        }
+      }
+      return result;
+    }
+  }
+  
+  @Test
+  public void testAssignMigrations() {
+    servers.clear();
+    servers.put(new TServerInstance(AddressUtil.parseAddress("127.0.0.1", 1234), "a"), new FakeTServer());
+    servers.put(new TServerInstance(AddressUtil.parseAddress("127.0.0.1", 1235), "b"), new FakeTServer());
+    servers.put(new TServerInstance(AddressUtil.parseAddress("127.0.0.1", 1236), "c"), new FakeTServer());
+    List<KeyExtent> metadataTable = new ArrayList<KeyExtent>();
+    String table = "t1";
+    metadataTable.add(makeExtent(table, null, null));
+    table = "t2";
+    metadataTable.add(makeExtent(table, "a", null));
+    metadataTable.add(makeExtent(table, null, "a"));
+    table = "t3";
+    metadataTable.add(makeExtent(table, "a", null));
+    metadataTable.add(makeExtent(table, "b", "a"));
+    metadataTable.add(makeExtent(table, "c", "b"));
+    metadataTable.add(makeExtent(table, "d", "c"));
+    metadataTable.add(makeExtent(table, "e", "d"));
+    metadataTable.add(makeExtent(table, null, "e"));
+    Collections.sort(metadataTable);
+    
+    TestDefaultLoadBalancer balancer = new TestDefaultLoadBalancer();
+    
+    SortedMap<TServerInstance,TabletServerStatus> current = new TreeMap<TServerInstance,TabletServerStatus>();
+    for (Entry<TServerInstance,FakeTServer> entry : servers.entrySet()) {
+      current.put(entry.getKey(), entry.getValue().getStatus(entry.getKey()));
+    }
+    assignTablets(metadataTable, servers, current, balancer);
+    
+    // Verify that the counts on the tables are correct
+    Map<String,Integer> expectedCounts = new HashMap<String,Integer>();
+    expectedCounts.put("t1", 1);
+    expectedCounts.put("t2", 1);
+    expectedCounts.put("t3", 2);
+    checkBalance(metadataTable, servers, expectedCounts);
+    
+    // Rebalance once
+    for (Entry<TServerInstance,FakeTServer> entry : servers.entrySet()) {
+      current.put(entry.getKey(), entry.getValue().getStatus(entry.getKey()));
+    }
+    
+    // Nothing should happen, we are balanced
+    ArrayList<TabletMigration> out = new ArrayList<TabletMigration>();
+    balancer.getMigrations(current, out);
+    assertEquals(out.size(), 0);
+    
+    // Take down a tabletServer
+    TServerInstance first = current.keySet().iterator().next();
+    current.remove(first);
+    FakeTServer remove = servers.remove(first);
+    
+    // reassign offline extents
+    assignTablets(remove.extents, servers, current, balancer);
+    checkBalance(metadataTable, servers, null);
+  }
+  
+  private void assignTablets(List<KeyExtent> metadataTable, Map<TServerInstance,FakeTServer> servers, SortedMap<TServerInstance,TabletServerStatus> status,
+      TestDefaultLoadBalancer balancer) {
+    // Assign tablets
+    for (KeyExtent extent : metadataTable) {
+      TServerInstance assignment = balancer.getAssignment(status, extent, null);
+      assertNotNull(assignment);
+      assertFalse(servers.get(assignment).extents.contains(extent));
+      servers.get(assignment).extents.add(extent);
+    }
+  }
+  
+  SortedMap<TServerInstance,TabletServerStatus> getAssignments(Map<TServerInstance,FakeTServer> servers) {
+    SortedMap<TServerInstance,TabletServerStatus> result = new TreeMap<TServerInstance,TabletServerStatus>();
+    for (Entry<TServerInstance,FakeTServer> entry : servers.entrySet()) {
+      result.put(entry.getKey(), entry.getValue().getStatus(entry.getKey()));
+    }
+    return result;
+  }
+  
+  @Test
+  public void testUnevenAssignment() {
+    servers.clear();
+    for (char c : "abcdefghijklmnopqrstuvwxyz".toCharArray()) {
+      String cString = Character.toString(c);
+      InetSocketAddress fakeAddress = AddressUtil.parseAddress("127.0.0.1", (int) c);
+      String fakeInstance = cString;
+      TServerInstance tsi = new TServerInstance(fakeAddress, fakeInstance);
+      FakeTServer fakeTServer = new FakeTServer();
+      servers.put(tsi, fakeTServer);
+      fakeTServer.extents.add(makeExtent(cString, null, null));
+    }
+    // Put more tablets on one server, but not more than the number of servers
+    Entry<TServerInstance,FakeTServer> first = servers.entrySet().iterator().next();
+    first.getValue().extents.add(makeExtent("newTable", "a", null));
+    first.getValue().extents.add(makeExtent("newTable", "b", "a"));
+    first.getValue().extents.add(makeExtent("newTable", "c", "b"));
+    first.getValue().extents.add(makeExtent("newTable", "d", "c"));
+    first.getValue().extents.add(makeExtent("newTable", "e", "d"));
+    first.getValue().extents.add(makeExtent("newTable", "f", "e"));
+    first.getValue().extents.add(makeExtent("newTable", "g", "f"));
+    first.getValue().extents.add(makeExtent("newTable", "h", "g"));
+    first.getValue().extents.add(makeExtent("newTable", "i", null));
+    TestDefaultLoadBalancer balancer = new TestDefaultLoadBalancer();
+    Set<KeyExtent> migrations = Collections.emptySet();
+    int moved = 0;
+    // balance until we can't balance no more!
+    while (true) {
+      List<TabletMigration> migrationsOut = new ArrayList<TabletMigration>();
+      balancer.balance(getAssignments(servers), migrations, migrationsOut);
+      if (migrationsOut.size() == 0)
+        break;
+      for (TabletMigration migration : migrationsOut) {
+        if (servers.get(migration.oldServer).extents.remove(migration.tablet))
+          moved++;
+        servers.get(migration.newServer).extents.add(migration.tablet);
+      }
+    }
+    assertEquals(8, moved);
+  }
+  
+  @Test
+  public void testUnevenAssignment2() {
+    servers.clear();
+    // make 26 servers
+    for (char c : "abcdefghijklmnopqrstuvwxyz".toCharArray()) {
+      String cString = Character.toString(c);
+      InetSocketAddress fakeAddress = AddressUtil.parseAddress("127.0.0.1", (int) c);
+      String fakeInstance = cString;
+      TServerInstance tsi = new TServerInstance(fakeAddress, fakeInstance);
+      FakeTServer fakeTServer = new FakeTServer();
+      servers.put(tsi, fakeTServer);
+    }
+    // put 60 tablets on 25 of them
+    List<Entry<TServerInstance,FakeTServer>> shortList = new ArrayList<Entry<TServerInstance,FakeTServer>>(servers.entrySet());
+    Entry<TServerInstance,FakeTServer> shortServer = shortList.remove(0);
+    int c = 0;
+    for (int i = 0; i < 60; i++) {
+      for (Entry<TServerInstance,FakeTServer> entry : shortList) {
+        entry.getValue().extents.add(makeExtent("t" + c, null, null));
+      }
+    }
+    // put 10 on that short server:
+    for (int i = 0; i < 10; i++) {
+      shortServer.getValue().extents.add(makeExtent("s" + i, null, null));
+    }
+    
+    TestDefaultLoadBalancer balancer = new TestDefaultLoadBalancer();
+    Set<KeyExtent> migrations = Collections.emptySet();
+    int moved = 0;
+    // balance until we can't balance no more!
+    while (true) {
+      List<TabletMigration> migrationsOut = new ArrayList<TabletMigration>();
+      balancer.balance(getAssignments(servers), migrations, migrationsOut);
+      if (migrationsOut.size() == 0)
+        break;
+      for (TabletMigration migration : migrationsOut) {
+        if (servers.get(migration.oldServer).extents.remove(migration.tablet))
+          moved++;
+        servers.get(migration.newServer).extents.add(migration.tablet);
+      }
+    }
+    // average is 58, with 2 at 59: we need 48 more moved to the short server
+    assertEquals(48, moved);
+  }
+  
+  private void checkBalance(List<KeyExtent> metadataTable, Map<TServerInstance,FakeTServer> servers, Map<String,Integer> expectedCounts) {
+    // Verify they are spread evenly over the cluster
+    int average = metadataTable.size() / servers.size();
+    for (FakeTServer server : servers.values()) {
+      int diff = server.extents.size() - average;
+      if (diff < 0)
+        fail("average number of tablets is " + average + " but a server has " + server.extents.size());
+      if (diff > 1)
+        fail("average number of tablets is " + average + " but a server has " + server.extents.size());
+    }
+    
+    if (expectedCounts != null) {
+      for (FakeTServer server : servers.values()) {
+        Map<String,Integer> counts = new HashMap<String,Integer>();
+        for (KeyExtent extent : server.extents) {
+          String t = extent.getTableId().toString();
+          if (counts.get(t) == null)
+            counts.put(t, 0);
+          counts.put(t, counts.get(t) + 1);
+        }
+        for (Entry<String,Integer> entry : counts.entrySet()) {
+          assertEquals(expectedCounts.get(entry.getKey()), counts.get(entry.getKey()));
+        }
+      }
+    }
+  }
+  
+  private static KeyExtent makeExtent(String table, String end, String prev) {
+    return new KeyExtent(new Text(table), toText(end), toText(prev));
+  }
+  
+  private static Text toText(String value) {
+    if (value != null)
+      return new Text(value);
+    return null;
+  }
+  
+}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/88079cc3/server/master/src/test/java/org/apache/accumulo/master/balancer/TableLoadBalancerTest.java
----------------------------------------------------------------------
diff --git a/server/master/src/test/java/org/apache/accumulo/master/balancer/TableLoadBalancerTest.java b/server/master/src/test/java/org/apache/accumulo/master/balancer/TableLoadBalancerTest.java
new file mode 100644
index 0000000..653e1aa
--- /dev/null
+++ b/server/master/src/test/java/org/apache/accumulo/master/balancer/TableLoadBalancerTest.java
@@ -0,0 +1,164 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.master.balancer;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.SortedMap;
+import java.util.TreeMap;
+
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.admin.TableOperations;
+import org.apache.accumulo.core.client.impl.thrift.ThriftSecurityException;
+import org.apache.accumulo.core.client.mock.MockInstance;
+import org.apache.accumulo.core.client.security.tokens.PasswordToken;
+import org.apache.accumulo.core.data.KeyExtent;
+import org.apache.accumulo.core.master.thrift.TableInfo;
+import org.apache.accumulo.core.master.thrift.TabletServerStatus;
+import org.apache.accumulo.core.tabletserver.thrift.TabletStats;
+import org.apache.accumulo.core.util.AddressUtil;
+import org.apache.accumulo.master.state.TServerInstance;
+import org.apache.accumulo.master.state.TabletMigration;
+import org.apache.hadoop.io.Text;
+import org.apache.thrift.TException;
+import org.junit.Assert;
+import org.junit.Test;
+
+public class TableLoadBalancerTest {
+  
+  static private TServerInstance mkts(String address, String session) throws Exception {
+    return new TServerInstance(AddressUtil.parseAddress(address, 1234), session);
+  }
+  
+  static private TabletServerStatus status(Object... config) {
+    TabletServerStatus result = new TabletServerStatus();
+    result.tableMap = new HashMap<String,TableInfo>();
+    String tablename = null;
+    for (Object c : config) {
+      if (c instanceof String) {
+        tablename = (String) c;
+      } else {
+        TableInfo info = new TableInfo();
+        int count = (Integer) c;
+        info.onlineTablets = count;
+        info.tablets = count;
+        result.tableMap.put(tablename, info);
+      }
+    }
+    return result;
+  }
+  
+  static MockInstance instance = new MockInstance("mockamatic");
+  
+  static SortedMap<TServerInstance,TabletServerStatus> state;
+  
+  static List<TabletStats> generateFakeTablets(TServerInstance tserver, String tableId) {
+    List<TabletStats> result = new ArrayList<TabletStats>();
+    TabletServerStatus tableInfo = state.get(tserver);
+    // generate some fake tablets
+    for (int i = 0; i < tableInfo.tableMap.get(tableId).onlineTablets; i++) {
+      TabletStats stats = new TabletStats();
+      stats.extent = new KeyExtent(new Text(tableId), new Text(tserver.host() + String.format("%03d", i + 1)), new Text(tserver.host()
+          + String.format("%03d", i))).toThrift();
+      result.add(stats);
+    }
+    return result;
+  }
+  
+  static class DefaultLoadBalancer extends org.apache.accumulo.master.balancer.DefaultLoadBalancer {
+    
+    public DefaultLoadBalancer(String table) {
+      super(table);
+    }
+    
+    @Override
+    public List<TabletStats> getOnlineTabletsForTable(TServerInstance tserver, String tableId) throws ThriftSecurityException, TException {
+      return generateFakeTablets(tserver, tableId);
+    }
+  }
+  
+  // ugh... so wish I had provided mock objects to the LoadBalancer in the master
+  static class TableLoadBalancer extends org.apache.accumulo.master.balancer.TableLoadBalancer {
+    
+    TableLoadBalancer() {
+      super();
+    }
+    
+    // need to use our mock instance
+    @Override
+    protected TableOperations getTableOperations() {
+      try {
+        return instance.getConnector("user", new PasswordToken("pass")).tableOperations();
+      } catch (Exception e) {
+        throw new RuntimeException(e);
+      }
+    }
+    
+    // use our new classname to test class loading
+    @Override
+    protected String getLoadBalancerClassNameForTable(String table) {
+      return DefaultLoadBalancer.class.getName();
+    }
+    
+    // we don't have real tablet servers to ask: invent some online tablets
+    @Override
+    public List<TabletStats> getOnlineTabletsForTable(TServerInstance tserver, String tableId) throws ThriftSecurityException, TException {
+      return generateFakeTablets(tserver, tableId);
+    }
+  }
+  
+  @Test
+  public void test() throws Exception {
+    Connector c = instance.getConnector("user", new PasswordToken("pass"));
+    c.tableOperations().create("t1");
+    c.tableOperations().create("t2");
+    c.tableOperations().create("t3");
+    state = new TreeMap<TServerInstance,TabletServerStatus>();
+    TServerInstance svr = mkts("10.0.0.1:1234", "0x01020304");
+    state.put(svr, status("t1", 10, "t2", 10, "t3", 10));
+    
+    Set<KeyExtent> migrations = Collections.emptySet();
+    List<TabletMigration> migrationsOut = new ArrayList<TabletMigration>();
+    TableLoadBalancer tls = new TableLoadBalancer();
+    tls.balance(state, migrations, migrationsOut);
+    Assert.assertEquals(0, migrationsOut.size());
+    
+    state.put(mkts("10.0.0.2:1234", "0x02030405"), status());
+    tls = new TableLoadBalancer();
+    tls.balance(state, migrations, migrationsOut);
+    int count = 0;
+    Map<String,Integer> movedByTable = new HashMap<String,Integer>();
+    movedByTable.put("t1", new Integer(0));
+    movedByTable.put("t2", new Integer(0));
+    movedByTable.put("t3", new Integer(0));
+    for (TabletMigration migration : migrationsOut) {
+      if (migration.oldServer.equals(svr))
+        count++;
+      String key = migration.tablet.getTableId().toString();
+      movedByTable.put(key, movedByTable.get(key) + 1);
+    }
+    Assert.assertEquals(15, count);
+    for (Integer moved : movedByTable.values()) {
+      Assert.assertEquals(5, moved.intValue());
+    }
+  }
+  
+}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/88079cc3/server/master/src/test/java/org/apache/accumulo/master/state/MergeInfoTest.java
----------------------------------------------------------------------
diff --git a/server/master/src/test/java/org/apache/accumulo/master/state/MergeInfoTest.java b/server/master/src/test/java/org/apache/accumulo/master/state/MergeInfoTest.java
new file mode 100644
index 0000000..85ee5ec
--- /dev/null
+++ b/server/master/src/test/java/org/apache/accumulo/master/state/MergeInfoTest.java
@@ -0,0 +1,76 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.master.state;
+
+import org.apache.accumulo.core.data.KeyExtent;
+import org.apache.accumulo.master.state.MergeInfo;
+import org.apache.accumulo.master.state.MergeState;
+import org.apache.hadoop.io.DataInputBuffer;
+import org.apache.hadoop.io.DataOutputBuffer;
+import org.apache.hadoop.io.Text;
+import org.junit.Assert;
+import org.junit.Test;
+
+public class MergeInfoTest {
+  
+  MergeInfo readWrite(MergeInfo info) throws Exception {
+    DataOutputBuffer buffer = new DataOutputBuffer();
+    info.write(buffer);
+    DataInputBuffer in = new DataInputBuffer();
+    in.reset(buffer.getData(), 0, buffer.getLength());
+    MergeInfo info2 = new MergeInfo();
+    info2.readFields(in);
+    Assert.assertEquals(info.extent, info2.extent);
+    Assert.assertEquals(info.state, info2.state);
+    Assert.assertEquals(info.operation, info2.operation);
+    return info2;
+  }
+  
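+  // Builds a KeyExtent for the given table id; a null end row or prev end row leaves that bound open.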
+  KeyExtent ke(String tableId, String endRow, String prevEndRow) {
+    return new KeyExtent(new Text(tableId), endRow == null ? null : new Text(endRow), prevEndRow == null ? null : new Text(prevEndRow));
+  }
+  
+  @Test
+  public void testWritable() throws Exception {
+    MergeInfo info;
+    info = readWrite(new MergeInfo(ke("a", null, "b"), MergeInfo.Operation.MERGE));
+    info = readWrite(new MergeInfo(ke("a", "b", null), MergeInfo.Operation.MERGE));
+    info = readWrite(new MergeInfo(ke("x", "b", "a"), MergeInfo.Operation.MERGE));
+    info = readWrite(new MergeInfo(ke("x", "b", "a"), MergeInfo.Operation.DELETE));
+    Assert.assertTrue(info.isDelete());
+    info.setState(MergeState.COMPLETE);
+  }
+  
+  @Test
+  public void testNeedsToBeChopped() throws Exception {
+    MergeInfo info = new MergeInfo(ke("x", "b", "a"), MergeInfo.Operation.DELETE);
+    Assert.assertTrue(info.needsToBeChopped(ke("x", "c", "b")));
+    Assert.assertTrue(info.overlaps(ke("x", "c", "b")));
+    Assert.assertFalse(info.needsToBeChopped(ke("y", "c", "b")));
+    Assert.assertFalse(info.needsToBeChopped(ke("x", "c", "bb")));
+    Assert.assertFalse(info.needsToBeChopped(ke("x", "b", "a")));
+    info = new MergeInfo(ke("x", "b", "a"), MergeInfo.Operation.MERGE);
+    Assert.assertTrue(info.needsToBeChopped(ke("x", "c", "a")));
+    Assert.assertTrue(info.needsToBeChopped(ke("x", "aa", "a")));
+    Assert.assertTrue(info.needsToBeChopped(ke("x", null, null)));
+    Assert.assertFalse(info.needsToBeChopped(ke("x", "c", "b")));
+    Assert.assertFalse(info.needsToBeChopped(ke("y", "c", "b")));
+    Assert.assertFalse(info.needsToBeChopped(ke("x", "c", "bb")));
+    Assert.assertTrue(info.needsToBeChopped(ke("x", "b", "a")));
+  }
+  
+}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/88079cc3/server/master/src/test/java/org/apache/accumulo/master/state/RootTabletStateStoreTest.java
----------------------------------------------------------------------
diff --git a/server/master/src/test/java/org/apache/accumulo/master/state/RootTabletStateStoreTest.java b/server/master/src/test/java/org/apache/accumulo/master/state/RootTabletStateStoreTest.java
new file mode 100644
index 0000000..158e948
--- /dev/null
+++ b/server/master/src/test/java/org/apache/accumulo/master/state/RootTabletStateStoreTest.java
@@ -0,0 +1,218 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.master.state;
+
+import static org.junit.Assert.assertArrayEquals;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.fail;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.List;
+
+import org.apache.accumulo.core.data.KeyExtent;
+import org.apache.accumulo.core.metadata.RootTable;
+import org.apache.accumulo.core.util.AddressUtil;
+import org.apache.accumulo.master.state.Assignment;
+import org.apache.accumulo.master.state.DistributedStore;
+import org.apache.accumulo.master.state.DistributedStoreException;
+import org.apache.accumulo.master.state.TServerInstance;
+import org.apache.accumulo.master.state.TabletLocationState;
+import org.apache.accumulo.master.state.ZooTabletStateStore;
+import org.apache.accumulo.master.state.TabletLocationState.BadLocationStateException;
+import org.apache.hadoop.io.Text;
+import org.junit.Assert;
+import org.junit.Test;
+
+public class RootTabletStateStoreTest {
+  
+  static class Node {
+    Node(String name) {
+      this.name = name;
+    }
+    
+    List<Node> children = new ArrayList<Node>();
+    String name;
+    byte[] value = new byte[] {};
+    
+    Node find(String name) {
+      for (Node node : children)
+        if (node.name.equals(name))
+          return node;
+      return null;
+    }
+  }
+  
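+  // Minimal in-memory DistributedStore backed by the Node tree above, standing in for ZooKeeper in these tests.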
+  static class FakeZooStore implements DistributedStore {
+    
+    Node root = new Node("/");
+    
+    private Node recurse(Node root, String[] path, int depth) {
+      if (depth == path.length)
+        return root;
+      Node child = root.find(path[depth]);
+      if (child == null)
+        return null;
+      return recurse(child, path, depth + 1);
+    }
+    
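+    // Paths look like "/a/b/c"; split("/") produces a leading empty element, so traversal starts at index 1.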
+    private Node navigate(String path) {
+      path = path.replaceAll("/$", "");
+      return recurse(root, path.split("/"), 1);
+    }
+    
+    @Override
+    public List<String> getChildren(String path) throws DistributedStoreException {
+      Node node = navigate(path);
+      if (node == null)
+        return Collections.emptyList();
+      List<String> children = new ArrayList<String>(node.children.size());
+      for (Node child : node.children)
+        children.add(child.name);
+      return children;
+    }
+    
+    @Override
+    public void put(String path, byte[] bs) throws DistributedStoreException {
+      create(path).value = bs;
+    }
+    
+    private Node create(String path) {
+      String[] parts = path.split("/");
+      return recurseCreate(root, parts, 1);
+    }
+    
+    private Node recurseCreate(Node root, String[] path, int index) {
+      if (path.length == index)
+        return root;
+      Node node = root.find(path[index]);
+      if (node == null) {
+        node = new Node(path[index]);
+        root.children.add(node);
+      }
+      return recurseCreate(node, path, index + 1);
+    }
+    
+    @Override
+    public void remove(String path) throws DistributedStoreException {
+      String[] parts = path.split("/");
+      String[] parentPath = Arrays.copyOf(parts, parts.length - 1);
+      Node parent = recurse(root, parentPath, 1);
+      if (parent == null)
+        return;
+      Node child = parent.find(parts[parts.length - 1]);
+      if (child != null)
+        parent.children.remove(child);
+    }
+    
+    @Override
+    public byte[] get(String path) throws DistributedStoreException {
+      Node node = navigate(path);
+      if (node != null)
+        return node.value;
+      return null;
+    }
+  }
+  
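+  // Sanity-checks the fake store: put/get round-trips, listing children, and removal.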
+  @Test
+  public void testFakeZoo() throws DistributedStoreException {
+    DistributedStore store = new FakeZooStore();
+    store.put("/a/b/c", "abc".getBytes());
+    byte[] abc = store.get("/a/b/c");
+    assertArrayEquals(abc, "abc".getBytes());
+    byte[] empty = store.get("/a/b");
+    assertArrayEquals(empty, "".getBytes());
+    store.put("/a/b", "ab".getBytes());
+    assertArrayEquals(store.get("/a/b"), "ab".getBytes());
+    store.put("/a/b/b", "abb".getBytes());
+    List<String> children = store.getChildren("/a/b");
+    assertEquals(new HashSet<String>(children), new HashSet<String>(Arrays.asList("b", "c")));
+    store.remove("/a/b/c");
+    children = store.getChildren("/a/b");
+    assertEquals(new HashSet<String>(children), new HashSet<String>(Arrays.asList("b")));
+  }
+  
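+  // Walks the root tablet through future -> current -> unassigned location states,
+  // then verifies that assignments for non-root extents are rejected.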
+  @Test
+  public void testRootTabletStateStore() throws DistributedStoreException {
+    ZooTabletStateStore tstore = new ZooTabletStateStore(new FakeZooStore());
+    KeyExtent root = RootTable.EXTENT;
+    String sessionId = "this is my unique session data";
+    TServerInstance server = new TServerInstance(AddressUtil.parseAddress("127.0.0.1", 10000), sessionId);
+    List<Assignment> assignments = Collections.singletonList(new Assignment(root, server));
+    tstore.setFutureLocations(assignments);
+    int count = 0;
+    for (TabletLocationState location : tstore) {
+      assertEquals(location.extent, root);
+      assertEquals(location.future, server);
+      assertNull(location.current);
+      count++;
+    }
+    assertEquals(count, 1);
+    tstore.setLocations(assignments);
+    count = 0;
+    for (TabletLocationState location : tstore) {
+      assertEquals(location.extent, root);
+      assertNull(location.future);
+      assertEquals(location.current, server);
+      count++;
+    }
+    assertEquals(count, 1);
+    TabletLocationState assigned = null;
+    try {
+      assigned = new TabletLocationState(root, server, null, null, null, false);
+    } catch (BadLocationStateException e) {
+      fail("Unexpected error " + e);
+    }
+    tstore.unassign(Collections.singletonList(assigned));
+    count = 0;
+    for (TabletLocationState location : tstore) {
+      assertEquals(location.extent, root);
+      assertNull(location.future);
+      assertNull(location.current);
+      count++;
+    }
+    assertEquals(count, 1);
+    
+    KeyExtent notRoot = new KeyExtent(new Text("0"), null, null);
+    try {
+      tstore.setLocations(Collections.singletonList(new Assignment(notRoot, server)));
+      Assert.fail("should not get here");
+    } catch (IllegalArgumentException ex) {}
+    
+    try {
+      tstore.setFutureLocations(Collections.singletonList(new Assignment(notRoot, server)));
+      Assert.fail("should not get here");
+    } catch (IllegalArgumentException ex) {}
+    
+    TabletLocationState broken = null;
+    try {
+      broken = new TabletLocationState(notRoot, server, null, null, null, false);
+    } catch (BadLocationStateException e) {
+      fail("Unexpected error " + e);
+    }
+    try {
+      tstore.unassign(Collections.singletonList(broken));
+      Assert.fail("should not get here");
+    } catch (IllegalArgumentException ex) {}
+  }
+  
+  // @Test
+  // public void testMetaDataStore() { } // see functional test
+}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/88079cc3/server/monitor/src/main/java/org/apache/accumulo/monitor/Monitor.java
----------------------------------------------------------------------
diff --git a/server/monitor/src/main/java/org/apache/accumulo/monitor/Monitor.java b/server/monitor/src/main/java/org/apache/accumulo/monitor/Monitor.java
index c82d4d4..1337c38 100644
--- a/server/monitor/src/main/java/org/apache/accumulo/monitor/Monitor.java
+++ b/server/monitor/src/main/java/org/apache/accumulo/monitor/Monitor.java
@@ -34,7 +34,6 @@ import org.apache.accumulo.core.conf.AccumuloConfiguration;
 import org.apache.accumulo.core.conf.Property;
 import org.apache.accumulo.core.gc.thrift.GCMonitorService;
 import org.apache.accumulo.core.gc.thrift.GCStatus;
-import org.apache.accumulo.core.master.thrift.Compacting;
 import org.apache.accumulo.core.master.thrift.MasterClientService;
 import org.apache.accumulo.core.master.thrift.MasterMonitorInfo;
 import org.apache.accumulo.core.master.thrift.TableInfo;
@@ -72,6 +71,7 @@ import org.apache.accumulo.server.fs.VolumeManagerImpl;
 import org.apache.accumulo.server.problems.ProblemReports;
 import org.apache.accumulo.server.problems.ProblemType;
 import org.apache.accumulo.server.security.SystemCredentials;
+import org.apache.accumulo.server.util.TableInfoUtil;
 import org.apache.accumulo.trace.instrument.Tracer;
 import org.apache.log4j.Logger;
 import org.apache.zookeeper.WatchedEvent;
@@ -84,7 +84,7 @@ import org.apache.zookeeper.ZooKeeper;
 public class Monitor {
   private static final Logger log = Logger.getLogger(Monitor.class);
   
-  public static final int REFRESH_TIME = 5;
+  private static final int REFRESH_TIME = 5;
   private static long lastRecalc = 0L;
   private static double totalIngestRate = 0.0;
   private static double totalIngestByteRate = 0.0;
@@ -150,62 +150,6 @@ public class Monitor {
   
   private static EmbeddedWebServer server;
   
-  public static Map<String,Double> summarizeTableStats(MasterMonitorInfo mmi) {
-    Map<String,Double> compactingByTable = new HashMap<String,Double>();
-    if (mmi != null && mmi.tServerInfo != null) {
-      for (TabletServerStatus status : mmi.tServerInfo) {
-        if (status != null && status.tableMap != null) {
-          for (String table : status.tableMap.keySet()) {
-            Double holdTime = compactingByTable.get(table);
-            compactingByTable.put(table, Math.max(holdTime == null ? 0. : holdTime.doubleValue(), status.holdTime));
-          }
-        }
-      }
-    }
-    return compactingByTable;
-  }
-  
-  public static void add(TableInfo total, TableInfo more) {
-    if (total.minors == null)
-      total.minors = new Compacting();
-    if (total.majors == null)
-      total.majors = new Compacting();
-    if (total.scans == null)
-      total.scans = new Compacting();
-    if (more.minors != null) {
-      total.minors.running += more.minors.running;
-      total.minors.queued += more.minors.queued;
-    }
-    if (more.majors != null) {
-      total.majors.running += more.majors.running;
-      total.majors.queued += more.majors.queued;
-    }
-    if (more.scans != null) {
-      total.scans.running += more.scans.running;
-      total.scans.queued += more.scans.queued;
-    }
-    total.onlineTablets += more.onlineTablets;
-    total.recs += more.recs;
-    total.recsInMemory += more.recsInMemory;
-    total.tablets += more.tablets;
-    total.ingestRate += more.ingestRate;
-    total.ingestByteRate += more.ingestByteRate;
-    total.queryRate += more.queryRate;
-    total.queryByteRate += more.queryByteRate;
-    total.scanRate += more.scanRate;
-  }
-  
-  public static TableInfo summarizeTableStats(TabletServerStatus status) {
-    TableInfo summary = new TableInfo();
-    summary.majors = new Compacting();
-    summary.minors = new Compacting();
-    summary.scans = new Compacting();
-    for (TableInfo rates : status.tableMap.values()) {
-      add(summary, rates);
-    }
-    return summary;
-  }
-  
   private static class EventCounter {
     
     Map<String,Pair<Long,Long>> prevSamples = new HashMap<String,Pair<Long,Long>>();
@@ -320,7 +264,7 @@ public class Monitor {
         dataCacheRequestTracker.startingUpdates();
         
         for (TabletServerStatus server : mmi.tServerInfo) {
-          TableInfo summary = Monitor.summarizeTableStats(server);
+          TableInfo summary = TableInfoUtil.summarizeTableStats(server);
           totalIngestRate += summary.ingestRate;
           totalIngestByteRate += summary.ingestByteRate;
           totalQueryRate += summary.queryRate;

http://git-wip-us.apache.org/repos/asf/accumulo/blob/88079cc3/server/monitor/src/main/java/org/apache/accumulo/monitor/servlets/JSONServlet.java
----------------------------------------------------------------------
diff --git a/server/monitor/src/main/java/org/apache/accumulo/monitor/servlets/JSONServlet.java b/server/monitor/src/main/java/org/apache/accumulo/monitor/servlets/JSONServlet.java
index 8980435..09dbb6e 100644
--- a/server/monitor/src/main/java/org/apache/accumulo/monitor/servlets/JSONServlet.java
+++ b/server/monitor/src/main/java/org/apache/accumulo/monitor/servlets/JSONServlet.java
@@ -30,6 +30,7 @@ import org.apache.accumulo.core.master.thrift.TableInfo;
 import org.apache.accumulo.core.master.thrift.TabletServerStatus;
 import org.apache.accumulo.monitor.Monitor;
 import org.apache.accumulo.monitor.util.celltypes.TServerLinkType;
+import org.apache.accumulo.server.util.TableInfoUtil;
 
 import com.google.gson.Gson;
 
@@ -74,7 +75,7 @@ public class JSONServlet extends BasicServlet {
     List<Map<String,Object>> servers = new ArrayList<Map<String,Object>>();
     
     for (TabletServerStatus status : Monitor.getMmi().tServerInfo) {
-      TableInfo summary = Monitor.summarizeTableStats(status);
+      TableInfo summary = TableInfoUtil.summarizeTableStats(status);
       servers.add(addServer(status.name, TServerLinkType.displayName(status.name), status.osLoad, summary.ingestRate, summary.queryRate,
           summary.ingestByteRate / 1000000.0, summary.queryByteRate / 1000000.0, summary.scans.running + summary.scans.queued, Monitor.getLookupRate(),
           status.holdTime));

http://git-wip-us.apache.org/repos/asf/accumulo/blob/88079cc3/server/monitor/src/main/java/org/apache/accumulo/monitor/servlets/OperationServlet.java
----------------------------------------------------------------------
diff --git a/server/monitor/src/main/java/org/apache/accumulo/monitor/servlets/OperationServlet.java b/server/monitor/src/main/java/org/apache/accumulo/monitor/servlets/OperationServlet.java
index 17c6a80..4581d96 100644
--- a/server/monitor/src/main/java/org/apache/accumulo/monitor/servlets/OperationServlet.java
+++ b/server/monitor/src/main/java/org/apache/accumulo/monitor/servlets/OperationServlet.java
@@ -25,9 +25,9 @@ import javax.servlet.http.HttpServletResponse;
 import org.apache.accumulo.core.Constants;
 import org.apache.accumulo.core.client.Instance;
 import org.apache.accumulo.core.zookeeper.ZooUtil;
+import org.apache.accumulo.master.state.DeadServerList;
 import org.apache.accumulo.monitor.LogService;
 import org.apache.accumulo.server.client.HdfsZooInstance;
-import org.apache.accumulo.server.master.state.DeadServerList;
 import org.apache.accumulo.server.problems.ProblemReports;
 import org.apache.accumulo.server.problems.ProblemType;
 import org.apache.log4j.Logger;

http://git-wip-us.apache.org/repos/asf/accumulo/blob/88079cc3/server/monitor/src/main/java/org/apache/accumulo/monitor/servlets/TServersServlet.java
----------------------------------------------------------------------
diff --git a/server/monitor/src/main/java/org/apache/accumulo/monitor/servlets/TServersServlet.java b/server/monitor/src/main/java/org/apache/accumulo/monitor/servlets/TServersServlet.java
index c2cb943..48b1feb 100644
--- a/server/monitor/src/main/java/org/apache/accumulo/monitor/servlets/TServersServlet.java
+++ b/server/monitor/src/main/java/org/apache/accumulo/monitor/servlets/TServersServlet.java
@@ -38,6 +38,7 @@ import org.apache.accumulo.core.tabletserver.thrift.TabletStats;
 import org.apache.accumulo.core.util.AddressUtil;
 import org.apache.accumulo.core.util.Duration;
 import org.apache.accumulo.core.util.ThriftUtil;
+import org.apache.accumulo.master.state.TabletServerState;
 import org.apache.accumulo.monitor.Monitor;
 import org.apache.accumulo.monitor.util.Table;
 import org.apache.accumulo.monitor.util.TableRow;
@@ -49,9 +50,9 @@ import org.apache.accumulo.monitor.util.celltypes.PercentageType;
 import org.apache.accumulo.monitor.util.celltypes.ProgressChartType;
 import org.apache.accumulo.monitor.util.celltypes.TServerLinkType;
 import org.apache.accumulo.monitor.util.celltypes.TableLinkType;
-import org.apache.accumulo.server.master.state.TabletServerState;
 import org.apache.accumulo.server.security.SystemCredentials;
 import org.apache.accumulo.server.util.ActionStatsUpdator;
+import org.apache.accumulo.server.util.TableInfoUtil;
 import org.apache.accumulo.trace.instrument.Tracer;
 import org.apache.commons.codec.binary.Base64;
 
@@ -339,7 +340,7 @@ public class TServersServlet extends BasicServlet {
     for (TabletServerStatus status : tservers) {
       if (status == null)
         status = NO_STATUS;
-      TableInfo summary = Monitor.summarizeTableStats(status);
+      TableInfo summary = TableInfoUtil.summarizeTableStats(status);
       if (tableId != null)
         summary = status.tableMap.get(tableId);
       if (summary == null)

http://git-wip-us.apache.org/repos/asf/accumulo/blob/88079cc3/server/monitor/src/main/java/org/apache/accumulo/monitor/servlets/TablesServlet.java
----------------------------------------------------------------------
diff --git a/server/monitor/src/main/java/org/apache/accumulo/monitor/servlets/TablesServlet.java b/server/monitor/src/main/java/org/apache/accumulo/monitor/servlets/TablesServlet.java
index aad618b..4efedd8 100644
--- a/server/monitor/src/main/java/org/apache/accumulo/monitor/servlets/TablesServlet.java
+++ b/server/monitor/src/main/java/org/apache/accumulo/monitor/servlets/TablesServlet.java
@@ -35,6 +35,8 @@ import org.apache.accumulo.core.master.thrift.TableInfo;
 import org.apache.accumulo.core.master.thrift.TabletServerStatus;
 import org.apache.accumulo.core.metadata.MetadataTable;
 import org.apache.accumulo.core.metadata.RootTable;
+import org.apache.accumulo.master.state.MetaDataTableScanner;
+import org.apache.accumulo.master.state.TabletLocationState;
 import org.apache.accumulo.monitor.Monitor;
 import org.apache.accumulo.monitor.util.Table;
 import org.apache.accumulo.monitor.util.TableRow;
@@ -44,10 +46,9 @@ import org.apache.accumulo.monitor.util.celltypes.NumberType;
 import org.apache.accumulo.monitor.util.celltypes.TableLinkType;
 import org.apache.accumulo.monitor.util.celltypes.TableStateType;
 import org.apache.accumulo.server.client.HdfsZooInstance;
-import org.apache.accumulo.server.master.state.MetaDataTableScanner;
-import org.apache.accumulo.server.master.state.TabletLocationState;
 import org.apache.accumulo.server.security.SystemCredentials;
 import org.apache.accumulo.server.tables.TableManager;
+import org.apache.accumulo.server.util.TableInfoUtil;
 import org.apache.hadoop.io.Text;
 
 public class TablesServlet extends BasicServlet {
@@ -113,7 +114,7 @@ public class TablesServlet extends BasicServlet {
       for (Entry<String,TableInfo> te : Monitor.getMmi().tableMap.entrySet())
         tableStats.put(Tables.getPrintableTableNameFromId(tidToNameMap, te.getKey()), te.getValue());
     
-    Map<String,Double> compactingByTable = Monitor.summarizeTableStats(Monitor.getMmi());
+    Map<String,Double> compactingByTable = TableInfoUtil.summarizeTableStats(Monitor.getMmi());
     TableManager tableManager = TableManager.getInstance();
     
     for (Entry<String,String> tableName_tableId : Tables.getNameToIdMap(HdfsZooInstance.getInstance()).entrySet()) {

http://git-wip-us.apache.org/repos/asf/accumulo/blob/88079cc3/server/monitor/src/main/java/org/apache/accumulo/monitor/servlets/XMLServlet.java
----------------------------------------------------------------------
diff --git a/server/monitor/src/main/java/org/apache/accumulo/monitor/servlets/XMLServlet.java b/server/monitor/src/main/java/org/apache/accumulo/monitor/servlets/XMLServlet.java
index 05b47d4..e9c4bd3 100644
--- a/server/monitor/src/main/java/org/apache/accumulo/monitor/servlets/XMLServlet.java
+++ b/server/monitor/src/main/java/org/apache/accumulo/monitor/servlets/XMLServlet.java
@@ -30,10 +30,11 @@ import org.apache.accumulo.core.master.thrift.Compacting;
 import org.apache.accumulo.core.master.thrift.DeadServer;
 import org.apache.accumulo.core.master.thrift.TableInfo;
 import org.apache.accumulo.core.master.thrift.TabletServerStatus;
+import org.apache.accumulo.master.state.TabletServerState;
 import org.apache.accumulo.monitor.Monitor;
 import org.apache.accumulo.monitor.util.celltypes.TServerLinkType;
 import org.apache.accumulo.server.client.HdfsZooInstance;
-import org.apache.accumulo.server.master.state.TabletServerState;
+import org.apache.accumulo.server.util.TableInfoUtil;
 
 public class XMLServlet extends BasicServlet {
   private static final long serialVersionUID = 1L;
@@ -71,7 +72,7 @@ public class XMLServlet extends BasicServlet {
       sb.append("<lastContact>").append(System.currentTimeMillis() - status.lastContact).append("</lastContact>\n");
       sb.append("<osload>").append(status.osLoad).append("</osload>\n");
       
-      TableInfo summary = Monitor.summarizeTableStats(status);
+      TableInfo summary = TableInfoUtil.summarizeTableStats(status);
       sb.append("<compactions>\n");
       sb.append("<major>").append("<running>").append(summary.majors.running).append("</running>").append("<queued>").append(summary.majors.queued)
           .append("</queued>").append("</major>\n");

http://git-wip-us.apache.org/repos/asf/accumulo/blob/88079cc3/server/pom.xml
----------------------------------------------------------------------
diff --git a/server/pom.xml b/server/pom.xml
index e1dfc2f..4a79c58 100644
--- a/server/pom.xml
+++ b/server/pom.xml
@@ -28,7 +28,9 @@
   <modules>
     <module>base</module>
     <module>gc</module>
+    <module>master</module>
     <module>monitor</module>
     <module>server</module>
+    <module>tserver</module>
   </modules>
 </project>

http://git-wip-us.apache.org/repos/asf/accumulo/blob/88079cc3/server/server/pom.xml
----------------------------------------------------------------------
diff --git a/server/server/pom.xml b/server/server/pom.xml
index 401d361..841e6e2 100644
--- a/server/server/pom.xml
+++ b/server/server/pom.xml
@@ -51,6 +51,10 @@
     </dependency>
     <dependency>
       <groupId>org.apache.accumulo</groupId>
+      <artifactId>accumulo-master</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.accumulo</groupId>
       <artifactId>accumulo-monitor</artifactId>
     </dependency>
     <dependency>
@@ -62,6 +66,10 @@
       <artifactId>accumulo-trace</artifactId>
     </dependency>
     <dependency>
+      <groupId>org.apache.accumulo</groupId>
+      <artifactId>accumulo-tserver</artifactId>
+    </dependency>
+    <dependency>
       <groupId>org.apache.thrift</groupId>
       <artifactId>libthrift</artifactId>
     </dependency>

http://git-wip-us.apache.org/repos/asf/accumulo/blob/88079cc3/server/server/src/main/java/org/apache/accumulo/server/data/ServerColumnUpdate.java
----------------------------------------------------------------------
diff --git a/server/server/src/main/java/org/apache/accumulo/server/data/ServerColumnUpdate.java b/server/server/src/main/java/org/apache/accumulo/server/data/ServerColumnUpdate.java
deleted file mode 100644
index af992a6..0000000
--- a/server/server/src/main/java/org/apache/accumulo/server/data/ServerColumnUpdate.java
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.server.data;
-
-import org.apache.accumulo.core.data.ColumnUpdate;
-
-public class ServerColumnUpdate extends ColumnUpdate {
-  
-  ServerMutation parent;
-
-  public ServerColumnUpdate(byte[] cf, byte[] cq, byte[] cv, boolean hasts, long ts, boolean deleted, byte[] val, ServerMutation serverMutation) {
-    super(cf, cq, cv, hasts, ts, deleted, val);
-    parent = serverMutation;
-  }
-
-  public long getTimestamp() {
-    if (hasTimestamp())
-      return super.getTimestamp();
-    return parent.getSystemTimestamp();
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/88079cc3/server/server/src/main/java/org/apache/accumulo/server/data/ServerMutation.java
----------------------------------------------------------------------
diff --git a/server/server/src/main/java/org/apache/accumulo/server/data/ServerMutation.java b/server/server/src/main/java/org/apache/accumulo/server/data/ServerMutation.java
deleted file mode 100644
index 28a3515..0000000
--- a/server/server/src/main/java/org/apache/accumulo/server/data/ServerMutation.java
+++ /dev/null
@@ -1,81 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.server.data;
-
-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.IOException;
-
-import org.apache.accumulo.core.data.ColumnUpdate;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.thrift.TMutation;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.io.WritableUtils;
-
-/**
- * Mutation that holds system time as computed by the tablet server when not provided by the user.
- */
-public class ServerMutation extends Mutation {
-  private long systemTime = 0l;
-  
-  public ServerMutation(TMutation tmutation) {
-    super(tmutation);
-  }
-
-  public ServerMutation(Text key) {
-    super(key);
-  }
-
-  public ServerMutation() {
-  }
-
-  protected void droppingOldTimestamp(long ts) {
-    this.systemTime = ts;
-  }
-
-  @Override
-  public void readFields(DataInput in) throws IOException {
-    super.readFields(in);
-    // new format writes system time with the mutation
-    if (getSerializedFormat() == SERIALIZED_FORMAT.VERSION2)
-      systemTime = WritableUtils.readVLong(in);
-  }
-  
-  @Override
-  public void write(DataOutput out) throws IOException {
-    super.write(out);
-    WritableUtils.writeVLong(out, systemTime);
-  }
-
-  public void setSystemTimestamp(long v) {
-    this.systemTime = v;
-  }
-  
-  public long getSystemTimestamp() {
-    return this.systemTime;
-  }
-
-  @Override
-  protected ColumnUpdate newColumnUpdate(byte[] cf, byte[] cq, byte[] cv, boolean hasts, long ts, boolean deleted, byte[] val) {
-    return new ServerColumnUpdate(cf, cq, cv, hasts, ts, deleted, val, this);
-  }
-
-  @Override
-  public long estimatedMemoryUsed() {
-    return super.estimatedMemoryUsed() + 8;
-  }
-}

