accumulo-commits mailing list archives

From ktur...@apache.org
Subject git commit: ACCUMULO-1773 made AGC use relative paths for decisions, made AGC algorithm testable, fixed some bugs w/ create and clone table
Date Tue, 29 Oct 2013 03:34:51 GMT
Updated Branches:
  refs/heads/master 7200e43a6 -> 26a8f5f52


ACCUMULO-1773 made AGC use relative paths for decisions, made AGC algorithm testable, fixed some bugs w/ create and clone table


Project: http://git-wip-us.apache.org/repos/asf/accumulo/repo
Commit: http://git-wip-us.apache.org/repos/asf/accumulo/commit/26a8f5f5
Tree: http://git-wip-us.apache.org/repos/asf/accumulo/tree/26a8f5f5
Diff: http://git-wip-us.apache.org/repos/asf/accumulo/diff/26a8f5f5

Branch: refs/heads/master
Commit: 26a8f5f52a2729cc69611fbb216828ac656b0647
Parents: 7200e43
Author: Keith Turner <kturner@apache.org>
Authored: Mon Oct 28 23:29:33 2013 -0400
Committer: Keith Turner <kturner@apache.org>
Committed: Mon Oct 28 23:29:33 2013 -0400

----------------------------------------------------------------------
 .../server/gc/GarbageCollectionAlgorithm.java   | 271 +++++++
 .../server/gc/GarbageCollectionEnvironment.java |  52 ++
 .../server/gc/SimpleGarbageCollector.java       | 745 +++++++------------
 .../server/master/tableOps/CloneTable.java      |   2 +-
 .../server/master/tableOps/CreateTable.java     |  41 +-
 .../accumulo/server/util/MetadataTableUtil.java |   6 +-
 .../server/gc/GarbageCollectionTest.java        | 453 +++++++++++
 .../accumulo/server/gc/TestConfirmDeletes.java  | 139 ----
 8 files changed, 1100 insertions(+), 609 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/accumulo/blob/26a8f5f5/server/src/main/java/org/apache/accumulo/server/gc/GarbageCollectionAlgorithm.java
----------------------------------------------------------------------
diff --git a/server/src/main/java/org/apache/accumulo/server/gc/GarbageCollectionAlgorithm.java b/server/src/main/java/org/apache/accumulo/server/gc/GarbageCollectionAlgorithm.java
new file mode 100644
index 0000000..04d4c5d
--- /dev/null
+++ b/server/src/main/java/org/apache/accumulo/server/gc/GarbageCollectionAlgorithm.java
@@ -0,0 +1,271 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.server.gc;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Locale;
+import java.util.Map.Entry;
+import java.util.Set;
+import java.util.SortedMap;
+import java.util.TreeMap;
+
+import org.apache.accumulo.core.Constants;
+import org.apache.accumulo.core.client.AccumuloException;
+import org.apache.accumulo.core.client.AccumuloSecurityException;
+import org.apache.accumulo.core.client.TableNotFoundException;
+import org.apache.accumulo.core.data.Key;
+import org.apache.accumulo.core.data.KeyExtent;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.DataFileColumnFamily;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.ScanFileColumnFamily;
+import org.apache.accumulo.server.ServerConstants;
+import org.apache.accumulo.trace.instrument.Span;
+import org.apache.accumulo.trace.instrument.Trace;
+import org.apache.hadoop.io.Text;
+import org.apache.log4j.Logger;
+
+/**
+ * Core garbage collection logic, factored out of SimpleGarbageCollector so it can be unit tested
+ * against a mock {@link GarbageCollectionEnvironment}.
+ */
+public class GarbageCollectionAlgorithm {
+
+  private static final Logger log = Logger.getLogger(GarbageCollectionAlgorithm.class);
+
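+  /**
+   * Reduces a path to the relative form used for all GC decisions: tableId/tabletDir for a
+   * directory (expectedLen 2) or tableId/tabletDir/file for a file (expectedLen 3); expectedLen 0
+   * accepts either. Longer paths must be absolute URIs rooted in the tables directory; anything
+   * else is rejected with IllegalArgumentException.
+   */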
+  private String makeRelative(String path, int expectedLen) {
+    String relPath = path;
+
+    if (relPath.startsWith("../"))
+      relPath = relPath.substring(3);
+
+    while (relPath.endsWith("/"))
+      relPath = relPath.substring(0, relPath.length() - 1);
+
+    while (relPath.startsWith("/"))
+      relPath = relPath.substring(1);
+
+    String[] tokens = relPath.split("/");
+
+    // handle paths like a//b///c
+    boolean containsEmpty = false;
+    for (String token : tokens) {
+      if (token.equals("")) {
+        containsEmpty = true;
+        break;
+      }
+    }
+
+    if (containsEmpty) {
+      ArrayList<String> tmp = new ArrayList<String>();
+      for (String token : tokens) {
+        if (!token.equals("")) {
+          tmp.add(token);
+        }
+      }
+
+      tokens = tmp.toArray(new String[tmp.size()]);
+    }
+
+    if (tokens.length > 3) {
+      if (!path.contains(":"))
+        throw new IllegalArgumentException(path);
+
+      if (tokens[tokens.length - 4].equals(ServerConstants.TABLE_DIR) && (expectedLen == 0 || expectedLen == 3)) {
+        relPath = tokens[tokens.length - 3] + "/" + tokens[tokens.length - 2] + "/" + tokens[tokens.length - 1];
+      } else if (tokens[tokens.length - 3].equals(ServerConstants.TABLE_DIR) && (expectedLen == 0 || expectedLen == 2)) {
+        relPath = tokens[tokens.length - 2] + "/" + tokens[tokens.length - 1];
+      } else {
+        throw new IllegalArgumentException(path);
+      }
+    } else if (tokens.length == 3 && (expectedLen == 0 || expectedLen == 3)) {
+      relPath = tokens[0] + "/" + tokens[1] + "/" + tokens[2];
+    } else if (tokens.length == 2 && (expectedLen == 0 || expectedLen == 2)) {
+      relPath = tokens[0] + "/" + tokens[1];
+    } else {
+      throw new IllegalArgumentException(path);
+    }
+
+    return relPath;
+  }
+
+  private SortedMap<String,String> makeRelative(Collection<String> candidates) {
+
+    SortedMap<String,String> ret = new TreeMap<String,String>();
+
+    for (String candidate : candidates) {
+      String relPath = makeRelative(candidate, 0);
+      ret.put(relPath, candidate);
+    }
+
+    return ret;
+  }
+
+  protected void confirmDeletes(GarbageCollectionEnvironment gce, SortedMap<String,String> candidateMap) throws TableNotFoundException, AccumuloException,
+      AccumuloSecurityException {
+    boolean checkForBulkProcessingFiles = false;
+    for (String candidate : candidateMap.keySet())
+      checkForBulkProcessingFiles |= candidate.toLowerCase(Locale.ENGLISH).contains(Constants.BULK_PREFIX);
+
+    if (checkForBulkProcessingFiles) {
+      Iterator<String> blipiter = gce.getBlipIterator();
+
+      // WARNING: This block is IMPORTANT
+      // You MUST REMOVE candidates that are in the same folder as a bulk
+      // processing flag!
+
+      while (blipiter.hasNext()) {
+        String blipPath = blipiter.next();
+        blipPath = makeRelative(blipPath, 2);
+
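+        // candidateMap is sorted on the relative path, so everything under the blip folder
+        // sorts immediately after blipPath; walk forward until the prefix stops matching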
+        Iterator<String> tailIter = candidateMap.tailMap(blipPath).keySet().iterator();
+
+        int count = 0;
+
+        while (tailIter.hasNext()) {
+          if (tailIter.next().startsWith(blipPath)) {
+            count++;
+            tailIter.remove();
+          } else {
+            break;
+          }
+        }
+
+        if (count > 0)
+          log.debug("Folder has bulk processing flag: " + blipPath);
+      }
+
+    }
+
+    Iterator<Entry<Key,Value>> iter = gce.getReferenceIterator();
+    while (iter.hasNext()) {
+      Entry<Key,Value> entry = iter.next();
+      Key key = entry.getKey();
+      Text cft = key.getColumnFamily();
+
+      if (cft.equals(DataFileColumnFamily.NAME) || cft.equals(ScanFileColumnFamily.NAME)) {
+        String cq = key.getColumnQualifier().toString();
+
+        String reference = cq;
+        if (cq.startsWith("/")) {
+          String tableID = new String(KeyExtent.tableOfMetadataRow(key.getRow()));
+          reference = "/" + tableID + cq;
+        } else if (!cq.contains(":") && !cq.startsWith("../")) {
+          throw new RuntimeException("Bad file reference " + cq);
+        }
+
+        reference = makeRelative(reference, 3);
+
+        // WARNING: This line is EXTREMELY IMPORTANT.
+        // You MUST REMOVE candidates that are still in use
+        if (candidateMap.remove(reference) != null)
+          log.debug("Candidate was still in use: " + reference);
+
+        String dir = reference.substring(0, reference.lastIndexOf('/'));
+        if (candidateMap.remove(dir) != null)
+          log.debug("Candidate was still in use: " + dir);
+
+      } else if (TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.hasColumns(key)) {
+        String tableID = new String(KeyExtent.tableOfMetadataRow(key.getRow()));
+        String dir = entry.getValue().toString();
+        if (!dir.contains(":")) {
+          if (!dir.startsWith("/"))
+            throw new RuntimeException("Bad directory " + dir);
+          dir = "/" + tableID + dir;
+        }
+
+        dir = makeRelative(dir, 2);
+
+        if (candidateMap.remove(dir) != null)
+          log.debug("Candidate was still in use: " + dir);
+      } else
+        throw new RuntimeException("Scanner over metadata table returned unexpected column : " + entry.getKey());
+    }
+  }
+
+  private void cleanUpDeletedTableDirs(GarbageCollectionEnvironment gce, SortedMap<String,String> candidateMap) throws IOException {
+    HashSet<String> tableIdsWithDeletes = new HashSet<String>();
+
+    // find the table ids that had dirs deleted
+    for (String delete : candidateMap.keySet()) {
+      String[] tokens = delete.split("/");
+      if (tokens.length == 2) {
+        // it's a directory
+        String tableId = tokens[0];
+        tableIdsWithDeletes.add(tableId);
+      }
+    }
+
+    Set<String> tableIdsInZookeeper = gce.getTableIDs();
+
+    tableIdsWithDeletes.removeAll(tableIdsInZookeeper);
+
+    // tableIdsWithDeletes should now contain the set of deleted tables that had dirs deleted
+
+    for (String delTableId : tableIdsWithDeletes) {
+      gce.deleteTableDirIfEmpty(delTableId);
+    }
+
+  }
+
+  public void collect(GarbageCollectionEnvironment gce) throws TableNotFoundException, AccumuloException, AccumuloSecurityException, IOException {
+
+    String lastCandidate = "";
+
+    while (true) {
+      Span candidatesSpan = Trace.start("getCandidates");
+      List<String> candidates;
+      try {
+        candidates = gce.getCandidates(lastCandidate);
+      } finally {
+        candidatesSpan.stop();
+      }
+
+      if (candidates.size() == 0)
+        break;
+      else
+        lastCandidate = candidates.get(candidates.size() - 1);
+
+      long origSize = candidates.size();
+      gce.incrementCandidatesStat(origSize);
+
+      SortedMap<String,String> candidateMap = makeRelative(candidates);
+
+      Span confirmDeletesSpan = Trace.start("confirmDeletes");
+      try {
+        confirmDeletes(gce, candidateMap);
+      } finally {
+        confirmDeletesSpan.stop();
+      }
+      gce.incrementInUseStat(origSize - candidateMap.size());
+
+      Span deleteSpan = Trace.start("deleteFiles");
+      try {
+        gce.delete(candidateMap);
+      } finally {
+        deleteSpan.stop();
+      }
+
+      cleanUpDeletedTableDirs(gce, candidateMap);
+    }
+  }
+
+}

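The makeRelative normalization above is the heart of this change: every candidate and every reference is reduced to the same tableId/tabletDir or tableId/tabletDir/file suffix before they are compared, so absolute URIs, /tableId-relative paths, and ../-prefixed paths all match one another. A condensed, runnable restatement of the idea, not the committed code: the "tables" literal stands in for ServerConstants.TABLE_DIR, the expectedLen checks are omitted for brevity, and the example paths are made up.

    import java.util.ArrayList;
    import java.util.List;

    public class MakeRelativeDemo {
      static String makeRelative(String path) {
        String p = path.startsWith("../") ? path.substring(3) : path;
        List<String> t = new ArrayList<String>();
        for (String tok : p.split("/"))
          if (!tok.isEmpty()) // handles a//b///c and leading/trailing slashes
            t.add(tok);
        int n = t.size();
        if (n > 3) { // full URI: keep the suffix that follows the tables dir
          if (t.get(n - 4).equals("tables"))
            return t.get(n - 3) + "/" + t.get(n - 2) + "/" + t.get(n - 1);
          if (t.get(n - 3).equals("tables"))
            return t.get(n - 2) + "/" + t.get(n - 1);
          throw new IllegalArgumentException(path);
        }
        return String.join("/", t);
      }

      public static void main(String[] args) {
        // all three spellings of the same file reference normalize identically
        System.out.println(makeRelative("hdfs://nn:9000/accumulo/tables/4/t-0001/F0002.rf")); // 4/t-0001/F0002.rf
        System.out.println(makeRelative("../4/t-0001/F0002.rf"));                             // 4/t-0001/F0002.rf
        System.out.println(makeRelative("/4//t-0001/F0002.rf"));                              // 4/t-0001/F0002.rf
      }
    }
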
http://git-wip-us.apache.org/repos/asf/accumulo/blob/26a8f5f5/server/src/main/java/org/apache/accumulo/server/gc/GarbageCollectionEnvironment.java
----------------------------------------------------------------------
diff --git a/server/src/main/java/org/apache/accumulo/server/gc/GarbageCollectionEnvironment.java b/server/src/main/java/org/apache/accumulo/server/gc/GarbageCollectionEnvironment.java
new file mode 100644
index 0000000..2b939dd
--- /dev/null
+++ b/server/src/main/java/org/apache/accumulo/server/gc/GarbageCollectionEnvironment.java
@@ -0,0 +1,52 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.server.gc;
+
+import java.io.IOException;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map.Entry;
+import java.util.Set;
+import java.util.SortedMap;
+
+import org.apache.accumulo.core.client.AccumuloException;
+import org.apache.accumulo.core.client.AccumuloSecurityException;
+import org.apache.accumulo.core.client.TableNotFoundException;
+import org.apache.accumulo.core.data.Key;
+import org.apache.accumulo.core.data.Value;
+
+/**
+ * The operations GarbageCollectionAlgorithm needs from Accumulo, abstracted into an interface so
+ * the algorithm can run against a live instance (see GCEnv in SimpleGarbageCollector) or an
+ * in-memory fake in tests.
+ */
+public interface GarbageCollectionEnvironment {
+
+  List<String> getCandidates(String continuePoint) throws TableNotFoundException, AccumuloException, AccumuloSecurityException;
+
+  Iterator<String> getBlipIterator() throws TableNotFoundException, AccumuloException, AccumuloSecurityException;
+
+  Iterator<Entry<Key,Value>> getReferenceIterator() throws TableNotFoundException, AccumuloException, AccumuloSecurityException;
+
+  Set<String> getTableIDs();
+
+  void delete(SortedMap<String,String> candidateMap) throws IOException;
+
+  void deleteTableDirIfEmpty(String tableID) throws IOException;
+
+  void incrementCandidatesStat(long i);
+
+  void incrementInUseStat(long i);
+}

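This interface is what makes the algorithm testable: SimpleGarbageCollector plugs in a live GCEnv per metadata table, while GarbageCollectionTest (below) plugs in an in-memory fake. A minimal sketch of an implementation skeleton, with comments noting how collect() uses each method; the class name is made up and accumulo-core is assumed on the classpath.

    import java.io.IOException;
    import java.util.Collections;
    import java.util.Iterator;
    import java.util.List;
    import java.util.Map.Entry;
    import java.util.Set;
    import java.util.SortedMap;

    import org.apache.accumulo.core.data.Key;
    import org.apache.accumulo.core.data.Value;

    public class NoOpGCEnv implements GarbageCollectionEnvironment {
      @Override
      public List<String> getCandidates(String continuePoint) {
        return Collections.emptyList(); // batch of delete markers after continuePoint; an empty batch ends collect()
      }

      @Override
      public Iterator<String> getBlipIterator() {
        return Collections.<String>emptyList().iterator(); // bulk-load-in-progress markers; candidates under them are kept
      }

      @Override
      public Iterator<Entry<Key,Value>> getReferenceIterator() {
        return Collections.<Entry<Key,Value>>emptyList().iterator(); // file/dir references; referenced candidates are kept
      }

      @Override
      public Set<String> getTableIDs() {
        return Collections.emptySet(); // live table ids, used when pruning empty dirs of deleted tables
      }

      @Override
      public void delete(SortedMap<String,String> candidateMap) throws IOException {} // physical deletion of confirmed candidates

      @Override
      public void deleteTableDirIfEmpty(String tableID) throws IOException {}

      @Override
      public void incrementCandidatesStat(long i) {}

      @Override
      public void incrementInUseStat(long i) {}
    }

With this stub, new GarbageCollectionAlgorithm().collect(new NoOpGCEnv()) returns after a single empty batch.
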
http://git-wip-us.apache.org/repos/asf/accumulo/blob/26a8f5f5/server/src/main/java/org/apache/accumulo/server/gc/SimpleGarbageCollector.java
----------------------------------------------------------------------
diff --git a/server/src/main/java/org/apache/accumulo/server/gc/SimpleGarbageCollector.java b/server/src/main/java/org/apache/accumulo/server/gc/SimpleGarbageCollector.java
index 18829b2..b598ec1 100644
--- a/server/src/main/java/org/apache/accumulo/server/gc/SimpleGarbageCollector.java
+++ b/server/src/main/java/org/apache/accumulo/server/gc/SimpleGarbageCollector.java
@@ -20,17 +20,12 @@ import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.net.UnknownHostException;
 import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.HashSet;
 import java.util.Iterator;
 import java.util.List;
-import java.util.Locale;
 import java.util.Map;
 import java.util.Map.Entry;
 import java.util.Set;
-import java.util.SortedSet;
-import java.util.TreeSet;
+import java.util.SortedMap;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
 import java.util.concurrent.TimeUnit;
@@ -49,11 +44,10 @@ import org.apache.accumulo.core.client.TableNotFoundException;
 import org.apache.accumulo.core.client.impl.Tables;
 import org.apache.accumulo.core.conf.Property;
 import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.KeyExtent;
 import org.apache.accumulo.core.data.Mutation;
+import org.apache.accumulo.core.data.PartialKey;
 import org.apache.accumulo.core.data.Range;
 import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.file.FileOperations;
 import org.apache.accumulo.core.gc.thrift.GCMonitorService.Iface;
 import org.apache.accumulo.core.gc.thrift.GCMonitorService.Processor;
 import org.apache.accumulo.core.gc.thrift.GCStatus;
@@ -103,6 +97,8 @@ import org.apache.log4j.Logger;
 import org.apache.zookeeper.KeeperException;
 
 import com.beust.jcommander.Parameter;
+import com.google.common.base.Function;
+import com.google.common.collect.Iterators;
 import com.google.common.net.HostAndPort;
 
 public class SimpleGarbageCollector implements Iface {
@@ -113,25 +109,19 @@ public class SimpleGarbageCollector implements Iface {
     boolean verbose = false;
     @Parameter(names = {"-s", "--safemode"}, description = "safe mode will not delete files")
     boolean safeMode = false;
-    @Parameter(names = {"-o", "--offline"},
-        description = "offline mode will run once and check data files directly; this is dangerous if accumulo is running or not shut down properly")
-    boolean offline = false;
   }
   
   // how much of the JVM's available memory should it use gathering candidates
   private static final float CANDIDATE_MEMORY_PERCENTAGE = 0.75f;
-  private boolean candidateMemExceeded;
-  
+
   private static final Logger log = Logger.getLogger(SimpleGarbageCollector.class);
   
   private Credentials credentials;
   private long gcStartDelay;
-  private boolean checkForBulkProcessingFiles;
   private VolumeManager fs;
   private boolean useTrash = true;
   private Opts opts = new Opts();
   private ZooLock lock;
-  private Key continueKey = null;
   
   private GCStatus status = new GCStatus(new GcCycleStats(), new GcCycleStats(), new GcCycleStats(), new GcCycleStats());
   
@@ -166,36 +156,288 @@ public class SimpleGarbageCollector implements Iface {
     gcStartDelay = instance.getConfiguration().getTimeInMillis(Property.GC_CYCLE_START);
     long gcDelay = instance.getConfiguration().getTimeInMillis(Property.GC_CYCLE_DELAY);
     numDeleteThreads = instance.getConfiguration().getCount(Property.GC_DELETE_THREADS);
-    log.info("start delay: " + (opts.offline ? 0 + " sec (offline)" : gcStartDelay + " milliseconds"));
+    log.info("start delay: " + gcStartDelay + " milliseconds");
     log.info("time delay: " + gcDelay + " milliseconds");
     log.info("safemode: " + opts.safeMode);
-    log.info("offline: " + opts.offline);
     log.info("verbose: " + opts.verbose);
     log.info("memory threshold: " + CANDIDATE_MEMORY_PERCENTAGE + " of " + Runtime.getRuntime().maxMemory() + " bytes");
     log.info("delete threads: " + numDeleteThreads);
     useTrash = !noTrash;
   }
   
+  private class GCEnv implements GarbageCollectionEnvironment {
+
+    private String tableName;
+
+    GCEnv(String tableName) {
+      this.tableName = tableName;
+    }
+
+    @Override
+    public List<String> getCandidates(String continuePoint) throws TableNotFoundException, AccumuloException, AccumuloSecurityException {
+      // want to ensure GC makes progress... if the 1st N deletes are stable and we keep processing them,
+      // then we will never inspect deletes after N
+      Range range = MetadataSchema.DeletesSection.getRange();
+      if (continuePoint != null && !continuePoint.isEmpty()) {
+        String continueRow = MetadataSchema.DeletesSection.getRowPrefix() + continuePoint;
+        range = new Range(new Key(continueRow).followingKey(PartialKey.ROW), true, range.getEndKey(), range.isEndKeyInclusive());
+      }
+
+      Scanner scanner = instance.getConnector(credentials.getPrincipal(), credentials.getToken()).createScanner(tableName, Authorizations.EMPTY);
+      scanner.setRange(range);
+      List<String> result = new ArrayList<String>();
+      // find candidates for deletion; chop off the prefix
+      for (Entry<Key,Value> entry : scanner) {
+        String cand = entry.getKey().getRow().toString().substring(MetadataSchema.DeletesSection.getRowPrefix().length());
+        result.add(cand);
+        if (almostOutOfMemory()) {
+          log.info("List of delete candidates has exceeded the memory threshold. Attempting to delete what has been gathered so far.");
+          break;
+        }
+      }
+
+      return result;
+
+    }
+
+    @Override
+    public Iterator<String> getBlipIterator() throws TableNotFoundException, AccumuloException, AccumuloSecurityException {
+      IsolatedScanner scanner = new IsolatedScanner(instance.getConnector(credentials.getPrincipal(), credentials.getToken()).createScanner(tableName,
+          Authorizations.EMPTY));
+
+      scanner.setRange(MetadataSchema.BlipSection.getRange());
+
+      return Iterators.transform(scanner.iterator(), new Function<Entry<Key,Value>,String>() {
+        @Override
+        public String apply(Entry<Key,Value> entry) {
+          return entry.getKey().getRow().toString().substring(MetadataSchema.BlipSection.getRowPrefix().length());
+        }
+      });
+    }
+
+    @Override
+    public Iterator<Entry<Key,Value>> getReferenceIterator() throws TableNotFoundException, AccumuloException, AccumuloSecurityException {
+      IsolatedScanner scanner = new IsolatedScanner(instance.getConnector(credentials.getPrincipal(), credentials.getToken()).createScanner(tableName,
+          Authorizations.EMPTY));
+      scanner.fetchColumnFamily(DataFileColumnFamily.NAME);
+      scanner.fetchColumnFamily(ScanFileColumnFamily.NAME);
+      TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.fetch(scanner);
+      TabletIterator tabletIterator = new TabletIterator(scanner, MetadataSchema.TabletsSection.getRange(), false, true);
+
+      return Iterators.concat(Iterators.transform(tabletIterator, new Function<Map<Key,Value>,Iterator<Entry<Key,Value>>>() {
+        @Override
+        public Iterator<Entry<Key,Value>> apply(Map<Key,Value> input) {
+          return input.entrySet().iterator();
+        }
+      }));
+    }
+
+    @Override
+    public Set<String> getTableIDs() {
+      return Tables.getIdToNameMap(instance).keySet();
+    }
+
+    @Override
+    public void delete(SortedMap<String,String> confirmedDeletes) throws IOException {
+
+      if (opts.safeMode) {
+        if (opts.verbose)
+          System.out.format("SAFEMODE: There are %d data file candidates marked for deletion.%n"
+              + "          Examine the log files to identify them.%n" + "          They can be removed by executing: bin/accumulo gc --offline%n"
+              + "WARNING:  Do not run the garbage collector in offline mode unless you are positive%n"
+              + "          that the accumulo METADATA table is in a clean state, or that accumulo%n"
+              + "          has not yet been run, in the case of an upgrade.%n", confirmedDeletes.size());
+        log.info("SAFEMODE: Listing all data file candidates for deletion");
+        for (String s : confirmedDeletes.values())
+          log.info("SAFEMODE: " + s);
+        log.info("SAFEMODE: End candidates for deletion");
+        return;
+      }
+
+      // create a batchwriter to remove the delete flags for successful
+      // deletes; the writer targets this environment's metadata table
+      BatchWriter writer = null;
+      Connector c;
+      try {
+        c = instance.getConnector(SystemCredentials.get().getPrincipal(), SystemCredentials.get().getToken());
+        writer = c.createBatchWriter(tableName, new BatchWriterConfig());
+      } catch (AccumuloException e) {
+        log.error("Unable to connect to Accumulo to write deletes", e);
+      } catch (AccumuloSecurityException e) {
+        log.error("Unable to connect to Accumulo to write deletes", e);
+      } catch (TableNotFoundException e) {
+        log.error("Unable to create writer to remove file from the " + e.getTableName() + " table", e);
+      }
+
+      // when deleting a dir and all files in that dir, only need to delete the dir
+      // the dir will sort right before the files... so remove the files in this case
+      // to minimize namenode ops
+      Iterator<Entry<String,String>> cdIter = confirmedDeletes.entrySet().iterator();
+
+      String lastDir = null;
+      while (cdIter.hasNext()) {
+        Entry<String,String> entry = cdIter.next();
+        String relPath = entry.getKey();
+        String absPath = fs.getFullPath(FileType.TABLE, entry.getValue()).toString();
+
+        if (isDir(relPath)) {
+          lastDir = absPath;
+        } else if (lastDir != null) {
+          if (absPath.startsWith(lastDir)) {
+            log.debug("Ignoring " + entry.getValue() + " because " + lastDir + " exists");
+            try {
+              putMarkerDeleteMutation(entry.getValue(), writer);
+            } catch (MutationsRejectedException e) {
+              throw new RuntimeException(e);
+            }
+            cdIter.remove();
+          } else {
+            lastDir = null;
+          }
+        }
+      }
+
+      final BatchWriter finalWriter = writer;
+
+      ExecutorService deleteThreadPool = Executors.newFixedThreadPool(numDeleteThreads, new NamingThreadFactory("deleting"));
+
+      for (final String delete : confirmedDeletes.values()) {
+
+        Runnable deleteTask = new Runnable() {
+          @Override
+          public void run() {
+            boolean removeFlag;
+
+            try {
+              Path fullPath = fs.getFullPath(FileType.TABLE, delete);
+
+              log.debug("Deleting " + fullPath);
+
+              if (moveToTrash(fullPath) || fs.deleteRecursively(fullPath)) {
+                // delete succeeded; still want to remove the delete marker from the metadata table
+                removeFlag = true;
+                synchronized (SimpleGarbageCollector.this) {
+                  ++status.current.deleted;
+                }
+              } else if (fs.exists(fullPath)) {
+                // leave the entry in the METADATA table; we'll try again
+                // later
+                removeFlag = false;
+                synchronized (SimpleGarbageCollector.this) {
+                  ++status.current.errors;
+                }
+                log.warn("File exists, but was not deleted for an unknown reason: " + fullPath);
+              } else {
+                // this failure, we still want to remove the METADATA table
+                // entry
+                removeFlag = true;
+                synchronized (SimpleGarbageCollector.this) {
+                  ++status.current.errors;
+                }
+                String parts[] = delete.split("/");
+                if (parts.length > 2) {
+                  String tableId = parts[parts.length - 3];
+                  String tabletDir = parts[parts.length - 2];
+                  TableManager.getInstance().updateTableStateCache(tableId);
+                  TableState tableState = TableManager.getInstance().getTableState(tableId);
+                  if (tableState != null && tableState != TableState.DELETING) {
+                    // clone directories don't always exist
+                    if (!tabletDir.startsWith("c-"))
+                      log.warn("File doesn't exist: " + fullPath);
+                  }
+                } else {
+                  log.warn("Very strange path name: " + delete);
+                }
+              }
+
+              // proceed to clearing out the flags for successful deletes and
+              // non-existent files
+              if (removeFlag && finalWriter != null) {
+                putMarkerDeleteMutation(delete, finalWriter);
+              }
+            } catch (Exception e) {
+              log.error(e, e);
+            }
+
+          }
+
+        };
+
+        deleteThreadPool.execute(deleteTask);
+      }
+
+      deleteThreadPool.shutdown();
+
+      try {
+        while (!deleteThreadPool.awaitTermination(1000, TimeUnit.MILLISECONDS)) {}
+      } catch (InterruptedException e1) {
+        log.error(e1, e1);
+      }
+
+      if (writer != null) {
+        try {
+          writer.close();
+        } catch (MutationsRejectedException e) {
+          log.error("Problem removing entries from the metadata table: ", e);
+        }
+      }
+    }
+
+    @Override
+    public void deleteTableDirIfEmpty(String tableID) throws IOException {
+      // if the dir exists and is empty, then an empty list is returned...
+      // hadoop 1.0 will return null if the file doesn't exist
+      // hadoop 2.0 will throw an exception if the file does not exist
+      for (String dir : ServerConstants.getTablesDirs()) {
+        FileStatus[] tabletDirs = null;
+        try {
+          tabletDirs = fs.listStatus(new Path(dir + "/" + tableID));
+        } catch (FileNotFoundException ex) {
+          // ignored
+        }
+        if (tabletDirs == null)
+          continue;
+
+        if (tabletDirs.length == 0) {
+          Path p = new Path(dir + "/" + tableID);
+          log.debug("Removing table dir " + p);
+          if (!moveToTrash(p))
+            fs.delete(p);
+        }
+      }
+    }
+
+    @Override
+    public void incrementCandidatesStat(long i) {
+      status.current.candidates += i;
+    }
+
+    @Override
+    public void incrementInUseStat(long i) {
+      status.current.inUse += i;
+    }
+
+  }
+
   private void run() {
     long tStart, tStop;
     
     // Sleep for an initial period, giving the master time to start up and
     // old data files to be unused
-    if (!opts.offline) {
-      try {
-        getZooLock(startStatsService());
-      } catch (Exception ex) {
-        log.error(ex, ex);
-        System.exit(1);
-      }
       
-      try {
-        log.debug("Sleeping for " + gcStartDelay + " milliseconds before beginning garbage collection cycles");
-        Thread.sleep(gcStartDelay);
-      } catch (InterruptedException e) {
-        log.warn(e, e);
-        return;
-      }
+    try {
+      getZooLock(startStatsService());
+    } catch (Exception ex) {
+      log.error(ex, ex);
+      System.exit(1);
+    }
+
+    try {
+      log.debug("Sleeping for " + gcStartDelay + " milliseconds before beginning garbage collection cycles");
+      Thread.sleep(gcStartDelay);
+    } catch (InterruptedException e) {
+      log.warn(e, e);
+      return;
     }
     
     Sampler sampler = new CountSampler(100);
@@ -207,61 +449,18 @@ public class SimpleGarbageCollector implements Iface {
       Span gcSpan = Trace.start("loop");
       tStart = System.currentTimeMillis();
       try {
-        // STEP 1: gather candidates
         System.gc(); // make room
-        candidateMemExceeded = false;
-        checkForBulkProcessingFiles = false;
-        
-        Span candidatesSpan = Trace.start("getCandidates");
+
         status.current.started = System.currentTimeMillis();
-        SortedSet<String> candidates;
-        try {
-          candidates = getCandidates();
-          status.current.candidates = candidates.size();
-        } finally {
-          candidatesSpan.stop();
-        }
-        
-        // STEP 2: confirm deletes
-        // WARNING: This line is EXTREMELY IMPORTANT.
-        // You MUST confirm candidates are okay to delete
-        Span confirmDeletesSpan = Trace.start("confirmDeletes");
-        try {
-          confirmDeletes(candidates);
-        status.current.inUse = status.current.candidates - candidates.size();
-        } finally {
-          confirmDeletesSpan.stop();
-        }
-        
-        // STEP 3: delete files
-        if (opts.safeMode) {
-          if (opts.verbose)
-            System.out.println("SAFEMODE: There are " + candidates.size() + " data file candidates marked for deletion.%n"
-                + "          Examine the log files to identify them.%n" + "          They can be removed by executing: bin/accumulo gc --offline%n"
-                + "WARNING:  Do not run the garbage collector in offline mode unless you are positive%n"
-                + "          that the accumulo METADATA table is in a clean state, or that accumulo%n"
-                + "          has not yet been run, in the case of an upgrade.");
-          log.info("SAFEMODE: Listing all data file candidates for deletion");
-          for (String s : candidates)
-            log.info("SAFEMODE: " + s);
-          log.info("SAFEMODE: End candidates for deletion");
-        } else {
-          Span deleteSpan = Trace.start("deleteFiles");
-          try {
-            deleteFiles(candidates);
-            log.info("Number of data file candidates for deletion: " + status.current.candidates);
-            log.info("Number of data file candidates still in use: " + status.current.inUse);
-            log.info("Number of successfully deleted data files: " + status.current.deleted);
-            log.info("Number of data files delete failures: " + status.current.errors);
-          } finally {
-            deleteSpan.stop();
-          }
-          
-          // delete empty dirs of deleted tables
-          // this can occur as a result of cloning
-          cleanUpDeletedTableDirs(candidates);
-        }
-        
+
+        new GarbageCollectionAlgorithm().collect(new GCEnv(RootTable.NAME));
+        new GarbageCollectionAlgorithm().collect(new GCEnv(MetadataTable.NAME));
+
+        log.info("Number of data file candidates for deletion: " + status.current.candidates);
+        log.info("Number of data file candidates still in use: " + status.current.inUse);
+        log.info("Number of successfully deleted data files: " + status.current.deleted);
+        log.info("Number of data files delete failures: " + status.current.errors);
+
         status.current.finished = System.currentTimeMillis();
         status.last = status.current;
         status.current = new GcCycleStats();
@@ -269,17 +468,10 @@ public class SimpleGarbageCollector implements Iface {
       } catch (Exception e) {
         log.error(e, e);
       }
+
       tStop = System.currentTimeMillis();
       log.info(String.format("Collect cycle took %.2f seconds", ((tStop - tStart) / 1000.0)));
       
-      if (opts.offline)
-        break;
-      
-      if (candidateMemExceeded) {
-        log.info("Gathering of candidates was interrupted due to memory shortage. Bypassing cycle delay to collect the remaining candidates.");
-        continue;
-      }
-      
       // Clean up any unused write-ahead logs
       Span waLogs = Trace.start("walogs");
       try {
@@ -324,51 +516,6 @@ public class SimpleGarbageCollector implements Iface {
     }
   }
   
-  /*
-   * this method removes deleted table dirs that are empty
-   */
-  private void cleanUpDeletedTableDirs(SortedSet<String> candidates) throws Exception {
-    
-    HashSet<String> tableIdsWithDeletes = new HashSet<String>();
-    
-    // find the table ids that had dirs deleted
-    for (String delete : candidates) {
-      if (isDir(delete)) {
-        String tableId = delete.split("/")[1];
-        tableIdsWithDeletes.add(tableId);
-      }
-    }
-    
-    Tables.clearCache(instance);
-    Set<String> tableIdsInZookeeper = Tables.getIdToNameMap(instance).keySet();
-    
-    tableIdsWithDeletes.removeAll(tableIdsInZookeeper);
-    
-    // tableIdsWithDeletes should now contain the set of deleted tables that had dirs deleted
-    
-    for (String delTableId : tableIdsWithDeletes) {
-      // if dir exist and is empty, then empty list is returned...
-      // hadoop 1.0 will return null if the file doesn't exist
-      // hadoop 2.0 will throw an exception if the file does not exist
-      for (String dir : ServerConstants.getTablesDirs()) {
-        FileStatus[] tabletDirs = null;
-        try {
-          tabletDirs = fs.listStatus(new Path(dir + "/" + delTableId));
-        } catch (FileNotFoundException ex) {
-          // ignored
-        }
-        if (tabletDirs == null)
-          continue;
-        
-        if (tabletDirs.length == 0) {
-          Path p = new Path(dir + "/" + delTableId);
-          if (!moveToTrash(p))
-            fs.delete(p);
-        }
-      }
-    }
-  }
-  
   private void getZooLock(HostAndPort addr) throws KeeperException, InterruptedException {
     String path = ZooUtil.getRoot(HdfsZooInstance.getInstance()) + Constants.ZGC_LOCK;
     
@@ -414,347 +561,29 @@ public class SimpleGarbageCollector implements Iface {
     return result;
   }
   
-  /**
-   * This method gets a set of candidates for deletion by scanning the METADATA table deleted flag keyspace
-   */
-  SortedSet<String> getCandidates() throws Exception {
-    TreeSet<String> candidates = new TreeSet<String>();
-    
-    if (opts.offline) {
-      checkForBulkProcessingFiles = true;
-      try {
-        for (String validExtension : FileOperations.getValidExtensions()) {
-          for (String dir : ServerConstants.getTablesDirs()) {
-            for (FileStatus stat : fs.globStatus(new Path(dir + "/*/*/*." + validExtension))) {
-              String cand = stat.getPath().toUri().getPath();
-              if (cand.contains(ServerConstants.getRootTabletDir()))
-                continue;
-              candidates.add(cand.substring(dir.length()));
-              log.debug("Offline candidate: " + cand);
-            }
-          }
-        }
-      } catch (IOException e) {
-        log.error("Unable to check the filesystem for offline candidates. Removing all candidates for deletion to be safe.", e);
-        candidates.clear();
-      }
-      return candidates;
-    }
-    
-    checkForBulkProcessingFiles = false;
-    candidates.addAll(getBatch(RootTable.NAME));
-    if (candidateMemExceeded)
-      return candidates;
-    
-    candidates.addAll(getBatch(MetadataTable.NAME));
-    return candidates;
-  }
-  
-  /**
-   * Gets a batch of delete markers from the specified table
-   * 
-   * @param tableName
-   *          the name of the system table to scan (either {@link RootTable.NAME} or {@link MetadataTable.NAME})
-   */
-  private Collection<String> getBatch(String tableName) throws Exception {
-    // want to ensure GC makes progress... if the 1st N deletes are stable and we keep processing them,
-    // then will never inspect deletes after N
-    Range range = MetadataSchema.DeletesSection.getRange();
-    if (continueKey != null) {
-      if (!range.contains(continueKey)) {
-        // continue key is for some other range
-        return Collections.emptyList();
-      }
-      range = new Range(continueKey, true, range.getEndKey(), range.isEndKeyInclusive());
-      continueKey = null;
-    }
-    
-    Scanner scanner = instance.getConnector(credentials.getPrincipal(), credentials.getToken()).createScanner(tableName, Authorizations.EMPTY);
-    scanner.setRange(range);
-    List<String> result = new ArrayList<String>();
-    // find candidates for deletion; chop off the prefix
-    for (Entry<Key,Value> entry : scanner) {
-      String cand = entry.getKey().getRow().toString().substring(MetadataSchema.DeletesSection.getRowPrefix().length());
-      result.add(cand);
-      checkForBulkProcessingFiles |= cand.toLowerCase(Locale.ENGLISH).contains(Constants.BULK_PREFIX);
-      if (almostOutOfMemory()) {
-        candidateMemExceeded = true;
-        log.info("List of delete candidates has exceeded the memory threshold. Attempting to delete what has been gathered so far.");
-        continueKey = entry.getKey();
-        break;
-      }
-    }
-    
-    return result;
-  }
-  
+
   static public boolean almostOutOfMemory() {
     Runtime runtime = Runtime.getRuntime();
     return runtime.totalMemory() - runtime.freeMemory() > CANDIDATE_MEMORY_PERCENTAGE * runtime.maxMemory();
   }
   
-  /**
-   * This method removes candidates from the candidate list under two conditions: 1. They are in the same folder as a bulk processing file, if that option is
-   * selected 2. They are still in use in the file column family in the METADATA table
-   */
-  public void confirmDeletes(SortedSet<String> candidates) throws AccumuloException {
-    confirmDeletes(RootTable.NAME, candidates);
-    confirmDeletes(MetadataTable.NAME, candidates);
-  }
-  
-  private void confirmDeletes(String tableName, SortedSet<String> candidates) throws AccumuloException {
-    Scanner scanner;
-    if (opts.offline) {
-      // TODO
-      throw new RuntimeException("Offline scanner no longer supported");
-      // try {
-      // scanner = new OfflineMetadataScanner(instance.getConfiguration(), fs);
-      // } catch (IOException e) {
-      // throw new IllegalStateException("Unable to create offline metadata scanner", e);
-      // }
-    } else {
-      try {
-        scanner = new IsolatedScanner(instance.getConnector(credentials.getPrincipal(), credentials.getToken()).createScanner(tableName, Authorizations.EMPTY));
-      } catch (AccumuloSecurityException ex) {
-        throw new AccumuloException(ex);
-      } catch (TableNotFoundException ex) {
-        throw new AccumuloException(ex);
-      }
-    }
-    
-    // skip candidates that are in a bulk processing folder
-    if (checkForBulkProcessingFiles) {
-      
-      log.debug("Checking for bulk processing flags");
-      
-      scanner.setRange(MetadataSchema.BlipSection.getRange());
-      
-      // WARNING: This block is IMPORTANT
-      // You MUST REMOVE candidates that are in the same folder as a bulk
-      // processing flag!
-      
-      for (Entry<Key,Value> entry : scanner) {
-        String blipPath = entry.getKey().getRow().toString().substring(MetadataSchema.BlipSection.getRowPrefix().length());
-        Iterator<String> tailIter = candidates.tailSet(blipPath).iterator();
-        int count = 0;
-        while (tailIter.hasNext()) {
-          if (tailIter.next().startsWith(blipPath)) {
-            count++;
-            tailIter.remove();
-          } else {
-            break;
-          }
-        }
-        
-        if (count > 0)
-          log.debug("Folder has bulk processing flag: " + blipPath);
-        
-      }
-    }
-    
-    // skip candidates that are still in use in the file column family in
-    // the metadata table
-    scanner.clearColumns();
-    scanner.fetchColumnFamily(DataFileColumnFamily.NAME);
-    scanner.fetchColumnFamily(ScanFileColumnFamily.NAME);
-    TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.fetch(scanner);
-    TabletIterator tabletIterator = new TabletIterator(scanner, MetadataSchema.TabletsSection.getRange(), false, true);
-    
-    while (tabletIterator.hasNext()) {
-      Map<Key,Value> tabletKeyValues = tabletIterator.next();
-      
-      for (Entry<Key,Value> entry : tabletKeyValues.entrySet()) {
-        if (entry.getKey().getColumnFamily().equals(DataFileColumnFamily.NAME) || entry.getKey().getColumnFamily().equals(ScanFileColumnFamily.NAME)) {
-          
-          String cf = entry.getKey().getColumnQualifier().toString();
-          String delete = cf;
-          if (!cf.contains(":")) {
-            if (cf.startsWith("../")) {
-              delete = cf.substring(2);
-            } else {
-              String table = new String(KeyExtent.tableOfMetadataRow(entry.getKey().getRow()));
-              if (cf.startsWith("/"))
-                delete = "/" + table + cf;
-              else
-                delete = "/" + table + "/" + cf;
-            }
-          }
-          // WARNING: This line is EXTREMELY IMPORTANT.
-          // You MUST REMOVE candidates that are still in use
-          if (candidates.remove(delete))
-            log.debug("Candidate was still in use in the " + tableName + " table: " + delete);
-          
-          String path = delete.substring(0, delete.lastIndexOf('/'));
-          if (candidates.remove(path))
-            log.debug("Candidate was still in use in the " + tableName + " table: " + path);
-        } else if (TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.hasColumns(entry.getKey())) {
-          String table = new String(KeyExtent.tableOfMetadataRow(entry.getKey().getRow()));
-          String delete = "/" + table + entry.getValue().toString();
-          if (candidates.remove(delete))
-            log.debug("Candidate was still in use in the " + tableName + " table: " + delete);
-        } else
-          throw new AccumuloException("Scanner over metadata table returned unexpected column : " + entry.getKey());
-      }
-    }
-  }
   
   final static String METADATA_TABLE_DIR = "/" + MetadataTable.ID;
   
-  private static void putMarkerDeleteMutation(final String delete, final BatchWriter metadataWriter, final BatchWriter rootWriter)
+  private static void putMarkerDeleteMutation(final String delete, final BatchWriter writer)
       throws MutationsRejectedException {
-    BatchWriter writer = delete.contains(METADATA_TABLE_DIR) ? rootWriter : metadataWriter;
     Mutation m = new Mutation(MetadataSchema.DeletesSection.getRowPrefix() + delete);
     m.putDelete(EMPTY_TEXT, EMPTY_TEXT);
     writer.addMutation(m);
   }
   
-  /**
-   * This method attempts to do its best to remove files from the filesystem that have been confirmed for deletion.
-   */
-  private void deleteFiles(SortedSet<String> confirmedDeletes) {
-    // create a batchwriter to remove the delete flags for successful
-    // deletes; Need separate writer for the root tablet.
-    BatchWriter writer = null;
-    BatchWriter rootWriter = null;
-    if (!opts.offline) {
-      Connector c;
-      try {
-        c = instance.getConnector(SystemCredentials.get().getPrincipal(), SystemCredentials.get().getToken());
-        writer = c.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
-        rootWriter = c.createBatchWriter(RootTable.NAME, new BatchWriterConfig());
-      } catch (AccumuloException e) {
-        log.error("Unable to connect to Accumulo to write deletes", e);
-      } catch (AccumuloSecurityException e) {
-        log.error("Unable to connect to Accumulo to write deletes", e);
-      } catch (TableNotFoundException e) {
-        log.error("Unable to create writer to remove file from the " + e.getTableName() + " table", e);
-      }
-    }
-    // when deleting a dir and all files in that dir, only need to delete the dir
-    // the dir will sort right before the files... so remove the files in this case
-    // to minimize namenode ops
-    Iterator<String> cdIter = confirmedDeletes.iterator();
-    String lastDir = null;
-    while (cdIter.hasNext()) {
-      String delete = cdIter.next();
-      if (isDir(delete)) {
-        lastDir = delete;
-      } else if (lastDir != null) {
-        if (delete.startsWith(lastDir)) {
-          log.debug("Ignoring " + delete + " because " + lastDir + " exist");
-          try {
-            putMarkerDeleteMutation(delete, writer, rootWriter);
-          } catch (MutationsRejectedException e) {
-            throw new RuntimeException(e);
-          }
-          cdIter.remove();
-        } else {
-          lastDir = null;
-        }
-        
-      }
-    }
-    
-    final BatchWriter finalWriter = writer;
-    final BatchWriter finalRootWriter = rootWriter;
-    
-    ExecutorService deleteThreadPool = Executors.newFixedThreadPool(numDeleteThreads, new NamingThreadFactory("deleting"));
-    
-    for (final String delete : confirmedDeletes) {
-      
-      Runnable deleteTask = new Runnable() {
-        @Override
-        public void run() {
-          boolean removeFlag;
-          
-          try {
-            Path fullPath = fs.getFullPath(FileType.TABLE, delete);
-
-            log.debug("Deleting " + fullPath);
-            
-            if (moveToTrash(fullPath) || fs.deleteRecursively(fullPath)) {
-              // delete succeeded, still want to delete
-              removeFlag = true;
-              synchronized (SimpleGarbageCollector.this) {
-                ++status.current.deleted;
-              }
-            } else if (fs.exists(fullPath)) {
-              // leave the entry in the METADATA table; we'll try again
-              // later
-              removeFlag = false;
-              synchronized (SimpleGarbageCollector.this) {
-                ++status.current.errors;
-              }
-              log.warn("File exists, but was not deleted for an unknown reason: " + fullPath);
-            } else {
-              // this failure, we still want to remove the METADATA table
-              // entry
-              removeFlag = true;
-              synchronized (SimpleGarbageCollector.this) {
-                ++status.current.errors;
-              }
-              String parts[] = delete.split("/");
-              if (parts.length > 2) {
-                String tableId = parts[parts.length - 3];
-                String tabletDir = parts[parts.length - 2];
-                TableManager.getInstance().updateTableStateCache(tableId);
-                TableState tableState = TableManager.getInstance().getTableState(tableId);
-                if (tableState != null && tableState != TableState.DELETING) {
-                  // clone directories don't always exist
-                  if (!tabletDir.startsWith("c-"))
-                    log.warn("File doesn't exist: " + fullPath);
-                }
-              } else {
-                log.warn("Very strange path name: " + delete);
-              }
-            }
-            
-            // proceed to clearing out the flags for successful deletes and
-            // non-existent files
-            if (removeFlag && finalWriter != null) {
-              putMarkerDeleteMutation(delete, finalWriter, finalRootWriter);
-            }
-          } catch (Exception e) {
-            log.error(e, e);
-          }
-          
-        }
-        
-      };
-      
-      deleteThreadPool.execute(deleteTask);
-    }
-    
-    deleteThreadPool.shutdown();
-    
-    try {
-      while (!deleteThreadPool.awaitTermination(1000, TimeUnit.MILLISECONDS)) {}
-    } catch (InterruptedException e1) {
-      log.error(e1, e1);
-    }
-    
-    if (writer != null) {
-      try {
-        writer.close();
-      } catch (MutationsRejectedException e) {
-        log.error("Problem removing entries from the metadata table: ", e);
-      }
-    }
-    if (rootWriter != null) {
-      try {
-        rootWriter.close();
-      } catch (MutationsRejectedException e) {
-        log.error("Problem removing entries from the metadata table: ", e);
-      }
-    }
-  }
   
   private boolean isDir(String delete) {
     int slashCount = 0;
     for (int i = 0; i < delete.length(); i++)
       if (delete.charAt(i) == '/')
         slashCount++;
-    return slashCount == 2;
+    return slashCount == 1;
   }
   
   @Override

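Two consequences of the move to relative candidates show up in this hunk: isDir() now tests for a single slash, since a directory candidate is tableId/tabletDir, and getCandidates() resumes strictly after the last candidate of the previous batch, so a batch cut short by the memory threshold can no longer starve later delete entries (the old continueKey and candidateMemExceeded fields are gone). A runnable sketch of that resume protocol; the rows and batch size are made up.

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.List;
    import java.util.NavigableSet;
    import java.util.TreeSet;

    // Sketch of the batch/resume protocol between collect() and getCandidates().
    public class CandidateBatchDemo {
      static List<String> getCandidates(NavigableSet<String> rows, String continuePoint, int max) {
        List<String> batch = new ArrayList<String>();
        for (String row : rows.tailSet(continuePoint, false)) { // strictly after the resume point
          batch.add(row);
          if (batch.size() >= max)
            break; // stands in for the memory threshold cutting a batch short
        }
        return batch;
      }

      public static void main(String[] args) {
        NavigableSet<String> rows = new TreeSet<String>(
            Arrays.asList("4/t-0001/F1.rf", "4/t-0002/F2.rf", "5/t-0003/F3.rf"));
        String last = "";
        List<String> batch;
        while (!(batch = getCandidates(rows, last, 2)).isEmpty()) {
          System.out.println(batch);           // [4/t-0001/F1.rf, 4/t-0002/F2.rf] then [5/t-0003/F3.rf]
          last = batch.get(batch.size() - 1);  // the resume point collect() carries between batches
        }
      }
    }
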
http://git-wip-us.apache.org/repos/asf/accumulo/blob/26a8f5f5/server/src/main/java/org/apache/accumulo/server/master/tableOps/CloneTable.java
----------------------------------------------------------------------
diff --git a/server/src/main/java/org/apache/accumulo/server/master/tableOps/CloneTable.java b/server/src/main/java/org/apache/accumulo/server/master/tableOps/CloneTable.java
index 16d401c..c916d07 100644
--- a/server/src/main/java/org/apache/accumulo/server/master/tableOps/CloneTable.java
+++ b/server/src/main/java/org/apache/accumulo/server/master/tableOps/CloneTable.java
@@ -109,7 +109,7 @@ class CloneMetadata extends MasterRepo {
     // need to clear out any metadata entries for tableId just in case this
     // died before and is executing again
     MetadataTableUtil.deleteTable(cloneInfo.tableId, false, SystemCredentials.get(), environment.getMasterLock());
-    MetadataTableUtil.cloneTable(instance, cloneInfo.srcTableId, cloneInfo.tableId);
+    MetadataTableUtil.cloneTable(instance, cloneInfo.srcTableId, cloneInfo.tableId, environment.getFileSystem());
     return new FinishCloneTable(cloneInfo);
   }
   

http://git-wip-us.apache.org/repos/asf/accumulo/blob/26a8f5f5/server/src/main/java/org/apache/accumulo/server/master/tableOps/CreateTable.java
----------------------------------------------------------------------
diff --git a/server/src/main/java/org/apache/accumulo/server/master/tableOps/CreateTable.java b/server/src/main/java/org/apache/accumulo/server/master/tableOps/CreateTable.java
index 4ea43fa..23b3760 100644
--- a/server/src/main/java/org/apache/accumulo/server/master/tableOps/CreateTable.java
+++ b/server/src/main/java/org/apache/accumulo/server/master/tableOps/CreateTable.java
@@ -41,7 +41,6 @@ import org.apache.accumulo.server.security.SystemCredentials;
 import org.apache.accumulo.server.tabletserver.TabletTime;
 import org.apache.accumulo.server.util.MetadataTableUtil;
 import org.apache.accumulo.server.util.TablePropUtil;
-import org.apache.accumulo.server.util.TabletOperations;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.io.Text;
 import org.apache.log4j.Logger;
@@ -56,6 +55,8 @@ class TableInfo implements Serializable {
   String user;
   
   public Map<String,String> props;
+
+  public String dir = null;
 }
 
 class FinishCreateTable extends MasterRepo {
@@ -113,10 +114,8 @@ class PopulateMetadata extends MasterRepo {
   
   @Override
   public Repo<Master> call(long tid, Master environment) throws Exception {
-    
     KeyExtent extent = new KeyExtent(new Text(tableInfo.tableId), null, null);
-    String tdir = environment.getFileSystem().choose(ServerConstants.getTablesDirs()) + "/" + tableInfo.tableId + "" + Constants.DEFAULT_TABLET_LOCATION;
-    MetadataTableUtil.addTablet(extent, tdir, SystemCredentials.get(), tableInfo.timeType, environment.getMasterLock());
+    MetadataTableUtil.addTablet(extent, tableInfo.dir, SystemCredentials.get(), tableInfo.timeType, environment.getMasterLock());
     
     return new FinishCreateTable(tableInfo);
     
@@ -146,16 +145,40 @@ class CreateDir extends MasterRepo {
   @Override
   public Repo<Master> call(long tid, Master master) throws Exception {
     VolumeManager fs = master.getFileSystem();
-    TabletOperations.createTabletDirectory(fs, tableInfo.tableId, null);
+    fs.mkdirs(new Path(tableInfo.dir));
     return new PopulateMetadata(tableInfo);
   }
   
   @Override
   public void undo(long tid, Master master) throws Exception {
     VolumeManager fs = master.getFileSystem();
-    for (String dir : ServerConstants.getTablesDirs()) {
-      fs.deleteRecursively(new Path(dir + "/" + tableInfo.tableId));
-    }
+    fs.deleteRecursively(new Path(tableInfo.dir));
+
+  }
+}
+
+class ChooseDir extends MasterRepo {
+  private static final long serialVersionUID = 1L;
+
+  private TableInfo tableInfo;
+
+  ChooseDir(TableInfo ti) {
+    this.tableInfo = ti;
+  }
+
+  @Override
+  public long isReady(long tid, Master environment) throws Exception {
+    return 0;
+  }
+
+  @Override
+  public Repo<Master> call(long tid, Master master) throws Exception {
+    tableInfo.dir = master.getFileSystem().choose(ServerConstants.getTablesDirs()) + "/" + tableInfo.tableId + Constants.DEFAULT_TABLET_LOCATION;
+    return new CreateDir(tableInfo);
+  }
+
+  @Override
+  public void undo(long tid, Master master) throws Exception {
     
   }
 }
@@ -192,7 +215,7 @@ class PopulateZookeeper extends MasterRepo {
         TablePropUtil.setTableProperty(tableInfo.tableId, entry.getKey(), entry.getValue());
       
       Tables.clearCache(instance);
-      return new CreateDir(tableInfo);
+      return new ChooseDir(tableInfo);
     } finally {
       Utils.tableNameLock.unlock();
     }

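Create table now chooses the tablet directory exactly once. The old code derived it in two places, CreateDir via TabletOperations.createTabletDirectory and PopulateMetadata via its own choose() call, which left room for the two steps to disagree when multiple volumes are configured. The new ChooseDir step picks the directory, stores it in the serialized TableInfo, and the later steps simply consume tableInfo.dir; the FATE chain is now PopulateZookeeper -> ChooseDir -> CreateDir -> PopulateMetadata -> FinishCreateTable. The chosen value has this shape (the example result is illustrative):

    // set once in ChooseDir.call() and carried in the serialized TableInfo
    tableInfo.dir = master.getFileSystem().choose(ServerConstants.getTablesDirs())
        + "/" + tableInfo.tableId + Constants.DEFAULT_TABLET_LOCATION;
    // e.g. "hdfs://nn:9000/accumulo/tables/4/default_tablet"
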
http://git-wip-us.apache.org/repos/asf/accumulo/blob/26a8f5f5/server/src/main/java/org/apache/accumulo/server/util/MetadataTableUtil.java
----------------------------------------------------------------------
diff --git a/server/src/main/java/org/apache/accumulo/server/util/MetadataTableUtil.java b/server/src/main/java/org/apache/accumulo/server/util/MetadataTableUtil.java
index b52ec9a..43b8e32 100644
--- a/server/src/main/java/org/apache/accumulo/server/util/MetadataTableUtil.java
+++ b/server/src/main/java/org/apache/accumulo/server/util/MetadataTableUtil.java
@@ -1131,7 +1131,7 @@ public class MetadataTableUtil {
     return rewrites;
   }
   
-  public static void cloneTable(Instance instance, String srcTableId, String tableId) throws Exception {
+  public static void cloneTable(Instance instance, String srcTableId, String tableId, VolumeManager volumeManager) throws Exception {
     
     Connector conn = instance.getConnector(SystemCredentials.get().getPrincipal(), SystemCredentials.get().getToken());
     BatchWriter bw = conn.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
@@ -1177,7 +1177,9 @@ public class MetadataTableUtil {
       Key k = entry.getKey();
       Mutation m = new Mutation(k.getRow());
       m.putDelete(k.getColumnFamily(), k.getColumnQualifier());
-      TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.put(m, new Value(FastFormat.toZeroPaddedString(dirCount++, 8, 16, "/c-".getBytes())));
+      String dir = volumeManager.choose(ServerConstants.getTablesDirs()) + "/" + tableId
+          + new String(FastFormat.toZeroPaddedString(dirCount++, 8, 16, "/c-".getBytes()));
+      TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.put(m, new Value(dir.getBytes()));
       bw.addMutation(m);
     }
     

http://git-wip-us.apache.org/repos/asf/accumulo/blob/26a8f5f5/server/src/test/java/org/apache/accumulo/server/gc/GarbageCollectionTest.java
----------------------------------------------------------------------
diff --git a/server/src/test/java/org/apache/accumulo/server/gc/GarbageCollectionTest.java b/server/src/test/java/org/apache/accumulo/server/gc/GarbageCollectionTest.java
new file mode 100644
index 0000000..2f3f16e
--- /dev/null
+++ b/server/src/test/java/org/apache/accumulo/server/gc/GarbageCollectionTest.java
@@ -0,0 +1,453 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.server.gc;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Set;
+import java.util.SortedMap;
+import java.util.TreeMap;
+import java.util.TreeSet;
+
+import org.apache.accumulo.core.data.Key;
+import org.apache.accumulo.core.data.KeyExtent;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.metadata.schema.DataFileValue;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema;
+import org.apache.hadoop.io.Text;
+import org.junit.Assert;
+import org.junit.Test;
+
+/**
+ * Unit tests for {@link GarbageCollectionAlgorithm}, exercised against an
+ * in-memory {@link GarbageCollectionEnvironment}.
+ */
+public class GarbageCollectionTest {
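+  // In-memory stand-in for the metadata and file-system state the garbage
+  // collector reads, allowing the algorithm's decisions to be tested without a
+  // running instance.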
+  static class TestGCE implements GarbageCollectionEnvironment {
+    TreeSet<String> candidates = new TreeSet<String>();
+    ArrayList<String> blips = new ArrayList<String>();
+    Map<Key,Value> references = new TreeMap<Key,Value>();
+    HashSet<String> tableIds = new HashSet<String>();
+
+    ArrayList<String> deletes = new ArrayList<String>();
+    ArrayList<String> tablesDirsToDelete = new ArrayList<String>();
+
+    @Override
+    public List<String> getCandidates(String continuePoint) {
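+      // Hand out candidates after continuePoint in batches of at most three so
+      // the algorithm's continuation handling gets exercised.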
+      Iterator<String> iter = candidates.tailSet(continuePoint, false).iterator();
+      ArrayList<String> ret = new ArrayList<String>();
+      while (iter.hasNext() && ret.size() < 3) {
+        ret.add(iter.next());
+      }
+
+      return ret;
+    }
+
+    @Override
+    public Iterator<String> getBlipIterator() {
+      return blips.iterator();
+    }
+
+    @Override
+    public Iterator<Entry<Key,Value>> getReferenceIterator() {
+      return references.entrySet().iterator();
+    }
+
+    @Override
+    public Set<String> getTableIDs() {
+      return tableIds;
+    }
+
+    @Override
+    public void delete(SortedMap<String,String> candidateMap) {
+      deletes.addAll(candidateMap.values());
+      this.candidates.removeAll(candidateMap.values());
+    }
+
+    @Override
+    public void deleteTableDirIfEmpty(String tableID) {
+      tablesDirsToDelete.add(tableID);
+    }
+
+    public Key newFileReferenceKey(String tableId, String endRow, String file) {
+      String row = new KeyExtent(new Text(tableId), endRow == null ? null : new Text(endRow), null).getMetadataEntry().toString();
+      String cf = MetadataSchema.TabletsSection.DataFileColumnFamily.NAME.toString();
+      String cq = file;
+      Key key = new Key(row, cf, cq);
+      return key;
+    }
+
+    public Value addFileReference(String tableId, String endRow, String file) {
+      Key key = newFileReferenceKey(tableId, endRow, file);
+      Value val = new Value(new DataFileValue(0, 0).encode());
+      return references.put(key, val);
+    }
+
+    public Value removeFileReference(String tableId, String endRow, String file) {
+      return references.remove(newFileReferenceKey(tableId, endRow, file));
+    }
+
+    Key newDirReferenceKey(String tableId, String endRow) {
+      String row = new KeyExtent(new Text(tableId), endRow == null ? null : new Text(endRow), null).getMetadataEntry().toString();
+      String cf = MetadataSchema.TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.getColumnFamily().toString();
+      String cq = MetadataSchema.TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.getColumnQualifier().toString();
+      Key key = new Key(row, cf, cq);
+      return key;
+    }
+
+    public Value addDirReference(String tableId, String endRow, String dir) {
+      Key key = newDirReferenceKey(tableId, endRow);
+      Value val = new Value(dir.getBytes());
+      return references.put(key, val);
+    }
+
+    public Value removeDirReference(String tableId, String endRow) {
+      return references.remove(newDirReferenceKey(tableId, endRow));
+    }
+
+    @Override
+    public void incrementCandidatesStat(long i) {}
+
+    @Override
+    public void incrementInUseStat(long i) {}
+  }
+
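+  // Asserts that exactly the given paths were deleted by the most recent
+  // collect() call, leaving the delete log empty for the next round.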
+  private void assertRemoved(TestGCE gce, String... refs) {
+    for (String ref : refs) {
+      Assert.assertTrue(gce.deletes.remove(ref));
+    }
+
+    Assert.assertEquals(0, gce.deletes.size());
+  }
+
+  @Test
+  public void testBasic() throws Exception {
+    TestGCE gce = new TestGCE();
+
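+    // Candidates and references deliberately mix the authorities "foo" and
+    // "foo.com"; since decisions are made on relative paths, they should still
+    // match up.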
+    gce.candidates.add("hdfs://foo:6000/accumulo/tables/4/t0/F000.rf");
+    gce.candidates.add("hdfs://foo.com:6000/accumulo/tables/4/t0/F001.rf");
+    gce.candidates.add("hdfs://foo.com:6000/accumulo/tables/5/t0/F005.rf");
+
+    gce.addFileReference("4", null, "hdfs://foo.com:6000/accumulo/tables/4/t0/F000.rf");
+    gce.addFileReference("4", null, "hdfs://foo:6000/accumulo/tables/4/t0/F001.rf");
+    gce.addFileReference("4", null, "hdfs://foo.com:6000/accumulo/tables/4/t0//F002.rf");
+    gce.addFileReference("5", null, "hdfs://foo.com:6000/accumulo/tables/5/t0/F005.rf");
+
+    GarbageCollectionAlgorithm gca = new GarbageCollectionAlgorithm();
+
+    gca.collect(gce);
+    assertRemoved(gce);
+
+    gce.removeFileReference("4", null, "hdfs://foo.com:6000/accumulo/tables/4/t0/F000.rf");
+    gca.collect(gce);
+    assertRemoved(gce, "hdfs://foo:6000/accumulo/tables/4/t0/F000.rf");
+
+    gce.removeFileReference("4", null, "hdfs://foo.com:6000/accumulo/tables/4/t0/F002.rf");
+    gca.collect(gce);
+    assertRemoved(gce);
+
+    gce.removeFileReference("4", null, "hdfs://foo:6000/accumulo/tables/4/t0/F001.rf");
+    gca.collect(gce);
+    assertRemoved(gce, "hdfs://foo.com:6000/accumulo/tables/4/t0/F001.rf");
+
+    gce.candidates.add("hdfs://foo.com:6000/accumulo/tables/4/t0/F003.rf");
+    gce.candidates.add("hdfs://foo.com:6000/accumulo/tables/4/t0/F004.rf");
+    gca.collect(gce);
+    assertRemoved(gce, "hdfs://foo.com:6000/accumulo/tables/4/t0/F003.rf", "hdfs://foo.com:6000/accumulo/tables/4/t0/F004.rf");
+  }
+
+  @Test
+  public void testRelative() throws Exception {
+    TestGCE gce = new TestGCE();
+
+    gce.candidates.add("/4/t0/F000.rf");
+    gce.candidates.add("/4/t0/F002.rf");
+    gce.candidates.add("hdfs://foo.com:6000/accumulo/tables/4/t0/F001.rf");
+
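+    // Three spellings of the same file (tablet-relative, cross-table "../4/...",
+    // and fully qualified) must all be gone before the candidate can be deleted.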
+    gce.addFileReference("4", null, "/t0/F000.rf");
+    gce.addFileReference("4", null, "/t0/F001.rf");
+    gce.addFileReference("4", null, "/t0/F002.rf");
+    gce.addFileReference("5", null, "../4/t0/F000.rf");
+    gce.addFileReference("6", null, "hdfs://foo.com:6000/accumulo/tables/4/t0/F000.rf");
+
+    GarbageCollectionAlgorithm gca = new GarbageCollectionAlgorithm();
+
+    gca.collect(gce);
+    assertRemoved(gce);
+
+    List<String[]> refsToRemove = new ArrayList<String[]>();
+    refsToRemove.add(new String[] {"4", "/t0/F000.rf"});
+    refsToRemove.add(new String[] {"5", "../4/t0/F000.rf"});
+    refsToRemove.add(new String[] {"6", "hdfs://foo.com:6000/accumulo/tables/4/t0/F000.rf"});
+
+    Collections.shuffle(refsToRemove);
+
+    for (int i = 0; i < 2; i++) {
+      gce.removeFileReference(refsToRemove.get(i)[0], null, refsToRemove.get(i)[1]);
+      gca.collect(gce);
+      assertRemoved(gce);
+    }
+
+    gce.removeFileReference(refsToRemove.get(2)[0], null, refsToRemove.get(2)[1]);
+    gca.collect(gce);
+    assertRemoved(gce, "/4/t0/F000.rf");
+
+    gce.removeFileReference("4", null, "/t0/F001.rf");
+    gca.collect(gce);
+    assertRemoved(gce, "hdfs://foo.com:6000/accumulo/tables/4/t0/F001.rf");
+
+    // add absolute candidate for file that already has a relative candidate
+    gce.candidates.add("hdfs://foo.com:6000/accumulo/tables/4/t0/F002.rf");
+    gca.collect(gce);
+    assertRemoved(gce);
+
+    gce.removeFileReference("4", null, "/t0/F002.rf");
+    gca.collect(gce);
+    assertRemoved(gce, "hdfs://foo.com:6000/accumulo/tables/4/t0/F002.rf");
+
+    gca.collect(gce);
+    assertRemoved(gce, "/4/t0/F002.rf");
+  }
+
+  @Test
+  public void testBlip() throws Exception {
+    TestGCE gce = new TestGCE();
+
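+    // Nothing under a bulk-load-in-progress (blip) directory may be collected
+    // until its blip marker is removed.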
+    gce.candidates.add("/4/b-0");
+    gce.candidates.add("/4/b-0/F002.rf");
+    gce.candidates.add("hdfs://foo.com:6000/accumulo/tables/4/b-0/F001.rf");
+    gce.candidates.add("/5/b-0");
+    gce.candidates.add("/5/b-0/F002.rf");
+    gce.candidates.add("hdfs://foo.com:6000/accumulo/tables/5/b-0/F001.rf");
+
+    gce.blips.add("/4/b-0");
+    gce.blips.add("hdfs://foo.com:6000/accumulo/tables/5/b-0");
+
+    GarbageCollectionAlgorithm gca = new GarbageCollectionAlgorithm();
+
+    gca.collect(gce);
+    assertRemoved(gce);
+
+    gce.blips.remove("/4/b-0");
+
+    gca.collect(gce);
+    assertRemoved(gce, "/4/b-0", "/4/b-0/F002.rf", "hdfs://foo.com:6000/accumulo/tables/4/b-0/F001.rf");
+
+    gce.blips.remove("hdfs://foo.com:6000/accumulo/tables/5/b-0");
+
+    gca.collect(gce);
+    assertRemoved(gce, "/5/b-0", "/5/b-0/F002.rf", "hdfs://foo.com:6000/accumulo/tables/5/b-0/F001.rf");
+
+    gca.collect(gce);
+    assertRemoved(gce);
+  }
+
+  @Test
+  public void testDirectories() throws Exception {
+    TestGCE gce = new TestGCE();
+
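+    // A tablet directory candidate survives while any dir reference or any file
+    // reference beneath it, relative or absolute, is still present.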
+    gce.candidates.add("/4/t-0");
+    gce.candidates.add("/4/t-0/F002.rf");
+    gce.candidates.add("hdfs://foo.com:6000/accumulo/tables/5/t-0");
+    gce.candidates.add("/6/t-0");
+    gce.candidates.add("hdfs://foo:6000/accumulo/tables/7/t-0/");
+    gce.candidates.add("/8/t-0");
+    gce.candidates.add("hdfs://foo:6000/accumulo/tables/9/t-0");
+    gce.candidates.add("/a/t-0");
+    gce.candidates.add("hdfs://foo:6000/accumulo/tables/b/t-0");
+    gce.candidates.add("/c/t-0");
+    gce.candidates.add("hdfs://foo:6000/accumulo/tables/d/t-0");
+
+    gce.addDirReference("4", null, "/t-0");
+    gce.addDirReference("5", null, "/t-0");
+    gce.addDirReference("6", null, "hdfs://foo.com:6000/accumulo/tables/6/t-0");
+    gce.addDirReference("7", null, "hdfs://foo.com:6000/accumulo/tables/7/t-0");
+
+    gce.addFileReference("8", "m", "/t-0/F00.rf");
+    gce.addFileReference("9", "m", "/t-0/F00.rf");
+
+    gce.addFileReference("a", "m", "hdfs://foo.com:6000/accumulo/tables/a/t-0/F00.rf");
+    gce.addFileReference("b", "m", "hdfs://foo.com:6000/accumulo/tables/b/t-0/F00.rf");
+
+    gce.addFileReference("e", "m", "../c/t-0/F00.rf");
+    gce.addFileReference("f", "m", "../d/t-0/F00.rf");
+
+    GarbageCollectionAlgorithm gca = new GarbageCollectionAlgorithm();
+
+    gca.collect(gce);
+    assertRemoved(gce, "/4/t-0/F002.rf");
+
+    gce.removeDirReference("5", null);
+    gca.collect(gce);
+    assertRemoved(gce, "hdfs://foo.com:6000/accumulo/tables/5/t-0");
+
+    gce.removeDirReference("4", null);
+    gca.collect(gce);
+    assertRemoved(gce, "/4/t-0");
+
+    gce.removeDirReference("6", null);
+    gce.removeDirReference("7", null);
+    gca.collect(gce);
+    assertRemoved(gce, "/6/t-0", "hdfs://foo:6000/accumulo/tables/7/t-0/");
+
+    gce.removeFileReference("8", "m", "/t-0/F00.rf");
+    gce.removeFileReference("9", "m", "/t-0/F00.rf");
+    gce.removeFileReference("a", "m", "hdfs://foo.com:6000/accumulo/tables/a/t-0/F00.rf");
+    gce.removeFileReference("b", "m", "hdfs://foo.com:6000/accumulo/tables/b/t-0/F00.rf");
+    gce.removeFileReference("e", "m", "../c/t-0/F00.rf");
+    gce.removeFileReference("f", "m", "../d/t-0/F00.rf");
+    gca.collect(gce);
+    assertRemoved(gce, "/8/t-0", "hdfs://foo:6000/accumulo/tables/9/t-0", "/a/t-0", "hdfs://foo:6000/accumulo/tables/b/t-0", "/c/t-0",
+        "hdfs://foo:6000/accumulo/tables/d/t-0");
+
+    gca.collect(gce);
+    assertRemoved(gce);
+  }
+
+  private void badRefTest(String ref) {
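+    // A reference that cannot be resolved back to a table path is expected to
+    // fail the run rather than be silently skipped.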
+    TestGCE gce = new TestGCE();
+
+    gce.candidates.add("/4/t-0/F002.rf");
+
+    gce.addFileReference("4", "m", ref);
+
+    GarbageCollectionAlgorithm gca = new GarbageCollectionAlgorithm();
+    try {
+      gca.collect(gce);
+    } catch (RuntimeException e) {
+      throw e;
+    } catch (Exception e) {
+      throw new RuntimeException(e);
+    }
+  }
+
+  @Test(expected = IllegalArgumentException.class)
+  public void testBadFileRef1() {
+    badRefTest("/F00.rf");
+  }
+
+  @Test(expected = IllegalArgumentException.class)
+  public void testBadFileRef2() {
+    badRefTest("../F00.rf");
+  }
+
+  @Test(expected = IllegalArgumentException.class)
+  public void testBadFileRef3() {
+    badRefTest("hdfs://foo.com:6000/accumulo/F00.rf");
+  }
+
+  @Test(expected = IllegalArgumentException.class)
+  public void testBadFileRef4() {
+    badRefTest("hdfs://foo.com:6000/accumulo/tbls/5/F00.rf");
+  }
+
+  @Test(expected = RuntimeException.class)
+  public void testBadFileRef5() {
+    badRefTest("F00.rf");
+  }
+
+  @Test(expected = IllegalArgumentException.class)
+  public void testBadFileRef6() {
+    badRefTest("/accumulo/tbls/5/F00.rf");
+  }
+
+  @Test
+  public void test() throws Exception {
+
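+    // Scenarios carried over from the removed TestConfirmDeletes, replayed
+    // against the extracted algorithm.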
+    GarbageCollectionAlgorithm gca = new GarbageCollectionAlgorithm();
+
+    TestGCE gce = new TestGCE();
+    gce.candidates.add("/1636/default_tablet");
+    gce.addDirReference("1636", null, "/default_tablet");
+    gca.collect(gce);
+    assertRemoved(gce);
+
+    gce.candidates.clear();
+    gce.candidates.add("/1636/default_tablet/someFile");
+    gca.collect(gce);
+    assertRemoved(gce, "/1636/default_tablet/someFile");
+
+    gce.addFileReference("1636", null, "/default_tablet/someFile");
+    gce.candidates.add("/1636/default_tablet/someFile");
+    gca.collect(gce);
+    assertRemoved(gce);
+
+    // have an indirect file reference
+    gce = new TestGCE();
+    gce.addFileReference("1636", null, "../9/default_tablet/someFile");
+    gce.addDirReference("1636", null, "/default_tablet");
+    gce.candidates.add("/9/default_tablet/someFile");
+    gca.collect(gce);
+    assertRemoved(gce);
+
+    // have an indirect file reference and a directory candidate
+    gce.candidates.clear();
+    gce.candidates.add("/9/default_tablet");
+    gca.collect(gce);
+    assertRemoved(gce);
+
+    gce.candidates.clear();
+    gce.candidates.add("/9/default_tablet");
+    gce.candidates.add("/9/default_tablet/someFile");
+    gca.collect(gce);
+    assertRemoved(gce);
+
+    gce = new TestGCE();
+    gce.blips.add("/1636/b-0001");
+    gce.candidates.add("/1636/b-0001/I0000");
+    gca.collect(gce);
+    assertRemoved(gce);
+  }
+
+  @Test
+  public void testDeleteTableDir() throws Exception {
+    GarbageCollectionAlgorithm gca = new GarbageCollectionAlgorithm();
+
+    TestGCE gce = new TestGCE();
+
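+    // Table 4 still exists and table 7's directory is still referenced, so only
+    // tables 5 and 6 should have their directories considered for removal.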
+    gce.tableIds.add("4");
+
+    gce.candidates.add("/4/t-0");
+    gce.candidates.add("/4/t-0/F002.rf");
+    gce.candidates.add("hdfs://foo.com:6000/accumulo/tables/5/t-0");
+    gce.candidates.add("/6/t-0");
+    gce.candidates.add("hdfs://foo:6000/accumulo/tables/7/t-0/");
+
+    gce.addDirReference("7", null, "hdfs://foo.com:6000/accumulo/tables/7/t-0");
+
+    gca.collect(gce);
+
+    HashSet<String> tids = new HashSet<String>();
+    tids.add("5");
+    tids.add("6");
+
+    Assert.assertEquals(tids.size(), gce.tablesDirsToDelete.size());
+    Assert.assertTrue(tids.containsAll(gce.tablesDirsToDelete));
+  }
+}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/26a8f5f5/server/src/test/java/org/apache/accumulo/server/gc/TestConfirmDeletes.java
----------------------------------------------------------------------
diff --git a/server/src/test/java/org/apache/accumulo/server/gc/TestConfirmDeletes.java b/server/src/test/java/org/apache/accumulo/server/gc/TestConfirmDeletes.java
deleted file mode 100644
index 36938d7..0000000
--- a/server/src/test/java/org/apache/accumulo/server/gc/TestConfirmDeletes.java
+++ /dev/null
@@ -1,139 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.server.gc;
-
-import java.util.Arrays;
-import java.util.Map.Entry;
-import java.util.SortedSet;
-import java.util.TreeSet;
-
-import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.BatchWriterConfig;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.Instance;
-import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.client.mock.MockInstance;
-import org.apache.accumulo.core.client.security.tokens.PasswordToken;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.metadata.MetadataTable;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.core.security.Credentials;
-import org.apache.accumulo.server.fs.VolumeManager;
-import org.apache.accumulo.server.fs.VolumeManagerImpl;
-import org.apache.accumulo.server.gc.SimpleGarbageCollector.Opts;
-import org.apache.hadoop.io.Text;
-import org.junit.Assert;
-import org.junit.Test;
-
-/**
- * 
- */
-public class TestConfirmDeletes {
-  
-  SortedSet<String> newSet(String... s) {
-    SortedSet<String> result = new TreeSet<String>(Arrays.asList(s));
-    return result;
-  }
-  
-  @Test
-  public void test() throws Exception {
-    
-    // have a directory reference
-    String metadata[] = {"1636< last:3353986642a66eb 192.168.117.9:9997", "1636< srv:dir /default_tablet", "1636< srv:flush 2",
-        "1636< srv:lock tservers/192.168.117.9:9997/zlock-0000000000$3353986642a66eb", "1636< srv:time M1328505870023", "1636< ~tab:~pr \0"};
-    String deletes[] = {"~del/1636/default_tablet"};
-    
-    test1(metadata, deletes, 1, 0);
-    
-    // have no file reference
-    deletes = new String[] {"~del/1636/default_tablet/someFile"};
-    test1(metadata, deletes, 1, 1);
-    
-    // have a file reference
-    metadata = new String[] {"1636< file:/default_tablet/someFile 10,100", "1636< last:3353986642a66eb 192.168.117.9:9997", "1636< srv:dir /default_tablet",
-        "1636< srv:flush 2", "1636< srv:lock tservers/192.168.117.9:9997/zlock-0000000000$3353986642a66eb", "1636< srv:time M1328505870023",
-        "1636< ~tab:~pr \0"};
-    test1(metadata, deletes, 1, 0);
-    
-    // have an indirect file reference
-    deletes = new String[] {"~del/9/default_tablet/someFile"};
-    metadata = new String[] {"1636< file:../9/default_tablet/someFile 10,100", "1636< last:3353986642a66eb 192.168.117.9:9997",
-        "1636< srv:dir /default_tablet", "1636< srv:flush 2", "1636< srv:lock tservers/192.168.117.9:9997/zlock-0000000000$3353986642a66eb",
-        "1636< srv:time M1328505870023", "1636< ~tab:~pr \0"};
-    
-    test1(metadata, deletes, 1, 0);
-    
-    // have an indirect file reference and a directory candidate
-    deletes = new String[] {"~del/9/default_tablet"};
-    test1(metadata, deletes, 1, 0);
-    
-    deletes = new String[] {"~del/9/default_tablet", "~del/9/default_tablet/someFile"};
-    test1(metadata, deletes, 2, 0);
-    
-    deletes = new String[] {"~blip/1636/b-0001", "~del/1636/b-0001/I0000"};
-    test1(metadata, deletes, 1, 0);
-  }
-  
-  private void test1(String[] metadata, String[] deletes, int expectedInitial, int expected) throws Exception {
-    Credentials credentials = new Credentials("root", new PasswordToken(new byte[0]));
-    
-    Instance instance = new MockInstance();
-    VolumeManager fs = VolumeManagerImpl.getLocal();
-    
-    load(instance, metadata, deletes);
-    
-    SimpleGarbageCollector gc = new SimpleGarbageCollector(new Opts());
-    gc.init(fs, instance, credentials, false);
-    SortedSet<String> candidates = gc.getCandidates();
-    Assert.assertEquals(expectedInitial, candidates.size());
-    gc.confirmDeletes(candidates);
-    Assert.assertEquals(expected, candidates.size());
-  }
-  
-  private void load(Instance instance, String[] metadata, String[] deletes) throws Exception {
-    Credentials credentials = new Credentials("root", new PasswordToken(new byte[0]));
-    
-    Scanner scanner = instance.getConnector(credentials.getPrincipal(), credentials.getToken()).createScanner(MetadataTable.NAME, Authorizations.EMPTY);
-    int count = 0;
-    for (@SuppressWarnings("unused")
-    Entry<Key,Value> entry : scanner) {
-      count++;
-    }
-    
-    // ensure there is no data from previous test
-    Assert.assertEquals(0, count);
-    
-    Connector conn = instance.getConnector(credentials.getPrincipal(), credentials.getToken());
-    BatchWriter bw = conn.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
-    for (String line : metadata) {
-      String[] parts = line.split(" ");
-      String[] columnParts = parts[1].split(":");
-      Mutation m = new Mutation(parts[0]);
-      m.put(new Text(columnParts[0]), new Text(columnParts[1]), new Value(parts[2].getBytes()));
-      bw.addMutation(m);
-    }
-    
-    for (String line : deletes) {
-      Mutation m = new Mutation(line);
-      m.put("", "", "");
-      bw.addMutation(m);
-    }
-    bw.close();
-  }
-}

