accumulo-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From ctubb...@apache.org
Subject [19/54] [partial] ACCUMULO-658, ACCUMULO-656 Split server into separate modules
Date Fri, 01 Nov 2013 00:55:58 GMT
http://git-wip-us.apache.org/repos/asf/accumulo/blob/598821cd/server/src/main/java/org/apache/accumulo/server/constraints/MetadataConstraints.java
----------------------------------------------------------------------
diff --git a/server/src/main/java/org/apache/accumulo/server/constraints/MetadataConstraints.java b/server/src/main/java/org/apache/accumulo/server/constraints/MetadataConstraints.java
deleted file mode 100644
index ce5e5e4..0000000
--- a/server/src/main/java/org/apache/accumulo/server/constraints/MetadataConstraints.java
+++ /dev/null
@@ -1,315 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.server.constraints;
-
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.HashSet;
-import java.util.List;
-
-import org.apache.accumulo.core.Constants;
-import org.apache.accumulo.core.constraints.Constraint;
-import org.apache.accumulo.core.data.ColumnUpdate;
-import org.apache.accumulo.core.data.KeyExtent;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.metadata.MetadataTable;
-import org.apache.accumulo.core.metadata.schema.DataFileValue;
-import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection;
-import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.ChoppedColumnFamily;
-import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.ClonedColumnFamily;
-import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.DataFileColumnFamily;
-import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.LogColumnFamily;
-import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.ScanFileColumnFamily;
-import org.apache.accumulo.core.util.ColumnFQ;
-import org.apache.accumulo.core.zookeeper.ZooUtil;
-import org.apache.accumulo.fate.zookeeper.TransactionWatcher.Arbitrator;
-import org.apache.accumulo.server.client.HdfsZooInstance;
-import org.apache.accumulo.server.zookeeper.TransactionWatcher.ZooArbitrator;
-import org.apache.accumulo.server.zookeeper.ZooCache;
-import org.apache.accumulo.server.zookeeper.ZooLock;
-import org.apache.hadoop.io.Text;
-import org.apache.log4j.Logger;
-
-public class MetadataConstraints implements Constraint {
-  
-  private ZooCache zooCache = null;
-  private String zooRoot = null;
-  
-  private static final Logger log = Logger.getLogger(MetadataConstraints.class);
-  
-  private static boolean[] validTableNameChars = new boolean[256];
-  
-  {
-    for (int i = 0; i < 256; i++) {
-      validTableNameChars[i] = ((i >= 'a' && i <= 'z') || (i >= '0' && i <= '9')) || i == '!';
-    }
-  }
-  
-  private static final HashSet<ColumnFQ> validColumnQuals = new HashSet<ColumnFQ>(Arrays.asList(new ColumnFQ[] {
-      TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN, TabletsSection.TabletColumnFamily.OLD_PREV_ROW_COLUMN,
-      TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN, TabletsSection.TabletColumnFamily.SPLIT_RATIO_COLUMN, TabletsSection.ServerColumnFamily.TIME_COLUMN,
-      TabletsSection.ServerColumnFamily.LOCK_COLUMN, TabletsSection.ServerColumnFamily.FLUSH_COLUMN, TabletsSection.ServerColumnFamily.COMPACT_COLUMN}));
-  
-  private static final HashSet<Text> validColumnFams = new HashSet<Text>(Arrays.asList(new Text[] {TabletsSection.BulkFileColumnFamily.NAME,
-      LogColumnFamily.NAME, ScanFileColumnFamily.NAME, DataFileColumnFamily.NAME,
-      TabletsSection.CurrentLocationColumnFamily.NAME, TabletsSection.LastLocationColumnFamily.NAME, TabletsSection.FutureLocationColumnFamily.NAME,
-      ChoppedColumnFamily.NAME, ClonedColumnFamily.NAME}));
-  
-  private static boolean isValidColumn(ColumnUpdate cu) {
-    
-    if (validColumnFams.contains(new Text(cu.getColumnFamily())))
-      return true;
-    
-    if (validColumnQuals.contains(new ColumnFQ(cu)))
-      return true;
-    
-    return false;
-  }
-  
-  static private ArrayList<Short> addViolation(ArrayList<Short> lst, int violation) {
-    if (lst == null)
-      lst = new ArrayList<Short>();
-    lst.add((short) violation);
-    return lst;
-  }
-  
-  static private ArrayList<Short> addIfNotPresent(ArrayList<Short> lst, int intViolation) {
-    if (lst == null)
-      return addViolation(lst, intViolation);
-    short violation = (short) intViolation;
-    if (!lst.contains(violation))
-      return addViolation(lst, intViolation);
-    return lst;
-  }
-  
-  @Override
-  public List<Short> check(Environment env, Mutation mutation) {
-    
-    ArrayList<Short> violations = null;
-    
-    Collection<ColumnUpdate> colUpdates = mutation.getUpdates();
-    
-    // check the row, it should contains at least one ; or end with <
-    boolean containsSemiC = false;
-    
-    byte[] row = mutation.getRow();
-    
-    // always allow rows that fall within reserved areas
-    if (row.length > 0 && row[0] == '~')
-      return null;
-    if (row.length > 2 && row[0] == '!' && row[1] == '!' && row[2] == '~')
-      return null;
-    
-    for (byte b : row) {
-      if (b == ';') {
-        containsSemiC = true;
-      }
-      
-      if (b == ';' || b == '<')
-        break;
-      
-      if (!validTableNameChars[0xff & b]) {
-        violations = addIfNotPresent(violations, 4);
-      }
-    }
-    
-    if (!containsSemiC) {
-      // see if last row char is <
-      if (row.length == 0 || row[row.length - 1] != '<') {
-        violations = addIfNotPresent(violations, 4);
-      }
-    } else {
-      if (row.length == 0) {
-        violations = addIfNotPresent(violations, 4);
-      }
-    }
-    
-    if (row.length > 0 && row[0] == '!') {
-      if (row.length < 3 || row[1] != '0' || (row[2] != '<' && row[2] != ';')) {
-        violations = addIfNotPresent(violations, 4);
-      }
-    }
-    
-    // ensure row is not less than Constants.METADATA_TABLE_ID
-    if (new Text(row).compareTo(new Text(MetadataTable.ID)) < 0) {
-      violations = addViolation(violations, 5);
-    }
-    
-    boolean checkedBulk = false;
-    
-    for (ColumnUpdate columnUpdate : colUpdates) {
-      Text columnFamily = new Text(columnUpdate.getColumnFamily());
-      
-      if (columnUpdate.isDeleted()) {
-        if (!isValidColumn(columnUpdate)) {
-          violations = addViolation(violations, 2);
-        }
-        continue;
-      }
-      
-      if (columnUpdate.getValue().length == 0 && !columnFamily.equals(ScanFileColumnFamily.NAME)) {
-        violations = addViolation(violations, 6);
-      }
-      
-      if (columnFamily.equals(DataFileColumnFamily.NAME)) {
-        try {
-          DataFileValue dfv = new DataFileValue(columnUpdate.getValue());
-          
-          if (dfv.getSize() < 0 || dfv.getNumEntries() < 0) {
-            violations = addViolation(violations, 1);
-          }
-        } catch (NumberFormatException nfe) {
-          violations = addViolation(violations, 1);
-        } catch (ArrayIndexOutOfBoundsException aiooe) {
-          violations = addViolation(violations, 1);
-        }
-      } else if (columnFamily.equals(ScanFileColumnFamily.NAME)) {
-        
-      } else if (columnFamily.equals(TabletsSection.BulkFileColumnFamily.NAME)) {
-        if (!columnUpdate.isDeleted() && !checkedBulk) {
-          // splits, which also write the time reference, are allowed to write this reference even when
-          // the transaction is not running because the other half of the tablet is holding a reference
-          // to the file.
-          boolean isSplitMutation = false;
-          // When a tablet is assigned, it re-writes the metadata. It should probably only update the location information,
-          // but it writes everything. We allow it to re-write the bulk information if it is setting the location.
-          // See ACCUMULO-1230.
-          boolean isLocationMutation = false;
-          
-          HashSet<Text> dataFiles = new HashSet<Text>();
-          HashSet<Text> loadedFiles = new HashSet<Text>();
-          
-          String tidString = new String(columnUpdate.getValue());
-          int otherTidCount = 0;
-          
-          for (ColumnUpdate update : mutation.getUpdates()) {
-            if (new ColumnFQ(update).equals(TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN)) {
-              isSplitMutation = true;
-            } else if (new Text(update.getColumnFamily()).equals(TabletsSection.CurrentLocationColumnFamily.NAME)) {
-              isLocationMutation = true;
-            } else if (new Text(update.getColumnFamily()).equals(DataFileColumnFamily.NAME)) {
-              dataFiles.add(new Text(update.getColumnQualifier()));
-            } else if (new Text(update.getColumnFamily()).equals(TabletsSection.BulkFileColumnFamily.NAME)) {
-              loadedFiles.add(new Text(update.getColumnQualifier()));
-              
-              if (!new String(update.getValue()).equals(tidString)) {
-                otherTidCount++;
-              }
-            }
-          }
-          
-          if (!isSplitMutation && !isLocationMutation) {
-            long tid = Long.parseLong(tidString);
-            
-            try {
-              if (otherTidCount > 0 || !dataFiles.equals(loadedFiles) || !getArbitrator().transactionAlive(Constants.BULK_ARBITRATOR_TYPE, tid)) {
-                violations = addViolation(violations, 8);
-              }
-            } catch (Exception ex) {
-              violations = addViolation(violations, 8);
-            }
-          }
-          
-          checkedBulk = true;
-        }
-      } else {
-        if (!isValidColumn(columnUpdate)) {
-          violations = addViolation(violations, 2);
-        } else if (new ColumnFQ(columnUpdate).equals(TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN) && columnUpdate.getValue().length > 0
-            && (violations == null || !violations.contains((short) 4))) {
-          KeyExtent ke = new KeyExtent(new Text(mutation.getRow()), (Text) null);
-          
-          Text per = KeyExtent.decodePrevEndRow(new Value(columnUpdate.getValue()));
-          
-          boolean prevEndRowLessThanEndRow = per == null || ke.getEndRow() == null || per.compareTo(ke.getEndRow()) < 0;
-          
-          if (!prevEndRowLessThanEndRow) {
-            violations = addViolation(violations, 3);
-          }
-        } else if (new ColumnFQ(columnUpdate).equals(TabletsSection.ServerColumnFamily.LOCK_COLUMN)) {
-          if (zooCache == null) {
-            zooCache = new ZooCache();
-          }
-          
-          if (zooRoot == null) {
-            zooRoot = ZooUtil.getRoot(HdfsZooInstance.getInstance());
-          }
-          
-          boolean lockHeld = false;
-          String lockId = new String(columnUpdate.getValue());
-          
-          try {
-            lockHeld = ZooLock.isLockHeld(zooCache, new ZooUtil.LockID(zooRoot, lockId));
-          } catch (Exception e) {
-            log.debug("Failed to verify lock was held " + lockId + " " + e.getMessage());
-          }
-          
-          if (!lockHeld) {
-            violations = addViolation(violations, 7);
-          }
-        }
-        
-      }
-    }
-    
-    if (violations != null) {
-      log.debug("violating metadata mutation : " + new String(mutation.getRow()));
-      for (ColumnUpdate update : mutation.getUpdates()) {
-        log.debug(" update: " + new String(update.getColumnFamily()) + ":" + new String(update.getColumnQualifier()) + " value "
-            + (update.isDeleted() ? "[delete]" : new String(update.getValue())));
-      }
-    }
-    
-    return violations;
-  }
-  
-  protected Arbitrator getArbitrator() {
-    return new ZooArbitrator();
-  }
-  
-  @Override
-  public String getViolationDescription(short violationCode) {
-    switch (violationCode) {
-      case 1:
-        return "data file size must be a non-negative integer";
-      case 2:
-        return "Invalid column name given.";
-      case 3:
-        return "Prev end row is greater than or equal to end row.";
-      case 4:
-        return "Invalid metadata row format";
-      case 5:
-        return "Row can not be less than " + MetadataTable.ID;
-      case 6:
-        return "Empty values are not allowed for any " + MetadataTable.NAME + " column";
-      case 7:
-        return "Lock not held in zookeeper by writer";
-      case 8:
-        return "Bulk load transaction no longer running";
-    }
-    return null;
-  }
-  
-  @Override
-  protected void finalize() {
-    if (zooCache != null)
-      zooCache.clear();
-  }
-  
-}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/598821cd/server/src/main/java/org/apache/accumulo/server/constraints/SystemConstraint.java
----------------------------------------------------------------------
diff --git a/server/src/main/java/org/apache/accumulo/server/constraints/SystemConstraint.java b/server/src/main/java/org/apache/accumulo/server/constraints/SystemConstraint.java
deleted file mode 100644
index d11d4a4..0000000
--- a/server/src/main/java/org/apache/accumulo/server/constraints/SystemConstraint.java
+++ /dev/null
@@ -1,21 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.server.constraints;
-
-import org.apache.accumulo.core.constraints.Constraint;
-
-public abstract class SystemConstraint implements Constraint {}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/598821cd/server/src/main/java/org/apache/accumulo/server/constraints/UnsatisfiableConstraint.java
----------------------------------------------------------------------
diff --git a/server/src/main/java/org/apache/accumulo/server/constraints/UnsatisfiableConstraint.java b/server/src/main/java/org/apache/accumulo/server/constraints/UnsatisfiableConstraint.java
deleted file mode 100644
index 86b6863..0000000
--- a/server/src/main/java/org/apache/accumulo/server/constraints/UnsatisfiableConstraint.java
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.server.constraints;
-
-import java.util.Collections;
-import java.util.List;
-
-import org.apache.accumulo.core.constraints.Constraint;
-import org.apache.accumulo.core.data.Mutation;
-
-public class UnsatisfiableConstraint implements Constraint {
-  
-  private List<Short> violations;
-  private String vDesc;
-  
-  public UnsatisfiableConstraint(short vcode, String violationDescription) {
-    this.violations = Collections.unmodifiableList(Collections.singletonList(vcode));
-    this.vDesc = violationDescription;
-  }
-  
-  public List<Short> check(Environment env, Mutation mutation) {
-    return violations;
-  }
-  
-  public String getViolationDescription(short violationCode) {
-    return vDesc;
-  }
-  
-}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/598821cd/server/src/main/java/org/apache/accumulo/server/data/ServerColumnUpdate.java
----------------------------------------------------------------------
diff --git a/server/src/main/java/org/apache/accumulo/server/data/ServerColumnUpdate.java b/server/src/main/java/org/apache/accumulo/server/data/ServerColumnUpdate.java
deleted file mode 100644
index af992a6..0000000
--- a/server/src/main/java/org/apache/accumulo/server/data/ServerColumnUpdate.java
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.server.data;
-
-import org.apache.accumulo.core.data.ColumnUpdate;
-
-public class ServerColumnUpdate extends ColumnUpdate {
-  
-  ServerMutation parent;
-
-  public ServerColumnUpdate(byte[] cf, byte[] cq, byte[] cv, boolean hasts, long ts, boolean deleted, byte[] val, ServerMutation serverMutation) {
-    super(cf, cq, cv, hasts, ts, deleted, val);
-    parent = serverMutation;
-  }
-
-  public long getTimestamp() {
-    if (hasTimestamp())
-      return super.getTimestamp();
-    return parent.getSystemTimestamp();
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/598821cd/server/src/main/java/org/apache/accumulo/server/data/ServerConditionalMutation.java
----------------------------------------------------------------------
diff --git a/server/src/main/java/org/apache/accumulo/server/data/ServerConditionalMutation.java b/server/src/main/java/org/apache/accumulo/server/data/ServerConditionalMutation.java
deleted file mode 100644
index 7487ba3..0000000
--- a/server/src/main/java/org/apache/accumulo/server/data/ServerConditionalMutation.java
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.server.data;
-
-import java.util.List;
-
-import org.apache.accumulo.core.client.impl.Translator;
-import org.apache.accumulo.core.data.thrift.TCondition;
-import org.apache.accumulo.core.data.thrift.TConditionalMutation;
-
-/**
- * 
- */
-public class ServerConditionalMutation extends ServerMutation {
-  
-  public static class TCMTranslator extends Translator<TConditionalMutation,ServerConditionalMutation> {
-    @Override
-    public ServerConditionalMutation translate(TConditionalMutation input) {
-      return new ServerConditionalMutation(input);
-    }
-  }
-  
-  public static final TCMTranslator TCMT = new TCMTranslator();
-
-  private long cmid;
-  private List<TCondition> conditions;
-  
-  public ServerConditionalMutation(TConditionalMutation input) {
-    super(input.mutation);
-
-    this.cmid = input.id;
-    this.conditions = input.conditions;
-  }
-
-  public long getID() {
-    return cmid;
-  }
-  
-  public List<TCondition> getConditions() {
-    return conditions;
-  }
-  
-
-}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/598821cd/server/src/main/java/org/apache/accumulo/server/data/ServerMutation.java
----------------------------------------------------------------------
diff --git a/server/src/main/java/org/apache/accumulo/server/data/ServerMutation.java b/server/src/main/java/org/apache/accumulo/server/data/ServerMutation.java
deleted file mode 100644
index 389cc33..0000000
--- a/server/src/main/java/org/apache/accumulo/server/data/ServerMutation.java
+++ /dev/null
@@ -1,103 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.server.data;
-
-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.IOException;
-
-import org.apache.accumulo.core.data.ColumnUpdate;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.thrift.TMutation;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.io.WritableUtils;
-
-/**
- * Mutation that holds system time as computed by the tablet server when not provided by the user.
- */
-public class ServerMutation extends Mutation {
-  private long systemTime = 0l;
-  
-  public ServerMutation(TMutation tmutation) {
-    super(tmutation);
-  }
-
-  public ServerMutation(Text key) {
-    super(key);
-  }
-
-  public ServerMutation() {
-  }
-
-  protected void droppingOldTimestamp(long ts) {
-    this.systemTime = ts;
-  }
-
-  @Override
-  public void readFields(DataInput in) throws IOException {
-    super.readFields(in);
-    // new format writes system time with the mutation
-    if (getSerializedFormat() == SERIALIZED_FORMAT.VERSION2)
-      systemTime = WritableUtils.readVLong(in);
-  }
-  
-  @Override
-  public void write(DataOutput out) throws IOException {
-    super.write(out);
-    WritableUtils.writeVLong(out, systemTime);
-  }
-
-  public void setSystemTimestamp(long v) {
-    this.systemTime = v;
-  }
-  
-  public long getSystemTimestamp() {
-    return this.systemTime;
-  }
-
-  @Override
-  protected ColumnUpdate newColumnUpdate(byte[] cf, byte[] cq, byte[] cv, boolean hasts, long ts, boolean deleted, byte[] val) {
-    return new ServerColumnUpdate(cf, cq, cv, hasts, ts, deleted, val, this);
-  }
-
-  @Override
-  public long estimatedMemoryUsed() {
-    return super.estimatedMemoryUsed() + 8;
-  }
-
-  @Override
-  public boolean equals(Object o) {
-    if (o == this) {
-      return true;
-    }
-    if (o == null || o.getClass() != ServerMutation.class) {
-      return false;
-    }
-    ServerMutation sm = (ServerMutation) o;
-    if (sm.systemTime != systemTime) {
-      return false;
-    }
-    return super.equals(o);
-  }
-
-  @Override
-  public int hashCode() {
-    int result = super.hashCode();
-    result = 31 * result + (int) (systemTime & 0xffffffff);
-    return result;
-  }
-}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/598821cd/server/src/main/java/org/apache/accumulo/server/fate/Admin.java
----------------------------------------------------------------------
diff --git a/server/src/main/java/org/apache/accumulo/server/fate/Admin.java b/server/src/main/java/org/apache/accumulo/server/fate/Admin.java
deleted file mode 100644
index dd27193..0000000
--- a/server/src/main/java/org/apache/accumulo/server/fate/Admin.java
+++ /dev/null
@@ -1,88 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.server.fate;
-
-import java.util.ArrayList;
-import java.util.List;
-
-import org.apache.accumulo.core.Constants;
-import org.apache.accumulo.core.cli.Help;
-import org.apache.accumulo.core.client.Instance;
-import org.apache.accumulo.core.zookeeper.ZooUtil;
-import org.apache.accumulo.fate.AdminUtil;
-import org.apache.accumulo.fate.ZooStore;
-import org.apache.accumulo.fate.zookeeper.IZooReaderWriter;
-import org.apache.accumulo.server.client.HdfsZooInstance;
-import org.apache.accumulo.server.master.Master;
-import org.apache.accumulo.server.zookeeper.ZooReaderWriter;
-
-import com.beust.jcommander.JCommander;
-import com.beust.jcommander.Parameter;
-import com.beust.jcommander.Parameters;
-
-/**
- * A utility to administer FATE operations
- */
-public class Admin {
-  
-  static class TxOpts {
-    @Parameter(description = "<txid>", required = true)
-    List<String> args = new ArrayList<String>();
-  }
-  
-  @Parameters(commandDescription = "Stop an existing FATE by transaction id")
-  static class FailOpts extends TxOpts {}
-  
-  @Parameters(commandDescription = "Delete an existing FATE by transaction id")
-  static class DeleteOpts extends TxOpts {}
-  
-  @Parameters(commandDescription = "List the existing FATE transactions")
-  static class PrintOpts {}
-  
-  public static void main(String[] args) throws Exception {
-    Help opts = new Help();
-    JCommander jc = new JCommander(opts);
-    jc.setProgramName(Admin.class.getName());
-    jc.addCommand("fail", new FailOpts());
-    jc.addCommand("delete", new DeleteOpts());
-    jc.addCommand("print", new PrintOpts());
-    jc.parse(args);
-    if (opts.help || jc.getParsedCommand() == null) {
-      jc.usage();
-      System.exit(1);
-    }
-    
-    System.err.printf("This tool has been deprecated%nFATE administration now available within 'accumulo shell'%n$ fate fail <txid>... | delete <txid>... | print [<txid>...]%n%n");
-    
-    AdminUtil<Master> admin = new AdminUtil<Master>();
-    
-    Instance instance = HdfsZooInstance.getInstance();
-    String path = ZooUtil.getRoot(instance) + Constants.ZFATE;
-    String masterPath = ZooUtil.getRoot(instance) + Constants.ZMASTER_LOCK;
-    IZooReaderWriter zk = ZooReaderWriter.getRetryingInstance();
-    ZooStore<Master> zs = new ZooStore<Master>(path, zk);
-    
-    if (jc.getParsedCommand().equals("fail")) {
-      admin.prepFail(zs, zk, masterPath, args[1]);
-    } else if (jc.getParsedCommand().equals("delete")) {
-      admin.prepDelete(zs, zk, masterPath, args[1]);
-      admin.deleteLocks(zs, zk, ZooUtil.getRoot(instance) + Constants.ZTABLE_LOCKS, args[1]);
-    } else if (jc.getParsedCommand().equals("print")) {
-      admin.print(zs, zk, ZooUtil.getRoot(instance) + Constants.ZTABLE_LOCKS);
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/598821cd/server/src/main/java/org/apache/accumulo/server/fs/FileRef.java
----------------------------------------------------------------------
diff --git a/server/src/main/java/org/apache/accumulo/server/fs/FileRef.java b/server/src/main/java/org/apache/accumulo/server/fs/FileRef.java
deleted file mode 100644
index b4bea4a..0000000
--- a/server/src/main/java/org/apache/accumulo/server/fs/FileRef.java
+++ /dev/null
@@ -1,79 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.server.fs;
-
-import org.apache.accumulo.core.data.Key;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.io.Text;
-
-
-/**
- * This is a glue object, to convert short file references to long references.
- * The !METADATA table may contain old relative file references.  This class keeps 
- * track of the short file reference, so it can be removed properly from the !METADATA table.
- */
-public class FileRef implements Comparable<FileRef> {
-  String metaReference;  // something like ../2/d-00000/A00001.rf
-  Path fullReference;  // something like hdfs://nn:9001/accumulo/tables/2/d-00000/A00001.rf
-  
-  public FileRef(VolumeManager fs, Key key) {
-    metaReference = key.getColumnQualifier().toString();
-    fullReference = fs.getFullPath(key);
-  }
-  
-  public FileRef(String metaReference, Path fullReference) {
-    this.metaReference = metaReference;
-    this.fullReference = fullReference;
-  }
-  
-  public FileRef(String path) {
-    this.metaReference = path;
-    this.fullReference = new Path(path);
-  }
-  
-  public String toString() {
-    return fullReference.toString();
-  }
-  
-  public Path path() {
-    return fullReference;
-  }
-  
-  public Text meta() {
-    return new Text(metaReference);
-  }
-
-  @Override
-  public int compareTo(FileRef o) {
-    return path().compareTo(o.path());
-  }
-
-  @Override
-  public int hashCode() {
-    return path().hashCode();
-  }
-
-  @Override
-  public boolean equals(Object obj) {
-    if (obj instanceof FileRef) {
-      return compareTo((FileRef)obj) == 0;
-    }
-    return false;
-  }
-  
-  
-}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/598821cd/server/src/main/java/org/apache/accumulo/server/fs/RandomVolumeChooser.java
----------------------------------------------------------------------
diff --git a/server/src/main/java/org/apache/accumulo/server/fs/RandomVolumeChooser.java b/server/src/main/java/org/apache/accumulo/server/fs/RandomVolumeChooser.java
deleted file mode 100644
index 2760b07..0000000
--- a/server/src/main/java/org/apache/accumulo/server/fs/RandomVolumeChooser.java
+++ /dev/null
@@ -1,29 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.server.fs;
-
-import java.util.Random;
-
-public class RandomVolumeChooser implements VolumeChooser {
-  Random random = new Random();
-  
-  @Override
-  public String choose(String[] options) {
-    return options[random.nextInt(options.length)];
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/598821cd/server/src/main/java/org/apache/accumulo/server/fs/VolumeChooser.java
----------------------------------------------------------------------
diff --git a/server/src/main/java/org/apache/accumulo/server/fs/VolumeChooser.java b/server/src/main/java/org/apache/accumulo/server/fs/VolumeChooser.java
deleted file mode 100644
index 8713c97..0000000
--- a/server/src/main/java/org/apache/accumulo/server/fs/VolumeChooser.java
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.server.fs;
-
-
/**
 * Strategy for selecting which volume a new file should be created on.
 */
public interface VolumeChooser {
  // Returns one element of options; implementations decide the selection policy.
  String choose(String[] options);
}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/598821cd/server/src/main/java/org/apache/accumulo/server/fs/VolumeManager.java
----------------------------------------------------------------------
diff --git a/server/src/main/java/org/apache/accumulo/server/fs/VolumeManager.java b/server/src/main/java/org/apache/accumulo/server/fs/VolumeManager.java
deleted file mode 100644
index b7787c9..0000000
--- a/server/src/main/java/org/apache/accumulo/server/fs/VolumeManager.java
+++ /dev/null
@@ -1,140 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.server.fs;
-
-import java.io.IOException;
-import java.util.Map;
-
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.server.ServerConstants;
-import org.apache.hadoop.fs.ContentSummary;
-import org.apache.hadoop.fs.FSDataInputStream;
-import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-
/**
 * A wrapper around multiple hadoop FileSystem objects, which are assumed to be different volumes.
 * This also concentrates a bunch of meta-operations like waiting for SAFE_MODE, and closing WALs.
 */
public interface VolumeManager {

  /**
   * The kinds of top-level content a path may refer to, each mapped to its
   * directory name within a volume.
   */
  public static enum FileType {
    TABLE(ServerConstants.TABLE_DIR), WAL(ServerConstants.WAL_DIR), RECOVERY(ServerConstants.RECOVERY_DIR);

    // directory name within a volume for this file type
    private String dir;

    FileType(String dir) {
      this.dir = dir;
    }

    public String getDirectory() {
      return dir;
    }
  }

  // close all of the underlying FileSystems
  void close() throws IOException;

  // the mechanism by which the master ensures that tablet servers can no longer write to a WAL
  // (lease recovery / forced close of a possibly-open file)
  boolean closePossiblyOpenFile(Path path) throws IOException;

  // forward to the appropriate FileSystem object
  FSDataOutputStream create(Path dest) throws IOException;

  // forward to the appropriate FileSystem object, with explicit overwrite flag
  FSDataOutputStream create(Path path, boolean b) throws IOException;

  // forward to the appropriate FileSystem object, with buffer size, replication, and block size
  FSDataOutputStream create(Path path, boolean b, int int1, short int2, long long1) throws IOException;

  // create a file, but only if it doesn't exist
  boolean createNewFile(Path writable) throws IOException;

  // create a file which can be sync'd to disk
  FSDataOutputStream createSyncable(Path logPath, int buffersize, short replication, long blockSize) throws IOException;

  // delete a file (non-recursive)
  boolean delete(Path path) throws IOException;

  // delete a directory and anything under it
  boolean deleteRecursively(Path path) throws IOException;

  // forward to the appropriate FileSystem object
  boolean exists(Path path) throws IOException;

  // forward to the appropriate FileSystem object
  FileStatus getFileStatus(Path path) throws IOException;

  // find the appropriate FileSystem object given a path
  FileSystem getFileSystemByPath(Path path);

  // get a mapping of volume name to FileSystem
  Map<String, ? extends FileSystem> getFileSystems();

  // return the item in options that is in the same volume as source, or null if none match
  Path matchingFileSystem(Path source, String[] options);


  // forward to the appropriate FileSystem object
  FileStatus[] listStatus(Path path) throws IOException;

  // forward to the appropriate FileSystem object
  boolean mkdirs(Path directory) throws IOException;

  // forward to the appropriate FileSystem object
  FSDataInputStream open(Path path) throws IOException;

  // forward to the appropriate FileSystem object; throws an exception if the paths are in different volumes
  boolean rename(Path path, Path newPath) throws IOException;

  // forward to the appropriate FileSystem object's trash facility
  boolean moveToTrash(Path sourcePath) throws IOException;

  // forward to the appropriate FileSystem object
  short getDefaultReplication(Path logPath);

  // forward to the appropriate FileSystem object
  boolean isFile(Path path) throws IOException;

  // all volumes are ready to provide service (not in SafeMode, for example)
  boolean isReady() throws IOException;

  // ambiguous references to files (no volume information in the path) go here
  FileSystem getDefaultVolume();

  // forward to the appropriate FileSystem object
  FileStatus[] globStatus(Path path) throws IOException;

  // Convert a file or directory !METADATA reference into a path
  Path getFullPath(Key key);

  // Convert a possibly-relative table file path into a fully qualified path
  Path getFullPath(String tableId, String path);

  // Given a filename, figure out the qualified path given multiple namespaces
  Path getFullPath(FileType fileType, String fileName) throws IOException;

  // forward to the appropriate FileSystem object
  ContentSummary getContentSummary(Path dir) throws IOException;

  // decide on which of the given locations to create a new file
  String choose(String[] options);

}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/598821cd/server/src/main/java/org/apache/accumulo/server/fs/VolumeManagerImpl.java
----------------------------------------------------------------------
diff --git a/server/src/main/java/org/apache/accumulo/server/fs/VolumeManagerImpl.java b/server/src/main/java/org/apache/accumulo/server/fs/VolumeManagerImpl.java
deleted file mode 100644
index 39afe75..0000000
--- a/server/src/main/java/org/apache/accumulo/server/fs/VolumeManagerImpl.java
+++ /dev/null
@@ -1,474 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.server.fs;
-
-import java.io.FileNotFoundException;
-import java.io.IOException;
-import java.lang.reflect.Field;
-import java.lang.reflect.Method;
-import java.net.URI;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.EnumSet;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
-
-import org.apache.accumulo.core.conf.AccumuloConfiguration;
-import org.apache.accumulo.core.conf.DefaultConfiguration;
-import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.KeyExtent;
-import org.apache.accumulo.core.file.FileUtil;
-import org.apache.accumulo.core.util.CachedConfiguration;
-import org.apache.accumulo.server.ServerConstants;
-import org.apache.accumulo.server.client.HdfsZooInstance;
-import org.apache.accumulo.server.conf.ServerConfiguration;
-import org.apache.commons.lang.NotImplementedException;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.ContentSummary;
-import org.apache.hadoop.fs.FSDataInputStream;
-import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.LocalFileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.Trash;
-import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.DistributedFileSystem;
-import org.apache.hadoop.util.Progressable;
-import org.apache.log4j.Logger;
-
-public class VolumeManagerImpl implements VolumeManager {
-
-  private static final Logger log = Logger.getLogger(VolumeManagerImpl.class);
-
-  Map<String,? extends FileSystem> volumes;
-  String defaultVolume;
-  AccumuloConfiguration conf;
-  VolumeChooser chooser;
-
-  protected VolumeManagerImpl(Map<String,? extends FileSystem> volumes, String defaultVolume, AccumuloConfiguration conf) {
-    this.volumes = volumes;
-    this.defaultVolume = defaultVolume;
-    this.conf = conf;
-    ensureSyncIsEnabled();
-    chooser = Property.createInstanceFromPropertyName(conf, Property.GENERAL_VOLUME_CHOOSER, VolumeChooser.class, new RandomVolumeChooser());
-  }
-
-  public static org.apache.accumulo.server.fs.VolumeManager getLocal() throws IOException {
-    return new VolumeManagerImpl(Collections.singletonMap("", FileSystem.getLocal(CachedConfiguration.getInstance())), "",
-        DefaultConfiguration.getDefaultConfiguration());
-  }
-
-  @Override
-  public void close() throws IOException {
-    IOException ex = null;
-    for (FileSystem fs : volumes.values()) {
-      try {
-        fs.close();
-      } catch (IOException e) {
-        ex = e;
-      }
-    }
-    if (ex != null) {
-      throw ex;
-    }
-  }
-
-  @Override
-  public boolean closePossiblyOpenFile(Path path) throws IOException {
-    FileSystem fs = getFileSystemByPath(path);
-    if (fs instanceof DistributedFileSystem) {
-      DistributedFileSystem dfs = (DistributedFileSystem) fs;
-      try {
-        return dfs.recoverLease(path);
-      } catch (FileNotFoundException ex) {
-        throw ex;
-      }
-    } else if (fs instanceof LocalFileSystem) {
-      // ignore
-    } else {
-      throw new IllegalStateException("Don't know how to recover a lease for " + fs.getClass().getName());
-    }
-    fs.append(path).close();
-    log.info("Recovered lease on " + path.toString() + " using append");
-    return true;
-  }
-
-  @Override
-  public FSDataOutputStream create(Path path) throws IOException {
-    FileSystem fs = getFileSystemByPath(path);
-    return fs.create(path);
-  }
-
-  @Override
-  public FSDataOutputStream create(Path path, boolean overwrite) throws IOException {
-    FileSystem fs = getFileSystemByPath(path);
-    return fs.create(path, overwrite);
-  }
-
-  private static long correctBlockSize(Configuration conf, long blockSize) {
-    if (blockSize <= 0)
-      blockSize = conf.getLong("dfs.block.size", 67108864);
-
-    int checkSum = conf.getInt("io.bytes.per.checksum", 512);
-    blockSize -= blockSize % checkSum;
-    blockSize = Math.max(blockSize, checkSum);
-    return blockSize;
-  }
-
-  private static int correctBufferSize(Configuration conf, int bufferSize) {
-    if (bufferSize <= 0)
-      bufferSize = conf.getInt("io.file.buffer.size", 4096);
-    return bufferSize;
-  }
-
-  @Override
-  public FSDataOutputStream create(Path path, boolean overwrite, int bufferSize, short replication, long blockSize) throws IOException {
-    FileSystem fs = getFileSystemByPath(path);
-    if (bufferSize == 0) {
-      fs.getConf().getInt("io.file.buffer.size", 4096);
-    }
-    return fs.create(path, overwrite, bufferSize, replication, correctBlockSize(fs.getConf(), blockSize));
-  }
-
-  @Override
-  public boolean createNewFile(Path path) throws IOException {
-    FileSystem fs = getFileSystemByPath(path);
-    return fs.createNewFile(path);
-  }
-
-  @Override
-  public FSDataOutputStream createSyncable(Path logPath, int bufferSize, short replication, long blockSize) throws IOException {
-    FileSystem fs = getFileSystemByPath(logPath);
-    blockSize = correctBlockSize(fs.getConf(), blockSize);
-    bufferSize = correctBufferSize(fs.getConf(), bufferSize);
-    try {
-      // This...
-      // EnumSet<CreateFlag> set = EnumSet.of(CreateFlag.SYNC_BLOCK, CreateFlag.CREATE);
-      // return fs.create(logPath, FsPermission.getDefault(), set, buffersize, replication, blockSize, null);
-      // Becomes this:
-      Class<?> createFlags = Class.forName("org.apache.hadoop.fs.CreateFlag");
-      List<Enum<?>> flags = new ArrayList<Enum<?>>();
-      if (createFlags.isEnum()) {
-        for (Object constant : createFlags.getEnumConstants()) {
-          if (constant.toString().equals("SYNC_BLOCK")) {
-            flags.add((Enum<?>) constant);
-            log.debug("Found synch enum " + constant);
-          }
-          if (constant.toString().equals("CREATE")) {
-            flags.add((Enum<?>) constant);
-            log.debug("Found CREATE enum " + constant);
-          }
-        }
-      }
-      Object set = EnumSet.class.getMethod("of", java.lang.Enum.class, java.lang.Enum.class).invoke(null, flags.get(0), flags.get(1));
-      log.debug("CreateFlag set: " + set);
-      Method create = fs.getClass().getMethod("create", Path.class, FsPermission.class, EnumSet.class, Integer.TYPE, Short.TYPE, Long.TYPE, Progressable.class);
-      log.debug("creating " + logPath + " with SYNCH_BLOCK flag");
-      return (FSDataOutputStream) create.invoke(fs, logPath, FsPermission.getDefault(), set, bufferSize, replication, blockSize, null);
-    } catch (ClassNotFoundException ex) {
-      // Expected in hadoop 1.0
-      return fs.create(logPath, true, bufferSize, replication, blockSize);
-    } catch (Exception ex) {
-      log.debug(ex, ex);
-      return fs.create(logPath, true, bufferSize, replication, blockSize);
-    }
-  }
-
-  @Override
-  public boolean delete(Path path) throws IOException {
-    return getFileSystemByPath(path).delete(path, false);
-  }
-
-  @Override
-  public boolean deleteRecursively(Path path) throws IOException {
-    return getFileSystemByPath(path).delete(path, true);
-  }
-
-  protected void ensureSyncIsEnabled() {
-    for (Entry<String,? extends FileSystem> entry : getFileSystems().entrySet()) {
-      final String volumeName = entry.getKey();
-      final FileSystem fs = entry.getValue();
-      
-      if (fs instanceof DistributedFileSystem) {
-        final String DFS_DURABLE_SYNC = "dfs.durable.sync", DFS_SUPPORT_APPEND = "dfs.support.append";
-        final String ticketMessage = "See ACCUMULO-623 and ACCUMULO-1637 for more details.";
-        // Check to make sure that we have proper defaults configured
-        try {
-          // If the default is off (0.20.205.x or 1.0.x)
-          DFSConfigKeys configKeys = new DFSConfigKeys();
-          
-          // Can't use the final constant itself as Java will inline it at compile time
-          Field dfsSupportAppendDefaultField = configKeys.getClass().getField("DFS_SUPPORT_APPEND_DEFAULT");
-          boolean dfsSupportAppendDefaultValue = dfsSupportAppendDefaultField.getBoolean(configKeys);
-          
-          if (!dfsSupportAppendDefaultValue) {
-            // See if the user did the correct override
-            if (!fs.getConf().getBoolean(DFS_SUPPORT_APPEND, false)) {
-              String msg = "Accumulo requires that dfs.support.append to true. " + ticketMessage;
-              log.fatal(msg);
-              throw new RuntimeException(msg);
-            }
-          }
-        } catch (NoSuchFieldException e) {
-          // If we can't find DFSConfigKeys.DFS_SUPPORT_APPEND_DEFAULT, the user is running
-          // 1.1.x or 1.2.x. This is ok, though, as, by default, these versions have append/sync enabled.
-        } catch (Exception e) {
-          log.warn("Error while checking for " + DFS_SUPPORT_APPEND + " on volume " + volumeName + ". The user should ensure that Hadoop is configured to properly supports append and sync. " + ticketMessage, e);
-        }
-        
-        // If either of these parameters are configured to be false, fail.
-        // This is a sign that someone is writing bad configuration.
-        if (!fs.getConf().getBoolean(DFS_SUPPORT_APPEND, true) || !fs.getConf().getBoolean(DFS_DURABLE_SYNC, true)) {
-          String msg = "Accumulo requires that " + DFS_SUPPORT_APPEND + " and " + DFS_DURABLE_SYNC + " not be configured as false. " + ticketMessage;
-          log.fatal(msg);
-          throw new RuntimeException(msg);
-        }
-        
-        try {
-          // if this class exists
-          Class.forName("org.apache.hadoop.fs.CreateFlag");
-          // we're running hadoop 2.0, 1.1
-          if (!fs.getConf().getBoolean("dfs.datanode.synconclose", false)) {
-            log.warn("dfs.datanode.synconclose set to false: data loss is possible on system reset or power loss on volume " + volumeName);
-          }
-        } catch (ClassNotFoundException ex) {
-          // hadoop 1.0
-        }
-      }
-    }
-
-  }
-
-  @Override
-  public boolean exists(Path path) throws IOException {
-    return getFileSystemByPath(path).exists(path);
-  }
-
-  @Override
-  public FileStatus getFileStatus(Path path) throws IOException {
-    return getFileSystemByPath(path).getFileStatus(path);
-  }
-
-  @Override
-  public FileSystem getFileSystemByPath(Path path) {
-    if (path.toString().contains(":")) {
-      try {
-        return path.getFileSystem(CachedConfiguration.getInstance());
-      } catch (IOException ex) {
-        throw new RuntimeException(ex);
-      }
-    }
-
-    return volumes.get(defaultVolume);
-  }
-
-  @Override
-  public Map<String,? extends FileSystem> getFileSystems() {
-    return volumes;
-  }
-
-  @Override
-  public FileStatus[] listStatus(Path path) throws IOException {
-    return getFileSystemByPath(path).listStatus(path);
-  }
-
-  @Override
-  public boolean mkdirs(Path path) throws IOException {
-    return getFileSystemByPath(path).mkdirs(path);
-  }
-
-  @Override
-  public FSDataInputStream open(Path path) throws IOException {
-    return getFileSystemByPath(path).open(path);
-  }
-
-  @Override
-  public boolean rename(Path path, Path newPath) throws IOException {
-    FileSystem source = getFileSystemByPath(path);
-    FileSystem dest = getFileSystemByPath(newPath);
-    if (source != dest) {
-      throw new NotImplementedException("Cannot rename files across volumes: " + path + " -> " + newPath);
-    }
-    return source.rename(path, newPath);
-  }
-
-  @Override
-  public boolean moveToTrash(Path path) throws IOException {
-    FileSystem fs = getFileSystemByPath(path);
-    Trash trash = new Trash(fs, fs.getConf());
-    return trash.moveToTrash(path);
-  }
-
-  @Override
-  public short getDefaultReplication(Path path) {
-    @SuppressWarnings("deprecation")
-    short rep = getFileSystemByPath(path).getDefaultReplication();
-    return rep;
-  }
-
-  @Override
-  public boolean isFile(Path path) throws IOException {
-    return getFileSystemByPath(path).isFile(path);
-  }
-
-  public static VolumeManager get() throws IOException {
-    AccumuloConfiguration conf = ServerConfiguration.getSystemConfiguration(HdfsZooInstance.getInstance());
-    return get(conf);
-  }
-
-  static private final String DEFAULT = "";
-
-  public static VolumeManager get(AccumuloConfiguration conf) throws IOException {
-    Map<String,FileSystem> fileSystems = new HashMap<String,FileSystem>();
-    Configuration hadoopConf = CachedConfiguration.getInstance();
-    fileSystems.put(DEFAULT, FileUtil.getFileSystem(hadoopConf, conf));
-    String ns = conf.get(Property.INSTANCE_VOLUMES);
-    if (ns != null && !ns.isEmpty()) {
-      for (String space : ns.split(",")) {
-        if (space.equals(DEFAULT))
-          throw new IllegalArgumentException();
-
-        if (space.contains(":")) {
-          fileSystems.put(space, new Path(space).getFileSystem(hadoopConf));
-        } else {
-          fileSystems.put(space, FileSystem.get(hadoopConf));
-        }
-      }
-    }
-    return new VolumeManagerImpl(fileSystems, DEFAULT, conf);
-  }
-
-  @Override
-  public boolean isReady() throws IOException {
-    for (FileSystem fs : getFileSystems().values()) {
-      if (!(fs instanceof DistributedFileSystem))
-        continue;
-      DistributedFileSystem dfs = (DistributedFileSystem) fs;
-      // So this: if (!dfs.setSafeMode(SafeModeAction.SAFEMODE_GET))
-      // Becomes this:
-      Class<?> safeModeAction;
-      try {
-        // hadoop 2.0
-        safeModeAction = Class.forName("org.apache.hadoop.hdfs.protocol.HdfsConstants$SafeModeAction");
-      } catch (ClassNotFoundException ex) {
-        // hadoop 1.0
-        try {
-          safeModeAction = Class.forName("org.apache.hadoop.hdfs.protocol.FSConstants$SafeModeAction");
-        } catch (ClassNotFoundException e) {
-          throw new RuntimeException("Cannot figure out the right class for Constants");
-        }
-      }
-      Object get = null;
-      for (Object obj : safeModeAction.getEnumConstants()) {
-        if (obj.toString().equals("SAFEMODE_GET"))
-          get = obj;
-      }
-      if (get == null) {
-        throw new RuntimeException("cannot find SAFEMODE_GET");
-      }
-      try {
-        Method setSafeMode = dfs.getClass().getMethod("setSafeMode", safeModeAction);
-        boolean inSafeMode = (Boolean) setSafeMode.invoke(dfs, get);
-        if (inSafeMode) {
-          return false;
-        }
-      } catch (Exception ex) {
-        throw new RuntimeException("cannot find method setSafeMode");
-      }
-    }
-    return true;
-  }
-
-  @Override
-  public FileSystem getDefaultVolume() {
-    return volumes.get(defaultVolume);
-  }
-
-  @Override
-  public FileStatus[] globStatus(Path pathPattern) throws IOException {
-    return getFileSystemByPath(pathPattern).globStatus(pathPattern);
-  }
-
-  @Override
-  public Path getFullPath(Key key) {
-    // TODO sanity check col fam
-    String relPath = key.getColumnQualifierData().toString();
-    byte[] tableId = KeyExtent.tableOfMetadataRow(key.getRow());
-    return getFullPath(new String(tableId), relPath);
-  }
-
-  @Override
-  public Path matchingFileSystem(Path source, String[] options) {
-    URI uri1 = source.toUri();
-    for (String option : options) {
-      URI uri3 = URI.create(option);
-      if (uri1.getScheme().equals(uri3.getScheme())) {
-        String a1 = uri1.getAuthority();
-        String a2 = uri3.getAuthority();
-        if (a1 == a2 || (a1 != null && a1.equals(a2)))
-          return new Path(option);
-      }
-    }
-    return null;
-  }
-
-  @Override
-  public Path getFullPath(String tableId, String path) {
-    if (path.contains(":"))
-      return new Path(path);
-    
-    if (path.startsWith("../"))
-      path = path.substring(2);
-    else if (path.startsWith("/"))
-      path = "/" + tableId + path;
-    else
-      throw new IllegalArgumentException("Unexpected path prefix " + path);
-    
-    return getFullPath(FileType.TABLE, path);
-  }
-  
-  @Override
-  public Path getFullPath(FileType fileType, String path) {
-    if (path.contains(":"))
-      return new Path(path);
-    
-    // normalize the path
-    Path fullPath = new Path(ServerConstants.getDefaultBaseDir(), fileType.getDirectory());
-    if (path.startsWith("/"))
-      path = path.substring(1);
-    fullPath = new Path(fullPath, path);
-    
-    FileSystem fs = getFileSystemByPath(fullPath);
-    return fs.makeQualified(fullPath);
-  }
-
-  @Override
-  public ContentSummary getContentSummary(Path dir) throws IOException {
-    return getFileSystemByPath(dir).getContentSummary(dir);
-  }
-
-  @Override
-  public String choose(String[] options) {
-    return chooser.choose(options);
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/598821cd/server/src/main/java/org/apache/accumulo/server/gc/GarbageCollectWriteAheadLogs.java
----------------------------------------------------------------------
diff --git a/server/src/main/java/org/apache/accumulo/server/gc/GarbageCollectWriteAheadLogs.java b/server/src/main/java/org/apache/accumulo/server/gc/GarbageCollectWriteAheadLogs.java
deleted file mode 100644
index c6cb772..0000000
--- a/server/src/main/java/org/apache/accumulo/server/gc/GarbageCollectWriteAheadLogs.java
+++ /dev/null
@@ -1,310 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.server.gc;
-
-import java.io.FileNotFoundException;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.Set;
-import java.util.UUID;
-
-import org.apache.accumulo.core.Constants;
-import org.apache.accumulo.core.client.Instance;
-import org.apache.accumulo.core.conf.AccumuloConfiguration;
-import org.apache.accumulo.core.gc.thrift.GCStatus;
-import org.apache.accumulo.core.gc.thrift.GcCycleStats;
-import org.apache.accumulo.core.tabletserver.thrift.TabletClientService;
-import org.apache.accumulo.core.tabletserver.thrift.TabletClientService.Client;
-import org.apache.accumulo.core.util.AddressUtil;
-import org.apache.accumulo.core.util.ThriftUtil;
-import org.apache.accumulo.core.zookeeper.ZooUtil;
-import org.apache.accumulo.server.ServerConstants;
-import org.apache.accumulo.server.fs.VolumeManager;
-import org.apache.accumulo.server.fs.VolumeManager.FileType;
-import org.apache.accumulo.server.security.SystemCredentials;
-import org.apache.accumulo.server.util.MetadataTableUtil;
-import org.apache.accumulo.server.util.MetadataTableUtil.LogEntry;
-import org.apache.accumulo.server.zookeeper.ZooReaderWriter;
-import org.apache.accumulo.trace.instrument.Span;
-import org.apache.accumulo.trace.instrument.Trace;
-import org.apache.accumulo.trace.instrument.Tracer;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.Path;
-import org.apache.log4j.Logger;
-import org.apache.thrift.TException;
-import org.apache.zookeeper.KeeperException;
-
-import com.google.common.net.HostAndPort;
-
-/**
- * Garbage collector for write-ahead logs (WALs): scans the WAL directories for
- * candidate files, removes from the candidate set any WAL still referenced by
- * metadata log entries, then deletes the remainder (asking live tablet servers
- * to remove their own logs) along with unreferenced sorted-recovery copies.
- */
-public class GarbageCollectWriteAheadLogs {
-  private static final Logger log = Logger.getLogger(GarbageCollectWriteAheadLogs.class);
-  
-  private final Instance instance;
-  private final VolumeManager fs;
-  
-  // Whether deletions should go through the filesystem trash rather than being
-  // removed outright.
-  // NOTE(review): this field is never assigned, so it is always false and the
-  // moveToTrash() branches below are dead code -- the constructor's useTrash
-  // parameter appears to be dropped by mistake.
-  private boolean useTrash;
-  
-  GarbageCollectWriteAheadLogs(Instance instance, VolumeManager fs, boolean useTrash) throws IOException {
-    this.instance = instance;
-    this.fs = fs;
-  }
-  
-  /**
-   * Runs one WAL collection cycle, recording progress and counters in
-   * {@code status}. Exceptions are logged, not propagated.
-   */
-  public void collect(GCStatus status) {
-    
-    Span span = Trace.start("scanServers");
-    try {
-      
-      Set<Path> sortedWALogs = getSortedWALogs();
-      
-      status.currentLog.started = System.currentTimeMillis();
-      
-      Map<Path,String> fileToServerMap = new HashMap<Path,String>();
-      int count = scanServers(fileToServerMap);
-      long fileScanStop = System.currentTimeMillis();
-      log.info(String.format("Fetched %d files from %d servers in %.2f seconds", fileToServerMap.size(), count,
-          (fileScanStop - status.currentLog.started) / 1000.));
-      status.currentLog.candidates = fileToServerMap.size();
-      span.stop();
-      
-      span = Trace.start("removeMetadataEntries");
-      try {
-        count = removeMetadataEntries(fileToServerMap, sortedWALogs, status);
-      } catch (Exception ex) {
-        log.error("Unable to scan metadata table", ex);
-        return;
-      } finally {
-        span.stop();
-      }
-      
-      long logEntryScanStop = System.currentTimeMillis();
-      log.info(String.format("%d log entries scanned in %.2f seconds", count, (logEntryScanStop - fileScanStop) / 1000.));
-      
-      span = Trace.start("removeFiles");
-      Map<String,ArrayList<Path>> serverToFileMap = mapServersToFiles(fileToServerMap);
-      
-      count = removeFiles(serverToFileMap, sortedWALogs, status);
-      
-      long removeStop = System.currentTimeMillis();
-      log.info(String.format("%d total logs removed from %d servers in %.2f seconds", count, serverToFileMap.size(), (removeStop - logEntryScanStop) / 1000.));
-      status.currentLog.finished = removeStop;
-      status.lastLog = status.currentLog;
-      status.currentLog = new GcCycleStats();
-      span.stop();
-      
-    } catch (Exception e) {
-      log.error("exception occured while garbage collecting write ahead logs", e);
-    } finally {
-      // NOTE(review): on the success path the span was already stopped above,
-      // so this is a redundant second stop() on the same span.
-      span.stop();
-    }
-  }
-  
-  /**
-   * Returns true if a tablet server still holds its ZooKeeper lock at the
-   * given address; errs on the side of true when ZooKeeper cannot be read
-   * (only a NoNodeException is treated as "no lock").
-   */
-  boolean holdsLock(HostAndPort addr) {
-    try {
-      String zpath = ZooUtil.getRoot(instance) + Constants.ZTSERVERS + "/" + addr.toString();
-      List<String> children = ZooReaderWriter.getInstance().getChildren(zpath);
-      return !(children == null || children.isEmpty());
-    } catch (KeeperException.NoNodeException ex) {
-      return false;
-    } catch (Exception ex) {
-      log.debug(ex, ex);
-      return true;
-    }
-  }
-  
-  /**
-   * Deletes the unreferenced WALs: old-style logs and logs of offline servers
-   * are removed directly; live servers are asked (via thrift) to remove their
-   * own logs. Unreferenced sorted recovery logs are removed last.
-   */
-  private int removeFiles(Map<String,ArrayList<Path>> serverToFileMap, Set<Path> sortedWALogs, final GCStatus status) {
-    AccumuloConfiguration conf = instance.getConfiguration();
-    for (Entry<String,ArrayList<Path>> entry : serverToFileMap.entrySet()) {
-      if (entry.getKey().isEmpty()) {
-        // old-style log entry, just remove it
-        for (Path path : entry.getValue()) {
-          log.debug("Removing old-style WAL " + path);
-          try {
-            if (!useTrash || !fs.moveToTrash(path))
-              fs.deleteRecursively(path);
-            status.currentLog.deleted++;
-          } catch (FileNotFoundException ex) {
-            // ignored
-          } catch (IOException ex) {
-            log.error("Unable to delete wal " + path + ": " + ex);
-          }
-        }
-      } else {
-        HostAndPort address = AddressUtil.parseAddress(entry.getKey());
-        if (!holdsLock(address)) {
-          // server is gone; delete its logs ourselves
-          for (Path path : entry.getValue()) {
-            log.debug("Removing WAL for offline server " + path);
-            try {
-              if (!useTrash || !fs.moveToTrash(path))
-                fs.deleteRecursively(path);
-              status.currentLog.deleted++;
-            } catch (FileNotFoundException ex) {
-              // ignored
-            } catch (IOException ex) {
-              log.error("Unable to delete wal " + path + ": " + ex);
-            }
-          }
-          continue;
-        } else {
-          // server is live; ask it to remove the logs itself
-          Client tserver = null;
-          try {
-            tserver = ThriftUtil.getClient(new TabletClientService.Client.Factory(), address, conf);
-            tserver.removeLogs(Tracer.traceInfo(), SystemCredentials.get().toThrift(instance), paths2strings(entry.getValue()));
-            log.debug("deleted " + entry.getValue() + " from " + entry.getKey());
-            status.currentLog.deleted += entry.getValue().size();
-          } catch (TException e) {
-            log.warn("Error talking to " + address + ": " + e);
-          } finally {
-            if (tserver != null)
-              ThriftUtil.returnClient(tserver);
-          }
-        }
-      }
-    }
-    
-    for (Path swalog : sortedWALogs) {
-      log.debug("Removing sorted WAL " + swalog);
-      try {
-        if (!useTrash || !fs.moveToTrash(swalog)) {
-          fs.deleteRecursively(swalog);
-        }
-      } catch (FileNotFoundException ex) {
-        // ignored
-      } catch (IOException ioe) {
-        try {
-          // deletion may have raced with another remover; only complain if the file still exists
-          if (fs.exists(swalog)) {
-            log.error("Unable to delete sorted walog " + swalog + ": " + ioe);
-          }
-        } catch (IOException ex) {
-          log.error("Unable to check for the existence of " + swalog, ex);
-        }
-      }
-    }
-    
-    // NOTE(review): always returns 0 even though deletions are tallied in
-    // status.currentLog.deleted, so the caller's "%d total logs removed" log
-    // line always reports zero.
-    return 0;
-  }
-  
-  // Converts Paths to their String form for the thrift removeLogs() call.
-  private List<String> paths2strings(ArrayList<Path> paths) {
-    List<String> result = new ArrayList<String>(paths.size());
-    for (Path path : paths)
-      result.add(path.toString());
-    return result;
-  }
-  
-  // Inverts the file->server map into a server->list-of-files map.
-  private static Map<String,ArrayList<Path>> mapServersToFiles(Map<Path,String> fileToServerMap) {
-    Map<String,ArrayList<Path>> result = new HashMap<String,ArrayList<Path>>();
-    for (Entry<Path,String> fileServer : fileToServerMap.entrySet()) {
-      ArrayList<Path> files = result.get(fileServer.getValue());
-      if (files == null) {
-        files = new ArrayList<Path>();
-        result.put(fileServer.getValue(), files);
-      }
-      files.add(fileServer.getKey());
-    }
-    return result;
-  }
-  
-  /**
-   * Removes from both candidate sets every WAL still referenced by a metadata
-   * log entry, counting referenced logs in {@code status.currentLog.inUse}.
-   *
-   * @return the number of log-entry references scanned
-   */
-  private int removeMetadataEntries(Map<Path,String> fileToServerMap, Set<Path> sortedWALogs, GCStatus status) throws IOException, KeeperException,
-      InterruptedException {
-    int count = 0;
-    Iterator<LogEntry> iterator = MetadataTableUtil.getLogEntries(SystemCredentials.get());
-    while (iterator.hasNext()) {
-      for (String entry : iterator.next().logSet) {
-        // entries are "server/filename"; only the filename part locates the file
-        String parts[] = entry.split("/", 2);
-        String filename = parts[1];
-        Path path;
-        if (filename.contains(":"))
-          path = new Path(filename);
-        else
-          path = fs.getFullPath(FileType.WAL, filename);
-        
-        if (fileToServerMap.remove(path) != null)
-          status.currentLog.inUse++;
-        
-        sortedWALogs.remove(path);
-        
-        count++;
-      }
-    }
-    return count;
-  }
-
-  //TODO Remove deprecation warning suppression when Hadoop1 support is dropped
-  /**
-   * Scans the WAL directories, mapping each UUID-named WAL file to its server
-   * (old-style WALs that sit directly in the WAL root map to "").
-   *
-   * @return the number of distinct server entries seen
-   */
-  @SuppressWarnings("deprecation")
-  private int scanServers(Map<Path,String> fileToServerMap) throws Exception {
-    Set<String> servers = new HashSet<String>();
-    for (String walDir : ServerConstants.getWalDirs()) {
-      Path walRoot = new Path(walDir);
-      FileStatus[] listing = fs.listStatus(walRoot);
-      if (listing == null)
-        continue;
-      for (FileStatus status : listing) {
-        String server = status.getPath().getName();
-        servers.add(server);
-        if (status.isDir()) {
-          for (FileStatus file : fs.listStatus(new Path(walRoot, server))) {
-            if (isUUID(file.getPath().getName()))
-              fileToServerMap.put(file.getPath(), server);
-            else {
-              log.info("Ignoring file " + file.getPath() + " because it doesn't look like a uuid");
-            }
-          }
-        } else if (isUUID(server)) {
-          // old-style WAL are not under a directory
-          fileToServerMap.put(status.getPath(), "");
-        } else {
-          log.info("Ignoring file " + status.getPath() + " because it doesn't look like a uuid");
-        }
-      }
-    }
-    return servers.size();
-  }
-  
-  // Collects every UUID-named file under the recovery directories (sorted WAL copies).
-  private Set<Path> getSortedWALogs() throws IOException {
-    Set<Path> result = new HashSet<Path>();
-    
-    for (String dir : ServerConstants.getRecoveryDirs()) {
-      Path recoveryDir = new Path(dir);
-      
-      if (fs.exists(recoveryDir)) {
-        for (FileStatus status : fs.listStatus(recoveryDir)) {
-          if (isUUID(status.getPath().getName())) {
-            result.add(status.getPath());
-          } else {
-            log.debug("Ignoring file " + status.getPath() + " because it doesn't look like a uuid");
-          }
-        }
-      }
-    }
-    return result;
-  }
-  
-  // True if name parses as a UUID (WAL file names are UUIDs).
-  static private boolean isUUID(String name) {
-    try {
-      UUID.fromString(name);
-      return true;
-    } catch (IllegalArgumentException ex) {
-      return false;
-    }
-  }
-  
-}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/598821cd/server/src/main/java/org/apache/accumulo/server/gc/GarbageCollectionAlgorithm.java
----------------------------------------------------------------------
diff --git a/server/src/main/java/org/apache/accumulo/server/gc/GarbageCollectionAlgorithm.java b/server/src/main/java/org/apache/accumulo/server/gc/GarbageCollectionAlgorithm.java
deleted file mode 100644
index 325f1d9..0000000
--- a/server/src/main/java/org/apache/accumulo/server/gc/GarbageCollectionAlgorithm.java
+++ /dev/null
@@ -1,287 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.server.gc;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Locale;
-import java.util.Map.Entry;
-import java.util.Set;
-import java.util.SortedMap;
-import java.util.TreeMap;
-
-import org.apache.accumulo.core.Constants;
-import org.apache.accumulo.core.client.AccumuloException;
-import org.apache.accumulo.core.client.AccumuloSecurityException;
-import org.apache.accumulo.core.client.TableNotFoundException;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.KeyExtent;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection;
-import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.DataFileColumnFamily;
-import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.ScanFileColumnFamily;
-import org.apache.accumulo.server.ServerConstants;
-import org.apache.accumulo.trace.instrument.Span;
-import org.apache.accumulo.trace.instrument.Trace;
-import org.apache.hadoop.io.Text;
-import org.apache.log4j.Logger;
-
-/**
- * Core file garbage-collection algorithm: repeatedly fetches a batch of
- * deletion candidates from the {@link GarbageCollectionEnvironment}, removes
- * any candidate still referenced by the metadata table (or protected by a
- * bulk-load-in-progress flag), deletes the confirmed remainder, and cleans up
- * directories of deleted tables.
- */
-public class GarbageCollectionAlgorithm {
-
-  private static final Logger log = Logger.getLogger(GarbageCollectionAlgorithm.class);
-
-  /**
-   * Normalizes an absolute or relative file path to a relative form of
-   * {@code expectedLen} trailing components (tableId/tabletDir[/file]);
-   * {@code expectedLen} 0 accepts either 2 or 3 components.
-   *
-   * @throws IllegalArgumentException if the path does not match the expected shape
-   */
-  private String makeRelative(String path, int expectedLen) {
-    String relPath = path;
-
-    if (relPath.startsWith("../"))
-      relPath = relPath.substring(3);
-
-    while (relPath.endsWith("/"))
-      relPath = relPath.substring(0, relPath.length() - 1);
-
-    while (relPath.startsWith("/"))
-      relPath = relPath.substring(1);
-
-    String[] tokens = relPath.split("/");
-
-    // handle paths like a//b///c
-    boolean containsEmpty = false;
-    for (String token : tokens) {
-      if (token.equals("")) {
-        containsEmpty = true;
-        break;
-      }
-    }
-
-    if (containsEmpty) {
-      ArrayList<String> tmp = new ArrayList<String>();
-      for (String token : tokens) {
-        if (!token.equals("")) {
-          tmp.add(token);
-        }
-      }
-
-      tokens = tmp.toArray(new String[tmp.size()]);
-    }
-
-    if (tokens.length > 3) {
-      // more than 3 components only occurs for fully-qualified paths
-      if (!path.contains(":"))
-        throw new IllegalArgumentException(path);
-
-      if (tokens[tokens.length - 4].equals(ServerConstants.TABLE_DIR) && (expectedLen == 0 || expectedLen == 3)) {
-        relPath = tokens[tokens.length - 3] + "/" + tokens[tokens.length - 2] + "/" + tokens[tokens.length - 1];
-      } else if (tokens[tokens.length - 3].equals(ServerConstants.TABLE_DIR) && (expectedLen == 0 || expectedLen == 2)) {
-        relPath = tokens[tokens.length - 2] + "/" + tokens[tokens.length - 1];
-      } else {
-        throw new IllegalArgumentException(path);
-      }
-    } else if (tokens.length == 3 && (expectedLen == 0 || expectedLen == 3)) {
-      relPath = tokens[0] + "/" + tokens[1] + "/" + tokens[2];
-    } else if (tokens.length == 2 && (expectedLen == 0 || expectedLen == 2)) {
-      relPath = tokens[0] + "/" + tokens[1];
-    } else {
-      throw new IllegalArgumentException(path);
-    }
-
-    return relPath;
-  }
-
-  // Maps each candidate's normalized relative path back to its original form.
-  private SortedMap<String,String> makeRelative(Collection<String> candidates) {
-
-    SortedMap<String,String> ret = new TreeMap<String,String>();
-
-    for (String candidate : candidates) {
-      String relPath = makeRelative(candidate, 0);
-      ret.put(relPath, candidate);
-    }
-
-    return ret;
-  }
-
-  /**
-   * Removes from {@code candidateMap} every candidate that is still in use:
-   * files in a folder with a bulk-processing flag, files referenced by tablet
-   * data/scan entries, and tablet directories still pointed at by metadata.
-   */
-  private void confirmDeletes(GarbageCollectionEnvironment gce, SortedMap<String,String> candidateMap) throws TableNotFoundException, AccumuloException,
-      AccumuloSecurityException {
-    // only consult blip entries if some candidate could be a bulk-import file
-    boolean checkForBulkProcessingFiles = false;
-    Iterator<String> relativePaths = candidateMap.keySet().iterator();
-    while (!checkForBulkProcessingFiles && relativePaths.hasNext())
-      checkForBulkProcessingFiles |= relativePaths.next().toLowerCase(Locale.ENGLISH).contains(Constants.BULK_PREFIX);
-
-    if (checkForBulkProcessingFiles) {
-      Iterator<String> blipiter = gce.getBlipIterator();
-
-      // WARNING: This block is IMPORTANT
-      // You MUST REMOVE candidates that are in the same folder as a bulk
-      // processing flag!
-
-      while (blipiter.hasNext()) {
-        String blipPath = blipiter.next();
-        blipPath = makeRelative(blipPath, 2);
-
-        // candidateMap is sorted, so everything under the flagged folder is contiguous
-        Iterator<String> tailIter = candidateMap.tailMap(blipPath).keySet().iterator();
-
-        int count = 0;
-
-        while (tailIter.hasNext()) {
-          if (tailIter.next().startsWith(blipPath)) {
-            count++;
-            tailIter.remove();
-          } else {
-            break;
-          }
-        }
-
-        if (count > 0)
-          log.debug("Folder has bulk processing flag: " + blipPath);
-      }
-
-    }
-
-    Iterator<Entry<Key,Value>> iter = gce.getReferenceIterator();
-    while (iter.hasNext()) {
-      Entry<Key,Value> entry = iter.next();
-      Key key = entry.getKey();
-      Text cft = key.getColumnFamily();
-
-      if (cft.equals(DataFileColumnFamily.NAME) || cft.equals(ScanFileColumnFamily.NAME)) {
-        String cq = key.getColumnQualifier().toString();
-
-        String reference = cq;
-        if (cq.startsWith("/")) {
-          // table-relative reference: prepend the table id from the row
-          String tableID = new String(KeyExtent.tableOfMetadataRow(key.getRow()));
-          reference = "/" + tableID + cq;
-        } else if (!cq.contains(":") && !cq.startsWith("../")) {
-          throw new RuntimeException("Bad file reference " + cq);
-        }
-
-        reference = makeRelative(reference, 3);
-
-        // WARNING: This line is EXTREMELY IMPORTANT.
-        // You MUST REMOVE candidates that are still in use
-        if (candidateMap.remove(reference) != null)
-          log.debug("Candidate was still in use: " + reference);
-
-        // the containing tablet directory is also still in use
-        String dir = reference.substring(0, reference.lastIndexOf('/'));
-        if (candidateMap.remove(dir) != null)
-          // NOTE(review): logs "reference" though "dir" was the candidate
-          // removed here -- likely intended to log dir.
-          log.debug("Candidate was still in use: " + reference);
-
-      } else if (TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.hasColumns(key)) {
-        String tableID = new String(KeyExtent.tableOfMetadataRow(key.getRow()));
-        String dir = entry.getValue().toString();
-        if (!dir.contains(":")) {
-          if (!dir.startsWith("/"))
-            throw new RuntimeException("Bad directory " + dir);
-          dir = "/" + tableID + dir;
-        }
-
-        dir = makeRelative(dir, 2);
-
-        if (candidateMap.remove(dir) != null)
-          log.debug("Candidate was still in use: " + dir);
-      } else
-        throw new RuntimeException("Scanner over metadata table returned unexpected column : " + entry.getKey());
-    }
-  }
-
-  // Deletes the (now empty) directory of any table whose dirs were deleted and
-  // which no longer exists in ZooKeeper.
-  private void cleanUpDeletedTableDirs(GarbageCollectionEnvironment gce, SortedMap<String,String> candidateMap) throws IOException {
-    HashSet<String> tableIdsWithDeletes = new HashSet<String>();
-
-    // find the table ids that had dirs deleted
-    for (String delete : candidateMap.keySet()) {
-      String[] tokens = delete.split("/");
-      if (tokens.length == 2) {
-        // its a directory
-        String tableId = delete.split("/")[0];
-        tableIdsWithDeletes.add(tableId);
-      }
-    }
-
-    Set<String> tableIdsInZookeeper = gce.getTableIDs();
-
-    tableIdsWithDeletes.removeAll(tableIdsInZookeeper);
-
-    // tableIdsWithDeletes should now contain the set of deleted tables that had dirs deleted
-
-    for (String delTableId : tableIdsWithDeletes) {
-      gce.deleteTableDirIfEmpty(delTableId);
-    }
-
-  }
-
-  // Fetches the next batch of candidates under a trace span.
-  private List<String> getCandidates(GarbageCollectionEnvironment gce, String lastCandidate) throws TableNotFoundException, AccumuloException,
-      AccumuloSecurityException {
-    Span candidatesSpan = Trace.start("getCandidates");
-    List<String> candidates;
-    try {
-      candidates = gce.getCandidates(lastCandidate);
-    } finally {
-      candidatesSpan.stop();
-    }
-    return candidates;
-  }
-
-  // Runs confirmDeletes under a trace span.
-  private void confirmDeletesTrace(GarbageCollectionEnvironment gce, SortedMap<String,String> candidateMap) throws TableNotFoundException, AccumuloException,
-      AccumuloSecurityException {
-    Span confirmDeletesSpan = Trace.start("confirmDeletes");
-    try {
-      confirmDeletes(gce, candidateMap);
-    } finally {
-      confirmDeletesSpan.stop();
-    }
-  }
-
-  // Deletes the confirmed candidates under a trace span, then removes empty
-  // directories of deleted tables.
-  private void deleteConfirmed(GarbageCollectionEnvironment gce, SortedMap<String,String> candidateMap) throws IOException, AccumuloException,
-      AccumuloSecurityException, TableNotFoundException {
-    Span deleteSpan = Trace.start("deleteFiles");
-    try {
-      gce.delete(candidateMap);
-    } finally {
-      deleteSpan.stop();
-    }
-
-    cleanUpDeletedTableDirs(gce, candidateMap);
-  }
-
-  /**
-   * Runs the full collection loop, processing candidates in batches (resuming
-   * from the last candidate of each batch) until none remain.
-   */
-  public void collect(GarbageCollectionEnvironment gce) throws TableNotFoundException, AccumuloException, AccumuloSecurityException, IOException {
-
-    String lastCandidate = "";
-
-    while (true) {
-      List<String> candidates = getCandidates(gce, lastCandidate);
-
-      if (candidates.size() == 0)
-        break;
-      else
-        lastCandidate = candidates.get(candidates.size() - 1);
-
-      long origSize = candidates.size();
-      gce.incrementCandidatesStat(origSize);
-
-      SortedMap<String,String> candidateMap = makeRelative(candidates);
-
-      confirmDeletesTrace(gce, candidateMap);
-      // whatever confirmDeletes removed from the map was still in use
-      gce.incrementInUseStat(origSize - candidateMap.size());
-
-      deleteConfirmed(gce, candidateMap);
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/598821cd/server/src/main/java/org/apache/accumulo/server/gc/GarbageCollectionEnvironment.java
----------------------------------------------------------------------
diff --git a/server/src/main/java/org/apache/accumulo/server/gc/GarbageCollectionEnvironment.java b/server/src/main/java/org/apache/accumulo/server/gc/GarbageCollectionEnvironment.java
deleted file mode 100644
index 5052058..0000000
--- a/server/src/main/java/org/apache/accumulo/server/gc/GarbageCollectionEnvironment.java
+++ /dev/null
@@ -1,118 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.server.gc;
-
-import java.io.IOException;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.Set;
-import java.util.SortedMap;
-
-import org.apache.accumulo.core.client.AccumuloException;
-import org.apache.accumulo.core.client.AccumuloSecurityException;
-import org.apache.accumulo.core.client.TableNotFoundException;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.metadata.MetadataTable;
-import org.apache.accumulo.core.metadata.RootTable;
-import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.DataFileColumnFamily;
-import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.ScanFileColumnFamily;
-
-/**
- * Abstraction of the metadata-table, ZooKeeper, and filesystem operations
- * needed by {@link GarbageCollectionAlgorithm}, allowing the algorithm to be
- * exercised against different backing stores (e.g. in tests).
- */
-public interface GarbageCollectionEnvironment {
-
-  /**
-   * Return a list of paths to files and dirs which are candidates for deletion from a given table, {@link RootTable#NAME} or {@link MetadataTable#NAME}
-   * 
-   * @param continuePoint
-   *          A row to resume from if a previous invocation was stopped due to finding an extremely large number of candidates to remove which would have
-   *          exceeded memory limitations
-   * @return A collection of candidates files for deletion, may not be the complete collection of files for deletion at this point in time
-   * @throws TableNotFoundException
-   * @throws AccumuloException
-   * @throws AccumuloSecurityException
-   */
-  List<String> getCandidates(String continuePoint) throws TableNotFoundException, AccumuloException, AccumuloSecurityException;
-
-  /**
-   * Fetch the paths of all bulk loads in progress (blip) from a given table, {@link RootTable#NAME} or {@link MetadataTable#NAME}
-   * 
-   * @return An iterator over the paths of each bulk load currently in progress.
-   * @throws TableNotFoundException
-   * @throws AccumuloException
-   * @throws AccumuloSecurityException
-   */
-  Iterator<String> getBlipIterator() throws TableNotFoundException, AccumuloException, AccumuloSecurityException;
-
-  /**
-   * Fetches the references to files, {@link DataFileColumnFamily#NAME} or {@link ScanFileColumnFamily#NAME}, from tablets
-   * 
-   * @return An {@link Iterator} of {@link Entry}&lt;{@link Key}, {@link Value}&gt; which constitute a reference to a file.
-   * @throws TableNotFoundException
-   * @throws AccumuloException
-   * @throws AccumuloSecurityException
-   */
-  Iterator<Entry<Key,Value>> getReferenceIterator() throws TableNotFoundException, AccumuloException, AccumuloSecurityException;
-
-  /**
-   * Return the set of tableIDs for the given instance this GarbageCollector is running over
-   * 
-   * @return The valueSet for the table name to table id map.
-   */
-  Set<String> getTableIDs();
-
-  /**
-   * Delete the given files from the provided {@link Map} of relative path to absolute path for each file that should be deleted
-   * 
-   * @param candidateMap
-   *          A Map from relative path to absolute path for files to be deleted.
-   * @throws IOException
-   * @throws AccumuloSecurityException
-   * @throws AccumuloException
-   * @throws TableNotFoundException
-   */
-  void delete(SortedMap<String,String> candidateMap) throws IOException, AccumuloException, AccumuloSecurityException, TableNotFoundException;
-
-  /**
-   * Delete a table's directory if it is empty.
-   * 
-   * @param tableID
-   *          The id of the table whose directory we are to operate on
-   * @throws IOException
-   */
-  void deleteTableDirIfEmpty(String tableID) throws IOException;
-
-  /**
-   * Increment the number of candidates for deletion for the current garbage collection run
-   * 
-   * @param i
-   *          Value to increment the deletion candidates by
-   */
-  void incrementCandidatesStat(long i);
-
-  /**
-   * Increment the number of files still in use for the current garbage collection run
-   * 
-   * @param i
-   *          Value to increment the still-in-use count by.
-   */
-  void incrementInUseStat(long i);
-}


Mime
View raw message