accumulo-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From ctubb...@apache.org
Subject [13/54] [partial] ACCUMULO-658, ACCUMULO-656 Split server into separate modules
Date Fri, 01 Nov 2013 00:55:52 GMT
http://git-wip-us.apache.org/repos/asf/accumulo/blob/598821cd/server/src/main/java/org/apache/accumulo/server/master/tableOps/DeleteTable.java
----------------------------------------------------------------------
diff --git a/server/src/main/java/org/apache/accumulo/server/master/tableOps/DeleteTable.java b/server/src/main/java/org/apache/accumulo/server/master/tableOps/DeleteTable.java
deleted file mode 100644
index 20809d5..0000000
--- a/server/src/main/java/org/apache/accumulo/server/master/tableOps/DeleteTable.java
+++ /dev/null
@@ -1,242 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.server.master.tableOps;
-
-import java.io.IOException;
-import java.util.Arrays;
-import java.util.Map.Entry;
-
-import org.apache.accumulo.core.client.BatchScanner;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.IteratorSetting;
-import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.client.impl.Tables;
-import org.apache.accumulo.core.client.impl.thrift.TableOperation;
-import org.apache.accumulo.core.client.impl.thrift.ThriftSecurityException;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.KeyExtent;
-import org.apache.accumulo.core.data.Range;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.iterators.user.GrepIterator;
-import org.apache.accumulo.core.master.state.tables.TableState;
-import org.apache.accumulo.core.metadata.MetadataTable;
-import org.apache.accumulo.core.metadata.schema.MetadataSchema;
-import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.DataFileColumnFamily;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.fate.Repo;
-import org.apache.accumulo.server.ServerConstants;
-import org.apache.accumulo.server.fs.VolumeManager;
-import org.apache.accumulo.server.master.Master;
-import org.apache.accumulo.server.master.state.MetaDataTableScanner;
-import org.apache.accumulo.server.master.state.TabletLocationState;
-import org.apache.accumulo.server.master.state.TabletState;
-import org.apache.accumulo.server.master.state.tables.TableManager;
-import org.apache.accumulo.server.problems.ProblemReports;
-import org.apache.accumulo.server.security.AuditedSecurityOperation;
-import org.apache.accumulo.server.security.SystemCredentials;
-import org.apache.accumulo.server.util.MetadataTableUtil;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.io.Text;
-import org.apache.log4j.Logger;
-
-class CleanUp extends MasterRepo {
-  
-  final private static Logger log = Logger.getLogger(CleanUp.class);
-  
-  private static final long serialVersionUID = 1L;
-  
-  private String tableId;
-  
-  private long creationTime;
-  
-  private void readObject(java.io.ObjectInputStream in) throws IOException, ClassNotFoundException {
-    in.defaultReadObject();
-    
-    /*
-     * handle the case where we start executing on a new machine where the current time is in the past relative to the previous machine
-     * 
-     * if the new machine has time in the future, that will work ok w/ hasCycled
-     */
-    if (System.currentTimeMillis() < creationTime) {
-      creationTime = System.currentTimeMillis();
-    }
-    
-  }
-  
-  public CleanUp(String tableId) {
-    this.tableId = tableId;
-    creationTime = System.currentTimeMillis();
-  }
-  
-  @Override
-  public long isReady(long tid, Master master) throws Exception {
-    if (!master.hasCycled(creationTime)) {
-      return 50;
-    }
-    
-    boolean done = true;
-    Range tableRange = new KeyExtent(new Text(tableId), null, null).toMetadataRange();
-    Scanner scanner = master.getConnector().createScanner(MetadataTable.NAME, Authorizations.EMPTY);
-    MetaDataTableScanner.configureScanner(scanner, master);
-    scanner.setRange(tableRange);
-    
-    KeyExtent prevExtent = null;
-    for (Entry<Key,Value> entry : scanner) {
-      TabletLocationState locationState = MetaDataTableScanner.createTabletLocationState(entry.getKey(), entry.getValue());
-      if (!locationState.extent.isPreviousExtent(prevExtent)) {
-        log.debug("Still waiting for table to be deleted: " + tableId + " saw inconsistency" + prevExtent + " " + locationState.extent);
-        done = false;
-        break;
-      }
-      prevExtent = locationState.extent;
-      
-      TabletState state = locationState.getState(master.onlineTabletServers());
-      if (state.equals(TabletState.ASSIGNED) || state.equals(TabletState.HOSTED)) {
-        log.debug("Still waiting for table to be deleted: " + tableId + " locationState: " + locationState);
-        done = false;
-        break;
-      }
-    }
-    
-    if (!done)
-      return 50;
-    
-    return 0;
-  }
-  
-  @Override
-  public Repo<Master> call(long tid, Master master) throws Exception {
-    
-    master.clearMigrations(tableId);
-    
-    int refCount = 0;
-    
-    try {
-      // look for other tables that references this table's files
-      Connector conn = master.getConnector();
-      BatchScanner bs = conn.createBatchScanner(MetadataTable.NAME, Authorizations.EMPTY, 8);
-      try {
-        Range allTables = MetadataSchema.TabletsSection.getRange();
-        Range tableRange = MetadataSchema.TabletsSection.getRange(tableId);
-        Range beforeTable = new Range(allTables.getStartKey(), true, tableRange.getStartKey(), false);
-        Range afterTable = new Range(tableRange.getEndKey(), false, allTables.getEndKey(), true);
-        bs.setRanges(Arrays.asList(beforeTable, afterTable));
-        bs.fetchColumnFamily(DataFileColumnFamily.NAME);
-        IteratorSetting cfg = new IteratorSetting(40, "grep", GrepIterator.class);
-        GrepIterator.setTerm(cfg, "/" + tableId + "/");
-        bs.addScanIterator(cfg);
-        
-        for (Entry<Key,Value> entry : bs) {
-          if (entry.getKey().getColumnQualifier().toString().contains("/" + tableId + "/")) {
-            refCount++;
-          }
-        }
-      } finally {
-        bs.close();
-      }
-      
-    } catch (Exception e) {
-      refCount = -1;
-      log.error("Failed to scan " + MetadataTable.NAME + " looking for references to deleted table " + tableId, e);
-    }
-    
-    // remove metadata table entries
-    try {
-      // Intentionally do not pass master lock. If master loses lock, this operation may complete before master can kill itself.
-      // If the master lock passed to deleteTable, it is possible that the delete mutations will be dropped. If the delete operations
-      // are dropped and the operation completes, then the deletes will not be repeated.
-      MetadataTableUtil.deleteTable(tableId, refCount != 0, SystemCredentials.get(), null);
-    } catch (Exception e) {
-      log.error("error deleting " + tableId + " from metadata table", e);
-    }
-    
-    // remove any problem reports the table may have
-    try {
-      ProblemReports.getInstance().deleteProblemReports(tableId);
-    } catch (Exception e) {
-      log.error("Failed to delete problem reports for table " + tableId, e);
-    }
-    
-    if (refCount == 0) {
-      // delete the map files
-      try {
-        VolumeManager fs = master.getFileSystem();
-        for (String dir : ServerConstants.getTablesDirs()) {
-          fs.deleteRecursively(new Path(dir, tableId));
-        }
-      } catch (IOException e) {
-        log.error("Unable to remove deleted table directory", e);
-      }
-    }
-    
-    // remove table from zookeeper
-    try {
-      TableManager.getInstance().removeTable(tableId);
-      Tables.clearCache(master.getInstance());
-    } catch (Exception e) {
-      log.error("Failed to find table id in zookeeper", e);
-    }
-    
-    // remove any permissions associated with this table
-    try {
-      AuditedSecurityOperation.getInstance().deleteTable(SystemCredentials.get().toThrift(master.getInstance()), tableId);
-    } catch (ThriftSecurityException e) {
-      log.error(e.getMessage(), e);
-    }
-    
-    Utils.unreserveTable(tableId, tid, true);
-    
-    Logger.getLogger(CleanUp.class).debug("Deleted table " + tableId);
-    
-    return null;
-  }
-  
-  @Override
-  public void undo(long tid, Master environment) throws Exception {
-    // nothing to do
-  }
-  
-}
-
-public class DeleteTable extends MasterRepo {
-  
-  private static final long serialVersionUID = 1L;
-  
-  private String tableId;
-  
-  public DeleteTable(String tableId) {
-    this.tableId = tableId;
-  }
-  
-  @Override
-  public long isReady(long tid, Master environment) throws Exception {
-    return Utils.reserveTable(tableId, tid, true, true, TableOperation.DELETE);
-  }
-  
-  @Override
-  public Repo<Master> call(long tid, Master environment) throws Exception {
-    TableManager.getInstance().transitionTableState(tableId, TableState.DELETING);
-    environment.getEventCoordinator().event("deleting table %s ", tableId);
-    return new CleanUp(tableId);
-  }
-  
-  @Override
-  public void undo(long tid, Master environment) throws Exception {
-    Utils.unreserveTable(tableId, tid, true);
-  }
-  
-}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/598821cd/server/src/main/java/org/apache/accumulo/server/master/tableOps/ExportTable.java
----------------------------------------------------------------------
diff --git a/server/src/main/java/org/apache/accumulo/server/master/tableOps/ExportTable.java b/server/src/main/java/org/apache/accumulo/server/master/tableOps/ExportTable.java
deleted file mode 100644
index a80557a..0000000
--- a/server/src/main/java/org/apache/accumulo/server/master/tableOps/ExportTable.java
+++ /dev/null
@@ -1,308 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.server.master.tableOps;
-
-import java.io.BufferedOutputStream;
-import java.io.BufferedWriter;
-import java.io.DataOutputStream;
-import java.io.IOException;
-import java.io.OutputStreamWriter;
-import java.io.Serializable;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.zip.ZipEntry;
-import java.util.zip.ZipOutputStream;
-
-import org.apache.accumulo.core.Constants;
-import org.apache.accumulo.core.client.AccumuloException;
-import org.apache.accumulo.core.client.AccumuloSecurityException;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.client.TableNotFoundException;
-import org.apache.accumulo.core.client.impl.Tables;
-import org.apache.accumulo.core.client.impl.thrift.TableOperation;
-import org.apache.accumulo.core.client.impl.thrift.TableOperationExceptionType;
-import org.apache.accumulo.core.client.impl.thrift.ThriftTableOperationException;
-import org.apache.accumulo.core.conf.AccumuloConfiguration;
-import org.apache.accumulo.core.conf.DefaultConfiguration;
-import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.KeyExtent;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.master.state.tables.TableState;
-import org.apache.accumulo.core.metadata.MetadataTable;
-import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection;
-import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.DataFileColumnFamily;
-import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.LogColumnFamily;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.fate.Repo;
-import org.apache.accumulo.server.ServerConstants;
-import org.apache.accumulo.server.conf.ServerConfiguration;
-import org.apache.accumulo.server.conf.TableConfiguration;
-import org.apache.accumulo.server.fs.VolumeManager;
-import org.apache.accumulo.server.master.Master;
-import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.io.Text;
-
-class ExportInfo implements Serializable {
-  
-  private static final long serialVersionUID = 1L;
-  
-  public String tableName;
-  public String tableID;
-  public String exportDir;
-}
-
-class WriteExportFiles extends MasterRepo {
-  
-  private static final long serialVersionUID = 1L;
-  private final ExportInfo tableInfo;
-  
-  WriteExportFiles(ExportInfo tableInfo) {
-    this.tableInfo = tableInfo;
-  }
-  
-  private void checkOffline(Connector conn) throws Exception {
-    if (Tables.getTableState(conn.getInstance(), tableInfo.tableID) != TableState.OFFLINE) {
-      Tables.clearCache(conn.getInstance());
-      if (Tables.getTableState(conn.getInstance(), tableInfo.tableID) != TableState.OFFLINE) {
-        throw new ThriftTableOperationException(tableInfo.tableID, tableInfo.tableName, TableOperation.EXPORT, TableOperationExceptionType.OTHER,
-            "Table is not offline");
-      }
-    }
-  }
-  
-  @Override
-  public long isReady(long tid, Master master) throws Exception {
-    
-    long reserved = Utils.reserveTable(tableInfo.tableID, tid, false, true, TableOperation.EXPORT);
-    if (reserved > 0)
-      return reserved;
-    
-    Connector conn = master.getConnector();
-    
-    checkOffline(conn);
-    
-    Scanner metaScanner = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
-    metaScanner.setRange(new KeyExtent(new Text(tableInfo.tableID), null, null).toMetadataRange());
-    
-    // scan for locations
-    metaScanner.fetchColumnFamily(TabletsSection.CurrentLocationColumnFamily.NAME);
-    metaScanner.fetchColumnFamily(TabletsSection.FutureLocationColumnFamily.NAME);
-    
-    if (metaScanner.iterator().hasNext()) {
-      return 500;
-    }
-    
-    // use the same range to check for walogs that we used to check for hosted (or future hosted) tablets
-    // this is done as a separate scan after we check for locations, because walogs are okay only if there is no location
-    metaScanner.clearColumns();
-    metaScanner.fetchColumnFamily(LogColumnFamily.NAME);
-    
-    if (metaScanner.iterator().hasNext()) {
-      throw new ThriftTableOperationException(tableInfo.tableID, tableInfo.tableName, TableOperation.EXPORT, TableOperationExceptionType.OTHER,
-          "Write ahead logs found for table");
-    }
-    
-    return 0;
-  }
-  
-  @Override
-  public Repo<Master> call(long tid, Master master) throws Exception {
-    Connector conn = master.getConnector();
-    
-    try {
-      exportTable(master.getFileSystem(), conn, tableInfo.tableName, tableInfo.tableID, tableInfo.exportDir);
-    } catch (IOException ioe) {
-      throw new ThriftTableOperationException(tableInfo.tableID, tableInfo.tableName, TableOperation.EXPORT, TableOperationExceptionType.OTHER,
-          "Failed to create export files " + ioe.getMessage());
-    }
-    Utils.unreserveTable(tableInfo.tableID, tid, false);
-    Utils.unreserveHdfsDirectory(new Path(tableInfo.exportDir).toString(), tid);
-    return null;
-  }
-  
-  @Override
-  public void undo(long tid, Master env) throws Exception {
-    Utils.unreserveTable(tableInfo.tableID, tid, false);
-  }
-  
-  public static void exportTable(VolumeManager fs, Connector conn, String tableName, String tableID, String exportDir) throws Exception {
-    
-    fs.mkdirs(new Path(exportDir));
-    Path exportMetaFilePath = fs.getFileSystemByPath(new Path(exportDir)).makeQualified(new Path(exportDir, Constants.EXPORT_FILE));
-    
-    FSDataOutputStream fileOut = fs.create(exportMetaFilePath, false);
-    ZipOutputStream zipOut = new ZipOutputStream(fileOut);
-    BufferedOutputStream bufOut = new BufferedOutputStream(zipOut);
-    DataOutputStream dataOut = new DataOutputStream(bufOut);
-    
-    try {
-      
-      zipOut.putNextEntry(new ZipEntry(Constants.EXPORT_INFO_FILE));
-      OutputStreamWriter osw = new OutputStreamWriter(dataOut);
-      osw.append(ExportTable.EXPORT_VERSION_PROP + ":" + ExportTable.VERSION + "\n");
-      osw.append("srcInstanceName:" + conn.getInstance().getInstanceName() + "\n");
-      osw.append("srcInstanceID:" + conn.getInstance().getInstanceID() + "\n");
-      osw.append("srcZookeepers:" + conn.getInstance().getZooKeepers() + "\n");
-      osw.append("srcTableName:" + tableName + "\n");
-      osw.append("srcTableID:" + tableID + "\n");
-      osw.append(ExportTable.DATA_VERSION_PROP + ":" + ServerConstants.DATA_VERSION + "\n");
-      osw.append("srcCodeVersion:" + Constants.VERSION + "\n");
-      
-      osw.flush();
-      dataOut.flush();
-      
-      exportConfig(conn, tableID, zipOut, dataOut);
-      dataOut.flush();
-      
-      Map<String,String> uniqueFiles = exportMetadata(fs, conn, tableID, zipOut, dataOut);
-      
-      dataOut.close();
-      dataOut = null;
-      
-      createDistcpFile(fs, exportDir, exportMetaFilePath, uniqueFiles);
-      
-    } finally {
-      if (dataOut != null)
-        dataOut.close();
-    }
-  }
-  
-  private static void createDistcpFile(VolumeManager fs, String exportDir, Path exportMetaFilePath, Map<String,String> uniqueFiles) throws IOException {
-    BufferedWriter distcpOut = new BufferedWriter(new OutputStreamWriter(fs.create(new Path(exportDir, "distcp.txt"), false)));
-    
-    try {
-      for (String file : uniqueFiles.values()) {
-        distcpOut.append(file);
-        distcpOut.newLine();
-      }
-      
-      distcpOut.append(exportMetaFilePath.toString());
-      distcpOut.newLine();
-      
-      distcpOut.close();
-      distcpOut = null;
-      
-    } finally {
-      if (distcpOut != null)
-        distcpOut.close();
-    }
-  }
-  
-  private static Map<String,String> exportMetadata(VolumeManager fs, Connector conn, String tableID, ZipOutputStream zipOut, DataOutputStream dataOut)
-      throws IOException, TableNotFoundException {
-    zipOut.putNextEntry(new ZipEntry(Constants.EXPORT_METADATA_FILE));
-    
-    Map<String,String> uniqueFiles = new HashMap<String,String>();
-    
-    Scanner metaScanner = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
-    metaScanner.fetchColumnFamily(DataFileColumnFamily.NAME);
-    TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.fetch(metaScanner);
-    TabletsSection.ServerColumnFamily.TIME_COLUMN.fetch(metaScanner);
-    metaScanner.setRange(new KeyExtent(new Text(tableID), null, null).toMetadataRange());
-    
-    for (Entry<Key,Value> entry : metaScanner) {
-      entry.getKey().write(dataOut);
-      entry.getValue().write(dataOut);
-      
-      if (entry.getKey().getColumnFamily().equals(DataFileColumnFamily.NAME)) {
-        String path = fs.getFullPath(entry.getKey()).toString();
-        String tokens[] = path.split("/");
-        if (tokens.length < 1) {
-          throw new RuntimeException("Illegal path " + path);
-        }
-        
-        String filename = tokens[tokens.length - 1];
-        
-        String existingPath = uniqueFiles.get(filename);
-        if (existingPath == null) {
-          uniqueFiles.put(filename, path);
-        } else if (!existingPath.equals(path)) {
-          // make sure file names are unique, should only apply for tables with file names generated by Accumulo 1.3 and earlier
-          throw new IOException("Cannot export table with nonunique file names " + filename + ". Major compact table.");
-        }
-        
-      }
-    }
-    return uniqueFiles;
-  }
-  
-  private static void exportConfig(Connector conn, String tableID, ZipOutputStream zipOut, DataOutputStream dataOut) throws AccumuloException,
-      AccumuloSecurityException, TableNotFoundException, IOException {
-    
-    DefaultConfiguration defaultConfig = AccumuloConfiguration.getDefaultConfiguration();
-    Map<String,String> siteConfig = conn.instanceOperations().getSiteConfiguration();
-    Map<String,String> systemConfig = conn.instanceOperations().getSystemConfiguration();
-    
-    TableConfiguration tableConfig = ServerConfiguration.getTableConfiguration(conn.getInstance(), tableID);
-    
-    OutputStreamWriter osw = new OutputStreamWriter(dataOut);
-    
-    // only put props that are different than defaults and higher level configurations
-    zipOut.putNextEntry(new ZipEntry(Constants.EXPORT_TABLE_CONFIG_FILE));
-    for (Entry<String,String> prop : tableConfig) {
-      if (prop.getKey().startsWith(Property.TABLE_PREFIX.getKey())) {
-        Property key = Property.getPropertyByKey(prop.getKey());
-        
-        if (key == null || !defaultConfig.get(key).equals(prop.getValue())) {
-          if (!prop.getValue().equals(siteConfig.get(prop.getKey())) && !prop.getValue().equals(systemConfig.get(prop.getKey()))) {
-            osw.append(prop.getKey() + "=" + prop.getValue() + "\n");
-          }
-        }
-      }
-    }
-    
-    osw.flush();
-  }
-}
-
-public class ExportTable extends MasterRepo {
-  private static final long serialVersionUID = 1L;
-  
-  private final ExportInfo tableInfo;
-  
-  public ExportTable(String tableName, String tableId, String exportDir) {
-    tableInfo = new ExportInfo();
-    tableInfo.tableName = tableName;
-    tableInfo.exportDir = exportDir;
-    tableInfo.tableID = tableId;
-  }
-  
-  @Override
-  public long isReady(long tid, Master environment) throws Exception {
-    return Utils.reserveHdfsDirectory(new Path(tableInfo.exportDir).toString(), tid);
-  }
-  
-  @Override
-  public Repo<Master> call(long tid, Master env) throws Exception {
-    return new WriteExportFiles(tableInfo);
-  }
-  
-  @Override
-  public void undo(long tid, Master env) throws Exception {
-    Utils.unreserveHdfsDirectory(new Path(tableInfo.exportDir).toString(), tid);
-  }
-  
-  public static final int VERSION = 1;
-  
-  public static final String DATA_VERSION_PROP = "srcDataVersion";
-  public static final String EXPORT_VERSION_PROP = "exportVersion";
-  
-}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/598821cd/server/src/main/java/org/apache/accumulo/server/master/tableOps/ImportTable.java
----------------------------------------------------------------------
diff --git a/server/src/main/java/org/apache/accumulo/server/master/tableOps/ImportTable.java b/server/src/main/java/org/apache/accumulo/server/master/tableOps/ImportTable.java
deleted file mode 100644
index 199665d..0000000
--- a/server/src/main/java/org/apache/accumulo/server/master/tableOps/ImportTable.java
+++ /dev/null
@@ -1,606 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.server.master.tableOps;
-
-import java.io.BufferedInputStream;
-import java.io.BufferedReader;
-import java.io.BufferedWriter;
-import java.io.DataInputStream;
-import java.io.IOException;
-import java.io.InputStreamReader;
-import java.io.OutputStreamWriter;
-import java.io.Serializable;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.zip.ZipEntry;
-import java.util.zip.ZipInputStream;
-
-import org.apache.accumulo.core.Constants;
-import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.BatchWriterConfig;
-import org.apache.accumulo.core.client.Instance;
-import org.apache.accumulo.core.client.admin.TableOperationsImpl;
-import org.apache.accumulo.core.client.impl.Tables;
-import org.apache.accumulo.core.client.impl.thrift.TableOperation;
-import org.apache.accumulo.core.client.impl.thrift.TableOperationExceptionType;
-import org.apache.accumulo.core.client.impl.thrift.ThriftSecurityException;
-import org.apache.accumulo.core.client.impl.thrift.ThriftTableOperationException;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.KeyExtent;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.file.FileOperations;
-import org.apache.accumulo.core.master.state.tables.TableState;
-import org.apache.accumulo.core.metadata.MetadataTable;
-import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection;
-import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.DataFileColumnFamily;
-import org.apache.accumulo.core.security.TablePermission;
-import org.apache.accumulo.core.util.FastFormat;
-import org.apache.accumulo.fate.Repo;
-import org.apache.accumulo.fate.zookeeper.ZooUtil.NodeExistsPolicy;
-import org.apache.accumulo.server.ServerConstants;
-import org.apache.accumulo.server.client.HdfsZooInstance;
-import org.apache.accumulo.server.fs.VolumeManager;
-import org.apache.accumulo.server.master.Master;
-import org.apache.accumulo.server.master.state.tables.TableManager;
-import org.apache.accumulo.server.security.AuditedSecurityOperation;
-import org.apache.accumulo.server.security.SecurityOperation;
-import org.apache.accumulo.server.security.SystemCredentials;
-import org.apache.accumulo.server.tabletserver.UniqueNameAllocator;
-import org.apache.accumulo.server.util.MetadataTableUtil;
-import org.apache.accumulo.server.util.TablePropUtil;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.io.Text;
-import org.apache.log4j.Logger;
-
-/**
- * 
- */
-class ImportedTableInfo implements Serializable {
-  
-  private static final long serialVersionUID = 1L;
-  
-  public String exportDir;
-  public String user;
-  public String tableName;
-  public String tableId;
-  public String importDir;
-}
-
-class FinishImportTable extends MasterRepo {
-  
-  private static final long serialVersionUID = 1L;
-  
-  private ImportedTableInfo tableInfo;
-  
-  public FinishImportTable(ImportedTableInfo ti) {
-    this.tableInfo = ti;
-  }
-  
-  @Override
-  public long isReady(long tid, Master environment) throws Exception {
-    return 0;
-  }
-  
-  @Override
-  public Repo<Master> call(long tid, Master env) throws Exception {
-    
-    env.getFileSystem().deleteRecursively(new Path(tableInfo.importDir, "mappings.txt"));
-    
-    TableManager.getInstance().transitionTableState(tableInfo.tableId, TableState.ONLINE);
-    
-    Utils.unreserveTable(tableInfo.tableId, tid, true);
-    
-    Utils.unreserveHdfsDirectory(new Path(tableInfo.exportDir).toString(), tid);
-    
-    env.getEventCoordinator().event("Imported table %s ", tableInfo.tableName);
-    
-    Logger.getLogger(FinishImportTable.class).debug("Imported table " + tableInfo.tableId + " " + tableInfo.tableName);
-    
-    return null;
-  }
-  
-  @Override
-  public String getReturn() {
-    return tableInfo.tableId;
-  }
-  
-  @Override
-  public void undo(long tid, Master env) throws Exception {}
-  
-}
-
-class MoveExportedFiles extends MasterRepo {
-  
-  private static final long serialVersionUID = 1L;
-  
-  private ImportedTableInfo tableInfo;
-  
-  MoveExportedFiles(ImportedTableInfo ti) {
-    this.tableInfo = ti;
-  }
-  
-  @Override
-  public Repo<Master> call(long tid, Master master) throws Exception {
-    try {
-      VolumeManager fs = master.getFileSystem();
-      
-      Map<String,String> fileNameMappings = PopulateMetadataTable.readMappingFile(fs, tableInfo);
-      
-      for (String oldFileName : fileNameMappings.keySet()) {
-        if (!fs.exists(new Path(tableInfo.exportDir, oldFileName))) {
-          throw new ThriftTableOperationException(tableInfo.tableId, tableInfo.tableName, TableOperation.IMPORT, TableOperationExceptionType.OTHER,
-              "File referenced by exported table does not exists " + oldFileName);
-        }
-      }
-      
-      FileStatus[] files = fs.listStatus(new Path(tableInfo.exportDir));
-      
-      for (FileStatus fileStatus : files) {
-        String newName = fileNameMappings.get(fileStatus.getPath().getName());
-        
-        if (newName != null)
-          fs.rename(fileStatus.getPath(), new Path(tableInfo.importDir, newName));
-      }
-      
-      return new FinishImportTable(tableInfo);
-    } catch (IOException ioe) {
-      log.warn(ioe.getMessage(), ioe);
-      throw new ThriftTableOperationException(tableInfo.tableId, tableInfo.tableName, TableOperation.IMPORT, TableOperationExceptionType.OTHER,
-          "Error renaming files " + ioe.getMessage());
-    }
-  }
-}
-
-class PopulateMetadataTable extends MasterRepo {
-  
-  private static final long serialVersionUID = 1L;
-  
-  private ImportedTableInfo tableInfo;
-  
-  PopulateMetadataTable(ImportedTableInfo ti) {
-    this.tableInfo = ti;
-  }
-  
-  static Map<String,String> readMappingFile(VolumeManager fs, ImportedTableInfo tableInfo) throws Exception {
-    BufferedReader in = new BufferedReader(new InputStreamReader(fs.open(new Path(tableInfo.importDir, "mappings.txt"))));
-    
-    try {
-      Map<String,String> map = new HashMap<String,String>();
-      
-      String line = null;
-      while ((line = in.readLine()) != null) {
-        String sa[] = line.split(":", 2);
-        map.put(sa[0], sa[1]);
-      }
-      
-      return map;
-    } finally {
-      in.close();
-    }
-    
-  }
-  
-  @Override
-  public Repo<Master> call(long tid, Master master) throws Exception {
-    
-    Path path = new Path(tableInfo.exportDir, Constants.EXPORT_FILE);
-    
-    BatchWriter mbw = null;
-    ZipInputStream zis = null;
-    
-    try {
-      VolumeManager fs = master.getFileSystem();
-      
-      mbw = master.getConnector().createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
-      
-      zis = new ZipInputStream(fs.open(path));
-      
-      Map<String,String> fileNameMappings = readMappingFile(fs, tableInfo);
-      
-      String bulkDir = new Path(tableInfo.importDir).getName();
-      
-      ZipEntry zipEntry;
-      while ((zipEntry = zis.getNextEntry()) != null) {
-        if (zipEntry.getName().equals(Constants.EXPORT_METADATA_FILE)) {
-          DataInputStream in = new DataInputStream(new BufferedInputStream(zis));
-          
-          Key key = new Key();
-          Value val = new Value();
-          
-          Mutation m = null;
-          Text currentRow = null;
-          int dirCount = 0;
-          
-          while (true) {
-            key.readFields(in);
-            val.readFields(in);
-            
-            Text endRow = new KeyExtent(key.getRow(), (Text) null).getEndRow();
-            Text metadataRow = new KeyExtent(new Text(tableInfo.tableId), endRow, null).getMetadataEntry();
-            
-            Text cq;
-            
-            if (key.getColumnFamily().equals(DataFileColumnFamily.NAME)) {
-              String oldName = new Path(key.getColumnQualifier().toString()).getName();
-              String newName = fileNameMappings.get(oldName);
-              
-              if (newName == null) {
-                throw new ThriftTableOperationException(tableInfo.tableId, tableInfo.tableName, TableOperation.IMPORT, TableOperationExceptionType.OTHER,
-                    "File " + oldName + " does not exist in import dir");
-              }
-
-              cq = new Text("/" + bulkDir + "/" + newName);
-            } else {
-              cq = key.getColumnQualifier();
-            }
-            
-            if (m == null) {
-              m = new Mutation(metadataRow);
-              TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.put(m, new Value(FastFormat.toZeroPaddedString(dirCount++, 8, 16, "/c-".getBytes())));
-              currentRow = metadataRow;
-            }
-            
-            if (!currentRow.equals(metadataRow)) {
-              mbw.addMutation(m);
-              m = new Mutation(metadataRow);
-              TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.put(m, new Value(FastFormat.toZeroPaddedString(dirCount++, 8, 16, "/c-".getBytes())));
-            }
-            
-            m.put(key.getColumnFamily(), cq, val);
-            
-            if (endRow == null && TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.hasColumns(key)) {
-              mbw.addMutation(m);
-              break; // its the last column in the last row
-            }
-          }
-          
-          break;
-        }
-      }
-      
-      return new MoveExportedFiles(tableInfo);
-    } catch (IOException ioe) {
-      log.warn(ioe.getMessage(), ioe);
-      throw new ThriftTableOperationException(tableInfo.tableId, tableInfo.tableName, TableOperation.IMPORT, TableOperationExceptionType.OTHER,
-          "Error reading " + path + " " + ioe.getMessage());
-    } finally {
-      if (zis != null) {
-        try {
-          zis.close();
-        } catch (IOException ioe) {
-          log.warn("Failed to close zip file ", ioe);
-        }
-      }
-      
-      if (mbw != null) {
-        mbw.close();
-      }
-    }
-  }
-  
-  @Override
-  public void undo(long tid, Master environment) throws Exception {
-    MetadataTableUtil.deleteTable(tableInfo.tableId, false, SystemCredentials.get(), environment.getMasterLock());
-  }
-}
-
-class MapImportFileNames extends MasterRepo {
-  
-  private static final long serialVersionUID = 1L;
-  
-  private ImportedTableInfo tableInfo;
-  
-  MapImportFileNames(ImportedTableInfo ti) {
-    this.tableInfo = ti;
-  }
-  
-  @Override
-  public Repo<Master> call(long tid, Master environment) throws Exception {
-    
-    Path path = new Path(tableInfo.importDir, "mappings.txt");
-    
-    BufferedWriter mappingsWriter = null;
-    
-    try {
-      VolumeManager fs = environment.getFileSystem();
-      
-      fs.mkdirs(new Path(tableInfo.importDir));
-      
-      FileStatus[] files = fs.listStatus(new Path(tableInfo.exportDir));
-      
-      UniqueNameAllocator namer = UniqueNameAllocator.getInstance();
-      
-      mappingsWriter = new BufferedWriter(new OutputStreamWriter(fs.create(path)));
-      
-      for (FileStatus fileStatus : files) {
-        String fileName = fileStatus.getPath().getName();
-        log.info("filename " + fileStatus.getPath().toString());
-        String sa[] = fileName.split("\\.");
-        String extension = "";
-        if (sa.length > 1) {
-          extension = sa[sa.length - 1];
-          
-          if (!FileOperations.getValidExtensions().contains(extension)) {
-            continue;
-          }
-        } else {
-          // assume it is a map file
-          extension = Constants.MAPFILE_EXTENSION;
-        }
-        
-        String newName = "I" + namer.getNextName() + "." + extension;
-        
-        mappingsWriter.append(fileName);
-        mappingsWriter.append(':');
-        mappingsWriter.append(newName);
-        mappingsWriter.newLine();
-      }
-      
-      mappingsWriter.close();
-      mappingsWriter = null;
-      
-      return new PopulateMetadataTable(tableInfo);
-    } catch (IOException ioe) {
-      log.warn(ioe.getMessage(), ioe);
-      throw new ThriftTableOperationException(tableInfo.tableId, tableInfo.tableName, TableOperation.IMPORT, TableOperationExceptionType.OTHER,
-          "Error writing mapping file " + path + " " + ioe.getMessage());
-    } finally {
-      if (mappingsWriter != null)
-        try {
-          mappingsWriter.close();
-        } catch (IOException ioe) {
-          log.warn("Failed to close " + path, ioe);
-        }
-    }
-  }
-  
-  @Override
-  public void undo(long tid, Master env) throws Exception {
-    env.getFileSystem().deleteRecursively(new Path(tableInfo.importDir));
-  }
-}
-
-class CreateImportDir extends MasterRepo {
-  
-  private static final long serialVersionUID = 1L;
-  
-  private ImportedTableInfo tableInfo;
-  
-  CreateImportDir(ImportedTableInfo ti) {
-    this.tableInfo = ti;
-  }
-  
-  @Override
-  public Repo<Master> call(long tid, Master master) throws Exception {
-    
-    UniqueNameAllocator namer = UniqueNameAllocator.getInstance();
-    
-    Path base = master.getFileSystem().matchingFileSystem(new Path(tableInfo.exportDir), ServerConstants.getTablesDirs());
-    Path directory = new Path(base, tableInfo.tableId);
-    
-    Path newBulkDir = new Path(directory, Constants.BULK_PREFIX + namer.getNextName());
-    
-    tableInfo.importDir = newBulkDir.toString();
-    
-    return new MapImportFileNames(tableInfo);
-  }
-}
-
-class ImportPopulateZookeeper extends MasterRepo {
-  
-  private static final long serialVersionUID = 1L;
-  
-  private ImportedTableInfo tableInfo;
-  
-  ImportPopulateZookeeper(ImportedTableInfo ti) {
-    this.tableInfo = ti;
-  }
-  
-  @Override
-  public long isReady(long tid, Master environment) throws Exception {
-    return Utils.reserveTable(tableInfo.tableId, tid, true, false, TableOperation.IMPORT);
-  }
-  
-  private Map<String,String> getExportedProps(VolumeManager fs) throws Exception {
-    
-    Path path = new Path(tableInfo.exportDir, Constants.EXPORT_FILE);
-    
-    try {
-      FileSystem ns = fs.getFileSystemByPath(path);
-      return TableOperationsImpl.getExportedProps(ns, path);
-    } catch (IOException ioe) {
-      throw new ThriftTableOperationException(tableInfo.tableId, tableInfo.tableName, TableOperation.IMPORT, TableOperationExceptionType.OTHER,
-          "Error reading table props from " + path + " " + ioe.getMessage());
-    }
-  }
-  
-  @Override
-  public Repo<Master> call(long tid, Master env) throws Exception {
-    // reserve the table name in zookeeper or fail
-    
-    Utils.tableNameLock.lock();
-    try {
-      // write tableName & tableId to zookeeper
-      Instance instance = HdfsZooInstance.getInstance();
-      
-      Utils.checkTableDoesNotExist(instance, tableInfo.tableName, tableInfo.tableId, TableOperation.CREATE);
-      
-      TableManager.getInstance().addTable(tableInfo.tableId, tableInfo.tableName, NodeExistsPolicy.OVERWRITE);
-      
-      Tables.clearCache(instance);
-    } finally {
-      Utils.tableNameLock.unlock();
-    }
-    
-    for (Entry<String,String> entry : getExportedProps(env.getFileSystem()).entrySet())
-      if (!TablePropUtil.setTableProperty(tableInfo.tableId, entry.getKey(), entry.getValue())) {
-        throw new ThriftTableOperationException(tableInfo.tableId, tableInfo.tableName, TableOperation.IMPORT, TableOperationExceptionType.OTHER,
-            "Invalid table property " + entry.getKey());
-      }
-    
-    return new CreateImportDir(tableInfo);
-  }
-  
-  @Override
-  public void undo(long tid, Master env) throws Exception {
-    Instance instance = HdfsZooInstance.getInstance();
-    TableManager.getInstance().removeTable(tableInfo.tableId);
-    Utils.unreserveTable(tableInfo.tableId, tid, true);
-    Tables.clearCache(instance);
-  }
-}
-
-class ImportSetupPermissions extends MasterRepo {
-  
-  private static final long serialVersionUID = 1L;
-  
-  private ImportedTableInfo tableInfo;
-  
-  public ImportSetupPermissions(ImportedTableInfo ti) {
-    this.tableInfo = ti;
-  }
-  
-  @Override
-  public long isReady(long tid, Master environment) throws Exception {
-    return 0;
-  }
-  
-  @Override
-  public Repo<Master> call(long tid, Master env) throws Exception {
-    // give all table permissions to the creator
-    SecurityOperation security = AuditedSecurityOperation.getInstance();
-    for (TablePermission permission : TablePermission.values()) {
-      try {
-        security.grantTablePermission(SystemCredentials.get().toThrift(env.getInstance()), tableInfo.user, tableInfo.tableId, permission);
-      } catch (ThriftSecurityException e) {
-        Logger.getLogger(ImportSetupPermissions.class).error(e.getMessage(), e);
-        throw e;
-      }
-    }
-    
-    // setup permissions in zookeeper before table info in zookeeper
-    // this way concurrent users will not get a spurious permission denied
-    // error
-    return new ImportPopulateZookeeper(tableInfo);
-  }
-  
-  @Override
-  public void undo(long tid, Master env) throws Exception {
-    AuditedSecurityOperation.getInstance().deleteTable(SystemCredentials.get().toThrift(env.getInstance()), tableInfo.tableId);
-  }
-}
-
-public class ImportTable extends MasterRepo {
-  private static final long serialVersionUID = 1L;
-  
-  private ImportedTableInfo tableInfo;
-  
-  public ImportTable(String user, String tableName, String exportDir) {
-    tableInfo = new ImportedTableInfo();
-    tableInfo.tableName = tableName;
-    tableInfo.user = user;
-    tableInfo.exportDir = exportDir;
-  }
-  
-  @Override
-  public long isReady(long tid, Master environment) throws Exception {
-    return Utils.reserveHdfsDirectory(new Path(tableInfo.exportDir).toString(), tid);
-  }
-  
-  @Override
-  public Repo<Master> call(long tid, Master env) throws Exception {
-    checkVersions(env);
-    
-    // first step is to reserve a table id.. if the machine fails during this step
-    // it is ok to retry... the only side effect is that a table id may not be used
-    // or skipped
-    
-    // assuming only the master process is creating tables
-    
-    Utils.idLock.lock();
-    try {
-      Instance instance = HdfsZooInstance.getInstance();
-      tableInfo.tableId = Utils.getNextTableId(tableInfo.tableName, instance);
-      return new ImportSetupPermissions(tableInfo);
-    } finally {
-      Utils.idLock.unlock();
-    }
-  }
-  
-  public void checkVersions(Master env) throws ThriftTableOperationException {
-    Path path = new Path(tableInfo.exportDir, Constants.EXPORT_FILE);
-    
-    ZipInputStream zis = null;
-    
-    try {
-      zis = new ZipInputStream(env.getFileSystem().open(path));
-      
-      Integer exportVersion = null;
-      Integer dataVersion = null;
-      
-      ZipEntry zipEntry;
-      while ((zipEntry = zis.getNextEntry()) != null) {
-        if (zipEntry.getName().equals(Constants.EXPORT_INFO_FILE)) {
-          BufferedReader in = new BufferedReader(new InputStreamReader(zis));
-          String line = null;
-          while ((line = in.readLine()) != null) {
-            String sa[] = line.split(":", 2);
-            if (sa[0].equals(ExportTable.EXPORT_VERSION_PROP)) {
-              exportVersion = Integer.parseInt(sa[1]);
-            } else if (sa[0].equals(ExportTable.DATA_VERSION_PROP)) {
-              dataVersion = Integer.parseInt(sa[1]);
-            }
-          }
-          
-          break;
-        }
-      }
-      
-      zis.close();
-      zis = null;
-      
-      if (exportVersion == null || exportVersion > ExportTable.VERSION)
-        throw new ThriftTableOperationException(null, tableInfo.tableName, TableOperation.IMPORT, TableOperationExceptionType.OTHER,
-            "Incompatible export version " + exportVersion);
-      
-      if (dataVersion == null || dataVersion > ServerConstants.DATA_VERSION)
-        throw new ThriftTableOperationException(null, tableInfo.tableName, TableOperation.IMPORT, TableOperationExceptionType.OTHER,
-            "Incompatible data version " + exportVersion);
-      
-    } catch (IOException ioe) {
-      log.warn(ioe.getMessage(), ioe);
-      throw new ThriftTableOperationException(null, tableInfo.tableName, TableOperation.IMPORT, TableOperationExceptionType.OTHER,
-          "Failed to read export metadata " + ioe.getMessage());
-    } finally {
-      if (zis != null)
-        try {
-          zis.close();
-        } catch (IOException ioe) {
-          log.warn(ioe.getMessage(), ioe);
-        }
-    }
-  }
-  
-  @Override
-  public void undo(long tid, Master env) throws Exception {
-    Utils.unreserveHdfsDirectory(new Path(tableInfo.exportDir).toString(), tid);
-  }
-}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/598821cd/server/src/main/java/org/apache/accumulo/server/master/tableOps/MasterRepo.java
----------------------------------------------------------------------
diff --git a/server/src/main/java/org/apache/accumulo/server/master/tableOps/MasterRepo.java b/server/src/main/java/org/apache/accumulo/server/master/tableOps/MasterRepo.java
deleted file mode 100644
index cf0c4aa..0000000
--- a/server/src/main/java/org/apache/accumulo/server/master/tableOps/MasterRepo.java
+++ /dev/null
@@ -1,49 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.server.master.tableOps;
-
-import org.apache.accumulo.fate.Repo;
-import org.apache.accumulo.server.master.Master;
-import org.apache.log4j.Logger;
-
-public abstract class MasterRepo implements Repo<Master> {
-  
-  private static final long serialVersionUID = 1L;
-  protected static final Logger log = Logger.getLogger(MasterRepo.class);
-  
-  @Override
-  public long isReady(long tid, Master environment) throws Exception {
-    return 0;
-  }
-  
-  @Override
-  public void undo(long tid, Master environment) throws Exception {}
-  
-  @Override
-  public String getDescription() {
-    return this.getClass().getSimpleName();
-  }
-  
-  @Override
-  public String getReturn() {
-    return null;
-  }
-  
-  @Override
-  abstract public Repo<Master> call(long tid, Master environment) throws Exception;
-  
-}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/598821cd/server/src/main/java/org/apache/accumulo/server/master/tableOps/RenameTable.java
----------------------------------------------------------------------
diff --git a/server/src/main/java/org/apache/accumulo/server/master/tableOps/RenameTable.java b/server/src/main/java/org/apache/accumulo/server/master/tableOps/RenameTable.java
deleted file mode 100644
index 16201b1..0000000
--- a/server/src/main/java/org/apache/accumulo/server/master/tableOps/RenameTable.java
+++ /dev/null
@@ -1,91 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.server.master.tableOps;
-
-import org.apache.accumulo.core.Constants;
-import org.apache.accumulo.core.client.Instance;
-import org.apache.accumulo.core.client.impl.Tables;
-import org.apache.accumulo.core.client.impl.thrift.TableOperation;
-import org.apache.accumulo.core.client.impl.thrift.TableOperationExceptionType;
-import org.apache.accumulo.core.client.impl.thrift.ThriftTableOperationException;
-import org.apache.accumulo.core.zookeeper.ZooUtil;
-import org.apache.accumulo.fate.Repo;
-import org.apache.accumulo.fate.zookeeper.IZooReaderWriter;
-import org.apache.accumulo.fate.zookeeper.IZooReaderWriter.Mutator;
-import org.apache.accumulo.server.master.Master;
-import org.apache.accumulo.server.zookeeper.ZooReaderWriter;
-import org.apache.log4j.Logger;
-
-public class RenameTable extends MasterRepo {
-  
-  private static final long serialVersionUID = 1L;
-  private String tableId;
-  private String oldTableName;
-  private String newTableName;
-  
-  @Override
-  public long isReady(long tid, Master environment) throws Exception {
-    return Utils.reserveTable(tableId, tid, true, true, TableOperation.RENAME);
-  }
-  
-  public RenameTable(String tableId, String oldTableName, String newTableName) {
-    this.tableId = tableId;
-    this.oldTableName = oldTableName;
-    this.newTableName = newTableName;
-  }
-  
-  @Override
-  public Repo<Master> call(long tid, Master master) throws Exception {
-    
-    Instance instance = master.getInstance();
-    
-    IZooReaderWriter zoo = ZooReaderWriter.getRetryingInstance();
-    Utils.tableNameLock.lock();
-    try {
-      Utils.checkTableDoesNotExist(instance, newTableName, tableId, TableOperation.RENAME);
-      
-      final String tap = ZooUtil.getRoot(instance) + Constants.ZTABLES + "/" + tableId + Constants.ZTABLE_NAME;
-      
-      zoo.mutate(tap, null, null, new Mutator() {
-        public byte[] mutate(byte[] current) throws Exception {
-          final String currentName = new String(current);
-          if (currentName.equals(newTableName))
-            return null; // assume in this case the operation is running again, so we are done
-          if (!currentName.equals(oldTableName)) {
-            throw new ThriftTableOperationException(null, oldTableName, TableOperation.RENAME, TableOperationExceptionType.NOTFOUND,
-                "Name changed while processing");
-          }
-          return newTableName.getBytes();
-        }
-      });
-      Tables.clearCache(instance);
-    } finally {
-      Utils.tableNameLock.unlock();
-      Utils.unreserveTable(tableId, tid, true);
-    }
-    
-    Logger.getLogger(RenameTable.class).debug("Renamed table " + tableId + " " + oldTableName + " " + newTableName);
-    
-    return null;
-  }
-  
-  @Override
-  public void undo(long tid, Master env) throws Exception {
-    Utils.unreserveTable(tableId, tid, true);
-  }
-  
-}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/598821cd/server/src/main/java/org/apache/accumulo/server/master/tableOps/TableRangeOp.java
----------------------------------------------------------------------
diff --git a/server/src/main/java/org/apache/accumulo/server/master/tableOps/TableRangeOp.java b/server/src/main/java/org/apache/accumulo/server/master/tableOps/TableRangeOp.java
deleted file mode 100644
index a7ceec1..0000000
--- a/server/src/main/java/org/apache/accumulo/server/master/tableOps/TableRangeOp.java
+++ /dev/null
@@ -1,136 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.server.master.tableOps;
-
-import org.apache.accumulo.core.client.impl.thrift.TableOperation;
-import org.apache.accumulo.core.client.impl.thrift.TableOperationExceptionType;
-import org.apache.accumulo.core.client.impl.thrift.ThriftTableOperationException;
-import org.apache.accumulo.core.data.KeyExtent;
-import org.apache.accumulo.core.metadata.RootTable;
-import org.apache.accumulo.core.util.TextUtil;
-import org.apache.accumulo.fate.Repo;
-import org.apache.accumulo.server.master.Master;
-import org.apache.accumulo.server.master.state.MergeInfo;
-import org.apache.accumulo.server.master.state.MergeInfo.Operation;
-import org.apache.accumulo.server.master.state.MergeState;
-import org.apache.hadoop.io.Text;
-
-/**
- * Merge makes things hard.
- * 
- * Typically, a client will read the list of tablets, and begin an operation on that tablet at the location listed in the metadata table. When a tablet splits,
- * the information read from the metadata table doesn't match reality, so the operation fails, and must be retried. But the operation will take place either on
- * the parent, or at a later time on the children. It won't take place on just half of the tablet.
- * 
- * However, when a merge occurs, the operation may have succeeded on one section of the merged area, and not on the others, when the merge occurs. There is no
- * way to retry the request at a later time on an unmodified tablet.
- * 
- * The code below uses read-write lock to prevent some operations while a merge is taking place. Normal operations, like bulk imports, will grab the read lock
- * and prevent merges (writes) while they run. Merge operations will lock out some operations while they run.
- */
-class TableRangeOpWait extends MasterRepo {
-  
-  private static final long serialVersionUID = 1L;
-  private String tableId;
-  
-  public TableRangeOpWait(String tableId) {
-    this.tableId = tableId;
-  }
-  
-  @Override
-  public long isReady(long tid, Master env) throws Exception {
-    Text tableIdText = new Text(tableId);
-    if (!env.getMergeInfo(tableIdText).getState().equals(MergeState.NONE)) {
-      return 50;
-    }
-    return 0;
-  }
-  
-  @Override
-  public Repo<Master> call(long tid, Master master) throws Exception {
-    Text tableIdText = new Text(tableId);
-    MergeInfo mergeInfo = master.getMergeInfo(tableIdText);
-    log.info("removing merge information " + mergeInfo);
-    master.clearMergeState(tableIdText);
-    Utils.unreserveTable(tableId, tid, true);
-    return null;
-  }
-  
-}
-
public class TableRangeOp extends MasterRepo {
  
  private static final long serialVersionUID = 1L;
  
  private String tableId;
  // serialized row bounds; a zero-length array stands in for "unbounded" (see call())
  private byte[] startRow;
  private byte[] endRow;
  private Operation op;
  
  @Override
  public long isReady(long tid, Master environment) throws Exception {
    // reserve the table for the duration of the range operation; per the class comment above,
    // merges take the write side of the table's read-write lock
    return Utils.reserveTable(tableId, tid, true, true, TableOperation.MERGE);
  }
  
  public TableRangeOp(MergeInfo.Operation op, String tableId, Text startRow, Text endRow) throws ThriftTableOperationException {
    
    this.tableId = tableId;
    this.startRow = TextUtil.getBytes(startRow);
    this.endRow = TextUtil.getBytes(endRow);
    this.op = op;
  }
  
  /**
   * Validates the requested range, then kicks off the merge state machine and waits for it via
   * TableRangeOpWait.
   */
  @Override
  public Repo<Master> call(long tid, Master env) throws Exception {
    
    if (RootTable.ID.equals(tableId) && TableOperation.MERGE.equals(op)) {
      // NOTE(review): this only warns — execution still falls through to set merge state for
      // the root table; confirm that proceeding (rather than returning early) is intended
      log.warn("Attempt to merge tablets for " + RootTable.NAME + " does nothing. It is not splittable.");
    }
    
    // empty byte arrays were used to encode null (unbounded) rows across serialization
    Text start = startRow.length == 0 ? null : new Text(startRow);
    Text end = endRow.length == 0 ? null : new Text(endRow);
    Text tableIdText = new Text(tableId);
    
    if (start != null && end != null)
      if (start.compareTo(end) >= 0)
        throw new ThriftTableOperationException(tableId, null, TableOperation.MERGE, TableOperationExceptionType.BAD_RANGE,
            "start row must be less than end row");
    
    env.mustBeOnline(tableId);
    
    MergeInfo info = env.getMergeInfo(tableIdText);
    
    // only start a new merge if one is not already in progress for this table; on a fate retry
    // the state may already be past NONE
    if (info.getState() == MergeState.NONE) {
      KeyExtent range = new KeyExtent(tableIdText, end, start);
      env.setMergeState(new MergeInfo(range, op), MergeState.STARTED);
    }
    
    return new TableRangeOpWait(tableId);
  }
  
  @Override
  public void undo(long tid, Master env) throws Exception {
    // Not sure this is a good thing to do. The Master state engine should be the one to remove it.
    Text tableIdText = new Text(tableId);
    MergeInfo mergeInfo = env.getMergeInfo(tableIdText);
    if (mergeInfo.getState() != MergeState.NONE)
      log.info("removing merge information " + mergeInfo);
    env.clearMergeState(tableIdText);
    Utils.unreserveTable(tableId, tid, true);
  }
  
}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/598821cd/server/src/main/java/org/apache/accumulo/server/master/tableOps/TraceRepo.java
----------------------------------------------------------------------
diff --git a/server/src/main/java/org/apache/accumulo/server/master/tableOps/TraceRepo.java b/server/src/main/java/org/apache/accumulo/server/master/tableOps/TraceRepo.java
deleted file mode 100644
index e789577..0000000
--- a/server/src/main/java/org/apache/accumulo/server/master/tableOps/TraceRepo.java
+++ /dev/null
@@ -1,83 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.server.master.tableOps;
-
-import org.apache.accumulo.fate.Repo;
-import org.apache.accumulo.trace.instrument.Span;
-import org.apache.accumulo.trace.instrument.Trace;
-import org.apache.accumulo.trace.instrument.Tracer;
-import org.apache.accumulo.trace.thrift.TInfo;
-
-/**
- * 
- */
-public class TraceRepo<T> implements Repo<T> {
-  
-  private static final long serialVersionUID = 1L;
-  
-  TInfo tinfo;
-  Repo<T> repo;
-  
-  public TraceRepo(Repo<T> repo) {
-    this.repo = repo;
-    tinfo = Tracer.traceInfo();
-  }
-  
-  @Override
-  public long isReady(long tid, T environment) throws Exception {
-    Span span = Trace.trace(tinfo, repo.getDescription());
-    try {
-      return repo.isReady(tid, environment);
-    } finally {
-      span.stop();
-    }
-  }
-  
-  @Override
-  public Repo<T> call(long tid, T environment) throws Exception {
-    Span span = Trace.trace(tinfo, repo.getDescription());
-    try {
-      Repo<T> result = repo.call(tid, environment);
-      if (result == null)
-        return result;
-      return new TraceRepo<T>(result);
-    } finally {
-      span.stop();
-    }
-  }
-  
-  @Override
-  public void undo(long tid, T environment) throws Exception {
-    Span span = Trace.trace(tinfo, repo.getDescription());
-    try {
-      repo.undo(tid, environment);
-    } finally {
-      span.stop();
-    }
-  }
-  
-  @Override
-  public String getDescription() {
-    return repo.getDescription();
-  }
-  
-  @Override
-  public String getReturn() {
-    return repo.getReturn();
-  }
-  
-}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/598821cd/server/src/main/java/org/apache/accumulo/server/master/tableOps/Utils.java
----------------------------------------------------------------------
diff --git a/server/src/main/java/org/apache/accumulo/server/master/tableOps/Utils.java b/server/src/main/java/org/apache/accumulo/server/master/tableOps/Utils.java
deleted file mode 100644
index e57801c..0000000
--- a/server/src/main/java/org/apache/accumulo/server/master/tableOps/Utils.java
+++ /dev/null
@@ -1,132 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.server.master.tableOps;
-
-import java.math.BigInteger;
-import java.util.concurrent.locks.Lock;
-import java.util.concurrent.locks.ReentrantLock;
-
-import org.apache.accumulo.core.Constants;
-import org.apache.accumulo.core.client.Instance;
-import org.apache.accumulo.core.client.impl.Tables;
-import org.apache.accumulo.core.client.impl.thrift.TableOperation;
-import org.apache.accumulo.core.client.impl.thrift.TableOperationExceptionType;
-import org.apache.accumulo.core.client.impl.thrift.ThriftTableOperationException;
-import org.apache.accumulo.core.zookeeper.ZooUtil;
-import org.apache.accumulo.fate.zookeeper.DistributedReadWriteLock;
-import org.apache.accumulo.fate.zookeeper.IZooReaderWriter;
-import org.apache.accumulo.fate.zookeeper.IZooReaderWriter.Mutator;
-import org.apache.accumulo.fate.zookeeper.ZooReservation;
-import org.apache.accumulo.server.client.HdfsZooInstance;
-import org.apache.accumulo.server.zookeeper.ZooQueueLock;
-import org.apache.accumulo.server.zookeeper.ZooReaderWriter;
-import org.apache.commons.codec.binary.Base64;
-import org.apache.log4j.Logger;
-import org.apache.zookeeper.KeeperException;
-
-public class Utils {
-  
-  static void checkTableDoesNotExist(Instance instance, String tableName, String tableId, TableOperation operation) throws ThriftTableOperationException {
-    
-    String id = Tables.getNameToIdMap(instance).get(tableName);
-    
-    if (id != null && !id.equals(tableId))
-      throw new ThriftTableOperationException(null, tableName, operation, TableOperationExceptionType.EXISTS, null);
-  }
-  
-  static String getNextTableId(String tableName, Instance instance) throws ThriftTableOperationException {
-    
-    String tableId = null;
-    try {
-      IZooReaderWriter zoo = ZooReaderWriter.getRetryingInstance();
-      final String ntp = ZooUtil.getRoot(instance) + Constants.ZTABLES;
-      byte[] nid = zoo.mutate(ntp, "0".getBytes(), ZooUtil.PUBLIC, new Mutator() {
-        @Override
-        public byte[] mutate(byte[] currentValue) throws Exception {
-          BigInteger nextId = new BigInteger(new String(currentValue), Character.MAX_RADIX);
-          nextId = nextId.add(BigInteger.ONE);
-          return nextId.toString(Character.MAX_RADIX).getBytes();
-        }
-      });
-      return new String(nid);
-    } catch (Exception e1) {
-      Logger.getLogger(CreateTable.class).error("Failed to assign tableId to " + tableName, e1);
-      throw new ThriftTableOperationException(tableId, tableName, TableOperation.CREATE, TableOperationExceptionType.OTHER, e1.getMessage());
-    }
-  }
-  
-  static final Lock tableNameLock = new ReentrantLock();
-  static final Lock idLock = new ReentrantLock();
-  private static final Logger log = Logger.getLogger(Utils.class);
-  
-  public static long reserveTable(String tableId, long tid, boolean writeLock, boolean tableMustExist, TableOperation op) throws Exception {
-    if (getLock(tableId, tid, writeLock).tryLock()) {
-      if (tableMustExist) {
-        Instance instance = HdfsZooInstance.getInstance();
-        IZooReaderWriter zk = ZooReaderWriter.getRetryingInstance();
-        if (!zk.exists(ZooUtil.getRoot(instance) + Constants.ZTABLES + "/" + tableId))
-          throw new ThriftTableOperationException(tableId, "", op, TableOperationExceptionType.NOTFOUND, "Table does not exists");
-      }
-      log.info("table " + tableId + " (" + Long.toHexString(tid) + ") locked for " + (writeLock ? "write" : "read") + " operation: " + op);
-      return 0;
-    } else
-      return 100;
-  }
-  
-  public static void unreserveTable(String tableId, long tid, boolean writeLock) throws Exception {
-    getLock(tableId, tid, writeLock).unlock();
-    log.info("table " + tableId + " (" + Long.toHexString(tid) + ") unlocked for " + (writeLock ? "write" : "read"));
-  }
-  
-  public static long reserveHdfsDirectory(String directory, long tid) throws KeeperException, InterruptedException {
-    Instance instance = HdfsZooInstance.getInstance();
-    
-    String resvPath = ZooUtil.getRoot(instance) + Constants.ZHDFS_RESERVATIONS + "/" + new String(Base64.encodeBase64(directory.getBytes()));
-    
-    IZooReaderWriter zk = ZooReaderWriter.getRetryingInstance();
-    
-    if (ZooReservation.attempt(zk, resvPath, String.format("%016x", tid), "")) {
-      return 0;
-    } else
-      return 50;
-  }
-  
-  public static void unreserveHdfsDirectory(String directory, long tid) throws KeeperException, InterruptedException {
-    Instance instance = HdfsZooInstance.getInstance();
-    String resvPath = ZooUtil.getRoot(instance) + Constants.ZHDFS_RESERVATIONS + "/" + new String(Base64.encodeBase64(directory.getBytes()));
-    ZooReservation.release(ZooReaderWriter.getRetryingInstance(), resvPath, String.format("%016x", tid));
-  }
-  
-  private static Lock getLock(String tableId, long tid, boolean writeLock) throws Exception {
-    byte[] lockData = String.format("%016x", tid).getBytes();
-    ZooQueueLock qlock = new ZooQueueLock(ZooUtil.getRoot(HdfsZooInstance.getInstance()) + Constants.ZTABLE_LOCKS + "/" + tableId, false);
-    Lock lock = DistributedReadWriteLock.recoverLock(qlock, lockData);
-    if (lock == null) {
-      DistributedReadWriteLock locker = new DistributedReadWriteLock(qlock, lockData);
-      if (writeLock)
-        lock = locker.writeLock();
-      else
-        lock = locker.readLock();
-    }
-    return lock;
-  }
-  
-  public static Lock getReadLock(String tableId, long tid) throws Exception {
-    return Utils.getLock(tableId, tid, false);
-  }
-  
-}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/598821cd/server/src/main/java/org/apache/accumulo/server/master/tserverOps/ShutdownTServer.java
----------------------------------------------------------------------
diff --git a/server/src/main/java/org/apache/accumulo/server/master/tserverOps/ShutdownTServer.java b/server/src/main/java/org/apache/accumulo/server/master/tserverOps/ShutdownTServer.java
deleted file mode 100644
index 1a196a4..0000000
--- a/server/src/main/java/org/apache/accumulo/server/master/tserverOps/ShutdownTServer.java
+++ /dev/null
@@ -1,92 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.server.master.tserverOps;
-
-import org.apache.accumulo.core.Constants;
-import org.apache.accumulo.core.master.thrift.TabletServerStatus;
-import org.apache.accumulo.core.zookeeper.ZooUtil;
-import org.apache.accumulo.fate.Repo;
-import org.apache.accumulo.fate.zookeeper.IZooReaderWriter;
-import org.apache.accumulo.fate.zookeeper.ZooUtil.NodeExistsPolicy;
-import org.apache.accumulo.server.master.EventCoordinator.Listener;
-import org.apache.accumulo.server.master.LiveTServerSet.TServerConnection;
-import org.apache.accumulo.server.master.Master;
-import org.apache.accumulo.server.master.state.TServerInstance;
-import org.apache.accumulo.server.master.tableOps.MasterRepo;
-import org.apache.accumulo.server.zookeeper.ZooLock;
-import org.apache.accumulo.server.zookeeper.ZooReaderWriter;
-import org.apache.log4j.Logger;
-import org.apache.thrift.transport.TTransportException;
-
/**
 * FATE operation that shuts down a single tablet server, either forcibly (by
 * deleting its ZooKeeper lock) or gracefully (by waiting for it to unload its
 * tablets and then asking it to halt).
 */
public class ShutdownTServer extends MasterRepo {
  
  private static final long serialVersionUID = 1L;
  private static final Logger log = Logger.getLogger(ShutdownTServer.class);
  // the tablet server this operation shuts down
  private TServerInstance server;
  // when true, delete the server's lock immediately instead of waiting for a clean shutdown
  private boolean force;
  
  public ShutdownTServer(TServerInstance server, boolean force) {
    this.server = server;
    this.force = force;
  }
  
  @Override
  public long isReady(long tid, Master environment) throws Exception {
    // always ready; the graceful-shutdown wait happens inside call() (see TODO below)
    return 0;
  }
  
  @Override
  public Repo<Master> call(long tid, Master master) throws Exception {
    // suppress assignment of tablets to the server
    if (force) {
      // forced path: delete the server's live lock and record it as a dead server in ZooKeeper
      String path = ZooUtil.getRoot(master.getInstance()) + Constants.ZTSERVERS + "/" + server.getLocation();
      ZooLock.deleteLock(path);
      path = ZooUtil.getRoot(master.getInstance()) + Constants.ZDEADTSERVERS + "/" + server.getLocation();
      IZooReaderWriter zoo = ZooReaderWriter.getInstance();
      zoo.putPersistentData(path, "forced down".getBytes(), NodeExistsPolicy.OVERWRITE);
      // no follow-on step: this FATE operation is complete
      return null;
    }
    
    // TODO move this to isReady() and drop while loop? - ACCUMULO-1259
    // graceful path: ask the master to stop assigning tablets to the server,
    // then wait until the server hosts no tablets before telling it to halt
    Listener listener = master.getEventCoordinator().getListener();
    master.shutdownTServer(server);
    while (master.onlineTabletServers().contains(server)) {
      TServerConnection connection = master.getConnection(server);
      if (connection != null) {
        try {
          TabletServerStatus status = connection.getTableMap(false);
          if (status.tableMap != null && status.tableMap.isEmpty()) {
            log.info("tablet server hosts no tablets " + server);
            connection.halt(master.getMasterLock());
            log.info("tablet server asked to halt " + server);
            break;
          }
        } catch (TTransportException ex) {
          // expected -- the server may already be going down and refusing connections
        } catch (Exception ex) {
          log.error("Error talking to tablet server " + server + ": " + ex);
        }
      }
      // block until cluster state changes (or 1s elapses), then re-check
      listener.waitForEvents(1000);
    }
    
    return null;
  }
  
  @Override
  public void undo(long tid, Master m) throws Exception {}
}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/598821cd/server/src/main/java/org/apache/accumulo/server/metanalysis/FilterMeta.java
----------------------------------------------------------------------
diff --git a/server/src/main/java/org/apache/accumulo/server/metanalysis/FilterMeta.java b/server/src/main/java/org/apache/accumulo/server/metanalysis/FilterMeta.java
deleted file mode 100644
index 8cdafcd..0000000
--- a/server/src/main/java/org/apache/accumulo/server/metanalysis/FilterMeta.java
+++ /dev/null
@@ -1,93 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.server.metanalysis;
-
-import java.io.IOException;
-import java.util.HashSet;
-import java.util.Set;
-
-import org.apache.accumulo.core.metadata.MetadataTable;
-import org.apache.accumulo.core.util.CachedConfiguration;
-import org.apache.accumulo.server.logger.LogEvents;
-import org.apache.accumulo.server.logger.LogFileKey;
-import org.apache.accumulo.server.logger.LogFileValue;
-import org.apache.hadoop.conf.Configured;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.mapreduce.Job;
-import org.apache.hadoop.mapreduce.Mapper;
-import org.apache.hadoop.util.Tool;
-import org.apache.hadoop.util.ToolRunner;
-
-/**
- * A map reduce job that takes a set of walogs and filters out all non metadata table events.
- */
-public class FilterMeta extends Configured implements Tool {
-  
-  public static class FilterMapper extends Mapper<LogFileKey,LogFileValue,LogFileKey,LogFileValue> {
-    private Set<Integer> tabletIds;
-    
-    @Override
-    protected void setup(Context context) throws java.io.IOException, java.lang.InterruptedException {
-      tabletIds = new HashSet<Integer>();
-    }
-    
-    @Override
-    public void map(LogFileKey key, LogFileValue value, Context context) throws IOException, InterruptedException {
-      if (key.event == LogEvents.OPEN) {
-        context.write(key, value);
-      } else if (key.event == LogEvents.DEFINE_TABLET && key.tablet.getTableId().toString().equals(MetadataTable.ID)) {
-        tabletIds.add(key.tid);
-        context.write(key, value);
-      } else if ((key.event == LogEvents.MUTATION || key.event == LogEvents.MANY_MUTATIONS) && tabletIds.contains(key.tid)) {
-        context.write(key, value);
-      }
-    }
-  }
-
-  @Override
-  public int run(String[] args) throws Exception {
-    
-    String jobName = this.getClass().getSimpleName() + "_" + System.currentTimeMillis();
-
-    @SuppressWarnings("deprecation")
-    Job job = new Job(getConf(), jobName);
-    job.setJarByClass(this.getClass());
-    
-    Path paths[] = new Path[args.length - 1];
-    for (int i = 0; i < paths.length; i++) {
-      paths[i] = new Path(args[i]);
-    }
-
-    job.setInputFormatClass(LogFileInputFormat.class);
-    LogFileInputFormat.setInputPaths(job, paths);
-    
-    job.setOutputFormatClass(LogFileOutputFormat.class);
-    LogFileOutputFormat.setOutputPath(job, new Path(args[args.length - 1]));
-
-    job.setMapperClass(FilterMapper.class);
-    
-    job.setNumReduceTasks(0);
-
-    job.waitForCompletion(true);
-    return job.isSuccessful() ? 0 : 1;
-  }
-  
-  public static void main(String[] args) throws Exception {
-    int res = ToolRunner.run(CachedConfiguration.getInstance(), new FilterMeta(), args);
-    System.exit(res);
-  }
-}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/598821cd/server/src/main/java/org/apache/accumulo/server/metanalysis/FindTablet.java
----------------------------------------------------------------------
diff --git a/server/src/main/java/org/apache/accumulo/server/metanalysis/FindTablet.java b/server/src/main/java/org/apache/accumulo/server/metanalysis/FindTablet.java
deleted file mode 100644
index f7e5de9..0000000
--- a/server/src/main/java/org/apache/accumulo/server/metanalysis/FindTablet.java
+++ /dev/null
@@ -1,72 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.server.metanalysis;
-
-import java.util.Map.Entry;
-
-import org.apache.accumulo.server.cli.ClientOpts;
-import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.KeyExtent;
-import org.apache.accumulo.core.data.Range;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.util.TextUtil;
-import org.apache.hadoop.io.Text;
-
-import com.beust.jcommander.Parameter;
-
/**
 * Finds tablet creation events.
 */
public class FindTablet {
  
  static public class Opts extends ClientOpts {
    @Parameter(names={"-r", "--row"}, required=true, description="find tablets that contain this row")
    String row = null;
    
    @Parameter(names="--tableId", required=true, description="table id")
    String tableId = null;
  }
  
  public static void main(String[] args) throws Exception {
    Opts opts = new Opts();
    opts.parseArgs(FindTablet.class.getName(), args);
    
    findContainingTablets(opts);
  }

  /**
   * Scans the "createEvents" table over the given table id's metadata range and
   * prints every tablet-creation event whose extent contains {@code opts.row}.
   *
   * @param opts parsed command line options supplying the connector, auths, table id, and row
   */
  private static void findContainingTablets(Opts opts) throws Exception {
    // metadata range covering every tablet of the requested table
    Range range = new KeyExtent(new Text(opts.tableId), null, null).toMetadataRange();

    Scanner scanner = opts.getConnector().createScanner("createEvents", opts.auths);
    scanner.setRange(range);

    Text row = new Text(opts.row);
    for (Entry<Key,Value> entry : scanner) {
      // row = metadata row of the new tablet, column family = its previous end row
      KeyExtent ke = new KeyExtent(entry.getKey().getRow(), new Value(TextUtil.getBytes(entry.getKey().getColumnFamily())));
      if (ke.contains(row)) {
        System.out.println(entry.getKey().getColumnQualifier() + " " + ke + " " + entry.getValue());
      }
    }
  }
}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/598821cd/server/src/main/java/org/apache/accumulo/server/metanalysis/IndexMeta.java
----------------------------------------------------------------------
diff --git a/server/src/main/java/org/apache/accumulo/server/metanalysis/IndexMeta.java b/server/src/main/java/org/apache/accumulo/server/metanalysis/IndexMeta.java
deleted file mode 100644
index 3b04367..0000000
--- a/server/src/main/java/org/apache/accumulo/server/metanalysis/IndexMeta.java
+++ /dev/null
@@ -1,177 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.server.metanalysis;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.TableExistsException;
-import org.apache.accumulo.core.client.mapreduce.AccumuloOutputFormat;
-import org.apache.accumulo.core.data.ColumnUpdate;
-import org.apache.accumulo.core.data.KeyExtent;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.metadata.MetadataTable;
-import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection;
-import org.apache.accumulo.core.util.CachedConfiguration;
-import org.apache.accumulo.server.cli.ClientOpts;
-import org.apache.accumulo.server.logger.LogEvents;
-import org.apache.accumulo.server.logger.LogFileKey;
-import org.apache.accumulo.server.logger.LogFileValue;
-import org.apache.hadoop.conf.Configured;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.io.WritableUtils;
-import org.apache.hadoop.mapreduce.Job;
-import org.apache.hadoop.mapreduce.Mapper;
-import org.apache.hadoop.util.Tool;
-import org.apache.hadoop.util.ToolRunner;
-import org.apache.log4j.Logger;
-
-import com.beust.jcommander.Parameter;
-
-/**
- * A map reduce job that takes write ahead logs containing mutations for the metadata table and indexes them into Accumulo tables for analysis.
- * 
- */
-
-public class IndexMeta extends Configured implements Tool {
-  
-  public static class IndexMapper extends Mapper<LogFileKey,LogFileValue,Text,Mutation> {
-    private static final Text CREATE_EVENTS_TABLE = new Text("createEvents");
-    private static final Text TABLET_EVENTS_TABLE = new Text("tabletEvents");
-    private Map<Integer,KeyExtent> tabletIds = new HashMap<Integer,KeyExtent>();
-    private String uuid = null;
-    
-    @Override
-    protected void setup(Context context) throws java.io.IOException, java.lang.InterruptedException {
-      tabletIds = new HashMap<Integer,KeyExtent>();
-      uuid = null;
-    }
-    
-    @Override
-    public void map(LogFileKey key, LogFileValue value, Context context) throws IOException, InterruptedException {
-      if (key.event == LogEvents.OPEN) {
-        uuid = key.tserverSession;
-      } else if (key.event == LogEvents.DEFINE_TABLET) {
-        if (key.tablet.getTableId().toString().equals(MetadataTable.ID)) {
-          tabletIds.put(key.tid, new KeyExtent(key.tablet));
-        }
-      } else if ((key.event == LogEvents.MUTATION || key.event == LogEvents.MANY_MUTATIONS) && tabletIds.containsKey(key.tid)) {
-        for (Mutation m : value.mutations) {
-          index(context, m, uuid, tabletIds.get(key.tid));
-        }
-      }
-    }
-    
-    void index(Context context, Mutation m, String logFile, KeyExtent metaTablet) throws IOException, InterruptedException {
-      List<ColumnUpdate> columnsUpdates = m.getUpdates();
-      
-      Text prevRow = null;
-      long timestamp = 0;
-      
-      if (m.getRow().length > 0 && m.getRow()[0] == '~') {
-        return;
-      }
-      
-      for (ColumnUpdate cu : columnsUpdates) {
-        if (TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.equals(new Text(cu.getColumnFamily()), new Text(cu.getColumnQualifier())) && !cu.isDeleted()) {
-          prevRow = new Text(cu.getValue());
-        }
-        
-        timestamp = cu.getTimestamp();
-      }
-      
-      byte[] serMut = WritableUtils.toByteArray(m);
-      
-      if (prevRow != null) {
-        Mutation createEvent = new Mutation(new Text(m.getRow()));
-        createEvent.put(prevRow, new Text(String.format("%020d", timestamp)), new Value(metaTablet.toString().getBytes()));
-        context.write(CREATE_EVENTS_TABLE, createEvent);
-      }
-      
-      Mutation tabletEvent = new Mutation(new Text(m.getRow()));
-      tabletEvent.put(new Text(String.format("%020d", timestamp)), new Text("mut"), new Value(serMut));
-      tabletEvent.put(new Text(String.format("%020d", timestamp)), new Text("mtab"), new Value(metaTablet.toString().getBytes()));
-      tabletEvent.put(new Text(String.format("%020d", timestamp)), new Text("log"), new Value(logFile.getBytes()));
-      context.write(TABLET_EVENTS_TABLE, tabletEvent);
-    }
-  }
-  
-  static class Opts extends ClientOpts {
-    @Parameter(description = "<logfile> { <logfile> ...}")
-    List<String> logFiles = new ArrayList<String>();
-  }
-  
-  @Override
-  public int run(String[] args) throws Exception {
-    Opts opts = new Opts();
-    opts.parseArgs(IndexMeta.class.getName(), args);
-    
-    String jobName = this.getClass().getSimpleName() + "_" + System.currentTimeMillis();
-
-    @SuppressWarnings("deprecation")
-    Job job = new Job(getConf(), jobName);
-    job.setJarByClass(this.getClass());
-    
-    List<String> logFiles = Arrays.asList(args).subList(4, args.length);
-    Path paths[] = new Path[logFiles.size()];
-    int count = 0;
-    for (String logFile : logFiles) {
-      paths[count++] = new Path(logFile);
-    }
-    
-    job.setInputFormatClass(LogFileInputFormat.class);
-    LogFileInputFormat.setInputPaths(job, paths);
-    
-    job.setNumReduceTasks(0);
-    
-    job.setOutputFormatClass(AccumuloOutputFormat.class);
-    AccumuloOutputFormat.setZooKeeperInstance(job, opts.instance, opts.zookeepers);
-    AccumuloOutputFormat.setConnectorInfo(job, opts.principal, opts.getToken());
-    AccumuloOutputFormat.setCreateTables(job, false);
-    
-    job.setMapperClass(IndexMapper.class);
-    
-    Connector conn = opts.getConnector();
-    
-    try {
-      conn.tableOperations().create("createEvents");
-    } catch (TableExistsException tee) {
-      Logger.getLogger(IndexMeta.class).warn("Table createEvents exists");
-    }
-    
-    try {
-      conn.tableOperations().create("tabletEvents");
-    } catch (TableExistsException tee) {
-      Logger.getLogger(IndexMeta.class).warn("Table tabletEvents exists");
-    }
-    
-    job.waitForCompletion(true);
-    return job.isSuccessful() ? 0 : 1;
-  }
-  
-  public static void main(String[] args) throws Exception {
-    int res = ToolRunner.run(CachedConfiguration.getInstance(), new IndexMeta(), args);
-    System.exit(res);
-  }
-}


Mime
View raw message