accumulo-commits mailing list archives

From: vi...@apache.org
Subject: svn commit: r1496226 [7/13] - in /accumulo/branches/ACCUMULO-CURATOR: ./ assemble/ conf/examples/1GB/native-standalone/ conf/examples/1GB/standalone/ conf/examples/2GB/native-standalone/ conf/examples/2GB/standalone/ conf/examples/3GB/native-standalone...
Date: Mon, 24 Jun 2013 21:34:25 GMT
Modified: accumulo/branches/ACCUMULO-CURATOR/server/src/main/java/org/apache/accumulo/server/master/tableOps/TableRangeOp.java
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-CURATOR/server/src/main/java/org/apache/accumulo/server/master/tableOps/TableRangeOp.java?rev=1496226&r1=1496225&r2=1496226&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-CURATOR/server/src/main/java/org/apache/accumulo/server/master/tableOps/TableRangeOp.java (original)
+++ accumulo/branches/ACCUMULO-CURATOR/server/src/main/java/org/apache/accumulo/server/master/tableOps/TableRangeOp.java Mon Jun 24 21:34:20 2013
@@ -16,10 +16,6 @@
  */
 package org.apache.accumulo.server.master.tableOps;
 
-import org.apache.accumulo.core.Constants;
-import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.BatchWriterConfig;
-import org.apache.accumulo.core.client.Connector;
 import org.apache.accumulo.core.client.impl.thrift.TableOperation;
 import org.apache.accumulo.core.client.impl.thrift.TableOperationExceptionType;
 import org.apache.accumulo.core.client.impl.thrift.ThriftTableOperationException;
@@ -30,9 +26,6 @@ import org.apache.accumulo.server.master
 import org.apache.accumulo.server.master.state.MergeInfo;
 import org.apache.accumulo.server.master.state.MergeInfo.Operation;
 import org.apache.accumulo.server.master.state.MergeState;
-import org.apache.accumulo.server.util.MetadataTable;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.io.Text;
 
 /**
@@ -48,28 +41,6 @@ import org.apache.hadoop.io.Text;
  * The code below uses read-write lock to prevent some operations while a merge is taking place. Normal operations, like bulk imports, will grab the read lock
  * and prevent merges (writes) while they run. Merge operations will lock out some operations while they run.
  */
-
-class MakeDeleteEntries extends MasterRepo {
-  
-  private static final long serialVersionUID = 1L;
-  
-  @Override
-  public Repo<Master> call(long tid, Master master) throws Exception {
-    log.info("creating delete entries for merged metadata tablets");
-    Connector conn = master.getConnector();
-    BatchWriter bw = conn.createBatchWriter(Constants.METADATA_TABLE_NAME, new BatchWriterConfig());
-    String tableDir = Constants.getMetadataTableDir(master.getConfiguration().getConfiguration());
-    for (FileStatus fs : master.getFileSystem().listStatus(new Path(tableDir))) {
-      // TODO: add the entries only if there are no !METADATA table references - ACCUMULO-1308
-      if (fs.isDir() && fs.getPath().getName().matches("^" + Constants.GENERATED_TABLET_DIRECTORY_PREFIX + ".*")) {
-        bw.addMutation(MetadataTable.createDeleteMutation(Constants.METADATA_TABLE_ID, "/" + fs.getPath().getName()));
-      }
-    }
-    bw.close();
-    return null;
-  }
-}
-
 class TableRangeOpWait extends MasterRepo {
   
   private static final long serialVersionUID = 1L;
@@ -95,13 +66,6 @@ class TableRangeOpWait extends MasterRep
     log.info("removing merge information " + mergeInfo);
     master.clearMergeState(tableIdText);
     Utils.unreserveTable(tableId, tid, true);
-    // We can't add entries to the metadata table if it is offline for this merge.
-    // If the delete entries for the metadata table were in the root tablet, it would work just fine
-    // but all the delete entries go into the end of the metadata table. Work around: add the
-    // delete entries after the merge completes.
-    if (mergeInfo.getOperation().equals(Operation.MERGE) && tableId.equals(Constants.METADATA_TABLE_ID)) {
-      return new MakeDeleteEntries();
-    }
     return null;
   }
   

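The class comment kept as context in the hunk above describes the locking scheme this file relies on: normal operations such as bulk imports share a read lock, while a merge takes the write lock and excludes them. A minimal sketch of that idea using the JDK's ReentrantReadWriteLock follows; Accumulo's actual reservation goes through Utils and ZooKeeper locks not shown here, and the TableMergeLock, runBulkImport and runMerge names are hypothetical.

    import java.util.concurrent.locks.ReadWriteLock;
    import java.util.concurrent.locks.ReentrantReadWriteLock;

    // Sketch only: stands in for the read/write locking described in the class
    // comment above. The real code reserves tables via Utils/ZooKeeper.
    class TableMergeLock {
      private final ReadWriteLock lock = new ReentrantReadWriteLock();

      // Normal operations (e.g. bulk import) share the read lock; many may run
      // at once, but none while a merge holds the write lock.
      void runBulkImport(Runnable work) {
        lock.readLock().lock();
        try {
          work.run();
        } finally {
          lock.readLock().unlock();
        }
      }

      // A merge takes the write lock, blocking the read-locked operations above.
      void runMerge(Runnable work) {
        lock.writeLock().lock();
        try {
          work.run();
        } finally {
          lock.writeLock().unlock();
        }
      }
    }
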
Modified: accumulo/branches/ACCUMULO-CURATOR/server/src/main/java/org/apache/accumulo/server/master/tableOps/TraceRepo.java
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-CURATOR/server/src/main/java/org/apache/accumulo/server/master/tableOps/TraceRepo.java?rev=1496226&r1=1496225&r2=1496226&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-CURATOR/server/src/main/java/org/apache/accumulo/server/master/tableOps/TraceRepo.java (original)
+++ accumulo/branches/ACCUMULO-CURATOR/server/src/main/java/org/apache/accumulo/server/master/tableOps/TraceRepo.java Mon Jun 24 21:34:20 2013
@@ -16,12 +16,11 @@
  */
 package org.apache.accumulo.server.master.tableOps;
 
+import org.apache.accumulo.fate.Repo;
 import org.apache.accumulo.trace.instrument.Span;
 import org.apache.accumulo.trace.instrument.Trace;
 import org.apache.accumulo.trace.instrument.Tracer;
 import org.apache.accumulo.trace.thrift.TInfo;
-import org.apache.accumulo.fate.Repo;
-
 
 /**
  * 
@@ -29,7 +28,7 @@ import org.apache.accumulo.fate.Repo;
 public class TraceRepo<T> implements Repo<T> {
   
   private static final long serialVersionUID = 1L;
-
+  
   TInfo tinfo;
   Repo<T> repo;
   
@@ -38,11 +37,6 @@ public class TraceRepo<T> implements Rep
     tinfo = Tracer.traceInfo();
   }
   
-  /*
-   * (non-Javadoc)
-   * 
-   * @see org.apache.accumulo.server.fate.Repo#isReady(long, java.lang.Object)
-   */
   @Override
   public long isReady(long tid, T environment) throws Exception {
     Span span = Trace.trace(tinfo, repo.getDescription());
@@ -53,11 +47,6 @@ public class TraceRepo<T> implements Rep
     }
   }
   
-  /*
-   * (non-Javadoc)
-   * 
-   * @see org.apache.accumulo.server.fate.Repo#call(long, java.lang.Object)
-   */
   @Override
   public Repo<T> call(long tid, T environment) throws Exception {
     Span span = Trace.trace(tinfo, repo.getDescription());
@@ -71,11 +60,6 @@ public class TraceRepo<T> implements Rep
     }
   }
   
-  /*
-   * (non-Javadoc)
-   * 
-   * @see org.apache.accumulo.server.fate.Repo#undo(long, java.lang.Object)
-   */
   @Override
   public void undo(long tid, T environment) throws Exception {
     Span span = Trace.trace(tinfo, repo.getDescription());
@@ -86,24 +70,14 @@ public class TraceRepo<T> implements Rep
     }
   }
   
-  /*
-   * (non-Javadoc)
-   * 
-   * @see org.apache.accumulo.server.fate.Repo#getDescription()
-   */
   @Override
   public String getDescription() {
     return repo.getDescription();
   }
   
-  /*
-   * (non-Javadoc)
-   * 
-   * @see org.apache.accumulo.server.fate.Repo#getReturn()
-   */
   @Override
   public String getReturn() {
     return repo.getReturn();
   }
-
+  
 }

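TraceRepo, modified above, is a decorator: it implements Repo<T> by forwarding each method to the wrapped repo while bracketing the call in a trace span obtained from Trace.trace(tinfo, repo.getDescription()). Since the span-handling bodies are elided in this hunk, the sketch below shows the same wrap-and-delegate shape in plain Java, with a hypothetical Task interface and elapsed-time printing standing in for the span:

    // Sketch of the decorator shape TraceRepo follows: delegate every call and
    // bracket it with tracing. Task/TimedTask are hypothetical names, and the
    // nanoTime bookkeeping stands in for opening and stopping a Span.
    interface Task {
      String call() throws Exception;
    }

    final class TimedTask implements Task {
      private final Task delegate;

      TimedTask(Task delegate) {
        this.delegate = delegate;
      }

      @Override
      public String call() throws Exception {
        long start = System.nanoTime();               // "open the span"
        try {
          return delegate.call();                     // forward to the wrapped task
        } finally {
          long elapsedNs = System.nanoTime() - start; // "stop the span"
          System.out.printf("call took %,d ns%n", elapsedNs);
        }
      }
    }
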
Modified: accumulo/branches/ACCUMULO-CURATOR/server/src/main/java/org/apache/accumulo/server/metanalysis/FilterMeta.java
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-CURATOR/server/src/main/java/org/apache/accumulo/server/metanalysis/FilterMeta.java?rev=1496226&r1=1496225&r2=1496226&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-CURATOR/server/src/main/java/org/apache/accumulo/server/metanalysis/FilterMeta.java (original)
+++ accumulo/branches/ACCUMULO-CURATOR/server/src/main/java/org/apache/accumulo/server/metanalysis/FilterMeta.java Mon Jun 24 21:34:20 2013
@@ -20,8 +20,8 @@ import java.io.IOException;
 import java.util.HashSet;
 import java.util.Set;
 
-import org.apache.accumulo.core.Constants;
 import org.apache.accumulo.core.util.CachedConfiguration;
+import org.apache.accumulo.core.util.MetadataTable;
 import org.apache.accumulo.server.logger.LogEvents;
 import org.apache.accumulo.server.logger.LogFileKey;
 import org.apache.accumulo.server.logger.LogFileValue;
@@ -49,7 +49,7 @@ public class FilterMeta extends Configur
     public void map(LogFileKey key, LogFileValue value, Context context) throws IOException, InterruptedException {
       if (key.event == LogEvents.OPEN) {
         context.write(key, value);
-      } else if (key.event == LogEvents.DEFINE_TABLET && key.tablet.getTableId().toString().equals(Constants.METADATA_TABLE_ID)) {
+      } else if (key.event == LogEvents.DEFINE_TABLET && key.tablet.getTableId().toString().equals(MetadataTable.ID)) {
         tabletIds.add(key.tid);
         context.write(key, value);
       } else if ((key.event == LogEvents.MUTATION || key.event == LogEvents.MANY_MUTATIONS) && tabletIds.contains(key.tid)) {

Modified: accumulo/branches/ACCUMULO-CURATOR/server/src/main/java/org/apache/accumulo/server/metanalysis/IndexMeta.java
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-CURATOR/server/src/main/java/org/apache/accumulo/server/metanalysis/IndexMeta.java?rev=1496226&r1=1496225&r2=1496226&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-CURATOR/server/src/main/java/org/apache/accumulo/server/metanalysis/IndexMeta.java (original)
+++ accumulo/branches/ACCUMULO-CURATOR/server/src/main/java/org/apache/accumulo/server/metanalysis/IndexMeta.java Mon Jun 24 21:34:20 2013
@@ -23,7 +23,6 @@ import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 
-import org.apache.accumulo.core.Constants;
 import org.apache.accumulo.core.client.Connector;
 import org.apache.accumulo.core.client.TableExistsException;
 import org.apache.accumulo.core.client.mapreduce.AccumuloOutputFormat;
@@ -32,6 +31,7 @@ import org.apache.accumulo.core.data.Key
 import org.apache.accumulo.core.data.Mutation;
 import org.apache.accumulo.core.data.Value;
 import org.apache.accumulo.core.util.CachedConfiguration;
+import org.apache.accumulo.core.util.MetadataTable;
 import org.apache.accumulo.server.cli.ClientOpts;
 import org.apache.accumulo.server.logger.LogEvents;
 import org.apache.accumulo.server.logger.LogFileKey;
@@ -72,7 +72,7 @@ public class IndexMeta extends Configure
       if (key.event == LogEvents.OPEN) {
         uuid = key.tserverSession;
       } else if (key.event == LogEvents.DEFINE_TABLET) {
-        if (key.tablet.getTableId().toString().equals(Constants.METADATA_TABLE_ID)) {
+        if (key.tablet.getTableId().toString().equals(MetadataTable.ID)) {
           tabletIds.put(key.tid, new KeyExtent(key.tablet));
         }
       } else if ((key.event == LogEvents.MUTATION || key.event == LogEvents.MANY_MUTATIONS) && tabletIds.containsKey(key.tid)) {
@@ -93,7 +93,7 @@ public class IndexMeta extends Configure
       }
       
       for (ColumnUpdate cu : columnsUpdates) {
-        if (Constants.METADATA_PREV_ROW_COLUMN.equals(new Text(cu.getColumnFamily()), new Text(cu.getColumnQualifier())) && !cu.isDeleted()) {
+        if (MetadataTable.PREV_ROW_COLUMN.equals(new Text(cu.getColumnFamily()), new Text(cu.getColumnQualifier())) && !cu.isDeleted()) {
           prevRow = new Text(cu.getValue());
         }
         

Modified: accumulo/branches/ACCUMULO-CURATOR/server/src/main/java/org/apache/accumulo/server/metanalysis/PrintEvents.java
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-CURATOR/server/src/main/java/org/apache/accumulo/server/metanalysis/PrintEvents.java?rev=1496226&r1=1496225&r2=1496226&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-CURATOR/server/src/main/java/org/apache/accumulo/server/metanalysis/PrintEvents.java (original)
+++ accumulo/branches/ACCUMULO-CURATOR/server/src/main/java/org/apache/accumulo/server/metanalysis/PrintEvents.java Mon Jun 24 21:34:20 2013
@@ -22,7 +22,6 @@ import java.util.Collections;
 import java.util.List;
 import java.util.Map.Entry;
 
-import org.apache.accumulo.core.Constants;
 import org.apache.accumulo.server.cli.ClientOpts;
 import org.apache.accumulo.core.client.Connector;
 import org.apache.accumulo.core.client.Scanner;
@@ -33,6 +32,7 @@ import org.apache.accumulo.core.data.Par
 import org.apache.accumulo.core.data.Range;
 import org.apache.accumulo.core.data.Value;
 import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.core.util.MetadataTable;
 import org.apache.accumulo.server.logger.LogFileValue;
 import org.apache.hadoop.io.Text;
 
@@ -96,7 +96,7 @@ public class PrintEvents {
         
         List<ColumnUpdate> columnsUpdates = m.getUpdates();
         for (ColumnUpdate cu : columnsUpdates) {
-          if (Constants.METADATA_PREV_ROW_COLUMN.equals(new Text(cu.getColumnFamily()), new Text(cu.getColumnQualifier())) && count > 0) {
+          if (MetadataTable.PREV_ROW_COLUMN.equals(new Text(cu.getColumnFamily()), new Text(cu.getColumnQualifier())) && count > 0) {
             System.out.println("Saw change to prevrow, stopping printing events.");
             break loop1;
           }

Modified: accumulo/branches/ACCUMULO-CURATOR/server/src/main/java/org/apache/accumulo/server/monitor/Monitor.java
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-CURATOR/server/src/main/java/org/apache/accumulo/server/monitor/Monitor.java?rev=1496226&r1=1496225&r2=1496226&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-CURATOR/server/src/main/java/org/apache/accumulo/server/monitor/Monitor.java (original)
+++ accumulo/branches/ACCUMULO-CURATOR/server/src/main/java/org/apache/accumulo/server/monitor/Monitor.java Mon Jun 24 21:34:20 2013
@@ -27,13 +27,11 @@ import java.util.Map;
 import java.util.Map.Entry;
 import java.util.Set;
 
-import org.apache.accumulo.trace.instrument.Tracer;
 import org.apache.accumulo.core.Constants;
 import org.apache.accumulo.core.client.Instance;
 import org.apache.accumulo.core.client.impl.MasterClient;
 import org.apache.accumulo.core.conf.AccumuloConfiguration;
 import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.core.file.FileUtil;
 import org.apache.accumulo.core.gc.thrift.GCMonitorService;
 import org.apache.accumulo.core.gc.thrift.GCStatus;
 import org.apache.accumulo.core.master.thrift.Compacting;
@@ -42,7 +40,6 @@ import org.apache.accumulo.core.master.t
 import org.apache.accumulo.core.master.thrift.TableInfo;
 import org.apache.accumulo.core.master.thrift.TabletServerStatus;
 import org.apache.accumulo.core.security.SecurityUtil;
-import org.apache.accumulo.core.util.CachedConfiguration;
 import org.apache.accumulo.core.util.Daemon;
 import org.apache.accumulo.core.util.LoggingRunnable;
 import org.apache.accumulo.core.util.Pair;
@@ -54,6 +51,8 @@ import org.apache.accumulo.core.zookeepe
 import org.apache.accumulo.server.Accumulo;
 import org.apache.accumulo.server.client.HdfsZooInstance;
 import org.apache.accumulo.server.conf.ServerConfiguration;
+import org.apache.accumulo.server.fs.VolumeManager;
+import org.apache.accumulo.server.fs.VolumeManagerImpl;
 import org.apache.accumulo.server.monitor.servlets.DefaultServlet;
 import org.apache.accumulo.server.monitor.servlets.GcStatusServlet;
 import org.apache.accumulo.server.monitor.servlets.JSONServlet;
@@ -73,7 +72,7 @@ import org.apache.accumulo.server.proble
 import org.apache.accumulo.server.problems.ProblemType;
 import org.apache.accumulo.server.security.SecurityConstants;
 import org.apache.accumulo.server.util.EmbeddedWebServer;
-import org.apache.hadoop.fs.FileSystem;
+import org.apache.accumulo.trace.instrument.Tracer;
 import org.apache.log4j.Logger;
 import org.apache.zookeeper.WatchedEvent;
 import org.apache.zookeeper.Watcher;
@@ -450,7 +449,7 @@ public class Monitor {
   public static void main(String[] args) throws Exception {
     SecurityUtil.serverLogin();
     
-    FileSystem fs = FileUtil.getFileSystem(CachedConfiguration.getInstance(), ServerConfiguration.getSiteConfiguration());
+    VolumeManager fs = VolumeManagerImpl.get();
     String hostname = Accumulo.getLocalAddress(args);
     instance = HdfsZooInstance.getInstance();
     config = new ServerConfiguration(instance);

Modified: accumulo/branches/ACCUMULO-CURATOR/server/src/main/java/org/apache/accumulo/server/monitor/servlets/TablesServlet.java
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-CURATOR/server/src/main/java/org/apache/accumulo/server/monitor/servlets/TablesServlet.java?rev=1496226&r1=1496225&r2=1496226&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-CURATOR/server/src/main/java/org/apache/accumulo/server/monitor/servlets/TablesServlet.java (original)
+++ accumulo/branches/ACCUMULO-CURATOR/server/src/main/java/org/apache/accumulo/server/monitor/servlets/TablesServlet.java Mon Jun 24 21:34:20 2013
@@ -33,6 +33,8 @@ import org.apache.accumulo.core.data.Key
 import org.apache.accumulo.core.data.Range;
 import org.apache.accumulo.core.master.thrift.TableInfo;
 import org.apache.accumulo.core.master.thrift.TabletServerStatus;
+import org.apache.accumulo.core.util.MetadataTable;
+import org.apache.accumulo.core.util.RootTable;
 import org.apache.accumulo.server.client.HdfsZooInstance;
 import org.apache.accumulo.server.master.state.MetaDataTableScanner;
 import org.apache.accumulo.server.master.state.TabletLocationState;
@@ -144,23 +146,27 @@ public class TablesServlet extends Basic
   private void doTableDetails(HttpServletRequest req, StringBuilder sb, Map<String,String> tidToNameMap, String tableId) {
     String displayName = Tables.getPrintableTableNameFromId(tidToNameMap, tableId);
     Instance instance = HdfsZooInstance.getInstance();
-    MetaDataTableScanner scanner = new MetaDataTableScanner(instance, SecurityConstants.getSystemCredentials(), new Range(KeyExtent.getMetadataEntry(new Text(
-        tableId), new Text()),
-        KeyExtent.getMetadataEntry(
-        new Text(tableId), null)));
-    
     TreeSet<String> locs = new TreeSet<String>();
-    while (scanner.hasNext()) {
-      TabletLocationState state = scanner.next();
-      if (state.current != null) {
-        try {
-          locs.add(state.current.hostPort());
-        } catch (Exception ex) {
-          log.error(ex, ex);
+    if (RootTable.ID.equals(tableId)) {
+      locs.add(instance.getRootTabletLocation());
+    } else {
+      String systemTableName = MetadataTable.ID.equals(tableId) ? RootTable.NAME : MetadataTable.NAME;
+      MetaDataTableScanner scanner = new MetaDataTableScanner(instance, SecurityConstants.getSystemCredentials(), new Range(KeyExtent.getMetadataEntry(
+          new Text(tableId), new Text()), KeyExtent.getMetadataEntry(new Text(tableId), null)), systemTableName);
+      
+      while (scanner.hasNext()) {
+        TabletLocationState state = scanner.next();
+        if (state.current != null) {
+          try {
+            locs.add(state.current.hostPort());
+          } catch (Exception ex) {
+            log.error(ex, ex);
+          }
         }
       }
+      scanner.close();
     }
-    scanner.close();
+    
     log.debug("Locs: " + locs);
     
     List<TabletServerStatus> tservers = new ArrayList<TabletServerStatus>();

Modified: accumulo/branches/ACCUMULO-CURATOR/server/src/main/java/org/apache/accumulo/server/problems/ProblemReports.java
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-CURATOR/server/src/main/java/org/apache/accumulo/server/problems/ProblemReports.java?rev=1496226&r1=1496225&r2=1496226&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-CURATOR/server/src/main/java/org/apache/accumulo/server/problems/ProblemReports.java (original)
+++ accumulo/branches/ACCUMULO-CURATOR/server/src/main/java/org/apache/accumulo/server/problems/ProblemReports.java Mon Jun 24 21:34:20 2013
@@ -39,8 +39,10 @@ import org.apache.accumulo.core.data.Mut
 import org.apache.accumulo.core.data.Range;
 import org.apache.accumulo.core.data.Value;
 import org.apache.accumulo.core.iterators.SortedKeyIterator;
+import org.apache.accumulo.core.security.Authorizations;
 import org.apache.accumulo.core.util.LoggingRunnable;
 import org.apache.accumulo.core.util.NamingThreadFactory;
+import org.apache.accumulo.core.util.RootTable;
 import org.apache.accumulo.core.zookeeper.ZooUtil;
 import org.apache.accumulo.fate.zookeeper.IZooReaderWriter;
 import org.apache.accumulo.server.client.HdfsZooInstance;
@@ -84,7 +86,7 @@ public class ProblemReports implements I
         log.debug("Filing problem report " + pr.getTableName() + " " + pr.getProblemType() + " " + pr.getResource());
         
         try {
-          if (pr.getTableName().equals(Constants.METADATA_TABLE_ID)) {
+          if (pr.getTableName().equals(MetadataTable.ID) || pr.getTableName().equals(RootTable.ID)) {
             // file report in zookeeper
             pr.saveToZooKeeper();
           } else {
@@ -120,7 +122,7 @@ public class ProblemReports implements I
       @Override
       public void run() {
         try {
-          if (pr.getTableName().equals(Constants.METADATA_TABLE_ID)) {
+          if (pr.getTableName().equals(MetadataTable.ID)) {
             // file report in zookeeper
             pr.removeFromZooKeeper();
           } else {
@@ -144,7 +146,7 @@ public class ProblemReports implements I
   
   public void deleteProblemReports(String table) throws Exception {
     
-    if (Constants.METADATA_TABLE_ID.equals(table)) {
+    if (MetadataTable.ID.equals(table)) {
       Iterator<ProblemReport> pri = iterator(table);
       while (pri.hasNext()) {
         pri.next().removeFromZooKeeper();
@@ -153,7 +155,7 @@ public class ProblemReports implements I
     }
     
     Connector connector = HdfsZooInstance.getInstance().getConnector(SecurityConstants.getSystemPrincipal(), SecurityConstants.getSystemToken());
-    Scanner scanner = connector.createScanner(Constants.METADATA_TABLE_NAME, Constants.NO_AUTHS);
+    Scanner scanner = connector.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
     scanner.addScanIterator(new IteratorSetting(1, "keys-only", SortedKeyIterator.class));
     
     if (table == null) {
@@ -187,7 +189,7 @@ public class ProblemReports implements I
           if (iter1 == null) {
             try {
               List<String> children;
-              if (table == null || table.equals(Constants.METADATA_TABLE_ID)) {
+              if (table == null || table.equals(MetadataTable.ID)) {
                 children = zoo.getChildren(ZooUtil.getRoot(HdfsZooInstance.getInstance()) + Constants.ZPROBLEMS);
               } else {
                 children = Collections.emptyList();
@@ -206,9 +208,9 @@ public class ProblemReports implements I
         private Iterator<Entry<Key,Value>> getIter2() {
           if (iter2 == null) {
             try {
-              if ((table == null || !table.equals(Constants.METADATA_TABLE_ID)) && iter1Count == 0) {
+              if ((table == null || !table.equals(MetadataTable.ID)) && iter1Count == 0) {
                 Connector connector = HdfsZooInstance.getInstance().getConnector(SecurityConstants.getSystemPrincipal(), SecurityConstants.getSystemToken());
-                Scanner scanner = connector.createScanner(Constants.METADATA_TABLE_NAME, Constants.NO_AUTHS);
+                Scanner scanner = connector.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
                 
                 scanner.setTimeout(3, TimeUnit.SECONDS);
                 

Modified: accumulo/branches/ACCUMULO-CURATOR/server/src/main/java/org/apache/accumulo/server/security/AuditedSecurityOperation.java
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-CURATOR/server/src/main/java/org/apache/accumulo/server/security/AuditedSecurityOperation.java?rev=1496226&r1=1496225&r2=1496226&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-CURATOR/server/src/main/java/org/apache/accumulo/server/security/AuditedSecurityOperation.java (original)
+++ accumulo/branches/ACCUMULO-CURATOR/server/src/main/java/org/apache/accumulo/server/security/AuditedSecurityOperation.java Mon Jun 24 21:34:20 2013
@@ -20,7 +20,6 @@ import java.nio.ByteBuffer;
 import java.util.List;
 import java.util.Map;
 
-import org.apache.accumulo.core.Constants;
 import org.apache.accumulo.core.client.TableNotFoundException;
 import org.apache.accumulo.core.client.impl.Tables;
 import org.apache.accumulo.core.client.impl.Translator;
@@ -38,6 +37,7 @@ import org.apache.accumulo.core.security
 import org.apache.accumulo.core.security.TablePermission;
 import org.apache.accumulo.core.security.thrift.TCredentials;
 import org.apache.accumulo.core.util.ByteBufferUtil;
+import org.apache.accumulo.core.util.MetadataTable;
 import org.apache.accumulo.server.client.HdfsZooInstance;
 import org.apache.accumulo.server.security.handler.Authenticator;
 import org.apache.accumulo.server.security.handler.Authorizor;
@@ -50,7 +50,8 @@ import org.apache.log4j.Logger;
  */
 public class AuditedSecurityOperation extends SecurityOperation {
   
-  public static final Logger audit = Logger.getLogger(Constants.AUDITLOG);
+  public static final String AUDITLOG = "Audit";
+  public static final Logger audit = Logger.getLogger(AUDITLOG);
   
   public AuditedSecurityOperation(Authorizor author, Authenticator authent, PermissionHandler pm, String instanceId) {
     super(author, authent, pm, instanceId);
@@ -86,7 +87,7 @@ public class AuditedSecurityOperation ex
   }
   
   private static boolean shouldAudit(TCredentials credentials, String tableId) {
-    return !tableId.equals(Constants.METADATA_TABLE_ID) && shouldAudit(credentials);
+    return !tableId.equals(MetadataTable.ID) && shouldAudit(credentials);
   }
   
   // Is INFO the right level to check? Do we even need that check?
@@ -116,9 +117,9 @@ public class AuditedSecurityOperation ex
       audit.info("operation: " + prefix + "; user: " + credentials.getPrincipal() + "; " + String.format(template, args));
     }
   }
-
+  
   public static final String CAN_SCAN_AUDIT_TEMPLATE = "action: scan; targetTable: %s; authorizations: %s; range: %s; columns: %s; iterators: %s; iteratorOptions: %s;";
-
+  
   @Override
   public boolean canScan(TCredentials credentials, String tableId, TRange range, List<TColumn> columns, List<IterInfo> ssiList,
       Map<String,Map<String,String>> ssio, List<ByteBuffer> authorizations) throws ThriftSecurityException {
@@ -140,8 +141,9 @@ public class AuditedSecurityOperation ex
       return super.canScan(credentials, tableId);
     }
   }
+  
   public static final String CAN_SCAN_BATCH_AUDIT_TEMPLATE = "action: scan; targetTable: %s; authorizations: %s; range: %s; columns: %s; iterators: %s; iteratorOptions: %s;";
-
+  
   @Override
   public boolean canScan(TCredentials credentials, String tableId, Map<TKeyExtent,List<TRange>> tbatch, List<TColumn> tcolumns, List<IterInfo> ssiList,
       Map<String,Map<String,String>> ssio, List<ByteBuffer> authorizations) throws ThriftSecurityException {
@@ -165,8 +167,9 @@ public class AuditedSecurityOperation ex
       return super.canScan(credentials, tableId);
     }
   }
+  
   public static final String CHANGE_AUTHORIZATIONS_AUDIT_TEMPLATE = "action: changeAuthorizations; targetUser: %s; authorizations: %s";
-
+  
   @Override
   public void changeAuthorizations(TCredentials credentials, String user, Authorizations authorizations) throws ThriftSecurityException {
     try {
@@ -177,8 +180,9 @@ public class AuditedSecurityOperation ex
       throw ex;
     }
   }
+  
   public static final String CHANGE_PASSWORD_AUDIT_TEMPLATE = "action: changePassword; targetUser: %s;";
-
+  
   @Override
   public void changePassword(TCredentials credentials, TCredentials newInfo) throws ThriftSecurityException {
     try {
@@ -189,8 +193,9 @@ public class AuditedSecurityOperation ex
       throw ex;
     }
   }
+  
   public static final String CREATE_USER_AUDIT_TEMPLATE = "action: createUser; targetUser: %s; Authorizations: %s;";
-
+  
   @Override
   public void createUser(TCredentials credentials, TCredentials newUser, Authorizations authorizations) throws ThriftSecurityException {
     try {
@@ -201,8 +206,9 @@ public class AuditedSecurityOperation ex
       throw ex;
     }
   }
+  
   public static final String CAN_CREATE_TABLE_AUDIT_TEMPLATE = "action: createTable; targetTable: %s;";
-
+  
   @Override
   public boolean canCreateTable(TCredentials c, String tableName) throws ThriftSecurityException {
     try {
@@ -214,8 +220,9 @@ public class AuditedSecurityOperation ex
       throw ex;
     }
   }
+  
   public static final String CAN_DELETE_TABLE_AUDIT_TEMPLATE = "action: deleteTable; targetTable: %s;";
-
+  
   @Override
   public boolean canDeleteTable(TCredentials c, String tableId) throws ThriftSecurityException {
     String tableName = getTableName(tableId);
@@ -228,8 +235,9 @@ public class AuditedSecurityOperation ex
       throw ex;
     }
   }
+  
   public static final String CAN_RENAME_TABLE_AUDIT_TEMPLATE = "action: renameTable; targetTable: %s; newTableName: %s;";
-
+  
   @Override
   public boolean canRenameTable(TCredentials c, String tableId, String oldTableName, String newTableName) throws ThriftSecurityException {
     try {
@@ -241,8 +249,9 @@ public class AuditedSecurityOperation ex
       throw ex;
     }
   }
+  
   public static final String CAN_CLONE_TABLE_AUDIT_TEMPLATE = "action: cloneTable; targetTable: %s; newTableName: %s";
-
+  
   @Override
   public boolean canCloneTable(TCredentials c, String tableId, String tableName) throws ThriftSecurityException {
     String oldTableName = getTableName(tableId);
@@ -255,8 +264,9 @@ public class AuditedSecurityOperation ex
       throw ex;
     }
   }
+  
   public static final String CAN_DELETE_RANGE_AUDIT_TEMPLATE = "action: deleteData; targetTable: %s; startRange: %s; endRange: %s;";
-
+  
   @Override
   public boolean canDeleteRange(TCredentials c, String tableId, String tableName, Text startRow, Text endRow) throws ThriftSecurityException {
     try {
@@ -268,8 +278,9 @@ public class AuditedSecurityOperation ex
       throw ex;
     }
   }
+  
   public static final String CAN_BULK_IMPORT_AUDIT_TEMPLATE = "action: bulkImport; targetTable: %s; dataDir: %s; failDir: %s;";
-
+  
   @Override
   public boolean canBulkImport(TCredentials c, String tableId, String tableName, String dir, String failDir) throws ThriftSecurityException {
     try {
@@ -281,8 +292,9 @@ public class AuditedSecurityOperation ex
       throw ex;
     }
   }
+  
   public static final String CAN_IMPORT_AUDIT_TEMPLATE = "action: import; targetTable: %s; dataDir: %s;";
-
+  
   @Override
   public boolean canImport(TCredentials credentials, String tableName, String importDir) throws ThriftSecurityException {
     
@@ -295,8 +307,9 @@ public class AuditedSecurityOperation ex
       throw ex;
     }
   }
+  
   public static final String CAN_EXPORT_AUDIT_TEMPLATE = "action: export; targetTable: %s; dataDir: %s;";
-
+  
   @Override
   public boolean canExport(TCredentials credentials, String tableId, String tableName, String exportDir) throws ThriftSecurityException {
     
@@ -309,8 +322,9 @@ public class AuditedSecurityOperation ex
       throw ex;
     }
   }
+  
   public static final String DROP_USER_AUDIT_TEMPLATE = "action: dropUser; targetUser: %s;";
-
+  
   @Override
   public void dropUser(TCredentials credentials, String user) throws ThriftSecurityException {
     try {
@@ -321,8 +335,9 @@ public class AuditedSecurityOperation ex
       throw ex;
     }
   }
+  
   public static final String GRANT_SYSTEM_PERMISSION_AUDIT_TEMPLATE = "action: grantSystemPermission; permission: %s; targetUser: %s;";
-
+  
   @Override
   public void grantSystemPermission(TCredentials credentials, String user, SystemPermission permission) throws ThriftSecurityException {
     try {
@@ -333,8 +348,9 @@ public class AuditedSecurityOperation ex
       throw ex;
     }
   }
+  
   public static final String GRANT_TABLE_PERMISSION_AUDIT_TEMPLATE = "action: grantTablePermission; permission: %s; targetTable: %s; targetUser: %s;";
-
+  
   @Override
   public void grantTablePermission(TCredentials credentials, String user, String tableId, TablePermission permission) throws ThriftSecurityException {
     String tableName = getTableName(tableId);
@@ -346,8 +362,9 @@ public class AuditedSecurityOperation ex
       throw ex;
     }
   }
+  
   public static final String REVOKE_SYSTEM_PERMISSION_AUDIT_TEMPLATE = "action: revokeSystemPermission; permission: %s; targetUser: %s;";
-
+  
   @Override
   public void revokeSystemPermission(TCredentials credentials, String user, SystemPermission permission) throws ThriftSecurityException {
     
@@ -359,8 +376,9 @@ public class AuditedSecurityOperation ex
       throw ex;
     }
   }
+  
   public static final String REVOKE_TABLE_PERMISSION_AUDIT_TEMPLATE = "action: revokeTablePermission; permission: %s; targetTable: %s; targetUser: %s;";
-
+  
   @Override
   public void revokeTablePermission(TCredentials credentials, String user, String tableId, TablePermission permission) throws ThriftSecurityException {
     String tableName = getTableName(tableId);
@@ -372,8 +390,9 @@ public class AuditedSecurityOperation ex
       throw ex;
     }
   }
+  
   public static final String CAN_ONLINE_OFFLINE_TABLE_AUDIT_TEMPLATE = "action: %s; targetTable: %s;";
-
+  
   @Override
   public boolean canOnlineOfflineTable(TCredentials credentials, String tableId, TableOperation op) throws ThriftSecurityException {
     String tableName = getTableName(tableId);

Modified: accumulo/branches/ACCUMULO-CURATOR/server/src/main/java/org/apache/accumulo/server/security/SecurityConstants.java
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-CURATOR/server/src/main/java/org/apache/accumulo/server/security/SecurityConstants.java?rev=1496226&r1=1496225&r2=1496226&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-CURATOR/server/src/main/java/org/apache/accumulo/server/security/SecurityConstants.java (original)
+++ accumulo/branches/ACCUMULO-CURATOR/server/src/main/java/org/apache/accumulo/server/security/SecurityConstants.java Mon Jun 24 21:34:20 2013
@@ -30,6 +30,7 @@ import org.apache.accumulo.core.client.s
 import org.apache.accumulo.core.conf.Property;
 import org.apache.accumulo.core.security.CredentialHelper;
 import org.apache.accumulo.core.security.thrift.TCredentials;
+import org.apache.accumulo.server.ServerConstants;
 import org.apache.accumulo.server.client.HdfsZooInstance;
 import org.apache.accumulo.server.conf.ServerConfiguration;
 import org.apache.commons.codec.binary.Base64;
@@ -62,7 +63,7 @@ public class SecurityConstants {
   }
   
   private static AuthenticationToken makeSystemPassword() {
-    int wireVersion = Constants.WIRE_VERSION;
+    int wireVersion = ServerConstants.WIRE_VERSION;
     byte[] inst = HdfsZooInstance.getInstance().getInstanceID().getBytes(Constants.UTF8);
     try {
       confChecksum = getSystemConfigChecksum();
@@ -92,7 +93,7 @@ public class SecurityConstants {
       
       // seed the config with the version and instance id, so at least
       // it's not empty
-      md.update(Constants.WIRE_VERSION.toString().getBytes(Constants.UTF8));
+      md.update(ServerConstants.WIRE_VERSION.toString().getBytes(Constants.UTF8));
       md.update(HdfsZooInstance.getInstance().getInstanceID().getBytes(Constants.UTF8));
       
       for (Entry<String,String> entry : ServerConfiguration.getSiteConfiguration()) {

Modified: accumulo/branches/ACCUMULO-CURATOR/server/src/main/java/org/apache/accumulo/server/security/SecurityOperation.java
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-CURATOR/server/src/main/java/org/apache/accumulo/server/security/SecurityOperation.java?rev=1496226&r1=1496225&r2=1496226&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-CURATOR/server/src/main/java/org/apache/accumulo/server/security/SecurityOperation.java (original)
+++ accumulo/branches/ACCUMULO-CURATOR/server/src/main/java/org/apache/accumulo/server/security/SecurityOperation.java Mon Jun 24 21:34:20 2013
@@ -39,6 +39,8 @@ import org.apache.accumulo.core.security
 import org.apache.accumulo.core.security.SystemPermission;
 import org.apache.accumulo.core.security.TablePermission;
 import org.apache.accumulo.core.security.thrift.TCredentials;
+import org.apache.accumulo.core.util.MetadataTable;
+import org.apache.accumulo.core.util.RootTable;
 import org.apache.accumulo.server.client.HdfsZooInstance;
 import org.apache.accumulo.server.conf.ServerConfiguration;
 import org.apache.accumulo.server.master.Master;
@@ -105,6 +107,7 @@ public class SecurityOperation {
    * 
    * @deprecated not for client use
    */
+  @Deprecated
   public SecurityOperation(String instanceId) {
     ZKUserPath = Constants.ZROOT + "/" + instanceId + "/users";
     zooCache = new ZooCache();
@@ -132,7 +135,7 @@ public class SecurityOperation {
     authorizor.initializeSecurity(credentials, rootPrincipal);
     permHandle.initializeSecurity(credentials, rootPrincipal);
     try {
-      permHandle.grantTablePermission(rootPrincipal, Constants.METADATA_TABLE_ID, TablePermission.ALTER_TABLE);
+      permHandle.grantTablePermission(rootPrincipal, MetadataTable.ID, TablePermission.ALTER_TABLE);
     } catch (TableNotFoundException e) {
       // Shouldn't happen
       throw new RuntimeException(e);
@@ -209,7 +212,7 @@ public class SecurityOperation {
     
     // system user doesn't need record-level authorizations for the tables it reads (for now)
     if (user.equals(SecurityConstants.SYSTEM_PRINCIPAL))
-      return Constants.NO_AUTHS;
+      return Authorizations.EMPTY;
     
     try {
       return authorizor.getCachedUserAuthorizations(user);
@@ -253,7 +256,7 @@ public class SecurityOperation {
     
     targetUserExists(user);
     
-    if (table.equals(Constants.METADATA_TABLE_ID) && permission.equals(TablePermission.READ))
+    if ((table.equals(MetadataTable.ID) || table.equals(RootTable.ID)) && permission.equals(TablePermission.READ))
       return true;
     
     try {
@@ -293,11 +296,13 @@ public class SecurityOperation {
     return hasTablePermission(credentials.getPrincipal(), table, TablePermission.READ, true);
   }
   
-  public boolean canScan(TCredentials credentials, String table, TRange range, List<TColumn> columns, List<IterInfo> ssiList, Map<String,Map<String,String>> ssio, List<ByteBuffer> authorizations) throws ThriftSecurityException {
+  public boolean canScan(TCredentials credentials, String table, TRange range, List<TColumn> columns, List<IterInfo> ssiList,
+      Map<String,Map<String,String>> ssio, List<ByteBuffer> authorizations) throws ThriftSecurityException {
     return canScan(credentials, table);
   }
   
-  public boolean canScan(TCredentials credentials, String table, Map<TKeyExtent,List<TRange>> tbatch, List<TColumn> tcolumns, List<IterInfo> ssiList, Map<String,Map<String,String>> ssio, List<ByteBuffer> authorizations) throws ThriftSecurityException {
+  public boolean canScan(TCredentials credentials, String table, Map<TKeyExtent,List<TRange>> tbatch, List<TColumn> tcolumns, List<IterInfo> ssiList,
+      Map<String,Map<String,String>> ssio, List<ByteBuffer> authorizations) throws ThriftSecurityException {
     return canScan(credentials, table);
   }
   

Modified: accumulo/branches/ACCUMULO-CURATOR/server/src/main/java/org/apache/accumulo/server/security/handler/InsecurePermHandler.java
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-CURATOR/server/src/main/java/org/apache/accumulo/server/security/handler/InsecurePermHandler.java?rev=1496226&r1=1496225&r2=1496226&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-CURATOR/server/src/main/java/org/apache/accumulo/server/security/handler/InsecurePermHandler.java (original)
+++ accumulo/branches/ACCUMULO-CURATOR/server/src/main/java/org/apache/accumulo/server/security/handler/InsecurePermHandler.java Mon Jun 24 21:34:20 2013
@@ -27,120 +27,77 @@ import org.apache.accumulo.core.security
  */
 public class InsecurePermHandler implements PermissionHandler {
   
-  /* (non-Javadoc)
-   * @see org.apache.accumulo.server.security.handler.PermissionHandler#initialize(java.lang.String)
-   */
   @Override
   public void initialize(String instanceId, boolean initialize) {
     return;
   }
   
-  /* (non-Javadoc)
-   * @see org.apache.accumulo.server.security.handler.PermissionHandler#validSecurityHandlers(org.apache.accumulo.server.security.handler.Authenticator, org.apache.accumulo.server.security.handler.Authorizor)
-   */
   @Override
   public boolean validSecurityHandlers(Authenticator authent, Authorizor author) {
     return true;
   }
   
-  /* (non-Javadoc)
-   * @see org.apache.accumulo.server.security.handler.PermissionHandler#initializeSecurity(java.lang.String)
-   */
   @Override
   public void initializeSecurity(TCredentials token, String rootuser) throws AccumuloSecurityException {
     return;
   }
   
-  /* (non-Javadoc)
-   * @see org.apache.accumulo.server.security.handler.PermissionHandler#hasSystemPermission(java.lang.String, org.apache.accumulo.core.security.SystemPermission)
-   */
   @Override
   public boolean hasSystemPermission(String user, SystemPermission permission) throws AccumuloSecurityException {
     return true;
   }
   
-  /* (non-Javadoc)
-   * @see org.apache.accumulo.server.security.handler.PermissionHandler#hasCachedSystemPermission(java.lang.String, org.apache.accumulo.core.security.SystemPermission)
-   */
   @Override
   public boolean hasCachedSystemPermission(String user, SystemPermission permission) throws AccumuloSecurityException {
     return true;
   }
   
-  /* (non-Javadoc)
-   * @see org.apache.accumulo.server.security.handler.PermissionHandler#hasTablePermission(java.lang.String, java.lang.String, org.apache.accumulo.core.security.TablePermission)
-   */
   @Override
   public boolean hasTablePermission(String user, String table, TablePermission permission) throws AccumuloSecurityException, TableNotFoundException {
     return true;
   }
   
-  /* (non-Javadoc)
-   * @see org.apache.accumulo.server.security.handler.PermissionHandler#hasCachedTablePermission(java.lang.String, java.lang.String, org.apache.accumulo.core.security.TablePermission)
-   */
   @Override
   public boolean hasCachedTablePermission(String user, String table, TablePermission permission) throws AccumuloSecurityException, TableNotFoundException {
     return true;
   }
   
-  /* (non-Javadoc)
-   * @see org.apache.accumulo.server.security.handler.PermissionHandler#grantSystemPermission(java.lang.String, org.apache.accumulo.core.security.SystemPermission)
-   */
   @Override
   public void grantSystemPermission(String user, SystemPermission permission) throws AccumuloSecurityException {
     return;
   }
   
-  /* (non-Javadoc)
-   * @see org.apache.accumulo.server.security.handler.PermissionHandler#revokeSystemPermission(java.lang.String, org.apache.accumulo.core.security.SystemPermission)
-   */
   @Override
   public void revokeSystemPermission(String user, SystemPermission permission) throws AccumuloSecurityException {
     return;
   }
   
-  /* (non-Javadoc)
-   * @see org.apache.accumulo.server.security.handler.PermissionHandler#grantTablePermission(java.lang.String, java.lang.String, org.apache.accumulo.core.security.TablePermission)
-   */
   @Override
   public void grantTablePermission(String user, String table, TablePermission permission) throws AccumuloSecurityException, TableNotFoundException {
     return;
   }
   
-  /* (non-Javadoc)
-   * @see org.apache.accumulo.server.security.handler.PermissionHandler#revokeTablePermission(java.lang.String, java.lang.String, org.apache.accumulo.core.security.TablePermission)
-   */
   @Override
   public void revokeTablePermission(String user, String table, TablePermission permission) throws AccumuloSecurityException, TableNotFoundException {
     return;
   }
   
-  /* (non-Javadoc)
-   * @see org.apache.accumulo.server.security.handler.PermissionHandler#cleanTablePermissions(java.lang.String)
-   */
   @Override
   public void cleanTablePermissions(String table) throws AccumuloSecurityException, TableNotFoundException {
     return;
   }
   
-  /* (non-Javadoc)
-   * @see org.apache.accumulo.server.security.handler.PermissionHandler#initUser(java.lang.String)
-   */
   @Override
   public void initUser(String user) throws AccumuloSecurityException {
     return;
   }
   
-  /* (non-Javadoc)
-   * @see org.apache.accumulo.server.security.handler.PermissionHandler#dropUser(java.lang.String)
-   */
   @Override
   public void cleanUser(String user) throws AccumuloSecurityException {
     return;
   }
-
+  
   @Override
-  public void initTable(String table) throws AccumuloSecurityException {
-  }
+  public void initTable(String table) throws AccumuloSecurityException {}
   
 }

Modified: accumulo/branches/ACCUMULO-CURATOR/server/src/main/java/org/apache/accumulo/server/security/handler/ZKAuthorizor.java
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-CURATOR/server/src/main/java/org/apache/accumulo/server/security/handler/ZKAuthorizor.java?rev=1496226&r1=1496225&r2=1496226&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-CURATOR/server/src/main/java/org/apache/accumulo/server/security/handler/ZKAuthorizor.java (original)
+++ accumulo/branches/ACCUMULO-CURATOR/server/src/main/java/org/apache/accumulo/server/security/handler/ZKAuthorizor.java Mon Jun 24 21:34:20 2013
@@ -22,13 +22,13 @@ import java.util.Map;
 import java.util.Set;
 import java.util.TreeSet;
 
-import org.apache.accumulo.core.Constants;
 import org.apache.accumulo.core.client.AccumuloSecurityException;
 import org.apache.accumulo.core.client.impl.thrift.SecurityErrorCode;
 import org.apache.accumulo.core.security.Authorizations;
 import org.apache.accumulo.core.security.SystemPermission;
 import org.apache.accumulo.core.security.TablePermission;
 import org.apache.accumulo.core.security.thrift.TCredentials;
+import org.apache.accumulo.core.util.MetadataTable;
 import org.apache.accumulo.fate.zookeeper.IZooReaderWriter;
 import org.apache.accumulo.fate.zookeeper.ZooUtil.NodeExistsPolicy;
 import org.apache.accumulo.fate.zookeeper.ZooUtil.NodeMissingPolicy;
@@ -64,7 +64,7 @@ public class ZKAuthorizor implements Aut
     byte[] authsBytes = zooCache.get(ZKUserPath + "/" + user + ZKUserAuths).getData();
     if (authsBytes != null)
       return ZKSecurityTool.convertAuthorizations(authsBytes);
-    return Constants.NO_AUTHS;
+    return Authorizations.EMPTY;
   }
   
   @Override
@@ -82,7 +82,7 @@ public class ZKAuthorizor implements Aut
       rootPerms.add(p);
     Map<String,Set<TablePermission>> tablePerms = new HashMap<String,Set<TablePermission>>();
     // Allow the root user to flush the !METADATA table
-    tablePerms.put(Constants.METADATA_TABLE_ID, Collections.singleton(TablePermission.ALTER_TABLE));
+    tablePerms.put(MetadataTable.ID, Collections.singleton(TablePermission.ALTER_TABLE));
     
     try {
       // prep parent node of users with root username
@@ -90,7 +90,7 @@ public class ZKAuthorizor implements Aut
         zoo.putPersistentData(ZKUserPath, rootuser.getBytes(), NodeExistsPolicy.FAIL);
       
       initUser(rootuser);
-      zoo.putPersistentData(ZKUserPath + "/" + rootuser + ZKUserAuths, ZKSecurityTool.convertAuthorizations(Constants.NO_AUTHS), NodeExistsPolicy.FAIL);
+      zoo.putPersistentData(ZKUserPath + "/" + rootuser + ZKUserAuths, ZKSecurityTool.convertAuthorizations(Authorizations.EMPTY), NodeExistsPolicy.FAIL);
     } catch (KeeperException e) {
       log.error(e, e);
       throw new RuntimeException(e);

Modified: accumulo/branches/ACCUMULO-CURATOR/server/src/main/java/org/apache/accumulo/server/security/handler/ZKPermHandler.java
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-CURATOR/server/src/main/java/org/apache/accumulo/server/security/handler/ZKPermHandler.java?rev=1496226&r1=1496225&r2=1496226&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-CURATOR/server/src/main/java/org/apache/accumulo/server/security/handler/ZKPermHandler.java (original)
+++ accumulo/branches/ACCUMULO-CURATOR/server/src/main/java/org/apache/accumulo/server/security/handler/ZKPermHandler.java Mon Jun 24 21:34:20 2013
@@ -23,13 +23,14 @@ import java.util.Map.Entry;
 import java.util.Set;
 import java.util.TreeSet;
 
-import org.apache.accumulo.core.Constants;
 import org.apache.accumulo.core.client.AccumuloSecurityException;
 import org.apache.accumulo.core.client.TableNotFoundException;
 import org.apache.accumulo.core.client.impl.thrift.SecurityErrorCode;
 import org.apache.accumulo.core.security.SystemPermission;
 import org.apache.accumulo.core.security.TablePermission;
 import org.apache.accumulo.core.security.thrift.TCredentials;
+import org.apache.accumulo.core.util.MetadataTable;
+import org.apache.accumulo.core.util.RootTable;
 import org.apache.accumulo.fate.zookeeper.IZooReaderWriter;
 import org.apache.accumulo.fate.zookeeper.ZooUtil.NodeExistsPolicy;
 import org.apache.accumulo.fate.zookeeper.ZooUtil.NodeMissingPolicy;
@@ -59,6 +60,7 @@ public class ZKPermHandler implements Pe
     return zkPermHandlerInstance;
   }
   
+  @Override
   public void initialize(String instanceId, boolean initialize) {
     ZKUserPath = ZKSecurityTool.getInstancePath(instanceId) + "/users";
     ZKTablePath = ZKSecurityTool.getInstancePath(instanceId) + "/tables";
@@ -252,8 +254,9 @@ public class ZKPermHandler implements Pe
     for (SystemPermission p : SystemPermission.values())
       rootPerms.add(p);
     Map<String,Set<TablePermission>> tablePerms = new HashMap<String,Set<TablePermission>>();
-    // Allow the root user to flush the !METADATA table
-    tablePerms.put(Constants.METADATA_TABLE_ID, Collections.singleton(TablePermission.ALTER_TABLE));
+    // Allow the root user to flush the system tables
+    tablePerms.put(RootTable.ID, Collections.singleton(TablePermission.ALTER_TABLE));
+    tablePerms.put(MetadataTable.ID, Collections.singleton(TablePermission.ALTER_TABLE));
     
     try {
       // prep parent node of users with root username
@@ -277,6 +280,7 @@ public class ZKPermHandler implements Pe
    * @param user
    * @throws AccumuloSecurityException
    */
+  @Override
   public void initUser(String user) throws AccumuloSecurityException {
     IZooReaderWriter zoo = ZooReaderWriter.getRetryingInstance();
     try {

Modified: accumulo/branches/ACCUMULO-CURATOR/server/src/main/java/org/apache/accumulo/server/tabletserver/Compactor.java
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-CURATOR/server/src/main/java/org/apache/accumulo/server/tabletserver/Compactor.java?rev=1496226&r1=1496225&r2=1496226&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-CURATOR/server/src/main/java/org/apache/accumulo/server/tabletserver/Compactor.java (original)
+++ accumulo/branches/ACCUMULO-CURATOR/server/src/main/java/org/apache/accumulo/server/tabletserver/Compactor.java Mon Jun 24 21:34:20 2013
@@ -28,8 +28,6 @@ import java.util.Set;
 import java.util.concurrent.Callable;
 import java.util.concurrent.atomic.AtomicLong;
 
-import org.apache.accumulo.trace.instrument.Span;
-import org.apache.accumulo.trace.instrument.Trace;
 import org.apache.accumulo.core.client.IteratorSetting;
 import org.apache.accumulo.core.data.ByteSequence;
 import org.apache.accumulo.core.data.Key;
@@ -55,15 +53,18 @@ import org.apache.accumulo.core.util.Loc
 import org.apache.accumulo.core.util.LocalityGroupUtil.LocalityGroupConfigurationError;
 import org.apache.accumulo.core.util.MetadataTable.DataFileValue;
 import org.apache.accumulo.server.conf.TableConfiguration;
+import org.apache.accumulo.server.fs.FileRef;
+import org.apache.accumulo.server.fs.VolumeManager;
 import org.apache.accumulo.server.problems.ProblemReport;
 import org.apache.accumulo.server.problems.ProblemReportingIterator;
 import org.apache.accumulo.server.problems.ProblemReports;
 import org.apache.accumulo.server.problems.ProblemType;
 import org.apache.accumulo.server.tabletserver.Tablet.MajorCompactionReason;
 import org.apache.accumulo.server.tabletserver.Tablet.MinorCompactionReason;
+import org.apache.accumulo.trace.instrument.Span;
+import org.apache.accumulo.trace.instrument.Trace;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
 import org.apache.log4j.Logger;
 
 
@@ -118,14 +119,14 @@ public class Compactor implements Callab
     IteratorScope getIteratorScope();
   }
   
-  private Map<String,DataFileValue> filesToCompact;
+  private Map<FileRef,DataFileValue> filesToCompact;
   private InMemoryMap imm;
-  private String outputFile;
+  private FileRef outputFile;
   private boolean propogateDeletes;
   private TableConfiguration acuTableConf;
   private CompactionEnv env;
   private Configuration conf;
-  private FileSystem fs;
+  private VolumeManager fs;
   protected KeyExtent extent;
   private List<IteratorSetting> iterators;
   
@@ -217,9 +218,10 @@ public class Compactor implements Callab
         iiList.add(new IterInfo(iterSetting.getPriority(), iterSetting.getIteratorClass(), iterSetting.getName()));
         iterOptions.put(iterSetting.getName(), iterSetting.getOptions());
       }
-      
-      return new ActiveCompaction(compactor.extent.toThrift(), System.currentTimeMillis() - compactor.startTime, new ArrayList<String>(
-          compactor.filesToCompact.keySet()), compactor.outputFile, type, reason, localityGroup, entriesRead, entriesWritten, iiList, iterOptions);
+      List<String> filesToCompact = new ArrayList<String>();
+      for (FileRef ref : compactor.filesToCompact.keySet())
+        filesToCompact.add(ref.toString());
+      return new ActiveCompaction(compactor.extent.toThrift(), System.currentTimeMillis() - compactor.startTime, filesToCompact, compactor.outputFile.toString(), type, reason, localityGroup, entriesRead, entriesWritten, iiList, iterOptions);
     }
   }
   
@@ -235,7 +237,7 @@ public class Compactor implements Callab
     return compactions;
   }
 
-  Compactor(Configuration conf, FileSystem fs, Map<String,DataFileValue> files, InMemoryMap imm, String outputFile, boolean propogateDeletes,
+  Compactor(Configuration conf, VolumeManager fs, Map<FileRef,DataFileValue> files, InMemoryMap imm, FileRef outputFile, boolean propogateDeletes,
       TableConfiguration acuTableConf, KeyExtent extent, CompactionEnv env, List<IteratorSetting> iterators, MajorCompactionReason reason) {
     this.extent = extent;
     this.conf = conf;
@@ -252,12 +254,12 @@ public class Compactor implements Callab
     startTime = System.currentTimeMillis();
   }
   
-  Compactor(Configuration conf, FileSystem fs, Map<String,DataFileValue> files, InMemoryMap imm, String outputFile, boolean propogateDeletes,
+  Compactor(Configuration conf, VolumeManager fs, Map<FileRef,DataFileValue> files, InMemoryMap imm, FileRef outputFile, boolean propogateDeletes,
       TableConfiguration acuTableConf, KeyExtent extent, CompactionEnv env) {
     this(conf, fs, files, imm, outputFile, propogateDeletes, acuTableConf, extent, env, new ArrayList<IteratorSetting>(), null);
   }
   
-  public FileSystem getFileSystem() {
+  public VolumeManager getFileSystem() {
     return fs;
   }
   
@@ -266,7 +268,7 @@ public class Compactor implements Callab
   }
   
   String getOutputFile() {
-    return outputFile;
+    return outputFile.toString();
   }
   
   @Override
@@ -282,7 +284,8 @@ public class Compactor implements Callab
 
     try {
       FileOperations fileFactory = FileOperations.getInstance();
-      mfw = fileFactory.openWriter(outputFile, fs, conf, acuTableConf);
+      FileSystem ns = this.fs.getFileSystemByPath(outputFile.path());
+      mfw = fileFactory.openWriter(outputFile.path().toString(), ns, ns.getConf(), acuTableConf);
       
       Map<String,Set<ByteSequence>> lGroups;
       try {
@@ -314,7 +317,7 @@ public class Compactor implements Callab
       
       // Verify the file, since hadoop 0.20.2 sometimes lies about the success of close()
       try {
-        FileSKVIterator openReader = fileFactory.openReader(outputFile, false, fs, conf, acuTableConf);
+        FileSKVIterator openReader = fileFactory.openReader(outputFile.path().toString(), false, ns, ns.getConf(), acuTableConf);
         openReader.close();
       } catch (IOException ex) {
         log.error("Verification of successful compaction fails!!! " + extent + " " + outputFile, ex);
@@ -324,7 +327,7 @@ public class Compactor implements Callab
       log.debug(String.format("Compaction %s %,d read | %,d written | %,6d entries/sec | %6.3f secs", extent, majCStats.getEntriesRead(),
           majCStats.getEntriesWritten(), (int) (majCStats.getEntriesRead() / ((t2 - t1) / 1000.0)), (t2 - t1) / 1000.0));
       
-      majCStats.setFileSize(fileFactory.getFileSize(outputFile, fs, conf, acuTableConf));
+      majCStats.setFileSize(fileFactory.getFileSize(outputFile.path().toString(), ns, ns.getConf(), acuTableConf));
       return majCStats;
     } catch (IOException e) {
       log.error(e, e);
@@ -343,9 +346,8 @@ public class Compactor implements Callab
           try {
             mfw.close();
           } finally {
-            Path path = new Path(outputFile);
-            if (!fs.delete(path, true))
-              if (fs.exists(path))
+            if (!fs.deleteRecursively(outputFile.path()))
+              if (fs.exists(outputFile.path()))
                 log.error("Unable to delete " + outputFile);
           }
         }
@@ -359,18 +361,18 @@ public class Compactor implements Callab
     
     List<SortedKeyValueIterator<Key,Value>> iters = new ArrayList<SortedKeyValueIterator<Key,Value>>(filesToCompact.size());
     
-    for (String mapFile : filesToCompact.keySet()) {
+    for (FileRef mapFile : filesToCompact.keySet()) {
       try {
         
         FileOperations fileFactory = FileOperations.getInstance();
-        
+        FileSystem fs = this.fs.getFileSystemByPath(mapFile.path());
         FileSKVIterator reader;
         
-        reader = fileFactory.openReader(mapFile, false, fs, conf, acuTableConf);
+        reader = fileFactory.openReader(mapFile.path().toString(), false, fs, conf, acuTableConf);
         
         readers.add(reader);
         
-        SortedKeyValueIterator<Key,Value> iter = new ProblemReportingIterator(extent.getTableId().toString(), mapFile, false, reader);
+        SortedKeyValueIterator<Key,Value> iter = new ProblemReportingIterator(extent.getTableId().toString(), mapFile.path().toString(), false, reader);
         
         if (filesToCompact.get(mapFile).isTimeSet()) {
           iter = new TimeSettingIterator(iter, filesToCompact.get(mapFile).getTime());
@@ -380,7 +382,7 @@ public class Compactor implements Callab
         
       } catch (Throwable e) {
         
-        ProblemReports.getInstance().report(new ProblemReport(extent.getTableId().toString(), ProblemType.FILE_READ, mapFile, e));
+        ProblemReports.getInstance().report(new ProblemReport(extent.getTableId().toString(), ProblemType.FILE_READ, mapFile.path().toString(), e));
         
         log.warn("Some problem opening map file " + mapFile + " " + e.getMessage(), e);
         // failed to open some map file... close the ones that were opened
@@ -462,7 +464,7 @@ public class Compactor implements Callab
             } catch (IOException e) {
               log.error(e, e);
             }
-            fs.delete(new Path(outputFile), true);
+            fs.deleteRecursively(outputFile.path());
           } catch (Exception e) {
             log.warn("Failed to delete Canceled compaction output file " + outputFile, e);
           }
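
A minimal sketch, not part of the commit, of the access pattern the Compactor hunks above adopt: a FileRef is resolved to its volume-specific FileSystem through the VolumeManager before the path string is handed to FileOperations, and partial output is removed with deleteRecursively() instead of FileSystem.delete(path, true). The class and method names below are illustrative; it also assumes the FileOperations overloads accept an AccumuloConfiguration (the TableConfiguration argument above suggests they do) and that the VolumeManager calls may throw IOException.

import java.io.IOException;

import org.apache.accumulo.core.conf.AccumuloConfiguration;
import org.apache.accumulo.core.file.FileOperations;
import org.apache.accumulo.core.file.FileSKVIterator;
import org.apache.accumulo.server.fs.FileRef;
import org.apache.accumulo.server.fs.VolumeManager;
import org.apache.hadoop.fs.FileSystem;

class CompactorFileAccessSketch {

  // Resolve the volume-specific FileSystem for a FileRef, then open the reader with
  // that namespace's own Hadoop Configuration, mirroring the openReader changes above.
  static FileSKVIterator openReader(VolumeManager vm, FileRef file, AccumuloConfiguration tableConf) throws IOException {
    FileSystem ns = vm.getFileSystemByPath(file.path());
    return FileOperations.getInstance().openReader(file.path().toString(), false, ns, ns.getConf(), tableConf);
  }

  // Remove a partial compaction output through the VolumeManager, mirroring the
  // finally-block above; the println stands in for the tablet server's log4j call.
  static void removePartialOutput(VolumeManager vm, FileRef outputFile) throws IOException {
    if (!vm.deleteRecursively(outputFile.path()) && vm.exists(outputFile.path()))
      System.err.println("Unable to delete " + outputFile);
  }
}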

Modified: accumulo/branches/ACCUMULO-CURATOR/server/src/main/java/org/apache/accumulo/server/tabletserver/FileManager.java
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-CURATOR/server/src/main/java/org/apache/accumulo/server/tabletserver/FileManager.java?rev=1496226&r1=1496225&r2=1496226&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-CURATOR/server/src/main/java/org/apache/accumulo/server/tabletserver/FileManager.java (original)
+++ accumulo/branches/ACCUMULO-CURATOR/server/src/main/java/org/apache/accumulo/server/tabletserver/FileManager.java Mon Jun 24 21:34:20 2013
@@ -42,13 +42,17 @@ import org.apache.accumulo.core.iterator
 import org.apache.accumulo.core.iterators.system.SourceSwitchingIterator.DataSource;
 import org.apache.accumulo.core.iterators.system.TimeSettingIterator;
 import org.apache.accumulo.core.util.MetadataTable.DataFileValue;
+import org.apache.accumulo.server.ServerConstants;
 import org.apache.accumulo.server.conf.ServerConfiguration;
+import org.apache.accumulo.server.fs.FileRef;
+import org.apache.accumulo.server.fs.VolumeManager;
 import org.apache.accumulo.server.problems.ProblemReport;
 import org.apache.accumulo.server.problems.ProblemReportingIterator;
 import org.apache.accumulo.server.problems.ProblemReports;
 import org.apache.accumulo.server.problems.ProblemType;
 import org.apache.accumulo.server.util.time.SimpleTimer;
 import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.io.Text;
 import org.apache.log4j.Logger;
 
@@ -99,7 +103,7 @@ public class FileManager {
   
   private Semaphore filePermits;
   
-  private FileSystem fs;
+  private VolumeManager fs;
   
   // the data cache and index cache are allocated in
   // TabletResourceManager and passed through the file opener to
@@ -158,7 +162,7 @@ public class FileManager {
    * @param indexCache
    *          : underlying file can and should be able to handle a null cache
    */
-  FileManager(ServerConfiguration conf, FileSystem fs, int maxOpen, BlockCache dataCache, BlockCache indexCache) {
+  FileManager(ServerConfiguration conf, VolumeManager fs, int maxOpen, BlockCache dataCache, BlockCache indexCache) {
     
     if (maxOpen <= 0)
       throw new IllegalArgumentException("maxOpen <= 0");
@@ -239,8 +243,7 @@ public class FileManager {
   }
   
   private List<String> takeOpenFiles(Collection<String> files, List<FileSKVIterator> reservedFiles, Map<FileSKVIterator,String> readersReserved) {
-    List<String> filesToOpen;
-    filesToOpen = new LinkedList<String>(files);
+    List<String> filesToOpen = new LinkedList<String>(files);
     for (Iterator<String> iterator = filesToOpen.iterator(); iterator.hasNext();) {
       String file = iterator.next();
       
@@ -304,8 +307,10 @@ public class FileManager {
     // open any files that need to be opened
     for (String file : filesToOpen) {
       try {
-        // log.debug("Opening "+file);
-        FileSKVIterator reader = FileOperations.getInstance().openReader(file, false, fs, fs.getConf(), conf.getTableConfiguration(table.toString()),
+        Path path = fs.getFullPath(ServerConstants.getTablesDirs(), file);
+        FileSystem ns = fs.getFileSystemByPath(path);
+        //log.debug("Opening "+file + " path " + path);
+        FileSKVIterator reader = FileOperations.getInstance().openReader(path.toString(), false, ns, ns.getConf(), conf.getTableConfiguration(table.toString()),
             dataCache, indexCache);
         reservedFiles.add(reader);
         readersReserved.put(reader, file);
@@ -453,6 +458,13 @@ public class FileManager {
       }
     }
     
+    private List<FileSKVIterator> openFileRefs(Collection<FileRef> files) throws TooManyFilesException, IOException {
+      List<String> strings = new ArrayList<String>(files.size());
+      for (FileRef ref : files)
+        strings.add(ref.path().toString());
+      return openFiles(strings);
+    }
+    
     private List<FileSKVIterator> openFiles(Collection<String> files) throws TooManyFilesException, IOException {
       // one tablet can not open more than maxOpen files, otherwise it could get stuck
       // forever waiting on itself to release files
@@ -468,9 +480,9 @@ public class FileManager {
       return newlyReservedReaders;
     }
     
-    synchronized List<InterruptibleIterator> openFiles(Map<String,DataFileValue> files, boolean detachable) throws IOException {
+    synchronized List<InterruptibleIterator> openFiles(Map<FileRef,DataFileValue> files, boolean detachable) throws IOException {
       
-      List<FileSKVIterator> newlyReservedReaders = openFiles(files.keySet());
+      List<FileSKVIterator> newlyReservedReaders = openFileRefs(files.keySet());
       
       ArrayList<InterruptibleIterator> iters = new ArrayList<InterruptibleIterator>();
       
@@ -485,9 +497,9 @@ public class FileManager {
         } else {
           iter = new ProblemReportingIterator(tablet.getTableId().toString(), filename, continueOnFailure, reader);
         }
-        
-        if (files.get(filename).isTimeSet()) {
-          iter = new TimeSettingIterator(iter, files.get(filename).getTime());
+        DataFileValue value = files.get(new FileRef(filename));
+        if (value.isTimeSet()) {
+          iter = new TimeSettingIterator(iter, value.getTime());
         }
         
         iters.add(iter);
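
A minimal sketch, not part of the commit, of the bridge the new openFileRefs(...) method builds: callers keep FileRef keys while the existing String-based openFiles(...) path is reused unchanged, and the DataFileValue for a reserved reader is later looked up with new FileRef(filename). The class name below is illustrative.

import java.util.ArrayList;
import java.util.Collection;
import java.util.List;

import org.apache.accumulo.server.fs.FileRef;

class FileRefBridgeSketch {

  // Convert FileRef keys to the plain path strings that the existing
  // String-based open/reserve bookkeeping in FileManager still uses.
  static List<String> toPathStrings(Collection<FileRef> files) {
    List<String> strings = new ArrayList<String>(files.size());
    for (FileRef ref : files)
      strings.add(ref.path().toString());
    return strings;
  }
}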

Modified: accumulo/branches/ACCUMULO-CURATOR/server/src/main/java/org/apache/accumulo/server/tabletserver/LargestFirstMemoryManager.java
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-CURATOR/server/src/main/java/org/apache/accumulo/server/tabletserver/LargestFirstMemoryManager.java?rev=1496226&r1=1496225&r2=1496226&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-CURATOR/server/src/main/java/org/apache/accumulo/server/tabletserver/LargestFirstMemoryManager.java (original)
+++ accumulo/branches/ACCUMULO-CURATOR/server/src/main/java/org/apache/accumulo/server/tabletserver/LargestFirstMemoryManager.java Mon Jun 24 21:34:20 2013
@@ -20,7 +20,6 @@ import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.List;
 
-import org.apache.accumulo.core.Constants;
 import org.apache.accumulo.core.conf.Property;
 import org.apache.accumulo.core.data.KeyExtent;
 import org.apache.accumulo.server.conf.ServerConfiguration;
@@ -30,6 +29,7 @@ import org.apache.log4j.Logger;
 public class LargestFirstMemoryManager implements MemoryManager {
   
   private static final Logger log = Logger.getLogger(LargestFirstMemoryManager.class);
+  private static final int TSERV_MINC_MAXCONCURRENT_NUMWAITING_MULTIPLIER = 2;
   
   private long maxMemory = -1;
   private int maxConcurrentMincs;
@@ -48,11 +48,12 @@ public class LargestFirstMemoryManager i
     this.numWaitingMultiplier = numWaitingMultiplier;
   }
   
+  @Override
   public void init(ServerConfiguration conf) {
     this.config = conf;
     maxMemory = conf.getConfiguration().getMemoryInBytes(Property.TSERV_MAXMEM);
     maxConcurrentMincs = conf.getConfiguration().getCount(Property.TSERV_MINC_MAXCONCURRENT);
-    numWaitingMultiplier = Constants.TSERV_MINC_MAXCONCURRENT_NUMWAITING_MULTIPLIER;
+    numWaitingMultiplier = TSERV_MINC_MAXCONCURRENT_NUMWAITING_MULTIPLIER;
   }
   
   LargestFirstMemoryManager() {
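
A compact restatement, not part of the commit, of the init() shown above once the shared Constants field is replaced by a class-local multiplier; the class name below is illustrative.

import org.apache.accumulo.core.conf.Property;
import org.apache.accumulo.server.conf.ServerConfiguration;

class MemoryManagerInitSketch {

  private static final int TSERV_MINC_MAXCONCURRENT_NUMWAITING_MULTIPLIER = 2;

  private long maxMemory = -1;
  private int maxConcurrentMincs;
  private int numWaitingMultiplier;

  // Pull the tserver memory ceiling and minor-compaction concurrency from the
  // server configuration, then take the waiting multiplier from the local constant.
  void init(ServerConfiguration conf) {
    maxMemory = conf.getConfiguration().getMemoryInBytes(Property.TSERV_MAXMEM);
    maxConcurrentMincs = conf.getConfiguration().getCount(Property.TSERV_MINC_MAXCONCURRENT);
    numWaitingMultiplier = TSERV_MINC_MAXCONCURRENT_NUMWAITING_MULTIPLIER;
  }
}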

Modified: accumulo/branches/ACCUMULO-CURATOR/server/src/main/java/org/apache/accumulo/server/tabletserver/MinorCompactor.java
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-CURATOR/server/src/main/java/org/apache/accumulo/server/tabletserver/MinorCompactor.java?rev=1496226&r1=1496225&r2=1496226&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-CURATOR/server/src/main/java/org/apache/accumulo/server/tabletserver/MinorCompactor.java (original)
+++ accumulo/branches/ACCUMULO-CURATOR/server/src/main/java/org/apache/accumulo/server/tabletserver/MinorCompactor.java Mon Jun 24 21:34:20 2013
@@ -21,7 +21,6 @@ import java.util.Collections;
 import java.util.Map;
 import java.util.Random;
 
-import org.apache.accumulo.core.Constants;
 import org.apache.accumulo.core.client.impl.Tables;
 import org.apache.accumulo.core.data.KeyExtent;
 import org.apache.accumulo.core.iterators.IteratorUtil.IteratorScope;
@@ -35,7 +34,8 @@ import org.apache.accumulo.server.proble
 import org.apache.accumulo.server.problems.ProblemType;
 import org.apache.accumulo.server.tabletserver.Tablet.MinorCompactionReason;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
+import org.apache.accumulo.server.fs.FileRef;
+import org.apache.accumulo.server.fs.VolumeManager;
 import org.apache.hadoop.fs.Path;
 import org.apache.log4j.Logger;
 
@@ -43,16 +43,16 @@ public class MinorCompactor extends Comp
   
   private static final Logger log = Logger.getLogger(MinorCompactor.class);
   
-  private static final Map<String,DataFileValue> EMPTY_MAP = Collections.emptyMap();
+  private static final Map<FileRef,DataFileValue> EMPTY_MAP = Collections.emptyMap();
   
-  private static Map<String,DataFileValue> toFileMap(String mergeFile, DataFileValue dfv) {
+  private static Map<FileRef,DataFileValue> toFileMap(FileRef mergeFile, DataFileValue dfv) {
     if (mergeFile == null)
       return EMPTY_MAP;
     
     return Collections.singletonMap(mergeFile, dfv);
   }
   
-  MinorCompactor(Configuration conf, FileSystem fs, InMemoryMap imm, String mergeFile, DataFileValue dfv, String outputFile, TableConfiguration acuTableConf,
+  MinorCompactor(Configuration conf, VolumeManager fs, InMemoryMap imm, FileRef mergeFile, DataFileValue dfv, FileRef outputFile, TableConfiguration acuTableConf,
       KeyExtent extent, MinorCompactionReason mincReason) {
     super(conf, fs, toFileMap(mergeFile, dfv), imm, outputFile, true, acuTableConf, extent, new CompactionEnv() {
       
@@ -78,7 +78,7 @@ public class MinorCompactor extends Comp
       return false; // can not get positive confirmation that its deleting.
     }
   }
-
+  
   @Override
   public CompactionStats call() {
     log.debug("Begin minor compaction " + getOutputFile() + " " + getExtent());
@@ -86,7 +86,7 @@ public class MinorCompactor extends Comp
     // output to new MapFile with a temporary name
     int sleepTime = 100;
     double growthFactor = 4;
-    int maxSleepTime = 1000 * Constants.DEFAULT_MINOR_COMPACTION_MAX_SLEEP_TIME;
+    int maxSleepTime = 1000 * 60 * 3; // 3 minutes
     boolean reportedProblem = false;
     
     runningCompactions.add(this);
@@ -127,7 +127,7 @@ public class MinorCompactor extends Comp
         // clean up
         try {
           if (getFileSystem().exists(new Path(getOutputFile()))) {
-            getFileSystem().delete(new Path(getOutputFile()), true);
+            getFileSystem().deleteRecursively(new Path(getOutputFile()));
           }
         } catch (IOException e) {
           log.warn("Failed to delete failed MinC file " + getOutputFile() + " " + e.getMessage());


