accumulo-commits mailing list archives

From: e..@apache.org
Subject: svn commit: r1494671 [4/6] - in /accumulo/branches/ACCUMULO-118: ./ assemble/ core/ core/src/main/java/org/apache/accumulo/core/ core/src/main/java/org/apache/accumulo/core/client/ core/src/main/java/org/apache/accumulo/core/client/admin/ core/src/main...
Date: Wed, 19 Jun 2013 15:57:14 GMT
Modified: accumulo/branches/ACCUMULO-118/server/src/main/java/org/apache/accumulo/server/security/SecurityOperation.java
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-118/server/src/main/java/org/apache/accumulo/server/security/SecurityOperation.java?rev=1494671&r1=1494670&r2=1494671&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-118/server/src/main/java/org/apache/accumulo/server/security/SecurityOperation.java (original)
+++ accumulo/branches/ACCUMULO-118/server/src/main/java/org/apache/accumulo/server/security/SecurityOperation.java Wed Jun 19 15:57:11 2013
@@ -39,6 +39,7 @@ import org.apache.accumulo.core.security
 import org.apache.accumulo.core.security.SystemPermission;
 import org.apache.accumulo.core.security.TablePermission;
 import org.apache.accumulo.core.security.thrift.TCredentials;
+import org.apache.accumulo.core.util.MetadataTable;
 import org.apache.accumulo.server.client.HdfsZooInstance;
 import org.apache.accumulo.server.conf.ServerConfiguration;
 import org.apache.accumulo.server.master.Master;
@@ -133,7 +134,7 @@ public class SecurityOperation {
     authorizor.initializeSecurity(credentials, rootPrincipal);
     permHandle.initializeSecurity(credentials, rootPrincipal);
     try {
-      permHandle.grantTablePermission(rootPrincipal, Constants.METADATA_TABLE_ID, TablePermission.ALTER_TABLE);
+      permHandle.grantTablePermission(rootPrincipal, MetadataTable.ID, TablePermission.ALTER_TABLE);
     } catch (TableNotFoundException e) {
       // Shouldn't happen
       throw new RuntimeException(e);
@@ -254,7 +255,7 @@ public class SecurityOperation {
     
     targetUserExists(user);
     
-    if (table.equals(Constants.METADATA_TABLE_ID) && permission.equals(TablePermission.READ))
+    if (table.equals(MetadataTable.ID) && permission.equals(TablePermission.READ))
       return true;
     
     try {

Modified: accumulo/branches/ACCUMULO-118/server/src/main/java/org/apache/accumulo/server/security/handler/ZKAuthorizor.java
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-118/server/src/main/java/org/apache/accumulo/server/security/handler/ZKAuthorizor.java?rev=1494671&r1=1494670&r2=1494671&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-118/server/src/main/java/org/apache/accumulo/server/security/handler/ZKAuthorizor.java (original)
+++ accumulo/branches/ACCUMULO-118/server/src/main/java/org/apache/accumulo/server/security/handler/ZKAuthorizor.java Wed Jun 19 15:57:11 2013
@@ -22,13 +22,13 @@ import java.util.Map;
 import java.util.Set;
 import java.util.TreeSet;
 
-import org.apache.accumulo.core.Constants;
 import org.apache.accumulo.core.client.AccumuloSecurityException;
 import org.apache.accumulo.core.client.impl.thrift.SecurityErrorCode;
 import org.apache.accumulo.core.security.Authorizations;
 import org.apache.accumulo.core.security.SystemPermission;
 import org.apache.accumulo.core.security.TablePermission;
 import org.apache.accumulo.core.security.thrift.TCredentials;
+import org.apache.accumulo.core.util.MetadataTable;
 import org.apache.accumulo.fate.zookeeper.IZooReaderWriter;
 import org.apache.accumulo.fate.zookeeper.ZooUtil.NodeExistsPolicy;
 import org.apache.accumulo.fate.zookeeper.ZooUtil.NodeMissingPolicy;
@@ -82,7 +82,7 @@ public class ZKAuthorizor implements Aut
       rootPerms.add(p);
     Map<String,Set<TablePermission>> tablePerms = new HashMap<String,Set<TablePermission>>();
     // Allow the root user to flush the !METADATA table
-    tablePerms.put(Constants.METADATA_TABLE_ID, Collections.singleton(TablePermission.ALTER_TABLE));
+    tablePerms.put(MetadataTable.ID, Collections.singleton(TablePermission.ALTER_TABLE));
     
     try {
       // prep parent node of users with root username

Modified: accumulo/branches/ACCUMULO-118/server/src/main/java/org/apache/accumulo/server/security/handler/ZKPermHandler.java
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-118/server/src/main/java/org/apache/accumulo/server/security/handler/ZKPermHandler.java?rev=1494671&r1=1494670&r2=1494671&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-118/server/src/main/java/org/apache/accumulo/server/security/handler/ZKPermHandler.java (original)
+++ accumulo/branches/ACCUMULO-118/server/src/main/java/org/apache/accumulo/server/security/handler/ZKPermHandler.java Wed Jun 19 15:57:11 2013
@@ -23,13 +23,13 @@ import java.util.Map.Entry;
 import java.util.Set;
 import java.util.TreeSet;
 
-import org.apache.accumulo.core.Constants;
 import org.apache.accumulo.core.client.AccumuloSecurityException;
 import org.apache.accumulo.core.client.TableNotFoundException;
 import org.apache.accumulo.core.client.impl.thrift.SecurityErrorCode;
 import org.apache.accumulo.core.security.SystemPermission;
 import org.apache.accumulo.core.security.TablePermission;
 import org.apache.accumulo.core.security.thrift.TCredentials;
+import org.apache.accumulo.core.util.MetadataTable;
 import org.apache.accumulo.fate.zookeeper.IZooReaderWriter;
 import org.apache.accumulo.fate.zookeeper.ZooUtil.NodeExistsPolicy;
 import org.apache.accumulo.fate.zookeeper.ZooUtil.NodeMissingPolicy;
@@ -252,7 +252,7 @@ public class ZKPermHandler implements Pe
       rootPerms.add(p);
     Map<String,Set<TablePermission>> tablePerms = new HashMap<String,Set<TablePermission>>();
     // Allow the root user to flush the !METADATA table
-    tablePerms.put(Constants.METADATA_TABLE_ID, Collections.singleton(TablePermission.ALTER_TABLE));
+    tablePerms.put(MetadataTable.ID, Collections.singleton(TablePermission.ALTER_TABLE));
     
     try {
       // prep parent node of users with root username

Modified: accumulo/branches/ACCUMULO-118/server/src/main/java/org/apache/accumulo/server/tabletserver/LargestFirstMemoryManager.java
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-118/server/src/main/java/org/apache/accumulo/server/tabletserver/LargestFirstMemoryManager.java?rev=1494671&r1=1494670&r2=1494671&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-118/server/src/main/java/org/apache/accumulo/server/tabletserver/LargestFirstMemoryManager.java (original)
+++ accumulo/branches/ACCUMULO-118/server/src/main/java/org/apache/accumulo/server/tabletserver/LargestFirstMemoryManager.java Wed Jun 19 15:57:11 2013
@@ -20,7 +20,6 @@ import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.List;
 
-import org.apache.accumulo.core.Constants;
 import org.apache.accumulo.core.conf.Property;
 import org.apache.accumulo.core.data.KeyExtent;
 import org.apache.accumulo.server.conf.ServerConfiguration;
@@ -30,6 +29,7 @@ import org.apache.log4j.Logger;
 public class LargestFirstMemoryManager implements MemoryManager {
   
   private static final Logger log = Logger.getLogger(LargestFirstMemoryManager.class);
+  private static final int TSERV_MINC_MAXCONCURRENT_NUMWAITING_MULTIPLIER = 2;
   
   private long maxMemory = -1;
   private int maxConcurrentMincs;
@@ -48,11 +48,12 @@ public class LargestFirstMemoryManager i
     this.numWaitingMultiplier = numWaitingMultiplier;
   }
   
+  @Override
   public void init(ServerConfiguration conf) {
     this.config = conf;
     maxMemory = conf.getConfiguration().getMemoryInBytes(Property.TSERV_MAXMEM);
     maxConcurrentMincs = conf.getConfiguration().getCount(Property.TSERV_MINC_MAXCONCURRENT);
-    numWaitingMultiplier = Constants.TSERV_MINC_MAXCONCURRENT_NUMWAITING_MULTIPLIER;
+    numWaitingMultiplier = TSERV_MINC_MAXCONCURRENT_NUMWAITING_MULTIPLIER;
   }
   
   LargestFirstMemoryManager() {

Modified: accumulo/branches/ACCUMULO-118/server/src/main/java/org/apache/accumulo/server/tabletserver/MinorCompactor.java
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-118/server/src/main/java/org/apache/accumulo/server/tabletserver/MinorCompactor.java?rev=1494671&r1=1494670&r2=1494671&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-118/server/src/main/java/org/apache/accumulo/server/tabletserver/MinorCompactor.java (original)
+++ accumulo/branches/ACCUMULO-118/server/src/main/java/org/apache/accumulo/server/tabletserver/MinorCompactor.java Wed Jun 19 15:57:11 2013
@@ -21,7 +21,6 @@ import java.util.Collections;
 import java.util.Map;
 import java.util.Random;
 
-import org.apache.accumulo.core.Constants;
 import org.apache.accumulo.core.client.impl.Tables;
 import org.apache.accumulo.core.data.KeyExtent;
 import org.apache.accumulo.core.iterators.IteratorUtil.IteratorScope;
@@ -79,7 +78,7 @@ public class MinorCompactor extends Comp
       return false; // can not get positive confirmation that its deleting.
     }
   }
-
+  
   @Override
   public CompactionStats call() {
     log.debug("Begin minor compaction " + getOutputFile() + " " + getExtent());
@@ -87,7 +86,7 @@ public class MinorCompactor extends Comp
     // output to new MapFile with a temporary name
     int sleepTime = 100;
     double growthFactor = 4;
-    int maxSleepTime = 1000 * Constants.DEFAULT_MINOR_COMPACTION_MAX_SLEEP_TIME;
+    int maxSleepTime = 1000 * 60 * 3; // 3 minutes
     boolean reportedProblem = false;
     
     runningCompactions.add(this);

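The MinorCompactor hunk above drops Constants.DEFAULT_MINOR_COMPACTION_MAX_SLEEP_TIME in favor of an inline cap of three minutes; the surrounding retry loop (not shown in this hunk) starts at a 100 ms sleep and grows it by growthFactor after each failed attempt, never exceeding that cap. A small sketch of that capped-backoff arithmetic, using a hypothetical helper name purely for illustration:

    // Capped multiplicative backoff: grow the sleep by growthFactor per retry, up to maxSleepTime.
    static int nextSleep(int sleepTime, double growthFactor, int maxSleepTime) {
      return (int) Math.min(sleepTime * growthFactor, maxSleepTime);
    }

    // e.g. 100 -> 400 -> 1600 -> ... capped at 1000 * 60 * 3 = 180000 ms (three minutes)
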
Modified: accumulo/branches/ACCUMULO-118/server/src/main/java/org/apache/accumulo/server/tabletserver/Tablet.java
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-118/server/src/main/java/org/apache/accumulo/server/tabletserver/Tablet.java?rev=1494671&r1=1494670&r2=1494671&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-118/server/src/main/java/org/apache/accumulo/server/tabletserver/Tablet.java (original)
+++ accumulo/branches/ACCUMULO-118/server/src/main/java/org/apache/accumulo/server/tabletserver/Tablet.java Wed Jun 19 15:57:11 2013
@@ -1089,7 +1089,7 @@ public class Tablet {
       entries = new TreeMap<Key,Value>();
       Text rowName = extent.getMetadataEntry();
       for (Entry<Key,Value> entry : tabletsKeyValues.entrySet()) {
-        if (entry.getKey().compareRow(rowName) == 0 && Constants.METADATA_TIME_COLUMN.hasColumns(entry.getKey())) {
+        if (entry.getKey().compareRow(rowName) == 0 && MetadataTable.TIME_COLUMN.hasColumns(entry.getKey())) {
           entries.put(new Key(entry.getKey()), new Value(entry.getValue()));
         }
       }
@@ -1124,8 +1124,7 @@ public class Tablet {
       
       Text rowName = extent.getMetadataEntry();
       
-      ScannerImpl mdScanner = new ScannerImpl(HdfsZooInstance.getInstance(), SecurityConstants.getSystemCredentials(), Constants.METADATA_TABLE_ID,
-          Authorizations.EMPTY);
+      ScannerImpl mdScanner = new ScannerImpl(HdfsZooInstance.getInstance(), SecurityConstants.getSystemCredentials(), MetadataTable.ID, Authorizations.EMPTY);
       
       // Commented out because when no data file is present, each tablet will scan through metadata table and return nothing
       // reduced batch size to improve performance
@@ -1133,7 +1132,7 @@ public class Tablet {
       mdScanner.setBatchSize(1000);
       
       // leave these in, again, now using endKey for safety
-      mdScanner.fetchColumnFamily(Constants.METADATA_DATAFILE_COLUMN_FAMILY);
+      mdScanner.fetchColumnFamily(MetadataTable.DATAFILE_COLUMN_FAMILY);
       
       mdScanner.setRange(new Range(rowName));
       
@@ -1165,7 +1164,7 @@ public class Tablet {
       for (Entry<Key,Value> entry : tabletsKeyValues.entrySet()) {
         Key key = entry.getKey();
         if (key.getRow().equals(row)) {
-          if (key.getColumnFamily().equals(Constants.METADATA_LOG_COLUMN_FAMILY)) {
+          if (key.getColumnFamily().equals(MetadataTable.LOG_COLUMN_FAMILY)) {
             logEntries.add(MetadataTable.entryFromKeyValue(key, entry.getValue()));
           }
         }
@@ -1182,7 +1181,7 @@ public class Tablet {
     Text row = extent.getMetadataEntry();
     for (Entry<Key,Value> entry : tabletsKeyValues.entrySet()) {
       Key key = entry.getKey();
-      if (key.getRow().equals(row) && key.getColumnFamily().equals(Constants.METADATA_SCANFILE_COLUMN_FAMILY)) {
+      if (key.getRow().equals(row) && key.getColumnFamily().equals(MetadataTable.SCANFILE_COLUMN_FAMILY)) {
         String meta = key.getColumnQualifier().toString();
         Path path = fs.getFullPath(ServerConstants.getTablesDirs(), meta);
         scanFiles.add(new FileRef(meta, path));
@@ -1196,7 +1195,7 @@ public class Tablet {
     Text row = extent.getMetadataEntry();
     for (Entry<Key,Value> entry : tabletsKeyValues.entrySet()) {
       Key key = entry.getKey();
-      if (key.getRow().equals(row) && Constants.METADATA_FLUSH_COLUMN.equals(key.getColumnFamily(), key.getColumnQualifier()))
+      if (key.getRow().equals(row) && MetadataTable.FLUSH_COLUMN.equals(key.getColumnFamily(), key.getColumnQualifier()))
         return Long.parseLong(entry.getValue().toString());
     }
     
@@ -1207,7 +1206,7 @@ public class Tablet {
     Text row = extent.getMetadataEntry();
     for (Entry<Key,Value> entry : tabletsKeyValues.entrySet()) {
       Key key = entry.getKey();
-      if (key.getRow().equals(row) && Constants.METADATA_COMPACT_COLUMN.equals(key.getColumnFamily(), key.getColumnQualifier()))
+      if (key.getRow().equals(row) && MetadataTable.COMPACT_COLUMN.equals(key.getColumnFamily(), key.getColumnQualifier()))
         return Long.parseLong(entry.getValue().toString());
     }
     
@@ -1223,7 +1222,7 @@ public class Tablet {
   
   private static TServerInstance lookupLastServer(KeyExtent extent, SortedMap<Key,Value> tabletsKeyValues) {
     for (Entry<Key,Value> entry : tabletsKeyValues.entrySet()) {
-      if (entry.getKey().getColumnFamily().compareTo(Constants.METADATA_LAST_LOCATION_COLUMN_FAMILY) == 0) {
+      if (entry.getKey().getColumnFamily().compareTo(MetadataTable.LAST_LOCATION_COLUMN_FAMILY) == 0) {
         return new TServerInstance(entry.getValue(), entry.getKey().getColumnQualifier());
       }
     }
@@ -2727,21 +2726,21 @@ public class Tablet {
       Pair<List<LogEntry>,SortedMap<FileRef,DataFileValue>> fileLog = MetadataTable.getFileAndLogEntries(SecurityConstants.getSystemCredentials(), extent);
       
       if (fileLog.getFirst().size() != 0) {
-        String msg = "Closed tablet " + extent + " has walog entries in " + Constants.METADATA_TABLE_NAME + " " + fileLog.getFirst();
+        String msg = "Closed tablet " + extent + " has walog entries in " + MetadataTable.NAME + " " + fileLog.getFirst();
         log.error(msg);
         throw new RuntimeException(msg);
       }
       
       if (extent.isRootTablet()) {
         if (!fileLog.getSecond().keySet().equals(datafileManager.getDatafileSizes().keySet())) {
-          String msg = "Data file in " + Constants.METADATA_TABLE_NAME + " differ from in memory data " + extent + "  " + fileLog.getSecond().keySet() + "  "
+          String msg = "Data file in " + MetadataTable.NAME + " differ from in memory data " + extent + "  " + fileLog.getSecond().keySet() + "  "
               + datafileManager.getDatafileSizes().keySet();
           log.error(msg);
           throw new RuntimeException(msg);
         }
       } else {
         if (!fileLog.getSecond().equals(datafileManager.getDatafileSizes())) {
-          String msg = "Data file in " + Constants.METADATA_TABLE_NAME + " differ from in memory data " + extent + "  " + fileLog.getSecond() + "  "
+          String msg = "Data file in " + MetadataTable.NAME + " differ from in memory data " + extent + "  " + fileLog.getSecond() + "  "
               + datafileManager.getDatafileSizes();
           log.error(msg);
           throw new RuntimeException(msg);

Modified: accumulo/branches/ACCUMULO-118/server/src/main/java/org/apache/accumulo/server/tabletserver/TabletServer.java
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-118/server/src/main/java/org/apache/accumulo/server/tabletserver/TabletServer.java?rev=1494671&r1=1494670&r2=1494671&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-118/server/src/main/java/org/apache/accumulo/server/tabletserver/TabletServer.java (original)
+++ accumulo/branches/ACCUMULO-118/server/src/main/java/org/apache/accumulo/server/tabletserver/TabletServer.java Wed Jun 19 15:57:11 2013
@@ -121,6 +121,7 @@ import org.apache.accumulo.core.util.Col
 import org.apache.accumulo.core.util.Daemon;
 import org.apache.accumulo.core.util.LoggingRunnable;
 import org.apache.accumulo.core.util.Pair;
+import org.apache.accumulo.core.util.RootTable;
 import org.apache.accumulo.core.util.ServerServices;
 import org.apache.accumulo.core.util.ServerServices.Service;
 import org.apache.accumulo.core.util.SimpleThreadPool;
@@ -1274,8 +1275,8 @@ public class TabletServer extends Abstra
         } else if (keyExtent.isRootTablet()) {
           throw new IllegalArgumentException("Cannot batch query root tablet with other tablets " + threadPoolExtent + " " + keyExtent);
         } else if (keyExtent.isMeta() && !threadPoolExtent.isMeta()) {
-          throw new IllegalArgumentException("Cannot batch query " + Constants.METADATA_TABLE_NAME + " and non " + Constants.METADATA_TABLE_NAME + " tablets "
-              + threadPoolExtent + " " + keyExtent);
+          throw new IllegalArgumentException("Cannot batch query " + MetadataTable.NAME + " and non " + MetadataTable.NAME + " tablets " + threadPoolExtent
+              + " " + keyExtent);
         }
         
       }
@@ -2872,7 +2873,7 @@ public class TabletServer extends Abstra
       throw new AccumuloException("Root tablet already has a location set");
     }
     
-    return new Pair<Text,KeyExtent>(new Text(Constants.ZROOT_TABLET), null);
+    return new Pair<Text,KeyExtent>(new Text(RootTable.ZROOT_TABLET), null);
   }
   
   public static Pair<Text,KeyExtent> verifyTabletInformation(KeyExtent extent, TServerInstance instance, SortedMap<Key,Value> tabletsKeyValues,
@@ -2883,11 +2884,10 @@ public class TabletServer extends Abstra
       return verifyRootTablet(extent, instance);
     }
     
-    List<ColumnFQ> columnsToFetch = Arrays.asList(new ColumnFQ[] {Constants.METADATA_DIRECTORY_COLUMN, Constants.METADATA_PREV_ROW_COLUMN,
-        Constants.METADATA_SPLIT_RATIO_COLUMN, Constants.METADATA_OLD_PREV_ROW_COLUMN, Constants.METADATA_TIME_COLUMN});
+    List<ColumnFQ> columnsToFetch = Arrays.asList(new ColumnFQ[] {MetadataTable.DIRECTORY_COLUMN, MetadataTable.PREV_ROW_COLUMN,
+        MetadataTable.SPLIT_RATIO_COLUMN, MetadataTable.OLD_PREV_ROW_COLUMN, MetadataTable.TIME_COLUMN});
     
-    ScannerImpl scanner = new ScannerImpl(HdfsZooInstance.getInstance(), SecurityConstants.getSystemCredentials(), Constants.METADATA_TABLE_ID,
-        Authorizations.EMPTY);
+    ScannerImpl scanner = new ScannerImpl(HdfsZooInstance.getInstance(), SecurityConstants.getSystemCredentials(), MetadataTable.ID, Authorizations.EMPTY);
     scanner.setRange(extent.toMetadataRange());
     
     TreeMap<Key,Value> tkv = new TreeMap<Key,Value>();
@@ -2910,7 +2910,7 @@ public class TabletServer extends Abstra
     
     Value oldPrevEndRow = null;
     for (Entry<Key,Value> entry : tabletsKeyValues.entrySet()) {
-      if (Constants.METADATA_OLD_PREV_ROW_COLUMN.hasColumns(entry.getKey())) {
+      if (MetadataTable.OLD_PREV_ROW_COLUMN.hasColumns(entry.getKey())) {
         oldPrevEndRow = entry.getValue();
       }
     }
@@ -2953,19 +2953,19 @@ public class TabletServer extends Abstra
         return null;
       }
       Text cf = key.getColumnFamily();
-      if (cf.equals(Constants.METADATA_FUTURE_LOCATION_COLUMN_FAMILY)) {
+      if (cf.equals(MetadataTable.FUTURE_LOCATION_COLUMN_FAMILY)) {
         if (future != null) {
           throw new AccumuloException("Tablet has multiple future locations " + extent);
         }
         future = new TServerInstance(entry.getValue(), key.getColumnQualifier());
-      } else if (cf.equals(Constants.METADATA_CURRENT_LOCATION_COLUMN_FAMILY)) {
+      } else if (cf.equals(MetadataTable.CURRENT_LOCATION_COLUMN_FAMILY)) {
         log.info("Tablet seems to be already assigned to " + new TServerInstance(entry.getValue(), key.getColumnQualifier()));
         return null;
-      } else if (Constants.METADATA_PREV_ROW_COLUMN.hasColumns(key)) {
+      } else if (MetadataTable.PREV_ROW_COLUMN.hasColumns(key)) {
         prevEndRow = entry.getValue();
-      } else if (Constants.METADATA_DIRECTORY_COLUMN.hasColumns(key)) {
+      } else if (MetadataTable.DIRECTORY_COLUMN.hasColumns(key)) {
         dir = entry.getValue();
-      } else if (Constants.METADATA_TIME_COLUMN.hasColumns(key)) {
+      } else if (MetadataTable.TIME_COLUMN.hasColumns(key)) {
         time = entry.getValue();
       }
     }

Modified: accumulo/branches/ACCUMULO-118/server/src/main/java/org/apache/accumulo/server/tabletserver/TabletServerResourceManager.java
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-118/server/src/main/java/org/apache/accumulo/server/tabletserver/TabletServerResourceManager.java?rev=1494671&r1=1494670&r2=1494671&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-118/server/src/main/java/org/apache/accumulo/server/tabletserver/TabletServerResourceManager.java (original)
+++ accumulo/branches/ACCUMULO-118/server/src/main/java/org/apache/accumulo/server/tabletserver/TabletServerResourceManager.java Wed Jun 19 15:57:11 2013
@@ -36,7 +36,6 @@ import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicLong;
 
 import org.apache.accumulo.trace.instrument.TraceExecutorService;
-import org.apache.accumulo.core.Constants;
 import org.apache.accumulo.core.client.Instance;
 import org.apache.accumulo.core.conf.AccumuloConfiguration;
 import org.apache.accumulo.core.conf.Property;
@@ -46,6 +45,7 @@ import org.apache.accumulo.core.util.Dae
 import org.apache.accumulo.core.util.LoggingRunnable;
 import org.apache.accumulo.core.util.MetadataTable.DataFileValue;
 import org.apache.accumulo.core.util.NamingThreadFactory;
+import org.apache.accumulo.core.util.RootTable;
 import org.apache.accumulo.core.util.UtilWaitThread;
 import org.apache.accumulo.server.conf.ServerConfiguration;
 import org.apache.accumulo.server.fs.FileRef;
@@ -710,7 +710,7 @@ public class TabletServerResourceManager
   }
   
   public void executeMajorCompaction(KeyExtent tablet, Runnable compactionTask) {
-    if (tablet.equals(Constants.ROOT_TABLET_EXTENT)) {
+    if (tablet.equals(RootTable.ROOT_TABLET_EXTENT)) {
       rootMajorCompactionThreadPool.execute(compactionTask);
     } else if (tablet.isMeta()) {
       defaultMajorCompactionThreadPool.execute(compactionTask);

Modified: accumulo/branches/ACCUMULO-118/server/src/main/java/org/apache/accumulo/server/trace/TraceServer.java
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-118/server/src/main/java/org/apache/accumulo/server/trace/TraceServer.java?rev=1494671&r1=1494670&r2=1494671&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-118/server/src/main/java/org/apache/accumulo/server/trace/TraceServer.java (original)
+++ accumulo/branches/ACCUMULO-118/server/src/main/java/org/apache/accumulo/server/trace/TraceServer.java Wed Jun 19 15:57:11 2013
@@ -28,6 +28,7 @@ import org.apache.accumulo.core.client.B
 import org.apache.accumulo.core.client.BatchWriterConfig;
 import org.apache.accumulo.core.client.Connector;
 import org.apache.accumulo.core.client.Instance;
+import org.apache.accumulo.core.client.IteratorSetting;
 import org.apache.accumulo.core.client.MutationsRejectedException;
 import org.apache.accumulo.core.client.security.tokens.AuthenticationToken;
 import org.apache.accumulo.core.client.security.tokens.AuthenticationToken.Properties;
@@ -36,6 +37,7 @@ import org.apache.accumulo.core.conf.Acc
 import org.apache.accumulo.core.conf.Property;
 import org.apache.accumulo.core.data.Mutation;
 import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.iterators.user.AgeOffFilter;
 import org.apache.accumulo.core.security.SecurityUtil;
 import org.apache.accumulo.core.trace.TraceFormatter;
 import org.apache.accumulo.core.util.AddressUtil;
@@ -184,6 +186,9 @@ public class TraceServer implements Watc
         connector = serverConfiguration.getInstance().getConnector(principal, at);
         if (!connector.tableOperations().exists(table)) {
           connector.tableOperations().create(table);
+          IteratorSetting setting = new IteratorSetting(10, "ageoff", AgeOffFilter.class.getName());
+          AgeOffFilter.setTTL(setting, 7 * 24 * 60 * 60 * 1000l);
+          connector.tableOperations().attachIterator(table, setting);
         }
         connector.tableOperations().setProperty(table, Property.TABLE_FORMATTER_CLASS.getKey(), TraceFormatter.class.getName());
         break;

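For reference, the TraceServer change above attaches Accumulo's AgeOffFilter to the trace table when it is first created, so trace entries older than seven days are aged off. A minimal standalone sketch of the same pattern follows; the helper and table names are illustrative assumptions, while the API calls (IteratorSetting, AgeOffFilter.setTTL, attachIterator) are the ones used in the hunk above.

    import org.apache.accumulo.core.client.Connector;
    import org.apache.accumulo.core.client.IteratorSetting;
    import org.apache.accumulo.core.iterators.user.AgeOffFilter;

    // Hypothetical helper: create a table if needed and attach a 7-day age-off iterator,
    // mirroring what TraceServer now does for its trace table.
    static void createWithAgeOff(Connector connector, String table) throws Exception {
      if (!connector.tableOperations().exists(table)) {
        connector.tableOperations().create(table);
        IteratorSetting setting = new IteratorSetting(10, "ageoff", AgeOffFilter.class.getName());
        AgeOffFilter.setTTL(setting, 7 * 24 * 60 * 60 * 1000L); // TTL in milliseconds
        connector.tableOperations().attachIterator(table, setting);
      }
    }
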
Modified: accumulo/branches/ACCUMULO-118/server/src/main/java/org/apache/accumulo/server/util/AddFilesWithMissingEntries.java
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-118/server/src/main/java/org/apache/accumulo/server/util/AddFilesWithMissingEntries.java?rev=1494671&r1=1494670&r2=1494671&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-118/server/src/main/java/org/apache/accumulo/server/util/AddFilesWithMissingEntries.java (original)
+++ accumulo/branches/ACCUMULO-118/server/src/main/java/org/apache/accumulo/server/util/AddFilesWithMissingEntries.java Wed Jun 19 15:57:11 2013
@@ -20,7 +20,6 @@ import java.util.HashSet;
 import java.util.Map.Entry;
 import java.util.Set;
 
-import org.apache.accumulo.core.Constants;
 import org.apache.accumulo.core.cli.BatchWriterOpts;
 import org.apache.accumulo.core.client.MultiTableBatchWriter;
 import org.apache.accumulo.core.client.Scanner;
@@ -31,6 +30,8 @@ import org.apache.accumulo.core.data.Par
 import org.apache.accumulo.core.data.Range;
 import org.apache.accumulo.core.data.Value;
 import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.core.util.MetadataTable;
+import org.apache.accumulo.core.util.RootTable;
 import org.apache.accumulo.server.ServerConstants;
 import org.apache.accumulo.server.cli.ClientOpts;
 import org.apache.hadoop.conf.Configuration;
@@ -47,12 +48,12 @@ public class AddFilesWithMissingEntries 
   static final Logger log = Logger.getLogger(AddFilesWithMissingEntries.class);
   
   public static class Opts extends ClientOpts {
-    @Parameter(names = "-update", description = "Make changes to the " + Constants.METADATA_TABLE_NAME + " table to include missing files")
+    @Parameter(names = "-update", description = "Make changes to the " + MetadataTable.NAME + " table to include missing files")
     boolean update = false;
   }
   
   /**
-   * A utility to add files to the {@value Constants#METADATA_TABLE_NAME} table that are not listed in the root tablet. This is a recovery tool for someone who
+   * A utility to add files to the {@value MetadataTable#NAME} table that are not listed in the root tablet. This is a recovery tool for someone who
    * knows what they are doing. It might be better to save off files, and recover your instance by re-initializing and importing the existing files.
    */
   public static void main(String[] args) throws Exception {
@@ -60,9 +61,9 @@ public class AddFilesWithMissingEntries 
     BatchWriterOpts bwOpts = new BatchWriterOpts();
     opts.parseArgs(AddFilesWithMissingEntries.class.getName(), args, bwOpts);
     
-    final Key rootTableEnd = new Key(Constants.ROOT_TABLET_EXTENT.getEndRow());
-    final Range range = new Range(rootTableEnd.followingKey(PartialKey.ROW), true, Constants.METADATA_RESERVED_KEYSPACE_START_KEY, false);
-    final Scanner scanner = opts.getConnector().createScanner(Constants.METADATA_TABLE_NAME, Authorizations.EMPTY);
+    final Key rootTableEnd = new Key(RootTable.ROOT_TABLET_EXTENT.getEndRow());
+    final Range range = new Range(rootTableEnd.followingKey(PartialKey.ROW), true, MetadataTable.RESERVED_KEYSPACE_START_KEY, false);
+    final Scanner scanner = opts.getConnector().createScanner(MetadataTable.NAME, Authorizations.EMPTY);
     scanner.setRange(range);
     final Configuration conf = new Configuration();
     final FileSystem fs = FileSystem.get(conf);
@@ -88,10 +89,10 @@ public class AddFilesWithMissingEntries 
         knownFiles.clear();
         last = ke;
       }
-      if (Constants.METADATA_DIRECTORY_COLUMN.hasColumns(key)) {
+      if (MetadataTable.DIRECTORY_COLUMN.hasColumns(key)) {
         directory = entry.getValue().toString();
         log.debug("Found directory " + directory + " for row " + key.getRow().toString());
-      } else if (key.compareColumnFamily(Constants.METADATA_DATAFILE_COLUMN_FAMILY) == 0) {
+      } else if (key.compareColumnFamily(MetadataTable.DATAFILE_COLUMN_FAMILY) == 0) {
         String filename = key.getColumnQualifier().toString();
         knownFiles.add(filename);
         log.debug("METADATA file found: " + filename);
@@ -123,9 +124,9 @@ public class AddFilesWithMissingEntries 
           String size = Long.toString(file.getLen());
           String entries = "1"; // lie
           String value = size + "," + entries;
-          m.put(Constants.METADATA_DATAFILE_COLUMN_FAMILY, new Text(filename), new Value(value.getBytes()));
+          m.put(MetadataTable.DATAFILE_COLUMN_FAMILY, new Text(filename), new Value(value.getBytes()));
           if (update) {
-            writer.getBatchWriter(Constants.METADATA_TABLE_NAME).addMutation(m);
+            writer.getBatchWriter(MetadataTable.NAME).addMutation(m);
           }
         }
       }

Modified: accumulo/branches/ACCUMULO-118/server/src/main/java/org/apache/accumulo/server/util/Admin.java
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-118/server/src/main/java/org/apache/accumulo/server/util/Admin.java?rev=1494671&r1=1494670&r2=1494671&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-118/server/src/main/java/org/apache/accumulo/server/util/Admin.java (original)
+++ accumulo/branches/ACCUMULO-118/server/src/main/java/org/apache/accumulo/server/util/Admin.java Wed Jun 19 15:57:11 2013
@@ -22,7 +22,6 @@ import java.util.List;
 import java.util.Set;
 import java.util.concurrent.atomic.AtomicInteger;
 
-import org.apache.accumulo.core.Constants;
 import org.apache.accumulo.core.client.AccumuloException;
 import org.apache.accumulo.core.client.AccumuloSecurityException;
 import org.apache.accumulo.core.client.Connector;
@@ -34,6 +33,7 @@ import org.apache.accumulo.core.client.s
 import org.apache.accumulo.core.master.thrift.MasterClientService;
 import org.apache.accumulo.core.security.CredentialHelper;
 import org.apache.accumulo.core.security.thrift.TCredentials;
+import org.apache.accumulo.core.util.MetadataTable;
 import org.apache.accumulo.server.cli.ClientOpts;
 import org.apache.accumulo.server.client.HdfsZooInstance;
 import org.apache.accumulo.server.security.SecurityConstants;
@@ -132,7 +132,7 @@ public class Admin {
           Connector conn = instance.getConnector(principal, token);
           Set<String> tables = conn.tableOperations().tableIdMap().keySet();
           for (String table : tables) {
-            if (table.equals(Constants.METADATA_TABLE_NAME))
+            if (table.equals(MetadataTable.NAME))
               continue;
             try {
               conn.tableOperations().flush(table, null, null, false);

Modified: accumulo/branches/ACCUMULO-118/server/src/main/java/org/apache/accumulo/server/util/CheckForMetadataProblems.java
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-118/server/src/main/java/org/apache/accumulo/server/util/CheckForMetadataProblems.java?rev=1494671&r1=1494670&r2=1494671&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-118/server/src/main/java/org/apache/accumulo/server/util/CheckForMetadataProblems.java (original)
+++ accumulo/branches/ACCUMULO-118/server/src/main/java/org/apache/accumulo/server/util/CheckForMetadataProblems.java Wed Jun 19 15:57:11 2013
@@ -23,7 +23,6 @@ import java.util.Map.Entry;
 import java.util.Set;
 import java.util.TreeSet;
 
-import org.apache.accumulo.core.Constants;
 import org.apache.accumulo.core.client.AccumuloSecurityException;
 import org.apache.accumulo.core.client.Scanner;
 import org.apache.accumulo.core.client.impl.Writer;
@@ -106,12 +105,12 @@ public class CheckForMetadataProblems {
     if (opts.offline) {
       scanner = new OfflineMetadataScanner(ServerConfiguration.getSystemConfiguration(opts.getInstance()), fs);
     } else {
-      scanner = opts.getConnector().createScanner(Constants.METADATA_TABLE_NAME, Authorizations.EMPTY);
+      scanner = opts.getConnector().createScanner(MetadataTable.NAME, Authorizations.EMPTY);
     }
     
-    scanner.setRange(Constants.METADATA_KEYSPACE);
-    Constants.METADATA_PREV_ROW_COLUMN.fetch(scanner);
-    scanner.fetchColumnFamily(Constants.METADATA_CURRENT_LOCATION_COLUMN_FAMILY);
+    scanner.setRange(MetadataTable.KEYSPACE);
+    MetadataTable.PREV_ROW_COLUMN.fetch(scanner);
+    scanner.fetchColumnFamily(MetadataTable.CURRENT_LOCATION_COLUMN_FAMILY);
     
     Text colf = new Text();
     Text colq = new Text();
@@ -141,11 +140,11 @@ public class CheckForMetadataProblems {
         tables.put(tableName, tablets);
       }
       
-      if (Constants.METADATA_PREV_ROW_COLUMN.equals(colf, colq)) {
+      if (MetadataTable.PREV_ROW_COLUMN.equals(colf, colq)) {
         KeyExtent tabletKe = new KeyExtent(entry.getKey().getRow(), entry.getValue());
         tablets.add(tabletKe);
         justLoc = false;
-      } else if (colf.equals(Constants.METADATA_CURRENT_LOCATION_COLUMN_FAMILY)) {
+      } else if (colf.equals(MetadataTable.CURRENT_LOCATION_COLUMN_FAMILY)) {
         if (justLoc) {
           System.out.println("Problem at key " + entry.getKey());
           sawProblems = true;
@@ -167,7 +166,7 @@ public class CheckForMetadataProblems {
     }
     
     if (count == 0) {
-      System.err.println("ERROR : " + Constants.METADATA_TABLE_NAME + " table is empty");
+      System.err.println("ERROR : " + MetadataTable.NAME + " table is empty");
       sawProblems = true;
     }
     

Modified: accumulo/branches/ACCUMULO-118/server/src/main/java/org/apache/accumulo/server/util/FindOfflineTablets.java
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-118/server/src/main/java/org/apache/accumulo/server/util/FindOfflineTablets.java?rev=1494671&r1=1494670&r2=1494671&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-118/server/src/main/java/org/apache/accumulo/server/util/FindOfflineTablets.java (original)
+++ accumulo/branches/ACCUMULO-118/server/src/main/java/org/apache/accumulo/server/util/FindOfflineTablets.java Wed Jun 19 15:57:11 2013
@@ -20,10 +20,11 @@ import java.util.Iterator;
 import java.util.Set;
 import java.util.concurrent.atomic.AtomicBoolean;
 
-import org.apache.accumulo.core.Constants;
 import org.apache.accumulo.core.client.Instance;
 import org.apache.accumulo.core.conf.DefaultConfiguration;
 import org.apache.accumulo.core.master.state.tables.TableState;
+import org.apache.accumulo.core.util.MetadataTable;
+import org.apache.accumulo.core.util.RootTable;
 import org.apache.accumulo.server.cli.ClientOpts;
 import org.apache.accumulo.server.master.LiveTServerSet;
 import org.apache.accumulo.server.master.LiveTServerSet.Listener;
@@ -47,8 +48,8 @@ public class FindOfflineTablets {
     opts.parseArgs(FindOfflineTablets.class.getName(), args);
     final AtomicBoolean scanning = new AtomicBoolean(false);
     Instance instance = opts.getInstance();
-    MetaDataTableScanner rootScanner = new MetaDataTableScanner(instance, SecurityConstants.getSystemCredentials(), Constants.METADATA_ROOT_TABLET_KEYSPACE);
-    MetaDataTableScanner metaScanner = new MetaDataTableScanner(instance, SecurityConstants.getSystemCredentials(), Constants.NON_ROOT_METADATA_KEYSPACE);
+    MetaDataTableScanner rootScanner = new MetaDataTableScanner(instance, SecurityConstants.getSystemCredentials(), RootTable.KEYSPACE);
+    MetaDataTableScanner metaScanner = new MetaDataTableScanner(instance, SecurityConstants.getSystemCredentials(), MetadataTable.NON_ROOT_KEYSPACE);
     @SuppressWarnings("unchecked")
     Iterator<TabletLocationState> scanner = (Iterator<TabletLocationState>)new IteratorChain(rootScanner, metaScanner);
     LiveTServerSet tservers = new LiveTServerSet(instance, DefaultConfiguration.getDefaultConfiguration(), new Listener() {
@@ -66,7 +67,7 @@ public class FindOfflineTablets {
       TabletLocationState locationState = scanner.next();
       TabletState state = locationState.getState(tservers.getCurrentServers());
       if (state != null && state != TabletState.HOSTED && TableManager.getInstance().getTableState(locationState.extent.getTableId().toString()) != TableState.OFFLINE)
-        if (!locationState.extent.equals(Constants.ROOT_TABLET_EXTENT))
+        if (!locationState.extent.equals(RootTable.ROOT_TABLET_EXTENT))
           System.out.println(locationState + " is " + state + "  #walogs:" + locationState.walogs.size());
     }
   }

Modified: accumulo/branches/ACCUMULO-118/server/src/main/java/org/apache/accumulo/server/util/Initialize.java
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-118/server/src/main/java/org/apache/accumulo/server/util/Initialize.java?rev=1494671&r1=1494670&r2=1494671&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-118/server/src/main/java/org/apache/accumulo/server/util/Initialize.java (original)
+++ accumulo/branches/ACCUMULO-118/server/src/main/java/org/apache/accumulo/server/util/Initialize.java Wed Jun 19 15:57:11 2013
@@ -45,6 +45,8 @@ import org.apache.accumulo.core.master.s
 import org.apache.accumulo.core.master.thrift.MasterGoalState;
 import org.apache.accumulo.core.security.SecurityUtil;
 import org.apache.accumulo.core.util.CachedConfiguration;
+import org.apache.accumulo.core.util.MetadataTable;
+import org.apache.accumulo.core.util.RootTable;
 import org.apache.accumulo.core.zookeeper.ZooUtil;
 import org.apache.accumulo.fate.zookeeper.IZooReaderWriter;
 import org.apache.accumulo.fate.zookeeper.ZooUtil.NodeExistsPolicy;
@@ -105,10 +107,10 @@ public class Initialize {
     initialMetadataConf.put(Property.TABLE_ITERATOR_PREFIX.getKey() + "majc.bulkLoadFilter", "20," + MetadataBulkLoadFilter.class.getName());
     initialMetadataConf.put(Property.TABLE_FAILURES_IGNORE.getKey(), "false");
     initialMetadataConf.put(Property.TABLE_LOCALITY_GROUP_PREFIX.getKey() + "tablet",
-        String.format("%s,%s", Constants.METADATA_TABLET_COLUMN_FAMILY.toString(), Constants.METADATA_CURRENT_LOCATION_COLUMN_FAMILY.toString()));
+        String.format("%s,%s", MetadataTable.TABLET_COLUMN_FAMILY.toString(), MetadataTable.CURRENT_LOCATION_COLUMN_FAMILY.toString()));
     initialMetadataConf.put(Property.TABLE_LOCALITY_GROUP_PREFIX.getKey() + "server", String.format("%s,%s,%s,%s",
-        Constants.METADATA_DATAFILE_COLUMN_FAMILY.toString(), Constants.METADATA_LOG_COLUMN_FAMILY.toString(),
-        Constants.METADATA_SERVER_COLUMN_FAMILY.toString(), Constants.METADATA_FUTURE_LOCATION_COLUMN_FAMILY.toString()));
+        MetadataTable.DATAFILE_COLUMN_FAMILY.toString(), MetadataTable.LOG_COLUMN_FAMILY.toString(), MetadataTable.SERVER_COLUMN_FAMILY.toString(),
+        MetadataTable.FUTURE_LOCATION_COLUMN_FAMILY.toString()));
     initialMetadataConf.put(Property.TABLE_LOCALITY_GROUPS.getKey(), "tablet,server");
     initialMetadataConf.put(Property.TABLE_DEFAULT_SCANTIME_VISIBILITY.getKey(), "");
     initialMetadataConf.put(Property.TABLE_INDEXCACHE_ENABLED.getKey(), "true");
@@ -189,9 +191,6 @@ public class Initialize {
     return true;
   }
   
-  /**
-   * @return
-   */
   private static boolean zookeeperAvailable() {
     IZooReaderWriter zoo = ZooReaderWriter.getInstance();
     try {
@@ -227,7 +226,7 @@ public class Initialize {
     // the actual disk location of the root tablet
     final Path rootTablet = new Path(ServerConstants.getRootTabletDir());
     
-    final Path tableMetadataTabletDirs[] = paths(ServerConstants.prefix(ServerConstants.getMetadataTableDirs(), Constants.TABLE_TABLET_LOCATION));
+    final Path tableMetadataTabletDirs[] = paths(ServerConstants.prefix(ServerConstants.getMetadataTableDirs(), MetadataTable.TABLE_TABLET_LOCATION));
     final Path defaultMetadataTabletDirs[] = paths(ServerConstants.prefix(ServerConstants.getMetadataTableDirs(), Constants.DEFAULT_TABLET_LOCATION));
     
     final Path metadataTableDirs[] = paths(ServerConstants.getMetadataTableDirs());
@@ -281,48 +280,48 @@ public class Initialize {
     mfw.startDefaultLocalityGroup();
     
     // -----------] root tablet info
-    Text rootExtent = Constants.ROOT_TABLET_EXTENT.getMetadataEntry();
+    Text rootExtent = RootTable.ROOT_TABLET_EXTENT.getMetadataEntry();
     
     // root's directory
-    Key rootDirKey = new Key(rootExtent, Constants.METADATA_DIRECTORY_COLUMN.getColumnFamily(), Constants.METADATA_DIRECTORY_COLUMN.getColumnQualifier(), 0);
+    Key rootDirKey = new Key(rootExtent, MetadataTable.DIRECTORY_COLUMN.getColumnFamily(), MetadataTable.DIRECTORY_COLUMN.getColumnQualifier(), 0);
     mfw.append(rootDirKey, new Value("/root_tablet".getBytes()));
     
     // root's prev row
-    Key rootPrevRowKey = new Key(rootExtent, Constants.METADATA_PREV_ROW_COLUMN.getColumnFamily(), Constants.METADATA_PREV_ROW_COLUMN.getColumnQualifier(), 0);
+    Key rootPrevRowKey = new Key(rootExtent, MetadataTable.PREV_ROW_COLUMN.getColumnFamily(), MetadataTable.PREV_ROW_COLUMN.getColumnQualifier(), 0);
     mfw.append(rootPrevRowKey, new Value(new byte[] {0}));
     
     // ----------] table tablet info
-    Text tableExtent = new Text(KeyExtent.getMetadataEntry(new Text(Constants.METADATA_TABLE_ID), Constants.METADATA_RESERVED_KEYSPACE_START_KEY.getRow()));
+    Text tableExtent = new Text(KeyExtent.getMetadataEntry(new Text(MetadataTable.ID), MetadataTable.RESERVED_KEYSPACE_START_KEY.getRow()));
     
     // table tablet's directory
-    Key tableDirKey = new Key(tableExtent, Constants.METADATA_DIRECTORY_COLUMN.getColumnFamily(), Constants.METADATA_DIRECTORY_COLUMN.getColumnQualifier(), 0);
-    mfw.append(tableDirKey, new Value(Constants.TABLE_TABLET_LOCATION.getBytes()));
+    Key tableDirKey = new Key(tableExtent, MetadataTable.DIRECTORY_COLUMN.getColumnFamily(), MetadataTable.DIRECTORY_COLUMN.getColumnQualifier(), 0);
+    mfw.append(tableDirKey, new Value(MetadataTable.TABLE_TABLET_LOCATION.getBytes()));
     
     // table tablet time
-    Key tableTimeKey = new Key(tableExtent, Constants.METADATA_TIME_COLUMN.getColumnFamily(), Constants.METADATA_TIME_COLUMN.getColumnQualifier(), 0);
+    Key tableTimeKey = new Key(tableExtent, MetadataTable.TIME_COLUMN.getColumnFamily(), MetadataTable.TIME_COLUMN.getColumnQualifier(), 0);
     mfw.append(tableTimeKey, new Value((TabletTime.LOGICAL_TIME_ID + "0").getBytes()));
     
     // table tablet's prevrow
-    Key tablePrevRowKey = new Key(tableExtent, Constants.METADATA_PREV_ROW_COLUMN.getColumnFamily(), Constants.METADATA_PREV_ROW_COLUMN.getColumnQualifier(),
+    Key tablePrevRowKey = new Key(tableExtent, MetadataTable.PREV_ROW_COLUMN.getColumnFamily(), MetadataTable.PREV_ROW_COLUMN.getColumnQualifier(),
         0);
-    mfw.append(tablePrevRowKey, KeyExtent.encodePrevEndRow(new Text(KeyExtent.getMetadataEntry(new Text(Constants.METADATA_TABLE_ID), null))));
+    mfw.append(tablePrevRowKey, KeyExtent.encodePrevEndRow(new Text(KeyExtent.getMetadataEntry(new Text(MetadataTable.ID), null))));
     
     // ----------] default tablet info
-    Text defaultExtent = new Text(KeyExtent.getMetadataEntry(new Text(Constants.METADATA_TABLE_ID), null));
+    Text defaultExtent = new Text(KeyExtent.getMetadataEntry(new Text(MetadataTable.ID), null));
     
     // default's directory
-    Key defaultDirKey = new Key(defaultExtent, Constants.METADATA_DIRECTORY_COLUMN.getColumnFamily(),
-        Constants.METADATA_DIRECTORY_COLUMN.getColumnQualifier(), 0);
+    Key defaultDirKey = new Key(defaultExtent, MetadataTable.DIRECTORY_COLUMN.getColumnFamily(),
+        MetadataTable.DIRECTORY_COLUMN.getColumnQualifier(), 0);
     mfw.append(defaultDirKey, new Value(Constants.DEFAULT_TABLET_LOCATION.getBytes()));
     
     // default's time
-    Key defaultTimeKey = new Key(defaultExtent, Constants.METADATA_TIME_COLUMN.getColumnFamily(), Constants.METADATA_TIME_COLUMN.getColumnQualifier(), 0);
+    Key defaultTimeKey = new Key(defaultExtent, MetadataTable.TIME_COLUMN.getColumnFamily(), MetadataTable.TIME_COLUMN.getColumnQualifier(), 0);
     mfw.append(defaultTimeKey, new Value((TabletTime.LOGICAL_TIME_ID + "0").getBytes()));
     
     // default's prevrow
-    Key defaultPrevRowKey = new Key(defaultExtent, Constants.METADATA_PREV_ROW_COLUMN.getColumnFamily(),
-        Constants.METADATA_PREV_ROW_COLUMN.getColumnQualifier(), 0);
-    mfw.append(defaultPrevRowKey, KeyExtent.encodePrevEndRow(Constants.METADATA_RESERVED_KEYSPACE_START_KEY.getRow()));
+    Key defaultPrevRowKey = new Key(defaultExtent, MetadataTable.PREV_ROW_COLUMN.getColumnFamily(),
+        MetadataTable.PREV_ROW_COLUMN.getColumnQualifier(), 0);
+    mfw.append(defaultPrevRowKey, KeyExtent.encodePrevEndRow(MetadataTable.RESERVED_KEYSPACE_START_KEY.getRow()));
     
     mfw.close();
     
@@ -373,11 +372,11 @@ public class Initialize {
     String zkInstanceRoot = Constants.ZROOT + "/" + uuid;
     zoo.putPersistentData(zkInstanceRoot, new byte[0], NodeExistsPolicy.FAIL);
     zoo.putPersistentData(zkInstanceRoot + Constants.ZTABLES, Constants.ZTABLES_INITIAL_ID, NodeExistsPolicy.FAIL);
-    TableManager.prepareNewTableState(uuid, Constants.METADATA_TABLE_ID, Constants.METADATA_TABLE_NAME, TableState.ONLINE, NodeExistsPolicy.FAIL);
+    TableManager.prepareNewTableState(uuid, MetadataTable.ID, MetadataTable.NAME, TableState.ONLINE, NodeExistsPolicy.FAIL);
     zoo.putPersistentData(zkInstanceRoot + Constants.ZTSERVERS, new byte[0], NodeExistsPolicy.FAIL);
     zoo.putPersistentData(zkInstanceRoot + Constants.ZPROBLEMS, new byte[0], NodeExistsPolicy.FAIL);
-    zoo.putPersistentData(zkInstanceRoot + Constants.ZROOT_TABLET, new byte[0], NodeExistsPolicy.FAIL);
-    zoo.putPersistentData(zkInstanceRoot + Constants.ZROOT_TABLET_WALOGS, new byte[0], NodeExistsPolicy.FAIL);
+    zoo.putPersistentData(zkInstanceRoot + RootTable.ZROOT_TABLET, new byte[0], NodeExistsPolicy.FAIL);
+    zoo.putPersistentData(zkInstanceRoot + RootTable.ZROOT_TABLET_WALOGS, new byte[0], NodeExistsPolicy.FAIL);
     zoo.putPersistentData(zkInstanceRoot + Constants.ZTRACERS, new byte[0], NodeExistsPolicy.FAIL);
     zoo.putPersistentData(zkInstanceRoot + Constants.ZMASTERS, new byte[0], NodeExistsPolicy.FAIL);
     zoo.putPersistentData(zkInstanceRoot + Constants.ZMASTER_LOCK, new byte[0], NodeExistsPolicy.FAIL);
@@ -458,7 +457,7 @@ public class Initialize {
       if (min > 5)
         setMetadataReplication(min, "min");
       for (Entry<String,String> entry : initialMetadataConf.entrySet())
-        if (!TablePropUtil.setTableProperty(Constants.METADATA_TABLE_ID, entry.getKey(), entry.getValue()))
+        if (!TablePropUtil.setTableProperty(MetadataTable.ID, entry.getKey(), entry.getValue()))
           throw new IOException("Cannot create per-table property " + entry.getKey());
     } catch (Exception e) {
       log.fatal("error talking to zookeeper", e);
@@ -468,8 +467,8 @@ public class Initialize {
   
   private static void setMetadataReplication(int replication, String reason) throws IOException {
     String rep = getConsoleReader().readLine(
-        "Your HDFS replication " + reason + " is not compatible with our default " + Constants.METADATA_TABLE_NAME
-            + " replication of 5. What do you want to set your " + Constants.METADATA_TABLE_NAME + " replication to? (" + replication + ") ");
+        "Your HDFS replication " + reason + " is not compatible with our default " + MetadataTable.NAME + " replication of 5. What do you want to set your "
+            + MetadataTable.NAME + " replication to? (" + replication + ") ");
     if (rep == null || rep.length() == 0)
       rep = Integer.toString(replication);
     else

Modified: accumulo/branches/ACCUMULO-118/server/src/main/java/org/apache/accumulo/server/util/LocalityCheck.java
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-118/server/src/main/java/org/apache/accumulo/server/util/LocalityCheck.java?rev=1494671&r1=1494670&r2=1494671&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-118/server/src/main/java/org/apache/accumulo/server/util/LocalityCheck.java (original)
+++ accumulo/branches/ACCUMULO-118/server/src/main/java/org/apache/accumulo/server/util/LocalityCheck.java Wed Jun 19 15:57:11 2013
@@ -22,12 +22,12 @@ import java.util.HashMap;
 import java.util.Map;
 import java.util.Map.Entry;
 
-import org.apache.accumulo.core.Constants;
 import org.apache.accumulo.core.client.Connector;
 import org.apache.accumulo.core.client.Scanner;
 import org.apache.accumulo.core.data.Key;
 import org.apache.accumulo.core.data.Value;
 import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.core.util.MetadataTable;
 import org.apache.accumulo.server.cli.ClientOpts;
 import org.apache.accumulo.server.fs.VolumeManager;
 import org.apache.accumulo.server.fs.VolumeManagerImpl;
@@ -44,10 +44,10 @@ public class LocalityCheck {
     
     VolumeManager fs = VolumeManagerImpl.get();
     Connector connector = opts.getConnector();
-    Scanner scanner = connector.createScanner(Constants.METADATA_TABLE_NAME, Authorizations.EMPTY);
-    scanner.fetchColumnFamily(Constants.METADATA_CURRENT_LOCATION_COLUMN_FAMILY);
-    scanner.fetchColumnFamily(Constants.METADATA_DATAFILE_COLUMN_FAMILY);
-    scanner.setRange(Constants.METADATA_KEYSPACE);
+    Scanner scanner = connector.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
+    scanner.fetchColumnFamily(MetadataTable.CURRENT_LOCATION_COLUMN_FAMILY);
+    scanner.fetchColumnFamily(MetadataTable.DATAFILE_COLUMN_FAMILY);
+    scanner.setRange(MetadataTable.KEYSPACE);
     
     Map<String,Long> totalBlocks = new HashMap<String,Long>();
     Map<String,Long> localBlocks = new HashMap<String,Long>();
@@ -55,13 +55,13 @@ public class LocalityCheck {
     
     for (Entry<Key,Value> entry : scanner) {
       Key key = entry.getKey();
-      if (key.compareColumnFamily(Constants.METADATA_CURRENT_LOCATION_COLUMN_FAMILY) == 0) {
+      if (key.compareColumnFamily(MetadataTable.CURRENT_LOCATION_COLUMN_FAMILY) == 0) {
         String location = entry.getValue().toString();
         String[] parts = location.split(":");
         String host = parts[0];
         addBlocks(fs, host, files, totalBlocks, localBlocks);
         files.clear();
-      } else if (key.compareColumnFamily(Constants.METADATA_DATAFILE_COLUMN_FAMILY) == 0) {
+      } else if (key.compareColumnFamily(MetadataTable.DATAFILE_COLUMN_FAMILY) == 0) {
         
         files.add(fs.getFullPath(key).toString());
       }

Modified: accumulo/branches/ACCUMULO-118/server/src/main/java/org/apache/accumulo/server/util/MetadataTable.java
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-118/server/src/main/java/org/apache/accumulo/server/util/MetadataTable.java?rev=1494671&r1=1494670&r2=1494671&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-118/server/src/main/java/org/apache/accumulo/server/util/MetadataTable.java (original)
+++ accumulo/branches/ACCUMULO-118/server/src/main/java/org/apache/accumulo/server/util/MetadataTable.java Wed Jun 19 15:57:11 2013
@@ -33,7 +33,6 @@ import java.util.SortedMap;
 import java.util.TreeMap;
 import java.util.concurrent.TimeUnit;
 
-import org.apache.accumulo.core.Constants;
 import org.apache.accumulo.core.client.AccumuloException;
 import org.apache.accumulo.core.client.AccumuloSecurityException;
 import org.apache.accumulo.core.client.BatchWriter;
@@ -61,6 +60,7 @@ import org.apache.accumulo.core.tabletse
 import org.apache.accumulo.core.util.ColumnFQ;
 import org.apache.accumulo.core.util.FastFormat;
 import org.apache.accumulo.core.util.Pair;
+import org.apache.accumulo.core.util.RootTable;
 import org.apache.accumulo.core.util.StringUtil;
 import org.apache.accumulo.core.util.UtilWaitThread;
 import org.apache.accumulo.core.zookeeper.ZooUtil;
@@ -102,14 +102,14 @@ public class MetadataTable extends org.a
   public synchronized static Writer getMetadataTable(TCredentials credentials) {
     Writer metadataTable = metadata_tables.get(credentials);
     if (metadataTable == null) {
-      metadataTable = new Writer(HdfsZooInstance.getInstance(), credentials, Constants.METADATA_TABLE_ID);
+      metadataTable = new Writer(HdfsZooInstance.getInstance(), credentials, ID);
       metadata_tables.put(credentials, metadataTable);
     }
     return metadataTable;
   }
   
   public static void putLockID(ZooLock zooLock, Mutation m) {
-    Constants.METADATA_LOCK_COLUMN.put(m, new Value(zooLock.getLockID().serialize(ZooUtil.getRoot(HdfsZooInstance.getInstance()) + "/").getBytes()));
+    LOCK_COLUMN.put(m, new Value(zooLock.getLockID().serialize(ZooUtil.getRoot(HdfsZooInstance.getInstance()) + "/").getBytes()));
   }
   
   public static void update(TCredentials credentials, Mutation m) {
@@ -152,7 +152,7 @@ public class MetadataTable extends org.a
    */
   public static void updateTabletDataFile(KeyExtent extent, FileRef path, FileRef mergeFile, DataFileValue dfv, String time, TCredentials credentials,
       Set<FileRef> filesInUseByScans, String address, ZooLock zooLock, Set<String> unusedWalLogs, TServerInstance lastLocation, long flushId) {
-    if (extent.equals(Constants.ROOT_TABLET_EXTENT)) {
+    if (extent.equals(RootTable.ROOT_TABLET_EXTENT)) {
       if (unusedWalLogs != null) {
         IZooReaderWriter zk = ZooReaderWriter.getInstance();
         // unusedWalLogs will contain the location/name of each log in a log set
@@ -187,8 +187,8 @@ public class MetadataTable extends org.a
     Mutation m = new Mutation(extent.getMetadataEntry());
     
     if (dfv.getNumEntries() > 0) {
-      m.put(Constants.METADATA_DATAFILE_COLUMN_FAMILY, path.meta(), new Value(dfv.encode()));
-      Constants.METADATA_TIME_COLUMN.put(m, new Value(time.getBytes()));
+      m.put(DATAFILE_COLUMN_FAMILY, path.meta(), new Value(dfv.encode()));
+      TIME_COLUMN.put(m, new Value(time.getBytes()));
       // stuff in this location
       TServerInstance self = getTServerInstance(address, zooLock);
       self.putLastLocation(m);
@@ -198,17 +198,17 @@ public class MetadataTable extends org.a
     }
     if (unusedWalLogs != null) {
       for (String entry : unusedWalLogs) {
-        m.putDelete(Constants.METADATA_LOG_COLUMN_FAMILY, new Text(entry));
+        m.putDelete(LOG_COLUMN_FAMILY, new Text(entry));
       }
     }
     
     for (FileRef scanFile : filesInUseByScans)
-      m.put(Constants.METADATA_SCANFILE_COLUMN_FAMILY, scanFile.meta(), new Value("".getBytes()));
+      m.put(SCANFILE_COLUMN_FAMILY, scanFile.meta(), new Value("".getBytes()));
     
     if (mergeFile != null)
-      m.putDelete(Constants.METADATA_DATAFILE_COLUMN_FAMILY, mergeFile.meta());
+      m.putDelete(DATAFILE_COLUMN_FAMILY, mergeFile.meta());
     
-    Constants.METADATA_FLUSH_COLUMN.put(m, new Value((flushId + "").getBytes()));
+    FLUSH_COLUMN.put(m, new Value((flushId + "").getBytes()));
     
     update(credentials, zooLock, m);
     
@@ -230,7 +230,7 @@ public class MetadataTable extends org.a
   public static void updateTabletFlushID(KeyExtent extent, long flushID, TCredentials credentials, ZooLock zooLock) {
     if (!extent.isRootTablet()) {
       Mutation m = new Mutation(extent.getMetadataEntry());
-      Constants.METADATA_FLUSH_COLUMN.put(m, new Value((flushID + "").getBytes()));
+      FLUSH_COLUMN.put(m, new Value((flushID + "").getBytes()));
       update(credentials, zooLock, m);
     }
   }
@@ -238,7 +238,7 @@ public class MetadataTable extends org.a
   public static void updateTabletCompactID(KeyExtent extent, long compactID, TCredentials credentials, ZooLock zooLock) {
     if (!extent.isRootTablet()) {
       Mutation m = new Mutation(extent.getMetadataEntry());
-      Constants.METADATA_COMPACT_COLUMN.put(m, new Value((compactID + "").getBytes()));
+      COMPACT_COLUMN.put(m, new Value((compactID + "").getBytes()));
       update(credentials, zooLock, m);
     }
   }
@@ -249,18 +249,18 @@ public class MetadataTable extends org.a
     
     for (Entry<FileRef,DataFileValue> entry : estSizes.entrySet()) {
       Text file = entry.getKey().meta();
-      m.put(Constants.METADATA_DATAFILE_COLUMN_FAMILY, file, new Value(entry.getValue().encode()));
-      m.put(Constants.METADATA_BULKFILE_COLUMN_FAMILY, file, new Value(tidBytes));
+      m.put(DATAFILE_COLUMN_FAMILY, file, new Value(entry.getValue().encode()));
+      m.put(BULKFILE_COLUMN_FAMILY, file, new Value(tidBytes));
     }
-    Constants.METADATA_TIME_COLUMN.put(m, new Value(time.getBytes()));
+    TIME_COLUMN.put(m, new Value(time.getBytes()));
     update(credentials, zooLock, m);
   }
   
   public static void addTablet(KeyExtent extent, String path, TCredentials credentials, char timeType, ZooLock lock) {
     Mutation m = extent.getPrevRowUpdateMutation();
     
-    Constants.METADATA_DIRECTORY_COLUMN.put(m, new Value(path.getBytes()));
-    Constants.METADATA_TIME_COLUMN.put(m, new Value((timeType + "0").getBytes()));
+    DIRECTORY_COLUMN.put(m, new Value(path.getBytes()));
+    TIME_COLUMN.put(m, new Value((timeType + "0").getBytes()));
     
     update(credentials, lock, m);
   }
@@ -302,10 +302,10 @@ public class MetadataTable extends org.a
       colq = key.getColumnQualifier(colq);
       
       // interpret the row id as a key extent
-      if (Constants.METADATA_DIRECTORY_COLUMN.equals(colf, colq))
+      if (DIRECTORY_COLUMN.equals(colf, colq))
         datafile = new Text(val.toString());
       
-      else if (Constants.METADATA_PREV_ROW_COLUMN.equals(colf, colq))
+      else if (PREV_ROW_COLUMN.equals(colf, colq))
         prevRow = new Value(val);
       
       if (datafile != null && prevRow != null) {
@@ -324,7 +324,7 @@ public class MetadataTable extends org.a
     for (int i = 0; i < SAVE_ROOT_TABLET_RETRIES; i++) {
       try {
         log.info("trying to write root tablet location to ZooKeeper as " + address);
-        String zRootLocPath = ZooUtil.getRoot(HdfsZooInstance.getInstance()) + Constants.ZROOT_TABLET_LOCATION;
+        String zRootLocPath = ZooUtil.getRoot(HdfsZooInstance.getInstance()) + RootTable.ZROOT_TABLET_LOCATION;
         zoo.putPersistentData(zRootLocPath, address.getBytes(), NodeExistsPolicy.OVERWRITE);
         return true;
       } catch (Exception e) {
@@ -338,12 +338,12 @@ public class MetadataTable extends org.a
   public static SortedMap<FileRef,DataFileValue> getDataFileSizes(KeyExtent extent, TCredentials credentials) throws IOException {
     TreeMap<FileRef,DataFileValue> sizes = new TreeMap<FileRef,DataFileValue>();
     
-    Scanner mdScanner = new ScannerImpl(HdfsZooInstance.getInstance(), credentials, Constants.METADATA_TABLE_ID, Authorizations.EMPTY);
-    mdScanner.fetchColumnFamily(Constants.METADATA_DATAFILE_COLUMN_FAMILY);
+    Scanner mdScanner = new ScannerImpl(HdfsZooInstance.getInstance(), credentials, ID, Authorizations.EMPTY);
+    mdScanner.fetchColumnFamily(DATAFILE_COLUMN_FAMILY);
     Text row = extent.getMetadataEntry();
     VolumeManager fs = VolumeManagerImpl.get();
     
-    Key endKey = new Key(row, Constants.METADATA_DATAFILE_COLUMN_FAMILY, new Text(""));
+    Key endKey = new Key(row, DATAFILE_COLUMN_FAMILY, new Text(""));
     endKey = endKey.followingKey(PartialKey.ROW_COLFAM);
     
     mdScanner.setRange(new Range(new Key(row), endKey));
@@ -362,25 +362,25 @@ public class MetadataTable extends org.a
       Map<FileRef,Long> bulkLoadedFiles, TCredentials credentials, String time, long lastFlushID, long lastCompactID, ZooLock zooLock) {
     Mutation m = extent.getPrevRowUpdateMutation();
     
-    Constants.METADATA_DIRECTORY_COLUMN.put(m, new Value(path.getBytes()));
-    Constants.METADATA_TIME_COLUMN.put(m, new Value(time.getBytes()));
+    DIRECTORY_COLUMN.put(m, new Value(path.getBytes()));
+    TIME_COLUMN.put(m, new Value(time.getBytes()));
     if (lastFlushID > 0)
-      Constants.METADATA_FLUSH_COLUMN.put(m, new Value(("" + lastFlushID).getBytes()));
+      FLUSH_COLUMN.put(m, new Value(("" + lastFlushID).getBytes()));
     if (lastCompactID > 0)
-      Constants.METADATA_COMPACT_COLUMN.put(m, new Value(("" + lastCompactID).getBytes()));
+      COMPACT_COLUMN.put(m, new Value(("" + lastCompactID).getBytes()));
     
     if (location != null) {
-      m.put(Constants.METADATA_CURRENT_LOCATION_COLUMN_FAMILY, location.asColumnQualifier(), location.asMutationValue());
-      m.putDelete(Constants.METADATA_FUTURE_LOCATION_COLUMN_FAMILY, location.asColumnQualifier());
+      m.put(CURRENT_LOCATION_COLUMN_FAMILY, location.asColumnQualifier(), location.asMutationValue());
+      m.putDelete(FUTURE_LOCATION_COLUMN_FAMILY, location.asColumnQualifier());
     }
     
     for (Entry<FileRef,DataFileValue> entry : datafileSizes.entrySet()) {
-      m.put(Constants.METADATA_DATAFILE_COLUMN_FAMILY, entry.getKey().meta(), new Value(entry.getValue().encode()));
+      m.put(DATAFILE_COLUMN_FAMILY, entry.getKey().meta(), new Value(entry.getValue().encode()));
     }
     
     for (Entry<FileRef,Long> entry : bulkLoadedFiles.entrySet()) {
       byte[] tidBytes = Long.toString(entry.getValue()).getBytes();
-      m.put(Constants.METADATA_BULKFILE_COLUMN_FAMILY, entry.getKey().meta(), new Value(tidBytes));
+      m.put(BULKFILE_COLUMN_FAMILY, entry.getKey().meta(), new Value(tidBytes));
     }
     
     update(credentials, zooLock, m);
@@ -389,34 +389,34 @@ public class MetadataTable extends org.a
   public static void rollBackSplit(Text metadataEntry, Text oldPrevEndRow, TCredentials credentials, ZooLock zooLock) {
     KeyExtent ke = new KeyExtent(metadataEntry, oldPrevEndRow);
     Mutation m = ke.getPrevRowUpdateMutation();
-    Constants.METADATA_SPLIT_RATIO_COLUMN.putDelete(m);
-    Constants.METADATA_OLD_PREV_ROW_COLUMN.putDelete(m);
+    SPLIT_RATIO_COLUMN.putDelete(m);
+    OLD_PREV_ROW_COLUMN.putDelete(m);
     update(credentials, zooLock, m);
   }
   
   public static void splitTablet(KeyExtent extent, Text oldPrevEndRow, double splitRatio, TCredentials credentials, ZooLock zooLock) {
     Mutation m = extent.getPrevRowUpdateMutation(); //
     
-    Constants.METADATA_SPLIT_RATIO_COLUMN.put(m, new Value(Double.toString(splitRatio).getBytes()));
+    SPLIT_RATIO_COLUMN.put(m, new Value(Double.toString(splitRatio).getBytes()));
     
-    Constants.METADATA_OLD_PREV_ROW_COLUMN.put(m, KeyExtent.encodePrevEndRow(oldPrevEndRow));
-    Constants.METADATA_CHOPPED_COLUMN.putDelete(m);
+    OLD_PREV_ROW_COLUMN.put(m, KeyExtent.encodePrevEndRow(oldPrevEndRow));
+    CHOPPED_COLUMN.putDelete(m);
     update(credentials, zooLock, m);
   }
   
   public static void finishSplit(Text metadataEntry, Map<FileRef,DataFileValue> datafileSizes, List<FileRef> highDatafilesToRemove, TCredentials credentials,
       ZooLock zooLock) {
     Mutation m = new Mutation(metadataEntry);
-    Constants.METADATA_SPLIT_RATIO_COLUMN.putDelete(m);
-    Constants.METADATA_OLD_PREV_ROW_COLUMN.putDelete(m);
-    Constants.METADATA_CHOPPED_COLUMN.putDelete(m);
+    SPLIT_RATIO_COLUMN.putDelete(m);
+    OLD_PREV_ROW_COLUMN.putDelete(m);
+    CHOPPED_COLUMN.putDelete(m);
     
     for (Entry<FileRef,DataFileValue> entry : datafileSizes.entrySet()) {
-      m.put(Constants.METADATA_DATAFILE_COLUMN_FAMILY, entry.getKey().meta(), new Value(entry.getValue().encode()));
+      m.put(DATAFILE_COLUMN_FAMILY, entry.getKey().meta(), new Value(entry.getValue().encode()));
     }
     
     for (FileRef pathToRemove : highDatafilesToRemove) {
-      m.putDelete(Constants.METADATA_DATAFILE_COLUMN_FAMILY, pathToRemove.meta());
+      m.putDelete(DATAFILE_COLUMN_FAMILY, pathToRemove.meta());
     }
     
     update(credentials, zooLock, m);
@@ -444,16 +444,16 @@ public class MetadataTable extends org.a
     Mutation m = new Mutation(extent.getMetadataEntry());
     
     for (FileRef pathToRemove : datafilesToDelete)
-      m.putDelete(Constants.METADATA_DATAFILE_COLUMN_FAMILY, pathToRemove.meta());
+      m.putDelete(DATAFILE_COLUMN_FAMILY, pathToRemove.meta());
     
     for (FileRef scanFile : scanFiles)
-      m.put(Constants.METADATA_SCANFILE_COLUMN_FAMILY, scanFile.meta(), new Value("".getBytes()));
+      m.put(SCANFILE_COLUMN_FAMILY, scanFile.meta(), new Value("".getBytes()));
     
     if (size.getNumEntries() > 0)
-      m.put(Constants.METADATA_DATAFILE_COLUMN_FAMILY, path.meta(), new Value(size.encode()));
+      m.put(DATAFILE_COLUMN_FAMILY, path.meta(), new Value(size.encode()));
     
     if (compactionId != null)
-      Constants.METADATA_COMPACT_COLUMN.put(m, new Value(("" + compactionId).getBytes()));
+      COMPACT_COLUMN.put(m, new Value(("" + compactionId).getBytes()));
     
     TServerInstance self = getTServerInstance(address, zooLock);
     self.putLastLocation(m);
@@ -480,9 +480,9 @@ public class MetadataTable extends org.a
   }
   
   public static Mutation createDeleteMutation(String tableId, String pathToRemove) throws IOException {
-    String prefix = Constants.METADATA_DELETE_FLAG_PREFIX;
-    if (tableId.equals(Constants.METADATA_TABLE_ID))
-      prefix = Constants.METADATA_DELETE_FLAG_FOR_METADATA_PREFIX;
+    String prefix = DELETE_FLAG_PREFIX;
+    if (tableId.equals(ID))
+      prefix = RootTable.DELETE_FLAG_PREFIX;
     
     if (!pathToRemove.contains(":")) {
       if (pathToRemove.startsWith("../"))
@@ -501,7 +501,7 @@ public class MetadataTable extends org.a
     Mutation m = new Mutation(extent.getMetadataEntry());
     
     for (FileRef pathToRemove : scanFiles)
-      m.putDelete(Constants.METADATA_SCANFILE_COLUMN_FAMILY, pathToRemove.meta());
+      m.putDelete(SCANFILE_COLUMN_FAMILY, pathToRemove.meta());
     
     update(credentials, zooLock, m);
   }
@@ -516,7 +516,7 @@ public class MetadataTable extends org.a
    // check to see if the prev tablet exists in the metadata tablet
     Key prevRowKey = new Key(new Text(KeyExtent.getMetadataEntry(table, metadataPrevEndRow)));
     
-    ScannerImpl scanner2 = new ScannerImpl(HdfsZooInstance.getInstance(), credentials, Constants.METADATA_TABLE_ID, Authorizations.EMPTY);
+    ScannerImpl scanner2 = new ScannerImpl(HdfsZooInstance.getInstance(), credentials, ID, Authorizations.EMPTY);
     scanner2.setRange(new Range(prevRowKey, prevRowKey.followingKey(PartialKey.ROW)));
     
     VolumeManager fs = VolumeManagerImpl.get();
@@ -529,17 +529,17 @@ public class MetadataTable extends org.a
       
       List<FileRef> highDatafilesToRemove = new ArrayList<FileRef>();
       
-      Scanner scanner3 = new ScannerImpl(HdfsZooInstance.getInstance(), credentials, Constants.METADATA_TABLE_ID, Authorizations.EMPTY);
+      Scanner scanner3 = new ScannerImpl(HdfsZooInstance.getInstance(), credentials, ID, Authorizations.EMPTY);
       Key rowKey = new Key(metadataEntry);
       
       SortedMap<FileRef,DataFileValue> origDatafileSizes = new TreeMap<FileRef,DataFileValue>();
       SortedMap<FileRef,DataFileValue> highDatafileSizes = new TreeMap<FileRef,DataFileValue>();
       SortedMap<FileRef,DataFileValue> lowDatafileSizes = new TreeMap<FileRef,DataFileValue>();
-      scanner3.fetchColumnFamily(Constants.METADATA_DATAFILE_COLUMN_FAMILY);
+      scanner3.fetchColumnFamily(DATAFILE_COLUMN_FAMILY);
       scanner3.setRange(new Range(rowKey, rowKey.followingKey(PartialKey.ROW)));
       
       for (Entry<Key,Value> entry : scanner3) {
-        if (entry.getKey().compareColumnFamily(Constants.METADATA_DATAFILE_COLUMN_FAMILY) == 0) {
+        if (entry.getKey().compareColumnFamily(DATAFILE_COLUMN_FAMILY) == 0) {
           origDatafileSizes.put(new FileRef(fs, entry.getKey()), new DataFileValue(entry.getValue().get()));
         }
       }
@@ -601,32 +601,32 @@ public class MetadataTable extends org.a
       throws AccumuloException, IOException {
     log.info("Incomplete split " + metadataEntry + " attempting to fix");
     
-    Value oper = columns.get(Constants.METADATA_OLD_PREV_ROW_COLUMN);
+    Value oper = columns.get(OLD_PREV_ROW_COLUMN);
     
-    if (columns.get(Constants.METADATA_SPLIT_RATIO_COLUMN) == null) {
+    if (columns.get(SPLIT_RATIO_COLUMN) == null) {
       throw new IllegalArgumentException("Metadata entry does not have split ratio (" + metadataEntry + ")");
     }
     
-    double splitRatio = Double.parseDouble(new String(columns.get(Constants.METADATA_SPLIT_RATIO_COLUMN).get()));
+    double splitRatio = Double.parseDouble(new String(columns.get(SPLIT_RATIO_COLUMN).get()));
     
-    Value prevEndRowIBW = columns.get(Constants.METADATA_PREV_ROW_COLUMN);
+    Value prevEndRowIBW = columns.get(PREV_ROW_COLUMN);
     
     if (prevEndRowIBW == null) {
       throw new IllegalArgumentException("Metadata entry does not have prev row (" + metadataEntry + ")");
     }
     
-    Value time = columns.get(Constants.METADATA_TIME_COLUMN);
+    Value time = columns.get(TIME_COLUMN);
     
     if (time == null) {
       throw new IllegalArgumentException("Metadata entry does not have time (" + metadataEntry + ")");
     }
     
-    Value flushID = columns.get(Constants.METADATA_FLUSH_COLUMN);
+    Value flushID = columns.get(FLUSH_COLUMN);
     long initFlushID = -1;
     if (flushID != null)
       initFlushID = Long.parseLong(flushID.toString());
     
-    Value compactID = columns.get(Constants.METADATA_COMPACT_COLUMN);
+    Value compactID = columns.get(COMPACT_COLUMN);
     long initCompactID = -1;
     if (compactID != null)
       initCompactID = Long.parseLong(compactID.toString());
@@ -639,9 +639,9 @@ public class MetadataTable extends org.a
   }
   
   public static void deleteTable(String tableId, boolean insertDeletes, TCredentials credentials, ZooLock lock) throws AccumuloException, IOException {
-    Scanner ms = new ScannerImpl(HdfsZooInstance.getInstance(), credentials, Constants.METADATA_TABLE_ID, Authorizations.EMPTY);
+    Scanner ms = new ScannerImpl(HdfsZooInstance.getInstance(), credentials, ID, Authorizations.EMPTY);
     Text tableIdText = new Text(tableId);
-    BatchWriter bw = new BatchWriterImpl(HdfsZooInstance.getInstance(), credentials, Constants.METADATA_TABLE_ID, new BatchWriterConfig().setMaxMemory(1000000)
+    BatchWriter bw = new BatchWriterImpl(HdfsZooInstance.getInstance(), credentials, ID, new BatchWriterConfig().setMaxMemory(1000000)
         .setMaxLatency(120000l, TimeUnit.MILLISECONDS).setMaxWriteThreads(2));
     
     // scan metadata for our table and delete everything we find
@@ -651,18 +651,18 @@ public class MetadataTable extends org.a
     // insert deletes before deleting data from !METADATA... this makes the code fault tolerant
     if (insertDeletes) {
       
-      ms.fetchColumnFamily(Constants.METADATA_DATAFILE_COLUMN_FAMILY);
-      Constants.METADATA_DIRECTORY_COLUMN.fetch(ms);
+      ms.fetchColumnFamily(DATAFILE_COLUMN_FAMILY);
+      DIRECTORY_COLUMN.fetch(ms);
       
       for (Entry<Key,Value> cell : ms) {
         Key key = cell.getKey();
         
-        if (key.getColumnFamily().equals(Constants.METADATA_DATAFILE_COLUMN_FAMILY)) {
+        if (key.getColumnFamily().equals(DATAFILE_COLUMN_FAMILY)) {
           FileRef ref = new FileRef(VolumeManagerImpl.get(), key);
           bw.addMutation(createDeleteMutation(tableId, ref.meta().toString()));
         }
         
-        if (Constants.METADATA_DIRECTORY_COLUMN.hasColumns(key)) {
+        if (DIRECTORY_COLUMN.hasColumns(key)) {
           bw.addMutation(createDeleteMutation(tableId, cell.getValue().toString()));
         }
       }
@@ -746,7 +746,7 @@ public class MetadataTable extends org.a
   }
   
   private static String getZookeeperLogLocation() {
-    return ZooUtil.getRoot(HdfsZooInstance.getInstance()) + Constants.ZROOT_TABLET_WALOGS;
+    return ZooUtil.getRoot(HdfsZooInstance.getInstance()) + RootTable.ZROOT_TABLET_WALOGS;
   }
   
   public static void addLogEntry(TCredentials credentials, LogEntry entry, ZooLock zooLock) {
@@ -773,7 +773,7 @@ public class MetadataTable extends org.a
     } else {
       String value = StringUtil.join(entry.logSet, ";") + "|" + entry.tabletId;
       Mutation m = new Mutation(entry.extent.getMetadataEntry());
-      m.put(Constants.METADATA_LOG_COLUMN_FAMILY, new Text(entry.server + "/" + entry.filename), new Value(value.getBytes()));
+      m.put(LOG_COLUMN_FAMILY, new Text(entry.server + "/" + entry.filename), new Value(value.getBytes()));
       update(credentials, zooLock, m);
     }
   }
@@ -811,9 +811,9 @@ public class MetadataTable extends org.a
       }
       
     } else {
-      Scanner scanner = new ScannerImpl(HdfsZooInstance.getInstance(), credentials, Constants.METADATA_TABLE_ID, Authorizations.EMPTY);
-      scanner.fetchColumnFamily(Constants.METADATA_LOG_COLUMN_FAMILY);
-      scanner.fetchColumnFamily(Constants.METADATA_DATAFILE_COLUMN_FAMILY);
+      Scanner scanner = new ScannerImpl(HdfsZooInstance.getInstance(), credentials, ID, Authorizations.EMPTY);
+      scanner.fetchColumnFamily(LOG_COLUMN_FAMILY);
+      scanner.fetchColumnFamily(DATAFILE_COLUMN_FAMILY);
       scanner.setRange(extent.toMetadataRange());
       
       for (Entry<Key,Value> entry : scanner) {
@@ -821,9 +821,9 @@ public class MetadataTable extends org.a
           throw new RuntimeException("Unexpected row " + entry.getKey().getRow() + " expected " + extent.getMetadataEntry());
         }
         
-        if (entry.getKey().getColumnFamily().equals(Constants.METADATA_LOG_COLUMN_FAMILY)) {
+        if (entry.getKey().getColumnFamily().equals(LOG_COLUMN_FAMILY)) {
           result.add(entryFromKeyValue(entry.getKey(), entry.getValue()));
-        } else if (entry.getKey().getColumnFamily().equals(Constants.METADATA_DATAFILE_COLUMN_FAMILY)) {
+        } else if (entry.getKey().getColumnFamily().equals(DATAFILE_COLUMN_FAMILY)) {
           DataFileValue dfv = new DataFileValue(entry.getValue().get());
           sizes.put(new FileRef(fs, entry.getKey()), dfv);
         } else {
@@ -838,7 +838,7 @@ public class MetadataTable extends org.a
   public static List<LogEntry> getLogEntries(TCredentials credentials, KeyExtent extent) throws IOException, KeeperException, InterruptedException {
     log.info("Scanning logging entries for " + extent);
     ArrayList<LogEntry> result = new ArrayList<LogEntry>();
-    if (extent.equals(Constants.ROOT_TABLET_EXTENT)) {
+    if (extent.equals(RootTable.ROOT_TABLET_EXTENT)) {
       log.info("Getting logs for root tablet from zookeeper");
       getRootLogEntries(result);
     } else {
@@ -847,7 +847,7 @@ public class MetadataTable extends org.a
       Text pattern = extent.getMetadataEntry();
       for (Entry<Key,Value> entry : scanner) {
         Text row = entry.getKey().getRow();
-        if (entry.getKey().getColumnFamily().equals(Constants.METADATA_LOG_COLUMN_FAMILY)) {
+        if (entry.getKey().getColumnFamily().equals(LOG_COLUMN_FAMILY)) {
           if (row.equals(pattern)) {
             result.add(entryFromKeyValue(entry.getKey(), entry.getValue()));
           }
@@ -891,10 +891,10 @@ public class MetadataTable extends org.a
   }
   
   private static Scanner getTabletLogScanner(TCredentials credentials, KeyExtent extent) {
-    Scanner scanner = new ScannerImpl(HdfsZooInstance.getInstance(), credentials, Constants.METADATA_TABLE_ID, Authorizations.EMPTY);
-    scanner.fetchColumnFamily(Constants.METADATA_LOG_COLUMN_FAMILY);
+    Scanner scanner = new ScannerImpl(HdfsZooInstance.getInstance(), credentials, ID, Authorizations.EMPTY);
+    scanner.fetchColumnFamily(LOG_COLUMN_FAMILY);
     Text start = extent.getMetadataEntry();
-    Key endKey = new Key(start, Constants.METADATA_LOG_COLUMN_FAMILY);
+    Key endKey = new Key(start, LOG_COLUMN_FAMILY);
     endKey = endKey.followingKey(PartialKey.ROW_COLFAM);
     scanner.setRange(new Range(new Key(start), endKey));
     return scanner;
@@ -906,13 +906,13 @@ public class MetadataTable extends org.a
     Iterator<Entry<Key,Value>> metadataEntries = null;
     
     LogEntryIterator(TCredentials creds) throws IOException, KeeperException, InterruptedException {
-      rootTabletEntries = getLogEntries(creds, Constants.ROOT_TABLET_EXTENT).iterator();
+      rootTabletEntries = getLogEntries(creds, RootTable.ROOT_TABLET_EXTENT).iterator();
       try {
         Scanner scanner = HdfsZooInstance.getInstance().getConnector(creds.getPrincipal(), CredentialHelper.extractToken(creds))
-            .createScanner(Constants.METADATA_TABLE_NAME, Authorizations.EMPTY);
-        log.info("Setting range to " + Constants.NON_ROOT_METADATA_KEYSPACE);
-        scanner.setRange(Constants.NON_ROOT_METADATA_KEYSPACE);
-        scanner.fetchColumnFamily(Constants.METADATA_LOG_COLUMN_FAMILY);
+            .createScanner(NAME, Authorizations.EMPTY);
+        log.info("Setting range to " + KEYSPACE);
+        scanner.setRange(KEYSPACE);
+        scanner.fetchColumnFamily(LOG_COLUMN_FAMILY);
         metadataEntries = scanner.iterator();
       } catch (Exception ex) {
         throw new IOException(ex);
@@ -930,7 +930,6 @@ public class MetadataTable extends org.a
         return rootTabletEntries.next();
       }
       Entry<Key,Value> entry = metadataEntries.next();
-      log.info("entry " + entry + " in range " + Constants.NON_ROOT_METADATA_KEYSPACE.contains(entry.getKey()));
       return entryFromKeyValue(entry.getKey(), entry.getValue());
     }
     
@@ -961,7 +960,7 @@ public class MetadataTable extends org.a
         }
       } else {
         Mutation m = new Mutation(entry.extent.getMetadataEntry());
-        m.putDelete(Constants.METADATA_LOG_COLUMN_FAMILY, new Text(entry.server + "/" + entry.filename));
+        m.putDelete(LOG_COLUMN_FAMILY, new Text(entry.server + "/" + entry.filename));
         update(SecurityConstants.getSystemCredentials(), zooLock, m);
       }
     }
@@ -969,7 +968,7 @@ public class MetadataTable extends org.a
   
   private static void getFiles(Set<String> files, Map<Key,Value> tablet, String srcTableId) {
     for (Entry<Key,Value> entry : tablet.entrySet()) {
-      if (entry.getKey().getColumnFamily().equals(Constants.METADATA_DATAFILE_COLUMN_FAMILY)) {
+      if (entry.getKey().getColumnFamily().equals(DATAFILE_COLUMN_FAMILY)) {
         String cf = entry.getKey().getColumnQualifier().toString();
         if (srcTableId != null && !cf.startsWith("../") && !cf.contains(":")) {
           cf = "../" + srcTableId + entry.getKey().getColumnQualifier();
@@ -985,14 +984,14 @@ public class MetadataTable extends org.a
     Mutation m = new Mutation(KeyExtent.getMetadataEntry(new Text(tableId), ke.getEndRow()));
     
     for (Entry<Key,Value> entry : tablet.entrySet()) {
-      if (entry.getKey().getColumnFamily().equals(Constants.METADATA_DATAFILE_COLUMN_FAMILY)) {
+      if (entry.getKey().getColumnFamily().equals(DATAFILE_COLUMN_FAMILY)) {
         String cf = entry.getKey().getColumnQualifier().toString();
         if (!cf.startsWith("../") && !cf.contains(":"))
           cf = "../" + srcTableId + entry.getKey().getColumnQualifier();
         m.put(entry.getKey().getColumnFamily(), new Text(cf), entry.getValue());
-      } else if (entry.getKey().getColumnFamily().equals(Constants.METADATA_CURRENT_LOCATION_COLUMN_FAMILY)) {
-        m.put(Constants.METADATA_LAST_LOCATION_COLUMN_FAMILY, entry.getKey().getColumnQualifier(), entry.getValue());
-      } else if (entry.getKey().getColumnFamily().equals(Constants.METADATA_LAST_LOCATION_COLUMN_FAMILY)) {
+      } else if (entry.getKey().getColumnFamily().equals(CURRENT_LOCATION_COLUMN_FAMILY)) {
+        m.put(LAST_LOCATION_COLUMN_FAMILY, entry.getKey().getColumnQualifier(), entry.getValue());
+      } else if (entry.getKey().getColumnFamily().equals(LAST_LOCATION_COLUMN_FAMILY)) {
         // skip
       } else {
         m.put(entry.getKey().getColumnFamily(), entry.getKey().getColumnQualifier(), entry.getValue());
@@ -1002,14 +1001,14 @@ public class MetadataTable extends org.a
   }
   
   private static Scanner createCloneScanner(String tableId, Connector conn) throws TableNotFoundException {
-    Scanner mscanner = new IsolatedScanner(conn.createScanner(Constants.METADATA_TABLE_NAME, Authorizations.EMPTY));
+    Scanner mscanner = new IsolatedScanner(conn.createScanner(NAME, Authorizations.EMPTY));
     mscanner.setRange(new KeyExtent(new Text(tableId), null, null).toMetadataRange());
-    mscanner.fetchColumnFamily(Constants.METADATA_DATAFILE_COLUMN_FAMILY);
-    mscanner.fetchColumnFamily(Constants.METADATA_CURRENT_LOCATION_COLUMN_FAMILY);
-    mscanner.fetchColumnFamily(Constants.METADATA_LAST_LOCATION_COLUMN_FAMILY);
-    mscanner.fetchColumnFamily(Constants.METADATA_CLONED_COLUMN_FAMILY);
-    Constants.METADATA_PREV_ROW_COLUMN.fetch(mscanner);
-    Constants.METADATA_TIME_COLUMN.fetch(mscanner);
+    mscanner.fetchColumnFamily(DATAFILE_COLUMN_FAMILY);
+    mscanner.fetchColumnFamily(CURRENT_LOCATION_COLUMN_FAMILY);
+    mscanner.fetchColumnFamily(LAST_LOCATION_COLUMN_FAMILY);
+    mscanner.fetchColumnFamily(CLONED_COLUMN_FAMILY);
+    PREV_ROW_COLUMN.fetch(mscanner);
+    TIME_COLUMN.fetch(mscanner);
     return mscanner;
   }
   
@@ -1046,7 +1045,7 @@ public class MetadataTable extends org.a
       
       boolean cloneSuccessful = false;
       for (Entry<Key,Value> entry : cloneTablet.entrySet()) {
-        if (entry.getKey().getColumnFamily().equals(Constants.METADATA_CLONED_COLUMN_FAMILY)) {
+        if (entry.getKey().getColumnFamily().equals(CLONED_COLUMN_FAMILY)) {
           cloneSuccessful = true;
           break;
         }
@@ -1102,7 +1101,7 @@ public class MetadataTable extends org.a
       } else {
         // write out marker that this tablet was successfully cloned
         Mutation m = new Mutation(cloneTablet.keySet().iterator().next().getRow());
-        m.put(Constants.METADATA_CLONED_COLUMN_FAMILY, new Text(""), new Value("OK".getBytes()));
+        m.put(CLONED_COLUMN_FAMILY, new Text(""), new Value("OK".getBytes()));
         bw.addMutation(m);
       }
     }
@@ -1114,7 +1113,7 @@ public class MetadataTable extends org.a
   public static void cloneTable(Instance instance, String srcTableId, String tableId) throws Exception {
     
     Connector conn = instance.getConnector(SecurityConstants.SYSTEM_PRINCIPAL, SecurityConstants.getSystemToken());
-    BatchWriter bw = conn.createBatchWriter(Constants.METADATA_TABLE_NAME, new BatchWriterConfig());
+    BatchWriter bw = conn.createBatchWriter(NAME, new BatchWriterConfig());
     
     while (true) {
       
@@ -1147,9 +1146,9 @@ public class MetadataTable extends org.a
     }
     
     // delete the clone markers and create directory entries
-    Scanner mscanner = conn.createScanner(Constants.METADATA_TABLE_NAME, Authorizations.EMPTY);
+    Scanner mscanner = conn.createScanner(NAME, Authorizations.EMPTY);
     mscanner.setRange(new KeyExtent(new Text(tableId), null, null).toMetadataRange());
-    mscanner.fetchColumnFamily(Constants.METADATA_CLONED_COLUMN_FAMILY);
+    mscanner.fetchColumnFamily(CLONED_COLUMN_FAMILY);
     
     int dirCount = 0;
     
@@ -1157,7 +1156,7 @@ public class MetadataTable extends org.a
       Key k = entry.getKey();
       Mutation m = new Mutation(k.getRow());
       m.putDelete(k.getColumnFamily(), k.getColumnQualifier());
-      Constants.METADATA_DIRECTORY_COLUMN.put(m, new Value(FastFormat.toZeroPaddedString(dirCount++, 8, 16, "/c-".getBytes())));
+      DIRECTORY_COLUMN.put(m, new Value(FastFormat.toZeroPaddedString(dirCount++, 8, 16, "/c-".getBytes())));
       bw.addMutation(m);
     }
     
@@ -1167,15 +1166,15 @@ public class MetadataTable extends org.a
   
   public static void chopped(KeyExtent extent, ZooLock zooLock) {
     Mutation m = new Mutation(extent.getMetadataEntry());
-    Constants.METADATA_CHOPPED_COLUMN.put(m, new Value("chopped".getBytes()));
+    CHOPPED_COLUMN.put(m, new Value("chopped".getBytes()));
     update(SecurityConstants.getSystemCredentials(), zooLock, m);
   }
   
   public static void removeBulkLoadEntries(Connector conn, String tableId, long tid) throws Exception {
-    Scanner mscanner = new IsolatedScanner(conn.createScanner(Constants.METADATA_TABLE_NAME, Authorizations.EMPTY));
+    Scanner mscanner = new IsolatedScanner(conn.createScanner(NAME, Authorizations.EMPTY));
     mscanner.setRange(new KeyExtent(new Text(tableId), null, null).toMetadataRange());
-    mscanner.fetchColumnFamily(Constants.METADATA_BULKFILE_COLUMN_FAMILY);
-    BatchWriter bw = conn.createBatchWriter(Constants.METADATA_TABLE_NAME, new BatchWriterConfig());
+    mscanner.fetchColumnFamily(BULKFILE_COLUMN_FAMILY);
+    BatchWriter bw = conn.createBatchWriter(NAME, new BatchWriterConfig());
     for (Entry<Key,Value> entry : mscanner) {
       log.debug("Looking at entry " + entry + " with tid " + tid);
       if (Long.parseLong(entry.getValue().toString()) == tid) {
@@ -1192,9 +1191,9 @@ public class MetadataTable extends org.a
     List<FileRef> result = new ArrayList<FileRef>();
     try {
       VolumeManager fs = VolumeManagerImpl.get();
-      Scanner mscanner = new IsolatedScanner(conn.createScanner(Constants.METADATA_TABLE_NAME, Authorizations.EMPTY));
+      Scanner mscanner = new IsolatedScanner(conn.createScanner(NAME, Authorizations.EMPTY));
       mscanner.setRange(extent.toMetadataRange());
-      mscanner.fetchColumnFamily(Constants.METADATA_BULKFILE_COLUMN_FAMILY);
+      mscanner.fetchColumnFamily(BULKFILE_COLUMN_FAMILY);
       for (Entry<Key,Value> entry : mscanner) {
         if (Long.parseLong(entry.getValue().toString()) == tid) {
           result.add(new FileRef(fs, entry.getKey()));
@@ -1216,9 +1215,9 @@ public class MetadataTable extends org.a
     Map<FileRef,Long> ret = new HashMap<FileRef,Long>();
     
     VolumeManager fs = VolumeManagerImpl.get();
-    Scanner scanner = new ScannerImpl(HdfsZooInstance.getInstance(), credentials, Constants.METADATA_TABLE_ID, Authorizations.EMPTY);
+    Scanner scanner = new ScannerImpl(HdfsZooInstance.getInstance(), credentials, ID, Authorizations.EMPTY);
     scanner.setRange(new Range(metadataRow));
-    scanner.fetchColumnFamily(Constants.METADATA_BULKFILE_COLUMN_FAMILY);
+    scanner.fetchColumnFamily(BULKFILE_COLUMN_FAMILY);
     for (Entry<Key,Value> entry : scanner) {
       Long tid = Long.parseLong(entry.getValue().toString());
       ret.put(new FileRef(fs, entry.getKey()), tid);
@@ -1228,7 +1227,7 @@ public class MetadataTable extends org.a
   
   public static void addBulkLoadInProgressFlag(String path) {
     
-    Mutation m = new Mutation(Constants.METADATA_BLIP_FLAG_PREFIX + path);
+    Mutation m = new Mutation(BLIP_FLAG_PREFIX + path);
     m.put(EMPTY_TEXT, EMPTY_TEXT, new Value(new byte[] {}));
     
     update(SecurityConstants.getSystemCredentials(), m);
@@ -1236,7 +1235,7 @@ public class MetadataTable extends org.a
   
   public static void removeBulkLoadInProgressFlag(String path) {
     
-    Mutation m = new Mutation(Constants.METADATA_BLIP_FLAG_PREFIX + path);
+    Mutation m = new Mutation(BLIP_FLAG_PREFIX + path);
     m.putDelete(EMPTY_TEXT, EMPTY_TEXT);
     
     update(SecurityConstants.getSystemCredentials(), m);
@@ -1244,15 +1243,15 @@ public class MetadataTable extends org.a
   
   public static void moveMetaDeleteMarkers(Instance instance, TCredentials creds) {
     // move delete markers from the normal delete keyspace to the root tablet delete keyspace if the files are for the !METADATA table
-    Scanner scanner = new ScannerImpl(instance, creds, Constants.METADATA_TABLE_ID, Authorizations.EMPTY);
-    scanner.setRange(new Range(Constants.METADATA_DELETES_KEYSPACE));
+    Scanner scanner = new ScannerImpl(instance, creds, ID, Authorizations.EMPTY);
+    scanner.setRange(new Range(DELETES_KEYSPACE));
     for (Entry<Key,Value> entry : scanner) {
       String row = entry.getKey().getRow().toString();
-      if (row.startsWith(Constants.METADATA_DELETE_FLAG_PREFIX)) {
-        String filename = row.substring(Constants.METADATA_DELETE_FLAG_PREFIX.length());
+      if (row.startsWith(DELETE_FLAG_PREFIX)) {
+        String filename = row.substring(DELETE_FLAG_PREFIX.length());
         // add the new entry first
         log.info("Moving " + filename + " marker to the root tablet");
-        Mutation m = new Mutation(Constants.METADATA_DELETE_FLAG_FOR_METADATA_PREFIX + filename);
+        Mutation m = new Mutation(RootTable.DELETE_FLAG_PREFIX + filename);
         m.put(new byte[] {}, new byte[] {}, new byte[] {});
         update(creds, m);
         // remove the old entry


