accumulo-commits mailing list archives

From vi...@apache.org
Subject svn commit: r1496226 [11/13] - in /accumulo/branches/ACCUMULO-CURATOR: ./ assemble/ conf/examples/1GB/native-standalone/ conf/examples/1GB/standalone/ conf/examples/2GB/native-standalone/ conf/examples/2GB/standalone/ conf/examples/3GB/native-standalon...
Date Mon, 24 Jun 2013 21:34:25 GMT
Modified: accumulo/branches/ACCUMULO-CURATOR/server/src/main/java/org/apache/accumulo/server/util/OfflineMetadataScanner.java
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-CURATOR/server/src/main/java/org/apache/accumulo/server/util/OfflineMetadataScanner.java?rev=1496226&r1=1496225&r2=1496226&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-CURATOR/server/src/main/java/org/apache/accumulo/server/util/OfflineMetadataScanner.java (original)
+++ accumulo/branches/ACCUMULO-CURATOR/server/src/main/java/org/apache/accumulo/server/util/OfflineMetadataScanner.java Mon Jun 24 21:34:20 2013
@@ -28,7 +28,6 @@ import java.util.Map.Entry;
 import java.util.NoSuchElementException;
 import java.util.Set;
 
-import org.apache.accumulo.core.Constants;
 import org.apache.accumulo.core.client.Scanner;
 import org.apache.accumulo.core.client.impl.ScannerOptions;
 import org.apache.accumulo.core.conf.AccumuloConfiguration;
@@ -45,12 +44,15 @@ import org.apache.accumulo.core.iterator
 import org.apache.accumulo.core.iterators.system.MultiIterator;
 import org.apache.accumulo.core.iterators.system.VisibilityFilter;
 import org.apache.accumulo.core.iterators.user.VersioningIterator;
-import org.apache.accumulo.core.util.CachedConfiguration;
+import org.apache.accumulo.core.security.Authorizations;
 import org.apache.accumulo.core.util.LocalityGroupUtil;
+import org.apache.accumulo.core.util.RootTable;
 import org.apache.accumulo.core.util.TextUtil;
 import org.apache.accumulo.server.ServerConstants;
 import org.apache.accumulo.server.client.HdfsZooInstance;
 import org.apache.accumulo.server.conf.ServerConfiguration;
+import org.apache.accumulo.server.fs.VolumeManager;
+import org.apache.accumulo.server.fs.VolumeManagerImpl;
 import org.apache.accumulo.server.util.MetadataTable.LogEntry;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
@@ -60,13 +62,14 @@ public class OfflineMetadataScanner exte
   
   private Set<String> allFiles = new HashSet<String>();
   private Range range = new Range();
-  private final FileSystem fs;
+  private final VolumeManager fs;
   private final AccumuloConfiguration conf;
   
-  private List<SortedKeyValueIterator<Key,Value>> openMapFiles(Collection<String> files, FileSystem fs, AccumuloConfiguration conf) throws IOException {
+  private List<SortedKeyValueIterator<Key,Value>> openMapFiles(Collection<String> files, VolumeManager fs, AccumuloConfiguration conf) throws IOException {
     List<SortedKeyValueIterator<Key,Value>> readers = new ArrayList<SortedKeyValueIterator<Key,Value>>();
     for (String file : files) {
-      FileSKVIterator reader = FileOperations.getInstance().openReader(file, true, fs, fs.getConf(), conf);
+      FileSystem ns = fs.getFileSystemByPath(new Path(file));
+      FileSKVIterator reader = FileOperations.getInstance().openReader(file, true, ns, ns.getConf(), conf);
       readers.add(reader);
     }
     return readers;
@@ -78,7 +81,7 @@ public class OfflineMetadataScanner exte
     DeletingIterator delIter = new DeletingIterator(multiIterator, false);
     ColumnFamilySkippingIterator cfsi = new ColumnFamilySkippingIterator(delIter);
     ColumnQualifierFilter colFilter = new ColumnQualifierFilter(cfsi, columns);
-    VisibilityFilter visFilter = new VisibilityFilter(colFilter, Constants.NO_AUTHS, new byte[0]);
+    VisibilityFilter visFilter = new VisibilityFilter(colFilter, Authorizations.EMPTY, new byte[0]);
     
     visFilter.seek(r, LocalityGroupUtil.EMPTY_CF_SET, false);
     
@@ -117,13 +120,13 @@ public class OfflineMetadataScanner exte
     
   }
   
-  public OfflineMetadataScanner(AccumuloConfiguration conf, FileSystem fs) throws IOException {
+  public OfflineMetadataScanner(AccumuloConfiguration conf, VolumeManager fs) throws IOException {
     super();
     this.fs = fs;
     this.conf = conf;
     List<LogEntry> rwal;
     try {
-      rwal = MetadataTable.getLogEntries(null, Constants.ROOT_TABLET_EXTENT);
+      rwal = MetadataTable.getLogEntries(null, RootTable.EXTENT);
     } catch (Exception e) {
       throw new RuntimeException("Failed to check if root tablet has write ahead log entries", e);
     }
@@ -141,16 +144,16 @@ public class OfflineMetadataScanner exte
     List<SortedKeyValueIterator<Key,Value>> readers = openMapFiles(allFiles, fs, conf);
     
     HashSet<Column> columns = new HashSet<Column>();
-    columns.add(new Column(TextUtil.getBytes(Constants.METADATA_DATAFILE_COLUMN_FAMILY), null, null));
-    columns.add(new Column(TextUtil.getBytes(Constants.METADATA_LOG_COLUMN_FAMILY), null, null));
+    columns.add(new Column(TextUtil.getBytes(MetadataTable.DATAFILE_COLUMN_FAMILY), null, null));
+    columns.add(new Column(TextUtil.getBytes(MetadataTable.LOG_COLUMN_FAMILY), null, null));
     
     SortedKeyValueIterator<Key,Value> ssi = createSystemIter(new Range(), readers, columns);
     
     int walogs = 0;
     
     while (ssi.hasTop()) {
-      if (ssi.getTopKey().compareColumnFamily(Constants.METADATA_DATAFILE_COLUMN_FAMILY) == 0) {
-        allFiles.add(ServerConstants.getMetadataTableDir() + "/" + ssi.getTopKey().getColumnQualifier().toString());
+      if (ssi.getTopKey().compareColumnFamily(MetadataTable.DATAFILE_COLUMN_FAMILY) == 0) {
+        allFiles.add(fs.getFullPath(ssi.getTopKey()).toString());
       } else {
         walogs++;
       }
@@ -255,10 +258,10 @@ public class OfflineMetadataScanner exte
   }
   
   public static void main(String[] args) throws IOException {
-    FileSystem fs = FileSystem.get(CachedConfiguration.getInstance());
     ServerConfiguration conf = new ServerConfiguration(HdfsZooInstance.getInstance());
+    VolumeManager fs = VolumeManagerImpl.get();
     OfflineMetadataScanner scanner = new OfflineMetadataScanner(conf.getConfiguration(), fs);
-    scanner.setRange(Constants.METADATA_KEYSPACE);
+    scanner.setRange(MetadataTable.KEYSPACE);
     for (Entry<Key,Value> entry : scanner)
       System.out.println(entry.getKey() + " " + entry.getValue());
   }

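The change above is the template for the rest of this commit: a raw Hadoop FileSystem handle becomes the new server-side VolumeManager, which resolves the correct FileSystem for each path so that files can live on more than one volume, and the old Constants.METADATA_* / Constants.ROOT_TABLET_EXTENT / Constants.NO_AUTHS constants move to MetadataTable.*, RootTable.EXTENT, and Authorizations.EMPTY. A minimal standalone sketch of the per-path lookup that getFileSystemByPath appears to perform, assuming it delegates to Hadoop's Path.getFileSystem (the VolumeManager internals are not part of this diff):

  import java.io.IOException;
  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.FileSystem;
  import org.apache.hadoop.fs.Path;

  public class PerVolumeLookup {
    // Resolve the FileSystem owning a fully qualified path, so a reader
    // is opened against the volume the file actually lives on.
    static FileSystem fileSystemFor(String file, Configuration conf) throws IOException {
      return new Path(file).getFileSystem(conf); // chosen by the path's URI scheme/authority
    }

    public static void main(String[] args) throws IOException {
      Configuration conf = new Configuration();
      // Paths on different volumes resolve to different FileSystem instances.
      System.out.println(fileSystemFor("hdfs://nn1:8020/accumulo/tables/2/t-0001/F0000.rf", conf).getUri());
      System.out.println(fileSystemFor("file:///tmp/accumulo/tables/2/t-0001/F0000.rf", conf).getUri());
    }
  }
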
Modified: accumulo/branches/ACCUMULO-CURATOR/server/src/main/java/org/apache/accumulo/server/util/RemoveEntriesForMissingFiles.java
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-CURATOR/server/src/main/java/org/apache/accumulo/server/util/RemoveEntriesForMissingFiles.java?rev=1496226&r1=1496225&r2=1496226&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-CURATOR/server/src/main/java/org/apache/accumulo/server/util/RemoveEntriesForMissingFiles.java (original)
+++ accumulo/branches/ACCUMULO-CURATOR/server/src/main/java/org/apache/accumulo/server/util/RemoveEntriesForMissingFiles.java Mon Jun 24 21:34:20 2013
@@ -18,20 +18,18 @@ package org.apache.accumulo.server.util;
 
 import java.util.Map.Entry;
 
-import org.apache.accumulo.core.Constants;
-import org.apache.accumulo.server.cli.ClientOpts;
 import org.apache.accumulo.core.cli.BatchWriterOpts;
 import org.apache.accumulo.core.cli.ScannerOpts;
 import org.apache.accumulo.core.client.BatchWriter;
 import org.apache.accumulo.core.client.Connector;
 import org.apache.accumulo.core.client.Scanner;
 import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.KeyExtent;
 import org.apache.accumulo.core.data.Mutation;
 import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.util.CachedConfiguration;
-import org.apache.accumulo.server.ServerConstants;
-import org.apache.hadoop.fs.FileSystem;
+import org.apache.accumulo.core.util.MetadataTable;
+import org.apache.accumulo.server.cli.ClientOpts;
+import org.apache.accumulo.server.fs.VolumeManager;
+import org.apache.accumulo.server.fs.VolumeManagerImpl;
 import org.apache.hadoop.fs.Path;
 import org.apache.log4j.Logger;
 
@@ -54,25 +52,21 @@ public class RemoveEntriesForMissingFile
     ScannerOpts scanOpts = new ScannerOpts();
     BatchWriterOpts bwOpts = new BatchWriterOpts();
     opts.parseArgs(RemoveEntriesForMissingFiles.class.getName(), args, scanOpts, bwOpts);
-    FileSystem fs = FileSystem.get(CachedConfiguration.getInstance());
+    VolumeManager fs = VolumeManagerImpl.get();
     Connector connector = opts.getConnector();
-    Scanner metadata = connector.createScanner(Constants.METADATA_TABLE_NAME, opts.auths);
+    Scanner metadata = connector.createScanner(MetadataTable.NAME, opts.auths);
     metadata.setBatchSize(scanOpts.scanBatchSize);
-    metadata.setRange(Constants.METADATA_KEYSPACE);
-    metadata.fetchColumnFamily(Constants.METADATA_DATAFILE_COLUMN_FAMILY);
+    metadata.setRange(MetadataTable.KEYSPACE);
+    metadata.fetchColumnFamily(MetadataTable.DATAFILE_COLUMN_FAMILY);
     int count = 0;
     int missing = 0;
     BatchWriter writer = null; 
     if (opts.fix)
-      writer = connector.createBatchWriter(Constants.METADATA_TABLE_NAME, bwOpts.getBatchWriterConfig());
+      writer = connector.createBatchWriter(MetadataTable.NAME, bwOpts.getBatchWriterConfig());
     for (Entry<Key,Value> entry : metadata) {
       count++;
       Key key = entry.getKey();
-      String table = new String(KeyExtent.tableOfMetadataRow(entry.getKey().getRow()));
-      String file = key.getColumnQualifier().toString();
-      if (!file.startsWith("/"))
-        file = "/" + file;
-      Path map = new Path(ServerConstants.getTablesDir() + "/" + table + file);
+      Path map = fs.getFullPath(key);
       if (!fs.exists(map)) {
         missing++;
         log.info("File " + map + " is missing");

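The shape of the change above is worth noting: the removed lines rebuilt a file's absolute path by hand from the metadata row's table id and the column qualifier, while the new code delegates to fs.getFullPath(key). Centralizing this matters once qualifiers may be fully qualified URIs on another volume. A paraphrase of the removed logic, with illustrative names, kept only for contrast:

  // Paraphrase of the legacy path building deleted above; the helper
  // name and parameters are illustrative, not Accumulo API.
  static org.apache.hadoop.fs.Path legacyFullPath(String tablesDir, String tableId, String qualifier) {
    String file = qualifier.startsWith("/") ? qualifier : "/" + qualifier;
    // e.g. /accumulo/tables/<tableId>/t-0001/F0000.rf
    return new org.apache.hadoop.fs.Path(tablesDir + "/" + tableId + file);
  }
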
Modified: accumulo/branches/ACCUMULO-CURATOR/server/src/main/java/org/apache/accumulo/server/util/TableDiskUsage.java
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-CURATOR/server/src/main/java/org/apache/accumulo/server/util/TableDiskUsage.java?rev=1496226&r1=1496225&r2=1496226&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-CURATOR/server/src/main/java/org/apache/accumulo/server/util/TableDiskUsage.java (original)
+++ accumulo/branches/ACCUMULO-CURATOR/server/src/main/java/org/apache/accumulo/server/util/TableDiskUsage.java Mon Jun 24 21:34:20 2013
@@ -16,24 +16,261 @@
  */
 package org.apache.accumulo.server.util;
 
+import java.io.IOException;
 import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Comparator;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
 import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Set;
+import java.util.TreeMap;
+import java.util.TreeSet;
 
-import org.apache.accumulo.server.cli.ClientOpts;
 import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.Scanner;
+import org.apache.accumulo.core.client.TableNotFoundException;
+import org.apache.accumulo.core.conf.AccumuloConfiguration;
 import org.apache.accumulo.core.conf.DefaultConfiguration;
+import org.apache.accumulo.core.data.Key;
+import org.apache.accumulo.core.data.KeyExtent;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.core.util.NumUtil;
+import org.apache.accumulo.server.ServerConstants;
+import org.apache.accumulo.server.cli.ClientOpts;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.io.Text;
+import org.apache.log4j.Logger;
 
 import com.beust.jcommander.Parameter;
 
 public class TableDiskUsage {
   
+  
+  private static final Logger log = Logger.getLogger(TableDiskUsage.class);
+  private int nextInternalId = 0;
+  private Map<String,Integer> internalIds = new HashMap<String,Integer>();
+  private Map<Integer,String> externalIds = new HashMap<Integer,String>();
+  private Map<String,Integer[]> tableFiles = new HashMap<String,Integer[]>();
+  private Map<String,Long> fileSizes = new HashMap<String,Long>();
+  
+  void addTable(String tableId) {
+    if (internalIds.containsKey(tableId))
+      throw new IllegalArgumentException("Already added table " + tableId);
+    
+    int iid = nextInternalId++;
+    
+    internalIds.put(tableId, iid);
+    externalIds.put(iid, tableId);
+  }
+  
+  void linkFileAndTable(String tableId, String file) {
+    int internalId = internalIds.get(tableId);
+    
+    Integer[] tables = tableFiles.get(file);
+    if (tables == null) {
+      tables = new Integer[internalIds.size()];
+      for (int i = 0; i < tables.length; i++)
+        tables[i] = 0;
+      tableFiles.put(file, tables);
+    }
+    
+    tables[internalId] = 1;
+  }
+  
+  void addFileSize(String file, long size) {
+    fileSizes.put(file, size);
+  }
+  
+  Map<List<String>,Long> calculateUsage() {
+
+    Map<List<Integer>,Long> usage = new HashMap<List<Integer>,Long>();
+    
+    for (Entry<String,Integer[]> entry : tableFiles.entrySet()) {
+      log.debug("fileSizes " + fileSizes + " key " + Arrays.asList(entry.getKey()));
+      List<Integer> key = Arrays.asList(entry.getValue());
+      Long size = fileSizes.get(entry.getKey());
+      
+      Long tablesUsage = usage.get(key);
+      if (tablesUsage == null)
+        tablesUsage = 0L;
+      
+      tablesUsage += size;
+      
+      usage.put(key, tablesUsage);
+      
+    }
+    
+    Map<List<String>,Long> externalUsage = new HashMap<List<String>,Long>();
+    
+    for (Entry<List<Integer>,Long> entry : usage.entrySet()) {
+      List<String> externalKey = new ArrayList<String>();
+      List<Integer> key = entry.getKey();
+      for (int i = 0; i < key.size(); i++)
+        if (key.get(i) != 0)
+          externalKey.add(externalIds.get(i));
+      
+      externalUsage.put(externalKey, entry.getValue());
+    }
+    
+    return externalUsage;
+  }
+  
+  public interface Printer {
+    void print(String line);
+  }
+  
+  public static void printDiskUsage(AccumuloConfiguration acuConf, Collection<String> tables, FileSystem fs, Connector conn, boolean humanReadable)
+      throws TableNotFoundException, IOException {
+    printDiskUsage(acuConf, tables, fs, conn, new Printer() {
+      @Override
+      public void print(String line) {
+        System.out.println(line);
+      }
+    }, humanReadable);
+  }
+  
+  public static Map<TreeSet<String>,Long> getDiskUsage(AccumuloConfiguration acuConf, Set<String> tableIds, FileSystem fs, Connector conn)
+      throws IOException {
+    TableDiskUsage tdu = new TableDiskUsage();
+    
+    for (String tableId : tableIds)
+      tdu.addTable(tableId);
+    
+    HashSet<String> tablesReferenced = new HashSet<String>(tableIds);
+    HashSet<String> emptyTableIds = new HashSet<String>();
+    
+    for (String tableId : tableIds) {
+      Scanner mdScanner = null;
+      try {
+        mdScanner = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
+      } catch (TableNotFoundException e) {
+        throw new RuntimeException(e);
+      }
+      mdScanner.fetchColumnFamily(MetadataTable.DATAFILE_COLUMN_FAMILY);
+      mdScanner.setRange(new KeyExtent(new Text(tableId), null, null).toMetadataRange());
+      
+      if (!mdScanner.iterator().hasNext()) {
+        emptyTableIds.add(tableId);
+      }
+      
+      for (Entry<Key,Value> entry : mdScanner) {
+        String file = entry.getKey().getColumnQualifier().toString();
+        String parts[] = file.split("/");
+        String uniqueName = parts[parts.length - 1];
+        if (file.contains(":") || file.startsWith("../")) {
+          String ref = parts[parts.length - 3];
+          if (!ref.equals(tableId)) {
+            tablesReferenced.add(ref);
+          }
+        }
+        
+        tdu.linkFileAndTable(tableId, uniqueName);
+      }
+    }
+    
+    for (String tableId : tablesReferenced) {
+      for (String tableDir : ServerConstants.getTablesDirs()) {
+        FileStatus[] files = fs.globStatus(new Path(tableDir + "/" + tableId + "/*/*"));
+        if (files != null) {
+          for (FileStatus fileStatus : files) {
+            // Assumes that all filenames are unique
+            String name = fileStatus.getPath().getName();
+            tdu.addFileSize(name, fileStatus.getLen());
+          }
+        }
+      }
+    }
+    
+    HashMap<String,String> reverseTableIdMap = new HashMap<String,String>();
+    for (Entry<String,String> entry : conn.tableOperations().tableIdMap().entrySet())
+      reverseTableIdMap.put(entry.getValue(), entry.getKey());
+    
+    TreeMap<TreeSet<String>,Long> usage = new TreeMap<TreeSet<String>,Long>(new Comparator<TreeSet<String>>() {
+      
+      @Override
+      public int compare(TreeSet<String> o1, TreeSet<String> o2) {
+        int len1 = o1.size();
+        int len2 = o2.size();
+        
+        int min = Math.min(len1, len2);
+        
+        Iterator<String> iter1 = o1.iterator();
+        Iterator<String> iter2 = o2.iterator();
+        
+        int count = 0;
+        
+        while (count < min) {
+          String s1 = iter1.next();
+          String s2 = iter2.next();
+          
+          int cmp = s1.compareTo(s2);
+          
+          if (cmp != 0)
+            return cmp;
+          
+          count++;
+        }
+        
+        return len1 - len2;
+      }
+    });
+    
+    for (Entry<List<String>,Long> entry : tdu.calculateUsage().entrySet()) {
+      TreeSet<String> tableNames = new TreeSet<String>();
+      for (String tableId : entry.getKey())
+        tableNames.add(reverseTableIdMap.get(tableId));
+      
+      usage.put(tableNames, entry.getValue());
+    }
+    
+    if (!emptyTableIds.isEmpty()) {
+      TreeSet<String> emptyTables = new TreeSet<String>();
+      for (String tableId : emptyTableIds) {
+        emptyTables.add(reverseTableIdMap.get(tableId));
+      }
+      usage.put(emptyTables, 0L);
+    }
+    
+    return usage;
+  }
+  
+  public static void printDiskUsage(AccumuloConfiguration acuConf, Collection<String> tables, FileSystem fs, Connector conn, Printer printer,
+      boolean humanReadable) throws TableNotFoundException, IOException {
+    
+    HashSet<String> tableIds = new HashSet<String>();
+    
+    for (String tableName : tables) {
+      String tableId = conn.tableOperations().tableIdMap().get(tableName);
+      if (tableId == null)
+        throw new TableNotFoundException(null, tableName, "Table " + tableName + " not found");
+      
+      tableIds.add(tableId);
+    }
+    
+    Map<TreeSet<String>,Long> usage = getDiskUsage(acuConf, tableIds, fs, conn);
+    
+    String valueFormat = humanReadable ? "%9s" : "%,24d";
+    for (Entry<TreeSet<String>,Long> entry : usage.entrySet()) {
+      Object value = humanReadable ? NumUtil.bigNumberForSize(entry.getValue()) : entry.getValue();
+      printer.print(String.format(valueFormat + " %s", value, entry.getKey()));
+    }
+  }
+
+  
   static class Opts extends ClientOpts {
     @Parameter(description=" <table> { <table> ... } ")
     List<String> tables = new ArrayList<String>();
   }
-  
+    
   /**
    * @param args
    */
@@ -42,7 +279,7 @@ public class TableDiskUsage {
     Opts opts = new Opts();
     opts.parseArgs(TableDiskUsage.class.getName(), args);
     Connector conn = opts.getConnector();
-    org.apache.accumulo.core.util.TableDiskUsage.printDiskUsage(DefaultConfiguration.getInstance(), opts.tables, fs, conn, false);
+    org.apache.accumulo.server.util.TableDiskUsage.printDiskUsage(DefaultConfiguration.getInstance(), opts.tables, fs, conn, false);
   }
   
 }

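TableDiskUsage moves wholesale from core.util into server.util here. Its accounting gives each table a small internal id (addTable), tags every file with a 0/1 vector over those ids (linkFileAndTable), then sums file sizes per distinct vector (calculateUsage), so a file referenced by several tables, for example after a clone or bulk import, is charged once, to the set of tables sharing it. A sketch of that behavior, assuming the package-private methods shown above are called from the same package:

  TableDiskUsage tdu = new TableDiskUsage();
  tdu.addTable("2");                       // internal id 0
  tdu.addTable("3");                       // internal id 1
  tdu.linkFileAndTable("2", "F0000.rf");   // file shared ...
  tdu.linkFileAndTable("3", "F0000.rf");   // ... by both tables
  tdu.linkFileAndTable("2", "F0001.rf");   // file owned by table 2 only
  tdu.addFileSize("F0000.rf", 1000);
  tdu.addFileSize("F0001.rf", 500);
  // Groups by the set of referencing tables (map order may vary):
  // {[2, 3]=1000, [2]=500}
  System.out.println(tdu.calculateUsage());

Note that addTable must be called for every table before the first linkFileAndTable, since the per-file vector is sized from internalIds at first link.
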
Modified: accumulo/branches/ACCUMULO-CURATOR/server/src/main/java/org/apache/accumulo/server/util/TabletIterator.java
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-CURATOR/server/src/main/java/org/apache/accumulo/server/util/TabletIterator.java?rev=1496226&r1=1496225&r2=1496226&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-CURATOR/server/src/main/java/org/apache/accumulo/server/util/TabletIterator.java (original)
+++ accumulo/branches/ACCUMULO-CURATOR/server/src/main/java/org/apache/accumulo/server/util/TabletIterator.java Mon Jun 24 21:34:20 2013
@@ -24,13 +24,13 @@ import java.util.Set;
 import java.util.SortedMap;
 import java.util.TreeMap;
 
-import org.apache.accumulo.core.Constants;
 import org.apache.accumulo.core.client.Scanner;
 import org.apache.accumulo.core.data.Key;
 import org.apache.accumulo.core.data.KeyExtent;
 import org.apache.accumulo.core.data.PartialKey;
 import org.apache.accumulo.core.data.Range;
 import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.util.MetadataTable;
 import org.apache.accumulo.core.util.UtilWaitThread;
 import org.apache.hadoop.io.Text;
 import org.apache.log4j.Logger;
@@ -93,8 +93,8 @@ public class TabletIterator implements I
     this.scanner = s;
     this.range = range;
     this.scanner.setRange(range);
-    Constants.METADATA_PREV_ROW_COLUMN.fetch(scanner);
-    Constants.METADATA_DIRECTORY_COLUMN.fetch(scanner);
+    MetadataTable.PREV_ROW_COLUMN.fetch(scanner);
+    MetadataTable.DIRECTORY_COLUMN.fetch(scanner);
     this.iter = s.iterator();
     this.returnPrevEndRow = returnPrevEndRow;
     this.returnDir = returnDir;
@@ -112,7 +112,7 @@ public class TabletIterator implements I
       Key prevEndRowKey = currentTabletKeys.lastKey();
       Value prevEndRowValue = currentTabletKeys.get(prevEndRowKey);
       
-      if (!Constants.METADATA_PREV_ROW_COLUMN.hasColumns(prevEndRowKey)) {
+      if (!MetadataTable.PREV_ROW_COLUMN.hasColumns(prevEndRowKey)) {
         log.debug(currentTabletKeys);
         throw new RuntimeException("Unexpected key " + prevEndRowKey);
       }
@@ -176,11 +176,11 @@ public class TabletIterator implements I
     
     while (esIter.hasNext()) {
       Map.Entry<Key,Value> entry = esIter.next();
-      if (!returnPrevEndRow && Constants.METADATA_PREV_ROW_COLUMN.hasColumns(entry.getKey())) {
+      if (!returnPrevEndRow && MetadataTable.PREV_ROW_COLUMN.hasColumns(entry.getKey())) {
         esIter.remove();
       }
       
-      if (!returnDir && Constants.METADATA_DIRECTORY_COLUMN.hasColumns(entry.getKey())) {
+      if (!returnDir && MetadataTable.DIRECTORY_COLUMN.hasColumns(entry.getKey())) {
         esIter.remove();
       }
     }
@@ -216,7 +216,7 @@ public class TabletIterator implements I
         
         tm.put(entry.getKey(), entry.getValue());
         
-        if (Constants.METADATA_PREV_ROW_COLUMN.hasColumns(entry.getKey())) {
+        if (MetadataTable.PREV_ROW_COLUMN.hasColumns(entry.getKey())) {
           sawPrevEndRow = true;
           break;
         }
@@ -259,7 +259,7 @@ public class TabletIterator implements I
       range = new Range(new Key(lastTablet).followingKey(PartialKey.ROW), true, this.range.getEndKey(), this.range.isEndKeyInclusive());
     }
     
-    log.info("Resetting " + Constants.METADATA_TABLE_NAME + " scanner to " + range);
+    log.info("Resetting " + MetadataTable.NAME + " scanner to " + range);
     
     scanner.setRange(range);
     iter = scanner.iterator();

Modified: accumulo/branches/ACCUMULO-CURATOR/server/src/main/java/org/apache/accumulo/server/util/TabletOperations.java
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-CURATOR/server/src/main/java/org/apache/accumulo/server/util/TabletOperations.java?rev=1496226&r1=1496225&r2=1496226&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-CURATOR/server/src/main/java/org/apache/accumulo/server/util/TabletOperations.java (original)
+++ accumulo/branches/ACCUMULO-CURATOR/server/src/main/java/org/apache/accumulo/server/util/TabletOperations.java Mon Jun 24 21:34:20 2013
@@ -19,10 +19,11 @@ package org.apache.accumulo.server.util;
 import java.io.IOException;
 
 import org.apache.accumulo.core.Constants;
-import org.apache.accumulo.core.util.CachedConfiguration;
 import org.apache.accumulo.core.util.UtilWaitThread;
+import org.apache.accumulo.server.ServerConstants;
+import org.apache.accumulo.server.fs.VolumeManager;
+import org.apache.accumulo.server.fs.VolumeManagerImpl;
 import org.apache.accumulo.server.tabletserver.UniqueNameAllocator;
-import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.io.Text;
 import org.apache.log4j.Logger;
@@ -31,32 +32,33 @@ public class TabletOperations {
   
   private static final Logger log = Logger.getLogger(TabletOperations.class);
   
-  public static String createTabletDirectory(FileSystem fs, String tableDir, Text endRow) {
+  public static String createTabletDirectory(VolumeManager fs, String tableId, Text endRow) {
     String lowDirectory;
     
     UniqueNameAllocator namer = UniqueNameAllocator.getInstance();
+    String volume = fs.choose(ServerConstants.getTablesDirs());
     
     while (true) {
       try {
         if (endRow == null) {
           lowDirectory = Constants.DEFAULT_TABLET_LOCATION;
-          Path lowDirectoryPath = new Path(tableDir + lowDirectory);
+          Path lowDirectoryPath = new Path(volume + "/" + tableId + "/" + lowDirectory);
           if (fs.exists(lowDirectoryPath) || fs.mkdirs(lowDirectoryPath))
-            return lowDirectory;
+            return lowDirectoryPath.makeQualified(fs.getFileSystemByPath(lowDirectoryPath)).toString();
           log.warn("Failed to create " + lowDirectoryPath + " for unknown reason");
         } else {
           lowDirectory = "/" + Constants.GENERATED_TABLET_DIRECTORY_PREFIX + namer.getNextName();
-          Path lowDirectoryPath = new Path(tableDir + lowDirectory);
+          Path lowDirectoryPath = new Path(volume + "/" + tableId + "/" +  lowDirectory);
           if (fs.exists(lowDirectoryPath))
             throw new IllegalStateException("Dir exist when it should not " + lowDirectoryPath);
           if (fs.mkdirs(lowDirectoryPath))
-            return lowDirectory;
+            return lowDirectoryPath.makeQualified(fs.getFileSystemByPath(lowDirectoryPath)).toString();
         }
       } catch (IOException e) {
         log.warn(e);
       }
       
-      log.warn("Failed to create dir for tablet in table " + tableDir + " will retry ...");
+      log.warn("Failed to create dir for tablet in table " + tableId + " in volume " + volume + " + will retry ...");
       UtilWaitThread.sleep(3000);
       
     }
@@ -65,7 +67,7 @@ public class TabletOperations {
   public static String createTabletDirectory(String tableDir, Text endRow) {
     while (true) {
       try {
-        FileSystem fs = FileSystem.get(CachedConfiguration.getInstance());
+        VolumeManager fs = VolumeManagerImpl.get();
         return createTabletDirectory(fs, tableDir, endRow);
       } catch (IOException e) {
         log.warn(e);

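The substantive change above is that tablet directories are no longer relative to a single tableDir: a volume is first picked with fs.choose(ServerConstants.getTablesDirs()), and the directory returned by createTabletDirectory is fully qualified, so later readers can locate the correct volume from the stored path alone. The choosing strategy itself is not shown in this diff; a minimal sketch under the assumption of a random pick:

  import java.util.Random;

  public class ChooseVolume {
    private static final Random rand = new Random();

    // Assumption: a random pick; the real VolumeManager.choose()
    // strategy is not visible in this diff.
    static String choose(String[] volumes) {
      return volumes[rand.nextInt(volumes.length)];
    }

    public static void main(String[] args) {
      String[] tablesDirs = {"hdfs://nn1:8020/accumulo/tables", "hdfs://nn2:8020/accumulo/tables"};
      // A fully qualified tablet directory, in the shape the new code returns:
      System.out.println(choose(tablesDirs) + "/2/default_tablet");
    }
  }
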
Modified: accumulo/branches/ACCUMULO-CURATOR/server/src/main/java/org/apache/accumulo/server/util/VerifyTabletAssignments.java
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-CURATOR/server/src/main/java/org/apache/accumulo/server/util/VerifyTabletAssignments.java?rev=1496226&r1=1496225&r2=1496226&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-CURATOR/server/src/main/java/org/apache/accumulo/server/util/VerifyTabletAssignments.java (original)
+++ accumulo/branches/ACCUMULO-CURATOR/server/src/main/java/org/apache/accumulo/server/util/VerifyTabletAssignments.java Mon Jun 24 21:34:20 2013
@@ -29,7 +29,6 @@ import java.util.concurrent.ExecutorServ
 import java.util.concurrent.Executors;
 import java.util.concurrent.TimeUnit;
 
-import org.apache.accumulo.core.Constants;
 import org.apache.accumulo.core.client.AccumuloException;
 import org.apache.accumulo.core.client.AccumuloSecurityException;
 import org.apache.accumulo.core.client.Connector;
@@ -45,6 +44,7 @@ import org.apache.accumulo.core.data.thr
 import org.apache.accumulo.core.data.thrift.TColumn;
 import org.apache.accumulo.core.data.thrift.TKeyExtent;
 import org.apache.accumulo.core.data.thrift.TRange;
+import org.apache.accumulo.core.security.Authorizations;
 import org.apache.accumulo.core.security.CredentialHelper;
 import org.apache.accumulo.core.security.thrift.TCredentials;
 import org.apache.accumulo.core.tabletserver.thrift.NoSuchScanIDException;
@@ -90,8 +90,7 @@ public class VerifyTabletAssignments {
     
     Connector conn = opts.getConnector();
     Instance inst = conn.getInstance();
-    MetadataTable.getEntries(conn.getInstance(), CredentialHelper.create(opts.principal, opts.getToken(), opts.instance), tableName, false,
-        locations, tablets);
+    MetadataTable.getEntries(conn.getInstance(), CredentialHelper.create(opts.principal, opts.getToken(), opts.instance), tableName, false, locations, tablets);
     
     final HashSet<KeyExtent> failures = new HashSet<KeyExtent>();
     
@@ -151,8 +150,8 @@ public class VerifyTabletAssignments {
     }
   }
   
-  private static void checkTabletServer(AccumuloConfiguration conf, TCredentials st, Entry<String,List<KeyExtent>> entry,
-      HashSet<KeyExtent> failures) throws ThriftSecurityException, TException, NoSuchScanIDException {
+  private static void checkTabletServer(AccumuloConfiguration conf, TCredentials st, Entry<String,List<KeyExtent>> entry, HashSet<KeyExtent> failures)
+      throws ThriftSecurityException, TException, NoSuchScanIDException {
     TabletClientService.Iface client = ThriftUtil.getTServerClient(entry.getKey(), conf);
     
     Map<TKeyExtent,List<TRange>> batch = new TreeMap<TKeyExtent,List<TRange>>();
@@ -187,8 +186,8 @@ public class VerifyTabletAssignments {
     Map<String,Map<String,String>> emptyMapSMapSS = Collections.emptyMap();
     List<IterInfo> emptyListIterInfo = Collections.emptyList();
     List<TColumn> emptyListColumn = Collections.emptyList();
-    InitialMultiScan is = client.startMultiScan(tinfo, st, batch, emptyListColumn, emptyListIterInfo, emptyMapSMapSS, Constants.NO_AUTHS.getAuthorizationsBB(),
-        false);
+    InitialMultiScan is = client.startMultiScan(tinfo, st, batch, emptyListColumn, emptyListIterInfo, emptyMapSMapSS,
+        Authorizations.EMPTY.getAuthorizationsBB(), false);
     if (is.result.more) {
       MultiScanResult result = client.continueMultiScan(tinfo, is.scanID);
       checkFailures(entry.getKey(), failures, result);

Modified: accumulo/branches/ACCUMULO-CURATOR/server/src/main/java/org/apache/accumulo/server/util/ZooKeeperMain.java
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-CURATOR/server/src/main/java/org/apache/accumulo/server/util/ZooKeeperMain.java?rev=1496226&r1=1496225&r2=1496226&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-CURATOR/server/src/main/java/org/apache/accumulo/server/util/ZooKeeperMain.java (original)
+++ accumulo/branches/ACCUMULO-CURATOR/server/src/main/java/org/apache/accumulo/server/util/ZooKeeperMain.java Mon Jun 24 21:34:20 2013
@@ -16,12 +16,11 @@
  */
 package org.apache.accumulo.server.util;
 
-import org.apache.accumulo.core.Constants;
 import org.apache.accumulo.core.cli.Help;
 import org.apache.accumulo.core.client.Instance;
 import org.apache.accumulo.core.util.CachedConfiguration;
+import org.apache.accumulo.server.ServerConstants;
 import org.apache.accumulo.server.client.HdfsZooInstance;
-import org.apache.accumulo.server.conf.ServerConfiguration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 
@@ -42,7 +41,7 @@ public class ZooKeeperMain {
     Opts opts = new Opts();
     opts.parseArgs(ZooKeeperMain.class.getName(), args);
     FileSystem fs = FileSystem.get(CachedConfiguration.getInstance());
-    String baseDir = Constants.getBaseDir(ServerConfiguration.getSiteConfiguration());
+    String baseDir = ServerConstants.getBaseDirs()[0];
     System.out.println("Using " + fs.makeQualified(new Path(baseDir + "/instance_id")) + " to lookup accumulo instance");
     Instance instance = HdfsZooInstance.getInstance();
     if (opts.servers == null) {
@@ -50,7 +49,7 @@ public class ZooKeeperMain {
     }
     System.out.println("The accumulo instance id is " + instance.getInstanceID());
     if (!opts.servers.contains("/"))
-      opts.servers += "/accumulo/"+instance.getInstanceID(); 
-    org.apache.zookeeper.ZooKeeperMain.main(new String[]{"-server", opts.servers, "-timeout", "" + (opts.timeout * 1000)});
+      opts.servers += "/accumulo/" + instance.getInstanceID();
+    org.apache.zookeeper.ZooKeeperMain.main(new String[] {"-server", opts.servers, "-timeout", "" + (opts.timeout * 1000)});
   }
 }

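For reference, the connect-string handling at the end of ZooKeeperMain is unchanged in substance, only reformatted: when the caller supplies bare host:port pairs, the instance's chroot is appended. In sketch form, with an illustrative instance id:

  String servers = "zk1:2181,zk2:2181";
  String instanceId = "a1b2c3d4";          // illustrative value
  if (!servers.contains("/"))
    servers += "/accumulo/" + instanceId;
  // servers is now zk1:2181,zk2:2181/accumulo/a1b2c3d4
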
Modified: accumulo/branches/ACCUMULO-CURATOR/server/src/test/java/org/apache/accumulo/server/constraints/MetadataConstraintsTest.java
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-CURATOR/server/src/test/java/org/apache/accumulo/server/constraints/MetadataConstraintsTest.java?rev=1496226&r1=1496225&r2=1496226&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-CURATOR/server/src/test/java/org/apache/accumulo/server/constraints/MetadataConstraintsTest.java (original)
+++ accumulo/branches/ACCUMULO-CURATOR/server/src/test/java/org/apache/accumulo/server/constraints/MetadataConstraintsTest.java Mon Jun 24 21:34:20 2013
@@ -22,10 +22,10 @@ import static org.junit.Assert.assertNul
 
 import java.util.List;
 
-import org.apache.accumulo.core.Constants;
 import org.apache.accumulo.core.conf.AccumuloConfiguration;
 import org.apache.accumulo.core.data.Mutation;
 import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.util.MetadataTable;
 import org.apache.accumulo.fate.zookeeper.TransactionWatcher.Arbitrator;
 import org.apache.hadoop.io.Text;
 import org.apache.log4j.Level;
@@ -58,7 +58,7 @@ public class MetadataConstraintsTest {
   public void testCheck() {
     Logger.getLogger(AccumuloConfiguration.class).setLevel(Level.ERROR);
     Mutation m = new Mutation(new Text("0;foo"));
-    Constants.METADATA_PREV_ROW_COLUMN.put(m, new Value("1foo".getBytes()));
+    MetadataTable.PREV_ROW_COLUMN.put(m, new Value("1foo".getBytes()));
     
     MetadataConstraints mc = new MetadataConstraints();
     
@@ -69,7 +69,7 @@ public class MetadataConstraintsTest {
     assertEquals(Short.valueOf((short) 3), violations.get(0));
     
     m = new Mutation(new Text("0:foo"));
-    Constants.METADATA_PREV_ROW_COLUMN.put(m, new Value("1poo".getBytes()));
+    MetadataTable.PREV_ROW_COLUMN.put(m, new Value("1poo".getBytes()));
     
     violations = mc.check(null, m);
     
@@ -87,7 +87,7 @@ public class MetadataConstraintsTest {
     assertEquals(Short.valueOf((short) 2), violations.get(0));
     
     m = new Mutation(new Text("!!<"));
-    Constants.METADATA_PREV_ROW_COLUMN.put(m, new Value("1poo".getBytes()));
+    MetadataTable.PREV_ROW_COLUMN.put(m, new Value("1poo".getBytes()));
     
     violations = mc.check(null, m);
     
@@ -97,7 +97,7 @@ public class MetadataConstraintsTest {
     assertEquals(Short.valueOf((short) 5), violations.get(1));
     
     m = new Mutation(new Text("0;foo"));
-    Constants.METADATA_PREV_ROW_COLUMN.put(m, new Value("".getBytes()));
+    MetadataTable.PREV_ROW_COLUMN.put(m, new Value("".getBytes()));
     
     violations = mc.check(null, m);
     
@@ -106,21 +106,21 @@ public class MetadataConstraintsTest {
     assertEquals(Short.valueOf((short) 6), violations.get(0));
     
     m = new Mutation(new Text("0;foo"));
-    Constants.METADATA_PREV_ROW_COLUMN.put(m, new Value("bar".getBytes()));
+    MetadataTable.PREV_ROW_COLUMN.put(m, new Value("bar".getBytes()));
     
     violations = mc.check(null, m);
     
     assertEquals(null, violations);
     
     m = new Mutation(new Text("!0<"));
-    Constants.METADATA_PREV_ROW_COLUMN.put(m, new Value("bar".getBytes()));
+    MetadataTable.PREV_ROW_COLUMN.put(m, new Value("bar".getBytes()));
     
     violations = mc.check(null, m);
     
     assertEquals(null, violations);
     
     m = new Mutation(new Text("!1<"));
-    Constants.METADATA_PREV_ROW_COLUMN.put(m, new Value("bar".getBytes()));
+    MetadataTable.PREV_ROW_COLUMN.put(m, new Value("bar".getBytes()));
     
     violations = mc.check(null, m);
     
@@ -138,8 +138,8 @@ public class MetadataConstraintsTest {
 
     // inactive txid
     m = new Mutation(new Text("0;foo"));
-    m.put(Constants.METADATA_BULKFILE_COLUMN_FAMILY, new Text("/someFile"), new Value("12345".getBytes()));
-    m.put(Constants.METADATA_DATAFILE_COLUMN_FAMILY, new Text("/someFile"), new Value("1,1".getBytes()));
+    m.put(MetadataTable.BULKFILE_COLUMN_FAMILY, new Text("/someFile"), new Value("12345".getBytes()));
+    m.put(MetadataTable.DATAFILE_COLUMN_FAMILY, new Text("/someFile"), new Value("1,1".getBytes()));
     violations = mc.check(null, m);
     assertNotNull(violations);
     assertEquals(1, violations.size());
@@ -147,8 +147,8 @@ public class MetadataConstraintsTest {
     
     // txid that throws exception
     m = new Mutation(new Text("0;foo"));
-    m.put(Constants.METADATA_BULKFILE_COLUMN_FAMILY, new Text("/someFile"), new Value("9".getBytes()));
-    m.put(Constants.METADATA_DATAFILE_COLUMN_FAMILY, new Text("/someFile"), new Value("1,1".getBytes()));
+    m.put(MetadataTable.BULKFILE_COLUMN_FAMILY, new Text("/someFile"), new Value("9".getBytes()));
+    m.put(MetadataTable.DATAFILE_COLUMN_FAMILY, new Text("/someFile"), new Value("1,1".getBytes()));
     violations = mc.check(null, m);
     assertNotNull(violations);
     assertEquals(1, violations.size());
@@ -156,14 +156,14 @@ public class MetadataConstraintsTest {
     
     // active txid w/ file
     m = new Mutation(new Text("0;foo"));
-    m.put(Constants.METADATA_BULKFILE_COLUMN_FAMILY, new Text("/someFile"), new Value("5".getBytes()));
-    m.put(Constants.METADATA_DATAFILE_COLUMN_FAMILY, new Text("/someFile"), new Value("1,1".getBytes()));
+    m.put(MetadataTable.BULKFILE_COLUMN_FAMILY, new Text("/someFile"), new Value("5".getBytes()));
+    m.put(MetadataTable.DATAFILE_COLUMN_FAMILY, new Text("/someFile"), new Value("1,1".getBytes()));
     violations = mc.check(null, m);
     assertNull(violations);
     
     // active txid w/o file
     m = new Mutation(new Text("0;foo"));
-    m.put(Constants.METADATA_BULKFILE_COLUMN_FAMILY, new Text("/someFile"), new Value("5".getBytes()));
+    m.put(MetadataTable.BULKFILE_COLUMN_FAMILY, new Text("/someFile"), new Value("5".getBytes()));
     violations = mc.check(null, m);
     assertNotNull(violations);
     assertEquals(1, violations.size());
@@ -171,10 +171,10 @@ public class MetadataConstraintsTest {
     
     // two active txids w/ files
     m = new Mutation(new Text("0;foo"));
-    m.put(Constants.METADATA_BULKFILE_COLUMN_FAMILY, new Text("/someFile"), new Value("5".getBytes()));
-    m.put(Constants.METADATA_DATAFILE_COLUMN_FAMILY, new Text("/someFile"), new Value("1,1".getBytes()));
-    m.put(Constants.METADATA_BULKFILE_COLUMN_FAMILY, new Text("/someFile2"), new Value("7".getBytes()));
-    m.put(Constants.METADATA_DATAFILE_COLUMN_FAMILY, new Text("/someFile2"), new Value("1,1".getBytes()));
+    m.put(MetadataTable.BULKFILE_COLUMN_FAMILY, new Text("/someFile"), new Value("5".getBytes()));
+    m.put(MetadataTable.DATAFILE_COLUMN_FAMILY, new Text("/someFile"), new Value("1,1".getBytes()));
+    m.put(MetadataTable.BULKFILE_COLUMN_FAMILY, new Text("/someFile2"), new Value("7".getBytes()));
+    m.put(MetadataTable.DATAFILE_COLUMN_FAMILY, new Text("/someFile2"), new Value("1,1".getBytes()));
     violations = mc.check(null, m);
     assertNotNull(violations);
     assertEquals(1, violations.size());
@@ -182,18 +182,18 @@ public class MetadataConstraintsTest {
 
     // two files w/ one active txid
     m = new Mutation(new Text("0;foo"));
-    m.put(Constants.METADATA_BULKFILE_COLUMN_FAMILY, new Text("/someFile"), new Value("5".getBytes()));
-    m.put(Constants.METADATA_DATAFILE_COLUMN_FAMILY, new Text("/someFile"), new Value("1,1".getBytes()));
-    m.put(Constants.METADATA_BULKFILE_COLUMN_FAMILY, new Text("/someFile2"), new Value("5".getBytes()));
-    m.put(Constants.METADATA_DATAFILE_COLUMN_FAMILY, new Text("/someFile2"), new Value("1,1".getBytes()));
+    m.put(MetadataTable.BULKFILE_COLUMN_FAMILY, new Text("/someFile"), new Value("5".getBytes()));
+    m.put(MetadataTable.DATAFILE_COLUMN_FAMILY, new Text("/someFile"), new Value("1,1".getBytes()));
+    m.put(MetadataTable.BULKFILE_COLUMN_FAMILY, new Text("/someFile2"), new Value("5".getBytes()));
+    m.put(MetadataTable.DATAFILE_COLUMN_FAMILY, new Text("/someFile2"), new Value("1,1".getBytes()));
     violations = mc.check(null, m);
     assertNull(violations);
 
     // two loaded w/ one active txid and one file
     m = new Mutation(new Text("0;foo"));
-    m.put(Constants.METADATA_BULKFILE_COLUMN_FAMILY, new Text("/someFile"), new Value("5".getBytes()));
-    m.put(Constants.METADATA_DATAFILE_COLUMN_FAMILY, new Text("/someFile"), new Value("1,1".getBytes()));
-    m.put(Constants.METADATA_BULKFILE_COLUMN_FAMILY, new Text("/someFile2"), new Value("5".getBytes()));
+    m.put(MetadataTable.BULKFILE_COLUMN_FAMILY, new Text("/someFile"), new Value("5".getBytes()));
+    m.put(MetadataTable.DATAFILE_COLUMN_FAMILY, new Text("/someFile"), new Value("1,1".getBytes()));
+    m.put(MetadataTable.BULKFILE_COLUMN_FAMILY, new Text("/someFile2"), new Value("5".getBytes()));
     violations = mc.check(null, m);
     assertNotNull(violations);
     assertEquals(1, violations.size());
@@ -201,35 +201,35 @@ public class MetadataConstraintsTest {
 
     // active txid, mutation that looks like split
     m = new Mutation(new Text("0;foo"));
-    m.put(Constants.METADATA_BULKFILE_COLUMN_FAMILY, new Text("/someFile"), new Value("5".getBytes()));
-    Constants.METADATA_DIRECTORY_COLUMN.put(m, new Value("/t1".getBytes()));
+    m.put(MetadataTable.BULKFILE_COLUMN_FAMILY, new Text("/someFile"), new Value("5".getBytes()));
+    MetadataTable.DIRECTORY_COLUMN.put(m, new Value("/t1".getBytes()));
     violations = mc.check(null, m);
     assertNull(violations);
     
     // inactive txid, mutation that looks like split
     m = new Mutation(new Text("0;foo"));
-    m.put(Constants.METADATA_BULKFILE_COLUMN_FAMILY, new Text("/someFile"), new Value("12345".getBytes()));
-    Constants.METADATA_DIRECTORY_COLUMN.put(m, new Value("/t1".getBytes()));
+    m.put(MetadataTable.BULKFILE_COLUMN_FAMILY, new Text("/someFile"), new Value("12345".getBytes()));
+    MetadataTable.DIRECTORY_COLUMN.put(m, new Value("/t1".getBytes()));
     violations = mc.check(null, m);
     assertNull(violations);
     
     // active txid, mutation that looks like a load
     m = new Mutation(new Text("0;foo"));
-    m.put(Constants.METADATA_BULKFILE_COLUMN_FAMILY, new Text("/someFile"), new Value("5".getBytes()));
-    m.put(Constants.METADATA_CURRENT_LOCATION_COLUMN_FAMILY, new Text("789"), new Value("127.0.0.1:9997".getBytes()));
+    m.put(MetadataTable.BULKFILE_COLUMN_FAMILY, new Text("/someFile"), new Value("5".getBytes()));
+    m.put(MetadataTable.CURRENT_LOCATION_COLUMN_FAMILY, new Text("789"), new Value("127.0.0.1:9997".getBytes()));
     violations = mc.check(null, m);
     assertNull(violations);
     
     // inactive txid, mutation that looks like a load
     m = new Mutation(new Text("0;foo"));
-    m.put(Constants.METADATA_BULKFILE_COLUMN_FAMILY, new Text("/someFile"), new Value("12345".getBytes()));
-    m.put(Constants.METADATA_CURRENT_LOCATION_COLUMN_FAMILY, new Text("789"), new Value("127.0.0.1:9997".getBytes()));
+    m.put(MetadataTable.BULKFILE_COLUMN_FAMILY, new Text("/someFile"), new Value("12345".getBytes()));
+    m.put(MetadataTable.CURRENT_LOCATION_COLUMN_FAMILY, new Text("789"), new Value("127.0.0.1:9997".getBytes()));
     violations = mc.check(null, m);
     assertNull(violations);
     
     // deleting a load flag
     m = new Mutation(new Text("0;foo"));
-    m.putDelete(Constants.METADATA_BULKFILE_COLUMN_FAMILY, new Text("/someFile"));
+    m.putDelete(MetadataTable.BULKFILE_COLUMN_FAMILY, new Text("/someFile"));
     violations = mc.check(null, m);
     assertNull(violations);
 

Modified: accumulo/branches/ACCUMULO-CURATOR/server/src/test/java/org/apache/accumulo/server/gc/TestConfirmDeletes.java
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-CURATOR/server/src/test/java/org/apache/accumulo/server/gc/TestConfirmDeletes.java?rev=1496226&r1=1496225&r2=1496226&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-CURATOR/server/src/test/java/org/apache/accumulo/server/gc/TestConfirmDeletes.java (original)
+++ accumulo/branches/ACCUMULO-CURATOR/server/src/test/java/org/apache/accumulo/server/gc/TestConfirmDeletes.java Mon Jun 24 21:34:20 2013
@@ -21,7 +21,6 @@ import java.util.Map.Entry;
 import java.util.SortedSet;
 import java.util.TreeSet;
 
-import org.apache.accumulo.core.Constants;
 import org.apache.accumulo.core.client.BatchWriter;
 import org.apache.accumulo.core.client.BatchWriterConfig;
 import org.apache.accumulo.core.client.Connector;
@@ -32,10 +31,12 @@ import org.apache.accumulo.core.client.s
 import org.apache.accumulo.core.data.Key;
 import org.apache.accumulo.core.data.Mutation;
 import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.security.Authorizations;
 import org.apache.accumulo.core.security.CredentialHelper;
 import org.apache.accumulo.core.security.thrift.TCredentials;
-import org.apache.accumulo.core.util.CachedConfiguration;
-import org.apache.hadoop.fs.FileSystem;
+import org.apache.accumulo.core.util.MetadataTable;
+import org.apache.accumulo.server.fs.VolumeManager;
+import org.apache.accumulo.server.fs.VolumeManagerImpl;
 import org.apache.hadoop.io.Text;
 import org.junit.Assert;
 import org.junit.Test;
@@ -93,7 +94,7 @@ public class TestConfirmDeletes {
     TCredentials auth = CredentialHelper.create("root", new PasswordToken(new byte[0]), "instance");
     
     Instance instance = new MockInstance();
-    FileSystem fs = FileSystem.getLocal(CachedConfiguration.getInstance());
+    VolumeManager fs = VolumeManagerImpl.getLocal();
     
     load(instance, metadata, deletes);
     
@@ -108,8 +109,8 @@ public class TestConfirmDeletes {
   private void load(Instance instance, String[] metadata, String[] deletes) throws Exception {
     TCredentials credential = CredentialHelper.create("root", new PasswordToken(new byte[0]), "instance");
     
-    Scanner scanner = instance.getConnector(credential.getPrincipal(), CredentialHelper.extractToken(credential)).createScanner(Constants.METADATA_TABLE_NAME,
-        Constants.NO_AUTHS);
+    Scanner scanner = instance.getConnector(credential.getPrincipal(), CredentialHelper.extractToken(credential)).createScanner(MetadataTable.NAME,
+        Authorizations.EMPTY);
     int count = 0;
     for (@SuppressWarnings("unused")
     Entry<Key,Value> entry : scanner) {
@@ -120,7 +121,7 @@ public class TestConfirmDeletes {
     Assert.assertEquals(0, count);
     
     Connector conn = instance.getConnector(credential.getPrincipal(), CredentialHelper.extractToken(credential));
-    BatchWriter bw = conn.createBatchWriter(Constants.METADATA_TABLE_NAME, new BatchWriterConfig());
+    BatchWriter bw = conn.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
     for (String line : metadata) {
       String[] parts = line.split(" ");
       String[] columnParts = parts[1].split(":");

Modified: accumulo/branches/ACCUMULO-CURATOR/server/src/test/java/org/apache/accumulo/server/iterators/MetadataBulkLoadFilterTest.java
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-CURATOR/server/src/test/java/org/apache/accumulo/server/iterators/MetadataBulkLoadFilterTest.java?rev=1496226&r1=1496225&r2=1496226&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-CURATOR/server/src/test/java/org/apache/accumulo/server/iterators/MetadataBulkLoadFilterTest.java (original)
+++ accumulo/branches/ACCUMULO-CURATOR/server/src/test/java/org/apache/accumulo/server/iterators/MetadataBulkLoadFilterTest.java Mon Jun 24 21:34:20 2013
@@ -23,7 +23,6 @@ import java.util.TreeMap;
 
 import org.junit.Assert;
 
-import org.apache.accumulo.core.Constants;
 import org.apache.accumulo.core.conf.AccumuloConfiguration;
 import org.apache.accumulo.core.data.ByteSequence;
 import org.apache.accumulo.core.data.Key;
@@ -34,6 +33,7 @@ import org.apache.accumulo.core.iterator
 import org.apache.accumulo.core.iterators.SortedKeyValueIterator;
 import org.apache.accumulo.core.iterators.SortedMapIterator;
 import org.apache.accumulo.core.util.ColumnFQ;
+import org.apache.accumulo.core.util.MetadataTable;
 import org.apache.accumulo.fate.zookeeper.TransactionWatcher.Arbitrator;
 import org.apache.hadoop.io.Text;
 import org.junit.Test;
@@ -83,24 +83,24 @@ public class MetadataBulkLoadFilterTest 
     TreeMap<Key,Value> expected = new TreeMap<Key,Value>();
     
     // following should not be deleted by filter
-    put(tm1, "2;m", Constants.METADATA_DIRECTORY_COLUMN, "/t1");
-    put(tm1, "2;m", Constants.METADATA_DATAFILE_COLUMN_FAMILY, "/t1/file1", "1,1");
-    put(tm1, "2;m", Constants.METADATA_BULKFILE_COLUMN_FAMILY, "/t1/file1", "5");
-    put(tm1, "2;m", Constants.METADATA_BULKFILE_COLUMN_FAMILY, "/t1/file3", "7");
-    put(tm1, "2;m", Constants.METADATA_BULKFILE_COLUMN_FAMILY, "/t1/file4", "9");
-    put(tm1, "2<", Constants.METADATA_DIRECTORY_COLUMN, "/t2");
-    put(tm1, "2<", Constants.METADATA_DATAFILE_COLUMN_FAMILY, "/t2/file2", "1,1");
-    put(tm1, "2<", Constants.METADATA_BULKFILE_COLUMN_FAMILY, "/t2/file6", "5");
-    put(tm1, "2<", Constants.METADATA_BULKFILE_COLUMN_FAMILY, "/t2/file7", "7");
-    put(tm1, "2<", Constants.METADATA_BULKFILE_COLUMN_FAMILY, "/t2/file8", "9");
-    put(tm1, "2<", Constants.METADATA_BULKFILE_COLUMN_FAMILY, "/t2/fileC", null);
+    put(tm1, "2;m", MetadataTable.DIRECTORY_COLUMN, "/t1");
+    put(tm1, "2;m", MetadataTable.DATAFILE_COLUMN_FAMILY, "/t1/file1", "1,1");
+    put(tm1, "2;m", MetadataTable.BULKFILE_COLUMN_FAMILY, "/t1/file1", "5");
+    put(tm1, "2;m", MetadataTable.BULKFILE_COLUMN_FAMILY, "/t1/file3", "7");
+    put(tm1, "2;m", MetadataTable.BULKFILE_COLUMN_FAMILY, "/t1/file4", "9");
+    put(tm1, "2<", MetadataTable.DIRECTORY_COLUMN, "/t2");
+    put(tm1, "2<", MetadataTable.DATAFILE_COLUMN_FAMILY, "/t2/file2", "1,1");
+    put(tm1, "2<", MetadataTable.BULKFILE_COLUMN_FAMILY, "/t2/file6", "5");
+    put(tm1, "2<", MetadataTable.BULKFILE_COLUMN_FAMILY, "/t2/file7", "7");
+    put(tm1, "2<", MetadataTable.BULKFILE_COLUMN_FAMILY, "/t2/file8", "9");
+    put(tm1, "2<", MetadataTable.BULKFILE_COLUMN_FAMILY, "/t2/fileC", null);
     
     expected.putAll(tm1);
 
     // the following should be deleted by filter
-    put(tm1, "2;m", Constants.METADATA_BULKFILE_COLUMN_FAMILY, "/t1/file5", "8");
-    put(tm1, "2<", Constants.METADATA_BULKFILE_COLUMN_FAMILY, "/t2/file9", "8");
-    put(tm1, "2<", Constants.METADATA_BULKFILE_COLUMN_FAMILY, "/t2/fileA", "2");
+    put(tm1, "2;m", MetadataTable.BULKFILE_COLUMN_FAMILY, "/t1/file5", "8");
+    put(tm1, "2<", MetadataTable.BULKFILE_COLUMN_FAMILY, "/t2/file9", "8");
+    put(tm1, "2<", MetadataTable.BULKFILE_COLUMN_FAMILY, "/t2/fileA", "2");
     
     TestMetadataBulkLoadFilter iter = new TestMetadataBulkLoadFilter();
     iter.init(new SortedMapIterator(tm1), new HashMap<String,String>(), new IteratorEnvironment() {

Modified: accumulo/branches/ACCUMULO-CURATOR/server/src/test/java/org/apache/accumulo/server/master/TestMergeState.java
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-CURATOR/server/src/test/java/org/apache/accumulo/server/master/TestMergeState.java?rev=1496226&r1=1496225&r2=1496226&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-CURATOR/server/src/test/java/org/apache/accumulo/server/master/TestMergeState.java (original)
+++ accumulo/branches/ACCUMULO-CURATOR/server/src/test/java/org/apache/accumulo/server/master/TestMergeState.java Mon Jun 24 21:34:20 2013
@@ -21,7 +21,6 @@ import java.util.Collection;
 import java.util.Collections;
 import java.util.Set;
 
-import org.apache.accumulo.core.Constants;
 import org.apache.accumulo.core.client.BatchDeleter;
 import org.apache.accumulo.core.client.BatchWriter;
 import org.apache.accumulo.core.client.BatchWriterConfig;
@@ -35,8 +34,10 @@ import org.apache.accumulo.core.data.Key
 import org.apache.accumulo.core.data.Mutation;
 import org.apache.accumulo.core.data.Range;
 import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.security.Authorizations;
 import org.apache.accumulo.core.security.CredentialHelper;
 import org.apache.accumulo.core.security.thrift.TCredentials;
+import org.apache.accumulo.core.util.MetadataTable;
 import org.apache.accumulo.server.master.state.Assignment;
 import org.apache.accumulo.server.master.state.CurrentState;
 import org.apache.accumulo.server.master.state.MergeInfo;
@@ -81,7 +82,7 @@ public class TestMergeState {
   }
   
   private static void update(Connector c, Mutation m) throws TableNotFoundException, MutationsRejectedException {
-    BatchWriter bw = c.createBatchWriter(Constants.METADATA_TABLE_NAME, new BatchWriterConfig());
+    BatchWriter bw = c.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
     bw.addMutation(m);
     bw.close();
   }
@@ -100,14 +101,14 @@ public class TestMergeState {
     for (String s : splits) {
       Text split = new Text(s);
       Mutation prevRow = KeyExtent.getPrevRowUpdateMutation(new KeyExtent(tableId, split, pr));
-      prevRow.put(Constants.METADATA_CURRENT_LOCATION_COLUMN_FAMILY, new Text("123456"), new Value("127.0.0.1:1234".getBytes()));
-      Constants.METADATA_CHOPPED_COLUMN.put(prevRow, new Value("junk".getBytes()));
+      prevRow.put(MetadataTable.CURRENT_LOCATION_COLUMN_FAMILY, new Text("123456"), new Value("127.0.0.1:1234".getBytes()));
+      MetadataTable.CHOPPED_COLUMN.put(prevRow, new Value("junk".getBytes()));
       bw.addMutation(prevRow);
       pr = split;
     }
     // Add the default tablet
     Mutation defaultTablet = KeyExtent.getPrevRowUpdateMutation(new KeyExtent(tableId, null, pr));
-    defaultTablet.put(Constants.METADATA_CURRENT_LOCATION_COLUMN_FAMILY, new Text("123456"), new Value("127.0.0.1:1234".getBytes()));
+    defaultTablet.put(MetadataTable.CURRENT_LOCATION_COLUMN_FAMILY, new Text("123456"), new Value("127.0.0.1:1234".getBytes()));
     bw.addMutation(defaultTablet);
     bw.close();
     
@@ -127,8 +128,8 @@ public class TestMergeState {
     // Create the hole
     // Split the tablet at one end of the range
     Mutation m = new KeyExtent(tableId, new Text("t"), new Text("p")).getPrevRowUpdateMutation();
-    Constants.METADATA_SPLIT_RATIO_COLUMN.put(m, new Value("0.5".getBytes()));
-    Constants.METADATA_OLD_PREV_ROW_COLUMN.put(m, KeyExtent.encodePrevEndRow(new Text("o")));
+    MetadataTable.SPLIT_RATIO_COLUMN.put(m, new Value("0.5".getBytes()));
+    MetadataTable.OLD_PREV_ROW_COLUMN.put(m, KeyExtent.encodePrevEndRow(new Text("o")));
     update(connector, m);
     
     // do the state check
@@ -137,8 +138,8 @@ public class TestMergeState {
     Assert.assertEquals(MergeState.WAITING_FOR_OFFLINE, newState);
     
     // unassign the tablets
-    BatchDeleter deleter = connector.createBatchDeleter("!METADATA", Constants.NO_AUTHS, 1000, new BatchWriterConfig());
-    deleter.fetchColumnFamily(Constants.METADATA_CURRENT_LOCATION_COLUMN_FAMILY);
+    BatchDeleter deleter = connector.createBatchDeleter("!METADATA", Authorizations.EMPTY, 1000, new BatchWriterConfig());
+    deleter.fetchColumnFamily(MetadataTable.CURRENT_LOCATION_COLUMN_FAMILY);
     deleter.setRanges(Collections.singletonList(new Range()));
     deleter.delete();
     
@@ -149,7 +150,7 @@ public class TestMergeState {
     // finish the split
     KeyExtent tablet = new KeyExtent(tableId, new Text("p"), new Text("o"));
     m = tablet.getPrevRowUpdateMutation();
-    Constants.METADATA_SPLIT_RATIO_COLUMN.put(m, new Value("0.5".getBytes()));
+    MetadataTable.SPLIT_RATIO_COLUMN.put(m, new Value("0.5".getBytes()));
     update(connector, m);
     metaDataStateStore.setLocations(Collections.singletonList(new Assignment(tablet, state.someTServer)));
     
@@ -159,7 +160,7 @@ public class TestMergeState {
     
     // chop it
     m = tablet.getPrevRowUpdateMutation();
-    Constants.METADATA_CHOPPED_COLUMN.put(m, new Value("junk".getBytes()));
+    MetadataTable.CHOPPED_COLUMN.put(m, new Value("junk".getBytes()));
     update(connector, m);
     
     stats = scan(state, metaDataStateStore);

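The hunks above all follow one migration pattern: the aggregated Constants.METADATA_* references move to the new MetadataTable class, and Constants.NO_AUTHS becomes Authorizations.EMPTY. A minimal sketch of the resulting call shapes, assuming a Connector from the test's existing scaffolding:

    import org.apache.accumulo.core.client.BatchWriter;
    import org.apache.accumulo.core.client.BatchWriterConfig;
    import org.apache.accumulo.core.client.Connector;
    import org.apache.accumulo.core.client.Scanner;
    import org.apache.accumulo.core.data.Mutation;
    import org.apache.accumulo.core.data.Value;
    import org.apache.accumulo.core.security.Authorizations;
    import org.apache.accumulo.core.util.MetadataTable;

    class MetadataAccessSketch {
      // Scanner over the metadata table, now addressed by constant and
      // scanned with the explicit empty-authorizations singleton.
      static Scanner metadataScanner(Connector conn) throws Exception {
        return conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
      }

      // Writer against the same table; the relocated ColumnFQ constants
      // still fill in family and qualifier for the caller.
      static void markChopped(Connector conn, Mutation m) throws Exception {
        MetadataTable.CHOPPED_COLUMN.put(m, new Value("junk".getBytes()));
        BatchWriter bw = conn.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
        bw.addMutation(m);
        bw.close();
      }
    }
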
Modified: accumulo/branches/ACCUMULO-CURATOR/server/src/test/java/org/apache/accumulo/server/master/state/MergeInfoTest.java
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-CURATOR/server/src/test/java/org/apache/accumulo/server/master/state/MergeInfoTest.java?rev=1496226&r1=1496225&r2=1496226&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-CURATOR/server/src/test/java/org/apache/accumulo/server/master/state/MergeInfoTest.java (original)
+++ accumulo/branches/ACCUMULO-CURATOR/server/src/test/java/org/apache/accumulo/server/master/state/MergeInfoTest.java Mon Jun 24 21:34:20 2013
@@ -34,7 +34,7 @@ public class MergeInfoTest {
     in.reset(buffer.getData(), 0, buffer.getLength());
     MergeInfo info2 = new MergeInfo();
     info2.readFields(in);
-    Assert.assertEquals(info.range, info2.range);
+    Assert.assertEquals(info.extent, info2.extent);
     Assert.assertEquals(info.state, info2.state);
     Assert.assertEquals(info.operation, info2.operation);
     return info2;

Modified: accumulo/branches/ACCUMULO-CURATOR/server/src/test/java/org/apache/accumulo/server/master/state/RootTabletStateStoreTest.java
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-CURATOR/server/src/test/java/org/apache/accumulo/server/master/state/RootTabletStateStoreTest.java?rev=1496226&r1=1496225&r2=1496226&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-CURATOR/server/src/test/java/org/apache/accumulo/server/master/state/RootTabletStateStoreTest.java (original)
+++ accumulo/branches/ACCUMULO-CURATOR/server/src/test/java/org/apache/accumulo/server/master/state/RootTabletStateStoreTest.java Mon Jun 24 21:34:20 2013
@@ -27,9 +27,9 @@ import java.util.Collections;
 import java.util.HashSet;
 import java.util.List;
 
-import org.apache.accumulo.core.Constants;
 import org.apache.accumulo.core.data.KeyExtent;
 import org.apache.accumulo.core.util.AddressUtil;
+import org.apache.accumulo.core.util.RootTable;
 import org.apache.accumulo.server.master.state.TabletLocationState.BadLocationStateException;
 import org.apache.hadoop.io.Text;
 import org.junit.Assert;
@@ -146,7 +146,7 @@ public class RootTabletStateStoreTest {
   @Test
   public void testRootTabletStateStore() throws DistributedStoreException {
     ZooTabletStateStore tstore = new ZooTabletStateStore(new FakeZooStore());
-    KeyExtent root = Constants.ROOT_TABLET_EXTENT;
+    KeyExtent root = RootTable.EXTENT;
     String sessionId = "this is my unique session data";
     TServerInstance server = new TServerInstance(AddressUtil.parseAddress("127.0.0.1", 10000), sessionId);
     List<Assignment> assignments = Collections.singletonList(new Assignment(root, server));

Modified: accumulo/branches/ACCUMULO-CURATOR/server/src/test/java/org/apache/accumulo/server/tabletserver/CheckTabletMetadataTest.java
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-CURATOR/server/src/test/java/org/apache/accumulo/server/tabletserver/CheckTabletMetadataTest.java?rev=1496226&r1=1496225&r2=1496226&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-CURATOR/server/src/test/java/org/apache/accumulo/server/tabletserver/CheckTabletMetadataTest.java (original)
+++ accumulo/branches/ACCUMULO-CURATOR/server/src/test/java/org/apache/accumulo/server/tabletserver/CheckTabletMetadataTest.java Mon Jun 24 21:34:20 2013
@@ -19,11 +19,11 @@ package org.apache.accumulo.server.table
 
 import java.util.TreeMap;
 
-import org.apache.accumulo.core.Constants;
 import org.apache.accumulo.core.data.Key;
 import org.apache.accumulo.core.data.KeyExtent;
 import org.apache.accumulo.core.data.Value;
 import org.apache.accumulo.core.util.ColumnFQ;
+import org.apache.accumulo.core.util.MetadataTable;
 import org.apache.accumulo.server.master.state.TServerInstance;
 import org.apache.hadoop.io.Text;
 import org.junit.Assert;
@@ -75,10 +75,10 @@ public class CheckTabletMetadataTest {
     
     TreeMap<Key,Value> tabletMeta = new TreeMap<Key,Value>();
     
-    put(tabletMeta, "1<", Constants.METADATA_PREV_ROW_COLUMN, KeyExtent.encodePrevEndRow(null).get());
-    put(tabletMeta, "1<", Constants.METADATA_DIRECTORY_COLUMN, "/t1".getBytes());
-    put(tabletMeta, "1<", Constants.METADATA_TIME_COLUMN, "M0".getBytes());
-    put(tabletMeta, "1<", Constants.METADATA_FUTURE_LOCATION_COLUMN_FAMILY, "4", "127.0.0.1:9997");
+    put(tabletMeta, "1<", MetadataTable.PREV_ROW_COLUMN, KeyExtent.encodePrevEndRow(null).get());
+    put(tabletMeta, "1<", MetadataTable.DIRECTORY_COLUMN, "/t1".getBytes());
+    put(tabletMeta, "1<", MetadataTable.TIME_COLUMN, "M0".getBytes());
+    put(tabletMeta, "1<", MetadataTable.FUTURE_LOCATION_COLUMN_FAMILY, "4", "127.0.0.1:9997");
     
     TServerInstance tsi = new TServerInstance("127.0.0.1:9997", 4);
     
@@ -94,27 +94,27 @@ public class CheckTabletMetadataTest {
     
     assertFail(tabletMeta, new KeyExtent(new Text("1"), new Text("r"), new Text("m")), tsi);
     
-    assertFail(tabletMeta, ke, tsi, nk("1<", Constants.METADATA_PREV_ROW_COLUMN));
+    assertFail(tabletMeta, ke, tsi, nk("1<", MetadataTable.PREV_ROW_COLUMN));
 
-    assertFail(tabletMeta, ke, tsi, nk("1<", Constants.METADATA_DIRECTORY_COLUMN));
+    assertFail(tabletMeta, ke, tsi, nk("1<", MetadataTable.DIRECTORY_COLUMN));
     
-    assertFail(tabletMeta, ke, tsi, nk("1<", Constants.METADATA_TIME_COLUMN));
+    assertFail(tabletMeta, ke, tsi, nk("1<", MetadataTable.TIME_COLUMN));
     
-    assertFail(tabletMeta, ke, tsi, nk("1<", Constants.METADATA_FUTURE_LOCATION_COLUMN_FAMILY, "4"));
+    assertFail(tabletMeta, ke, tsi, nk("1<", MetadataTable.FUTURE_LOCATION_COLUMN_FAMILY, "4"));
     
     TreeMap<Key,Value> copy = new TreeMap<Key,Value>(tabletMeta);
-    put(copy, "1<", Constants.METADATA_CURRENT_LOCATION_COLUMN_FAMILY, "4", "127.0.0.1:9997");
+    put(copy, "1<", MetadataTable.CURRENT_LOCATION_COLUMN_FAMILY, "4", "127.0.0.1:9997");
     assertFail(copy, ke, tsi);
-    assertFail(copy, ke, tsi, nk("1<", Constants.METADATA_FUTURE_LOCATION_COLUMN_FAMILY, "4"));
+    assertFail(copy, ke, tsi, nk("1<", MetadataTable.FUTURE_LOCATION_COLUMN_FAMILY, "4"));
     
     copy = new TreeMap<Key,Value>(tabletMeta);
-    put(copy, "1<", Constants.METADATA_CURRENT_LOCATION_COLUMN_FAMILY, "5", "127.0.0.1:9998");
+    put(copy, "1<", MetadataTable.CURRENT_LOCATION_COLUMN_FAMILY, "5", "127.0.0.1:9998");
     assertFail(copy, ke, tsi);
-    put(copy, "1<", Constants.METADATA_CURRENT_LOCATION_COLUMN_FAMILY, "6", "127.0.0.1:9999");
+    put(copy, "1<", MetadataTable.CURRENT_LOCATION_COLUMN_FAMILY, "6", "127.0.0.1:9999");
     assertFail(copy, ke, tsi);
     
     copy = new TreeMap<Key,Value>(tabletMeta);
-    put(copy, "1<", Constants.METADATA_FUTURE_LOCATION_COLUMN_FAMILY, "5", "127.0.0.1:9998");
+    put(copy, "1<", MetadataTable.FUTURE_LOCATION_COLUMN_FAMILY, "5", "127.0.0.1:9998");
     assertFail(copy, ke, tsi);
     
     assertFail(new TreeMap<Key,Value>(), ke, tsi);

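The fixtures above show what a minimally valid tablet row needs before the metadata check passes: prev-row, directory, and time cells, each addressed through the relocated ColumnFQ constants. A hedged sketch of building such an entry; the row literal and values mirror the diff:

    import org.apache.accumulo.core.data.KeyExtent;
    import org.apache.accumulo.core.data.Mutation;
    import org.apache.accumulo.core.data.Value;
    import org.apache.accumulo.core.util.MetadataTable;
    import org.apache.hadoop.io.Text;

    class TabletFixtureSketch {
      // "1<" is the metadata row for the default tablet of table id 1.
      static Mutation minimalTablet() {
        Mutation m = new Mutation(new Text("1<"));
        MetadataTable.PREV_ROW_COLUMN.put(m, KeyExtent.encodePrevEndRow(null));
        MetadataTable.DIRECTORY_COLUMN.put(m, new Value("/t1".getBytes()));
        MetadataTable.TIME_COLUMN.put(m, new Value("M0".getBytes()));
        return m;
      }
    }
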
Modified: accumulo/branches/ACCUMULO-CURATOR/server/src/test/java/org/apache/accumulo/server/tabletserver/log/MultiReaderTest.java
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-CURATOR/server/src/test/java/org/apache/accumulo/server/tabletserver/log/MultiReaderTest.java?rev=1496226&r1=1496225&r2=1496226&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-CURATOR/server/src/test/java/org/apache/accumulo/server/tabletserver/log/MultiReaderTest.java (original)
+++ accumulo/branches/ACCUMULO-CURATOR/server/src/test/java/org/apache/accumulo/server/tabletserver/log/MultiReaderTest.java Mon Jun 24 21:34:20 2013
@@ -16,13 +16,14 @@
  */
 package org.apache.accumulo.server.tabletserver.log;
 
-import static org.junit.Assert.*;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
 
 import java.io.IOException;
 
-import org.apache.accumulo.core.util.CachedConfiguration;
-import org.apache.accumulo.server.tabletserver.log.MultiReader;
-import org.apache.hadoop.conf.Configuration;
+import org.apache.accumulo.server.fs.VolumeManager;
+import org.apache.accumulo.server.fs.VolumeManagerImpl;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.io.BytesWritable;
@@ -33,27 +34,31 @@ import org.apache.log4j.Logger;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
+import org.junit.rules.TemporaryFolder;
 
 public class MultiReaderTest {
   
-  Configuration conf = CachedConfiguration.getInstance();
-  FileSystem fs;
+  VolumeManager fs;
+  TemporaryFolder root = new TemporaryFolder();
   
   @Before
   public void setUp() throws Exception {
     // quiet log messages about compress.CodecPool
     Logger.getRootLogger().setLevel(Level.ERROR);
-    fs = FileSystem.getLocal(conf);
-    Path root = new Path("manyMaps");
+    fs = VolumeManagerImpl.getLocal();
+    root.create();
+    String path = root.getRoot().getAbsolutePath();
+    Path root = new Path("file://" + path + "/manyMaps");
     fs.mkdirs(root);
     fs.create(new Path(root, "finished")).close();
-    Writer writer = new Writer(conf, fs, "manyMaps/odd", IntWritable.class, BytesWritable.class);
+    FileSystem ns = fs.getDefaultVolume();
+    Writer writer = new Writer(ns.getConf(), ns, new Path(root, "odd").toString(), IntWritable.class, BytesWritable.class);
     BytesWritable value = new BytesWritable("someValue".getBytes());
     for (int i = 1; i < 1000; i += 2) {
       writer.append(new IntWritable(i), value);
     }
     writer.close();
-    writer = new Writer(conf, fs, "manyMaps/even", IntWritable.class, BytesWritable.class);
+    writer = new Writer(ns.getConf(), ns, new Path(root, "even").toString(), IntWritable.class, BytesWritable.class);
     for (int i = 0; i < 1000; i += 2) {
       if (i == 10)
         continue;
@@ -64,8 +69,7 @@ public class MultiReaderTest {
   
   @After
   public void tearDown() throws Exception {
-    if (fs != null)
-      fs.delete(new Path("manyMaps"), true);
+    root.delete();
   }
   
   private void scan(MultiReader reader, int start) throws IOException {
@@ -92,7 +96,8 @@ public class MultiReaderTest {
   
   @Test
   public void testMultiReader() throws IOException {
-    MultiReader reader = new MultiReader(fs, conf, "manyMaps");
+    Path manyMaps = new Path("file://" + root.getRoot().getAbsolutePath() + "/manyMaps");
+    MultiReader reader = new MultiReader(fs, manyMaps);
     IntWritable key = new IntWritable();
     BytesWritable value = new BytesWritable();
     
@@ -121,8 +126,8 @@ public class MultiReaderTest {
     assertEquals(0, key.get());
     reader.close();
     
-    fs.delete(new Path("manyMaps/even"), true);
-    reader = new MultiReader(fs, conf, "manyMaps");
+    fs.deleteRecursively(new Path(manyMaps, "even"));
+    reader = new MultiReader(fs, manyMaps);
     key.set(501);
     assertTrue(reader.seek(key));
     scanOdd(reader, 501);

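MultiReaderTest now goes through the new VolumeManager abstraction instead of holding a raw Hadoop FileSystem. A sketch of the pattern under an invented file:// path; getLocal(), mkdirs(), create(), and getFileSystemByPath() are the calls the diff itself uses:

    import org.apache.accumulo.server.fs.VolumeManager;
    import org.apache.accumulo.server.fs.VolumeManagerImpl;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    class VolumeLookupSketch {
      static void writeFinishedMarker() throws Exception {
        VolumeManager fs = VolumeManagerImpl.getLocal();
        Path dir = new Path("file:///tmp/manyMaps");   // hypothetical location
        fs.mkdirs(dir);
        fs.create(new Path(dir, "finished")).close();
        // Files may live on different volumes, so resolve the backing
        // FileSystem per path rather than caching a single instance.
        FileSystem ns = fs.getFileSystemByPath(new Path(dir, "finished"));
        System.out.println(ns.getUri());
      }
    }
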
Modified: accumulo/branches/ACCUMULO-CURATOR/server/src/test/java/org/apache/accumulo/server/tabletserver/log/SortedLogRecoveryTest.java
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-CURATOR/server/src/test/java/org/apache/accumulo/server/tabletserver/log/SortedLogRecoveryTest.java?rev=1496226&r1=1496225&r2=1496226&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-CURATOR/server/src/test/java/org/apache/accumulo/server/tabletserver/log/SortedLogRecoveryTest.java (original)
+++ accumulo/branches/ACCUMULO-CURATOR/server/src/test/java/org/apache/accumulo/server/tabletserver/log/SortedLogRecoveryTest.java Mon Jun 24 21:34:20 2013
@@ -29,28 +29,27 @@ import java.util.Collections;
 import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
+import java.util.Map.Entry;
 import java.util.Set;
 import java.util.TreeMap;
-import java.util.Map.Entry;
 
 import org.apache.accumulo.core.data.KeyExtent;
 import org.apache.accumulo.core.data.Mutation;
 import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.util.CachedConfiguration;
 import org.apache.accumulo.server.data.ServerMutation;
+import org.apache.accumulo.server.fs.VolumeManager;
+import org.apache.accumulo.server.fs.VolumeManagerImpl;
 import org.apache.accumulo.server.logger.LogEvents;
 import org.apache.accumulo.server.logger.LogFileKey;
 import org.apache.accumulo.server.logger.LogFileValue;
-import org.apache.accumulo.server.tabletserver.log.MutationReceiver;
-import org.apache.accumulo.server.tabletserver.log.SortedLogRecovery;
-import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.io.MapFile;
-import org.apache.hadoop.io.Text;
 import org.apache.hadoop.io.MapFile.Writer;
+import org.apache.hadoop.io.Text;
 import org.junit.Assert;
 import org.junit.Test;
+import org.junit.rules.TemporaryFolder;
 
 public class SortedLogRecoveryTest {
   
@@ -115,29 +114,31 @@ public class SortedLogRecoveryTest {
   }
   
   private static List<Mutation> recover(Map<String,KeyValue[]> logs, Set<String> files, KeyExtent extent) throws IOException {
-    final String workdir = "workdir";
-    Configuration conf = CachedConfiguration.getInstance();
-    FileSystem local = FileSystem.getLocal(conf).getRaw();
-    local.delete(new Path(workdir), true);
-    ArrayList<String> dirs = new ArrayList<String>();
+    TemporaryFolder root = new TemporaryFolder();
+    root.create();
+    final String workdir = "file://" + root.getRoot().getAbsolutePath() + "/workdir";
+    VolumeManager fs = VolumeManagerImpl.getLocal();
+    fs.deleteRecursively(new Path(workdir));
+    ArrayList<Path> dirs = new ArrayList<Path>();
     try {
       for (Entry<String,KeyValue[]> entry : logs.entrySet()) {
         String path = workdir + "/" + entry.getKey();
-        Writer map = new MapFile.Writer(conf, local, path + "/log1", LogFileKey.class, LogFileValue.class);
+        FileSystem ns = fs.getFileSystemByPath(new Path(path));
+        Writer map = new MapFile.Writer(ns.getConf(), ns, path + "/log1", LogFileKey.class, LogFileValue.class);
         for (KeyValue lfe : entry.getValue()) {
           map.append(lfe.key, lfe.value);
         }
         map.close();
-        local.create(new Path(path, "finished")).close();
-        dirs.add(path);
+        ns.create(new Path(path, "finished")).close();
+        dirs.add(new Path(path));
       }
       // Recover
-      SortedLogRecovery recovery = new SortedLogRecovery();
+      SortedLogRecovery recovery = new SortedLogRecovery(fs);
       CaptureMutations capture = new CaptureMutations();
       recovery.recover(extent, dirs, files, capture);
       return capture.result;
     } finally {
-      local.delete(new Path(workdir), true);
+      root.delete();
     }
   }
   

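Both log tests trade fixed relative scratch paths ("manyMaps", "workdir") for JUnit's TemporaryFolder, so each run gets an isolated directory that is removed afterwards. A minimal sketch of that lifecycle, mirroring the try/finally in recover(); the workdir suffix is illustrative:

    import org.apache.hadoop.fs.Path;
    import org.junit.rules.TemporaryFolder;

    class ScratchDirSketch {
      static void withScratchDir() throws Exception {
        TemporaryFolder root = new TemporaryFolder();
        root.create();                               // allocate a fresh directory
        try {
          Path workdir = new Path("file://" + root.getRoot().getAbsolutePath() + "/workdir");
          // ... write sorted log files under workdir and recover from them ...
          System.out.println(workdir);
        } finally {
          root.delete();                             // always clean up
        }
      }
    }
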
Modified: accumulo/branches/ACCUMULO-CURATOR/server/src/test/java/org/apache/accumulo/server/util/CloneTest.java
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-CURATOR/server/src/test/java/org/apache/accumulo/server/util/CloneTest.java?rev=1496226&r1=1496225&r2=1496226&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-CURATOR/server/src/test/java/org/apache/accumulo/server/util/CloneTest.java (original)
+++ accumulo/branches/ACCUMULO-CURATOR/server/src/test/java/org/apache/accumulo/server/util/CloneTest.java Mon Jun 24 21:34:20 2013
@@ -21,7 +21,6 @@ import java.util.Map.Entry;
 
 import junit.framework.TestCase;
 
-import org.apache.accumulo.core.Constants;
 import org.apache.accumulo.core.client.BatchWriter;
 import org.apache.accumulo.core.client.BatchWriterConfig;
 import org.apache.accumulo.core.client.Connector;
@@ -32,6 +31,7 @@ import org.apache.accumulo.core.data.Key
 import org.apache.accumulo.core.data.KeyExtent;
 import org.apache.accumulo.core.data.Mutation;
 import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.security.Authorizations;
 import org.apache.hadoop.io.Text;
 
 public class CloneTest extends TestCase {
@@ -43,16 +43,16 @@ public class CloneTest extends TestCase 
     KeyExtent ke = new KeyExtent(new Text("0"), null, null);
     Mutation mut = ke.getPrevRowUpdateMutation();
     
-    Constants.METADATA_TIME_COLUMN.put(mut, new Value("M0".getBytes()));
-    Constants.METADATA_DIRECTORY_COLUMN.put(mut, new Value("/default_tablet".getBytes()));
+    MetadataTable.TIME_COLUMN.put(mut, new Value("M0".getBytes()));
+    MetadataTable.DIRECTORY_COLUMN.put(mut, new Value("/default_tablet".getBytes()));
     
-    BatchWriter bw1 = conn.createBatchWriter(Constants.METADATA_TABLE_NAME, new BatchWriterConfig());
+    BatchWriter bw1 = conn.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
     
     bw1.addMutation(mut);
     
     bw1.close();
     
-    BatchWriter bw2 = conn.createBatchWriter(Constants.METADATA_TABLE_NAME, new BatchWriterConfig());
+    BatchWriter bw2 = conn.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
     
     MetadataTable.initializeClone("0", "1", conn, bw2);
     
@@ -71,23 +71,23 @@ public class CloneTest extends TestCase 
     KeyExtent ke = new KeyExtent(new Text("0"), null, null);
     Mutation mut = ke.getPrevRowUpdateMutation();
     
-    Constants.METADATA_TIME_COLUMN.put(mut, new Value("M0".getBytes()));
-    Constants.METADATA_DIRECTORY_COLUMN.put(mut, new Value("/default_tablet".getBytes()));
-    mut.put(Constants.METADATA_DATAFILE_COLUMN_FAMILY.toString(), "/default_tablet/0_0.rf", "1,200");
+    MetadataTable.TIME_COLUMN.put(mut, new Value("M0".getBytes()));
+    MetadataTable.DIRECTORY_COLUMN.put(mut, new Value("/default_tablet".getBytes()));
+    mut.put(MetadataTable.DATAFILE_COLUMN_FAMILY.toString(), "/default_tablet/0_0.rf", "1,200");
     
-    BatchWriter bw1 = conn.createBatchWriter(Constants.METADATA_TABLE_NAME, new BatchWriterConfig());
+    BatchWriter bw1 = conn.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
     
     bw1.addMutation(mut);
     
     bw1.flush();
     
-    BatchWriter bw2 = conn.createBatchWriter(Constants.METADATA_TABLE_NAME, new BatchWriterConfig());
+    BatchWriter bw2 = conn.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
     
     MetadataTable.initializeClone("0", "1", conn, bw2);
     
     Mutation mut2 = new Mutation(ke.getMetadataEntry());
-    mut2.putDelete(Constants.METADATA_DATAFILE_COLUMN_FAMILY.toString(), "/default_tablet/0_0.rf");
-    mut2.put(Constants.METADATA_DATAFILE_COLUMN_FAMILY.toString(), "/default_tablet/1_0.rf", "2,300");
+    mut2.putDelete(MetadataTable.DATAFILE_COLUMN_FAMILY.toString(), "/default_tablet/0_0.rf");
+    mut2.put(MetadataTable.DATAFILE_COLUMN_FAMILY.toString(), "/default_tablet/1_0.rf", "2,300");
     
     bw1.addMutation(mut2);
     bw1.flush();
@@ -100,13 +100,13 @@ public class CloneTest extends TestCase 
     
     assertEquals(0, rc);
     
-    Scanner scanner = conn.createScanner(Constants.METADATA_TABLE_NAME, Constants.NO_AUTHS);
+    Scanner scanner = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
     scanner.setRange(new KeyExtent(new Text("1"), null, null).toMetadataRange());
     
     HashSet<String> files = new HashSet<String>();
     
     for (Entry<Key,Value> entry : scanner) {
-      if (entry.getKey().getColumnFamily().equals(Constants.METADATA_DATAFILE_COLUMN_FAMILY))
+      if (entry.getKey().getColumnFamily().equals(MetadataTable.DATAFILE_COLUMN_FAMILY))
         files.add(entry.getKey().getColumnQualifier().toString());
     }
     
@@ -120,13 +120,13 @@ public class CloneTest extends TestCase 
     MockInstance mi = new MockInstance();
     Connector conn = mi.getConnector("", new PasswordToken(""));
     
-    BatchWriter bw1 = conn.createBatchWriter(Constants.METADATA_TABLE_NAME, new BatchWriterConfig());
+    BatchWriter bw1 = conn.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
     
     bw1.addMutation(createTablet("0", null, null, "/default_tablet", "/default_tablet/0_0.rf"));
     
     bw1.flush();
     
-    BatchWriter bw2 = conn.createBatchWriter(Constants.METADATA_TABLE_NAME, new BatchWriterConfig());
+    BatchWriter bw2 = conn.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
     
     MetadataTable.initializeClone("0", "1", conn, bw2);
     
@@ -139,14 +139,14 @@ public class CloneTest extends TestCase 
     
     assertEquals(0, rc);
     
-    Scanner scanner = conn.createScanner(Constants.METADATA_TABLE_NAME, Constants.NO_AUTHS);
+    Scanner scanner = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
     scanner.setRange(new KeyExtent(new Text("1"), null, null).toMetadataRange());
     
     HashSet<String> files = new HashSet<String>();
     
     int count = 0;
     for (Entry<Key,Value> entry : scanner) {
-      if (entry.getKey().getColumnFamily().equals(Constants.METADATA_DATAFILE_COLUMN_FAMILY)) {
+      if (entry.getKey().getColumnFamily().equals(MetadataTable.DATAFILE_COLUMN_FAMILY)) {
         files.add(entry.getKey().getColumnQualifier().toString());
         count++;
       }
@@ -162,19 +162,19 @@ public class CloneTest extends TestCase 
     MockInstance mi = new MockInstance();
     Connector conn = mi.getConnector("", new PasswordToken(""));
     
-    BatchWriter bw1 = conn.createBatchWriter(Constants.METADATA_TABLE_NAME, new BatchWriterConfig());
+    BatchWriter bw1 = conn.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
     
     bw1.addMutation(createTablet("0", null, null, "/default_tablet", "/default_tablet/0_0.rf"));
     
     bw1.flush();
     
-    BatchWriter bw2 = conn.createBatchWriter(Constants.METADATA_TABLE_NAME, new BatchWriterConfig());
+    BatchWriter bw2 = conn.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
     
     MetadataTable.initializeClone("0", "1", conn, bw2);
     
     bw1.addMutation(createTablet("0", "m", null, "/default_tablet", "/default_tablet/1_0.rf"));
     Mutation mut3 = createTablet("0", null, "m", "/t-1", "/default_tablet/1_0.rf");
-    mut3.putDelete(Constants.METADATA_DATAFILE_COLUMN_FAMILY.toString(), "/default_tablet/0_0.rf");
+    mut3.putDelete(MetadataTable.DATAFILE_COLUMN_FAMILY.toString(), "/default_tablet/0_0.rf");
     bw1.addMutation(mut3);
     
     bw1.flush();
@@ -187,7 +187,7 @@ public class CloneTest extends TestCase 
     
     assertEquals(0, rc);
     
-    Scanner scanner = conn.createScanner(Constants.METADATA_TABLE_NAME, Constants.NO_AUTHS);
+    Scanner scanner = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
     scanner.setRange(new KeyExtent(new Text("1"), null, null).toMetadataRange());
     
     HashSet<String> files = new HashSet<String>();
@@ -195,7 +195,7 @@ public class CloneTest extends TestCase 
     int count = 0;
     
     for (Entry<Key,Value> entry : scanner) {
-      if (entry.getKey().getColumnFamily().equals(Constants.METADATA_DATAFILE_COLUMN_FAMILY)) {
+      if (entry.getKey().getColumnFamily().equals(MetadataTable.DATAFILE_COLUMN_FAMILY)) {
         files.add(entry.getKey().getColumnQualifier().toString());
         count++;
       }
@@ -209,10 +209,10 @@ public class CloneTest extends TestCase 
   private static Mutation deleteTablet(String tid, String endRow, String prevRow, String dir, String file) throws Exception {
     KeyExtent ke = new KeyExtent(new Text(tid), endRow == null ? null : new Text(endRow), prevRow == null ? null : new Text(prevRow));
     Mutation mut = new Mutation(ke.getMetadataEntry());
-    Constants.METADATA_PREV_ROW_COLUMN.putDelete(mut);
-    Constants.METADATA_TIME_COLUMN.putDelete(mut);
-    Constants.METADATA_DIRECTORY_COLUMN.putDelete(mut);
-    mut.putDelete(Constants.METADATA_DATAFILE_COLUMN_FAMILY.toString(), file);
+    MetadataTable.PREV_ROW_COLUMN.putDelete(mut);
+    MetadataTable.TIME_COLUMN.putDelete(mut);
+    MetadataTable.DIRECTORY_COLUMN.putDelete(mut);
+    mut.putDelete(MetadataTable.DATAFILE_COLUMN_FAMILY.toString(), file);
     
     return mut;
   }
@@ -221,9 +221,9 @@ public class CloneTest extends TestCase 
     KeyExtent ke = new KeyExtent(new Text(tid), endRow == null ? null : new Text(endRow), prevRow == null ? null : new Text(prevRow));
     Mutation mut = ke.getPrevRowUpdateMutation();
     
-    Constants.METADATA_TIME_COLUMN.put(mut, new Value("M0".getBytes()));
-    Constants.METADATA_DIRECTORY_COLUMN.put(mut, new Value(dir.getBytes()));
-    mut.put(Constants.METADATA_DATAFILE_COLUMN_FAMILY.toString(), file, "10,200");
+    MetadataTable.TIME_COLUMN.put(mut, new Value("M0".getBytes()));
+    MetadataTable.DIRECTORY_COLUMN.put(mut, new Value(dir.getBytes()));
+    mut.put(MetadataTable.DATAFILE_COLUMN_FAMILY.toString(), file, "10,200");
     
     return mut;
   }
@@ -233,14 +233,14 @@ public class CloneTest extends TestCase 
     MockInstance mi = new MockInstance();
     Connector conn = mi.getConnector("", new PasswordToken(""));
     
-    BatchWriter bw1 = conn.createBatchWriter(Constants.METADATA_TABLE_NAME, new BatchWriterConfig());
+    BatchWriter bw1 = conn.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
     
     bw1.addMutation(createTablet("0", "m", null, "/d1", "/d1/file1"));
     bw1.addMutation(createTablet("0", null, "m", "/d2", "/d2/file2"));
     
     bw1.flush();
     
-    BatchWriter bw2 = conn.createBatchWriter(Constants.METADATA_TABLE_NAME, new BatchWriterConfig());
+    BatchWriter bw2 = conn.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
     
     MetadataTable.initializeClone("0", "1", conn, bw2);
     
@@ -255,14 +255,14 @@ public class CloneTest extends TestCase 
     
     assertEquals(0, rc);
     
-    Scanner scanner = conn.createScanner(Constants.METADATA_TABLE_NAME, Constants.NO_AUTHS);
+    Scanner scanner = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
     scanner.setRange(new KeyExtent(new Text("1"), null, null).toMetadataRange());
     
     HashSet<String> files = new HashSet<String>();
     
     int count = 0;
     for (Entry<Key,Value> entry : scanner) {
-      if (entry.getKey().getColumnFamily().equals(Constants.METADATA_DATAFILE_COLUMN_FAMILY)) {
+      if (entry.getKey().getColumnFamily().equals(MetadataTable.DATAFILE_COLUMN_FAMILY)) {
         files.add(entry.getKey().getColumnQualifier().toString());
         count++;
       }
@@ -280,14 +280,14 @@ public class CloneTest extends TestCase 
     MockInstance mi = new MockInstance();
     Connector conn = mi.getConnector("", new PasswordToken(""));
     
-    BatchWriter bw1 = conn.createBatchWriter(Constants.METADATA_TABLE_NAME, new BatchWriterConfig());
+    BatchWriter bw1 = conn.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
     
     bw1.addMutation(createTablet("0", "m", null, "/d1", "/d1/file1"));
     bw1.addMutation(createTablet("0", null, "m", "/d2", "/d2/file2"));
     
     bw1.flush();
     
-    BatchWriter bw2 = conn.createBatchWriter(Constants.METADATA_TABLE_NAME, new BatchWriterConfig());
+    BatchWriter bw2 = conn.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
     
     MetadataTable.initializeClone("0", "1", conn, bw2);
     
@@ -319,14 +319,14 @@ public class CloneTest extends TestCase 
     
     assertEquals(0, rc);
     
-    Scanner scanner = conn.createScanner(Constants.METADATA_TABLE_NAME, Constants.NO_AUTHS);
+    Scanner scanner = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
     scanner.setRange(new KeyExtent(new Text("1"), null, null).toMetadataRange());
     
     HashSet<String> files = new HashSet<String>();
     
     int count = 0;
     for (Entry<Key,Value> entry : scanner) {
-      if (entry.getKey().getColumnFamily().equals(Constants.METADATA_DATAFILE_COLUMN_FAMILY)) {
+      if (entry.getKey().getColumnFamily().equals(MetadataTable.DATAFILE_COLUMN_FAMILY)) {
         files.add(entry.getKey().getColumnQualifier().toString());
         count++;
       }
@@ -344,20 +344,20 @@ public class CloneTest extends TestCase 
     MockInstance mi = new MockInstance();
     Connector conn = mi.getConnector("", new PasswordToken(""));
     
-    BatchWriter bw1 = conn.createBatchWriter(Constants.METADATA_TABLE_NAME, new BatchWriterConfig());
+    BatchWriter bw1 = conn.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
     
     bw1.addMutation(createTablet("0", "m", null, "/d1", "/d1/file1"));
     bw1.addMutation(createTablet("0", null, "m", "/d2", "/d2/file2"));
     
     bw1.flush();
     
-    BatchWriter bw2 = conn.createBatchWriter(Constants.METADATA_TABLE_NAME, new BatchWriterConfig());
+    BatchWriter bw2 = conn.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
     
     MetadataTable.initializeClone("0", "1", conn, bw2);
     
     bw1.addMutation(deleteTablet("0", "m", null, "/d1", "/d1/file1"));
     Mutation mut = createTablet("0", null, null, "/d2", "/d2/file2");
-    mut.put(Constants.METADATA_DATAFILE_COLUMN_FAMILY.toString(), "/d1/file1", "10,200");
+    mut.put(MetadataTable.DATAFILE_COLUMN_FAMILY.toString(), "/d1/file1", "10,200");
     bw1.addMutation(mut);
     
     bw1.flush();

Modified: accumulo/branches/ACCUMULO-CURATOR/server/src/test/java/org/apache/accumulo/server/util/TabletIteratorTest.java
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-CURATOR/server/src/test/java/org/apache/accumulo/server/util/TabletIteratorTest.java?rev=1496226&r1=1496225&r2=1496226&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-CURATOR/server/src/test/java/org/apache/accumulo/server/util/TabletIteratorTest.java (original)
+++ accumulo/branches/ACCUMULO-CURATOR/server/src/test/java/org/apache/accumulo/server/util/TabletIteratorTest.java Mon Jun 24 21:34:20 2013
@@ -20,7 +20,6 @@ import java.util.Map.Entry;
 
 import junit.framework.TestCase;
 
-import org.apache.accumulo.core.Constants;
 import org.apache.accumulo.core.client.BatchWriter;
 import org.apache.accumulo.core.client.BatchWriterConfig;
 import org.apache.accumulo.core.client.Connector;
@@ -32,6 +31,8 @@ import org.apache.accumulo.core.data.Key
 import org.apache.accumulo.core.data.Mutation;
 import org.apache.accumulo.core.data.Range;
 import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.core.util.MetadataTable;
 import org.apache.accumulo.server.util.TabletIterator.TabletDeletedException;
 import org.apache.hadoop.io.Text;
 
@@ -42,19 +43,19 @@ public class TabletIteratorTest extends 
     private Connector conn;
     
     public TestTabletIterator(Connector conn) throws Exception {
-      super(conn.createScanner(Constants.METADATA_TABLE_NAME, Constants.NO_AUTHS), Constants.METADATA_KEYSPACE, true, true);
+      super(conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY), MetadataTable.KEYSPACE, true, true);
       this.conn = conn;
     }
     
     protected void resetScanner() {
       try {
-        Scanner ds = conn.createScanner(Constants.METADATA_TABLE_NAME, Constants.NO_AUTHS);
+        Scanner ds = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
         Text tablet = new KeyExtent(new Text("0"), new Text("m"), null).getMetadataEntry();
         ds.setRange(new Range(tablet, true, tablet, true));
         
         Mutation m = new Mutation(tablet);
         
-        BatchWriter bw = conn.createBatchWriter(Constants.METADATA_TABLE_NAME, new BatchWriterConfig());
+        BatchWriter bw = conn.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
         for (Entry<Key,Value> entry : ds) {
           Key k = entry.getKey();
           m.putDelete(k.getColumnFamily(), k.getColumnQualifier(), k.getTimestamp());
@@ -80,13 +81,13 @@ public class TabletIteratorTest extends 
     
     KeyExtent ke1 = new KeyExtent(new Text("0"), new Text("m"), null);
     Mutation mut1 = ke1.getPrevRowUpdateMutation();
-    Constants.METADATA_DIRECTORY_COLUMN.put(mut1, new Value("/d1".getBytes()));
+    MetadataTable.DIRECTORY_COLUMN.put(mut1, new Value("/d1".getBytes()));
     
     KeyExtent ke2 = new KeyExtent(new Text("0"), null, null);
     Mutation mut2 = ke2.getPrevRowUpdateMutation();
-    Constants.METADATA_DIRECTORY_COLUMN.put(mut2, new Value("/d2".getBytes()));
+    MetadataTable.DIRECTORY_COLUMN.put(mut2, new Value("/d2".getBytes()));
     
-    BatchWriter bw1 = conn.createBatchWriter(Constants.METADATA_TABLE_NAME, new BatchWriterConfig());
+    BatchWriter bw1 = conn.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
     bw1.addMutation(mut1);
     bw1.addMutation(mut2);
     bw1.close();


