accumulo-commits mailing list archives

From: vi...@apache.org
Subject: svn commit: r1496226 [5/13] - in /accumulo/branches/ACCUMULO-CURATOR: ./ assemble/ conf/examples/1GB/native-standalone/ conf/examples/1GB/standalone/ conf/examples/2GB/native-standalone/ conf/examples/2GB/standalone/ conf/examples/3GB/native-standalone...
Date: Mon, 24 Jun 2013 21:34:25 GMT
Modified: accumulo/branches/ACCUMULO-CURATOR/server/src/main/java/org/apache/accumulo/server/constraints/MetadataConstraints.java
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-CURATOR/server/src/main/java/org/apache/accumulo/server/constraints/MetadataConstraints.java?rev=1496226&r1=1496225&r2=1496226&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-CURATOR/server/src/main/java/org/apache/accumulo/server/constraints/MetadataConstraints.java (original)
+++ accumulo/branches/ACCUMULO-CURATOR/server/src/main/java/org/apache/accumulo/server/constraints/MetadataConstraints.java Mon Jun 24 21:34:20 2013
@@ -29,6 +29,7 @@ import org.apache.accumulo.core.data.Key
 import org.apache.accumulo.core.data.Mutation;
 import org.apache.accumulo.core.data.Value;
 import org.apache.accumulo.core.util.ColumnFQ;
+import org.apache.accumulo.core.util.MetadataTable;
 import org.apache.accumulo.core.util.MetadataTable.DataFileValue;
 import org.apache.accumulo.core.zookeeper.ZooUtil;
 import org.apache.accumulo.fate.zookeeper.TransactionWatcher.Arbitrator;
@@ -54,14 +55,14 @@ public class MetadataConstraints impleme
     }
   }
   
-  private static final HashSet<ColumnFQ> validColumnQuals = new HashSet<ColumnFQ>(Arrays.asList(new ColumnFQ[] {Constants.METADATA_PREV_ROW_COLUMN,
-      Constants.METADATA_OLD_PREV_ROW_COLUMN, Constants.METADATA_DIRECTORY_COLUMN, Constants.METADATA_SPLIT_RATIO_COLUMN, Constants.METADATA_TIME_COLUMN,
-      Constants.METADATA_LOCK_COLUMN, Constants.METADATA_FLUSH_COLUMN, Constants.METADATA_COMPACT_COLUMN}));
-  
-  private static final HashSet<Text> validColumnFams = new HashSet<Text>(Arrays.asList(new Text[] {Constants.METADATA_BULKFILE_COLUMN_FAMILY,
-      Constants.METADATA_LOG_COLUMN_FAMILY, Constants.METADATA_SCANFILE_COLUMN_FAMILY, Constants.METADATA_DATAFILE_COLUMN_FAMILY,
-      Constants.METADATA_CURRENT_LOCATION_COLUMN_FAMILY, Constants.METADATA_LAST_LOCATION_COLUMN_FAMILY, Constants.METADATA_FUTURE_LOCATION_COLUMN_FAMILY,
-      Constants.METADATA_CHOPPED_COLUMN_FAMILY, Constants.METADATA_CLONED_COLUMN_FAMILY}));
+  private static final HashSet<ColumnFQ> validColumnQuals = new HashSet<ColumnFQ>(Arrays.asList(new ColumnFQ[] {MetadataTable.PREV_ROW_COLUMN,
+      MetadataTable.OLD_PREV_ROW_COLUMN, MetadataTable.DIRECTORY_COLUMN, MetadataTable.SPLIT_RATIO_COLUMN, MetadataTable.TIME_COLUMN,
+      MetadataTable.LOCK_COLUMN, MetadataTable.FLUSH_COLUMN, MetadataTable.COMPACT_COLUMN}));
+  
+  private static final HashSet<Text> validColumnFams = new HashSet<Text>(Arrays.asList(new Text[] {MetadataTable.BULKFILE_COLUMN_FAMILY,
+      MetadataTable.LOG_COLUMN_FAMILY, MetadataTable.SCANFILE_COLUMN_FAMILY, MetadataTable.DATAFILE_COLUMN_FAMILY,
+      MetadataTable.CURRENT_LOCATION_COLUMN_FAMILY, MetadataTable.LAST_LOCATION_COLUMN_FAMILY, MetadataTable.FUTURE_LOCATION_COLUMN_FAMILY,
+      MetadataTable.CHOPPED_COLUMN_FAMILY, MetadataTable.CLONED_COLUMN_FAMILY}));
   
   private static boolean isValidColumn(ColumnUpdate cu) {
     
@@ -138,7 +139,7 @@ public class MetadataConstraints impleme
     }
     
     // ensure row is not less than Constants.METADATA_TABLE_ID
-    if (new Text(row).compareTo(new Text(Constants.METADATA_TABLE_ID)) < 0) {
+    if (new Text(row).compareTo(new Text(MetadataTable.ID)) < 0) {
       violations = addViolation(violations, 5);
     }
     
@@ -154,11 +155,11 @@ public class MetadataConstraints impleme
         continue;
       }
       
-      if (columnUpdate.getValue().length == 0 && !columnFamily.equals(Constants.METADATA_SCANFILE_COLUMN_FAMILY)) {
+      if (columnUpdate.getValue().length == 0 && !columnFamily.equals(MetadataTable.SCANFILE_COLUMN_FAMILY)) {
         violations = addViolation(violations, 6);
       }
       
-      if (columnFamily.equals(Constants.METADATA_DATAFILE_COLUMN_FAMILY)) {
+      if (columnFamily.equals(MetadataTable.DATAFILE_COLUMN_FAMILY)) {
         try {
           DataFileValue dfv = new DataFileValue(columnUpdate.getValue());
           
@@ -170,9 +171,9 @@ public class MetadataConstraints impleme
         } catch (ArrayIndexOutOfBoundsException aiooe) {
           violations = addViolation(violations, 1);
         }
-      } else if (columnFamily.equals(Constants.METADATA_SCANFILE_COLUMN_FAMILY)) {
+      } else if (columnFamily.equals(MetadataTable.SCANFILE_COLUMN_FAMILY)) {
         
-      } else if (columnFamily.equals(Constants.METADATA_BULKFILE_COLUMN_FAMILY)) {
+      } else if (columnFamily.equals(MetadataTable.BULKFILE_COLUMN_FAMILY)) {
         if (!columnUpdate.isDeleted() && !checkedBulk) {
           // splits, which also write the time reference, are allowed to write this reference even when
           // the transaction is not running because the other half of the tablet is holding a reference
@@ -190,13 +191,13 @@ public class MetadataConstraints impleme
           int otherTidCount = 0;
 
           for (ColumnUpdate update : mutation.getUpdates()) {
-            if (new ColumnFQ(update).equals(Constants.METADATA_DIRECTORY_COLUMN)) {
+            if (new ColumnFQ(update).equals(MetadataTable.DIRECTORY_COLUMN)) {
               isSplitMutation = true;
-            } else if (new Text(update.getColumnFamily()).equals(Constants.METADATA_CURRENT_LOCATION_COLUMN_FAMILY)) {
+            } else if (new Text(update.getColumnFamily()).equals(MetadataTable.CURRENT_LOCATION_COLUMN_FAMILY)) {
               isLocationMutation = true;
-            } else if (new Text(update.getColumnFamily()).equals(Constants.METADATA_DATAFILE_COLUMN_FAMILY)) {
+            } else if (new Text(update.getColumnFamily()).equals(MetadataTable.DATAFILE_COLUMN_FAMILY)) {
               dataFiles.add(new Text(update.getColumnQualifier()));
-            } else if (new Text(update.getColumnFamily()).equals(Constants.METADATA_BULKFILE_COLUMN_FAMILY)) {
+            } else if (new Text(update.getColumnFamily()).equals(MetadataTable.BULKFILE_COLUMN_FAMILY)) {
               loadedFiles.add(new Text(update.getColumnQualifier()));
               
               if (!new String(update.getValue()).equals(tidString)) {
@@ -222,7 +223,7 @@ public class MetadataConstraints impleme
       } else {
         if (!isValidColumn(columnUpdate)) {
           violations = addViolation(violations, 2);
-        } else if (new ColumnFQ(columnUpdate).equals(Constants.METADATA_PREV_ROW_COLUMN) && columnUpdate.getValue().length > 0
+        } else if (new ColumnFQ(columnUpdate).equals(MetadataTable.PREV_ROW_COLUMN) && columnUpdate.getValue().length > 0
             && (violations == null || !violations.contains((short) 4))) {
           KeyExtent ke = new KeyExtent(new Text(mutation.getRow()), (Text) null);
           
@@ -233,7 +234,7 @@ public class MetadataConstraints impleme
           if (!prevEndRowLessThanEndRow) {
             violations = addViolation(violations, 3);
           }
-        } else if (new ColumnFQ(columnUpdate).equals(Constants.METADATA_LOCK_COLUMN)) {
+        } else if (new ColumnFQ(columnUpdate).equals(MetadataTable.LOCK_COLUMN)) {
           if (zooCache == null) {
             zooCache = new ZooCache();
           }
@@ -284,9 +285,9 @@ public class MetadataConstraints impleme
       case 4:
         return "Invalid metadata row format";
       case 5:
-        return "Row can not be less than " + Constants.METADATA_TABLE_ID;
+        return "Row can not be less than " + MetadataTable.ID;
       case 6:
-        return "Empty values are not allowed for any " + Constants.METADATA_TABLE_NAME + " column";
+        return "Empty values are not allowed for any " + MetadataTable.NAME + " column";
       case 7:
         return "Lock not held in zookeeper by writer";
       case 8:

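The hunks above are mechanical: every Constants.METADATA_* field moves to an equivalent field on org.apache.accumulo.core.util.MetadataTable. A minimal before/after sketch of the call pattern, assuming only the MetadataTable fields that appear in this diff:

    import org.apache.accumulo.core.data.ColumnUpdate;
    import org.apache.accumulo.core.util.ColumnFQ;
    import org.apache.accumulo.core.util.MetadataTable;
    import org.apache.hadoop.io.Text;

    class MetadataColumnSketch {
      // Old style: new ColumnFQ(cu).equals(Constants.METADATA_PREV_ROW_COLUMN)
      static boolean isPrevRow(ColumnUpdate cu) {
        return new ColumnFQ(cu).equals(MetadataTable.PREV_ROW_COLUMN);
      }

      // Old style: new Text(cu.getColumnFamily()).equals(Constants.METADATA_DATAFILE_COLUMN_FAMILY)
      static boolean isDataFile(ColumnUpdate cu) {
        return new Text(cu.getColumnFamily()).equals(MetadataTable.DATAFILE_COLUMN_FAMILY);
      }
    }
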
Modified: accumulo/branches/ACCUMULO-CURATOR/server/src/main/java/org/apache/accumulo/server/gc/GarbageCollectWriteAheadLogs.java
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-CURATOR/server/src/main/java/org/apache/accumulo/server/gc/GarbageCollectWriteAheadLogs.java?rev=1496226&r1=1496225&r2=1496226&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-CURATOR/server/src/main/java/org/apache/accumulo/server/gc/GarbageCollectWriteAheadLogs.java (original)
+++ accumulo/branches/ACCUMULO-CURATOR/server/src/main/java/org/apache/accumulo/server/gc/GarbageCollectWriteAheadLogs.java Mon Jun 24 21:34:20 2013
@@ -38,6 +38,8 @@ import org.apache.accumulo.core.tabletse
 import org.apache.accumulo.core.tabletserver.thrift.TabletClientService.Client;
 import org.apache.accumulo.core.util.ThriftUtil;
 import org.apache.accumulo.core.zookeeper.ZooUtil;
+import org.apache.accumulo.server.ServerConstants;
+import org.apache.accumulo.server.fs.VolumeManager;
 import org.apache.accumulo.server.security.SecurityConstants;
 import org.apache.accumulo.server.util.AddressUtil;
 import org.apache.accumulo.server.util.MetadataTable;
@@ -47,9 +49,7 @@ import org.apache.accumulo.trace.instrum
 import org.apache.accumulo.trace.instrument.Trace;
 import org.apache.accumulo.trace.instrument.Tracer;
 import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.Trash;
 import org.apache.log4j.Logger;
 import org.apache.thrift.TException;
 import org.apache.zookeeper.KeeperException;
@@ -58,15 +58,13 @@ public class GarbageCollectWriteAheadLog
   private static final Logger log = Logger.getLogger(GarbageCollectWriteAheadLogs.class);
   
   private final Instance instance;
-  private final FileSystem fs;
+  private final VolumeManager fs;
   
-  private Trash trash;
+  private boolean useTrash;
   
-  GarbageCollectWriteAheadLogs(Instance instance, FileSystem fs, boolean noTrash) throws IOException {
+  GarbageCollectWriteAheadLogs(Instance instance, VolumeManager fs, boolean useTrash) throws IOException {
     this.instance = instance;
     this.fs = fs;
-    if (!noTrash)
-      this.trash = new Trash(fs, fs.getConf());
   }
   
   public void collect(GCStatus status) {
@@ -74,11 +72,11 @@ public class GarbageCollectWriteAheadLog
     Span span = Trace.start("scanServers");
     try {
       
-      Set<String> sortedWALogs = getSortedWALogs();
+      Set<Path> sortedWALogs = getSortedWALogs();
       
       status.currentLog.started = System.currentTimeMillis();
       
-      Map<String,String> fileToServerMap = new HashMap<String,String>();
+      Map<Path,String> fileToServerMap = new HashMap<Path,String>();
       int count = scanServers(fileToServerMap);
       long fileScanStop = System.currentTimeMillis();
       log.info(String.format("Fetched %d files from %d servers in %.2f seconds", fileToServerMap.size(), count,
@@ -100,7 +98,7 @@ public class GarbageCollectWriteAheadLog
       log.info(String.format("%d log entries scanned in %.2f seconds", count, (logEntryScanStop - fileScanStop) / 1000.));
       
       span = Trace.start("removeFiles");
-      Map<String,ArrayList<String>> serverToFileMap = mapServersToFiles(fileToServerMap);
+      Map<String,ArrayList<Path>> serverToFileMap = mapServersToFiles(fileToServerMap);
       
       count = removeFiles(serverToFileMap, sortedWALogs, status);
       
@@ -130,39 +128,36 @@ public class GarbageCollectWriteAheadLog
     }
   }
   
-  private int removeFiles(Map<String,ArrayList<String>> serverToFileMap, Set<String> sortedWALogs, final GCStatus status) {
+  private int removeFiles(Map<String,ArrayList<Path>> serverToFileMap, Set<Path> sortedWALogs, final GCStatus status) {
     AccumuloConfiguration conf = instance.getConfiguration();
-    for (Entry<String,ArrayList<String>> entry : serverToFileMap.entrySet()) {
-      if (entry.getKey().length() == 0) {
+    for (Entry<String,ArrayList<Path>> entry : serverToFileMap.entrySet()) {
+      if (entry.getKey().isEmpty()) {
         // old-style log entry, just remove it
-        for (String filename : entry.getValue()) {
-          log.debug("Removing old-style WAL " + entry.getValue());
+        for (Path path : entry.getValue()) {
+          log.debug("Removing old-style WAL " + path);
           try {
-            Path path = new Path(Constants.getWalDirectory(conf), filename);
-            if (trash == null || !trash.moveToTrash(path))
-              fs.delete(path, true);
+            if (!useTrash || !fs.moveToTrash(path))
+              fs.deleteRecursively(path);
             status.currentLog.deleted++;
           } catch (FileNotFoundException ex) {
             // ignored
           } catch (IOException ex) {
-            log.error("Unable to delete wal " + filename + ": " + ex);
+            log.error("Unable to delete wal " + path + ": " + ex);
           }
         }
       } else {
         InetSocketAddress address = AddressUtil.parseAddress(entry.getKey());
         if (!holdsLock(address)) {
-          Path serverPath = new Path(Constants.getWalDirectory(conf), entry.getKey());
-          for (String filename : entry.getValue()) {
-            log.debug("Removing WAL for offline server " + filename);
+          for (Path path : entry.getValue()) {
+            log.debug("Removing WAL for offline server " + path);
             try {
-              Path path = new Path(serverPath, filename);
-              if (trash == null || !trash.moveToTrash(path))
-                fs.delete(path, true);
+              if (!useTrash || !fs.moveToTrash(path))
+                fs.deleteRecursively(path);
               status.currentLog.deleted++;
             } catch (FileNotFoundException ex) {
               // ignored
             } catch (IOException ex) {
-              log.error("Unable to delete wal " + filename + ": " + ex);
+              log.error("Unable to delete wal " + path + ": " + ex);
             }
           }
           continue;
@@ -170,7 +165,7 @@ public class GarbageCollectWriteAheadLog
           Client tserver = null;
           try {
             tserver = ThriftUtil.getClient(new TabletClientService.Client.Factory(), address, conf);
-            tserver.removeLogs(Tracer.traceInfo(), SecurityConstants.getSystemCredentials(), entry.getValue());
+            tserver.removeLogs(Tracer.traceInfo(), SecurityConstants.getSystemCredentials(), paths2strings(entry.getValue()));
             log.debug("deleted " + entry.getValue() + " from " + entry.getKey());
             status.currentLog.deleted += entry.getValue().size();
           } catch (TException e) {
@@ -183,24 +178,21 @@ public class GarbageCollectWriteAheadLog
       }
     }
     
-    Path recoveryDir = new Path(Constants.getRecoveryDir(conf));
-    
-    for (String sortedWALog : sortedWALogs) {
-      log.debug("Removing sorted WAL " + sortedWALog);
-      Path swalog = new Path(recoveryDir, sortedWALog);
+    for (Path swalog : sortedWALogs) {
+      log.debug("Removing sorted WAL " + swalog);
       try {
-        if (trash == null || !trash.moveToTrash(swalog)) {
-          fs.delete(swalog, true);
+        if (!useTrash || !fs.moveToTrash(swalog)) {
+          fs.deleteRecursively(swalog);
         }
       } catch (FileNotFoundException ex) {
         // ignored
       } catch (IOException ioe) {
         try {
           if (fs.exists(swalog)) {
-            log.error("Unable to delete sorted walog " + sortedWALog + ": " + ioe);
+            log.error("Unable to delete sorted walog " + swalog + ": " + ioe);
           }
         } catch (IOException ex) {
-          log.error("Unable to check for the existence of " + sortedWALog, ex);
+          log.error("Unable to check for the existence of " + swalog, ex);
         }
       }
     }
@@ -208,30 +200,42 @@ public class GarbageCollectWriteAheadLog
     return 0;
   }
   
-  private static Map<String,ArrayList<String>> mapServersToFiles(Map<String,String> fileToServerMap) {
-    Map<String,ArrayList<String>> serverToFileMap = new HashMap<String,ArrayList<String>>();
-    for (Entry<String,String> fileServer : fileToServerMap.entrySet()) {
-      ArrayList<String> files = serverToFileMap.get(fileServer.getValue());
+  private List<String> paths2strings(ArrayList<Path> paths) {
+    List<String> result = new ArrayList<String>(paths.size());
+    for (Path path : paths)
+      result.add(path.toString());
+    return result;
+  }
+
+  private static Map<String,ArrayList<Path>> mapServersToFiles(Map<Path,String> fileToServerMap) {
+    Map<String,ArrayList<Path>> result = new HashMap<String,ArrayList<Path>>();
+    for (Entry<Path,String> fileServer : fileToServerMap.entrySet()) {
+      ArrayList<Path> files = result.get(fileServer.getValue());
       if (files == null) {
-        files = new ArrayList<String>();
-        serverToFileMap.put(fileServer.getValue(), files);
+        files = new ArrayList<Path>();
+        result.put(fileServer.getValue(), files);
       }
       files.add(fileServer.getKey());
     }
-    return serverToFileMap;
+    return result;
   }
   
-  private static int removeMetadataEntries(Map<String,String> fileToServerMap, Set<String> sortedWALogs, GCStatus status) throws IOException, KeeperException,
+  private static int removeMetadataEntries(Map<Path,String> fileToServerMap, Set<Path> sortedWALogs, GCStatus status) throws IOException, KeeperException,
       InterruptedException {
     int count = 0;
     Iterator<LogEntry> iterator = MetadataTable.getLogEntries(SecurityConstants.getSystemCredentials());
     while (iterator.hasNext()) {
       for (String filename : iterator.next().logSet) {
-        filename = filename.split("/", 2)[1];
-        if (fileToServerMap.remove(filename) != null)
+        Path path;
+        if (filename.contains(":"))
+          path = new Path(filename);
+        else
+          path = new Path(ServerConstants.getWalDirs()[0] + filename);
+        
+        if (fileToServerMap.remove(path) != null)
           status.currentLog.inUse++;
         
-        sortedWALogs.remove(filename);
+        sortedWALogs.remove(path);
         
         count++;
       }
@@ -239,54 +243,54 @@ public class GarbageCollectWriteAheadLog
     return count;
   }
   
-  private int scanServers(Map<String,String> fileToServerMap) throws Exception {
-    AccumuloConfiguration conf = instance.getConfiguration();
-    Path walRoot = new Path(Constants.getWalDirectory(conf));
-    for (FileStatus status : fs.listStatus(walRoot)) {
-      String name = status.getPath().getName();
-      if (status.isDir()) {
-        for (FileStatus file : fs.listStatus(new Path(walRoot, name))) {
-          if (isUUID(file.getPath().getName()))
-            fileToServerMap.put(file.getPath().getName(), name);
-          else {
-            log.info("Ignoring file " + file.getPath() + " because it doesn't look like a uuid");
+  private int scanServers(Map<Path,String> fileToServerMap) throws Exception {
+    Set<String> servers = new HashSet<String>();
+    for (String walDir : ServerConstants.getWalDirs()) {
+      Path walRoot = new Path(walDir);
+      FileStatus[] listing = fs.listStatus(walRoot);
+      if (listing == null)
+        continue;
+      for (FileStatus status : listing) {
+        String server = status.getPath().getName();
+        servers.add(server);
+        if (status.isDir()) {
+          for (FileStatus file : fs.listStatus(new Path(walRoot, server))) {
+            if (isUUID(file.getPath().getName()))
+              fileToServerMap.put(file.getPath(), server);
+            else {
+              log.info("Ignoring file " + file.getPath() + " because it doesn't look like a uuid");
+            }
           }
+        } else if (isUUID(server)) {
+          // old-style WAL are not under a directory
+          fileToServerMap.put(status.getPath(), "");
+        } else {
+          log.info("Ignoring file " + status.getPath() + " because it doesn't look like a uuid");
         }
-      } else if (isUUID(name)) {
-        // old-style WAL are not under a directory
-        fileToServerMap.put(name, "");
-      } else {
-        log.info("Ignoring file " + name + " because it doesn't look like a uuid");
       }
     }
-    
-    int count = 0;
-    return count;
+    return servers.size();
   }
   
-  private Set<String> getSortedWALogs() throws IOException {
-    AccumuloConfiguration conf = instance.getConfiguration();
-    Path recoveryDir = new Path(Constants.getRecoveryDir(conf));
-    
-    Set<String> sortedWALogs = new HashSet<String>();
+  private Set<Path> getSortedWALogs() throws IOException {
+    Set<Path> result = new HashSet<Path>();
     
-    if (fs.exists(recoveryDir)) {
-      for (FileStatus status : fs.listStatus(recoveryDir)) {
-        if (isUUID(status.getPath().getName())) {
-          sortedWALogs.add(status.getPath().getName());
-        } else {
-          log.debug("Ignoring file " + status.getPath() + " because it doesn't look like a uuid");
+    for (String dir : ServerConstants.getRecoveryDirs()) {
+      Path recoveryDir = new Path(dir);
+      
+      if (fs.exists(recoveryDir)) {
+        for (FileStatus status : fs.listStatus(recoveryDir)) {
+          if (isUUID(status.getPath().getName())) {
+            result.add(status.getPath());
+          } else {
+            log.debug("Ignoring file " + status.getPath() + " because it doesn't look like a uuid");
+          }
         }
       }
     }
-    
-    return sortedWALogs;
+    return result;
   }
   
-  /**
-   * @param name
-   * @return
-   */
   static private boolean isUUID(String name) {
     try {
       UUID.fromString(name);

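GarbageCollectWriteAheadLogs now tracks WALs as hadoop Path objects and delegates all filesystem work to a VolumeManager; the Trash handle becomes a plain useTrash flag. A condensed sketch of the trash-then-delete idiom repeated in removeFiles(), assuming the VolumeManager methods shown in this diff (moveToTrash, deleteRecursively):

    import java.io.FileNotFoundException;
    import java.io.IOException;
    import org.apache.accumulo.server.fs.VolumeManager;
    import org.apache.hadoop.fs.Path;

    class WalRemovalSketch {
      static void remove(VolumeManager fs, boolean useTrash, Path path) {
        try {
          // Prefer the trash; fall back to a hard recursive delete.
          if (!useTrash || !fs.moveToTrash(path))
            fs.deleteRecursively(path);
        } catch (FileNotFoundException ex) {
          // already gone; ignored, as in the code above
        } catch (IOException ex) {
          System.err.println("Unable to delete wal " + path + ": " + ex);
        }
      }
    }
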
Modified: accumulo/branches/ACCUMULO-CURATOR/server/src/main/java/org/apache/accumulo/server/gc/SimpleGarbageCollector.java
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-CURATOR/server/src/main/java/org/apache/accumulo/server/gc/SimpleGarbageCollector.java?rev=1496226&r1=1496225&r2=1496226&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-CURATOR/server/src/main/java/org/apache/accumulo/server/gc/SimpleGarbageCollector.java (original)
+++ accumulo/branches/ACCUMULO-CURATOR/server/src/main/java/org/apache/accumulo/server/gc/SimpleGarbageCollector.java Mon Jun 24 21:34:20 2013
@@ -56,16 +56,16 @@ import org.apache.accumulo.core.data.Mut
 import org.apache.accumulo.core.data.Range;
 import org.apache.accumulo.core.data.Value;
 import org.apache.accumulo.core.file.FileOperations;
-import org.apache.accumulo.core.file.FileUtil;
 import org.apache.accumulo.core.gc.thrift.GCMonitorService.Iface;
 import org.apache.accumulo.core.gc.thrift.GCMonitorService.Processor;
 import org.apache.accumulo.core.gc.thrift.GCStatus;
 import org.apache.accumulo.core.gc.thrift.GcCycleStats;
 import org.apache.accumulo.core.master.state.tables.TableState;
+import org.apache.accumulo.core.security.Authorizations;
 import org.apache.accumulo.core.security.CredentialHelper;
 import org.apache.accumulo.core.security.SecurityUtil;
 import org.apache.accumulo.core.security.thrift.TCredentials;
-import org.apache.accumulo.core.util.CachedConfiguration;
+import org.apache.accumulo.core.util.MetadataTable;
 import org.apache.accumulo.core.util.NamingThreadFactory;
 import org.apache.accumulo.core.util.ServerServices;
 import org.apache.accumulo.core.util.ServerServices.Service;
@@ -77,11 +77,11 @@ import org.apache.accumulo.server.Accumu
 import org.apache.accumulo.server.ServerConstants;
 import org.apache.accumulo.server.client.HdfsZooInstance;
 import org.apache.accumulo.server.conf.ServerConfiguration;
+import org.apache.accumulo.server.fs.VolumeManager;
+import org.apache.accumulo.server.fs.VolumeManagerImpl;
 import org.apache.accumulo.server.master.state.tables.TableManager;
 import org.apache.accumulo.server.security.SecurityConstants;
-import org.apache.accumulo.server.trace.TraceFileSystem;
 import org.apache.accumulo.server.util.Halt;
-import org.apache.accumulo.server.util.OfflineMetadataScanner;
 import org.apache.accumulo.server.util.TServerUtils;
 import org.apache.accumulo.server.util.TabletIterator;
 import org.apache.accumulo.server.zookeeper.ZooLock;
@@ -92,9 +92,7 @@ import org.apache.accumulo.trace.instrum
 import org.apache.accumulo.trace.instrument.thrift.TraceWrap;
 import org.apache.accumulo.trace.thrift.TInfo;
 import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.Trash;
 import org.apache.hadoop.io.Text;
 import org.apache.log4j.Logger;
 import org.apache.zookeeper.KeeperException;
@@ -125,8 +123,8 @@ public class SimpleGarbageCollector impl
   private TCredentials credentials;
   private long gcStartDelay;
   private boolean checkForBulkProcessingFiles;
-  private FileSystem fs;
-  private Trash trash = null;
+  private VolumeManager fs;
+  private boolean useTrash = true;
   private boolean safemode = false, offline = false, verbose = false;
   private String address = "localhost";
   private ZooLock lock;
@@ -143,7 +141,7 @@ public class SimpleGarbageCollector impl
     
     Instance instance = HdfsZooInstance.getInstance();
     ServerConfiguration serverConf = new ServerConfiguration(instance);
-    final FileSystem fs = FileUtil.getFileSystem(CachedConfiguration.getInstance(), serverConf.getConfiguration());
+    final VolumeManager fs = VolumeManagerImpl.get();
     Accumulo.init(fs, serverConf, "gc");
     String address = "localhost";
     SimpleGarbageCollector gc = new SimpleGarbageCollector();
@@ -182,8 +180,8 @@ public class SimpleGarbageCollector impl
     this.address = address;
   }
   
-  public void init(FileSystem fs, Instance instance, TCredentials credentials, boolean noTrash) throws IOException {
-    this.fs = TraceFileSystem.wrap(fs);
+  public void init(VolumeManager fs, Instance instance, TCredentials credentials, boolean noTrash) throws IOException {
+    this.fs = fs;
     this.credentials = credentials;
     this.instance = instance;
     
@@ -197,9 +195,7 @@ public class SimpleGarbageCollector impl
     log.info("verbose: " + verbose);
     log.info("memory threshold: " + CANDIDATE_MEMORY_PERCENTAGE + " of " + Runtime.getRuntime().maxMemory() + " bytes");
     log.info("delete threads: " + numDeleteThreads);
-    if (!noTrash) {
-      this.trash = new Trash(fs, fs.getConf());
-    }
+    useTrash = !noTrash;
   }
   
   private void run() {
@@ -299,7 +295,7 @@ public class SimpleGarbageCollector impl
       // Clean up any unused write-ahead logs
       Span waLogs = Trace.start("walogs");
       try {
-        GarbageCollectWriteAheadLogs walogCollector = new GarbageCollectWriteAheadLogs(instance, fs, trash == null);
+        GarbageCollectWriteAheadLogs walogCollector = new GarbageCollectWriteAheadLogs(instance, fs, useTrash);
         log.info("Beginning garbage collection of write-ahead logs");
         walogCollector.collect(status);
       } catch (Exception e) {
@@ -311,7 +307,7 @@ public class SimpleGarbageCollector impl
       // we just made a lot of changes to the !METADATA table: flush them out
       try {
         Connector connector = instance.getConnector(credentials.getPrincipal(), CredentialHelper.extractToken(credentials));
-        connector.tableOperations().compact(Constants.METADATA_TABLE_NAME, null, null, true, true);
+        connector.tableOperations().compact(MetadataTable.NAME, null, null, true, true);
       } catch (Exception e) {
         log.warn(e, e);
       }
@@ -329,10 +325,10 @@ public class SimpleGarbageCollector impl
   }
   
   private boolean moveToTrash(Path path) throws IOException {
-    if (trash == null)
+    if (!useTrash)
       return false;
     try {
-      return trash.moveToTrash(path);
+      return fs.moveToTrash(path);
     } catch (FileNotFoundException ex) {
       return false;
     }
@@ -364,20 +360,21 @@ public class SimpleGarbageCollector impl
       // if dir exist and is empty, then empty list is returned...
       // hadoop 1.0 will return null if the file doesn't exist
       // hadoop 2.0 will throw an exception if the file does not exist
-      FileStatus[] tabletDirs = null;
-      try {
-        tabletDirs = fs.listStatus(new Path(ServerConstants.getTablesDir() + "/" + delTableId));
-      } catch (FileNotFoundException ex) {
-        // ignored
-      }
-      
-      if (tabletDirs == null)
-        continue;
-      
-      if (tabletDirs.length == 0) {
-        Path p = new Path(ServerConstants.getTablesDir() + "/" + delTableId);
-        if (!moveToTrash(p))
-          fs.delete(p, false);
+      for (String dir : ServerConstants.getTablesDirs()) {
+        FileStatus[] tabletDirs = null;
+        try {
+          tabletDirs = fs.listStatus(new Path(dir + "/" + delTableId));
+        } catch (FileNotFoundException ex) {
+          // ignored
+        }
+        if (tabletDirs == null)
+          continue;
+        
+        if (tabletDirs.length == 0) {
+          Path p = new Path(dir + "/" + delTableId);
+          if (!moveToTrash(p))
+            fs.delete(p);
+        }
       }
     }
   }
@@ -437,10 +434,12 @@ public class SimpleGarbageCollector impl
       checkForBulkProcessingFiles = true;
       try {
         for (String validExtension : FileOperations.getValidExtensions()) {
-          for (FileStatus stat : fs.globStatus(new Path(ServerConstants.getTablesDir() + "/*/*/*." + validExtension))) {
-            String cand = stat.getPath().toUri().getPath();
-            if (!cand.contains(ServerConstants.getRootTabletDir())) {
-              candidates.add(cand.substring(ServerConstants.getTablesDir().length()));
+          for (String dir : ServerConstants.getTablesDirs()) {
+            for (FileStatus stat : fs.globStatus(new Path(dir + "/*/*/*." + validExtension))) {
+              String cand = stat.getPath().toUri().getPath();
+              if (cand.contains(ServerConstants.getRootTabletDir()))
+                continue;
+              candidates.add(cand.substring(dir.length()));
               log.debug("Offline candidate: " + cand);
             }
           }
@@ -453,13 +452,13 @@ public class SimpleGarbageCollector impl
     }
     
     checkForBulkProcessingFiles = false;
-    Range range = Constants.METADATA_DELETES_FOR_METADATA_KEYSPACE;
-    candidates.addAll(getBatch(Constants.METADATA_DELETE_FLAG_FOR_METADATA_PREFIX, range));
+    Range range = MetadataTable.DELETED_RANGE;
+    candidates.addAll(getBatch(MetadataTable.DELETED_RANGE.getStartKey().getRow().toString(), range));
     if (candidateMemExceeded)
       return candidates;
     
-    range = Constants.METADATA_DELETES_KEYSPACE;
-    candidates.addAll(getBatch(Constants.METADATA_DELETE_FLAG_PREFIX, range));
+    range = MetadataTable.DELETED_RANGE;
+    candidates.addAll(getBatch(MetadataTable.DELETED_RANGE.getStartKey().getRow().toString(), range));
     return candidates;
   }
   
@@ -475,8 +474,8 @@ public class SimpleGarbageCollector impl
       continueKey = null;
     }
     
-    Scanner scanner = instance.getConnector(credentials.getPrincipal(), CredentialHelper.extractToken(credentials)).createScanner(
-        Constants.METADATA_TABLE_NAME, Constants.NO_AUTHS);
+    Scanner scanner = instance.getConnector(credentials.getPrincipal(), CredentialHelper.extractToken(credentials)).createScanner(MetadataTable.NAME,
+        Authorizations.EMPTY);
     scanner.setRange(range);
     List<String> result = new ArrayList<String>();
     // find candidates for deletion; chop off the prefix
@@ -508,15 +507,17 @@ public class SimpleGarbageCollector impl
     
     Scanner scanner;
     if (offline) {
-      try {
-        scanner = new OfflineMetadataScanner(instance.getConfiguration(), fs);
-      } catch (IOException e) {
-        throw new IllegalStateException("Unable to create offline metadata scanner", e);
-      }
+      // TODO
+      throw new RuntimeException("Offline scanner no longer supported");
+      // try {
+      // scanner = new OfflineMetadataScanner(instance.getConfiguration(), fs);
+      // } catch (IOException e) {
+      // throw new IllegalStateException("Unable to create offline metadata scanner", e);
+      // }
     } else {
       try {
         scanner = new IsolatedScanner(instance.getConnector(credentials.getPrincipal(), CredentialHelper.extractToken(credentials)).createScanner(
-            Constants.METADATA_TABLE_NAME, Constants.NO_AUTHS));
+            MetadataTable.NAME, Authorizations.EMPTY));
       } catch (AccumuloSecurityException ex) {
         throw new AccumuloException(ex);
       } catch (TableNotFoundException ex) {
@@ -529,14 +530,14 @@ public class SimpleGarbageCollector impl
       
       log.debug("Checking for bulk processing flags");
       
-      scanner.setRange(Constants.METADATA_BLIP_KEYSPACE);
+      scanner.setRange(MetadataTable.BLIP_KEYSPACE);
       
       // WARNING: This block is IMPORTANT
       // You MUST REMOVE candidates that are in the same folder as a bulk
       // processing flag!
       
       for (Entry<Key,Value> entry : scanner) {
-        String blipPath = entry.getKey().getRow().toString().substring(Constants.METADATA_BLIP_FLAG_PREFIX.length());
+        String blipPath = entry.getKey().getRow().toString().substring(MetadataTable.BLIP_FLAG_PREFIX.length());
         Iterator<String> tailIter = candidates.tailSet(blipPath).iterator();
         int count = 0;
         while (tailIter.hasNext()) {
@@ -557,26 +558,30 @@ public class SimpleGarbageCollector impl
     // skip candidates that are still in use in the file column family in
     // the metadata table
     scanner.clearColumns();
-    scanner.fetchColumnFamily(Constants.METADATA_DATAFILE_COLUMN_FAMILY);
-    scanner.fetchColumnFamily(Constants.METADATA_SCANFILE_COLUMN_FAMILY);
-    Constants.METADATA_DIRECTORY_COLUMN.fetch(scanner);
-    
-    TabletIterator tabletIterator = new TabletIterator(scanner, Constants.METADATA_KEYSPACE, false, true);
+    scanner.fetchColumnFamily(MetadataTable.DATAFILE_COLUMN_FAMILY);
+    scanner.fetchColumnFamily(MetadataTable.SCANFILE_COLUMN_FAMILY);
+    MetadataTable.DIRECTORY_COLUMN.fetch(scanner);
+    TabletIterator tabletIterator = new TabletIterator(scanner, MetadataTable.KEYSPACE, false, true);
     
     while (tabletIterator.hasNext()) {
       Map<Key,Value> tabletKeyValues = tabletIterator.next();
       
       for (Entry<Key,Value> entry : tabletKeyValues.entrySet()) {
-        if (entry.getKey().getColumnFamily().equals(Constants.METADATA_DATAFILE_COLUMN_FAMILY)
-            || entry.getKey().getColumnFamily().equals(Constants.METADATA_SCANFILE_COLUMN_FAMILY)) {
+        if (entry.getKey().getColumnFamily().equals(MetadataTable.DATAFILE_COLUMN_FAMILY)
+            || entry.getKey().getColumnFamily().equals(MetadataTable.SCANFILE_COLUMN_FAMILY)) {
           
           String cf = entry.getKey().getColumnQualifier().toString();
-          String delete;
-          if (cf.startsWith("../")) {
-            delete = cf.substring(2);
-          } else {
-            String table = new String(KeyExtent.tableOfMetadataRow(entry.getKey().getRow()));
-            delete = "/" + table + cf;
+          String delete = cf;
+          if (!cf.contains(":")) {
+            if (cf.startsWith("../")) {
+              delete = cf.substring(2);
+            } else {
+              String table = new String(KeyExtent.tableOfMetadataRow(entry.getKey().getRow()));
+              if (cf.startsWith("/"))
+                delete = "/" + table + cf;
+              else
+                delete = "/" + table + "/" + cf;
+            }
           }
           // WARNING: This line is EXTREMELY IMPORTANT.
           // You MUST REMOVE candidates that are still in use
@@ -586,7 +591,7 @@ public class SimpleGarbageCollector impl
           String path = delete.substring(0, delete.lastIndexOf('/'));
           if (candidates.remove(path))
             log.debug("Candidate was still in use in the METADATA table: " + path);
-        } else if (Constants.METADATA_DIRECTORY_COLUMN.hasColumns(entry.getKey())) {
+        } else if (MetadataTable.DIRECTORY_COLUMN.hasColumns(entry.getKey())) {
           String table = new String(KeyExtent.tableOfMetadataRow(entry.getKey().getRow()));
           String delete = "/" + table + entry.getValue().toString();
           if (candidates.remove(delete))
@@ -597,15 +602,15 @@ public class SimpleGarbageCollector impl
     }
   }
   
-  final static String METADATA_TABLE_DIR = "/" + Constants.METADATA_TABLE_ID;
+  final static String METADATA_TABLE_DIR = "/" + MetadataTable.ID;
   
   private static void putMarkerDeleteMutation(final String delete, final BatchWriter writer, final BatchWriter rootWriter) throws MutationsRejectedException {
-    if (delete.startsWith(METADATA_TABLE_DIR)) {
-      Mutation m = new Mutation(new Text(Constants.METADATA_DELETE_FLAG_FOR_METADATA_PREFIX + delete));
+    if (delete.contains(METADATA_TABLE_DIR)) {
+      Mutation m = new Mutation(new Text(MetadataTable.DELETED_RANGE.getStartKey().getRow().toString() + delete));
       m.putDelete(EMPTY_TEXT, EMPTY_TEXT);
       rootWriter.addMutation(m);
     } else {
-      Mutation m = new Mutation(new Text(Constants.METADATA_DELETE_FLAG_PREFIX + delete));
+      Mutation m = new Mutation(new Text(MetadataTable.DELETED_RANGE.getStartKey().getRow().toString() + delete));
       m.putDelete(EMPTY_TEXT, EMPTY_TEXT);
       writer.addMutation(m);
     }
@@ -623,10 +628,10 @@ public class SimpleGarbageCollector impl
       Connector c;
       try {
         c = instance.getConnector(SecurityConstants.SYSTEM_PRINCIPAL, SecurityConstants.getSystemToken());
-        writer = c.createBatchWriter(Constants.METADATA_TABLE_NAME, new BatchWriterConfig());
-        rootWriter = c.createBatchWriter(Constants.METADATA_TABLE_NAME, new BatchWriterConfig());
+        writer = c.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
+        rootWriter = c.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
       } catch (Exception e) {
-        log.error("Unable to create writer to remove file from the " + Constants.METADATA_TABLE_NAME + " table", e);
+        log.error("Unable to create writer to remove file from the " + MetadataTable.NAME + " table", e);
       }
     }
     // when deleting a dir and all files in that dir, only need to delete the dir
@@ -666,26 +671,29 @@ public class SimpleGarbageCollector impl
         public void run() {
           boolean removeFlag;
           
-          String fullPath = ServerConstants.getTablesDir() + delete;
-          log.debug("Deleting " + fullPath);
           try {
+            Path fullPath;
             
-            Path p = new Path(fullPath);
+            if (delete.contains(":"))
+              fullPath = new Path(delete);
+            else
+              fullPath = fs.getFullPath(ServerConstants.getTablesDirs(), delete);
+            log.debug("Deleting " + fullPath);
             
-            if (moveToTrash(p) || fs.delete(p, true)) {
+            if (moveToTrash(fullPath) || fs.deleteRecursively(fullPath)) {
               // delete succeeded, still want to delete
               removeFlag = true;
               synchronized (SimpleGarbageCollector.this) {
                 ++status.current.deleted;
               }
-            } else if (fs.exists(p)) {
+            } else if (fs.exists(fullPath)) {
               // leave the entry in the METADATA table; we'll try again
               // later
               removeFlag = false;
               synchronized (SimpleGarbageCollector.this) {
                 ++status.current.errors;
               }
-              log.warn("File exists, but was not deleted for an unknown reason: " + p);
+              log.warn("File exists, but was not deleted for an unknown reason: " + fullPath);
             } else {
               // this failure, we still want to remove the METADATA table
               // entry
@@ -695,14 +703,14 @@ public class SimpleGarbageCollector impl
               }
               String parts[] = delete.split("/");
               if (parts.length > 2) {
-                String tableId = parts[1];
-                String tabletDir = parts[2];
+                String tableId = parts[parts.length - 3];
+                String tabletDir = parts[parts.length - 2];
                 TableManager.getInstance().updateTableStateCache(tableId);
                 TableState tableState = TableManager.getInstance().getTableState(tableId);
                 if (tableState != null && tableState != TableState.DELETING) {
                   // clone directories don't always exist
                   if (!tabletDir.startsWith("c-"))
-                    log.warn("File doesn't exist: " + p);
+                    log.warn("File doesn't exist: " + fullPath);
                 }
               } else {
                 log.warn("Very strange path name: " + delete);

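SimpleGarbageCollector's delete task now accepts both fully-qualified candidates (URIs containing ":") and the old relative form, which it resolves against the multiple configured table directories. A sketch of that branch; the getFullPath signature is assumed from its call site in this diff:

    import java.io.IOException;
    import org.apache.accumulo.server.ServerConstants;
    import org.apache.accumulo.server.fs.VolumeManager;
    import org.apache.hadoop.fs.Path;

    class CandidatePathSketch {
      static Path resolve(VolumeManager fs, String delete) throws IOException {
        if (delete.contains(":"))
          return new Path(delete); // already fully qualified, e.g. hdfs://...
        // Relative candidate: search the configured table directories.
        return fs.getFullPath(ServerConstants.getTablesDirs(), delete);
      }
    }
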
Modified: accumulo/branches/ACCUMULO-CURATOR/server/src/main/java/org/apache/accumulo/server/iterators/MetadataBulkLoadFilter.java
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-CURATOR/server/src/main/java/org/apache/accumulo/server/iterators/MetadataBulkLoadFilter.java?rev=1496226&r1=1496225&r2=1496226&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-CURATOR/server/src/main/java/org/apache/accumulo/server/iterators/MetadataBulkLoadFilter.java (original)
+++ accumulo/branches/ACCUMULO-CURATOR/server/src/main/java/org/apache/accumulo/server/iterators/MetadataBulkLoadFilter.java Mon Jun 24 21:34:20 2013
@@ -27,6 +27,7 @@ import org.apache.accumulo.core.iterator
 import org.apache.accumulo.core.iterators.IteratorEnvironment;
 import org.apache.accumulo.core.iterators.IteratorUtil.IteratorScope;
 import org.apache.accumulo.core.iterators.SortedKeyValueIterator;
+import org.apache.accumulo.core.util.MetadataTable;
 import org.apache.accumulo.fate.zookeeper.TransactionWatcher.Arbitrator;
 import org.apache.accumulo.server.zookeeper.TransactionWatcher.ZooArbitrator;
 import org.apache.log4j.Logger;
@@ -47,7 +48,7 @@ public class MetadataBulkLoadFilter exte
   
   @Override
   public boolean accept(Key k, Value v) {
-    if (!k.isDeleted() && k.compareColumnFamily(Constants.METADATA_BULKFILE_COLUMN_FAMILY) == 0) {
+    if (!k.isDeleted() && k.compareColumnFamily(MetadataTable.BULKFILE_COLUMN_FAMILY) == 0) {
       long txid = Long.valueOf(v.toString());
       
       Status status = bulkTxStatusCache.get(txid);

Modified: accumulo/branches/ACCUMULO-CURATOR/server/src/main/java/org/apache/accumulo/server/logger/LogReader.java
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-CURATOR/server/src/main/java/org/apache/accumulo/server/logger/LogReader.java?rev=1496226&r1=1496225&r2=1496226&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-CURATOR/server/src/main/java/org/apache/accumulo/server/logger/LogReader.java (original)
+++ accumulo/branches/ACCUMULO-CURATOR/server/src/main/java/org/apache/accumulo/server/logger/LogReader.java Mon Jun 24 21:34:20 2013
@@ -30,15 +30,11 @@ import java.util.regex.Pattern;
 import org.apache.accumulo.core.cli.Help;
 import org.apache.accumulo.core.data.KeyExtent;
 import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.file.FileUtil;
-import org.apache.accumulo.core.util.CachedConfiguration;
-import org.apache.accumulo.server.conf.ServerConfiguration;
+import org.apache.accumulo.server.fs.VolumeManager;
+import org.apache.accumulo.server.fs.VolumeManagerImpl;
 import org.apache.accumulo.server.tabletserver.log.DfsLogger;
 import org.apache.accumulo.server.tabletserver.log.MultiReader;
-import org.apache.accumulo.server.trace.TraceFileSystem;
-import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataInputStream;
-import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.io.Text;
 
@@ -70,9 +66,7 @@ public class LogReader {
   public static void main(String[] args) throws IOException {
     Opts opts = new Opts();
     opts.parseArgs(LogReader.class.getName(), args);
-    Configuration conf = CachedConfiguration.getInstance();
-    FileSystem fs = TraceFileSystem.wrap(FileUtil.getFileSystem(conf, ServerConfiguration.getSiteConfiguration()));
-    FileSystem local = TraceFileSystem.wrap(FileSystem.getLocal(conf));
+    VolumeManager fs = VolumeManagerImpl.get();
     
     Matcher rowMatcher = null;
     KeyExtent ke = null;
@@ -117,25 +111,9 @@ public class LogReader {
         } finally {
           f.close();
         }
-      } else if (local.isFile(path)) {
-        // read log entries from a simple file
-        FSDataInputStream f = DfsLogger.readHeader(fs, path, meta);
-        try {
-          while (true) {
-            try {
-              key.readFields(f);
-              value.readFields(f);
-            } catch (EOFException ex) {
-              break;
-            }
-            printLogEvent(key, value, row, rowMatcher, ke, tabletIds, opts.maxMutations);
-          }
-        } finally {
-          f.close();
-        }
       } else {
         // read the log entries sorted in a map file
-        MultiReader input = new MultiReader(fs, conf, file);
+        MultiReader input = new MultiReader(fs, path);
         while (input.next(key, value)) {
           printLogEvent(key, value, row, rowMatcher, ke, tabletIds, opts.maxMutations);
         }

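LogReader drops the hand-built Configuration/FileUtil/TraceFileSystem wiring in favor of a single VolumeManagerImpl.get() call, which also removes the separate local-filesystem code path. A sketch of the new bootstrap, as used in main() above:

    import java.io.IOException;
    import org.apache.accumulo.server.fs.VolumeManager;
    import org.apache.accumulo.server.fs.VolumeManagerImpl;

    class LogReaderBootstrapSketch {
      public static void main(String[] args) throws IOException {
        // One call replaces FileUtil.getFileSystem(conf, siteConf) plus
        // TraceFileSystem.wrap(...) from the removed lines above.
        VolumeManager fs = VolumeManagerImpl.get();
        System.out.println("volume manager ready: " + fs);
      }
    }
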
Modified: accumulo/branches/ACCUMULO-CURATOR/server/src/main/java/org/apache/accumulo/server/master/Master.java
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-CURATOR/server/src/main/java/org/apache/accumulo/server/master/Master.java?rev=1496226&r1=1496225&r2=1496226&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-CURATOR/server/src/main/java/org/apache/accumulo/server/master/Master.java (original)
+++ accumulo/branches/ACCUMULO-CURATOR/server/src/main/java/org/apache/accumulo/server/master/Master.java Mon Jun 24 21:34:20 2013
@@ -16,7 +16,6 @@
  */
 package org.apache.accumulo.server.master;
 
-
 import java.io.IOException;
 import java.net.InetSocketAddress;
 import java.nio.ByteBuffer;
@@ -58,7 +57,6 @@ import org.apache.accumulo.core.data.Key
 import org.apache.accumulo.core.data.KeyExtent;
 import org.apache.accumulo.core.data.Value;
 import org.apache.accumulo.core.data.thrift.TKeyExtent;
-import org.apache.accumulo.core.file.FileUtil;
 import org.apache.accumulo.core.iterators.IteratorUtil;
 import org.apache.accumulo.core.master.state.tables.TableState;
 import org.apache.accumulo.core.master.thrift.MasterClientService;
@@ -71,11 +69,12 @@ import org.apache.accumulo.core.master.t
 import org.apache.accumulo.core.master.thrift.TabletLoadState;
 import org.apache.accumulo.core.master.thrift.TabletServerStatus;
 import org.apache.accumulo.core.master.thrift.TabletSplit;
+import org.apache.accumulo.core.security.Authorizations;
 import org.apache.accumulo.core.security.SecurityUtil;
 import org.apache.accumulo.core.security.thrift.TCredentials;
 import org.apache.accumulo.core.util.ByteBufferUtil;
-import org.apache.accumulo.core.util.CachedConfiguration;
 import org.apache.accumulo.core.util.Daemon;
+import org.apache.accumulo.core.util.RootTable;
 import org.apache.accumulo.core.util.UtilWaitThread;
 import org.apache.accumulo.core.zookeeper.ZooUtil;
 import org.apache.accumulo.fate.AgeOffStore;
@@ -87,8 +86,11 @@ import org.apache.accumulo.fate.zookeepe
 import org.apache.accumulo.fate.zookeeper.ZooUtil.NodeExistsPolicy;
 import org.apache.accumulo.fate.zookeeper.ZooUtil.NodeMissingPolicy;
 import org.apache.accumulo.server.Accumulo;
+import org.apache.accumulo.server.ServerConstants;
 import org.apache.accumulo.server.client.HdfsZooInstance;
 import org.apache.accumulo.server.conf.ServerConfiguration;
+import org.apache.accumulo.server.fs.VolumeManager;
+import org.apache.accumulo.server.fs.VolumeManagerImpl;
 import org.apache.accumulo.server.master.LiveTServerSet.TServerConnection;
 import org.apache.accumulo.server.master.balancer.DefaultLoadBalancer;
 import org.apache.accumulo.server.master.balancer.TabletBalancer;
@@ -126,7 +128,6 @@ import org.apache.accumulo.server.monito
 import org.apache.accumulo.server.security.AuditedSecurityOperation;
 import org.apache.accumulo.server.security.SecurityConstants;
 import org.apache.accumulo.server.security.SecurityOperation;
-import org.apache.accumulo.server.trace.TraceFileSystem;
 import org.apache.accumulo.server.util.AddressUtil;
 import org.apache.accumulo.server.util.DefaultMap;
 import org.apache.accumulo.server.util.Halt;
@@ -141,7 +142,6 @@ import org.apache.accumulo.server.zookee
 import org.apache.accumulo.start.classloader.vfs.AccumuloVFSClassLoader;
 import org.apache.accumulo.trace.instrument.thrift.TraceWrap;
 import org.apache.accumulo.trace.thrift.TInfo;
-import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.io.DataInputBuffer;
 import org.apache.hadoop.io.DataOutputBuffer;
 import org.apache.hadoop.io.Text;
@@ -165,17 +165,17 @@ public class Master implements LiveTServ
   final static Logger log = Logger.getLogger(Master.class);
   
   final private static int ONE_SECOND = 1000;
-  final private static Text METADATA_TABLE_ID = new Text(Constants.METADATA_TABLE_ID);
+  final private static Text METADATA_TABLE_ID = new Text(MetadataTable.ID);
   final static long TIME_TO_WAIT_BETWEEN_SCANS = 60 * ONE_SECOND;
   final private static long TIME_BETWEEN_MIGRATION_CLEANUPS = 5 * 60 * ONE_SECOND;
   final static long WAIT_BETWEEN_ERRORS = ONE_SECOND;
   final private static long DEFAULT_WAIT_FOR_WATCHER = 10 * ONE_SECOND;
-  final private static int MAX_CLEANUP_WAIT_TIME = 1000;
-  final private static int TIME_TO_WAIT_BETWEEN_LOCK_CHECKS = 1000;
-  final static int MAX_TSERVER_WORK_CHUNK = 5000;
+  final private static int MAX_CLEANUP_WAIT_TIME = ONE_SECOND;
+  final private static int TIME_TO_WAIT_BETWEEN_LOCK_CHECKS = ONE_SECOND;
+  final static int MAX_TSERVER_WORK_CHUNK = 5 * ONE_SECOND;
   final private static int MAX_BAD_STATUS_COUNT = 3;
   
-  final private FileSystem fs;
+  final VolumeManager fs;
   final private Instance instance;
   final private String hostname;
   final LiveTServerSet tserverSet;
@@ -196,8 +196,7 @@ public class Master implements LiveTServ
   
   private Fate<Master> fate;
   
-  volatile SortedMap<TServerInstance,TabletServerStatus> tserverStatus = Collections
-      .unmodifiableSortedMap(new TreeMap<TServerInstance,TabletServerStatus>());
+  volatile SortedMap<TServerInstance,TabletServerStatus> tserverStatus = Collections.unmodifiableSortedMap(new TreeMap<TServerInstance,TabletServerStatus>());
   
   private final Set<String> recoveriesInProgress = Collections.synchronizedSet(new HashSet<String>());
   
@@ -255,7 +254,7 @@ public class Master implements LiveTServ
   }
   
   private void upgradeZookeeper() {
-    if (Accumulo.getAccumuloPersistentVersion(fs) == Constants.PREV_DATA_VERSION) {
+    if (Accumulo.getAccumuloPersistentVersion(fs) == ServerConstants.PREV_DATA_VERSION) {
       try {
         log.info("Upgrading zookeeper");
         
@@ -283,7 +282,7 @@ public class Master implements LiveTServ
   private final ServerConfiguration serverConfig;
   
   private void upgradeMetadata() {
-    if (Accumulo.getAccumuloPersistentVersion(fs) == Constants.PREV_DATA_VERSION) {
+    if (Accumulo.getAccumuloPersistentVersion(fs) == ServerConstants.PREV_DATA_VERSION) {
       if (upgradeMetadataRunning.compareAndSet(false, true)) {
         Runnable upgradeTask = new Runnable() {
           @Override
@@ -329,7 +328,7 @@ public class Master implements LiveTServ
   }
   
   private int nonMetaDataTabletsAssignedOrHosted() {
-    return totalAssignedOrHosted() - assignedOrHosted(new Text(Constants.METADATA_TABLE_ID));
+    return totalAssignedOrHosted() - assignedOrHosted(new Text(MetadataTable.ID)) - assignedOrHosted(new Text(RootTable.ID));
   }
   
   private int notHosted() {
@@ -345,7 +344,7 @@ public class Master implements LiveTServ
   // The number of unassigned tablets that should be assigned: displayed on the monitor page
   private int displayUnassigned() {
     int result = 0;
-    Text meta = new Text(Constants.METADATA_TABLE_ID);
+    Text meta = new Text(MetadataTable.ID);
     switch (getMasterState()) {
       case NORMAL:
         // Count offline tablets for online tables
@@ -380,8 +379,8 @@ public class Master implements LiveTServ
   }
   
   private void checkNotMetadataTable(String tableName, TableOperation operation) throws ThriftTableOperationException {
-    if (tableName.compareTo(Constants.METADATA_TABLE_NAME) == 0) {
-      String why = "Table names cannot be == " + Constants.METADATA_TABLE_NAME;
+    if (tableName.compareTo(MetadataTable.NAME) == 0) {
+      String why = "Table names cannot be == " + MetadataTable.NAME;
       log.warn(why);
       throw new ThriftTableOperationException(null, tableName, operation, TableOperationExceptionType.OTHER, why);
     }
@@ -434,10 +433,10 @@ public class Master implements LiveTServ
     return instance;
   }
   
-  public Master(ServerConfiguration config, FileSystem fs, String hostname) throws IOException {
+  public Master(ServerConfiguration config, VolumeManager fs, String hostname) throws IOException {
     this.serverConfig = config;
     this.instance = config.getInstance();
-    this.fs = TraceFileSystem.wrap(fs);
+    this.fs = fs;
     this.hostname = hostname;
     
     AccumuloConfiguration aconf = serverConfig.getConfiguration();
@@ -527,11 +526,11 @@ public class Master implements LiveTServ
         
         try {
           Connector conn = getConnector();
-          Scanner scanner = new IsolatedScanner(conn.createScanner(Constants.METADATA_TABLE_NAME, Constants.NO_AUTHS));
-          Constants.METADATA_FLUSH_COLUMN.fetch(scanner);
-          Constants.METADATA_DIRECTORY_COLUMN.fetch(scanner);
-          scanner.fetchColumnFamily(Constants.METADATA_CURRENT_LOCATION_COLUMN_FAMILY);
-          scanner.fetchColumnFamily(Constants.METADATA_LOG_COLUMN_FAMILY);
+          Scanner scanner = new IsolatedScanner(conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY));
+          MetadataTable.FLUSH_COLUMN.fetch(scanner);
+          MetadataTable.DIRECTORY_COLUMN.fetch(scanner);
+          scanner.fetchColumnFamily(MetadataTable.CURRENT_LOCATION_COLUMN_FAMILY);
+          scanner.fetchColumnFamily(MetadataTable.LOG_COLUMN_FAMILY);
           scanner.setRange(new KeyExtent(new Text(tableId), null, ByteBufferUtil.toText(startRow)).toMetadataRange());
           
           RowIterator ri = new RowIterator(scanner);
@@ -554,14 +553,14 @@ public class Master implements LiveTServ
               entry = row.next();
               Key key = entry.getKey();
               
-              if (Constants.METADATA_FLUSH_COLUMN.equals(key.getColumnFamily(), key.getColumnQualifier())) {
+              if (MetadataTable.FLUSH_COLUMN.equals(key.getColumnFamily(), key.getColumnQualifier())) {
                 tabletFlushID = Long.parseLong(entry.getValue().toString());
               }
               
-              if (Constants.METADATA_LOG_COLUMN_FAMILY.equals(key.getColumnFamily()))
+              if (MetadataTable.LOG_COLUMN_FAMILY.equals(key.getColumnFamily()))
                 logs++;
               
-              if (Constants.METADATA_CURRENT_LOCATION_COLUMN_FAMILY.equals(key.getColumnFamily())) {
+              if (MetadataTable.CURRENT_LOCATION_COLUMN_FAMILY.equals(key.getColumnFamily())) {
                 online = true;
                 server = new TServerInstance(entry.getValue(), key.getColumnQualifier());
               }
@@ -591,9 +590,9 @@ public class Master implements LiveTServ
             throw new ThriftTableOperationException(tableId, null, TableOperation.FLUSH, TableOperationExceptionType.NOTFOUND, null);
           
         } catch (AccumuloException e) {
-          log.debug("Failed to scan " + Constants.METADATA_TABLE_NAME + " table to wait for flush " + tableId, e);
+          log.debug("Failed to scan " + MetadataTable.NAME + " table to wait for flush " + tableId, e);
         } catch (TabletDeletedException tde) {
-          log.debug("Failed to scan " + Constants.METADATA_TABLE_NAME + " table to wait for flush " + tableId, tde);
+          log.debug("Failed to scan " + MetadataTable.NAME + " table to wait for flush " + tableId, tde);
         } catch (AccumuloSecurityException e) {
           log.warn(e.getMessage(), e);
           throw new ThriftSecurityException();
@@ -900,13 +899,9 @@ public class Master implements LiveTServ
           Text startRow = ByteBufferUtil.toText(arguments.get(1));
           Text endRow = ByteBufferUtil.toText(arguments.get(2));
           final String tableId = checkTableId(tableName, TableOperation.MERGE);
-          if (tableName.equals(Constants.METADATA_TABLE_NAME)) {
-            if (startRow.compareTo(new Text("0")) < 0) {
-              startRow = new Text("0");
-              if (endRow.getLength() != 0 && endRow.compareTo(startRow) < 0)
-                throw new ThriftTableOperationException(null, tableName, TableOperation.MERGE, TableOperationExceptionType.OTHER,
-                    "end-row specification is in the root tablet, which cannot be merged or split");
-            }
+          if (tableId.equals(RootTable.ID)) {
+            throw new ThriftTableOperationException(null, tableName, TableOperation.MERGE, TableOperationExceptionType.OTHER,
+                "cannot merge or split the root table");
           }
           log.debug("Creating merge op: " + tableId + " " + startRow + " " + endRow);
           
@@ -1061,7 +1056,7 @@ public class Master implements LiveTServ
   
   public void setMergeState(MergeInfo info, MergeState state) throws IOException, KeeperException, InterruptedException {
     synchronized (mergeLock) {
-      String path = ZooUtil.getRoot(instance.getInstanceID()) + Constants.ZTABLES + "/" + info.getRange().getTableId().toString() + "/merge";
+      String path = ZooUtil.getRoot(instance.getInstanceID()) + Constants.ZTABLES + "/" + info.getExtent().getTableId().toString() + "/merge";
       info.setState(state);
       if (state.equals(MergeState.NONE)) {
         ZooReaderWriter.getInstance().recursiveDelete(path, NodeMissingPolicy.SKIP);
@@ -1077,7 +1072,7 @@ public class Master implements LiveTServ
       }
       mergeLock.notifyAll();
     }
-    nextEvent.event("Merge state of %s set to %s", info.getRange(), state);
+    nextEvent.event("Merge state of %s set to %s", info.getExtent(), state);
   }
   
   public void clearMergeState(Text tableId) throws IOException, KeeperException, InterruptedException {
@@ -1163,9 +1158,9 @@ public class Master implements LiveTServ
         return TabletGoalState.UNASSIGNED;
       case STOP:
         return TabletGoalState.UNASSIGNED;
+      default:
+        throw new IllegalStateException("Unknown Master State");
     }
-    // unreachable
-    return TabletGoalState.HOSTED;
   }
   
   TabletGoalState getTableGoalState(KeyExtent extent) {
@@ -1192,7 +1187,7 @@ public class Master implements LiveTServ
         return TabletGoalState.UNASSIGNED;
       }
       // Handle merge transitions
-      if (mergeInfo.getRange() != null) {
+      if (mergeInfo.getExtent() != null) {
         log.debug("mergeInfo overlaps: " + extent + " " + mergeInfo.overlaps(extent));
         if (mergeInfo.overlaps(extent)) {
           switch (mergeInfo.getState()) {
@@ -1254,8 +1249,8 @@ public class Master implements LiveTServ
     // remove any migrating tablets that no longer exist.
     private void cleanupMutations() throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
       Connector connector = getConnector();
-      Scanner scanner = connector.createScanner(Constants.METADATA_TABLE_NAME, Constants.NO_AUTHS);
-      Constants.METADATA_PREV_ROW_COLUMN.fetch(scanner);
+      Scanner scanner = connector.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
+      MetadataTable.PREV_ROW_COLUMN.fetch(scanner);
       Set<KeyExtent> found = new HashSet<KeyExtent>();
       for (Entry<Key,Value> entry : scanner) {
         KeyExtent extent = new KeyExtent(entry.getKey().getRow(), entry.getValue());
@@ -1302,6 +1297,7 @@ public class Master implements LiveTServ
                   break;
                 case UNLOAD_METADATA_TABLETS:
                   count = assignedOrHosted(METADATA_TABLE_ID);
+                  count += assignedOrHosted(new Text(RootTable.ID));
                   log.debug(String.format("There are %d metadata tablets assigned or hosted", count));
                   // Assumes last tablet hosted is the root tablet;
                   // it's possible
@@ -1311,6 +1307,7 @@ public class Master implements LiveTServ
                   break;
                 case UNLOAD_ROOT_TABLET:
                   count = assignedOrHosted(METADATA_TABLE_ID);
+                  count += assignedOrHosted(new Text(RootTable.ID));
                   if (count > 0)
                     log.debug(String.format("The root tablet is still assigned or hosted"));
                   if (count == 0) {
@@ -1632,7 +1629,7 @@ public class Master implements LiveTServ
     try {
       SecurityUtil.serverLogin();
       
-      FileSystem fs = FileUtil.getFileSystem(CachedConfiguration.getInstance(), ServerConfiguration.getSiteConfiguration());
+      VolumeManager fs = VolumeManagerImpl.get();
       String hostname = Accumulo.getLocalAddress(args);
       Instance instance = HdfsZooInstance.getInstance();
       ServerConfiguration conf = new ServerConfiguration(instance);
@@ -1728,7 +1725,9 @@ public class Master implements LiveTServ
     Set<String> result = new HashSet<String>();
     if (getMasterState() != MasterState.NORMAL) {
       if (getMasterState() != MasterState.UNLOAD_METADATA_TABLETS)
-        result.add(Constants.METADATA_TABLE_ID);
+        result.add(MetadataTable.ID);
+      if (getMasterState() != MasterState.UNLOAD_ROOT_TABLET)
+        result.add(RootTable.ID);
       return result;
     }
     TableManager manager = TableManager.getInstance();
@@ -1784,7 +1783,7 @@ public class Master implements LiveTServ
     return serverConfig;
   }
   
-  public FileSystem getFileSystem() {
+  public VolumeManager getFileSystem() {
     return this.fs;
   }
   

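Note on the Master hunks above: the raw Hadoop FileSystem is swapped for the new VolumeManager abstraction end to end -- the constructor signature, the getFileSystem() accessor, and the main() bootstrap all change together, and the TraceFileSystem.wrap() step disappears. A minimal sketch of the updated bootstrap, assuming only the calls visible in the diff (VolumeManagerImpl.get(), the ServerConfiguration constructor, the new Master constructor); the class name, hostname, and elided service loop are illustrative:

  import java.io.IOException;

  import org.apache.accumulo.server.client.HdfsZooInstance;
  import org.apache.accumulo.server.conf.ServerConfiguration;
  import org.apache.accumulo.server.fs.VolumeManager;
  import org.apache.accumulo.server.fs.VolumeManagerImpl;
  import org.apache.accumulo.server.master.Master;

  public class MasterBootstrapSketch {
    public static void main(String[] args) throws IOException {
      // VolumeManagerImpl.get() replaces FileUtil.getFileSystem(...); the
      // manager can span multiple volumes instead of one FileSystem
      VolumeManager fs = VolumeManagerImpl.get();
      ServerConfiguration conf = new ServerConfiguration(HdfsZooInstance.getInstance());
      // the constructor now takes the VolumeManager directly; there is no
      // TraceFileSystem.wrap(fs) step any more
      Master master = new Master(conf, fs, "localhost");
      // ... run loop elided ...
    }
  }
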
Modified: accumulo/branches/ACCUMULO-CURATOR/server/src/main/java/org/apache/accumulo/server/master/TabletGroupWatcher.java
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-CURATOR/server/src/main/java/org/apache/accumulo/server/master/TabletGroupWatcher.java?rev=1496226&r1=1496225&r2=1496226&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-CURATOR/server/src/main/java/org/apache/accumulo/server/master/TabletGroupWatcher.java (original)
+++ accumulo/branches/ACCUMULO-CURATOR/server/src/main/java/org/apache/accumulo/server/master/TabletGroupWatcher.java Mon Jun 24 21:34:20 2013
@@ -46,9 +46,12 @@ import org.apache.accumulo.core.data.Par
 import org.apache.accumulo.core.data.Range;
 import org.apache.accumulo.core.data.Value;
 import org.apache.accumulo.core.master.thrift.TabletServerStatus;
+import org.apache.accumulo.core.security.Authorizations;
 import org.apache.accumulo.core.tabletserver.thrift.NotServingTabletException;
 import org.apache.accumulo.core.util.Daemon;
+import org.apache.accumulo.core.util.RootTable;
 import org.apache.accumulo.core.util.UtilWaitThread;
+import org.apache.accumulo.server.fs.FileRef;
 import org.apache.accumulo.server.master.LiveTServerSet.TServerConnection;
 import org.apache.accumulo.server.master.Master.TabletGoalState;
 import org.apache.accumulo.server.master.state.Assignment;
@@ -126,7 +129,8 @@ class TabletGroupWatcher extends Daemon 
         
         int[] counts = new int[TabletState.values().length];
         stats.begin();
-        // Walk through the tablets in our store, and work tablets towards their goal
+        // Walk through the tablets in our store, and work tablets
+        // towards their goal
         for (TabletLocationState tls : store) {
           if (tls == null) {
             continue;
@@ -282,7 +286,7 @@ class TabletGroupWatcher extends Daemon 
     if (!state.equals(TabletState.HOSTED))
       return;
     // Does this extent cover the end points of the delete?
-    KeyExtent range = info.getRange();
+    KeyExtent range = info.getExtent();
     if (tls.extent.overlaps(range)) {
       for (Text splitPoint : new Text[] {range.getPrevEndRow(), range.getEndRow()}) {
         if (splitPoint == null)
@@ -365,72 +369,73 @@ class TabletGroupWatcher extends Daemon 
           }
         }
       } catch (Exception ex) {
-        Master.log.error("Unable to update merge state for merge " + stats.getMergeInfo().getRange(), ex);
+        Master.log.error("Unable to update merge state for merge " + stats.getMergeInfo().getExtent(), ex);
       }
     }
   }
   
   private void deleteTablets(MergeInfo info) throws AccumuloException {
-    KeyExtent range = info.getRange();
-    Master.log.debug("Deleting tablets for " + range);
+    KeyExtent extent = info.getExtent();
+    String targetSystemTable = extent.isMeta() ? RootTable.NAME : MetadataTable.NAME;
+    Master.log.debug("Deleting tablets for " + extent);
     char timeType = '\0';
     KeyExtent followingTablet = null;
-    if (range.getEndRow() != null) {
-      Key nextExtent = new Key(range.getEndRow()).followingKey(PartialKey.ROW);
-      followingTablet = getHighTablet(new KeyExtent(range.getTableId(), nextExtent.getRow(), range.getEndRow()));
+    if (extent.getEndRow() != null) {
+      Key nextExtent = new Key(extent.getEndRow()).followingKey(PartialKey.ROW);
+      followingTablet = getHighTablet(new KeyExtent(extent.getTableId(), nextExtent.getRow(), extent.getEndRow()));
       Master.log.debug("Found following tablet " + followingTablet);
     }
     try {
       Connector conn = this.master.getConnector();
-      Text start = range.getPrevEndRow();
+      Text start = extent.getPrevEndRow();
       if (start == null) {
         start = new Text();
       }
-      Master.log.debug("Making file deletion entries for " + range);
-      Range deleteRange = new Range(KeyExtent.getMetadataEntry(range.getTableId(), start), false, KeyExtent.getMetadataEntry(range.getTableId(),
-          range.getEndRow()), true);
-      Scanner scanner = conn.createScanner(Constants.METADATA_TABLE_NAME, Constants.NO_AUTHS);
+      Master.log.debug("Making file deletion entries for " + extent);
+      Range deleteRange = new Range(KeyExtent.getMetadataEntry(extent.getTableId(), start), false, KeyExtent.getMetadataEntry(extent.getTableId(),
+          extent.getEndRow()), true);
+      Scanner scanner = conn.createScanner(targetSystemTable, Authorizations.EMPTY);
       scanner.setRange(deleteRange);
-      Constants.METADATA_DIRECTORY_COLUMN.fetch(scanner);
-      Constants.METADATA_TIME_COLUMN.fetch(scanner);
-      scanner.fetchColumnFamily(Constants.METADATA_DATAFILE_COLUMN_FAMILY);
-      scanner.fetchColumnFamily(Constants.METADATA_CURRENT_LOCATION_COLUMN_FAMILY);
-      Set<String> datafiles = new TreeSet<String>();
+      MetadataTable.DIRECTORY_COLUMN.fetch(scanner);
+      MetadataTable.TIME_COLUMN.fetch(scanner);
+      scanner.fetchColumnFamily(MetadataTable.DATAFILE_COLUMN_FAMILY);
+      scanner.fetchColumnFamily(MetadataTable.CURRENT_LOCATION_COLUMN_FAMILY);
+      Set<FileRef> datafiles = new TreeSet<FileRef>();
       for (Entry<Key,Value> entry : scanner) {
         Key key = entry.getKey();
-        if (key.compareColumnFamily(Constants.METADATA_DATAFILE_COLUMN_FAMILY) == 0) {
-          datafiles.add(key.getColumnQualifier().toString());
+        if (key.compareColumnFamily(MetadataTable.DATAFILE_COLUMN_FAMILY) == 0) {
+          datafiles.add(new FileRef(this.master.fs, key));
           if (datafiles.size() > 1000) {
-            MetadataTable.addDeleteEntries(range, datafiles, SecurityConstants.getSystemCredentials());
+            MetadataTable.addDeleteEntries(extent, datafiles, SecurityConstants.getSystemCredentials());
             datafiles.clear();
           }
-        } else if (Constants.METADATA_TIME_COLUMN.hasColumns(key)) {
+        } else if (MetadataTable.TIME_COLUMN.hasColumns(key)) {
           timeType = entry.getValue().toString().charAt(0);
-        } else if (key.compareColumnFamily(Constants.METADATA_CURRENT_LOCATION_COLUMN_FAMILY) == 0) {
+        } else if (key.compareColumnFamily(MetadataTable.CURRENT_LOCATION_COLUMN_FAMILY) == 0) {
           throw new IllegalStateException("Tablet " + key.getRow() + " is assigned during a merge!");
-        } else if (Constants.METADATA_DIRECTORY_COLUMN.hasColumns(key)) {
-          datafiles.add(entry.getValue().toString());
+        } else if (MetadataTable.DIRECTORY_COLUMN.hasColumns(key)) {
+          datafiles.add(new FileRef(this.master.fs, key));
           if (datafiles.size() > 1000) {
-            MetadataTable.addDeleteEntries(range, datafiles, SecurityConstants.getSystemCredentials());
+            MetadataTable.addDeleteEntries(extent, datafiles, SecurityConstants.getSystemCredentials());
             datafiles.clear();
           }
         }
       }
-      MetadataTable.addDeleteEntries(range, datafiles, SecurityConstants.getSystemCredentials());
-      BatchWriter bw = conn.createBatchWriter(Constants.METADATA_TABLE_NAME, new BatchWriterConfig());
+      MetadataTable.addDeleteEntries(extent, datafiles, SecurityConstants.getSystemCredentials());
+      BatchWriter bw = conn.createBatchWriter(targetSystemTable, new BatchWriterConfig());
       try {
-        deleteTablets(deleteRange, bw, conn);
+        deleteTablets(info, deleteRange, bw, conn);
       } finally {
         bw.close();
       }
       
       if (followingTablet != null) {
-        Master.log.debug("Updating prevRow of " + followingTablet + " to " + range.getPrevEndRow());
-        bw = conn.createBatchWriter(Constants.METADATA_TABLE_NAME, new BatchWriterConfig());
+        Master.log.debug("Updating prevRow of " + followingTablet + " to " + extent.getPrevEndRow());
+        bw = conn.createBatchWriter(targetSystemTable, new BatchWriterConfig());
         try {
           Mutation m = new Mutation(followingTablet.getMetadataEntry());
-          Constants.METADATA_PREV_ROW_COLUMN.put(m, KeyExtent.encodePrevEndRow(range.getPrevEndRow()));
-          Constants.METADATA_CHOPPED_COLUMN.putDelete(m);
+          MetadataTable.PREV_ROW_COLUMN.put(m, KeyExtent.encodePrevEndRow(extent.getPrevEndRow()));
+          MetadataTable.CHOPPED_COLUMN.putDelete(m);
           bw.addMutation(m);
           bw.flush();
         } finally {
@@ -438,8 +443,8 @@ class TabletGroupWatcher extends Daemon 
         }
       } else {
         // Recreate the default tablet to hold the end of the table
-        Master.log.debug("Recreating the last tablet to point to " + range.getPrevEndRow());
-        MetadataTable.addTablet(new KeyExtent(range.getTableId(), null, range.getPrevEndRow()), Constants.DEFAULT_TABLET_LOCATION,
+        Master.log.debug("Recreating the last tablet to point to " + extent.getPrevEndRow());
+        MetadataTable.addTablet(new KeyExtent(extent.getTableId(), null, extent.getPrevEndRow()), Constants.DEFAULT_TABLET_LOCATION,
             SecurityConstants.getSystemCredentials(), timeType, this.master.masterLock);
       }
     } catch (Exception ex) {
@@ -448,7 +453,7 @@ class TabletGroupWatcher extends Daemon 
   }
   
   private void mergeMetadataRecords(MergeInfo info) throws AccumuloException {
-    KeyExtent range = info.getRange();
+    KeyExtent range = info.getExtent();
     Master.log.debug("Merging metadata for " + range);
     KeyExtent stop = getHighTablet(range);
     Master.log.debug("Highest tablet is " + stop);
@@ -459,56 +464,54 @@ class TabletGroupWatcher extends Daemon 
       start = new Text();
     }
     Range scanRange = new Range(KeyExtent.getMetadataEntry(range.getTableId(), start), false, stopRow, false);
-    if (range.isMeta())
-      scanRange = scanRange.clip(Constants.METADATA_ROOT_TABLET_KEYSPACE);
+    String targetSystemTable = MetadataTable.NAME;
+    if (range.isMeta()) {
+      targetSystemTable = RootTable.NAME;
+    }
     
     BatchWriter bw = null;
     try {
       long fileCount = 0;
       Connector conn = this.master.getConnector();
       // Make file entries in highest tablet
-      bw = conn.createBatchWriter(Constants.METADATA_TABLE_NAME, new BatchWriterConfig());
-      Scanner scanner = conn.createScanner(Constants.METADATA_TABLE_NAME, Constants.NO_AUTHS);
+      bw = conn.createBatchWriter(targetSystemTable, new BatchWriterConfig());
+      Scanner scanner = conn.createScanner(targetSystemTable, Authorizations.EMPTY);
       scanner.setRange(scanRange);
-      Constants.METADATA_PREV_ROW_COLUMN.fetch(scanner);
-      Constants.METADATA_TIME_COLUMN.fetch(scanner);
-      Constants.METADATA_DIRECTORY_COLUMN.fetch(scanner);
-      scanner.fetchColumnFamily(Constants.METADATA_DATAFILE_COLUMN_FAMILY);
+      MetadataTable.PREV_ROW_COLUMN.fetch(scanner);
+      MetadataTable.TIME_COLUMN.fetch(scanner);
+      MetadataTable.DIRECTORY_COLUMN.fetch(scanner);
+      scanner.fetchColumnFamily(MetadataTable.DATAFILE_COLUMN_FAMILY);
       Mutation m = new Mutation(stopRow);
       String maxLogicalTime = null;
       for (Entry<Key,Value> entry : scanner) {
         Key key = entry.getKey();
         Value value = entry.getValue();
-        if (key.getColumnFamily().equals(Constants.METADATA_DATAFILE_COLUMN_FAMILY)) {
+        if (key.getColumnFamily().equals(MetadataTable.DATAFILE_COLUMN_FAMILY)) {
           m.put(key.getColumnFamily(), key.getColumnQualifier(), value);
           fileCount++;
-        } else if (Constants.METADATA_PREV_ROW_COLUMN.hasColumns(key) && firstPrevRowValue == null) {
+        } else if (MetadataTable.PREV_ROW_COLUMN.hasColumns(key) && firstPrevRowValue == null) {
           Master.log.debug("prevRow entry for lowest tablet is " + value);
           firstPrevRowValue = new Value(value);
-        } else if (Constants.METADATA_TIME_COLUMN.hasColumns(key)) {
+        } else if (MetadataTable.TIME_COLUMN.hasColumns(key)) {
           maxLogicalTime = TabletTime.maxMetadataTime(maxLogicalTime, value.toString());
-        } else if (Constants.METADATA_DIRECTORY_COLUMN.hasColumns(key)) {
-          if (!range.isMeta())
-            bw.addMutation(MetadataTable.createDeleteMutation(range.getTableId().toString(), entry.getValue().toString()));
+        } else if (MetadataTable.DIRECTORY_COLUMN.hasColumns(key)) {
+          bw.addMutation(MetadataTable.createDeleteMutation(range.getTableId().toString(), entry.getValue().toString()));
         }
       }
       
      // read the logical time from the last tablet in the merge range; it is not included in
       // the loop above
-      scanner = conn.createScanner(Constants.METADATA_TABLE_NAME, Constants.NO_AUTHS);
-      Range last = new Range(stopRow);
-      if (range.isMeta())
-        last = last.clip(Constants.METADATA_ROOT_TABLET_KEYSPACE);
-      scanner.setRange(last);
-      Constants.METADATA_TIME_COLUMN.fetch(scanner);
+      scanner = conn.createScanner(targetSystemTable, Authorizations.EMPTY);
+      scanner.setRange(new Range(stopRow));
+      MetadataTable.TIME_COLUMN.fetch(scanner);
       for (Entry<Key,Value> entry : scanner) {
-        if (Constants.METADATA_TIME_COLUMN.hasColumns(entry.getKey())) {
+        if (MetadataTable.TIME_COLUMN.hasColumns(entry.getKey())) {
           maxLogicalTime = TabletTime.maxMetadataTime(maxLogicalTime, entry.getValue().toString());
         }
       }
       
       if (maxLogicalTime != null)
-        Constants.METADATA_TIME_COLUMN.put(m, new Value(maxLogicalTime.getBytes()));
+        MetadataTable.TIME_COLUMN.put(m, new Value(maxLogicalTime.getBytes()));
       
       if (!m.getUpdates().isEmpty()) {
         bw.addMutation(m);
@@ -529,11 +532,11 @@ class TabletGroupWatcher extends Daemon 
       bw.addMutation(updatePrevRow);
       bw.flush();
       
-      deleteTablets(scanRange, bw, conn);
+      deleteTablets(info, scanRange, bw, conn);
       
       // Clean-up the last chopped marker
       m = new Mutation(stopRow);
-      Constants.METADATA_CHOPPED_COLUMN.putDelete(m);
+      MetadataTable.CHOPPED_COLUMN.putDelete(m);
       bw.addMutation(m);
       bw.flush();
       
@@ -549,14 +552,14 @@ class TabletGroupWatcher extends Daemon 
     }
   }
   
-  private void deleteTablets(Range scanRange, BatchWriter bw, Connector conn) throws TableNotFoundException, MutationsRejectedException {
+  private void deleteTablets(MergeInfo info, Range scanRange, BatchWriter bw, Connector conn) throws TableNotFoundException, MutationsRejectedException {
     Scanner scanner;
     Mutation m;
     // Delete everything in the other tablets
    // group all deletes for a tablet into one mutation; this makes tablets
    // either disappear entirely or not at all... this is important for the case
     // where the process terminates in the loop below...
-    scanner = conn.createScanner(Constants.METADATA_TABLE_NAME, Constants.NO_AUTHS);
+    scanner = conn.createScanner(info.getExtent().isMeta() ? RootTable.NAME : MetadataTable.NAME, Authorizations.EMPTY);
     Master.log.debug("Deleting range " + scanRange);
     scanner.setRange(scanRange);
     RowIterator rowIter = new RowIterator(scanner);
@@ -582,8 +585,8 @@ class TabletGroupWatcher extends Daemon 
   private KeyExtent getHighTablet(KeyExtent range) throws AccumuloException {
     try {
       Connector conn = this.master.getConnector();
-      Scanner scanner = conn.createScanner(Constants.METADATA_TABLE_NAME, Constants.NO_AUTHS);
-      Constants.METADATA_PREV_ROW_COLUMN.fetch(scanner);
+      Scanner scanner = conn.createScanner(range.isMeta() ? RootTable.NAME : MetadataTable.NAME, Authorizations.EMPTY);
+      MetadataTable.PREV_ROW_COLUMN.fetch(scanner);
       KeyExtent start = new KeyExtent(range.getTableId(), range.getEndRow(), null);
       scanner.setRange(new Range(start.getMetadataEntry(), null));
       Iterator<Entry<Key,Value>> iterator = scanner.iterator();

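A recurring pattern in the TabletGroupWatcher hunks is choosing which system table an operation targets: merges and deletes that touch tablets of the metadata table itself must be recorded in the new root table, while ordinary tablets keep going through the metadata table. A sketch of that routing rule, assuming only KeyExtent.isMeta(), RootTable.NAME, and MetadataTable.NAME from the diff; the helper and class names are ours:

  import org.apache.accumulo.core.data.KeyExtent;
  import org.apache.accumulo.core.util.MetadataTable;
  import org.apache.accumulo.core.util.RootTable;

  final class SystemTableRoutingSketch {
    // tablets of the metadata table have their entries in the root table;
    // every other tablet is described in the metadata table
    static String targetSystemTable(KeyExtent extent) {
      return extent.isMeta() ? RootTable.NAME : MetadataTable.NAME;
    }
  }

deleteTablets() and getHighTablet() both apply this choice when creating scanners and batch writers, which is why deleteTablets() now takes the MergeInfo parameter.
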
Modified: accumulo/branches/ACCUMULO-CURATOR/server/src/main/java/org/apache/accumulo/server/master/balancer/ChaoticLoadBalancer.java
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-CURATOR/server/src/main/java/org/apache/accumulo/server/master/balancer/ChaoticLoadBalancer.java?rev=1496226&r1=1496225&r2=1496226&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-CURATOR/server/src/main/java/org/apache/accumulo/server/master/balancer/ChaoticLoadBalancer.java (original)
+++ accumulo/branches/ACCUMULO-CURATOR/server/src/main/java/org/apache/accumulo/server/master/balancer/ChaoticLoadBalancer.java Mon Jun 24 21:34:20 2013
@@ -25,12 +25,12 @@ import java.util.Random;
 import java.util.Set;
 import java.util.SortedMap;
 
-import org.apache.accumulo.core.Constants;
 import org.apache.accumulo.core.client.impl.thrift.ThriftSecurityException;
 import org.apache.accumulo.core.data.KeyExtent;
 import org.apache.accumulo.core.master.thrift.TableInfo;
 import org.apache.accumulo.core.master.thrift.TabletServerStatus;
 import org.apache.accumulo.core.tabletserver.thrift.TabletStats;
+import org.apache.accumulo.core.util.MetadataTable;
 import org.apache.accumulo.server.conf.ServerConfiguration;
 import org.apache.accumulo.server.master.state.TServerInstance;
 import org.apache.accumulo.server.master.state.TabletMigration;
@@ -103,7 +103,7 @@ public class ChaoticLoadBalancer extends
     
     for (Entry<TServerInstance,TabletServerStatus> e : current.entrySet()) {
       for (String table : e.getValue().getTableMap().keySet()) {
-        if (!moveMetadata && Constants.METADATA_TABLE_NAME.equals(table))
+        if (!moveMetadata && MetadataTable.NAME.equals(table))
           continue;
         try {
           for (TabletStats ts : getOnlineTabletsForTable(e.getKey(), table)) {
@@ -135,11 +135,6 @@ public class ChaoticLoadBalancer extends
     return 100;
   }
   
-  /*
-   * (non-Javadoc)
-   * 
-   * @see org.apache.accumulo.server.master.balancer.TabletBalancer#init(org.apache.accumulo.server.conf.ServerConfiguration)
-   */
   @Override
   public void init(ServerConfiguration conf) {}
   

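The ChaoticLoadBalancer change is mostly the constant move (Constants.METADATA_TABLE_NAME becomes MetadataTable.NAME), but the guard it rewrites is worth isolating for anyone writing a custom balancer: metadata tablets are only shuffled when moveMetadata is set. A sketch of the same guard on its own, assuming only MetadataTable.NAME from the diff; the class and method names are ours:

  import org.apache.accumulo.core.util.MetadataTable;

  final class BalancerGuardSketch {
    // chaotic migration of metadata tablets is opt-in; skipping them keeps
    // tablet location lookups stable while user tables churn
    static boolean mayMigrate(String tableName, boolean moveMetadata) {
      return moveMetadata || !MetadataTable.NAME.equals(tableName);
    }
  }
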
Modified: accumulo/branches/ACCUMULO-CURATOR/server/src/main/java/org/apache/accumulo/server/master/recovery/HadoopLogCloser.java
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-CURATOR/server/src/main/java/org/apache/accumulo/server/master/recovery/HadoopLogCloser.java?rev=1496226&r1=1496225&r2=1496226&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-CURATOR/server/src/main/java/org/apache/accumulo/server/master/recovery/HadoopLogCloser.java (original)
+++ accumulo/branches/ACCUMULO-CURATOR/server/src/main/java/org/apache/accumulo/server/master/recovery/HadoopLogCloser.java Mon Jun 24 21:34:20 2013
@@ -21,6 +21,7 @@ import java.io.IOException;
 
 import org.apache.accumulo.core.conf.Property;
 import org.apache.accumulo.server.master.Master;
+import org.apache.accumulo.server.fs.VolumeManager;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.LocalFileSystem;
 import org.apache.hadoop.fs.Path;
@@ -32,10 +33,10 @@ public class HadoopLogCloser implements 
   private static Logger log = Logger.getLogger(HadoopLogCloser.class);
 
   @Override
-  public long close(Master master, FileSystem fs, Path source) throws IOException {
-    
-    if (fs instanceof DistributedFileSystem) {
-      DistributedFileSystem dfs = (DistributedFileSystem) fs;
+  public long close(Master master, VolumeManager fs, Path source) throws IOException {
+    FileSystem ns = fs.getFileSystemByPath(source);
+    if (ns instanceof DistributedFileSystem) {
+      DistributedFileSystem dfs = (DistributedFileSystem) ns;
       try {
         if (!dfs.recoverLease(source)) {
           log.info("Waiting for file to be closed " + source.toString());
@@ -48,12 +49,12 @@ public class HadoopLogCloser implements 
       } catch (Exception ex) {
         log.warn("Error recovery lease on " + source.toString(), ex);
       }
-    } else if (fs instanceof LocalFileSystem) {
+    } else if (ns instanceof LocalFileSystem) {
       // ignore
     } else {
-      throw new IllegalStateException("Don't know how to recover a lease for " + fs.getClass().getName());
+      throw new IllegalStateException("Don't know how to recover a lease for " + ns.getClass().getName());
     }
-    fs.append(source).close();
+    ns.append(source).close();
     log.info("Recovered lease on " + source.toString() + " using append");
     return 0;
   }

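With VolumeManager in the close() signature, a Path no longer implies a single filesystem, so HadoopLogCloser first asks the manager which FileSystem owns the WAL and then branches on the concrete type. A sketch of that dispatch in isolation, assuming only getFileSystemByPath(...) from the diff; the class, method, and return values are illustrative:

  import org.apache.accumulo.server.fs.VolumeManager;
  import org.apache.hadoop.fs.FileSystem;
  import org.apache.hadoop.fs.LocalFileSystem;
  import org.apache.hadoop.fs.Path;
  import org.apache.hadoop.hdfs.DistributedFileSystem;

  final class WalRecoverySketch {
    static String strategyFor(VolumeManager fs, Path source) {
      // resolve the volume that owns the WAL before picking a strategy
      FileSystem ns = fs.getFileSystemByPath(source);
      if (ns instanceof DistributedFileSystem)
        return "recoverLease";   // HDFS lease recovery, as in the diff above
      if (ns instanceof LocalFileSystem)
        return "none";           // local files need no lease recovery
      throw new IllegalStateException("no recovery strategy for " + ns.getClass().getName());
    }
  }
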
Modified: accumulo/branches/ACCUMULO-CURATOR/server/src/main/java/org/apache/accumulo/server/master/recovery/LogCloser.java
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-CURATOR/server/src/main/java/org/apache/accumulo/server/master/recovery/LogCloser.java?rev=1496226&r1=1496225&r2=1496226&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-CURATOR/server/src/main/java/org/apache/accumulo/server/master/recovery/LogCloser.java (original)
+++ accumulo/branches/ACCUMULO-CURATOR/server/src/main/java/org/apache/accumulo/server/master/recovery/LogCloser.java Mon Jun 24 21:34:20 2013
@@ -19,9 +19,9 @@ package org.apache.accumulo.server.maste
 import java.io.IOException;
 
 import org.apache.accumulo.server.master.Master;
-import org.apache.hadoop.fs.FileSystem;
+import org.apache.accumulo.server.fs.VolumeManager;
 import org.apache.hadoop.fs.Path;
 
 public interface LogCloser {
-  public long close(Master master, FileSystem fs, Path path) throws IOException;
+  public long close(Master master, VolumeManager fs, Path path) throws IOException;
 }

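The interface change ripples to every implementation. A minimal conforming implementation under the new signature might look like the following; the class name is hypothetical, and the return-value convention (0 = closed, proceed) is inferred from HadoopLogCloser above rather than stated anywhere in this diff:

  import java.io.IOException;

  import org.apache.accumulo.server.fs.VolumeManager;
  import org.apache.accumulo.server.master.Master;
  import org.apache.hadoop.fs.Path;

  public class NoOpLogCloser implements LogCloser {
    @Override
    public long close(Master master, VolumeManager fs, Path path) throws IOException {
      // nothing to recover; returning 0 appears to mean "log is closed",
      // matching what HadoopLogCloser returns after a successful recovery
      return 0;
    }
  }
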
Modified: accumulo/branches/ACCUMULO-CURATOR/server/src/main/java/org/apache/accumulo/server/master/recovery/MapRLogCloser.java
URL: http://svn.apache.org/viewvc/accumulo/branches/ACCUMULO-CURATOR/server/src/main/java/org/apache/accumulo/server/master/recovery/MapRLogCloser.java?rev=1496226&r1=1496225&r2=1496226&view=diff
==============================================================================
--- accumulo/branches/ACCUMULO-CURATOR/server/src/main/java/org/apache/accumulo/server/master/recovery/MapRLogCloser.java (original)
+++ accumulo/branches/ACCUMULO-CURATOR/server/src/main/java/org/apache/accumulo/server/master/recovery/MapRLogCloser.java Mon Jun 24 21:34:20 2013
@@ -19,6 +19,7 @@ package org.apache.accumulo.server.maste
 import java.io.IOException;
 
 import org.apache.accumulo.server.master.Master;
+import org.apache.accumulo.server.fs.VolumeManager;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
@@ -29,11 +30,12 @@ public class MapRLogCloser implements Lo
   private static Logger log = Logger.getLogger(MapRLogCloser.class);
   
   @Override
-  public long close(Master m, FileSystem fs, Path path) throws IOException {
+  public long close(Master m, VolumeManager fs, Path path) throws IOException {
     log.info("Recovering file " + path.toString() + " by changing permission to readonly");
+    FileSystem ns = fs.getFileSystemByPath(path);
     FsPermission roPerm = new FsPermission((short) 0444);
     try {
-      fs.setPermission(path, roPerm);
+      ns.setPermission(path, roPerm);
       return 0;
     } catch (IOException ex) {
       log.error("error recovering lease ", ex);


