accumulo-commits mailing list archives

From ktur...@apache.org
Subject [3/4] git commit: ACCUMULO-1832 added volume replacement
Date Tue, 25 Feb 2014 16:23:43 GMT
ACCUMULO-1832 added volume replacement


Project: http://git-wip-us.apache.org/repos/asf/accumulo/repo
Commit: http://git-wip-us.apache.org/repos/asf/accumulo/commit/2ef2d885
Tree: http://git-wip-us.apache.org/repos/asf/accumulo/tree/2ef2d885
Diff: http://git-wip-us.apache.org/repos/asf/accumulo/diff/2ef2d885

Branch: refs/heads/master
Commit: 2ef2d88598f5e14f8f96b77fecca66dcd7196448
Parents: 739082d
Author: Keith Turner <kturner@apache.org>
Authored: Mon Feb 24 18:23:06 2014 -0500
Committer: Keith Turner <kturner@apache.org>
Committed: Mon Feb 24 18:23:06 2014 -0500

----------------------------------------------------------------------
 .../org/apache/accumulo/core/conf/Property.java |  12 +-
 .../core/tabletserver/log/LogEntry.java         |  12 +-
 .../chapters/multivolume.tex                    |  46 ++-
 .../apache/accumulo/server/ServerConstants.java | 115 +++++--
 .../accumulo/server/fs/VolumeManager.java       |  99 +++---
 .../accumulo/server/fs/VolumeManagerImpl.java   |  93 +++---
 .../apache/accumulo/server/fs/VolumeUtil.java   | 330 +++++++++++++++++++
 .../apache/accumulo/server/init/Initialize.java |  36 +-
 .../server/master/recovery/HadoopLogCloser.java |   3 +-
 .../org/apache/accumulo/server/util/Admin.java  |  11 +
 .../accumulo/server/util/ListVolumesUsed.java   | 141 ++++++++
 .../accumulo/server/util/MetadataTableUtil.java |  42 ++-
 .../apache/accumulo/server/fs/FileTypeTest.java |  54 +++
 .../accumulo/server/fs/VolumeUtilTest.java      | 144 ++++++++
 .../accumulo/gc/SimpleGarbageCollector.java     |  94 +++---
 .../master/recovery/RecoveryManager.java        |  10 +
 .../monitor/servlets/DefaultServlet.java        |   2 +-
 .../tserver/DirectoryDecommissioner.java        | 169 ----------
 .../org/apache/accumulo/tserver/Tablet.java     |  21 +-
 .../apache/accumulo/tserver/TabletServer.java   |   2 +
 .../tserver/DirectoryDecommissionerTest.java    | 108 ------
 .../tserver/TabletServerSyncCheckTest.java      |  14 +-
 .../org/apache/accumulo/test/CleanWalIT.java    |   3 +-
 .../test/ConfigurableMajorCompactionIT.java     |   3 +-
 .../org/apache/accumulo/test/DumpConfigIT.java  |   3 +-
 .../test/MasterRepairsDualAssignmentIT.java     |   3 +-
 .../accumulo/test/NoMutationRecoveryIT.java     |   8 +-
 .../java/org/apache/accumulo/test/VolumeIT.java | 174 +++++++---
 .../functional/BalanceAfterCommsFailureIT.java  |   3 +-
 .../test/functional/BatchScanSplitIT.java       |   3 +-
 .../test/functional/BigRootTabletIT.java        |   3 +-
 .../test/functional/BinaryStressIT.java         |   3 +-
 .../accumulo/test/functional/BloomFilterIT.java |   3 +-
 .../functional/BulkSplitOptimizationIT.java     |   3 +-
 .../test/functional/ChaoticBalancerIT.java      |   3 +-
 .../accumulo/test/functional/CleanTmpIT.java    |   3 +-
 .../accumulo/test/functional/CompactionIT.java  |   3 +-
 .../accumulo/test/functional/ConcurrencyIT.java |   3 +-
 .../functional/ConfigurableCompactionIT.java    |   3 +-
 .../test/functional/ConfigurableMacIT.java      |  19 +-
 .../test/functional/DeleteEverythingIT.java     |   3 +-
 .../test/functional/DynamicThreadPoolsIT.java   |   3 +-
 .../test/functional/GarbageCollectorIT.java     |   3 +-
 .../test/functional/HalfDeadTServerIT.java      |   3 +-
 .../accumulo/test/functional/LargeRowIT.java    |   3 +-
 .../test/functional/LateLastContactIT.java      |   3 +-
 .../test/functional/MasterFailoverIT.java       |   3 +-
 .../accumulo/test/functional/MaxOpenIT.java     |   3 +-
 .../test/functional/MetadataMaxFiles.java       |   3 +-
 .../test/functional/MetadataSplitIT.java        |   3 +-
 .../accumulo/test/functional/RestartIT.java     |   3 +-
 .../test/functional/RestartStressIT.java        |   5 +-
 .../accumulo/test/functional/RowDeleteIT.java   |   3 +-
 .../test/functional/ScanSessionTimeOutIT.java   |   3 +-
 .../functional/SimpleBalancerFairnessIT.java    |   3 +-
 .../accumulo/test/functional/SplitIT.java       |   3 +-
 .../apache/accumulo/test/functional/SslIT.java  |   5 +-
 .../test/functional/SslWithClientAuthIT.java    |   5 +-
 .../accumulo/test/functional/TabletIT.java      |   3 +-
 .../test/functional/WriteAheadLogIT.java        |   5 +-
 .../test/functional/ZookeeperRestartIT.java     |   3 +-
 61 files changed, 1317 insertions(+), 558 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/accumulo/blob/2ef2d885/core/src/main/java/org/apache/accumulo/core/conf/Property.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/conf/Property.java b/core/src/main/java/org/apache/accumulo/core/conf/Property.java
index fb3e916..abad4ad 100644
--- a/core/src/main/java/org/apache/accumulo/core/conf/Property.java
+++ b/core/src/main/java/org/apache/accumulo/core/conf/Property.java
@@ -121,7 +121,17 @@ public enum Property {
       "A comma seperated list of dfs uris to use.  Files will be stored across these filesystems.  If this is empty, then instance.dfs.uri will be used.  "
           + "After adding uris to this list, run 'accumulo init --add-volume' and then restart tservers.  If entries are removed from this list then tservers "
           + "will need to be restarted.  After a uri is removed from the list Accumulo will not create new files in that location, however Accumulo can still "
-          + "reference files created at that location before the config change."),
+          + "reference files created at that location before the config change. To use a comma or other reserved characters in a URI use standard URI hex encoding."
+          + "For example replace commas with %2C."),
+  INSTANCE_VOLUMES_REPLACEMENTS(
+      "instance.volumes.replacements",
+      "",
+      PropertyType.STRING,
+      "Since accumulo stores absolute URIs changing the location of a namenode could prevent Accumulo from starting.  The property helps deal with that situation.  "
+          + "Provide a comma seperated list of uri replacement pairs here if a namenode location changes. Each pair shold be separated with a space.  For example if "
+          + "hdfs://nn1 was repalced with hdfs://nnA and hdfs://nn2 was replaced with hdfs://nnB, then set this property to "
+          + "'hdfs://nn1 hdfs://nnA,hdfs://nn2 hdfs://nnB'.   Replacements must be configured for use.  To see which volumes are currently in use, run 'accumulo admin volumes -l'."
+          + "To use a comma or other reserved characters in a URI use standard URI hex encoding. For example replace commas with %2C."),
   INSTANCE_SECURITY_AUTHENTICATOR("instance.security.authenticator", "org.apache.accumulo.server.security.handler.ZKAuthenticator", PropertyType.CLASSNAME,
       "The authenticator class that accumulo will use to determine if a user has privilege to perform an action"),
   INSTANCE_SECURITY_AUTHORIZOR("instance.security.authorizor", "org.apache.accumulo.server.security.handler.ZKAuthorizor", PropertyType.CLASSNAME,

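Taken together, the two properties above might be configured like this in accumulo-site.xml (a sketch: the namenode hosts and port are hypothetical, and the comment shows the %2C hex encoding for a comma described in the property help):

    <property>
      <name>instance.volumes</name>
      <value>hdfs://nnA:9001,hdfs://nnB:9001</value>
    </property>
    <property>
      <name>instance.volumes.replacements</name>
      <value>hdfs://nn1:9001 hdfs://nnA:9001,hdfs://nn2:9001 hdfs://nnB:9001</value>
    </property>
    <!-- a literal comma in a volume URI would be written as %2C,
         e.g. hdfs://nnA:9001/dir%2Cname -->
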
http://git-wip-us.apache.org/repos/asf/accumulo/blob/2ef2d885/core/src/main/java/org/apache/accumulo/core/tabletserver/log/LogEntry.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/tabletserver/log/LogEntry.java b/core/src/main/java/org/apache/accumulo/core/tabletserver/log/LogEntry.java
index fc2da4b..b2741c0 100644
--- a/core/src/main/java/org/apache/accumulo/core/tabletserver/log/LogEntry.java
+++ b/core/src/main/java/org/apache/accumulo/core/tabletserver/log/LogEntry.java
@@ -38,6 +38,17 @@ public class LogEntry {
   public int tabletId;
   public Collection<String> logSet;
   
+  public LogEntry() {}
+
+  public LogEntry(LogEntry le) {
+    this.extent = le.extent;
+    this.timestamp = le.timestamp;
+    this.server = le.server;
+    this.filename = le.filename;
+    this.tabletId = le.tabletId;
+    this.logSet = new ArrayList<String>(le.logSet);
+  }
+
   public String toString() {
     return extent.toString() + " " + filename + " (" + tabletId + ")";
   }
@@ -106,5 +117,4 @@ public class LogEntry {
   public Value getValue() {
     return new Value((StringUtil.join(logSet, ";") + "|" + tabletId).getBytes());
   }
-  
 }

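The copy constructor added above takes a defensive copy of the mutable logSet collection, so volume-switching code can derive an altered LogEntry without mutating the original. A minimal usage sketch (the WAL paths are hypothetical):

    import java.util.Collections;
    import org.apache.accumulo.core.tabletserver.log.LogEntry;

    LogEntry original = new LogEntry();
    original.filename = "hdfs://nn1:9001/accumulo/wal/tserver+9997/wal-0001";
    original.logSet = Collections.singletonList(original.filename);

    LogEntry copy = new LogEntry(original);
    copy.filename = "hdfs://nnA:9001/accumulo/wal/tserver+9997/wal-0001";
    // original.logSet is untouched; the constructor copied it into a new ArrayList
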
http://git-wip-us.apache.org/repos/asf/accumulo/blob/2ef2d885/docs/src/main/latex/accumulo_user_manual/chapters/multivolume.tex
----------------------------------------------------------------------
diff --git a/docs/src/main/latex/accumulo_user_manual/chapters/multivolume.tex b/docs/src/main/latex/accumulo_user_manual/chapters/multivolume.tex
index da04f39..07e7a1f 100644
--- a/docs/src/main/latex/accumulo_user_manual/chapters/multivolume.tex
+++ b/docs/src/main/latex/accumulo_user_manual/chapters/multivolume.tex
@@ -23,19 +23,19 @@ The HDFS NameNode holds all of the metadata about the files in
 HDFS. For fast performance, all of this information needs to be stored
 in memory.  A single NameNode with 64G of memory can store the
 metadata for tens of millions of files.  However, when scaling beyond a
-thousand nodes, an active accumulo system can generate lots of updates
+thousand nodes, an active Accumulo system can generate lots of updates
 to the file system, especially when data is being ingested.  The large
 number of write transactions to the NameNode, and the speed of a
 single edit log, can become the limiting factor for large scale
-accumulo installations.
+Accumulo installations.
 
-You can see the effect of slow write transactions when the accumulo
+You can see the effect of slow write transactions when the Accumulo
 Garbage Collector takes a long time (more than 5 minutes) to delete
-the files accumulo no longer needs.  If your Garbage Collector
+the files Accumulo no longer needs.  If your Garbage Collector
 routinely runs in less than a minute, the NameNode is performing well.
 
 However, if you do begin to experience slow-down and poor GC
-performance, accumulo can be configured to use multiple NameNode
+performance, Accumulo can be configured to use multiple NameNode
 servers.  The configuration ``instance.volumes'' should be set to a
 comma-separated list, using full URI references to different NameNode
 servers:
@@ -49,9 +49,39 @@ servers:
 \end{verbatim}
 \normalsize
 
-Any existing relative file references will be assumed to be stored in
-the first NameNode.  So, if you are growing your cluster to use
-multiple NameNodes, list the original server first.
+The introduction of multiple volume support in 1.6 changed the way Accumulo
+stores pointers to files.  It now stores fully qualified URI references to
+files.  Before 1.6, Accumulo stored paths that were relative to a table
+directory.  After an upgrade, these relative paths will still exist and are
+resolved using instance.dfs.dir, instance.dfs.uri, and Hadoop configuration in
+the same way they were before 1.6. 
+
+If the URI for a namenode changes (e.g. the namenode was running on host1 and
+is moved to host2), then Accumulo will no longer function.  Even if Hadoop and
+Accumulo configurations are changed, the fully qualified URIs stored in
+Accumulo will still contain the old URI.  To handle this, Accumulo has the
+following configuration property for replacing URIs stored in its metadata.
+The example configuration below will replace ns1 with nsA and ns2 with nsB in
+Accumulo metadata.  For this property to take effect, Accumulo will need to be
+restarted.
+
+\small
+\begin{verbatim}
+  <property>
+    <name>instance.volumes.replacements</name>
+    <value>hdfs://ns1:9001 hdfs://nsA:9001, hdfs://ns2:9001 hdfs://nsB:9001</value>
+  </property>
+\end{verbatim}
+\normalsize
+
+Using viewfs or an HA namenode, introduced in Hadoop 2, offers another option
+for managing the fully qualified URIs stored in Accumulo.  Viewfs and HA
+namenodes both introduce a level of indirection in the Hadoop configuration.
+For example, assume viewfs:///nn1 maps to hdfs://nn1 in the Hadoop
+configuration.  If viewfs:///nn1 is used by Accumulo, then it is easy to remap
+viewfs:///nn1 to hdfs://nnA by changing the Hadoop configuration without doing
+anything to Accumulo.  A production system should probably use an HA namenode.
+Viewfs may be useful on a test system with a single non-HA namenode.
 
 You may also want to configure your cluster to use Federation,
 available in Hadoop 2.0, which allows DataNodes to respond to multiple

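To make the viewfs indirection above concrete, the Hadoop side of such a mapping could look roughly like this core-site.xml fragment (a sketch: the mount point and host are hypothetical). Repointing /nn1 at a different namenode is then purely a Hadoop configuration change:

    <property>
      <name>fs.defaultFS</name>
      <value>viewfs:///</value>
    </property>
    <property>
      <name>fs.viewfs.mounttable.default.link./nn1</name>
      <value>hdfs://nn1:9001/</value>
    </property>
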
http://git-wip-us.apache.org/repos/asf/accumulo/blob/2ef2d885/server/base/src/main/java/org/apache/accumulo/server/ServerConstants.java
----------------------------------------------------------------------
diff --git a/server/base/src/main/java/org/apache/accumulo/server/ServerConstants.java b/server/base/src/main/java/org/apache/accumulo/server/ServerConstants.java
index 9d490e4..eb928ba 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/ServerConstants.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/ServerConstants.java
@@ -17,19 +17,27 @@
 package org.apache.accumulo.server;
 
 import java.io.IOException;
+import java.net.URI;
+import java.net.URISyntaxException;
 import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.List;
 
+import org.apache.accumulo.core.conf.AccumuloConfiguration;
 import org.apache.accumulo.core.conf.Property;
 import org.apache.accumulo.core.metadata.MetadataTable;
 import org.apache.accumulo.core.util.CachedConfiguration;
+import org.apache.accumulo.core.util.Pair;
 import org.apache.accumulo.core.zookeeper.ZooUtil;
 import org.apache.accumulo.server.conf.ServerConfiguration;
+import org.apache.accumulo.server.fs.VolumeUtil;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 
 public class ServerConstants {
-  
+
   public static final String VERSION_DIR = "version";
 
   public static final String INSTANCE_ID_DIR = "instance_id";
@@ -39,22 +47,24 @@ public class ServerConstants {
    * (versions should never be negative)
    */
   public static final Integer WIRE_VERSION = 3;
-  
+
   /**
    * current version (6) reflects the addition of a separate root table (ACCUMULO-1481) in version 1.6.0
    */
   public static final int DATA_VERSION = 6;
   public static final int PREV_DATA_VERSION = 5;
-  
+
   private static String[] baseDirs = null;
   private static String defaultBaseDir = null;
 
+  private static List<Pair<Path,Path>> replacementsList = null;
+
   public static synchronized String getDefaultBaseDir() {
     if (defaultBaseDir == null) {
       String singleNamespace = ServerConfiguration.getSiteConfiguration().get(Property.INSTANCE_DFS_DIR);
       String dfsUri = ServerConfiguration.getSiteConfiguration().get(Property.INSTANCE_DFS_URI);
       String baseDir;
-      
+
       if (dfsUri == null || dfsUri.isEmpty()) {
         Configuration hadoopConfig = CachedConfiguration.getInstance();
         try {
@@ -67,17 +77,17 @@ public class ServerConstants {
           throw new IllegalArgumentException("Expected fully qualified URI for " + Property.INSTANCE_DFS_URI.getKey() + " got " + dfsUri);
         baseDir = dfsUri + singleNamespace;
       }
-      
+
       defaultBaseDir = new Path(baseDir).toString();
-      
+
     }
-    
+
     return defaultBaseDir;
   }
 
-  public static String[] getConfiguredBaseDirs() {
-    String singleNamespace = ServerConfiguration.getSiteConfiguration().get(Property.INSTANCE_DFS_DIR);
-    String ns = ServerConfiguration.getSiteConfiguration().get(Property.INSTANCE_VOLUMES);
+  public static String[] getConfiguredBaseDirs(AccumuloConfiguration conf) {
+    String singleNamespace = conf.get(Property.INSTANCE_DFS_DIR);
+    String ns = conf.get(Property.INSTANCE_VOLUMES);
 
     String configuredBaseDirs[];
 
@@ -85,12 +95,22 @@ public class ServerConstants {
       configuredBaseDirs = new String[] {getDefaultBaseDir()};
     } else {
       String namespaces[] = ns.split(",");
+      String unescapedNamespaces[] = new String[namespaces.length];
+      int i = 0;
       for (String namespace : namespaces) {
         if (!namespace.contains(":")) {
           throw new IllegalArgumentException("Expected fully qualified URI for " + Property.INSTANCE_VOLUMES.getKey() + " got " + namespace);
         }
+
+        try {
+          // pass through URI to unescape hex encoded chars (e.g. convert %2C to "," char)
+          unescapedNamespaces[i++] = new Path(new URI(namespace)).toString();
+        } catch (URISyntaxException e) {
+          throw new IllegalArgumentException(Property.INSTANCE_VOLUMES.getKey() + " contains " + namespace + " which has a syntax error", e);
+        }
       }
-      configuredBaseDirs = prefix(namespaces, singleNamespace);
+
+      configuredBaseDirs = prefix(unescapedNamespaces, singleNamespace);
     }
 
     return configuredBaseDirs;
@@ -99,9 +119,8 @@ public class ServerConstants {
   // these are functions to delay loading the Accumulo configuration unless we must
   public static synchronized String[] getBaseDirs() {
     if (baseDirs == null) {
-      baseDirs = checkBaseDirs(getConfiguredBaseDirs(), false);
+      baseDirs = checkBaseDirs(getConfiguredBaseDirs(ServerConfiguration.getSiteConfiguration()), false);
     }
-    
 
     return baseDirs;
   }
@@ -148,7 +167,7 @@ public class ServerConstants {
 
     return baseDirsList.toArray(new String[baseDirsList.size()]);
   }
-  
+
   public static String[] prefix(String bases[], String suffix) {
     if (suffix.startsWith("/"))
       suffix = suffix.substring(1);
@@ -158,7 +177,7 @@ public class ServerConstants {
     }
     return result;
   }
-  
+
   public static final String TABLE_DIR = "tables";
   public static final String RECOVERY_DIR = "recovery";
   public static final String WAL_DIR = "wal";
@@ -170,32 +189,86 @@ public class ServerConstants {
   public static String[] getRecoveryDirs() {
     return prefix(getBaseDirs(), RECOVERY_DIR);
   }
-  
+
   public static String[] getWalDirs() {
     return prefix(getBaseDirs(), WAL_DIR);
   }
-  
+
   public static String[] getWalogArchives() {
     return prefix(getBaseDirs(), "walogArchive");
   }
-  
+
   public static Path getInstanceIdLocation() {
     // all base dirs should have the same instance id, so can choose any one
     return new Path(getBaseDirs()[0], INSTANCE_ID_DIR);
   }
-  
+
   public static Path getDataVersionLocation() {
     // all base dirs should have the same version, so can choose any one
     return new Path(getBaseDirs()[0], VERSION_DIR);
   }
-  
 
   public static String[] getMetadataTableDirs() {
     return prefix(getTablesDirs(), MetadataTable.ID);
   }
-  
+
   public static String[] getTemporaryDirs() {
     return prefix(getBaseDirs(), "tmp");
   }
 
+  public static synchronized List<Pair<Path,Path>> getVolumeReplacements() {
+
+    if (replacementsList == null) {
+      String replacements = ServerConfiguration.getSiteConfiguration().get(Property.INSTANCE_VOLUMES_REPLACEMENTS);
+
+      replacements = replacements.trim();
+
+      if (replacements.isEmpty())
+        return Collections.emptyList();
+
+      String[] pairs = replacements.split(",");
+      List<Pair<Path,Path>> ret = new ArrayList<Pair<Path,Path>>();
+
+      for (String pair : pairs) {
+        String uris[] = pair.split("\\s+");
+        if (uris.length != 2)
+          throw new IllegalArgumentException(Property.INSTANCE_VOLUMES_REPLACEMENTS.getKey() + " contains malformed pair " + pair);
+
+        Path p1, p2;
+        try {
+          // URI constructor handles hex escaping
+          p1 = new Path(new URI(VolumeUtil.removeSlash(uris[0].trim())));
+          if (p1.toUri().getScheme() == null)
+            throw new IllegalArgumentException(Property.INSTANCE_VOLUMES_REPLACEMENTS.getKey() + " contains " + uris[0] + " which is not fully qualified");
+        } catch (URISyntaxException e) {
+          throw new IllegalArgumentException(Property.INSTANCE_VOLUMES_REPLACEMENTS.getKey() + " contains " + uris[0] + " which has a syntax error", e);
+        }
+
+        try {
+          p2 = new Path(new URI(VolumeUtil.removeSlash(uris[1].trim())));
+          if (p2.toUri().getScheme() == null)
+            throw new IllegalArgumentException(Property.INSTANCE_VOLUMES_REPLACEMENTS.getKey() + " contains " + uris[1] + " which is not fully qualified");
+        } catch (URISyntaxException e) {
+          throw new IllegalArgumentException(Property.INSTANCE_VOLUMES_REPLACEMENTS.getKey() + " contains " + uris[1] + " which has a syntax error", e);
+        }
+
+        ret.add(new Pair<Path,Path>(p1, p2));
+      }
+
+      HashSet<Path> baseDirs = new HashSet<Path>();
+      for (String baseDir : getBaseDirs()) {
+        // normalize using path and remove accumulo dir
+        baseDirs.add(new Path(baseDir).getParent());
+      }
+
+      for (Pair<Path,Path> pair : ret)
+        if (!baseDirs.contains(pair.getSecond()))
+          throw new IllegalArgumentException(Property.INSTANCE_VOLUMES_REPLACEMENTS.getKey() + " contains " + pair.getSecond()
+              + " which is not a configured volume");
+
+      // only set if get here w/o exception
+      replacementsList = ret;
+    }
+    return replacementsList;
+  }
 }

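The "pass through URI" trick used by getConfiguredBaseDirs and getVolumeReplacements leans on java.net.URI to decode percent-escapes before the value reaches Hadoop's Path. A standalone sketch of that behavior (the directory name is hypothetical):

    import java.net.URI;
    import org.apache.hadoop.fs.Path;

    public class UnescapeExample {
      public static void main(String[] args) throws Exception {
        // %2C decodes to a literal comma; escaping it in the property value
        // is what lets it survive the split(",") on the raw string
        String configured = "hdfs://nn1:9001/accumulo%2Cprod";
        System.out.println(new Path(new URI(configured)).toString());
        // expected: hdfs://nn1:9001/accumulo,prod
      }
    }
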
http://git-wip-us.apache.org/repos/asf/accumulo/blob/2ef2d885/server/base/src/main/java/org/apache/accumulo/server/fs/VolumeManager.java
----------------------------------------------------------------------
diff --git a/server/base/src/main/java/org/apache/accumulo/server/fs/VolumeManager.java b/server/base/src/main/java/org/apache/accumulo/server/fs/VolumeManager.java
index c2c04e5..f0c7083 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/fs/VolumeManager.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/fs/VolumeManager.java
@@ -17,7 +17,6 @@
 package org.apache.accumulo.server.fs;
 
 import java.io.IOException;
-import java.util.Map;
 
 import org.apache.accumulo.core.data.Key;
 import org.apache.accumulo.server.ServerConstants;
@@ -29,100 +28,123 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 
 /**
- * A wrapper around multiple hadoop FileSystem objects, which are assumed to be different volumes.
- * This also concentrates a bunch of meta-operations like waiting for SAFE_MODE, and closing WALs.
+ * A wrapper around multiple hadoop FileSystem objects, which are assumed to be different volumes. This also concentrates a bunch of meta-operations like
+ * waiting for SAFE_MODE, and closing WALs.
  */
 public interface VolumeManager {
-  
-  
-  
+
   public static enum FileType {
     TABLE(ServerConstants.TABLE_DIR), WAL(ServerConstants.WAL_DIR), RECOVERY(ServerConstants.RECOVERY_DIR);
-    
+
     private String dir;
-    
+
     FileType(String dir) {
       this.dir = dir;
     }
-    
+
     public String getDirectory() {
       return dir;
     }
+
+    private static int endOfVolumeIndex(String path, String dir) {
+      int dirIndex = path.indexOf('/' + dir);
+      if (dirIndex != -1) {
+        return path.lastIndexOf('/', dirIndex - 1);
+      }
+
+      if (path.contains(":"))
+        throw new IllegalArgumentException(path + " is absolute, but does not contain " + dir);
+      return -1;
+
+    }
+
+    public Path getVolume(Path path) {
+      String pathString = path.toString();
+
+      int eopi = endOfVolumeIndex(pathString, dir);
+      if (eopi != -1)
+        return new Path(pathString.substring(0, eopi + 1));
+
+      return null;
+    }
+
+    public Path removeVolume(Path path) {
+      String pathString = path.toString();
+
+      int eopi = endOfVolumeIndex(pathString, dir);
+      if (eopi != -1)
+        return new Path(pathString.substring(eopi + 1));
+
+      return null;
+    }
   }
-  
+
   // close the underlying FileSystems
   void close() throws IOException;
-  
-  // the mechanism by which the master ensures that tablet servers can no longer write to a WAL
-  boolean closePossiblyOpenFile(Path path) throws IOException;
-  
+
   // forward to the appropriate FileSystem object
   FSDataOutputStream create(Path dest) throws IOException;
-  
+
   // forward to the appropriate FileSystem object
   FSDataOutputStream create(Path path, boolean b) throws IOException;
-  
+
   // forward to the appropriate FileSystem object
   FSDataOutputStream create(Path path, boolean b, int int1, short int2, long long1) throws IOException;
-  
+
   // create a file, but only if it doesn't exist
   boolean createNewFile(Path writable) throws IOException;
-  
+
   // create a file which can be sync'd to disk
   FSDataOutputStream createSyncable(Path logPath, int buffersize, short replication, long blockSize) throws IOException;
-  
+
   // delete a file
   boolean delete(Path path) throws IOException;
-  
+
   // delete a directory and anything under it
   boolean deleteRecursively(Path path) throws IOException;
-  
+
   // forward to the appropriate FileSystem object
   boolean exists(Path path) throws IOException;
-  
+
   // forward to the appropriate FileSystem object
   FileStatus getFileStatus(Path path) throws IOException;
-  
+
   // find the appropriate FileSystem object given a path
   FileSystem getFileSystemByPath(Path path);
-  
-  // get a mapping of volume to FileSystem
-  Map<String, ? extends FileSystem> getFileSystems();
-  
+
   // return the item in options that is in the same volume as source
   Path matchingFileSystem(Path source, String[] options);
-  
-  
+
   // forward to the appropriate FileSystem object
   FileStatus[] listStatus(Path path) throws IOException;
-  
+
   // forward to the appropriate FileSystem object
   boolean mkdirs(Path directory) throws IOException;
-  
+
   // forward to the appropriate FileSystem object
   FSDataInputStream open(Path path) throws IOException;
-  
+
   // forward to the appropriate FileSystem object, throws an exception if the paths are in different volumes
   boolean rename(Path path, Path newPath) throws IOException;
-  
+
   // forward to the appropriate FileSystem object
   boolean moveToTrash(Path sourcePath) throws IOException;
-  
+
   // forward to the appropriate FileSystem object
   short getDefaultReplication(Path logPath);
-  
+
   // forward to the appropriate FileSystem object
   boolean isFile(Path path) throws IOException;
-  
+
   // all volumes are ready to provide service (not in SafeMode, for example)
   boolean isReady() throws IOException;
-  
+
   // forward to the appropriate FileSystem object
   FileStatus[] globStatus(Path path) throws IOException;
 
   // Convert a file or directory metadata reference into a path
   Path getFullPath(Key key);
-  
+
   Path getFullPath(String tableId, String path);
 
   // Given a filename, figure out the qualified path given multiple namespaces
@@ -133,5 +155,4 @@ public interface VolumeManager {
 
   // decide on which of the given locations to create a new file
   String choose(String[] options);
-
 }

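The new FileType.getVolume/removeVolume helpers split a fully qualified path at the volume boundary, where the volume is everything above the Accumulo base dir. A sketch of the expected round trip (the path is hypothetical):

    import org.apache.accumulo.server.fs.VolumeManager.FileType;
    import org.apache.hadoop.fs.Path;

    Path file = new Path("hdfs://nn1:9001/accumulo/tables/2/t-00001/F0000000.rf");

    // everything up to the parent of the base dir: hdfs://nn1:9001/
    Path volume = FileType.TABLE.getVolume(file);

    // the remainder, relative, starting at the base dir name:
    // accumulo/tables/2/t-00001/F0000000.rf
    Path rest = FileType.TABLE.removeVolume(file);

    // both methods return null when the path carries no volume
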
http://git-wip-us.apache.org/repos/asf/accumulo/blob/2ef2d885/server/base/src/main/java/org/apache/accumulo/server/fs/VolumeManagerImpl.java
----------------------------------------------------------------------
diff --git a/server/base/src/main/java/org/apache/accumulo/server/fs/VolumeManagerImpl.java b/server/base/src/main/java/org/apache/accumulo/server/fs/VolumeManagerImpl.java
index b577891..165c3b8 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/fs/VolumeManagerImpl.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/fs/VolumeManagerImpl.java
@@ -16,7 +16,6 @@
  */
 package org.apache.accumulo.server.fs;
 
-import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.lang.reflect.Field;
 import java.lang.reflect.InvocationTargetException;
@@ -47,7 +46,6 @@ import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.LocalFileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.Trash;
 import org.apache.hadoop.fs.permission.FsPermission;
@@ -94,26 +92,6 @@ public class VolumeManagerImpl implements VolumeManager {
   }
 
   @Override
-  public boolean closePossiblyOpenFile(Path path) throws IOException {
-    FileSystem fs = getFileSystemByPath(path);
-    if (fs instanceof DistributedFileSystem) {
-      DistributedFileSystem dfs = (DistributedFileSystem) fs;
-      try {
-        return dfs.recoverLease(path);
-      } catch (FileNotFoundException ex) {
-        throw ex;
-      }
-    } else if (fs instanceof LocalFileSystem) {
-      // ignore
-    } else {
-      throw new IllegalStateException("Don't know how to recover a lease for " + fs.getClass().getName());
-    }
-    fs.append(path).close();
-    log.info("Recovered lease on " + path.toString() + " using append");
-    return true;
-  }
-
-  @Override
   public FSDataOutputStream create(Path path) throws IOException {
     FileSystem fs = getFileSystemByPath(path);
     return fs.create(path);
@@ -207,8 +185,18 @@ public class VolumeManagerImpl implements VolumeManager {
   protected void ensureSyncIsEnabled() {
     for (Entry<String,? extends FileSystem> entry : getFileSystems().entrySet()) {
       final String volumeName = entry.getKey();
-      final FileSystem fs = entry.getValue();
-      
+      FileSystem fs = entry.getValue();
+
+      if (ViewFSUtils.isViewFS(fs)) {
+        try {
+          FileSystem resolvedFs = ViewFSUtils.resolvePath(fs, new Path("/")).getFileSystem(fs.getConf());
+          log.debug("resolved " + fs.getUri() + " to " + resolvedFs.getUri() + " for sync check");
+          fs = resolvedFs;
+        } catch (IOException e) {
+          log.warn("Failed to resolve " + fs.getUri(), e);
+        }
+      }
+
       if (fs instanceof DistributedFileSystem) {
         final String DFS_DURABLE_SYNC = "dfs.durable.sync", DFS_SUPPORT_APPEND = "dfs.support.append";
         final String ticketMessage = "See ACCUMULO-623 and ACCUMULO-1637 for more details.";
@@ -216,11 +204,11 @@ public class VolumeManagerImpl implements VolumeManager {
         try {
           // If the default is off (0.20.205.x or 1.0.x)
           DFSConfigKeys configKeys = new DFSConfigKeys();
-          
+
           // Can't use the final constant itself as Java will inline it at compile time
           Field dfsSupportAppendDefaultField = configKeys.getClass().getField("DFS_SUPPORT_APPEND_DEFAULT");
           boolean dfsSupportAppendDefaultValue = dfsSupportAppendDefaultField.getBoolean(configKeys);
-          
+
           if (!dfsSupportAppendDefaultValue) {
             // See if the user did the correct override
             if (!fs.getConf().getBoolean(DFS_SUPPORT_APPEND, false)) {
@@ -233,9 +221,10 @@ public class VolumeManagerImpl implements VolumeManager {
           // If we can't find DFSConfigKeys.DFS_SUPPORT_APPEND_DEFAULT, the user is running
           // 1.1.x or 1.2.x. This is ok, though, as, by default, these versions have append/sync enabled.
         } catch (Exception e) {
-          log.warn("Error while checking for " + DFS_SUPPORT_APPEND + " on volume " + volumeName + ". The user should ensure that Hadoop is configured to properly supports append and sync. " + ticketMessage, e);
+          log.warn("Error while checking for " + DFS_SUPPORT_APPEND + " on volume " + volumeName
+              + ". The user should ensure that Hadoop is configured to properly supports append and sync. " + ticketMessage, e);
         }
-        
+
         // If either of these parameters are configured to be false, fail.
         // This is a sign that someone is writing bad configuration.
         if (!fs.getConf().getBoolean(DFS_SUPPORT_APPEND, true) || !fs.getConf().getBoolean(DFS_DURABLE_SYNC, true)) {
@@ -243,12 +232,12 @@ public class VolumeManagerImpl implements VolumeManager {
           log.fatal(msg);
           throw new RuntimeException(msg);
         }
-        
+
         try {
           // Check DFSConfigKeys to see if DFS_DATANODE_SYNCONCLOSE_KEY exists (should be everything >=1.1.1 and the 0.23 line)
           Class<?> dfsConfigKeysClz = Class.forName("org.apache.hadoop.hdfs.DFSConfigKeys");
           dfsConfigKeysClz.getDeclaredField("DFS_DATANODE_SYNCONCLOSE_KEY");
-        
+
           // Everything else
           if (!fs.getConf().getBoolean("dfs.datanode.synconclose", false)) {
             log.warn("dfs.datanode.synconclose set to false in hdfs-site.xml: data loss is possible on system reset or power loss");
@@ -287,8 +276,7 @@ public class VolumeManagerImpl implements VolumeManager {
     return defaultVolume;
   }
 
-  @Override
-  public Map<String,? extends FileSystem> getFileSystems() {
+  private Map<String,? extends FileSystem> getFileSystems() {
     return volumes;
   }
 
@@ -362,25 +350,34 @@ public class VolumeManagerImpl implements VolumeManager {
     Map<String,FileSystem> fileSystems = new HashMap<String,FileSystem>();
     Configuration hadoopConf = CachedConfiguration.getInstance();
     fileSystems.put(DEFAULT, FileUtil.getFileSystem(hadoopConf, conf));
-    String ns = conf.get(Property.INSTANCE_VOLUMES);
-    if (ns != null && !ns.isEmpty()) {
-      for (String space : ns.split(",")) {
-        if (space.equals(DEFAULT))
-          throw new IllegalArgumentException();
-
-        if (space.contains(":")) {
-          fileSystems.put(space, new Path(space).getFileSystem(hadoopConf));
-        } else {
-          throw new IllegalArgumentException("Expected fully qualified URI for " + Property.INSTANCE_VOLUMES.getKey() + " got " + space);
-        }
+    for (String space : ServerConstants.getConfiguredBaseDirs(conf)) {
+      if (space.equals(DEFAULT))
+        throw new IllegalArgumentException();
+
+      if (space.contains(":")) {
+        fileSystems.put(space, new Path(space).getFileSystem(hadoopConf));
+      } else {
+        throw new IllegalArgumentException("Expected fully qualified URI for " + Property.INSTANCE_VOLUMES.getKey() + " got " + space);
       }
     }
+
     return new VolumeManagerImpl(fileSystems, DEFAULT, conf);
   }
 
   @Override
   public boolean isReady() throws IOException {
     for (FileSystem fs : getFileSystems().values()) {
+
+      if (ViewFSUtils.isViewFS(fs)) {
+        try {
+          FileSystem resolvedFs = ViewFSUtils.resolvePath(fs, new Path("/")).getFileSystem(fs.getConf());
+          log.debug("resolved " + fs.getUri() + " to " + resolvedFs.getUri() + " for ready check");
+          fs = resolvedFs;
+        } catch (IOException e) {
+          log.warn("Failed to resolve " + fs.getUri(), e);
+        }
+      }
+
       if (!(fs instanceof DistributedFileSystem))
         continue;
       DistributedFileSystem dfs = (DistributedFileSystem) fs;
@@ -462,28 +459,28 @@ public class VolumeManagerImpl implements VolumeManager {
   public Path getFullPath(String tableId, String path) {
     if (path.contains(":"))
       return new Path(path);
-    
+
     if (path.startsWith("../"))
       path = path.substring(2);
     else if (path.startsWith("/"))
       path = "/" + tableId + path;
     else
       throw new IllegalArgumentException("Unexpected path prefix " + path);
-    
+
     return getFullPath(FileType.TABLE, path);
   }
-  
+
   @Override
   public Path getFullPath(FileType fileType, String path) {
     if (path.contains(":"))
       return new Path(path);
-    
+
     // normalize the path
     Path fullPath = new Path(ServerConstants.getDefaultBaseDir(), fileType.getDirectory());
     if (path.startsWith("/"))
       path = path.substring(1);
     fullPath = new Path(fullPath, path);
-    
+
     FileSystem fs = getFileSystemByPath(fullPath);
     return fs.makeQualified(fullPath);
   }

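getFullPath is how pre-1.6 relative file references keep resolving after an upgrade. A sketch of the three accepted shapes, assuming a VolumeManager vm and a default base dir of hdfs://nn1:9001/accumulo (the table id and file names are hypothetical):

    // fully qualified: returned as-is
    vm.getFullPath("2", "hdfs://nn1:9001/accumulo/tables/2/t-00001/F0000000.rf");

    // "../<tableId>/..." form stored in older metadata; resolves to
    // hdfs://nn1:9001/accumulo/tables/2/t-00001/F0000000.rf
    vm.getFullPath("2", "../2/t-00001/F0000000.rf");

    // "/..." form relative to the table directory; the table id is
    // prepended, yielding the same result as above
    vm.getFullPath("2", "/t-00001/F0000000.rf");
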
http://git-wip-us.apache.org/repos/asf/accumulo/blob/2ef2d885/server/base/src/main/java/org/apache/accumulo/server/fs/VolumeUtil.java
----------------------------------------------------------------------
diff --git a/server/base/src/main/java/org/apache/accumulo/server/fs/VolumeUtil.java b/server/base/src/main/java/org/apache/accumulo/server/fs/VolumeUtil.java
new file mode 100644
index 0000000..4f8d5e8
--- /dev/null
+++ b/server/base/src/main/java/org/apache/accumulo/server/fs/VolumeUtil.java
@@ -0,0 +1,330 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.server.fs;
+
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.security.SecureRandom;
+import java.util.ArrayList;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map.Entry;
+import java.util.SortedMap;
+import java.util.TreeMap;
+
+import org.apache.accumulo.core.data.KeyExtent;
+import org.apache.accumulo.core.metadata.schema.DataFileValue;
+import org.apache.accumulo.core.tabletserver.log.LogEntry;
+import org.apache.accumulo.core.util.CachedConfiguration;
+import org.apache.accumulo.core.util.Pair;
+import org.apache.accumulo.server.ServerConstants;
+import org.apache.accumulo.server.fs.VolumeManager.FileType;
+import org.apache.accumulo.server.security.SystemCredentials;
+import org.apache.accumulo.server.util.MetadataTableUtil;
+import org.apache.accumulo.server.zookeeper.ZooLock;
+import org.apache.commons.codec.digest.DigestUtils;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.io.Text;
+import org.apache.log4j.Logger;
+
+public class VolumeUtil {
+
+  private static final Logger log = Logger.getLogger(VolumeUtil.class);
+
+  public static boolean isActiveVolume(Path dir) {
+
+    // consider relative path as active and take no action
+    if (!dir.toString().contains(":"))
+      return true;
+
+    for (String tableDir : ServerConstants.getTablesDirs()) {
+      // use Path to normalize tableDir
+      if (dir.toString().startsWith(new Path(tableDir).toString()))
+        return true;
+    }
+
+    return false;
+  }
+
+  public static String removeSlash(String path) {
+    while (path.endsWith("/"))
+      path = path.substring(0, path.length() - 1);
+    return path;
+  }
+
+  public static Path removeSlash(Path path) {
+    if (path.toString().endsWith("/"))
+      return new Path(removeSlash(path.toString()));
+    return path;
+  }
+
+  public static String switchVolume(String path, FileType ft, List<Pair<Path,Path>> replacements) {
+    if (replacements.size() == 0)
+      return null;
+
+    Path p = new Path(path);
+
+    // removing slash because new Path("hdfs://nn1").equals(new Path("hdfs://nn1/")) evaluates to false
+    Path volume = removeSlash(ft.getVolume(p));
+
+    for (Pair<Path,Path> pair : replacements) {
+      Path key = removeSlash(pair.getFirst());
+
+      if (key.equals(volume))
+        return new Path(pair.getSecond(), ft.removeVolume(p)).toString();
+    }
+
+    return null;
+  }
+
+  private static LogEntry switchVolumes(LogEntry le, List<Pair<Path,Path>> replacements) {
+    String switchedPath = switchVolume(le.filename, FileType.WAL, replacements);
+    int numSwitched = 0;
+    if (switchedPath != null)
+      numSwitched++;
+    else
+      switchedPath = le.filename;
+
+    ArrayList<String> switchedLogs = new ArrayList<String>();
+    for (String log : le.logSet) {
+      String switchedLog = switchVolume(log, FileType.WAL, replacements);
+      if (switchedLog != null) {
+        switchedLogs.add(switchedLog);
+        numSwitched++;
+      } else {
+        switchedLogs.add(log);
+      }
+
+    }
+
+    if (numSwitched == 0)
+      return null;
+
+    LogEntry newLogEntry = new LogEntry(le);
+    newLogEntry.filename = switchedPath;
+    newLogEntry.logSet = switchedLogs;
+
+    return newLogEntry;
+  }
+
+  public static class TabletFiles {
+    public String dir;
+    public List<LogEntry> logEntries;
+    public SortedMap<FileRef,DataFileValue> datafiles;
+
+    public TabletFiles() {
+      logEntries = new ArrayList<LogEntry>();
+      datafiles = new TreeMap<FileRef,DataFileValue>();
+    }
+
+    public TabletFiles(String dir, List<LogEntry> logEntries, SortedMap<FileRef,DataFileValue> datafiles) {
+      this.dir = dir;
+      this.logEntries = logEntries;
+      this.datafiles = datafiles;
+    }
+  }
+
+  public static Text switchRootTabletVolume(KeyExtent extent, Text location) throws IOException {
+    if (extent.isRootTablet()) {
+      String newLocation = VolumeUtil.switchVolume(location.toString(), FileType.TABLE, ServerConstants.getVolumeReplacements());
+      if (newLocation != null) {
+        MetadataTableUtil.setRootTabletDir(newLocation);
+        log.info("Volume replaced " + extent + " : " + location + " -> " + newLocation);
+        return new Text(new Path(newLocation).toString());
+      }
+    }
+    return location;
+  }
+
+  // Change volumes used by tablet based on configuration changes
+  public static TabletFiles updateTabletVolumes(ZooLock zooLock, VolumeManager vm, KeyExtent extent, TabletFiles tabletFiles) throws IOException {
+    List<Pair<Path,Path>> replacements = ServerConstants.getVolumeReplacements();
+
+    List<LogEntry> logsToRemove = new ArrayList<LogEntry>();
+    List<LogEntry> logsToAdd = new ArrayList<LogEntry>();
+
+    List<FileRef> filesToRemove = new ArrayList<FileRef>();
+    SortedMap<FileRef,DataFileValue> filesToAdd = new TreeMap<FileRef,DataFileValue>();
+
+    TabletFiles ret = new TabletFiles();
+
+    for (LogEntry logEntry : tabletFiles.logEntries) {
+      LogEntry switchedLogEntry = switchVolumes(logEntry, replacements);
+      if (switchedLogEntry != null) {
+        logsToRemove.add(logEntry);
+        logsToAdd.add(switchedLogEntry);
+        ret.logEntries.add(switchedLogEntry);
+        log.debug("Replacing volume " + extent + " : " + logEntry.filename + " -> " + switchedLogEntry.filename);
+      } else {
+        ret.logEntries.add(logEntry);
+      }
+    }
+
+    if (extent.isRootTablet()) {
+      ret.datafiles = tabletFiles.datafiles;
+    } else {
+      for (Entry<FileRef,DataFileValue> entry : tabletFiles.datafiles.entrySet()) {
+        String metaPath = entry.getKey().meta().toString();
+        String switchedPath = switchVolume(metaPath, FileType.TABLE, replacements);
+        if (switchedPath != null) {
+          filesToRemove.add(entry.getKey());
+          FileRef switchedRef = new FileRef(switchedPath, new Path(switchedPath));
+          filesToAdd.put(switchedRef, entry.getValue());
+          ret.datafiles.put(switchedRef, entry.getValue());
+          log.debug("Replacing volume " + extent + " : " + metaPath + " -> " + switchedPath);
+        } else {
+          ret.datafiles.put(entry.getKey(), entry.getValue());
+        }
+      }
+    }
+
+    String tabletDir = tabletFiles.dir;
+    String switchedDir = switchVolume(tabletDir, FileType.TABLE, replacements);
+
+    if (switchedDir != null) {
+      log.debug("Replacing volume " + extent + " : " + tabletDir + " -> " + switchedDir);
+      tabletDir = switchedDir;
+    }
+
+    if (logsToRemove.size() + filesToRemove.size() > 0 || switchedDir != null)
+      MetadataTableUtil.updateTabletVolumes(extent, logsToRemove, logsToAdd, filesToRemove, filesToAdd, switchedDir, zooLock, SystemCredentials.get());
+
+    ret.dir = decommisionedTabletDir(zooLock, vm, extent, tabletDir);
+
+    // this method should return the exact strings that are in the metadata table
+    return ret;
+
+  }
+
+  public static String decommisionedTabletDir(ZooLock zooLock, VolumeManager vm, KeyExtent extent, String metaDir) throws IOException {
+    Path dir = new Path(metaDir);
+    if (isActiveVolume(dir))
+      return metaDir;
+
+    if (!dir.getParent().getParent().getName().equals(ServerConstants.TABLE_DIR)) {
+      throw new IllegalArgumentException("Unexpected table dir " + dir);
+    }
+
+    Path newDir = new Path(vm.choose(ServerConstants.getTablesDirs()) + "/" + dir.getParent().getName() + "/" + dir.getName());
+
+    log.info("Updating directory for " + extent + " from " + dir + " to " + newDir);
+    if (extent.isRootTablet()) {
+      // the root tablet is a special case; its files need to be copied if its dir is changed
+
+      // this code needs to be idempotent
+
+      FileSystem fs1 = vm.getFileSystemByPath(dir);
+      FileSystem fs2 = vm.getFileSystemByPath(newDir);
+
+      if (!same(fs1, dir, fs2, newDir)) {
+        if (fs2.exists(newDir)) {
+          Path newDirBackup = getBackupName(fs2, newDir);
+          // never delete anything because we're dealing with the root tablet
+          // one reason this dir may exist is because this method failed previously
+          log.info("renaming " + newDir + " to " + newDirBackup);
+          if (!fs2.rename(newDir, newDirBackup)) {
+            throw new IOException("Failed to rename " + newDir + " to " + newDirBackup);
+          }
+        }
+
+        // do a lot of logging since this is the root tablet
+        log.info("copying " + dir + " to " + newDir);
+        if (!FileUtil.copy(fs1, dir, fs2, newDir, false, CachedConfiguration.getInstance())) {
+          throw new IOException("Failed to copy " + dir + " to " + newDir);
+        }
+
+        // only set the new location in zookeeper after a successful copy
+        log.info("setting root tablet location to " + newDir);
+        MetadataTableUtil.setRootTabletDir(newDir.toString());
+
+        // rename the old dir to avoid confusion when someone looks at the filesystem... it's ok if we fail here and this does not happen because the
+        // location in zookeeper is the authority
+        Path dirBackup = getBackupName(fs1, dir);
+        log.info("renaming " + dir + " to " + dirBackup);
+        fs1.rename(dir, dirBackup);
+
+      } else {
+        log.info("setting root tablet location to " + newDir);
+        MetadataTableUtil.setRootTabletDir(newDir.toString());
+      }
+
+      return newDir.toString();
+    } else {
+      MetadataTableUtil.updateTabletDir(extent, newDir.toString(), SystemCredentials.get(), zooLock);
+      return newDir.toString();
+    }
+  }
+
+  static boolean same(FileSystem fs1, Path dir, FileSystem fs2, Path newDir) throws FileNotFoundException, IOException {
+    // it's possible that a user changes config in such a way that two URIs point to the same thing, e.g. hdfs://foo/a/b and hdfs://1.2.3.4/a/b both reference
+    // the same thing because DNS resolves foo to 1.2.3.4. This method does not analyze URIs to determine if they are equivalent; instead it inspects the
+    // contents of what the URIs point to.
+
+    // this code is called infrequently and does not need to be optimized.
+
+    if (fs1.exists(dir) && fs2.exists(newDir)) {
+
+      if (!fs1.isDirectory(dir))
+        throw new IllegalArgumentException("expected " + dir + " to be a directory");
+
+      if (!fs2.isDirectory(newDir))
+        throw new IllegalArgumentException("expected " + newDir + " to be a directory");
+
+      HashSet<String> names1 = getFileNames(fs1.listStatus(dir));
+      HashSet<String> names2 = getFileNames(fs2.listStatus(newDir));
+
+      if (names1.equals(names2)) {
+        for (String name : names1)
+          if (!hash(fs1, dir, name).equals(hash(fs2, newDir, name)))
+            return false;
+        return true;
+      }
+
+    }
+    return false;
+  }
+
+  @SuppressWarnings("deprecation")
+  private static HashSet<String> getFileNames(FileStatus[] filesStatuses) {
+    HashSet<String> names = new HashSet<String>();
+    for (FileStatus fileStatus : filesStatuses)
+      if (fileStatus.isDir())
+        throw new IllegalArgumentException("expected " + fileStatus.getPath() + " to be a file");
+      else
+        names.add(fileStatus.getPath().getName());
+    return names;
+  }
+
+  private static String hash(FileSystem fs, Path dir, String name) throws IOException {
+    FSDataInputStream in = fs.open(new Path(dir, name));
+    try {
+      return DigestUtils.sha1Hex(in);
+    } finally {
+      in.close();
+    }
+
+  }
+
+  private static Path getBackupName(FileSystem fs, Path path) {
+    SecureRandom rand = new SecureRandom();
+    return new Path(path.getParent(), path.getName() + "_" + System.currentTimeMillis() + "_" + Math.abs(rand.nextInt()) + ".bak");
+  }
+}

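An end-to-end sketch of how switchVolume applies the replacement pairs that ServerConstants.getVolumeReplacements parses (the hosts and table id are hypothetical):

    import java.util.Arrays;
    import java.util.List;

    import org.apache.accumulo.core.util.Pair;
    import org.apache.accumulo.server.fs.VolumeManager.FileType;
    import org.apache.accumulo.server.fs.VolumeUtil;
    import org.apache.hadoop.fs.Path;

    List<Pair<Path,Path>> replacements = Arrays.asList(
        new Pair<Path,Path>(new Path("hdfs://nn1:9001"), new Path("hdfs://nnA:9001")));

    String switched = VolumeUtil.switchVolume(
        "hdfs://nn1:9001/accumulo/tables/2/t-00001/F0000000.rf", FileType.TABLE, replacements);
    // switched == "hdfs://nnA:9001/accumulo/tables/2/t-00001/F0000000.rf"

    // a null return means no pair matched, and the caller keeps the original path
    String unmatched = VolumeUtil.switchVolume(
        "hdfs://nn3:9001/accumulo/tables/2/t-00001/F0000000.rf", FileType.TABLE, replacements);
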
http://git-wip-us.apache.org/repos/asf/accumulo/blob/2ef2d885/server/base/src/main/java/org/apache/accumulo/server/init/Initialize.java
----------------------------------------------------------------------
diff --git a/server/base/src/main/java/org/apache/accumulo/server/init/Initialize.java b/server/base/src/main/java/org/apache/accumulo/server/init/Initialize.java
index 25defe8..a7e858c 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/init/Initialize.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/init/Initialize.java
@@ -150,7 +150,7 @@ public class Initialize {
     else
       fsUri = FileSystem.getDefaultUri(conf).toString();
     log.info("Hadoop Filesystem is " + fsUri);
-    log.info("Accumulo data dirs are " + Arrays.asList(ServerConstants.getConfiguredBaseDirs()));
+    log.info("Accumulo data dirs are " + Arrays.asList(ServerConstants.getConfiguredBaseDirs(ServerConfiguration.getSiteConfiguration())));
     log.info("Zookeeper server is " + sconf.get(Property.INSTANCE_ZK_HOST));
     log.info("Checking if Zookeeper is available. If this hangs, then you need to make sure zookeeper is running");
     if (!zookeeperAvailable()) {
@@ -172,7 +172,8 @@ public class Initialize {
     try {
       if (isInitialized(fs)) {
         String instanceDfsDir = sconf.get(Property.INSTANCE_DFS_DIR);
-        log.fatal("It appears the directories " + Arrays.asList(ServerConstants.getConfiguredBaseDirs()) + " were previously initialized.");
+        log.fatal("It appears the directories " + Arrays.asList(ServerConstants.getConfiguredBaseDirs(ServerConfiguration.getSiteConfiguration()))
+            + " were previously initialized.");
         String instanceVolumes = sconf.get(Property.INSTANCE_VOLUMES);
         String instanceDfsUri = sconf.get(Property.INSTANCE_DFS_URI);
 
@@ -218,7 +219,8 @@ public class Initialize {
 
     UUID uuid = UUID.randomUUID();
     // the actual disk locations of the root table and tablets
-    String[] configuredTableDirs = ServerConstants.prefix(ServerConstants.getConfiguredBaseDirs(), ServerConstants.TABLE_DIR);
+    String[] configuredTableDirs = ServerConstants.prefix(ServerConstants.getConfiguredBaseDirs(ServerConfiguration.getSiteConfiguration()),
+        ServerConstants.TABLE_DIR);
     final Path rootTablet = new Path(fs.choose(configuredTableDirs) + "/" + RootTable.ID + RootTable.ROOT_TABLET_LOCATION);
     try {
       initZooKeeper(opts, uuid.toString(), instanceNamePath, rootTablet);
@@ -231,22 +233,20 @@ public class Initialize {
       initFileSystem(opts, fs, uuid, rootTablet);
     } catch (Exception e) {
       log.fatal("Failed to initialize filesystem", e);
-      for (FileSystem filesystem : fs.getFileSystems().values()) {
-        log.fatal("For FileSystem:" + filesystem.getUri());
-        
-        // Try to warn the user about what the actual problem is
-        Configuration fsConf = filesystem.getConf();
-        
+
+      if (ServerConfiguration.getSiteConfiguration().get(Property.INSTANCE_VOLUMES).trim().equals("")) {
+        Configuration fsConf = CachedConfiguration.getInstance();
+
         final String defaultFsUri = "file:///";
         String fsDefaultName = fsConf.get("fs.default.name", defaultFsUri), fsDefaultFS = fsConf.get("fs.defaultFS", defaultFsUri);
-        
+
         // Try to determine when we couldn't find an appropriate core-site.xml on the classpath
         if (defaultFsUri.equals(fsDefaultName) && defaultFsUri.equals(fsDefaultFS)) {
           log.fatal("Default filesystem value ('fs.defaultFS' or 'fs.default.name') was found in the Hadoop configuration");
           log.fatal("Please ensure that the Hadoop core-site.xml is on the classpath using 'general.classpaths' in accumulo-site.xml");
         }
       }
-      
+
       return false;
     }
 
@@ -295,7 +295,7 @@ public class Initialize {
   private static void initFileSystem(Opts opts, VolumeManager fs, UUID uuid, Path rootTablet) throws IOException {
     FileStatus fstat;
 
-    initDirs(fs, uuid, ServerConstants.getConfiguredBaseDirs(), false);
+    initDirs(fs, uuid, ServerConstants.getConfiguredBaseDirs(ServerConfiguration.getSiteConfiguration()), false);
 
     // the actual disk locations of the metadata table and tablets
     final Path[] metadataTableDirs = paths(ServerConstants.getMetadataTableDirs());
@@ -425,7 +425,7 @@ public class Initialize {
     zoo.putPersistentData(instanceNamePath, uuid.getBytes(Constants.UTF8), NodeExistsPolicy.FAIL);
 
     final byte[] EMPTY_BYTE_ARRAY = new byte[0], ZERO_CHAR_ARRAY = new byte[] {'0'};
-    
+
     // setup the instance
     String zkInstanceRoot = Constants.ZROOT + "/" + uuid;
     zoo.putPersistentData(zkInstanceRoot, EMPTY_BYTE_ARRAY, NodeExistsPolicy.FAIL);
@@ -552,7 +552,7 @@ public class Initialize {
   }
 
   public static boolean isInitialized(VolumeManager fs) throws IOException {
-    for (String baseDir : ServerConstants.getConfiguredBaseDirs()) {
+    for (String baseDir : ServerConstants.getConfiguredBaseDirs(ServerConfiguration.getSiteConfiguration())) {
       if (fs.exists(new Path(baseDir, ServerConstants.INSTANCE_ID_DIR)) || fs.exists(new Path(baseDir, ServerConstants.VERSION_DIR)))
         return true;
     }
@@ -562,10 +562,11 @@ public class Initialize {
 
   private static void addVolumes(VolumeManager fs) throws IOException {
     HashSet<String> initializedDirs = new HashSet<String>();
-    initializedDirs.addAll(Arrays.asList(ServerConstants.checkBaseDirs(ServerConstants.getConfiguredBaseDirs(), true)));
+    initializedDirs
+        .addAll(Arrays.asList(ServerConstants.checkBaseDirs(ServerConstants.getConfiguredBaseDirs(ServerConfiguration.getSiteConfiguration()), true)));
 
     HashSet<String> uinitializedDirs = new HashSet<String>();
-    uinitializedDirs.addAll(Arrays.asList(ServerConstants.getConfiguredBaseDirs()));
+    uinitializedDirs.addAll(Arrays.asList(ServerConstants.getConfiguredBaseDirs(ServerConfiguration.getSiteConfiguration())));
     uinitializedDirs.removeAll(initializedDirs);
 
     Path aBasePath = new Path(initializedDirs.iterator().next());
@@ -604,8 +605,7 @@ public class Initialize {
       SecurityUtil.serverLogin();
       Configuration conf = CachedConfiguration.getInstance();
 
-      @SuppressWarnings("deprecation")
-      VolumeManager fs = VolumeManagerImpl.get(SiteConfiguration.getSiteConfiguration());
+      VolumeManager fs = VolumeManagerImpl.get(ServerConfiguration.getSiteConfiguration());
 
       if (opts.resetSecurity) {
         if (isInitialized(fs)) {

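The addVolumes path above backs the init option referenced by the instance.volumes property description: after new URIs are added to that property, the new volumes are initialized by running

    accumulo init --add-volume

and then restarting the tablet servers.
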
http://git-wip-us.apache.org/repos/asf/accumulo/blob/2ef2d885/server/base/src/main/java/org/apache/accumulo/server/master/recovery/HadoopLogCloser.java
----------------------------------------------------------------------
diff --git a/server/base/src/main/java/org/apache/accumulo/server/master/recovery/HadoopLogCloser.java b/server/base/src/main/java/org/apache/accumulo/server/master/recovery/HadoopLogCloser.java
index 0006bf9..7edc0cf 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/master/recovery/HadoopLogCloser.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/master/recovery/HadoopLogCloser.java
@@ -27,6 +27,7 @@ import org.apache.accumulo.server.fs.VolumeManager;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.LocalFileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.RawLocalFileSystem;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.log4j.Logger;
 
@@ -62,7 +63,7 @@ public class HadoopLogCloser implements LogCloser {
         ns.append(source).close();
         log.info("Recovered lease on " + source.toString() + " using append");
       }
-    } else if (ns instanceof LocalFileSystem) {
+    } else if (ns instanceof LocalFileSystem || ns instanceof RawLocalFileSystem) {
       // ignore
     } else {
       throw new IllegalStateException("Don't know how to recover a lease for " + ns.getClass().getName());

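For context: LocalFileSystem is Hadoop's checksumming wrapper around RawLocalFileSystem, so a
namespace backed directly by the raw filesystem previously fell through to the
IllegalStateException branch. A minimal sketch of the distinction the new check accounts for
(class names are Hadoop public API; LocalFsCheck itself is illustrative):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.LocalFileSystem;
    import org.apache.hadoop.fs.RawLocalFileSystem;

    public class LocalFsCheck {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // FileSystem.getLocal returns the checksumming LocalFileSystem wrapper
        FileSystem checksummed = FileSystem.getLocal(conf);
        // RawLocalFileSystem is a sibling FileSystem, not a LocalFileSystem
        FileSystem raw = new RawLocalFileSystem();
        raw.initialize(raw.getUri(), conf);
        System.out.println(checksummed instanceof LocalFileSystem); // true
        System.out.println(raw instanceof LocalFileSystem);         // false
      }
    }
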
http://git-wip-us.apache.org/repos/asf/accumulo/blob/2ef2d885/server/base/src/main/java/org/apache/accumulo/server/util/Admin.java
----------------------------------------------------------------------
diff --git a/server/base/src/main/java/org/apache/accumulo/server/util/Admin.java b/server/base/src/main/java/org/apache/accumulo/server/util/Admin.java
index 1e1bc79..224f786 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/util/Admin.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/util/Admin.java
@@ -104,6 +104,12 @@ public class Admin {
     boolean printAll = false;
   }
 
+  @Parameters(commandDescription = "Accumulo volume utility")
+  static class VolumesCommand {
+    @Parameter(names = {"-l", "--list"}, description = "list volumes currently in use")
+    boolean listVolumes = false;
+  }
+
   @Parameters(commandDescription = "print out non-default configuration settings")
   static class DumpConfigCommand {
     @Parameter(names = {"-t", "--tables"}, description = "print per-table configuration")
@@ -137,6 +143,9 @@ public class Admin {
     DumpConfigCommand dumpConfigCommand = new DumpConfigCommand();
     cl.addCommand("dumpConfig", dumpConfigCommand);
 
+    VolumesCommand volumesCommand = new VolumesCommand();
+    cl.addCommand("volumes", volumesCommand);
+
     StopCommand stopOpts = new StopCommand();
     cl.addCommand("stop", stopOpts);
     StopAllCommand stopAllOpts = new StopAllCommand();
@@ -186,6 +195,8 @@ public class Admin {
         stopTabletServer(instance, new Credentials(principal, token), stopOpts.args, opts.force);
       } else if (cl.getParsedCommand().equals("dumpConfig")) {
         printConfig(instance, principal, token, dumpConfigCommand);
+      } else if (cl.getParsedCommand().equals("volumes")) {
+        ListVolumesUsed.listVolumes(instance, principal, token);
       } else {
         everything = cl.getParsedCommand().equals("stopAll");
 

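As wired above, the volumes command runs the listing unconditionally; the -l/--list flag is
parsed but never consulted. Invocation from the admin tool would look like:

    $ accumulo admin volumes --list
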
http://git-wip-us.apache.org/repos/asf/accumulo/blob/2ef2d885/server/base/src/main/java/org/apache/accumulo/server/util/ListVolumesUsed.java
----------------------------------------------------------------------
diff --git a/server/base/src/main/java/org/apache/accumulo/server/util/ListVolumesUsed.java b/server/base/src/main/java/org/apache/accumulo/server/util/ListVolumesUsed.java
new file mode 100644
index 0000000..b876392
--- /dev/null
+++ b/server/base/src/main/java/org/apache/accumulo/server/util/ListVolumesUsed.java
@@ -0,0 +1,141 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.server.util;
+
+import java.util.ArrayList;
+import java.util.Map.Entry;
+import java.util.TreeSet;
+
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.Instance;
+import org.apache.accumulo.core.client.Scanner;
+import org.apache.accumulo.core.client.security.tokens.AuthenticationToken;
+import org.apache.accumulo.core.data.Key;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.metadata.MetadataTable;
+import org.apache.accumulo.core.metadata.RootTable;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema;
+import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.core.tabletserver.log.LogEntry;
+import org.apache.accumulo.server.client.HdfsZooInstance;
+import org.apache.accumulo.server.fs.VolumeManager.FileType;
+import org.apache.accumulo.server.security.SystemCredentials;
+import org.apache.hadoop.fs.Path;
+
+/**
+ * Lists the volumes referenced by an Accumulo instance: the root tablet directory and
+ * write-ahead logs in zookeeper, plus the files, logs, directories, and delete markers
+ * in the root and metadata tables.
+ */
+public class ListVolumesUsed {
+
+  public static void main(String[] args) throws Exception {
+    listVolumes(HdfsZooInstance.getInstance(), SystemCredentials.get().getPrincipal(), SystemCredentials.get().getToken());
+  }
+
+  private static String getTableURI(String rootTabletDir) {
+    Path ret = FileType.TABLE.getVolume(new Path(rootTabletDir));
+    if (ret == null)
+      return "RELATIVE";
+    return ret.toString();
+  }
+
+  private static String getLogURI(String logEntry) {
+    Path ret = FileType.WAL.getVolume(new Path(logEntry));
+    if (ret == null)
+      return "RELATIVE";
+    return ret.toString();
+  }
+
+  private static void getLogURIs(TreeSet<String> volumes, LogEntry logEntry) {
+    volumes.add(getLogURI(logEntry.filename));
+    for (String logSet : logEntry.logSet) {
+      volumes.add(getLogURI(logSet));
+    }
+  }
+
+  private static void listZookeeper() throws Exception {
+    System.out.println("Listing volumes referenced in zookeeper");
+    TreeSet<String> volumes = new TreeSet<String>();
+
+    volumes.add(getTableURI(MetadataTableUtil.getRootTabletDir()));
+    ArrayList<LogEntry> result = new ArrayList<LogEntry>();
+    MetadataTableUtil.getRootLogEntries(result);
+    for (LogEntry logEntry : result) {
+      getLogURIs(volumes, logEntry);
+    }
+
+    for (String volume : volumes)
+      System.out.println("\tVolume : " + volume);
+  }
+
+  private static void listTable(String name, Connector conn) throws Exception {
+
+    System.out.println("Listing volumes referenced in " + name + " tablets section");
+
+    Scanner scanner = conn.createScanner(name, Authorizations.EMPTY);
+
+    scanner.setRange(MetadataSchema.TabletsSection.getRange());
+    scanner.fetchColumnFamily(MetadataSchema.TabletsSection.DataFileColumnFamily.NAME);
+    scanner.fetchColumnFamily(MetadataSchema.TabletsSection.LogColumnFamily.NAME);
+    MetadataSchema.TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.fetch(scanner);
+
+    TreeSet<String> volumes = new TreeSet<String>();
+
+    for (Entry<Key,Value> entry : scanner) {
+      if (entry.getKey().getColumnFamily().equals(MetadataSchema.TabletsSection.DataFileColumnFamily.NAME)) {
+        volumes.add(getTableURI(entry.getKey().getColumnQualifier().toString()));
+      } else if (entry.getKey().getColumnFamily().equals(MetadataSchema.TabletsSection.LogColumnFamily.NAME)) {
+        LogEntry le = LogEntry.fromKeyValue(entry.getKey(), entry.getValue());
+        getLogURIs(volumes, le);
+      } else if (MetadataSchema.TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.hasColumns(entry.getKey())) {
+        volumes.add(getTableURI(entry.getValue().toString()));
+      }
+    }
+
+    for (String volume : volumes)
+      System.out.println("\tVolume : " + volume);
+
+    volumes.clear();
+
+    scanner.clearColumns();
+    scanner.setRange(MetadataSchema.DeletesSection.getRange());
+
+    for (Entry<Key,Value> entry : scanner) {
+      String delPath = entry.getKey().getRow().toString().substring(MetadataSchema.DeletesSection.getRowPrefix().length());
+      volumes.add(getTableURI(delPath));
+    }
+
+    System.out.println("Listing volumes referenced in " + name + " deletes section (volume replacement occurrs at deletion time)");
+
+    for (String volume : volumes)
+      System.out.println("\tVolume : " + volume);
+  }
+
+  public static void listVolumes(Instance instance, String principal, AuthenticationToken token) throws Exception {
+    Connector conn = instance.getConnector(principal, token);
+    listZookeeper();
+    System.out.println();
+    listTable(RootTable.NAME, conn);
+    System.out.println();
+    listTable(MetadataTable.NAME, conn);
+  }
+
+}

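For reference, output of the utility on a hypothetical two-volume instance would look roughly
like this (volume URIs and table names are illustrative of a 1.6 instance):

    Listing volumes referenced in zookeeper
            Volume : hdfs://nn1:9000/

    Listing volumes referenced in accumulo.root tablets section
            Volume : hdfs://nn1:9000/
            Volume : hdfs://nn2:9000/
    Listing volumes referenced in accumulo.root deletes section (volume replacement occurs at deletion time)
    ...
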
http://git-wip-us.apache.org/repos/asf/accumulo/blob/2ef2d885/server/base/src/main/java/org/apache/accumulo/server/util/MetadataTableUtil.java
----------------------------------------------------------------------
diff --git a/server/base/src/main/java/org/apache/accumulo/server/util/MetadataTableUtil.java b/server/base/src/main/java/org/apache/accumulo/server/util/MetadataTableUtil.java
index 8b8a3d6..d49f9bc 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/util/MetadataTableUtil.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/util/MetadataTableUtil.java
@@ -61,6 +61,7 @@ import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.Cl
 import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.DataFileColumnFamily;
 import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.LogColumnFamily;
 import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.ScanFileColumnFamily;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.ServerColumnFamily;
 import org.apache.accumulo.core.security.Authorizations;
 import org.apache.accumulo.core.security.Credentials;
 import org.apache.accumulo.core.tabletserver.log.LogEntry;
@@ -179,7 +180,7 @@ public class MetadataTableUtil {
 
   public static void updateTabletDir(KeyExtent extent, String newDir, Credentials creds, ZooLock lock) {
     Mutation m = new Mutation(extent.getMetadataEntry());
-    TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.put(m, new Value(newDir.getBytes()));
+    TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.put(m, new Value(newDir.getBytes(Constants.UTF8)));
     update(creds, lock, m, extent);
   }
 
@@ -197,6 +198,43 @@ public class MetadataTableUtil {
     update(credentials, m, extent);
   }
 
+  public static void updateTabletVolumes(KeyExtent extent, List<LogEntry> logsToRemove, List<LogEntry> logsToAdd, List<FileRef> filesToRemove,
+      SortedMap<FileRef,DataFileValue> filesToAdd, String newDir, ZooLock zooLock, Credentials credentials) {
+
+    if (extent.isRootTablet()) {
+      if (newDir != null)
+        throw new IllegalArgumentException("newDir not expected for " + extent);
+
+      if (filesToRemove.size() != 0 || filesToAdd.size() != 0)
+        throw new IllegalArgumentException("files not expected for " + extent);
+
+      // add before removing in case of process death
+      for (LogEntry logEntry : logsToAdd)
+        addLogEntry(credentials, logEntry, zooLock);
+
+      removeUnusedWALEntries(extent, logsToRemove, zooLock);
+    } else {
+      Mutation m = new Mutation(extent.getMetadataEntry());
+
+      for (LogEntry logEntry : logsToRemove)
+        m.putDelete(logEntry.getColumnFamily(), logEntry.getColumnQualifier());
+
+      for (LogEntry logEntry : logsToAdd)
+        m.put(logEntry.getColumnFamily(), logEntry.getColumnQualifier(), logEntry.getValue());
+
+      for (FileRef fileRef : filesToRemove)
+        m.putDelete(DataFileColumnFamily.NAME, fileRef.meta());
+
+      for (Entry<FileRef,DataFileValue> entry : filesToAdd.entrySet())
+        m.put(DataFileColumnFamily.NAME, entry.getKey().meta(), new Value(entry.getValue().encode()));
+
+      if (newDir != null)
+        ServerColumnFamily.DIRECTORY_COLUMN.put(m, new Value(newDir.getBytes(Constants.UTF8)));
+
+      update(credentials, m, extent);
+    }
+  }
+
   public static SortedMap<FileRef,DataFileValue> getDataFileSizes(KeyExtent extent, Credentials credentials) throws IOException {
     TreeMap<FileRef,DataFileValue> sizes = new TreeMap<FileRef,DataFileValue>();
 
@@ -529,7 +567,7 @@ public class MetadataTableUtil {
     return result;
   }
 
-  private static void getRootLogEntries(ArrayList<LogEntry> result) throws KeeperException, InterruptedException, IOException {
+  static void getRootLogEntries(ArrayList<LogEntry> result) throws KeeperException, InterruptedException, IOException {
     IZooReaderWriter zoo = ZooReaderWriter.getInstance();
     String root = getZookeeperLogLocation();
     // there's a little race between getting the children and fetching

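For non-root tablets, updateTabletVolumes folds every change (logs, files, and the directory
pointer) into one metadata mutation, so a volume switch commits atomically; for the root
tablet only log changes are allowed, and logs are added before removal so a process death
cannot lose a log reference. A minimal caller sketch under stated assumptions (moveTabletDir
is hypothetical, the real caller is the new VolumeUtil code, and import paths follow the 1.6
source tree):

    import java.util.Collections;
    import java.util.TreeMap;

    import org.apache.accumulo.core.data.KeyExtent;
    import org.apache.accumulo.core.metadata.schema.DataFileValue;
    import org.apache.accumulo.core.security.Credentials;
    import org.apache.accumulo.core.tabletserver.log.LogEntry;
    import org.apache.accumulo.fate.zookeeper.ZooLock;
    import org.apache.accumulo.server.fs.FileRef;
    import org.apache.accumulo.server.util.MetadataTableUtil;

    public class MoveTabletDirExample {
      // Hypothetical helper: switch a non-root tablet's directory while leaving
      // its files and logs untouched; extent, zooLock, and creds are assumed to
      // be supplied by the caller.
      static void moveTabletDir(KeyExtent extent, String newDir, ZooLock zooLock, Credentials creds) {
        MetadataTableUtil.updateTabletVolumes(extent,
            Collections.<LogEntry> emptyList(),   // logsToRemove
            Collections.<LogEntry> emptyList(),   // logsToAdd
            Collections.<FileRef> emptyList(),    // filesToRemove
            new TreeMap<FileRef,DataFileValue>(), // filesToAdd
            newDir, zooLock, creds);
      }
    }
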
http://git-wip-us.apache.org/repos/asf/accumulo/blob/2ef2d885/server/base/src/test/java/org/apache/accumulo/server/fs/FileTypeTest.java
----------------------------------------------------------------------
diff --git a/server/base/src/test/java/org/apache/accumulo/server/fs/FileTypeTest.java b/server/base/src/test/java/org/apache/accumulo/server/fs/FileTypeTest.java
new file mode 100644
index 0000000..205a793
--- /dev/null
+++ b/server/base/src/test/java/org/apache/accumulo/server/fs/FileTypeTest.java
@@ -0,0 +1,54 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.server.fs;
+
+import org.apache.accumulo.server.fs.VolumeManager.FileType;
+import org.apache.hadoop.fs.Path;
+import org.junit.Assert;
+import org.junit.Test;
+
+/**
+ * Tests volume extraction and removal for the path types handled by VolumeManager.FileType.
+ */
+public class FileTypeTest {
+  @Test
+  public void testVolumeExtraction() {
+    Assert.assertEquals(new Path("file:/a"), FileType.TABLE.getVolume(new Path("file:/a/accumulo/tables/2b/t-001/C00.rf")));
+    Assert.assertEquals(new Path("file:///a"), FileType.TABLE.getVolume(new Path("file:/a/accumulo/tables/2b/t-001/C00.rf")));
+    Assert.assertEquals(new Path("file:///a"), FileType.TABLE.getVolume(new Path("file:///a/accumulo/tables/2b/t-001/C00.rf")));
+    Assert.assertEquals(new Path("file:/a"), FileType.TABLE.getVolume(new Path("file:///a/accumulo/tables/2b/t-001/C00.rf")));
+
+    Assert.assertEquals(new Path("accumulo/tables/2b/t-001/C00.rf"), FileType.TABLE.removeVolume(new Path("file:/a/accumulo/tables/2b/t-001/C00.rf")));
+    Assert.assertEquals(new Path("accumulo/tables/2b/t-001/C00.rf"), FileType.TABLE.removeVolume(new Path("file:///a/accumulo/tables/2b/t-001/C00.rf")));
+
+    Assert.assertEquals(new Path("file:/"), FileType.TABLE.getVolume(new Path("file:/accumulo/tables/2b/t-001/C00.rf")));
+    Assert.assertEquals(new Path("file:/"), FileType.TABLE.getVolume(new Path("file:///accumulo/tables/2b/t-001/C00.rf")));
+
+    Assert.assertEquals(new Path("file:/a"), FileType.WAL.getVolume(new Path("file:/a/accumulo/wal/1.2.3.4/aaa-bbb-ccc-ddd")));
+
+    Assert.assertNull(FileType.WAL.getVolume(new Path("1.2.3.4/aaa-bbb-ccc-ddd")));
+    Assert.assertNull(FileType.TABLE.getVolume(new Path("../2b/t-001/C00.rf")));
+    Assert.assertNull(FileType.TABLE.getVolume(new Path("/t-001/C00.rf")));
+
+    Assert.assertEquals(new Path("hdfs://nn1/"), FileType.TABLE.getVolume(new Path("hdfs://nn1/accumulo/tables/2b/t-001/C00.rf")));
+    Assert.assertEquals(new Path("hdfs://nn1/a/"), FileType.TABLE.getVolume(new Path("hdfs://nn1/a/accumulo/tables/2b/t-001/C00.rf")));
+
+    Assert.assertEquals(new Path("accumulo/tables/2b/t-001/C00.rf"), FileType.TABLE.removeVolume(new Path("hdfs://nn1/accumulo/tables/2b/t-001/C00.rf")));
+    Assert.assertEquals(new Path("accumulo/tables/2b/t-001/C00.rf"), FileType.TABLE.removeVolume(new Path("hdfs://nn1/a/accumulo/tables/2b/t-001/C00.rf")));
+  }
+}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/2ef2d885/server/base/src/test/java/org/apache/accumulo/server/fs/VolumeUtilTest.java
----------------------------------------------------------------------
diff --git a/server/base/src/test/java/org/apache/accumulo/server/fs/VolumeUtilTest.java b/server/base/src/test/java/org/apache/accumulo/server/fs/VolumeUtilTest.java
new file mode 100644
index 0000000..c85be45
--- /dev/null
+++ b/server/base/src/test/java/org/apache/accumulo/server/fs/VolumeUtilTest.java
@@ -0,0 +1,144 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.server.fs;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.accumulo.core.util.Pair;
+import org.apache.accumulo.server.fs.VolumeManager.FileType;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.junit.Assert;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.TemporaryFolder;
+
+/**
+ * Tests for VolumeUtil.switchVolume and VolumeUtil.same.
+ */
+public class VolumeUtilTest {
+
+  @Rule
+  public TemporaryFolder tempFolder = new TemporaryFolder(new File(System.getProperty("user.dir") + "/target"));
+
+  @Test
+  public void testSwitchVolume() {
+    List<Pair<Path,Path>> replacements = new ArrayList<Pair<Path,Path>>();
+    replacements.add(new Pair<Path,Path>(new Path("hdfs://nn1"), new Path("viewfs:/a")));
+    replacements.add(new Pair<Path,Path>(new Path("hdfs://nn1:9000/"), new Path("viewfs:/a")));
+    replacements.add(new Pair<Path,Path>(new Path("hdfs://nn2/"), new Path("viewfs:/b")));
+
+    Assert.assertEquals("viewfs:/a/accumulo/tables/t-00000/C000.rf",
+        VolumeUtil.switchVolume("hdfs://nn1/accumulo/tables/t-00000/C000.rf", FileType.TABLE, replacements));
+    Assert.assertEquals("viewfs:/a/accumulo/tables/t-00000/C000.rf",
+        VolumeUtil.switchVolume("hdfs://nn1:9000/accumulo/tables/t-00000/C000.rf", FileType.TABLE, replacements));
+    Assert.assertEquals("viewfs:/b/accumulo/tables/t-00000/C000.rf",
+        VolumeUtil.switchVolume("hdfs://nn2/accumulo/tables/t-00000/C000.rf", FileType.TABLE, replacements));
+    Assert.assertNull(VolumeUtil.switchVolume("viewfs:/a/accumulo/tables/t-00000/C000.rf", FileType.TABLE, replacements));
+    Assert.assertNull(VolumeUtil.switchVolume("file:/nn1/a/accumulo/tables/t-00000/C000.rf", FileType.TABLE, replacements));
+
+    replacements.clear();
+    replacements.add(new Pair<Path,Path>(new Path("hdfs://nn1/d1"), new Path("viewfs:/a")));
+    replacements.add(new Pair<Path,Path>(new Path("hdfs://nn1:9000/d1"), new Path("viewfs:/a")));
+    replacements.add(new Pair<Path,Path>(new Path("hdfs://nn2/d2/"), new Path("viewfs:/b")));
+
+    Assert.assertEquals("viewfs:/a/accumulo/tables/t-00000/C000.rf",
+        VolumeUtil.switchVolume("hdfs://nn1/d1/accumulo/tables/t-00000/C000.rf", FileType.TABLE, replacements));
+    Assert.assertEquals("viewfs:/a/accumulo/tables/t-00000/C000.rf",
+        VolumeUtil.switchVolume("hdfs://nn1:9000/d1/accumulo/tables/t-00000/C000.rf", FileType.TABLE, replacements));
+    Assert.assertEquals("viewfs:/b/accumulo/tables/t-00000/C000.rf",
+        VolumeUtil.switchVolume("hdfs://nn2/d2/accumulo/tables/t-00000/C000.rf", FileType.TABLE, replacements));
+    Assert.assertNull(VolumeUtil.switchVolume("viewfs:/a/accumulo/tables/t-00000/C000.rf", FileType.TABLE, replacements));
+    Assert.assertNull(VolumeUtil.switchVolume("file:/nn1/a/accumulo/tables/t-00000/C000.rf", FileType.TABLE, replacements));
+    Assert.assertNull(VolumeUtil.switchVolume("hdfs://nn1/accumulo/tables/t-00000/C000.rf", FileType.TABLE, replacements));
+  }
+
+  @Test
+  public void testSame() throws Exception {
+    FileSystem fs = FileSystem.getLocal(new Configuration());
+
+    Path subdir1 = new Path(tempFolder.newFolder().toURI());
+    Path subdir2 = new Path(tempFolder.newFolder().toURI());
+    Path subdir3 = new Path(tempFolder.newFolder().toURI());
+
+    Assert.assertFalse(VolumeUtil.same(fs, subdir1, fs, new Path(tempFolder.getRoot().toURI().toString(), "8854339269459287524098238497")));
+    Assert.assertFalse(VolumeUtil.same(fs, new Path(tempFolder.getRoot().toURI().toString(), "8854339269459287524098238497"), fs, subdir1));
+    Assert.assertTrue(VolumeUtil.same(fs, subdir1, fs, subdir1));
+
+    writeFile(fs, subdir1, "abc", "foo");
+    writeFile(fs, subdir2, "abc", "bar");
+    writeFile(fs, subdir3, "abc", "foo");
+
+    Assert.assertTrue(VolumeUtil.same(fs, subdir1, fs, subdir1));
+    Assert.assertFalse(VolumeUtil.same(fs, subdir1, fs, subdir2));
+    Assert.assertFalse(VolumeUtil.same(fs, subdir2, fs, subdir1));
+    Assert.assertTrue(VolumeUtil.same(fs, subdir1, fs, subdir3));
+    Assert.assertTrue(VolumeUtil.same(fs, subdir3, fs, subdir1));
+
+    writeFile(fs, subdir1, "def", "123456");
+    writeFile(fs, subdir2, "def", "123456");
+    writeFile(fs, subdir3, "def", "123456");
+
+    Assert.assertTrue(VolumeUtil.same(fs, subdir1, fs, subdir1));
+    Assert.assertFalse(VolumeUtil.same(fs, subdir1, fs, subdir2));
+    Assert.assertFalse(VolumeUtil.same(fs, subdir2, fs, subdir1));
+    Assert.assertTrue(VolumeUtil.same(fs, subdir1, fs, subdir3));
+    Assert.assertTrue(VolumeUtil.same(fs, subdir3, fs, subdir1));
+
+    writeFile(fs, subdir3, "ghi", "09876");
+
+    Assert.assertFalse(VolumeUtil.same(fs, subdir1, fs, subdir3));
+    Assert.assertFalse(VolumeUtil.same(fs, subdir3, fs, subdir1));
+
+    fs.mkdirs(new Path(subdir2, "dir1"));
+
+    try {
+      VolumeUtil.same(fs, subdir1, fs, subdir2);
+      Assert.fail();
+    } catch (IllegalArgumentException e) {}
+
+    try {
+      VolumeUtil.same(fs, subdir2, fs, subdir1);
+      Assert.fail();
+    } catch (IllegalArgumentException e) {}
+
+    try {
+      VolumeUtil.same(fs, subdir1, fs, new Path(subdir2, "def"));
+      Assert.fail();
+    } catch (IllegalArgumentException e) {}
+
+    try {
+      VolumeUtil.same(fs, new Path(subdir2, "def"), fs, subdir3);
+      Assert.fail();
+    } catch (IllegalArgumentException e) {}
+  }
+
+  private void writeFile(FileSystem fs, Path dir, String filename, String data) throws IOException {
+    FSDataOutputStream out = fs.create(new Path(dir, filename));
+    try {
+      out.writeUTF(data);
+    } finally {
+      out.close();
+    }
+  }
+}

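The null return from switchVolume is the contract the asserts above pin down: null means no
replacement prefix matched, and the caller must keep the original path. A minimal caller
sketch, assuming same-package access (the test itself relies on it) and hard-coded
replacement pairs that would normally come from the volume-replacement property this commit
adds to Property.java (paths are examples):

    package org.apache.accumulo.server.fs;

    import java.util.ArrayList;
    import java.util.List;

    import org.apache.accumulo.core.util.Pair;
    import org.apache.accumulo.server.fs.VolumeManager.FileType;
    import org.apache.hadoop.fs.Path;

    public class SwitchVolumeExample {
      public static void main(String[] args) {
        List<Pair<Path,Path>> replacements = new ArrayList<Pair<Path,Path>>();
        replacements.add(new Pair<Path,Path>(new Path("hdfs://nn1"), new Path("hdfs://nn-new/v1")));

        String file = "hdfs://nn1/accumulo/tables/2/t-001/F0000.rf";
        String switched = VolumeUtil.switchVolume(file, FileType.TABLE, replacements);
        // null means no replacement applied; keep the original path in that case
        System.out.println(switched == null ? file : switched);
      }
    }
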
