accumulo-commits mailing list archives

From: els...@apache.org
Subject: [3/3] git commit: Merge branch '1.5.1-SNAPSHOT'
Date: Thu, 17 Oct 2013 17:08:36 GMT
Merge branch '1.5.1-SNAPSHOT'

Conflicts:
	server/src/main/java/org/apache/accumulo/server/tabletserver/TabletServer.java


Project: http://git-wip-us.apache.org/repos/asf/accumulo/repo
Commit: http://git-wip-us.apache.org/repos/asf/accumulo/commit/ba8ad057
Tree: http://git-wip-us.apache.org/repos/asf/accumulo/tree/ba8ad057
Diff: http://git-wip-us.apache.org/repos/asf/accumulo/diff/ba8ad057

Branch: refs/heads/master
Commit: ba8ad057d70d90f13a75911b81650897b3889f29
Parents: a65fe6a 685cc4a
Author: Josh Elser <elserj@apache.org>
Authored: Thu Oct 17 13:08:11 2013 -0400
Committer: Josh Elser <elserj@apache.org>
Committed: Thu Oct 17 13:08:11 2013 -0400

----------------------------------------------------------------------
 .../server/tabletserver/TabletServer.java       | 60 ++++++++++++++++++++
 1 file changed, 60 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/accumulo/blob/ba8ad057/server/src/main/java/org/apache/accumulo/server/tabletserver/TabletServer.java
----------------------------------------------------------------------
diff --cc server/src/main/java/org/apache/accumulo/server/tabletserver/TabletServer.java
index a44b194,ea30694..4e06b6d
--- a/server/src/main/java/org/apache/accumulo/server/tabletserver/TabletServer.java
+++ b/server/src/main/java/org/apache/accumulo/server/tabletserver/TabletServer.java
@@@ -213,9 -206,15 +215,14 @@@ import org.apache.accumulo.trace.instru
  import org.apache.accumulo.trace.instrument.thrift.TraceWrap;
  import org.apache.accumulo.trace.thrift.TInfo;
  import org.apache.commons.collections.map.LRUMap;
 -import org.apache.hadoop.fs.FSDataOutputStream;
 -import org.apache.hadoop.fs.FileStatus;
 +import org.apache.hadoop.fs.FSError;
  import org.apache.hadoop.fs.FileSystem;
  import org.apache.hadoop.fs.Path;
+ import org.apache.hadoop.fs.Trash;
+ import org.apache.hadoop.hdfs.DFSConfigKeys;
+ import org.apache.hadoop.hdfs.DistributedFileSystem;
+ import org.apache.hadoop.io.SequenceFile;
+ import org.apache.hadoop.io.SequenceFile.Reader;
  import org.apache.hadoop.io.Text;
  import org.apache.log4j.Logger;
  import org.apache.thrift.TException;
@@@ -3596,6 -3230,8 +3603,7 @@@ public class TabletServer extends Abstr
        Instance instance = HdfsZooInstance.getInstance();
        ServerConfiguration conf = new ServerConfiguration(instance);
        Accumulo.init(fs, conf, "tserver");
+       ensureHdfsSyncIsEnabled(fs);
 -      recoverLocalWriteAheadLogs(fs, conf);
        TabletServer server = new TabletServer(conf, fs);
        server.config(hostname);
        Accumulo.enableTracing(hostname, "tserver");
@@@ -3606,6 -3242,108 +3614,58 @@@
      }
    }
    
 -  private static void ensureHdfsSyncIsEnabled(FileSystem fs) {
 -    if (fs instanceof DistributedFileSystem) {
 -      final String DFS_DURABLE_SYNC = "dfs.durable.sync", DFS_SUPPORT_APPEND = "dfs.support.append";
 -      final String ticketMessage = "See ACCUMULO-623 and ACCUMULO-1637 for more details.";
 -      // Check to make sure that we have proper defaults configured
 -      try {
 -        // If the default is off (0.20.205.x or 1.0.x)
 -        DFSConfigKeys configKeys = new DFSConfigKeys();
 -        
 -        // Can't use the final constant itself as Java will inline it at compile time
 -        Field dfsSupportAppendDefaultField = configKeys.getClass().getField("DFS_SUPPORT_APPEND_DEFAULT");
 -        boolean dfsSupportAppendDefaultValue = dfsSupportAppendDefaultField.getBoolean(configKeys);
 -        
 -        if (!dfsSupportAppendDefaultValue) {
 -          // See if the user did the correct override
 -          if (!fs.getConf().getBoolean(DFSConfigKeys.DFS_SUPPORT_APPEND_KEY, false)) {
 -            log.fatal("Accumulo requires that dfs.support.append to true. " + ticketMessage);
 -            System.exit(-1);
++  private static void ensureHdfsSyncIsEnabled(VolumeManager volumes) {
++    for (Entry<String,? extends FileSystem> entry : volumes.getFileSystems().entrySet()) {
++      final String volumeName = entry.getKey();
++      final FileSystem fs = entry.getValue();
++      
++      if (fs instanceof DistributedFileSystem) {
++        final String DFS_DURABLE_SYNC = "dfs.durable.sync", DFS_SUPPORT_APPEND = "dfs.support.append";
++        final String ticketMessage = "See ACCUMULO-623 and ACCUMULO-1637 for more details.";
++        // Check to make sure that we have proper defaults configured
++        try {
++          // If the default is off (0.20.205.x or 1.0.x)
++          DFSConfigKeys configKeys = new DFSConfigKeys();
++          
++          // Can't use the final constant itself as Java will inline it at compile time
++          Field dfsSupportAppendDefaultField = configKeys.getClass().getField("DFS_SUPPORT_APPEND_DEFAULT");
++          boolean dfsSupportAppendDefaultValue = dfsSupportAppendDefaultField.getBoolean(configKeys);
++          
++          if (!dfsSupportAppendDefaultValue) {
++            // See if the user did the correct override
++            if (!fs.getConf().getBoolean(DFS_SUPPORT_APPEND, false)) {
++              log.fatal("Accumulo requires that dfs.support.append be set to true on volume " + volumeName + ". " + ticketMessage);
++              System.exit(-1);
++            }
+           }
++        } catch (NoSuchFieldException e) {
++          // If we can't find DFSConfigKeys.DFS_SUPPORT_APPEND_DEFAULT, the user is running
++          // 1.1.x or 1.2.x. This is ok, though, as, by default, these versions have append/sync enabled.
++        } catch (Exception e) {
++          log.warn("Error while checking for " + DFS_SUPPORT_APPEND + " on volume " + volumeName + ". The user should ensure that Hadoop is configured to properly support append and sync. " + ticketMessage, e);
+         }
 -      } catch (NoSuchFieldException e) {
 -        // If we can't find DFSConfigKeys.DFS_SUPPORT_APPEND_DEFAULT, the user is running
 -        // 1.1.x or 1.2.x. This is ok, though, as, by default, these versions have append/sync enabled.
 -      } catch (Exception e) {
 -        log.warn("Error while checking for " + DFS_SUPPORT_APPEND + ". The user should ensure that Hadoop is configured to properly supports append and sync. " + ticketMessage, e);
 -      }
 -      
 -      // If either of these parameters are configured to be false, fail.
 -      // This is a sign that someone is writing bad configuration.
 -      if (!fs.getConf().getBoolean(DFS_SUPPORT_APPEND, true) || !fs.getConf().getBoolean(DFS_DURABLE_SYNC, true)) {
 -        log.fatal("Accumulo requires that " + DFS_SUPPORT_APPEND + " and " + DFS_DURABLE_SYNC + " not be configured as false. " + ticketMessage);
 -        System.exit(-1);
 -      }
 -      
 -      try {
 -        // if this class exists
 -        Class.forName("org.apache.hadoop.fs.CreateFlag");
 -        // we're running hadoop 2.0, 1.1
 -        if (!fs.getConf().getBoolean("dfs.datanode.synconclose", false)) {
 -          log.warn("dfs.datanode.synconclose set to false: data loss is possible on system reset or power loss");
++        
++        // If either of these parameters are configured to be false, fail.
++        // This is a sign that someone is writing bad configuration.
++        if (!fs.getConf().getBoolean(DFS_SUPPORT_APPEND, true) || !fs.getConf().getBoolean(DFS_DURABLE_SYNC, true)) {
++          log.fatal("Accumulo requires that " + DFS_SUPPORT_APPEND + " and " + DFS_DURABLE_SYNC + " not be configured as false on volume " + volumeName + ". " + ticketMessage);
++          System.exit(-1);
+         }
 -      } catch (ClassNotFoundException ex) {
 -        // hadoop 1.0
 -      }
 -    }
 -    
 -  }
 -  
 -  /**
 -   * Copy local walogs into HDFS on an upgrade
 -   * 
 -   */
 -  public static void recoverLocalWriteAheadLogs(FileSystem fs, ServerConfiguration serverConf) throws IOException {
 -    FileSystem localfs = FileSystem.getLocal(fs.getConf()).getRawFileSystem();
 -    AccumuloConfiguration conf = serverConf.getConfiguration();
 -    String localWalDirectories = conf.get(Property.LOGGER_DIR);
 -    for (String localWalDirectory : localWalDirectories.split(",")) {
 -      if (!localWalDirectory.startsWith("/")) {
 -        localWalDirectory = System.getenv("ACCUMULO_HOME") + "/" + localWalDirectory;
 -      }
 -      
 -      FileStatus status = null;
 -      try {
 -        status = localfs.getFileStatus(new Path(localWalDirectory));
 -      } catch (FileNotFoundException fne) {}
 -      
 -      if (status == null || !status.isDir()) {
 -        log.debug("Local walog dir " + localWalDirectory + " not found ");
 -        continue;
 -      }
 -      
 -      for (FileStatus file : localfs.listStatus(new Path(localWalDirectory))) {
 -        String name = file.getPath().getName();
++        
+         try {
 -          UUID.fromString(name);
 -        } catch (IllegalArgumentException ex) {
 -          log.info("Ignoring non-log file " + name + " in " + localWalDirectory);
 -          continue;
 -        }
 -        LogFileKey key = new LogFileKey();
 -        LogFileValue value = new LogFileValue();
 -        log.info("Openning local log " + file.getPath());
 -        Reader reader = new SequenceFile.Reader(localfs, file.getPath(), localfs.getConf());
 -        Path tmp = new Path(Constants.getWalDirectory(conf) + "/" + name + ".copy");
 -        FSDataOutputStream writer = fs.create(tmp);
 -        while (reader.next(key, value)) {
 -          try {
 -            key.write(writer);
 -            value.write(writer);
 -          } catch (EOFException ex) {
 -            break;
++          // if this class exists
++          Class.forName("org.apache.hadoop.fs.CreateFlag");
++          // we're running hadoop 2.0, 1.1
++          if (!fs.getConf().getBoolean("dfs.datanode.synconclose", false)) {
++            log.warn("dfs.datanode.synconclose set to false: data loss is possible on system reset or power loss on volume " + volumeName);
+           }
++        } catch (ClassNotFoundException ex) {
++          // hadoop 1.0
+         }
 -        writer.close();
 -        reader.close();
 -        fs.rename(tmp, new Path(tmp.getParent(), name));
 -        log.info("Copied local log " + name);
 -        localfs.delete(new Path(localWalDirectory, name), true);
+       }
+     }
+   }
 -  
++
   public void minorCompactionFinished(CommitSession tablet, String newDatafile, int walogSeq) throws IOException {
      totalMinorCompactions++;
      logger.minorCompactionFinished(tablet, newDatafile, walogSeq);
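
A note on the reflection above: ensureHdfsSyncIsEnabled reads
DFS_SUPPORT_APPEND_DEFAULT through Field.getBoolean instead of referencing the
constant directly because, as the in-line comment says, javac inlines static
final primitive constants at compile time. A direct reference would freeze in
the default from whichever Hadoop version Accumulo was compiled against rather
than the one actually on the classpath at runtime. A minimal standalone sketch
of the difference (Defaults and FEATURE_DEFAULT are hypothetical stand-ins,
not Hadoop classes):

    import java.lang.reflect.Field;

    class Defaults {
      // Pretend this default differs between library versions.
      public static final boolean FEATURE_DEFAULT = false;
    }

    public class InlineDemo {
      public static void main(String[] args) throws Exception {
        // javac copies the constant's value into InlineDemo.class at compile
        // time; dropping a newer Defaults.class on the classpath at runtime
        // will not change what this reads.
        boolean compiledIn = Defaults.FEATURE_DEFAULT;

        // Reflection reads the field from the Defaults class that is actually
        // loaded at runtime, which is the behavior the patch needs.
        Field f = Defaults.class.getField("FEATURE_DEFAULT");
        boolean atRuntime = f.getBoolean(null); // null is fine for static fields

        System.out.println("compiled-in=" + compiledIn + ", runtime=" + atRuntime);
      }
    }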

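The Class.forName("org.apache.hadoop.fs.CreateFlag") call near the end of the
method is the same kind of runtime probe: per the patch's comments, CreateFlag
exists in Hadoop 1.1/2.0 but not in 1.0, so its presence selects the
dfs.datanode.synconclose check without a compile-time dependency on a
particular Hadoop release. A hedged sketch of the idiom (the printed messages
are illustrative, not Accumulo's):

    public class HadoopProbe {
      public static void main(String[] args) {
        try {
          // Throws ClassNotFoundException when the probed class is not on
          // the classpath.
          Class.forName("org.apache.hadoop.fs.CreateFlag");
          System.out.println("CreateFlag present: Hadoop 1.1+/2.x, dfs.datanode.synconclose applies");
        } catch (ClassNotFoundException e) {
          System.out.println("CreateFlag absent: Hadoop 1.0-era release");
        }
      }
    }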
