accumulo-commits mailing list archives

From: els...@apache.org
Subject: [2/2] git commit: Merge branch '1.5.1-SNAPSHOT'
Date: Fri, 18 Oct 2013 02:02:19 GMT
Merge branch '1.5.1-SNAPSHOT'

Conflicts:
	server/src/main/java/org/apache/accumulo/server/tabletserver/TabletServer.java

ACCUMULO-1637 Expanding the tests to account for the VolumeManager
changes in 1.6
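
For context, a minimal sketch of the shape this change gives the check (this is
not the committed code; the checkVolume helper below is hypothetical): instead of
validating a single FileSystem, the check now walks every volume returned by
VolumeManager.getFileSystems() and applies the same configuration test to each,
naming the offending volume in any error it raises.

    import java.util.Map.Entry;
    import org.apache.accumulo.server.fs.VolumeManager;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    class SyncCheckSketch {
      static void ensureHdfsSyncIsEnabled(VolumeManager volumes) {
        // A tablet server may now span several volumes; validate each one.
        for (Entry<String,? extends FileSystem> entry : volumes.getFileSystems().entrySet()) {
          FileSystem fs = entry.getValue();
          if (fs instanceof DistributedFileSystem) {
            checkVolume(entry.getKey(), fs); // hypothetical per-volume validation
          }
        }
      }

      static void checkVolume(String volumeName, FileSystem fs) {
        // Fail with an exception rather than System.exit so tests can observe it.
        if (!fs.getConf().getBoolean("dfs.support.append", true)) {
          throw new RuntimeException("dfs.support.append is false on volume " + volumeName);
        }
      }
    }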


Project: http://git-wip-us.apache.org/repos/asf/accumulo/repo
Commit: http://git-wip-us.apache.org/repos/asf/accumulo/commit/e498ebb8
Tree: http://git-wip-us.apache.org/repos/asf/accumulo/tree/e498ebb8
Diff: http://git-wip-us.apache.org/repos/asf/accumulo/diff/e498ebb8

Branch: refs/heads/master
Commit: e498ebb802f7f07827b8f0c0a067f8536322b692
Parents: e26fc0b 0987628
Author: Josh Elser <elserj@apache.org>
Authored: Thu Oct 17 22:00:46 2013 -0400
Committer: Josh Elser <elserj@apache.org>
Committed: Thu Oct 17 22:00:46 2013 -0400

----------------------------------------------------------------------
 .../server/tabletserver/TabletServer.java       |  12 +-
 .../tabletserver/TabletServerSyncCheckTest.java | 283 +++++++++++++++++++
 2 files changed, 290 insertions(+), 5 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/accumulo/blob/e498ebb8/server/src/main/java/org/apache/accumulo/server/tabletserver/TabletServer.java
----------------------------------------------------------------------
diff --cc server/src/main/java/org/apache/accumulo/server/tabletserver/TabletServer.java
index 9142299,eba96ae..79dd2b0
--- a/server/src/main/java/org/apache/accumulo/server/tabletserver/TabletServer.java
+++ b/server/src/main/java/org/apache/accumulo/server/tabletserver/TabletServer.java
@@@ -3610,54 -3242,107 +3610,56 @@@ public class TabletServer extends Abstr
      }
    }
    
-   private static void ensureHdfsSyncIsEnabled(VolumeManager volumes) {
 -  protected static void ensureHdfsSyncIsEnabled(FileSystem fs) {
 -    if (fs instanceof DistributedFileSystem) {
 -      final String DFS_DURABLE_SYNC = "dfs.durable.sync", DFS_SUPPORT_APPEND = "dfs.support.append";
 -      final String ticketMessage = "See ACCUMULO-623 and ACCUMULO-1637 for more details.";
 -      // Check to make sure that we have proper defaults configured
 -      try {
 -        // If the default is off (0.20.205.x or 1.0.x)
 -        DFSConfigKeys configKeys = new DFSConfigKeys();
 -        
 -        // Can't use the final constant itself as Java will inline it at compile time
 -        Field dfsSupportAppendDefaultField = configKeys.getClass().getField("DFS_SUPPORT_APPEND_DEFAULT");
 -        boolean dfsSupportAppendDefaultValue = dfsSupportAppendDefaultField.getBoolean(configKeys);
 -        
 -        if (!dfsSupportAppendDefaultValue) {
 -          // See if the user did the correct override
 -          if (!fs.getConf().getBoolean(DFS_SUPPORT_APPEND, false)) {
 -            String msg = "Accumulo requires that dfs.support.append to true. " + ticketMessage;
 -            log.fatal(msg);
 -            throw new RuntimeException(msg);
++  protected static void ensureHdfsSyncIsEnabled(VolumeManager volumes) {
 +    for (Entry<String,? extends FileSystem> entry : volumes.getFileSystems().entrySet()) {
 +      final String volumeName = entry.getKey();
 +      final FileSystem fs = entry.getValue();
 +      
 +      if (fs instanceof DistributedFileSystem) {
 +        final String DFS_DURABLE_SYNC = "dfs.durable.sync", DFS_SUPPORT_APPEND = "dfs.support.append";
 +        final String ticketMessage = "See ACCUMULO-623 and ACCUMULO-1637 for more details.";
 +        // Check to make sure that we have proper defaults configured
 +        try {
 +          // If the default is off (0.20.205.x or 1.0.x)
 +          DFSConfigKeys configKeys = new DFSConfigKeys();
 +          
 +          // Can't use the final constant itself as Java will inline it at compile time
 +          Field dfsSupportAppendDefaultField = configKeys.getClass().getField("DFS_SUPPORT_APPEND_DEFAULT");
 +          boolean dfsSupportAppendDefaultValue = dfsSupportAppendDefaultField.getBoolean(configKeys);
 +          
 +          if (!dfsSupportAppendDefaultValue) {
 +            // See if the user did the correct override
 +            if (!fs.getConf().getBoolean(DFS_SUPPORT_APPEND, false)) {
-               log.fatal("Accumulo requires that dfs.support.append to true on volume " + volumeName + ". " + ticketMessage);
-               System.exit(-1);
++              String msg = "Accumulo requires that dfs.support.append to true. " + ticketMessage;
++              log.fatal(msg);
++              throw new RuntimeException(msg);
 +            }
            }
 +        } catch (NoSuchFieldException e) {
 +          // If we can't find DFSConfigKeys.DFS_SUPPORT_APPEND_DEFAULT, the user is running
 +          // 1.1.x or 1.2.x. This is ok, though, as, by default, these versions have append/sync enabled.
 +        } catch (Exception e) {
 +          log.warn("Error while checking for " + DFS_SUPPORT_APPEND + " on volume " + volumeName
+ ". The user should ensure that Hadoop is configured to properly supports append and sync.
" + ticketMessage, e);
          }
 -      } catch (NoSuchFieldException e) {
 -        // If we can't find DFSConfigKeys.DFS_SUPPORT_APPEND_DEFAULT, the user is running
 -        // 1.1.x or 1.2.x. This is ok, though, as, by default, these versions have append/sync enabled.
 -      } catch (Exception e) {
 -        log.warn("Error while checking for " + DFS_SUPPORT_APPEND + ". The user should ensure
that Hadoop is configured to properly supports append and sync. "
 -            + ticketMessage, e);
 -      }
 -      
 -      // If either of these parameters are configured to be false, fail.
 -      // This is a sign that someone is writing bad configuration.
 -      if (!fs.getConf().getBoolean(DFS_SUPPORT_APPEND, true) || !fs.getConf().getBoolean(DFS_DURABLE_SYNC, true)) {
 -        String msg = "Accumulo requires that " + DFS_SUPPORT_APPEND + " and " + DFS_DURABLE_SYNC + " not be configured as false. " + ticketMessage;
 -        log.fatal(msg);
 -        throw new RuntimeException(msg);
 -      }
 -      
 -      try {
 -        // if this class exists
 -        Class.forName("org.apache.hadoop.fs.CreateFlag");
 -        // we're running hadoop 2.0, 1.1
 -        if (!fs.getConf().getBoolean("dfs.datanode.synconclose", false)) {
 -          log.warn("dfs.datanode.synconclose set to false: data loss is possible on system
reset or power loss");
 +        
 +        // If either of these parameters are configured to be false, fail.
 +        // This is a sign that someone is writing bad configuration.
 +        if (!fs.getConf().getBoolean(DFS_SUPPORT_APPEND, true) || !fs.getConf().getBoolean(DFS_DURABLE_SYNC, true)) {
-           log.fatal("Accumulo requires that " + DFS_SUPPORT_APPEND + " and " + DFS_DURABLE_SYNC + " not be configured as false on volume " + volumeName + ". " + ticketMessage);
-           System.exit(-1);
++          String msg = "Accumulo requires that " + DFS_SUPPORT_APPEND + " and " + DFS_DURABLE_SYNC + " not be configured as false. " + ticketMessage;
++          log.fatal(msg);
++          throw new RuntimeException(msg);
          }
 -      } catch (ClassNotFoundException ex) {
 -        // hadoop 1.0
 -      }
 -    }
 -    
 -  }
 -  
 -  /**
 -   * Copy local walogs into HDFS on an upgrade
 -   * 
 -   */
 -  public static void recoverLocalWriteAheadLogs(FileSystem fs, ServerConfiguration serverConf) throws IOException {
 -    FileSystem localfs = FileSystem.getLocal(fs.getConf()).getRawFileSystem();
 -    AccumuloConfiguration conf = serverConf.getConfiguration();
 -    String localWalDirectories = conf.get(Property.LOGGER_DIR);
 -    for (String localWalDirectory : localWalDirectories.split(",")) {
 -      if (!localWalDirectory.startsWith("/")) {
 -        localWalDirectory = System.getenv("ACCUMULO_HOME") + "/" + localWalDirectory;
 -      }
 -      
 -      FileStatus status = null;
 -      try {
 -        status = localfs.getFileStatus(new Path(localWalDirectory));
 -      } catch (FileNotFoundException fne) {}
 -      
 -      if (status == null || !status.isDir()) {
 -        log.debug("Local walog dir " + localWalDirectory + " not found ");
 -        continue;
 -      }
 -      
 -      for (FileStatus file : localfs.listStatus(new Path(localWalDirectory))) {
 -        String name = file.getPath().getName();
 +        
          try {
 -          UUID.fromString(name);
 -        } catch (IllegalArgumentException ex) {
 -          log.info("Ignoring non-log file " + name + " in " + localWalDirectory);
 -          continue;
 -        }
 -        LogFileKey key = new LogFileKey();
 -        LogFileValue value = new LogFileValue();
 -        log.info("Openning local log " + file.getPath());
 -        Reader reader = new SequenceFile.Reader(localfs, file.getPath(), localfs.getConf());
 -        Path tmp = new Path(Constants.getWalDirectory(conf) + "/" + name + ".copy");
 -        FSDataOutputStream writer = fs.create(tmp);
 -        while (reader.next(key, value)) {
 -          try {
 -            key.write(writer);
 -            value.write(writer);
 -          } catch (EOFException ex) {
 -            break;
 +          // if this class exists
 +          Class.forName("org.apache.hadoop.fs.CreateFlag");
 +          // we're running hadoop 2.0, 1.1
 +          if (!fs.getConf().getBoolean("dfs.datanode.synconclose", false)) {
 +            log.warn("dfs.datanode.synconclose set to false: data loss is possible on system
reset or power loss on volume " + volumeName);
            }
 +        } catch (ClassNotFoundException ex) {
 +          // hadoop 1.0
          }
 -        writer.close();
 -        reader.close();
 -        fs.rename(tmp, new Path(tmp.getParent(), name));
 -        log.info("Copied local log " + name);
 -        localfs.delete(new Path(localWalDirectory, name), true);
        }
      }
    }
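
A subtlety worth calling out in the hunk above: DFSConfigKeys.DFS_SUPPORT_APPEND_DEFAULT
is a compile-time constant, so referencing it directly would bake in the value from
whichever Hadoop version Accumulo was compiled against. Reading the field reflectively,
as the code does, resolves it against the Hadoop jar actually on the classpath at
runtime. A self-contained illustration of the difference, using a hypothetical
Constants class in place of DFSConfigKeys:

    import java.lang.reflect.Field;

    class ConstantInliningDemo {
      // Hypothetical stand-in for DFSConfigKeys.DFS_SUPPORT_APPEND_DEFAULT.
      static class Constants {
        public static final boolean DEFAULT_FLAG = false;
      }

      public static void main(String[] args) throws Exception {
        // Direct reference: javac copies the literal into this class at compile
        // time, so replacing Constants in the jar later would have no effect here.
        boolean inlined = Constants.DEFAULT_FLAG;

        // Reflective read: resolved against whatever Constants class is loaded at
        // runtime, which is what the version-spanning sync check needs.
        Field f = Constants.class.getField("DEFAULT_FLAG");
        boolean atRuntime = f.getBoolean(null);

        System.out.println(inlined + " " + atRuntime);
      }
    }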

http://git-wip-us.apache.org/repos/asf/accumulo/blob/e498ebb8/server/src/test/java/org/apache/accumulo/server/tabletserver/TabletServerSyncCheckTest.java
----------------------------------------------------------------------
diff --cc server/src/test/java/org/apache/accumulo/server/tabletserver/TabletServerSyncCheckTest.java
index 0000000,6443b7e..43a3ade
mode 000000,100644..100644
--- a/server/src/test/java/org/apache/accumulo/server/tabletserver/TabletServerSyncCheckTest.java
+++ b/server/src/test/java/org/apache/accumulo/server/tabletserver/TabletServerSyncCheckTest.java
@@@ -1,0 -1,105 +1,283 @@@
+ /*
+  * Licensed to the Apache Software Foundation (ASF) under one or more
+  * contributor license agreements.  See the NOTICE file distributed with
+  * this work for additional information regarding copyright ownership.
+  * The ASF licenses this file to You under the Apache License, Version 2.0
+  * (the "License"); you may not use this file except in compliance with
+  * the License.  You may obtain a copy of the License at
+  *
+  *     http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing, software
+  * distributed under the License is distributed on an "AS IS" BASIS,
+  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  * See the License for the specific language governing permissions and
+  * limitations under the License.
+  */
+ package org.apache.accumulo.server.tabletserver;
+ 
++import java.io.IOException;
++import java.util.Map;
++
++import org.apache.accumulo.core.data.Key;
++import org.apache.accumulo.server.fs.VolumeManager;
+ import org.apache.hadoop.conf.Configuration;
++import org.apache.hadoop.fs.ContentSummary;
++import org.apache.hadoop.fs.FSDataInputStream;
++import org.apache.hadoop.fs.FSDataOutputStream;
++import org.apache.hadoop.fs.FileStatus;
+ import org.apache.hadoop.fs.FileSystem;
++import org.apache.hadoop.fs.Path;
+ import org.apache.hadoop.hdfs.DistributedFileSystem;
+ import org.junit.Test;
+ 
++import com.google.common.collect.ImmutableMap;
++
+ public class TabletServerSyncCheckTest {
+   private static final String DFS_DURABLE_SYNC = "dfs.durable.sync", DFS_SUPPORT_APPEND = "dfs.support.append";
+   
+   @Test(expected = RuntimeException.class)
+   public void testFailureOnExplicitSyncFalseConf() {
+     Configuration conf = new Configuration();
+     conf.set(DFS_DURABLE_SYNC, "false");
+     
+     FileSystem fs = new TestFileSystem(conf);
++    TestVolumeManagerImpl vm = new TestVolumeManagerImpl(ImmutableMap.of("foo", fs));
+     
 -    TabletServer.ensureHdfsSyncIsEnabled(fs);
++    TabletServer.ensureHdfsSyncIsEnabled(vm);
+   }
+   
+   @Test(expected = RuntimeException.class)
 -  public void testFailureOnExplicitAppendFalseConf() {
 -    Configuration conf = new Configuration();
 -    conf.set(DFS_SUPPORT_APPEND, "false");
++  public void testFailureOnSingleExplicitSyncFalseConf() {
++    Configuration conf1 = new Configuration(), conf2 = new Configuration();
++    conf1.set(DFS_DURABLE_SYNC, "false");
+     
 -    FileSystem fs = new TestFileSystem(conf);
++    FileSystem fs1 = new TestFileSystem(conf1);
++    FileSystem fs2 = new TestFileSystem(conf2);
++    TestVolumeManagerImpl vm = new TestVolumeManagerImpl(ImmutableMap.of("bar", fs2, "foo", fs1));
+     
 -    TabletServer.ensureHdfsSyncIsEnabled(fs);
++    TabletServer.ensureHdfsSyncIsEnabled(vm);
+   }
+   
+   @Test(expected = RuntimeException.class)
 -  public void testFailureOnExplicitSyncAndAppendFalseConf() {
++  public void testFailureOnExplicitAppendFalseConf() {
+     Configuration conf = new Configuration();
+     conf.set(DFS_SUPPORT_APPEND, "false");
 -    conf.set(DFS_DURABLE_SYNC, "false");
+     
+     FileSystem fs = new TestFileSystem(conf);
++    TestVolumeManagerImpl vm = new TestVolumeManagerImpl(ImmutableMap.of("foo", fs));
+     
 -    TabletServer.ensureHdfsSyncIsEnabled(fs);
++    TabletServer.ensureHdfsSyncIsEnabled(vm);
+   }
+   
+   @Test(expected = RuntimeException.class)
 -  public void testDefaultHadoopAction() {
 -    // We currently depend on Hadoop-1.0.4 in this branch
 -    // so this test should throw an exception by default
++  public void testFailureOnExplicitSyncAndAppendFalseConf() {
+     Configuration conf = new Configuration();
++    conf.set(DFS_SUPPORT_APPEND, "false");
++    conf.set(DFS_DURABLE_SYNC, "false");
+     
+     FileSystem fs = new TestFileSystem(conf);
++    TestVolumeManagerImpl vm = new TestVolumeManagerImpl(ImmutableMap.of("foo", fs));
+     
 -    TabletServer.ensureHdfsSyncIsEnabled(fs);
++    TabletServer.ensureHdfsSyncIsEnabled(vm);
+   }
+   
 -  @Test(expected = RuntimeException.class)
 -  public void testMissingNecessaryConfiguration() {
 -    // We currently depend on Hadoop-1.0.4 in this branch
 -    // so this test should throw an exception by default
++  @Test
++  public void testDefaultHadoopAction() {
++    // We currently depend on Hadoop-2.0.5 in this branch
++    // so this test should not throw an exception by default
+     Configuration conf = new Configuration();
+     
+     FileSystem fs = new TestFileSystem(conf);
++    TestVolumeManagerImpl vm = new TestVolumeManagerImpl(ImmutableMap.of("foo", fs));
+     
 -    TabletServer.ensureHdfsSyncIsEnabled(fs);
++    TabletServer.ensureHdfsSyncIsEnabled(vm);
+   }
+   
+   @Test
+   public void testNecessaryConfiguration() {
 -    // We currently depend on Hadoop-1.0.4 in this branch
++    // We currently depend on Hadoop-2.0.5 in this branch
+     // By providing the override, we should not throw an exception
+     Configuration conf = new Configuration();
+     conf.set(DFS_SUPPORT_APPEND, "true");
+     
+     FileSystem fs = new TestFileSystem(conf);
++    TestVolumeManagerImpl vm = new TestVolumeManagerImpl(ImmutableMap.of("foo", fs));
+     
 -    TabletServer.ensureHdfsSyncIsEnabled(fs);
++    TabletServer.ensureHdfsSyncIsEnabled(vm);
+   }
+   
+   private class TestFileSystem extends DistributedFileSystem {
+     protected final Configuration conf;
+     
+     public TestFileSystem(Configuration conf) {
+       this.conf = conf;
+     }
+     
+     @Override
+     public Configuration getConf() {
+       return conf;
+     }
+     
+   }
++  
++  private class TestVolumeManagerImpl implements VolumeManager {
++    
++    protected final Map<String,? extends FileSystem> volumes;
++
++    public TestVolumeManagerImpl(Map<String,? extends FileSystem> volumes) {
++      this.volumes = volumes;
++    }
++
++    @Override
++    public void close() throws IOException {
++      
++    }
++
++    @Override
++    public boolean closePossiblyOpenFile(Path path) throws IOException {
++      return false;
++    }
++
++    @Override
++    public FSDataOutputStream create(Path dest) throws IOException {
++      return null;
++    }
++
++    @Override
++    public FSDataOutputStream create(Path path, boolean b) throws IOException {
++      return null;
++    }
++
++    @Override
++    public FSDataOutputStream create(Path path, boolean b, int int1, short int2, long long1) throws IOException {
++      return null;
++    }
++
++    @Override
++    public boolean createNewFile(Path writable) throws IOException {
++      return false;
++    }
++
++    @Override
++    public FSDataOutputStream createSyncable(Path logPath, int buffersize, short replication, long blockSize) throws IOException {
++      return null;
++    }
++
++    @Override
++    public boolean delete(Path path) throws IOException {
++      return false;
++    }
++
++    @Override
++    public boolean deleteRecursively(Path path) throws IOException {
++      return false;
++    }
++
++    @Override
++    public boolean exists(Path path) throws IOException {
++      return false;
++    }
++
++    @Override
++    public FileStatus getFileStatus(Path path) throws IOException {
++      return null;
++    }
++
++    @Override
++    public FileSystem getFileSystemByPath(Path path) {
++      return null;
++    }
++
++    @Override
++    public Map<String,? extends FileSystem> getFileSystems() {
++      return volumes;
++    }
++
++    @Override
++    public Path matchingFileSystem(Path source, String[] options) {
++      return null;
++    }
++
++    @Override
++    public String newPathOnSameVolume(String sourceDir, String suffix) {
++      return null;
++    }
++
++    @Override
++    public FileStatus[] listStatus(Path path) throws IOException {
++      return null;
++    }
++
++    @Override
++    public boolean mkdirs(Path directory) throws IOException {
++      return false;
++    }
++
++    @Override
++    public FSDataInputStream open(Path path) throws IOException {
++      return null;
++    }
++
++    @Override
++    public boolean rename(Path path, Path newPath) throws IOException {
++      return false;
++    }
++
++    @Override
++    public boolean moveToTrash(Path sourcePath) throws IOException {
++      return false;
++    }
++
++    @Override
++    public short getDefaultReplication(Path logPath) {
++      return 0;
++    }
++
++    @Override
++    public boolean isFile(Path path) throws IOException {
++      return false;
++    }
++
++    @Override
++    public boolean isReady() throws IOException {
++      return false;
++    }
++
++    @Override
++    public FileSystem getDefaultVolume() {
++      return null;
++    }
++
++    @Override
++    public FileStatus[] globStatus(Path path) throws IOException {
++      return null;
++    }
++
++    @Override
++    public Path getFullPath(Key key) {
++      return null;
++    }
++
++    @Override
++    public Path getFullPath(String tableId, String path) {
++      return null;
++    }
++
++    @Override
++    public Path getFullPath(FileType fileType, String fileName) throws IOException {
++      return null;
++    }
++
++    @Override
++    public ContentSummary getContentSummary(Path dir) throws IOException {
++      return null;
++    }
++
++    @Override
++    public String choose(String[] options) {
++      return null;
++    }
++    
++  }
+ }
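
One design note on why these tests are possible at all: the 1.6 side of this merge
(kept in the TabletServer hunk above) throws a RuntimeException on misconfiguration
instead of calling System.exit(-1). An exit would kill the JUnit JVM before the
runner could record a result, while a thrown exception is observable with the
expected-exception idiom the tests use. A trimmed sketch of the contrast, not taken
from the commit:

    import org.junit.Test;

    public class ExitVsThrowSketch {
      static void failFastWithExit() {
        System.exit(-1); // would terminate the test JVM; JUnit never sees a result
      }

      static void failFastWithThrow() {
        throw new RuntimeException("observable by the caller and by JUnit");
      }

      @Test(expected = RuntimeException.class)
      public void exceptionIsTestable() {
        failFastWithThrow(); // passes: JUnit catches and verifies the exception type
      }
    }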

