hadoop-common-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From e..@apache.org
Subject svn commit: r1331519 - in /hadoop/common/branches/branch-1: ./ src/c++/libhdfs/tests/conf/ src/hdfs/ src/hdfs/org/apache/hadoop/hdfs/ src/hdfs/org/apache/hadoop/hdfs/protocol/ src/hdfs/org/apache/hadoop/hdfs/server/datanode/ src/hdfs/org/apache/hadoop/...
Date Fri, 27 Apr 2012 17:52:01 GMT
Author: eli
Date: Fri Apr 27 17:52:00 2012
New Revision: 1331519

URL: http://svn.apache.org/viewvc?rev=1331519&view=rev
Log:
HADOOP-8230. Enable sync by default and disable append. Contributed by Eli Collins

Modified:
    hadoop/common/branches/branch-1/CHANGES.txt
    hadoop/common/branches/branch-1/src/c++/libhdfs/tests/conf/hdfs-site.xml
    hadoop/common/branches/branch-1/src/hdfs/hdfs-default.xml
    hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/DFSConfigKeys.java
    hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
    hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/server/datanode/DataNode.java
    hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/server/datanode/FSDataset.java
    hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
    hadoop/common/branches/branch-1/src/test/org/apache/hadoop/hdfs/TestFileAppend2.java
    hadoop/common/branches/branch-1/src/test/org/apache/hadoop/hdfs/TestFileAppend3.java
    hadoop/common/branches/branch-1/src/test/org/apache/hadoop/hdfs/TestFileAppend4.java
    hadoop/common/branches/branch-1/src/test/org/apache/hadoop/hdfs/TestFileConcurrentReader.java
    hadoop/common/branches/branch-1/src/test/org/apache/hadoop/hdfs/TestFileCreationDelete.java
    hadoop/common/branches/branch-1/src/test/org/apache/hadoop/hdfs/TestLeaseRecovery.java
    hadoop/common/branches/branch-1/src/test/org/apache/hadoop/hdfs/TestQuota.java
    hadoop/common/branches/branch-1/src/test/org/apache/hadoop/hdfs/TestRenameWhileOpen.java
    hadoop/common/branches/branch-1/src/test/org/apache/hadoop/hdfs/TestSyncingWriterInterrupted.java
    hadoop/common/branches/branch-1/src/test/org/apache/hadoop/hdfs/security/TestDelegationTokenForProxyUser.java
    hadoop/common/branches/branch-1/src/test/org/apache/hadoop/hdfs/server/namenode/TestBBWBlockReport.java
    hadoop/common/branches/branch-1/src/test/org/apache/hadoop/hdfs/server/namenode/TestBlockTokenWithDFS.java
    hadoop/common/branches/branch-1/src/test/org/apache/hadoop/hdfs/server/namenode/TestDFSConcurrentFileOperations.java

Modified: hadoop/common/branches/branch-1/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1/CHANGES.txt?rev=1331519&r1=1331518&r2=1331519&view=diff
==============================================================================
--- hadoop/common/branches/branch-1/CHANGES.txt (original)
+++ hadoop/common/branches/branch-1/CHANGES.txt Fri Apr 27 17:52:00 2012
@@ -7,6 +7,8 @@ Release 1.1.0 - unreleased
     HDFS-3044. fsck move should be non-destructive by default.
     (Colin Patrick McCabe via eli)
 
+    HADOOP-8230. Enable sync by default and disable append. (eli)
+
   NEW FEATURES
 
     MAPREDUCE-3118. Backport Gridmix and Rumen features to

Modified: hadoop/common/branches/branch-1/src/c++/libhdfs/tests/conf/hdfs-site.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1/src/c%2B%2B/libhdfs/tests/conf/hdfs-site.xml?rev=1331519&r1=1331518&r2=1331519&view=diff
==============================================================================
--- hadoop/common/branches/branch-1/src/c++/libhdfs/tests/conf/hdfs-site.xml (original)
+++ hadoop/common/branches/branch-1/src/c++/libhdfs/tests/conf/hdfs-site.xml Fri Apr 27 17:52:00
2012
@@ -15,13 +15,6 @@
 </property>
 
 <property>
-  <name>dfs.support.append</name>
-  <value>true</value>
-  <description>Allow appends to files.
-  </description>
-</property>
-
-<property>
   <name>dfs.datanode.address</name>
   <value>0.0.0.0:50012</value>
   <description>

Modified: hadoop/common/branches/branch-1/src/hdfs/hdfs-default.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1/src/hdfs/hdfs-default.xml?rev=1331519&r1=1331518&r2=1331519&view=diff
==============================================================================
--- hadoop/common/branches/branch-1/src/hdfs/hdfs-default.xml (original)
+++ hadoop/common/branches/branch-1/src/hdfs/hdfs-default.xml Fri Apr 27 17:52:00 2012
@@ -387,10 +387,10 @@ creations/deletions), or "all".</descrip
 
 <property>
   <name>dfs.support.append</name>
-  <value>false</value>
-  <description>Does HDFS allow appends to files?
-               This is currently set to false because there are bugs in the
-               "append code" and is not supported in any production cluster.
+  <description>
+    This option is no longer supported. HBase no longer requires that
+    this option be enabled as sync is now enabled by default. See
+    HADOOP-8230 for additional information.
   </description>
 </property>
 

Modified: hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/DFSConfigKeys.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/DFSConfigKeys.java?rev=1331519&r1=1331518&r2=1331519&view=diff
==============================================================================
--- hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/DFSConfigKeys.java (original)
+++ hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/DFSConfigKeys.java Fri
Apr 27 17:52:00 2012
@@ -174,8 +174,6 @@ public class DFSConfigKeys extends Commo
   public static final int     DFS_NAMENODE_HANDLER_COUNT_DEFAULT = 10;
   public static final String  DFS_NAMENODE_SERVICE_HANDLER_COUNT_KEY = "dfs.namenode.service.handler.count";
   public static final int     DFS_NAMENODE_SERVICE_HANDLER_COUNT_DEFAULT = 10;
-  public static final String  DFS_SUPPORT_APPEND_KEY = "dfs.support.append";
-  public static final boolean DFS_SUPPORT_APPEND_DEFAULT = false;
   public static final String  DFS_HTTPS_ENABLE_KEY = "dfs.https.enable";
   public static final boolean DFS_HTTPS_ENABLE_DEFAULT = false;
   public static final String  DFS_DEFAULT_CHUNK_VIEW_SIZE_KEY = "dfs.default.chunk.view.size";

Modified: hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/protocol/ClientProtocol.java?rev=1331519&r1=1331518&r2=1331519&view=diff
==============================================================================
--- hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
(original)
+++ hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
Fri Apr 27 17:52:00 2012
@@ -143,7 +143,7 @@ public interface ClientProtocol extends 
    * denied by the system. As usually on the client side the exception will 
    * be wrapped into {@link org.apache.hadoop.ipc.RemoteException}.
    * Allows appending to an existing file if the server is
-   * configured with the parameter dfs.support.append set to true, otherwise
+   * configured with the parameter dfs.support.broken.append set to true, otherwise
    * throws an IOException.
    * @throws IOException if other errors occur.
    */

Modified: hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/server/datanode/DataNode.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/server/datanode/DataNode.java?rev=1331519&r1=1331518&r2=1331519&view=diff
==============================================================================
--- hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/server/datanode/DataNode.java
(original)
+++ hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/server/datanode/DataNode.java
Fri Apr 27 17:52:00 2012
@@ -228,7 +228,6 @@ public class DataNode extends Configured
   int socketWriteTimeout = 0;  
   boolean transferToAllowed = true;
   int writePacketSize = 0;
-  private boolean supportAppends;
   boolean isBlockTokenEnabled;
   BlockTokenSecretManager blockTokenSecretManager;
   boolean isBlockTokenInitialized = false;
@@ -295,7 +294,6 @@ public class DataNode extends Configured
         DFSConfigKeys.DFS_DATANODE_USER_NAME_KEY);
 
     datanodeObject = this;
-    supportAppends = conf.getBoolean("dfs.support.append", false);
     this.userWithLocalPathAccess = conf
         .get(DFSConfigKeys.DFS_BLOCK_LOCAL_PATH_ACCESS_USER_KEY);
     try {
@@ -757,12 +755,12 @@ public class DataNode extends Configured
       dnRegistration.exportedKeys = ExportedBlockKeys.DUMMY_KEYS;
     }
 
-    if (supportAppends) {
-      Block[] bbwReport = data.getBlocksBeingWrittenReport();
-      long[] blocksBeingWritten = BlockListAsLongs
-          .convertToArrayLongs(bbwReport);
-      namenode.blocksBeingWrittenReport(dnRegistration, blocksBeingWritten);
-    }
+
+    Block[] bbwReport = data.getBlocksBeingWrittenReport();
+    long[] blocksBeingWritten =
+      BlockListAsLongs.convertToArrayLongs(bbwReport);
+    namenode.blocksBeingWrittenReport(dnRegistration, blocksBeingWritten);
+
     // random short delay - helps scatter the BR from all DNs
     // - but we can start generating the block report immediately
     data.requestAsyncBlockReport();

Modified: hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/server/datanode/FSDataset.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/server/datanode/FSDataset.java?rev=1331519&r1=1331518&r2=1331519&view=diff
==============================================================================
--- hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/server/datanode/FSDataset.java
(original)
+++ hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/server/datanode/FSDataset.java
Fri Apr 27 17:52:00 2012
@@ -347,7 +347,6 @@ public class FSDataset implements FSCons
       this.reserved = conf.getLong("dfs.datanode.du.reserved", 0);
       this.dataDir = new FSDir(currentDir);
       this.currentDir = currentDir;
-      boolean supportAppends = conf.getBoolean("dfs.support.append", false);
       File parent = currentDir.getParentFile();
 
       this.detachDir = new File(parent, "detach");
@@ -367,11 +366,7 @@ public class FSDataset implements FSCons
       // should not be deleted.
       blocksBeingWritten = new File(parent, "blocksBeingWritten");
       if (blocksBeingWritten.exists()) {
-        if (supportAppends) {  
-          recoverBlocksBeingWritten(blocksBeingWritten);
-        } else {
-          FileUtil.fullyDelete(blocksBeingWritten);
-        }
+        recoverBlocksBeingWritten(blocksBeingWritten);
       }
       
       if (!blocksBeingWritten.mkdirs()) {

Modified: hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java?rev=1331519&r1=1331518&r2=1331519&view=diff
==============================================================================
--- hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
(original)
+++ hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
Fri Apr 27 17:52:00 2012
@@ -314,8 +314,8 @@ public class FSNamesystem implements FSC
   private long replicationRecheckInterval;
   // default block size of a file
   private long defaultBlockSize = 0;
-  // allow appending to hdfs files
-  private boolean supportAppends = true;
+  // allow file appending (for test coverage)
+  private boolean allowBrokenAppend = false;
 
   /**
    * Last block index used for replication work.
@@ -532,7 +532,12 @@ public class FSNamesystem implements FSC
     LOG.info(DFSConfigKeys.DFS_BLOCK_INVALIDATE_LIMIT_KEY + "=" + this.blockInvalidateLimit);
 
     this.accessTimePrecision = conf.getLong("dfs.access.time.precision", 0);
-    this.supportAppends = conf.getBoolean("dfs.support.append", false);
+    this.allowBrokenAppend = conf.getBoolean("dfs.support.broken.append", false);
+    if (conf.getBoolean("dfs.support.append", false)) {
+      LOG.warn("The dfs.support.append option is in your configuration, " +
+               "however append is not supported. This configuration option " +
+               "is no longer required to enable sync.");
+    }
     this.isAccessTokenEnabled = conf.getBoolean(
         DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY, false);
     if (isAccessTokenEnabled) {
@@ -1425,9 +1430,9 @@ public class FSNamesystem implements FSC
    */
   LocatedBlock appendFile(String src, String holder, String clientMachine
       ) throws IOException {
-    if (supportAppends == false) {
-      throw new IOException("Append to hdfs not supported." +
-                            " Please refer to dfs.support.append configuration parameter.");
+    if (!allowBrokenAppend) {
+      throw new IOException("Append is not supported. " +
+          "Please see the dfs.support.broken.append configuration parameter.");
     }
     startFileInternal(src, null, holder, clientMachine, false, true, 
                       false, (short)maxReplication, (long)0);
@@ -2296,13 +2301,11 @@ public class FSNamesystem implements FSC
     }
 
     // If this commit does not want to close the file, persist
-    // blocks only if append is supported and return
+    // blocks and return
     src = leaseManager.findPath(pendingFile);
     if (!closeFile) {
-      if (supportAppends) {
-        dir.persistBlocks(src, pendingFile);
-        getEditLog().logSync();
-      }
+      dir.persistBlocks(src, pendingFile);
+      getEditLog().logSync();
       LOG.info("commitBlockSynchronization(" + lastblock + ") successful");
       return;
     }

Modified: hadoop/common/branches/branch-1/src/test/org/apache/hadoop/hdfs/TestFileAppend2.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1/src/test/org/apache/hadoop/hdfs/TestFileAppend2.java?rev=1331519&r1=1331518&r2=1331519&view=diff
==============================================================================
--- hadoop/common/branches/branch-1/src/test/org/apache/hadoop/hdfs/TestFileAppend2.java (original)
+++ hadoop/common/branches/branch-1/src/test/org/apache/hadoop/hdfs/TestFileAppend2.java Fri
Apr 27 17:52:00 2012
@@ -108,7 +108,7 @@ public class TestFileAppend2 extends Tes
       conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);
     }
     conf.setInt("dfs.datanode.handler.count", 50);
-    conf.setBoolean("dfs.support.append", true);
+    conf.setBoolean("dfs.support.broken.append", true);
     initBuffer(fileSize);
     MiniDFSCluster cluster = new MiniDFSCluster(conf, 1, true, null);
     FileSystem fs = cluster.getFileSystem();
@@ -359,7 +359,7 @@ public class TestFileAppend2 extends Tes
     conf.setInt("dfs.datanode.handler.count", 50);
     conf.setInt("dfs.datanode.artificialBlockReceivedDelay",
                 artificialBlockReceivedDelay);
-    conf.setBoolean("dfs.support.append", true);
+    conf.setBoolean("dfs.support.broken.append", true);
 
     MiniDFSCluster cluster = new MiniDFSCluster(conf, numDatanodes, 
                                                 true, null);

Modified: hadoop/common/branches/branch-1/src/test/org/apache/hadoop/hdfs/TestFileAppend3.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1/src/test/org/apache/hadoop/hdfs/TestFileAppend3.java?rev=1331519&r1=1331518&r2=1331519&view=diff
==============================================================================
--- hadoop/common/branches/branch-1/src/test/org/apache/hadoop/hdfs/TestFileAppend3.java (original)
+++ hadoop/common/branches/branch-1/src/test/org/apache/hadoop/hdfs/TestFileAppend3.java Fri
Apr 27 17:52:00 2012
@@ -52,7 +52,7 @@ public class TestFileAppend3 extends jun
         AppendTestUtil.LOG.info("setUp()");
         conf = new Configuration();
         conf.setInt("io.bytes.per.checksum", 512);
-        conf.setBoolean("dfs.support.append", true);
+        conf.setBoolean("dfs.support.broken.append", true);
         buffersize = conf.getInt("io.file.buffer.size", 4096);
         cluster = new MiniDFSCluster(conf, DATANODE_NUM, true, null);
         fs = (DistributedFileSystem)cluster.getFileSystem();

Modified: hadoop/common/branches/branch-1/src/test/org/apache/hadoop/hdfs/TestFileAppend4.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1/src/test/org/apache/hadoop/hdfs/TestFileAppend4.java?rev=1331519&r1=1331518&r2=1331519&view=diff
==============================================================================
--- hadoop/common/branches/branch-1/src/test/org/apache/hadoop/hdfs/TestFileAppend4.java (original)
+++ hadoop/common/branches/branch-1/src/test/org/apache/hadoop/hdfs/TestFileAppend4.java Fri
Apr 27 17:52:00 2012
@@ -97,7 +97,7 @@ public class TestFileAppend4 extends Tes
     if (simulatedStorage) {
       conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);
     }
-    conf.setBoolean("dfs.support.append", true);
+    conf.setBoolean("dfs.support.broken.append", true);
 
     // lower heartbeat interval for fast recognition of DN death
     conf.setInt("heartbeat.recheck.interval", 1000);

Modified: hadoop/common/branches/branch-1/src/test/org/apache/hadoop/hdfs/TestFileConcurrentReader.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1/src/test/org/apache/hadoop/hdfs/TestFileConcurrentReader.java?rev=1331519&r1=1331518&r2=1331519&view=diff
==============================================================================
--- hadoop/common/branches/branch-1/src/test/org/apache/hadoop/hdfs/TestFileConcurrentReader.java
(original)
+++ hadoop/common/branches/branch-1/src/test/org/apache/hadoop/hdfs/TestFileConcurrentReader.java
Fri Apr 27 17:52:00 2012
@@ -233,7 +233,7 @@ public class TestFileConcurrentReader ex
     Configuration conf
   ) throws IOException {
     try {
-      conf.setBoolean("dfs.support.append", syncType == SyncType.APPEND);
+      conf.setBoolean("dfs.support.broken.append", syncType == SyncType.APPEND);
       conf.setBoolean("dfs.datanode.transferTo.allowed", transferToAllowed);
       init(conf);
 

Modified: hadoop/common/branches/branch-1/src/test/org/apache/hadoop/hdfs/TestFileCreationDelete.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1/src/test/org/apache/hadoop/hdfs/TestFileCreationDelete.java?rev=1331519&r1=1331518&r2=1331519&view=diff
==============================================================================
--- hadoop/common/branches/branch-1/src/test/org/apache/hadoop/hdfs/TestFileCreationDelete.java
(original)
+++ hadoop/common/branches/branch-1/src/test/org/apache/hadoop/hdfs/TestFileCreationDelete.java
Fri Apr 27 17:52:00 2012
@@ -42,7 +42,6 @@ public class TestFileCreationDelete exte
     conf.setInt("ipc.client.connection.maxidletime", MAX_IDLE_TIME);
     conf.setInt("heartbeat.recheck.interval", 1000);
     conf.setInt("dfs.heartbeat.interval", 1);
-    conf.setBoolean("dfs.support.append", true);
 
     // create cluster
     MiniDFSCluster cluster = new MiniDFSCluster(conf, 1, true, null);

Modified: hadoop/common/branches/branch-1/src/test/org/apache/hadoop/hdfs/TestLeaseRecovery.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1/src/test/org/apache/hadoop/hdfs/TestLeaseRecovery.java?rev=1331519&r1=1331518&r2=1331519&view=diff
==============================================================================
--- hadoop/common/branches/branch-1/src/test/org/apache/hadoop/hdfs/TestLeaseRecovery.java
(original)
+++ hadoop/common/branches/branch-1/src/test/org/apache/hadoop/hdfs/TestLeaseRecovery.java
Fri Apr 27 17:52:00 2012
@@ -70,7 +70,7 @@ public class TestLeaseRecovery extends j
     final int ORG_FILE_SIZE = 3000; 
     Configuration conf = new Configuration();
     conf.setLong("dfs.block.size", BLOCK_SIZE);
-    conf.setBoolean("dfs.support.append", true);
+    conf.setBoolean("dfs.support.broken.append", true);
     MiniDFSCluster cluster = null;
 
     try {

Modified: hadoop/common/branches/branch-1/src/test/org/apache/hadoop/hdfs/TestQuota.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1/src/test/org/apache/hadoop/hdfs/TestQuota.java?rev=1331519&r1=1331518&r2=1331519&view=diff
==============================================================================
--- hadoop/common/branches/branch-1/src/test/org/apache/hadoop/hdfs/TestQuota.java (original)
+++ hadoop/common/branches/branch-1/src/test/org/apache/hadoop/hdfs/TestQuota.java Fri Apr
27 17:52:00 2012
@@ -62,7 +62,7 @@ public class TestQuota extends TestCase 
     // Space quotas
     final int DEFAULT_BLOCK_SIZE = 512;
     conf.setLong("dfs.block.size", DEFAULT_BLOCK_SIZE);
-    conf.setBoolean("dfs.support.append", true);
+    conf.setBoolean("dfs.support.broken.append", true);
     final MiniDFSCluster cluster = new MiniDFSCluster(conf, 2, true, null);
     final FileSystem fs = cluster.getFileSystem();
     assertTrue("Not a HDFS: "+fs.getUri(),
@@ -509,7 +509,7 @@ public class TestQuota extends TestCase 
     // set a smaller block size so that we can test with smaller 
     // diskspace quotas
     conf.set("dfs.block.size", "512");
-    conf.setBoolean("dfs.support.append", true);
+    conf.setBoolean("dfs.support.broken.append", true);
     final MiniDFSCluster cluster = new MiniDFSCluster(conf, 2, true, null);
     final FileSystem fs = cluster.getFileSystem();
     assertTrue("Not a HDFS: "+fs.getUri(),

Modified: hadoop/common/branches/branch-1/src/test/org/apache/hadoop/hdfs/TestRenameWhileOpen.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1/src/test/org/apache/hadoop/hdfs/TestRenameWhileOpen.java?rev=1331519&r1=1331518&r2=1331519&view=diff
==============================================================================
--- hadoop/common/branches/branch-1/src/test/org/apache/hadoop/hdfs/TestRenameWhileOpen.java
(original)
+++ hadoop/common/branches/branch-1/src/test/org/apache/hadoop/hdfs/TestRenameWhileOpen.java
Fri Apr 27 17:52:00 2012
@@ -48,7 +48,6 @@ public class TestRenameWhileOpen extends
     conf.setInt("heartbeat.recheck.interval", 1000);
     conf.setInt("dfs.heartbeat.interval", 1);
     conf.setInt("dfs.safemode.threshold.pct", 1);
-    conf.setBoolean("dfs.support.append", true);
 
     // create cluster
     System.out.println("Test 1*****************************");
@@ -132,7 +131,6 @@ public class TestRenameWhileOpen extends
     conf.setInt("heartbeat.recheck.interval", 1000);
     conf.setInt("dfs.heartbeat.interval", 1);
     conf.setInt("dfs.safemode.threshold.pct", 1);
-    conf.setBoolean("dfs.support.append", true);
     System.out.println("Test 2************************************");
 
     // create cluster
@@ -205,7 +203,6 @@ public class TestRenameWhileOpen extends
     conf.setInt("heartbeat.recheck.interval", 1000);
     conf.setInt("dfs.heartbeat.interval", 1);
     conf.setInt("dfs.safemode.threshold.pct", 1);
-    conf.setBoolean("dfs.support.append", true);
     System.out.println("Test 3************************************");
 
     // create cluster
@@ -268,7 +265,6 @@ public class TestRenameWhileOpen extends
     conf.setInt("heartbeat.recheck.interval", 1000);
     conf.setInt("dfs.heartbeat.interval", 1);
     conf.setInt("dfs.safemode.threshold.pct", 1);
-    conf.setBoolean("dfs.support.append", true);
     System.out.println("Test 4************************************");
 
     // create cluster

Modified: hadoop/common/branches/branch-1/src/test/org/apache/hadoop/hdfs/TestSyncingWriterInterrupted.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1/src/test/org/apache/hadoop/hdfs/TestSyncingWriterInterrupted.java?rev=1331519&r1=1331518&r2=1331519&view=diff
==============================================================================
--- hadoop/common/branches/branch-1/src/test/org/apache/hadoop/hdfs/TestSyncingWriterInterrupted.java
(original)
+++ hadoop/common/branches/branch-1/src/test/org/apache/hadoop/hdfs/TestSyncingWriterInterrupted.java
Fri Apr 27 17:52:00 2012
@@ -24,7 +24,7 @@ public class TestSyncingWriterInterrupte
   @Before
   public void setUp() throws Exception {
     conf = new Configuration();
-    conf.setBoolean("dfs.support.append", true);
+    conf.setBoolean("dfs.support.broken.append", true);
     conf.setInt("dfs.client.block.recovery.retries", 1);
   }
   

Modified: hadoop/common/branches/branch-1/src/test/org/apache/hadoop/hdfs/security/TestDelegationTokenForProxyUser.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1/src/test/org/apache/hadoop/hdfs/security/TestDelegationTokenForProxyUser.java?rev=1331519&r1=1331518&r2=1331519&view=diff
==============================================================================
--- hadoop/common/branches/branch-1/src/test/org/apache/hadoop/hdfs/security/TestDelegationTokenForProxyUser.java
(original)
+++ hadoop/common/branches/branch-1/src/test/org/apache/hadoop/hdfs/security/TestDelegationTokenForProxyUser.java
Fri Apr 27 17:52:00 2012
@@ -106,7 +106,7 @@ public class TestDelegationTokenForProxy
   public void setUp() throws Exception {
     config = new Configuration();
     config.setBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);
-    config.setBoolean(DFSConfigKeys.DFS_SUPPORT_APPEND_KEY, true);
+    config.setBoolean("dfs.support.broken.append", true);
     config.setLong(
         DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_MAX_LIFETIME_KEY, 10000);
     config.setLong(

Modified: hadoop/common/branches/branch-1/src/test/org/apache/hadoop/hdfs/server/namenode/TestBBWBlockReport.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1/src/test/org/apache/hadoop/hdfs/server/namenode/TestBBWBlockReport.java?rev=1331519&r1=1331518&r2=1331519&view=diff
==============================================================================
--- hadoop/common/branches/branch-1/src/test/org/apache/hadoop/hdfs/server/namenode/TestBBWBlockReport.java
(original)
+++ hadoop/common/branches/branch-1/src/test/org/apache/hadoop/hdfs/server/namenode/TestBBWBlockReport.java
Fri Apr 27 17:52:00 2012
@@ -49,10 +49,9 @@ public class TestBBWBlockReport {
 
   @Test(timeout = 60000)
   // timeout is mainly for safe mode
-  public void testDNShouldSendBBWReportIfAppendOn() throws Exception {
+  public void testDNShouldSendBBWReport() throws Exception {
     FileSystem fileSystem = null;
     FSDataOutputStream outStream = null;
-    conf.setBoolean("dfs.support.append", true);
     MiniDFSCluster cluster = new MiniDFSCluster(conf, 1, true, null);
     cluster.waitActive();
     try {
@@ -73,35 +72,6 @@ public class TestBBWBlockReport {
     }
   }
 
-  @Test
-  public void testDNShouldNotSendBBWReportIfAppendOff() throws Exception {
-    FileSystem fileSystem = null;
-    FSDataOutputStream outStream = null;
-    // disable the append support
-    conf.setBoolean("dfs.support.append", false);
-    MiniDFSCluster cluster = new MiniDFSCluster(conf, 1, true, null);
-    cluster.waitActive();
-    try {
-      fileSystem = cluster.getFileSystem();
-      // Keep open stream
-      outStream = writeFileAndSync(fileSystem, src, fileContent);
-      cluster.restartNameNode(false);
-      Thread.sleep(2000);
-      assertEquals(
-          "Able to read the synced block content after NameNode restart (without append support)",
-          0, getFileContentFromDFS(fileSystem).length());
-    } finally {
-      // NN will not come out of safe mode. So exited the safemode forcibly to
-      // clean the resources.
-      cluster.getNameNode().setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
-      if (null != fileSystem)
-        fileSystem.close();
-      if (null != outStream)
-        outStream.close();
-      cluster.shutdown();
-    }
-  }
-
   private String getFileContentFromDFS(FileSystem fs) throws IOException {
     ByteArrayOutputStream bio = new ByteArrayOutputStream();
     IOUtils.copyBytes(fs.open(src), bio, conf, true);

Modified: hadoop/common/branches/branch-1/src/test/org/apache/hadoop/hdfs/server/namenode/TestBlockTokenWithDFS.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1/src/test/org/apache/hadoop/hdfs/server/namenode/TestBlockTokenWithDFS.java?rev=1331519&r1=1331518&r2=1331519&view=diff
==============================================================================
--- hadoop/common/branches/branch-1/src/test/org/apache/hadoop/hdfs/server/namenode/TestBlockTokenWithDFS.java
(original)
+++ hadoop/common/branches/branch-1/src/test/org/apache/hadoop/hdfs/server/namenode/TestBlockTokenWithDFS.java
Fri Apr 27 17:52:00 2012
@@ -170,7 +170,7 @@ public class TestBlockTokenWithDFS exten
     conf.setInt("dfs.heartbeat.interval", 1);
     conf.setInt("dfs.replication", numDataNodes);
     conf.setInt("ipc.client.connect.max.retries", 0);
-    conf.setBoolean("dfs.support.append", true);
+    conf.setBoolean("dfs.support.broken.append", true);
     return conf;
   }
 

Modified: hadoop/common/branches/branch-1/src/test/org/apache/hadoop/hdfs/server/namenode/TestDFSConcurrentFileOperations.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1/src/test/org/apache/hadoop/hdfs/server/namenode/TestDFSConcurrentFileOperations.java?rev=1331519&r1=1331518&r2=1331519&view=diff
==============================================================================
--- hadoop/common/branches/branch-1/src/test/org/apache/hadoop/hdfs/server/namenode/TestDFSConcurrentFileOperations.java
(original)
+++ hadoop/common/branches/branch-1/src/test/org/apache/hadoop/hdfs/server/namenode/TestDFSConcurrentFileOperations.java
Fri Apr 27 17:52:00 2012
@@ -74,7 +74,7 @@ public class TestDFSConcurrentFileOperat
     Configuration conf = new Configuration();
     
     conf.setLong("dfs.block.size", blockSize);
-    conf.setBoolean("dfs.support.append", true);
+    conf.setBoolean("dfs.support.broken.append", true);
     
     init(conf);
     



Mime
View raw message