hadoop-common-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From dhr...@apache.org
Subject svn commit: r752518 - in /hadoop/core/branches/branch-0.20: ./ src/c++/libhdfs/tests/conf/ src/hdfs/ src/hdfs/org/apache/hadoop/hdfs/protocol/ src/hdfs/org/apache/hadoop/hdfs/server/datanode/ src/hdfs/org/apache/hadoop/hdfs/server/namenode/ src/test/or...
Date Wed, 11 Mar 2009 16:52:48 GMT
Author: dhruba
Date: Wed Mar 11 16:52:47 2009
New Revision: 752518

URL: http://svn.apache.org/viewvc?rev=752518&view=rev
Log:
HADOOP-5332. Appending to files is not allowed (by default) unless
dfs.support.append is set to true. (dhruba)


Modified:
    hadoop/core/branches/branch-0.20/CHANGES.txt   (contents, props changed)
    hadoop/core/branches/branch-0.20/src/c++/libhdfs/tests/conf/hdfs-site.xml
    hadoop/core/branches/branch-0.20/src/hdfs/hdfs-default.xml
    hadoop/core/branches/branch-0.20/src/hdfs/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
    hadoop/core/branches/branch-0.20/src/hdfs/org/apache/hadoop/hdfs/server/datanode/FSDataset.java
    hadoop/core/branches/branch-0.20/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
    hadoop/core/branches/branch-0.20/src/test/org/apache/hadoop/hdfs/TestFileAppend2.java
    hadoop/core/branches/branch-0.20/src/test/org/apache/hadoop/hdfs/TestFileAppend3.java
    hadoop/core/branches/branch-0.20/src/test/org/apache/hadoop/hdfs/TestFileCreationDelete.java
    hadoop/core/branches/branch-0.20/src/test/org/apache/hadoop/hdfs/TestLeaseRecovery.java
    hadoop/core/branches/branch-0.20/src/test/org/apache/hadoop/hdfs/TestQuota.java
    hadoop/core/branches/branch-0.20/src/test/org/apache/hadoop/hdfs/TestRenameWhileOpen.java

Modified: hadoop/core/branches/branch-0.20/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/core/branches/branch-0.20/CHANGES.txt?rev=752518&r1=752517&r2=752518&view=diff
==============================================================================
--- hadoop/core/branches/branch-0.20/CHANGES.txt (original)
+++ hadoop/core/branches/branch-0.20/CHANGES.txt Wed Mar 11 16:52:47 2009
@@ -722,6 +722,9 @@
 
     HADOOP-5307. Fix null value handling in StringUtils#arrayToString() and 
     #getStrings(). (enis)
+
+    HADOOP-5332. Appending to files is not allowed (by default) unless
+    dfs.support.append is set to true. (dhruba)
  
 Release 0.19.1 - Unreleased
 

Propchange: hadoop/core/branches/branch-0.20/CHANGES.txt
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Wed Mar 11 16:52:47 2009
@@ -1,3 +1,3 @@
 /hadoop/core/branches/branch-0.18/CHANGES.txt:727226
 /hadoop/core/branches/branch-0.19/CHANGES.txt:713112
-/hadoop/core/trunk/CHANGES.txt:727001,727117,727191,727212,727228,727255,727869,728187,729052,729987,732385,732572,732613,732777,732838,732869,733887,734870,734916,735082,736426,738602,738697,739416,740077,740157,741703,741762,743296,743745,743816,743892,744894,745180,745268,746010,746193,746206,746227,746233,746274,746902-746903,746944,746968,746970,747279,747802,748084,748090,748783,749262,749318,749863,750533,752073
+/hadoop/core/trunk/CHANGES.txt:727001,727117,727191,727212,727228,727255,727869,728187,729052,729987,732385,732572,732613,732777,732838,732869,733887,734870,734916,735082,736426,738602,738697,739416,740077,740157,741703,741762,743296,743745,743816,743892,744894,745180,745268,746010,746193,746206,746227,746233,746274,746902-746903,746944,746968,746970,747279,747802,748084,748090,748783,749262,749318,749863,750533,752073,752514

Modified: hadoop/core/branches/branch-0.20/src/c++/libhdfs/tests/conf/hdfs-site.xml
URL: http://svn.apache.org/viewvc/hadoop/core/branches/branch-0.20/src/c%2B%2B/libhdfs/tests/conf/hdfs-site.xml?rev=752518&r1=752517&r2=752518&view=diff
==============================================================================
--- hadoop/core/branches/branch-0.20/src/c++/libhdfs/tests/conf/hdfs-site.xml (original)
+++ hadoop/core/branches/branch-0.20/src/c++/libhdfs/tests/conf/hdfs-site.xml Wed Mar 11 16:52:47
2009
@@ -14,4 +14,11 @@
   </description>
 </property>
 
+<property>
+  <name>dfs.support.append</name>
+  <value>true</value>
+  <description>Allow appends to files.
+  </description>
+</property>
+
 </configuration>

Modified: hadoop/core/branches/branch-0.20/src/hdfs/hdfs-default.xml
URL: http://svn.apache.org/viewvc/hadoop/core/branches/branch-0.20/src/hdfs/hdfs-default.xml?rev=752518&r1=752517&r2=752518&view=diff
==============================================================================
--- hadoop/core/branches/branch-0.20/src/hdfs/hdfs-default.xml (original)
+++ hadoop/core/branches/branch-0.20/src/hdfs/hdfs-default.xml Wed Mar 11 16:52:47 2009
@@ -353,4 +353,13 @@
   </description>
 </property>
 
-</configuration>
\ No newline at end of file
+<property>
+  <name>dfs.support.append</name>
+  <value>false</value>
+  <description>Does HDFS allow appends to files?
+               This is currently set to false because there are bugs in the
+               "append code" and it is not supported in any production cluster.
+  </description>
+</property>
+
+</configuration>

Modified: hadoop/core/branches/branch-0.20/src/hdfs/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
URL: http://svn.apache.org/viewvc/hadoop/core/branches/branch-0.20/src/hdfs/org/apache/hadoop/hdfs/protocol/ClientProtocol.java?rev=752518&r1=752517&r2=752518&view=diff
==============================================================================
--- hadoop/core/branches/branch-0.20/src/hdfs/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
(original)
+++ hadoop/core/branches/branch-0.20/src/hdfs/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
Wed Mar 11 16:52:47 2009
@@ -116,6 +116,9 @@
    * @throws AccessControlException if permission to append file is 
    * denied by the system. As usually on the client side the exception will 
    * be wrapped into {@link org.apache.hadoop.ipc.RemoteException}.
+   * Allows appending to an existing file if the server is
+   * configured with the parameter dfs.support.append set to true, otherwise
+   * throws an IOException.
    * @throws IOException if other errors occur.
    */
   public LocatedBlock append(String src, String clientName) throws IOException;

Modified: hadoop/core/branches/branch-0.20/src/hdfs/org/apache/hadoop/hdfs/server/datanode/FSDataset.java
URL: http://svn.apache.org/viewvc/hadoop/core/branches/branch-0.20/src/hdfs/org/apache/hadoop/hdfs/server/datanode/FSDataset.java?rev=752518&r1=752517&r2=752518&view=diff
==============================================================================
--- hadoop/core/branches/branch-0.20/src/hdfs/org/apache/hadoop/hdfs/server/datanode/FSDataset.java
(original)
+++ hadoop/core/branches/branch-0.20/src/hdfs/org/apache/hadoop/hdfs/server/datanode/FSDataset.java
Wed Mar 11 16:52:47 2009
@@ -297,6 +297,7 @@
     
     FSVolume(File currentDir, Configuration conf) throws IOException {
       this.reserved = conf.getLong("dfs.datanode.du.reserved", 0);
+      boolean supportAppends = conf.getBoolean("dfs.support.append", false);
       File parent = currentDir.getParentFile();
 
       this.detachDir = new File(parent, "detach");
@@ -311,7 +312,11 @@
       //
       this.tmpDir = new File(parent, "tmp");
       if (tmpDir.exists()) {
-        recoverDetachedBlocks(currentDir, tmpDir);
+        if (supportAppends) {
+          recoverDetachedBlocks(currentDir, tmpDir);
+        } else {
+          FileUtil.fullyDelete(tmpDir);
+        }
       }
       this.dataDir = new FSDir(currentDir);
       if (!tmpDir.mkdirs()) {

Modified: hadoop/core/branches/branch-0.20/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
URL: http://svn.apache.org/viewvc/hadoop/core/branches/branch-0.20/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java?rev=752518&r1=752517&r2=752518&view=diff
==============================================================================
--- hadoop/core/branches/branch-0.20/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
(original)
+++ hadoop/core/branches/branch-0.20/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
Wed Mar 11 16:52:47 2009
@@ -426,7 +426,7 @@
     this.blockInvalidateLimit = Math.max(this.blockInvalidateLimit, 
                                          20*(int)(heartbeatInterval/1000));
     this.accessTimePrecision = conf.getLong("dfs.access.time.precision", 0);
-    this.supportAppends = conf.getBoolean("dfs.support.append", true);
+    this.supportAppends = conf.getBoolean("dfs.support.append", false);
   }
 
   /**

Modified: hadoop/core/branches/branch-0.20/src/test/org/apache/hadoop/hdfs/TestFileAppend2.java
URL: http://svn.apache.org/viewvc/hadoop/core/branches/branch-0.20/src/test/org/apache/hadoop/hdfs/TestFileAppend2.java?rev=752518&r1=752517&r2=752518&view=diff
==============================================================================
--- hadoop/core/branches/branch-0.20/src/test/org/apache/hadoop/hdfs/TestFileAppend2.java
(original)
+++ hadoop/core/branches/branch-0.20/src/test/org/apache/hadoop/hdfs/TestFileAppend2.java
Wed Mar 11 16:52:47 2009
@@ -128,6 +128,7 @@
       conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);
     }
     conf.setInt("dfs.datanode.handler.count", 50);
+    conf.setBoolean("dfs.support.append", true);
     initBuffer(fileSize);
     MiniDFSCluster cluster = new MiniDFSCluster(conf, 1, true, null);
     FileSystem fs = cluster.getFileSystem();
@@ -377,6 +378,7 @@
     conf.setInt("dfs.socket.timeout", 30000);
     conf.setInt("dfs.datanode.socket.write.timeout", 30000);
     conf.setInt("dfs.datanode.handler.count", 50);
+    conf.setBoolean("dfs.support.append", true);
 
     MiniDFSCluster cluster = new MiniDFSCluster(conf, numDatanodes, 
                                                 true, null);

Modified: hadoop/core/branches/branch-0.20/src/test/org/apache/hadoop/hdfs/TestFileAppend3.java
URL: http://svn.apache.org/viewvc/hadoop/core/branches/branch-0.20/src/test/org/apache/hadoop/hdfs/TestFileAppend3.java?rev=752518&r1=752517&r2=752518&view=diff
==============================================================================
--- hadoop/core/branches/branch-0.20/src/test/org/apache/hadoop/hdfs/TestFileAppend3.java
(original)
+++ hadoop/core/branches/branch-0.20/src/test/org/apache/hadoop/hdfs/TestFileAppend3.java
Wed Mar 11 16:52:47 2009
@@ -52,6 +52,7 @@
         AppendTestUtil.LOG.info("setUp()");
         conf = new Configuration();
         conf.setInt("io.bytes.per.checksum", 512);
+        conf.setBoolean("dfs.support.append", true);
         buffersize = conf.getInt("io.file.buffer.size", 4096);
         cluster = new MiniDFSCluster(conf, DATANODE_NUM, true, null);
         fs = (DistributedFileSystem)cluster.getFileSystem();
@@ -266,4 +267,4 @@
     //c. Reopen file and read 25687+5877 bytes of data from file. Close file.
     AppendTestUtil.check(fs, p, len1 + len2);
   }
-}
\ No newline at end of file
+}

Modified: hadoop/core/branches/branch-0.20/src/test/org/apache/hadoop/hdfs/TestFileCreationDelete.java
URL: http://svn.apache.org/viewvc/hadoop/core/branches/branch-0.20/src/test/org/apache/hadoop/hdfs/TestFileCreationDelete.java?rev=752518&r1=752517&r2=752518&view=diff
==============================================================================
--- hadoop/core/branches/branch-0.20/src/test/org/apache/hadoop/hdfs/TestFileCreationDelete.java
(original)
+++ hadoop/core/branches/branch-0.20/src/test/org/apache/hadoop/hdfs/TestFileCreationDelete.java
Wed Mar 11 16:52:47 2009
@@ -42,6 +42,7 @@
     conf.setInt("ipc.client.connection.maxidletime", MAX_IDLE_TIME);
     conf.setInt("heartbeat.recheck.interval", 1000);
     conf.setInt("dfs.heartbeat.interval", 1);
+    conf.setBoolean("dfs.support.append", true);
 
     // create cluster
     MiniDFSCluster cluster = new MiniDFSCluster(conf, 1, true, null);

Modified: hadoop/core/branches/branch-0.20/src/test/org/apache/hadoop/hdfs/TestLeaseRecovery.java
URL: http://svn.apache.org/viewvc/hadoop/core/branches/branch-0.20/src/test/org/apache/hadoop/hdfs/TestLeaseRecovery.java?rev=752518&r1=752517&r2=752518&view=diff
==============================================================================
--- hadoop/core/branches/branch-0.20/src/test/org/apache/hadoop/hdfs/TestLeaseRecovery.java
(original)
+++ hadoop/core/branches/branch-0.20/src/test/org/apache/hadoop/hdfs/TestLeaseRecovery.java
Wed Mar 11 16:52:47 2009
@@ -58,6 +58,7 @@
     final int ORG_FILE_SIZE = 3000; 
     Configuration conf = new Configuration();
     conf.setLong("dfs.block.size", BLOCK_SIZE);
+    conf.setBoolean("dfs.support.append", true);
     MiniDFSCluster cluster = null;
 
     try {

Modified: hadoop/core/branches/branch-0.20/src/test/org/apache/hadoop/hdfs/TestQuota.java
URL: http://svn.apache.org/viewvc/hadoop/core/branches/branch-0.20/src/test/org/apache/hadoop/hdfs/TestQuota.java?rev=752518&r1=752517&r2=752518&view=diff
==============================================================================
--- hadoop/core/branches/branch-0.20/src/test/org/apache/hadoop/hdfs/TestQuota.java (original)
+++ hadoop/core/branches/branch-0.20/src/test/org/apache/hadoop/hdfs/TestQuota.java Wed Mar
11 16:52:47 2009
@@ -57,6 +57,7 @@
     // set a smaller block size so that we can test with smaller 
     // Space quotas
     conf.set("dfs.block.size", "512");
+    conf.setBoolean("dfs.support.append", true);
     final MiniDFSCluster cluster = new MiniDFSCluster(conf, 2, true, null);
     final FileSystem fs = cluster.getFileSystem();
     assertTrue("Not a HDFS: "+fs.getUri(),
@@ -429,6 +430,7 @@
     // set a smaller block size so that we can test with smaller 
     // diskspace quotas
     conf.set("dfs.block.size", "512");
+    conf.setBoolean("dfs.support.append", true);
     final MiniDFSCluster cluster = new MiniDFSCluster(conf, 2, true, null);
     final FileSystem fs = cluster.getFileSystem();
     assertTrue("Not a HDFS: "+fs.getUri(),

Modified: hadoop/core/branches/branch-0.20/src/test/org/apache/hadoop/hdfs/TestRenameWhileOpen.java
URL: http://svn.apache.org/viewvc/hadoop/core/branches/branch-0.20/src/test/org/apache/hadoop/hdfs/TestRenameWhileOpen.java?rev=752518&r1=752517&r2=752518&view=diff
==============================================================================
--- hadoop/core/branches/branch-0.20/src/test/org/apache/hadoop/hdfs/TestRenameWhileOpen.java
(original)
+++ hadoop/core/branches/branch-0.20/src/test/org/apache/hadoop/hdfs/TestRenameWhileOpen.java
Wed Mar 11 16:52:47 2009
@@ -48,6 +48,7 @@
     conf.setInt("heartbeat.recheck.interval", 1000);
     conf.setInt("dfs.heartbeat.interval", 1);
     conf.setInt("dfs.safemode.threshold.pct", 1);
+    conf.setBoolean("dfs.support.append", true);
 
     // create cluster
     System.out.println("Test 1*****************************");
@@ -120,6 +121,7 @@
     conf.setInt("heartbeat.recheck.interval", 1000);
     conf.setInt("dfs.heartbeat.interval", 1);
     conf.setInt("dfs.safemode.threshold.pct", 1);
+    conf.setBoolean("dfs.support.append", true);
     System.out.println("Test 2************************************");
 
     // create cluster
@@ -192,6 +194,7 @@
     conf.setInt("heartbeat.recheck.interval", 1000);
     conf.setInt("dfs.heartbeat.interval", 1);
     conf.setInt("dfs.safemode.threshold.pct", 1);
+    conf.setBoolean("dfs.support.append", true);
     System.out.println("Test 3************************************");
 
     // create cluster
@@ -254,6 +257,7 @@
     conf.setInt("heartbeat.recheck.interval", 1000);
     conf.setInt("dfs.heartbeat.interval", 1);
     conf.setInt("dfs.safemode.threshold.pct", 1);
+    conf.setBoolean("dfs.support.append", true);
     System.out.println("Test 4************************************");
 
     // create cluster



Mime
View raw message