hadoop-hdfs-commits mailing list archives

From: szets...@apache.org
Subject: svn commit: r1332461 [1/2] - in /hadoop/common/branches/HDFS-3092/hadoop-hdfs-project: hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/ hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/ hadoop-hdfs/ hadoop-hdfs/src/co...
Date: Mon, 30 Apr 2012 23:01:14 GMT
Author: szetszwo
Date: Mon Apr 30 23:01:07 2012
New Revision: 1332461

URL: http://svn.apache.org/viewvc?rev=1332461&view=rev
Log:
Merge r1329944 through r1332459 from trunk.

Added:
    hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/HdfsDataOutputStream.java
      - copied unchanged from r1332459, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/HdfsDataOutputStream.java
    hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/META-INF/services/org.apache.hadoop.fs.FileSystem
      - copied unchanged from r1332459, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/META-INF/services/org.apache.hadoop.fs.FileSystem
    hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileLengthOnClusterRestart.java
      - copied unchanged from r1332459, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileLengthOnClusterRestart.java
Modified:
    hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/   (props changed)
    hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java
    hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSParams.java
    hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/TestHttpFSFileSystem.java
    hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
    hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/README.txt
    hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/pom.xml
    hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/   (props changed)
    hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/Hdfs.java
    hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/ByteRangeInputStream.java
    hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
    hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
    hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
    hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
    hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HftpFileSystem.java
    hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HsftpFileSystem.java
    hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
    hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/resources/DatanodeWebHdfsMethods.java
    hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
    hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
    hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/JournalInfo.java
    hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
    hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/native/   (props changed)
    hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/native/m4/apsupport.m4
    hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/   (props changed)
    hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/   (props changed)
    hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary/   (props changed)
    hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs/   (props changed)
    hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
    hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
    hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlocksScheduledCounter.java
    hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestByteRangeInputStream.java
    hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java
    hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java
    hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHftpFileSystem.java
    hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery2.java
    hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReadWhileWriting.java
    hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplaceDatanodeOnFailure.java
    hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java
    hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestShortCircuitLocalRead.java
    hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestWriteRead.java
    hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java
    hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/net/TestNetworkTopology.java

Propchange: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs:r1329944-1332459

Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java?rev=1332461&r1=1332460&r2=1332461&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java Mon Apr 30 23:01:07 2012
@@ -43,8 +43,8 @@ import java.util.Map;
 public class FSOperations {
 
   /**
-   * Converts a Unix permission octal & symbolic representation
-   * (i.e. 655 or -rwxr--r--) into a FileSystemAccess permission.
+   * Converts a Unix permission octal
+   * (i.e. 655 or 1777) into a FileSystemAccess permission.
    *
    * @param str Unix permission symbolic representation.
    *
@@ -55,10 +55,8 @@ public class FSOperations {
     FsPermission permission;
     if (str.equals(HttpFSFileSystem.DEFAULT_PERMISSION)) {
       permission = FsPermission.getDefault();
-    } else if (str.length() == 3) {
-      permission = new FsPermission(Short.parseShort(str, 8));
     } else {
-      permission = FsPermission.valueOf(str);
+      permission = new FsPermission(Short.parseShort(str, 8));
     }
     return permission;
   }
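
The simplified converter treats anything other than the default marker as an octal short, which is what lets sticky-bit values such as 1777 through. A minimal, self-contained sketch of that logic (hedged: the "default" literal stands in for HttpFSFileSystem.DEFAULT_PERMISSION, and this is not the HttpFS class itself):

  import org.apache.hadoop.fs.permission.FsPermission;

  public class PermissionParseSketch {
    static FsPermission parse(String str) {
      if (str.equals("default")) {            // HttpFSFileSystem.DEFAULT_PERMISSION
        return FsPermission.getDefault();
      }
      // radix-8 parse handles both 3-digit (655) and 4-digit sticky (1777) forms
      return new FsPermission(Short.parseShort(str, 8));
    }

    public static void main(String[] args) {
      System.out.println(parse("655"));   // rw-r-xr-x
      System.out.println(parse("1777"));  // rwxrwxrwt (sticky bit set)
    }
  }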

Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSParams.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSParams.java?rev=1332461&r1=1332460&r2=1332461&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSParams.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSParams.java Mon Apr 30 23:01:07 2012
@@ -446,7 +446,7 @@ public class HttpFSParams {
      * Symbolic Unix permissions regular expression pattern.
      */
     private static final Pattern PERMISSION_PATTERN =
-      Pattern.compile(DEFAULT + "|(-[-r][-w][-x][-r][-w][-x][-r][-w][-x])" + "|[0-7][0-7][0-7]");
+      Pattern.compile(DEFAULT + "|[0-1]?[0-7][0-7][0-7]");
 
     /**
      * Constructor.
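
A quick demonstration of what the tightened pattern accepts: three octal digits with an optional leading sticky-bit digit of 0 or 1, and no longer the symbolic form. (Hedged sketch; "default" stands in for the DEFAULT constant in HttpFSParams.)

  import java.util.regex.Pattern;

  public class PermissionPatternSketch {
    private static final Pattern PERMISSION_PATTERN =
        Pattern.compile("default" + "|[0-1]?[0-7][0-7][0-7]");

    public static void main(String[] args) {
      for (String s : new String[] {"655", "1777", "0644", "-rwxr--r--", "2777"}) {
        System.out.println(s + " -> " + PERMISSION_PATTERN.matcher(s).matches());
      }
      // 655, 1777 and 0644 match; the symbolic form and 2777 do not.
    }
  }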

Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/TestHttpFSFileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/TestHttpFSFileSystem.java?rev=1332461&r1=1332460&r2=1332461&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/TestHttpFSFileSystem.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/TestHttpFSFileSystem.java Mon Apr 30 23:01:07 2012
@@ -310,11 +310,8 @@ public class TestHttpFSFileSystem extend
 
   private void testSetPermission() throws Exception {
     FileSystem fs = FileSystem.get(TestHdfsHelper.getHdfsConf());
-    Path path = new Path(TestHdfsHelper.getHdfsTestDir(), "foo.txt");
-    OutputStream os = fs.create(path);
-    os.write(1);
-    os.close();
-    fs.close();
+    Path path = new Path(TestHdfsHelper.getHdfsTestDir(), "foodir");
+    fs.mkdirs(path);
 
     fs = getHttpFileSystem();
     FsPermission permission1 = new FsPermission(FsAction.READ_WRITE, FsAction.NONE, FsAction.NONE);
@@ -326,6 +323,19 @@ public class TestHttpFSFileSystem extend
     fs.close();
     FsPermission permission2 = status1.getPermission();
     Assert.assertEquals(permission2, permission1);
+
+    //sticky bit 
+    fs = getHttpFileSystem();
+    permission1 = new FsPermission(FsAction.READ_WRITE, FsAction.NONE, FsAction.NONE, true);
+    fs.setPermission(path, permission1);
+    fs.close();
+
+    fs = FileSystem.get(TestHdfsHelper.getHdfsConf());
+    status1 = fs.getFileStatus(path);
+    fs.close();
+    permission2 = status1.getPermission();
+    Assert.assertTrue(permission2.getStickyBit());
+    Assert.assertEquals(permission2, permission1);
   }
 
   private void testSetOwner() throws Exception {

Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1332461&r1=1332460&r2=1332461&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Mon Apr 30 23:01:07 2012
@@ -65,12 +65,17 @@ Trunk (unreleased changes)
     HDFS-3273. Refactor BackupImage and FSEditLog, and rename
     JournalListener.rollLogs(..) to startLogSegment(..).  (szetszwo)
 
-    HDFS-3292. Remove the deprecated DiskStatus, getDiskStatus(), getRawCapacity() and
-    getRawUsed() from DistributedFileSystem.  (Arpit Gupta via szetszwo)
+    HDFS-3292. Remove the deprecated DiskStatus, getDiskStatus(), getRawUsed()
+    and getRawCapacity() from DistributedFileSystem.  (Arpit Gupta via szetszwo)
 
-    HDFS-3282. Expose getFileLength API. (umamahesh)
+    HADOOP-8285. HDFS changes for Use ProtoBuf for RpcPayLoadHeader. (sanjay
+    radia)
 
-    HADOOP-8285 HDFS changes for Use ProtoBuf for RpcPayLoadHeader (sanjay radia)
+    HDFS-2743. Streamline usage of bookkeeper journal manager. 
+    (Ivan Kelly via umamahesh)
+
+    HDFS-3293. Add toString(), equals(..) and hashCode() to JournalInfo.
+    (Hari Mankude via szetszwo)
 
   OPTIMIZATIONS
 
@@ -134,6 +139,8 @@ Trunk (unreleased changes)
     (Henry Robinson via atm)
 
     HDFS-3243. TestParallelRead timing out on jenkins. (Henry Robinson via todd)
+
+    HDFS-3265. PowerPc Build error. (Kumar Ravi via mattf)
     
 Release 2.0.0 - UNRELEASED 
 
@@ -214,6 +221,10 @@ Release 2.0.0 - UNRELEASED 
 
     HDFS-3004. Implement Recovery Mode. (Colin Patrick McCabe via eli)
 
+    HDFS-3282. Add HdfsDataInputStream as a public API. (umamahesh)
+
+    HDFS-3298. Add HdfsDataOutputStream as a public API.  (szetszwo)
+
   IMPROVEMENTS
 
     HDFS-2018. Move all journal stream management code into one place.
@@ -400,6 +411,12 @@ Release 2.0.0 - UNRELEASED 
     HDFS-3169. TestFsck should test multiple -move operations in a row.
     (Colin Patrick McCabe via eli)
 
+    HDFS-3258. Test for HADOOP-8144 (pseudoSortByDistance in
+    NetworkTopology for first rack local node). (Junping Du via eli)
+
+    HDFS-3322. Use HdfsDataInputStream and HdfsDataOutputStream in Hdfs.
+    (szetszwo)
+
   OPTIMIZATIONS
 
     HDFS-3024. Improve performance of stringification in addStoredBlock (todd)
@@ -553,6 +570,17 @@ Release 2.0.0 - UNRELEASED 
 
     HDFS-3314. HttpFS operation for getHomeDirectory is incorrect. (tucu)
 
+    HDFS-3319. Change DFSOutputStream to not to start a thread in constructors.
+    (szetszwo)
+
+    HDFS-3181. Fix a test case in TestLeaseRecovery2.  (szetszwo)
+
+    HDFS-3309. HttpFS (Hoop) chmod not supporting octal and sticky bit 
+    permissions. (tucu)
+
+    HDFS-3326. Append enabled log message uses the wrong variable.
+    (Matthew Jacobs via eli)
+
   BREAKDOWN OF HDFS-1623 SUBTASKS
 
     HDFS-2179. Add fencing framework and mechanisms for NameNode HA. (todd)
@@ -895,6 +923,17 @@ Release 0.23.3 - UNRELEASED
     HDFS-3312. In HftpFileSystem, the namenode URI is non-secure but the
     delegation tokens have to use secure URI.  (Daryn Sharp via szetszwo)
 
+    HDFS-3318. Use BoundedInputStream in ByteRangeInputStream, otherwise, it
+    hangs on transfers >2 GB.  (Daryn Sharp via szetszwo)
+
+    HDFS-3321. Fix safe mode turn off tip message.  (Ravi Prakash via szetszwo)
+
+    HDFS-3334. Fix ByteRangeInputStream stream leakage.  (Daryn Sharp via
+    szetszwo)
+
+    HDFS-3331. In namenode, check superuser privilege for setBalancerBandwidth
+    and acquire the write lock for finalizeUpgrade.  (szetszwo)
+
 Release 0.23.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/README.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/README.txt?rev=1332461&r1=1332460&r2=1332461&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/README.txt (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/README.txt Mon Apr 30 23:01:07 2012
@@ -12,19 +12,25 @@ How do I build?
  To generate the distribution packages for BK journal, do the
  following.
 
-   $ mvn clean install -Pdist -Dtar
+   $ mvn clean package -Pdist
 
- This will generate a tarball, 
- target/hadoop-hdfs-bkjournal-<VERSION>.tar.gz 
+ This will generate a jar with all the dependencies needed by the journal
+ manager, 
+
+ target/hadoop-hdfs-bkjournal-<VERSION>.jar
+
+ Note that the -Pdist part of the build command is important, as otherwise
+ the dependencies would not be packaged in the jar. 
 
 -------------------------------------------------------------------------------
 How do I use the BookKeeper Journal?
 
- To run a HDFS namenode using BookKeeper as a backend, extract the
- distribution package on top of hdfs
+ To run a HDFS namenode using BookKeeper as a backend, copy the bkjournal
+ jar, generated above, into the lib directory of hdfs. In the standard 
+ distribution of HDFS, this is at $HADOOP_HDFS_HOME/share/hadoop/hdfs/lib/
 
-   cd hadoop-hdfs-<VERSION>/
-   tar --strip-components 1 -zxvf path/to/hadoop-hdfs-bkjournal-<VERSION>.tar.gz
+  cp target/hadoop-hdfs-bkjournal-<VERSION>.jar \
+    $HADOOP_HDFS_HOME/share/hadoop/hdfs/lib/
 
  Then, in hdfs-site.xml, set the following properties.
 

Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/pom.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/pom.xml?rev=1332461&r1=1332460&r2=1332461&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/pom.xml (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/pom.xml Mon Apr 30 23:01:07 2012
@@ -65,4 +65,50 @@
       <scope>test</scope>
     </dependency>
   </dependencies>
+  <profiles>
+    <profile>
+      <id>dist</id>
+      <build>
+        <plugins>
+          <plugin>
+            <groupId>org.apache.maven.plugins</groupId>
+            <artifactId>maven-shade-plugin</artifactId>
+            <version>1.5</version>
+            <executions>
+              <execution>
+                <phase>package</phase>
+                <goals>
+                  <goal>shade</goal>
+                </goals>
+                <configuration>
+                  <createDependencyReducedPom>false</createDependencyReducedPom>
+                  <artifactSet>
+                    <includes>
+                      <include>org.apache.bookkeeper:bookkeeper-server</include>
+                      <include>org.apache.zookeeper:zookeeper</include>
+                      <include>org.jboss.netty:netty</include>
+                    </includes>
+                  </artifactSet>
+                <relocations>
+                  <relocation>
+                    <pattern>org.apache.bookkeeper</pattern>
+                    <shadedPattern>hidden.bkjournal.org.apache.bookkeeper</shadedPattern>
+                  </relocation>
+                  <relocation>
+                    <pattern>org.apache.zookeeper</pattern>
+                    <shadedPattern>hidden.bkjournal.org.apache.zookeeper</shadedPattern>
+                  </relocation>
+                  <relocation>
+                    <pattern>org.jboss.netty</pattern>
+                    <shadedPattern>hidden.bkjournal.org.jboss.netty</shadedPattern>
+                  </relocation>
+                </relocations>
+                </configuration>
+              </execution>
+            </executions>
+          </plugin>
+        </plugins>
+      </build>
+    </profile>
+  </profiles>
 </project>

Propchange: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java:r1329944-1332459

Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/Hdfs.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/Hdfs.java?rev=1332461&r1=1332460&r2=1332461&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/Hdfs.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/Hdfs.java Mon Apr 30 23:01:07 2012
@@ -35,6 +35,8 @@ import org.apache.hadoop.hdfs.CorruptFil
 import org.apache.hadoop.hdfs.DFSClient;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
+import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
@@ -43,8 +45,8 @@ import org.apache.hadoop.hdfs.security.t
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.security.AccessControlException;
-import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.SecretManager.InvalidToken;
+import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenIdentifier;
 import org.apache.hadoop.util.Progressable;
 
@@ -88,11 +90,11 @@ public class Hdfs extends AbstractFileSy
   }
 
   @Override
-  public FSDataOutputStream createInternal(Path f,
+  public HdfsDataOutputStream createInternal(Path f,
       EnumSet<CreateFlag> createFlag, FsPermission absolutePermission,
       int bufferSize, short replication, long blockSize, Progressable progress,
       int bytesPerChecksum, boolean createParent) throws IOException {
-    return new FSDataOutputStream(dfs.primitiveCreate(getUriPath(f),
+    return new HdfsDataOutputStream(dfs.primitiveCreate(getUriPath(f),
         absolutePermission, createFlag, createParent, replication, blockSize,
         progress, bufferSize, bytesPerChecksum), getStatistics());
   }
@@ -324,8 +326,9 @@ public class Hdfs extends AbstractFileSy
     dfs.mkdirs(getUriPath(dir), permission, createParent);
   }
 
+  @SuppressWarnings("deprecation")
   @Override
-  public FSDataInputStream open(Path f, int bufferSize) 
+  public HdfsDataInputStream open(Path f, int bufferSize) 
       throws IOException, UnresolvedLinkException {
     return new DFSClient.DFSDataInputStream(dfs.open(getUriPath(f),
         bufferSize, verifyChecksum));

Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/ByteRangeInputStream.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/ByteRangeInputStream.java?rev=1332461&r1=1332460&r2=1332461&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/ByteRangeInputStream.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/ByteRangeInputStream.java Mon Apr 30 23:01:07 2012
@@ -23,9 +23,12 @@ import java.io.InputStream;
 import java.net.HttpURLConnection;
 import java.net.URL;
 
+import org.apache.commons.io.input.BoundedInputStream;
 import org.apache.hadoop.fs.FSInputStream;
 import org.apache.hadoop.hdfs.server.namenode.StreamFile;
 
+import com.google.common.annotations.VisibleForTesting;
+
 /**
  * To support HTTP byte streams, a new connection to an HTTP server needs to be
  * created each time. This class hides the complexity of those multiple 
@@ -60,7 +63,7 @@ public abstract class ByteRangeInputStre
   }
 
   enum StreamStatus {
-    NORMAL, SEEK
+    NORMAL, SEEK, CLOSED
   }
   protected InputStream in;
   protected URLOpener originalURL;
@@ -88,66 +91,93 @@ public abstract class ByteRangeInputStre
   protected abstract URL getResolvedUrl(final HttpURLConnection connection
       ) throws IOException;
 
-  private InputStream getInputStream() throws IOException {
-    if (status != StreamStatus.NORMAL) {
-      
-      if (in != null) {
-        in.close();
-        in = null;
-      }
-      
-      // Use the original url if no resolved url exists, eg. if
-      // it's the first time a request is made.
-      final URLOpener opener =
-        (resolvedURL.getURL() == null) ? originalURL : resolvedURL;
-
-      final HttpURLConnection connection = opener.openConnection(startPos);
-      connection.connect();
-      checkResponseCode(connection);
-
-      final String cl = connection.getHeaderField(StreamFile.CONTENT_LENGTH);
-      filelength = (cl == null) ? -1 : Long.parseLong(cl);
-      in = connection.getInputStream();
-
-      resolvedURL.setURL(getResolvedUrl(connection));
-      status = StreamStatus.NORMAL;
+  @VisibleForTesting
+  protected InputStream getInputStream() throws IOException {
+    switch (status) {
+      case NORMAL:
+        break;
+      case SEEK:
+        if (in != null) {
+          in.close();
+        }
+        in = openInputStream();
+        status = StreamStatus.NORMAL;
+        break;
+      case CLOSED:
+        throw new IOException("Stream closed");
     }
-    
     return in;
   }
   
-  private void update(final boolean isEOF, final int n)
-      throws IOException {
-    if (!isEOF) {
+  @VisibleForTesting
+  protected InputStream openInputStream() throws IOException {
+    // Use the original url if no resolved url exists, eg. if
+    // it's the first time a request is made.
+    final URLOpener opener =
+      (resolvedURL.getURL() == null) ? originalURL : resolvedURL;
+
+    final HttpURLConnection connection = opener.openConnection(startPos);
+    connection.connect();
+    checkResponseCode(connection);
+
+    final String cl = connection.getHeaderField(StreamFile.CONTENT_LENGTH);
+    if (cl == null) {
+      throw new IOException(StreamFile.CONTENT_LENGTH+" header is missing");
+    }
+    final long streamlength = Long.parseLong(cl);
+    filelength = startPos + streamlength;
+    // Java has a bug with >2GB request streams.  It won't bounds check
+    // the reads so the transfer blocks until the server times out
+    InputStream is =
+        new BoundedInputStream(connection.getInputStream(), streamlength);
+
+    resolvedURL.setURL(getResolvedUrl(connection));
+    
+    return is;
+  }
+  
+  private int update(final int n) throws IOException {
+    if (n != -1) {
       currentPos += n;
     } else if (currentPos < filelength) {
       throw new IOException("Got EOF but currentPos = " + currentPos
           + " < filelength = " + filelength);
     }
+    return n;
   }
 
+  @Override
   public int read() throws IOException {
     final int b = getInputStream().read();
-    update(b == -1, 1);
+    update((b == -1) ? -1 : 1);
     return b;
   }
+
+  @Override
+  public int read(byte b[], int off, int len) throws IOException {
+    return update(getInputStream().read(b, off, len));
+  }
   
   /**
    * Seek to the given offset from the start of the file.
    * The next read() will be from that location.  Can't
    * seek past the end of the file.
    */
+  @Override
   public void seek(long pos) throws IOException {
     if (pos != currentPos) {
       startPos = pos;
       currentPos = pos;
-      status = StreamStatus.SEEK;
+      if (status != StreamStatus.CLOSED) {
+        status = StreamStatus.SEEK;
+      }
     }
   }
 
   /**
    * Return the current offset from the start of the file
    */
+  @Override
   public long getPos() throws IOException {
     return currentPos;
   }
@@ -156,7 +186,17 @@ public abstract class ByteRangeInputStre
    * Seeks a different copy of the data.  Returns true if
    * found a new source, false otherwise.
    */
+  @Override
   public boolean seekToNewSource(long targetPos) throws IOException {
     return false;
   }
-}
\ No newline at end of file
+  
+  @Override
+  public void close() throws IOException {
+    if (in != null) {
+      in.close();
+      in = null;
+    }
+    status = StreamStatus.CLOSED;
+  }
+}
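
The BoundedInputStream wrapper is the heart of the >2 GB fix: it caps reads at the advertised Content-Length, so the client hits end-of-stream at the end of the range instead of blocking on the connection. A minimal sketch of the idiom (hedged: assumes commons-io on the classpath; a byte array stands in for the HTTP stream):

  import java.io.ByteArrayInputStream;
  import java.io.IOException;
  import java.io.InputStream;

  import org.apache.commons.io.input.BoundedInputStream;

  public class BoundedReadSketch {
    public static void main(String[] args) throws IOException {
      InputStream raw = new ByteArrayInputStream(new byte[16]);
      long contentLength = 8;  // value parsed from the Content-Length header
      InputStream in = new BoundedInputStream(raw, contentLength);
      int n = 0;
      while (in.read() != -1) {
        n++;
      }
      System.out.println("read " + n + " bytes");  // prints 8, not 16
    }
  }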

Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java?rev=1332461&r1=1332460&r2=1332461&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java Mon Apr 30 23:01:07 2012
@@ -78,7 +78,6 @@ import org.apache.hadoop.fs.BlockLocatio
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.CreateFlag;
-import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileAlreadyExistsException;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FsServerDefaults;
@@ -91,6 +90,7 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.UnresolvedLinkException;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
+import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks;
 import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException;
@@ -996,7 +996,7 @@ public class DFSClient implements java.i
    * Call {@link #create(String, FsPermission, EnumSet, boolean, short, 
    * long, Progressable, int)} with <code>createParent</code> set to true.
    */
-  public OutputStream create(String src, 
+  public DFSOutputStream create(String src, 
                              FsPermission permission,
                              EnumSet<CreateFlag> flag, 
                              short replication,
@@ -1029,7 +1029,7 @@ public class DFSClient implements java.i
    * @see ClientProtocol#create(String, FsPermission, String, EnumSetWritable,
    * boolean, short, long) for detailed description of exceptions thrown
    */
-  public OutputStream create(String src, 
+  public DFSOutputStream create(String src, 
                              FsPermission permission,
                              EnumSet<CreateFlag> flag, 
                              boolean createParent,
@@ -1046,9 +1046,9 @@ public class DFSClient implements java.i
     if(LOG.isDebugEnabled()) {
       LOG.debug(src + ": masked=" + masked);
     }
-    final DFSOutputStream result = new DFSOutputStream(this, src, masked, flag,
-        createParent, replication, blockSize, progress, buffersize,
-        dfsClientConf.createChecksum());
+    final DFSOutputStream result = DFSOutputStream.newStreamForCreate(this,
+        src, masked, flag, createParent, replication, blockSize, progress,
+        buffersize, dfsClientConf.createChecksum());
     leaserenewer.put(src, result, this);
     return result;
   }
@@ -1078,7 +1078,7 @@ public class DFSClient implements java.i
    *  Progressable, int)} except that the permission
    *  is absolute (ie has already been masked with umask.
    */
-  public OutputStream primitiveCreate(String src, 
+  public DFSOutputStream primitiveCreate(String src, 
                              FsPermission absPermission,
                              EnumSet<CreateFlag> flag,
                              boolean createParent,
@@ -1095,7 +1095,7 @@ public class DFSClient implements java.i
       DataChecksum checksum = DataChecksum.newDataChecksum(
           dfsClientConf.checksumType,
           bytesPerChecksum);
-      result = new DFSOutputStream(this, src, absPermission,
+      result = DFSOutputStream.newStreamForCreate(this, src, absPermission,
           flag, createParent, replication, blockSize, progress, buffersize,
           checksum);
     }
@@ -1154,7 +1154,7 @@ public class DFSClient implements java.i
                                      UnsupportedOperationException.class,
                                      UnresolvedPathException.class);
     }
-    return new DFSOutputStream(this, src, buffersize, progress,
+    return DFSOutputStream.newStreamForAppend(this, src, buffersize, progress,
         lastBlock, stat, dfsClientConf.createChecksum());
   }
   
@@ -1169,11 +1169,11 @@ public class DFSClient implements java.i
    * 
    * @see ClientProtocol#append(String, String) 
    */
-  public FSDataOutputStream append(final String src, final int buffersize,
+  public HdfsDataOutputStream append(final String src, final int buffersize,
       final Progressable progress, final FileSystem.Statistics statistics
       ) throws IOException {
     final DFSOutputStream out = append(src, buffersize, progress);
-    return new FSDataOutputStream(out, statistics, out.getInitialLen());
+    return new HdfsDataOutputStream(out, statistics, out.getInitialLen());
   }
 
   private DFSOutputStream append(String src, int buffersize, Progressable progress) 

Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java?rev=1332461&r1=1332460&r2=1332461&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java Mon Apr 30 23:01:07 2012
@@ -118,6 +118,39 @@ public class DFSInputStream extends FSIn
    * Grab the open-file info from namenode
    */
   synchronized void openInfo() throws IOException, UnresolvedLinkException {
+    lastBlockBeingWrittenLength = fetchLocatedBlocksAndGetLastBlockLength();
+    int retriesForLastBlockLength = 3;
+    while (retriesForLastBlockLength > 0) {
+      // Getting last block length as -1 is a special case. When cluster
+      // restarts, DNs may not report immediately. At this time partial block
+      // locations will not be available with NN for getting the length. Lets
+      // retry for 3 times to get the length.
+      if (lastBlockBeingWrittenLength == -1) {
+        DFSClient.LOG.warn("Last block locations not available. "
+            + "Datanodes might not have reported blocks completely."
+            + " Will retry for " + retriesForLastBlockLength + " times");
+        waitFor(4000);
+        lastBlockBeingWrittenLength = fetchLocatedBlocksAndGetLastBlockLength();
+      } else {
+        break;
+      }
+      retriesForLastBlockLength--;
+    }
+    if (retriesForLastBlockLength == 0) {
+      throw new IOException("Could not obtain the last block locations.");
+    }
+  }
+
+  private void waitFor(int waitTime) throws IOException {
+    try {
+      Thread.sleep(waitTime);
+    } catch (InterruptedException e) {
+      throw new IOException(
+          "Interrupted while getting the last block length.");
+    }
+  }
+
+  private long fetchLocatedBlocksAndGetLastBlockLength() throws IOException {
     LocatedBlocks newInfo = DFSClient.callGetBlockLocations(dfsClient.namenode, src, 0, prefetchSize);
     if (DFSClient.LOG.isDebugEnabled()) {
       DFSClient.LOG.debug("newInfo = " + newInfo);
@@ -136,10 +169,13 @@ public class DFSInputStream extends FSIn
       }
     }
     locatedBlocks = newInfo;
-    lastBlockBeingWrittenLength = 0;
+    long lastBlockBeingWrittenLength = 0;
     if (!locatedBlocks.isLastBlockComplete()) {
       final LocatedBlock last = locatedBlocks.getLastLocatedBlock();
       if (last != null) {
+        if (last.getLocations().length == 0) {
+          return -1;
+        }
         final long len = readBlockLength(last);
         last.getBlock().setNumBytes(len);
         lastBlockBeingWrittenLength = len; 
@@ -147,13 +183,12 @@ public class DFSInputStream extends FSIn
     }
 
     currentNode = null;
+    return lastBlockBeingWrittenLength;
   }
 
   /** Read the block length from one of the datanodes. */
   private long readBlockLength(LocatedBlock locatedblock) throws IOException {
-    if (locatedblock == null || locatedblock.getLocations().length == 0) {
-      return 0;
-    }
+    assert locatedblock != null : "LocatedBlock cannot be null";
     int replicaNotFoundCount = locatedblock.getLocations().length;
     
     for(DatanodeInfo datanode : locatedblock.getLocations()) {
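
The new openInfo() applies a simple bounded-retry idiom: a fetch that can return a "not ready yet" sentinel (-1, for a last block whose locations the datanodes have not reported) is retried a fixed number of times with a sleep in between. A hedged, generic sketch of the same idiom (names are illustrative, not the HDFS code):

  import java.util.function.LongSupplier;

  public class BoundedRetrySketch {
    static long fetchWithRetries(LongSupplier fetch, int retries, long sleepMs)
        throws InterruptedException {
      long v = fetch.getAsLong();
      while (v == -1 && retries-- > 0) {
        Thread.sleep(sleepMs);   // give the reporters time to catch up
        v = fetch.getAsLong();
      }
      if (v == -1) {
        throw new IllegalStateException("value not available after retries");
      }
      return v;
    }

    public static void main(String[] args) throws InterruptedException {
      final long[] calls = {0};
      long v = fetchWithRetries(() -> (++calls[0] < 2) ? -1 : 42, 3, 10);
      System.out.println(v);  // 42, after one retry
    }
  }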

Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java?rev=1332461&r1=1332460&r2=1332461&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java Mon Apr 30 23:01:07 2012
@@ -44,7 +44,7 @@ import org.apache.hadoop.fs.ParentNotDir
 import org.apache.hadoop.fs.Syncable;
 import org.apache.hadoop.fs.UnresolvedLinkException;
 import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.hdfs.protocol.ClientProtocol;
+import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
 import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
@@ -99,7 +99,7 @@ import org.apache.hadoop.util.Progressab
  * starts sending packets from the dataQueue.
 ****************************************************************/
 @InterfaceAudience.Private
-class DFSOutputStream extends FSOutputSummer implements Syncable {
+public class DFSOutputStream extends FSOutputSummer implements Syncable {
   private final DFSClient dfsClient;
   private static final int MAX_PACKETS = 80; // each packet 64K, total 5MB
   private Socket s;
@@ -1233,14 +1233,11 @@ class DFSOutputStream extends FSOutputSu
     this.checksum = checksum;
   }
 
-  /**
-   * Create a new output stream to the given DataNode.
-   * @see ClientProtocol#create(String, FsPermission, String, EnumSetWritable, boolean, short, long)
-   */
-  DFSOutputStream(DFSClient dfsClient, String src, FsPermission masked, EnumSet<CreateFlag> flag,
-      boolean createParent, short replication, long blockSize, Progressable progress,
-      int buffersize, DataChecksum checksum) 
-      throws IOException {
+  /** Construct a new output stream for creating a file. */
+  private DFSOutputStream(DFSClient dfsClient, String src, FsPermission masked,
+      EnumSet<CreateFlag> flag, boolean createParent, short replication,
+      long blockSize, Progressable progress, int buffersize,
+      DataChecksum checksum) throws IOException {
     this(dfsClient, src, blockSize, progress, checksum, replication);
 
     computePacketChunkSize(dfsClient.getConf().writePacketSize,
@@ -1260,14 +1257,21 @@ class DFSOutputStream extends FSOutputSu
                                      UnresolvedPathException.class);
     }
     streamer = new DataStreamer();
-    streamer.start();
   }
 
-  /**
-   * Create a new output stream to the given DataNode.
-   * @see ClientProtocol#create(String, FsPermission, String, boolean, short, long)
-   */
-  DFSOutputStream(DFSClient dfsClient, String src, int buffersize, Progressable progress,
+  static DFSOutputStream newStreamForCreate(DFSClient dfsClient, String src,
+      FsPermission masked, EnumSet<CreateFlag> flag, boolean createParent,
+      short replication, long blockSize, Progressable progress, int buffersize,
+      DataChecksum checksum) throws IOException {
+    final DFSOutputStream out = new DFSOutputStream(dfsClient, src, masked,
+        flag, createParent, replication, blockSize, progress, buffersize,
+        checksum);
+    out.streamer.start();
+    return out;
+  }
+
+  /** Construct a new output stream for append. */
+  private DFSOutputStream(DFSClient dfsClient, String src, int buffersize, Progressable progress,
       LocatedBlock lastBlock, HdfsFileStatus stat,
       DataChecksum checksum) throws IOException {
     this(dfsClient, src, stat.getBlockSize(), progress, checksum, stat.getReplication());
@@ -1285,7 +1289,15 @@ class DFSOutputStream extends FSOutputSu
           checksum.getBytesPerChecksum());
       streamer = new DataStreamer();
     }
-    streamer.start();
+  }
+
+  static DFSOutputStream newStreamForAppend(DFSClient dfsClient, String src,
+      int buffersize, Progressable progress, LocatedBlock lastBlock,
+      HdfsFileStatus stat, DataChecksum checksum) throws IOException {
+    final DFSOutputStream out = new DFSOutputStream(dfsClient, src, buffersize,
+        progress, lastBlock, stat, checksum);
+    out.streamer.start();
+    return out;
   }
 
   private void computePacketChunkSize(int psize, int csize) {
@@ -1530,14 +1542,20 @@ class DFSOutputStream extends FSOutputSu
   }
 
   /**
-   * Returns the number of replicas of current block. This can be different
-   * from the designated replication factor of the file because the NameNode
-   * does not replicate the block to which a client is currently writing to.
-   * The client continues to write to a block even if a few datanodes in the
-   * write pipeline have failed. 
-   * @return the number of valid replicas of the current block
+   * @deprecated use {@link HdfsDataOutputStream#getCurrentBlockReplication()}.
    */
+  @Deprecated
   public synchronized int getNumCurrentReplicas() throws IOException {
+    return getCurrentBlockReplication();
+  }
+
+  /**
+   * Note that this is not a public API;
+   * use {@link HdfsDataOutputStream#getCurrentBlockReplication()} instead.
+   * 
+   * @return the number of valid replicas of the current block
+   */
+  public synchronized int getCurrentBlockReplication() throws IOException {
     dfsClient.checkOpen();
     isClosed();
     if (streamer == null) {
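
The HDFS-3319 refactoring above follows the standard safe-publication rule: never start a thread from inside a constructor, because the thread may observe a partially constructed object. The constructors now only build the stream, and the static newStreamForCreate/newStreamForAppend factories start the streamer afterwards. A hedged sketch of the pattern in isolation (illustrative names):

  public class StreamingWorker {
    private final Thread streamer;

    private StreamingWorker() {
      streamer = new Thread(() -> System.out.println("streaming..."));
      // no streamer.start() here: 'this' is not fully constructed yet
    }

    static StreamingWorker newStarted() {
      StreamingWorker w = new StreamingWorker();
      w.streamer.start();  // safe: the constructor has completed
      return w;
    }

    public static void main(String[] args) throws InterruptedException {
      StreamingWorker w = StreamingWorker.newStarted();
      w.streamer.join();
    }
  }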

Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java?rev=1332461&r1=1332460&r2=1332461&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java Mon Apr 30 23:01:07 2012
@@ -33,7 +33,6 @@ import org.apache.hadoop.fs.BlockLocatio
 import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.CreateFlag;
 import org.apache.hadoop.fs.FSDataInputStream;
-import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FsServerDefaults;
@@ -45,8 +44,8 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.PathFilter;
 import org.apache.hadoop.fs.RemoteIterator;
 import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.hdfs.DFSClient.DFSDataInputStream;
 import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
+import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
@@ -89,6 +88,17 @@ public class DistributedFileSystem exten
   public DistributedFileSystem() {
   }
 
+  /**
+   * Return the protocol scheme for the FileSystem.
+   * <p/>
+   *
+   * @return <code>hdfs</code>
+   */
+  @Override
+  public String getScheme() {
+    return "hdfs";
+  }
+
   @Deprecated
   public DistributedFileSystem(InetSocketAddress namenode,
     Configuration conf) throws IOException {
@@ -205,31 +215,33 @@ public class DistributedFileSystem exten
 
   /** This optional operation is not yet supported. */
   @Override
-  public FSDataOutputStream append(Path f, int bufferSize,
+  public HdfsDataOutputStream append(Path f, int bufferSize,
       Progressable progress) throws IOException {
     statistics.incrementWriteOps(1);
     return dfs.append(getPathName(f), bufferSize, progress, statistics);
   }
 
   @Override
-  public FSDataOutputStream create(Path f, FsPermission permission,
+  public HdfsDataOutputStream create(Path f, FsPermission permission,
     boolean overwrite, int bufferSize, short replication, long blockSize,
     Progressable progress) throws IOException {
     statistics.incrementWriteOps(1);
-    return new FSDataOutputStream(dfs.create(getPathName(f), permission,
-        overwrite ? EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE)
-            : EnumSet.of(CreateFlag.CREATE), replication, blockSize, progress,
-        bufferSize), statistics);
+    final EnumSet<CreateFlag> cflags = overwrite?
+        EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE)
+        : EnumSet.of(CreateFlag.CREATE);
+    final DFSOutputStream out = dfs.create(getPathName(f), permission, cflags,
+        replication, blockSize, progress, bufferSize);
+    return new HdfsDataOutputStream(out, statistics);
   }
   
   @SuppressWarnings("deprecation")
   @Override
-  protected FSDataOutputStream primitiveCreate(Path f,
+  protected HdfsDataOutputStream primitiveCreate(Path f,
     FsPermission absolutePermission, EnumSet<CreateFlag> flag, int bufferSize,
     short replication, long blockSize, Progressable progress,
     int bytesPerChecksum) throws IOException {
     statistics.incrementReadOps(1);
-    return new FSDataOutputStream(dfs.primitiveCreate(getPathName(f),
+    return new HdfsDataOutputStream(dfs.primitiveCreate(getPathName(f),
         absolutePermission, flag, true, replication, blockSize,
         progress, bufferSize, bytesPerChecksum),statistics);
    } 
@@ -237,14 +249,14 @@ public class DistributedFileSystem exten
   /**
    * Same as create(), except fails if parent directory doesn't already exist.
    */
-  public FSDataOutputStream createNonRecursive(Path f, FsPermission permission,
+  public HdfsDataOutputStream createNonRecursive(Path f, FsPermission permission,
       EnumSet<CreateFlag> flag, int bufferSize, short replication,
       long blockSize, Progressable progress) throws IOException {
     statistics.incrementWriteOps(1);
     if (flag.contains(CreateFlag.OVERWRITE)) {
       flag.add(CreateFlag.CREATE);
     }
-    return new FSDataOutputStream(dfs.create(getPathName(f), permission, flag,
+    return new HdfsDataOutputStream(dfs.create(getPathName(f), permission, flag,
         false, replication, blockSize, progress, bufferSize), statistics);
   }
 
@@ -625,19 +637,18 @@ public class DistributedFileSystem exten
   // We do not see a need for user to report block checksum errors and do not  
   // want to rely on user to report block corruptions.
   @Deprecated
-  @SuppressWarnings("deprecation")
   public boolean reportChecksumFailure(Path f, 
     FSDataInputStream in, long inPos, 
     FSDataInputStream sums, long sumsPos) {
     
-    if(!(in instanceof DFSDataInputStream && sums instanceof DFSDataInputStream))
-      throw new IllegalArgumentException("Input streams must be types " +
-                                         "of DFSDataInputStream");
+    if(!(in instanceof HdfsDataInputStream && sums instanceof HdfsDataInputStream))
+      throw new IllegalArgumentException(
+          "Input streams must be types of HdfsDataInputStream");
     
     LocatedBlock lblocks[] = new LocatedBlock[2];
 
     // Find block in data stream.
-    DFSClient.DFSDataInputStream dfsIn = (DFSClient.DFSDataInputStream) in;
+    HdfsDataInputStream dfsIn = (HdfsDataInputStream) in;
     ExtendedBlock dataBlock = dfsIn.getCurrentBlock();
     if (dataBlock == null) {
       LOG.error("Error: Current block in data stream is null! ");
@@ -650,7 +661,7 @@ public class DistributedFileSystem exten
         + dataNode[0]);
 
     // Find block in checksum stream
-    DFSClient.DFSDataInputStream dfsSums = (DFSClient.DFSDataInputStream) sums;
+    HdfsDataInputStream dfsSums = (HdfsDataInputStream) sums;
     ExtendedBlock sumsBlock = dfsSums.getCurrentBlock();
     if (sumsBlock == null) {
       LOG.error("Error: Current block in checksum stream is null! ");

Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HftpFileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HftpFileSystem.java?rev=1332461&r1=1332460&r2=1332461&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HftpFileSystem.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HftpFileSystem.java Mon Apr 30 23:01:07 2012
@@ -154,6 +154,17 @@ public class HftpFileSystem extends File
     return SecurityUtil.buildTokenService(nnSecureUri).toString();
   }
 
+  /**
+   * Return the protocol scheme for the FileSystem.
+   * <p/>
+   *
+   * @return <code>hftp</code>
+   */
+  @Override
+  public String getScheme() {
+    return "hftp";
+  }
+
   @Override
   public void initialize(final URI name, final Configuration conf)
   throws IOException {

Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HsftpFileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HsftpFileSystem.java?rev=1332461&r1=1332460&r2=1332461&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HsftpFileSystem.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HsftpFileSystem.java Mon Apr 30 23:01:07 2012
@@ -58,6 +58,17 @@ public class HsftpFileSystem extends Hft
   private static final long MM_SECONDS_PER_DAY = 1000 * 60 * 60 * 24;
   private volatile int ExpWarnDays = 0;
 
+  /**
+   * Return the protocol scheme for the FileSystem.
+   * <p/>
+   *
+   * @return <code>hsftp</code>
+   */
+  @Override
+  public String getScheme() {
+    return "hsftp";
+  }
+
   @Override
   public void initialize(URI name, Configuration conf) throws IOException {
     super.initialize(name, conf);

Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java?rev=1332461&r1=1332460&r2=1332461&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java Mon Apr 30 23:01:07 2012
@@ -651,7 +651,6 @@ public class DatanodeManager {
    * checks if any of the hosts have changed states:
    */
   public void refreshNodes(final Configuration conf) throws IOException {
-    namesystem.checkSuperuserPrivilege();
     refreshHostsReader(conf);
     namesystem.writeLock();
     try {

Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/resources/DatanodeWebHdfsMethods.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/resources/DatanodeWebHdfsMethods.java?rev=1332461&r1=1332460&r2=1332461&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/resources/DatanodeWebHdfsMethods.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/resources/DatanodeWebHdfsMethods.java Mon Apr 30 23:01:07 2012
@@ -50,7 +50,7 @@ import org.apache.hadoop.fs.FSDataOutput
 import org.apache.hadoop.fs.MD5MD5CRC32FileChecksum;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSClient;
-import org.apache.hadoop.hdfs.DFSClient.DFSDataInputStream;
+import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
@@ -398,22 +398,21 @@ public class DatanodeWebHdfsMethods {
     {
       final int b = bufferSize.getValue(conf);
       final DFSClient dfsclient = new DFSClient(nnRpcAddr, conf);
-      DFSDataInputStream in = null;
+      HdfsDataInputStream in = null;
       try {
-        in = new DFSClient.DFSDataInputStream(
-            dfsclient.open(fullpath, b, true));
+        in = new HdfsDataInputStream(dfsclient.open(fullpath, b, true));
         in.seek(offset.getValue());
       } catch(IOException ioe) {
         IOUtils.cleanup(LOG, in);
         IOUtils.cleanup(LOG, dfsclient);
         throw ioe;
       }
-      final DFSDataInputStream dis = in;
+      final HdfsDataInputStream dis = in;
       final StreamingOutput streaming = new StreamingOutput() {
         @Override
         public void write(final OutputStream out) throws IOException {
           final Long n = length.getValue();
-          DFSDataInputStream dfsin = dis;
+          HdfsDataInputStream dfsin = dis;
           DFSClient client = dfsclient;
           try {
             if (n == null) {

Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java?rev=1332461&r1=1332460&r2=1332461&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java Mon Apr 30 23:01:07 2012
@@ -457,7 +457,7 @@ public class FSNamesystem implements Nam
 
       this.accessTimePrecision = conf.getLong(DFS_NAMENODE_ACCESSTIME_PRECISION_KEY, 0);
       this.supportAppends = conf.getBoolean(DFS_SUPPORT_APPEND_KEY, DFS_SUPPORT_APPEND_DEFAULT);
-      LOG.info("Append Enabled: " + haEnabled);
+      LOG.info("Append Enabled: " + supportAppends);
 
       this.dtpReplaceDatanodeOnFailure = ReplaceDatanodeOnFailure.get(conf);
       
@@ -3332,8 +3332,26 @@ public class FSNamesystem implements Nam
   }
     
   void finalizeUpgrade() throws IOException {
+    writeLock();
+    try {
+      checkOperation(OperationCategory.WRITE);
+      checkSuperuserPrivilege();
+      getFSImage().finalizeUpgrade();
+    } finally {
+      writeUnlock();
+    }
+  }
+
+  void refreshNodes() throws IOException {
+    checkOperation(OperationCategory.UNCHECKED);
+    checkSuperuserPrivilege();
+    getBlockManager().getDatanodeManager().refreshNodes(new HdfsConfiguration());
+  }
+
+  void setBalancerBandwidth(long bandwidth) throws IOException {
+    checkOperation(OperationCategory.UNCHECKED);
     checkSuperuserPrivilege();
-    getFSImage().finalizeUpgrade();
+    getBlockManager().getDatanodeManager().setBalancerBandwidth(bandwidth);
   }
 
   /**
@@ -3723,7 +3741,7 @@ public class FSNamesystem implements Nam
           msg += String.format(
             "The number of live datanodes %d needs an additional %d live "
             + "datanodes to reach the minimum number %d.",
-            numLive, (datanodeThreshold - numLive) + 1 , datanodeThreshold);
+            numLive, (datanodeThreshold - numLive), datanodeThreshold);
         }
         msg += " " + leaveMsg;
       } else {

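The FSNamesystem hunks above move the superuser and operation-category checks into the namesystem methods, next to the lock, so every RPC entry point inherits the same guards. A generic, self-contained sketch of that guard-then-mutate shape (all names here are illustrative stand-ins, not HDFS API):

    import java.util.concurrent.locks.ReentrantReadWriteLock;

    public class GuardedMutationSketch {
      private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
      private boolean finalized;

      // Mirrors the shape of the new finalizeUpgrade(): take the write
      // lock, run every check, then mutate, then always release the lock.
      void finalizeUpgrade(boolean callerIsSuperuser) {
        lock.writeLock().lock();
        try {
          if (!callerIsSuperuser) {  // stand-in for checkSuperuserPrivilege()
            throw new SecurityException("superuser privilege is required");
          }
          finalized = true;          // stand-in for the actual upgrade work
        } finally {
          lock.writeLock().unlock();
        }
      }
    }
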
Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java?rev=1332461&r1=1332460&r2=1332461&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java Mon Apr 30 23:01:07 2012
@@ -707,9 +707,7 @@ class NameNodeRpcServer implements Namen
 
   @Override // ClientProtocol
   public void refreshNodes() throws IOException {
-    namesystem.checkOperation(OperationCategory.UNCHECKED);
-    namesystem.getBlockManager().getDatanodeManager().refreshNodes(
-        new HdfsConfiguration());
+    namesystem.refreshNodes();
   }
 
   @Override // NamenodeProtocol
@@ -732,7 +730,6 @@ class NameNodeRpcServer implements Namen
     
   @Override // ClientProtocol
   public void finalizeUpgrade() throws IOException {
-    namesystem.checkOperation(OperationCategory.WRITE);
     namesystem.finalizeUpgrade();
   }
 
@@ -772,8 +769,7 @@ class NameNodeRpcServer implements Namen
    */
   @Override // ClientProtocol
   public void setBalancerBandwidth(long bandwidth) throws IOException {
-    namesystem.checkOperation(OperationCategory.UNCHECKED);
-    namesystem.getBlockManager().getDatanodeManager().setBalancerBandwidth(bandwidth);
+    namesystem.setBalancerBandwidth(bandwidth);
   }
   
   @Override // ClientProtocol

Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/JournalInfo.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/JournalInfo.java?rev=1332461&r1=1332460&r2=1332461&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/JournalInfo.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/JournalInfo.java Mon Apr 30 23:01:07 2012
@@ -45,4 +45,29 @@ public class JournalInfo {
   public int getNamespaceId() {
     return namespaceId;
   }
+  
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder();
+    sb.append("lv=").append(layoutVersion).append(";cid=").append(clusterId)
+    .append(";nsid=").append(namespaceId);
+    return sb.toString();
+  }
+  
+  @Override
+  public boolean equals(Object o) {
+    JournalInfo jInfo;
+    if (!(o instanceof JournalInfo)) {
+      return false;
+    }
+    jInfo = (JournalInfo) o;
+    return ((jInfo.clusterId.equals(this.clusterId))
+        && (jInfo.namespaceId == this.namespaceId)
+        && (jInfo.layoutVersion == this.layoutVersion));
+  }
+  
+  @Override
+  public int hashCode() {
+    return (namespaceId ^ layoutVersion ^ clusterId.hashCode());
+  }
 }

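With equals() and hashCode() overridden together, JournalInfo instances can participate correctly in hash-based collections. A small sketch of the contract; note the three-argument constructor order shown here is an assumption, since the diff only shows the getters and the new overrides:

    import java.util.HashSet;
    import java.util.Set;
    import org.apache.hadoop.hdfs.server.protocol.JournalInfo;

    public class JournalInfoEqualitySketch {
      public static void main(String[] args) {
        // Assumed constructor order: (layoutVersion, clusterId, namespaceId).
        JournalInfo a = new JournalInfo(-40, "CID-test", 1);
        JournalInfo b = new JournalInfo(-40, "CID-test", 1);

        Set<JournalInfo> set = new HashSet<JournalInfo>();
        set.add(a);
        System.out.println(set.contains(b));  // true: equal fields now match
        System.out.println(a);                // lv=-40;cid=CID-test;nsid=1
      }
    }
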
Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java?rev=1332461&r1=1332460&r2=1332461&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java Mon Apr 30 23:01:07 2012
@@ -155,6 +155,17 @@ public class WebHdfsFileSystem extends F
     }
   }
 
+  /**
+   * Return the protocol scheme for the FileSystem.
+   * <p/>
+   *
+   * @return <code>webhdfs</code>
+   */
+  @Override
+  public String getScheme() {
+    return "webhdfs";
+  }
+
   @Override
   public synchronized void initialize(URI uri, Configuration conf
       ) throws IOException {

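Returning "webhdfs" from getScheme() lets the generic FileSystem factory resolve webhdfs:// URIs by scheme. A sketch, assuming the scheme registration is on the classpath; the host and port below are placeholders:

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;

    public class SchemeLookupSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Placeholder authority; any reachable WebHDFS endpoint works.
        FileSystem fs = FileSystem.get(
            URI.create("webhdfs://namenode:50070/"), conf);
        System.out.println(fs.getScheme());  // webhdfs
      }
    }
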
Propchange: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/native/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native:r1329944-1332459

Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/native/m4/apsupport.m4
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/native/m4/apsupport.m4?rev=1332461&r1=1332460&r2=1332461&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/native/m4/apsupport.m4 (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/native/m4/apsupport.m4 Mon Apr 30 23:01:07 2012
@@ -71,7 +71,7 @@ AC_DEFUN([AP_SUPPORTED_HOST],[
   esac
 
   case $host_cpu in
-  powerpc)
+  powerpc*)
     CFLAGS="$CFLAGS -DCPU=\\\"$host_cpu\\\""
     HOST_CPU=$host_cpu;;
   sparc*)

Propchange: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode:r1329944-1332459

Propchange: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs:r1329944-1332459

Propchange: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary:r1329944-1332459

Propchange: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs:r1329944-1332459

Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java?rev=1332461&r1=1332460&r2=1332461&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java Mon Apr 30 23:01:07 2012
@@ -55,8 +55,8 @@ import org.apache.hadoop.fs.FSDataOutput
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileSystem.Statistics;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hdfs.DFSClient.DFSDataInputStream;
 import org.apache.hadoop.hdfs.MiniDFSCluster.NameNodeInfo;
+import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
@@ -515,15 +515,14 @@ public class DFSTestUtil {
   }
   
   public static ExtendedBlock getFirstBlock(FileSystem fs, Path path) throws IOException {
-    DFSDataInputStream in = 
-      (DFSDataInputStream) ((DistributedFileSystem)fs).open(path);
+    HdfsDataInputStream in = (HdfsDataInputStream)((DistributedFileSystem)fs).open(path);
     in.readByte();
     return in.getCurrentBlock();
   }  
 
   public static List<LocatedBlock> getAllBlocks(FSDataInputStream in)
       throws IOException {
-    return ((DFSClient.DFSDataInputStream) in).getAllBlocks();
+    return ((HdfsDataInputStream) in).getAllBlocks();
   }
 
   public static Token<BlockTokenIdentifier> getBlockToken(

Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java?rev=1332461&r1=1332460&r2=1332461&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java Mon Apr 30 23:01:07 2012
@@ -87,7 +87,6 @@ import org.apache.hadoop.hdfs.server.dat
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
-import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
 import org.apache.hadoop.hdfs.tools.DFSAdmin;
 import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
@@ -103,7 +102,6 @@ import org.apache.hadoop.util.ToolRunner
 import com.google.common.base.Joiner;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;
-import com.google.common.io.Files;
 
 /**
  * This class creates a single-process DFS cluster for junit testing.
@@ -1588,7 +1586,7 @@ public class MiniDFSCluster {
   /**
    * Get a client handle to the DFS cluster with a single namenode.
    */
-  public FileSystem getFileSystem() throws IOException {
+  public DistributedFileSystem getFileSystem() throws IOException {
     checkSingleNameNode();
     return getFileSystem(0);
   }
@@ -1596,8 +1594,9 @@ public class MiniDFSCluster {
   /**
    * Get a client handle to the DFS cluster for the namenode at given index.
    */
-  public FileSystem getFileSystem(int nnIndex) throws IOException {
-    return FileSystem.get(getURI(nnIndex), nameNodes[nnIndex].conf);
+  public DistributedFileSystem getFileSystem(int nnIndex) throws IOException {
+    return (DistributedFileSystem)FileSystem.get(getURI(nnIndex),
+        nameNodes[nnIndex].conf);
   }
 
   /**

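Narrowing MiniDFSCluster.getFileSystem() to return DistributedFileSystem removes a cast from every HDFS-specific test (see the TestLeaseRecovery2 hunk below). A minimal usage sketch of the covariant return:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DistributedFileSystem;
    import org.apache.hadoop.hdfs.HdfsConfiguration;
    import org.apache.hadoop.hdfs.MiniDFSCluster;

    public class CovariantReturnSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new HdfsConfiguration();
        MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
        try {
          cluster.waitActive();
          DistributedFileSystem dfs = cluster.getFileSystem();  // no cast
          System.out.println(dfs.getUri());
        } finally {
          cluster.shutdown();  // tear down the in-process cluster
        }
      }
    }
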
Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlocksScheduledCounter.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlocksScheduledCounter.java?rev=1332461&r1=1332460&r2=1332461&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlocksScheduledCounter.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlocksScheduledCounter.java Mon Apr 30 23:01:07 2012
@@ -48,7 +48,7 @@ public class TestBlocksScheduledCounter 
       out.write(i);
     }
     // flush to make sure a block is allocated.
-    ((DFSOutputStream)(out.getWrappedStream())).hflush();
+    out.hflush();
     
     ArrayList<DatanodeDescriptor> dnList = new ArrayList<DatanodeDescriptor>();
     final DatanodeManager dm = cluster.getNamesystem().getBlockManager(

Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestByteRangeInputStream.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestByteRangeInputStream.java?rev=1332461&r1=1332460&r2=1332461&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestByteRangeInputStream.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestByteRangeInputStream.java Mon Apr 30 23:01:07 2012
@@ -19,8 +19,10 @@ package org.apache.hadoop.hdfs;
 
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 import static org.mockito.Mockito.doReturn;
+import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.spy;
 import static org.mockito.Mockito.times;
 import static org.mockito.Mockito.verify;
@@ -31,6 +33,7 @@ import java.io.InputStream;
 import java.net.HttpURLConnection;
 import java.net.URL;
 
+import org.apache.hadoop.hdfs.server.namenode.StreamFile;
 import org.junit.Test;
 
 public class TestByteRangeInputStream {
@@ -84,6 +87,11 @@ public static class MockHttpURLConnectio
   public void setResponseCode(int resCode) {
     responseCode = resCode;
   }
+  
+  @Override
+  public String getHeaderField(String field) {
+    return (field.equalsIgnoreCase(StreamFile.CONTENT_LENGTH)) ? "65535" : null;
+  }
 }
   
   @Test
@@ -163,4 +171,74 @@ public static class MockHttpURLConnectio
                    "HTTP_OK expected, received 206", e.getMessage());
     }
   }
+  
+  @Test
+  public void testPropagatedClose() throws IOException {
+    ByteRangeInputStream brs = spy(
+        new HftpFileSystem.RangeHeaderInputStream(new URL("http://test/")));
+    
+    InputStream mockStream = mock(InputStream.class);
+    doReturn(mockStream).when(brs).openInputStream();
+
+    int brisOpens = 0;
+    int brisCloses = 0;
+    int isCloses = 0;
+    
+    // first open, shouldn't close underlying stream
+    brs.getInputStream();
+    verify(brs, times(++brisOpens)).openInputStream();
+    verify(brs, times(brisCloses)).close();
+    verify(mockStream, times(isCloses)).close();
+    
+    // stream is open, shouldn't close underlying stream
+    brs.getInputStream();
+    verify(brs, times(brisOpens)).openInputStream();
+    verify(brs, times(brisCloses)).close();
+    verify(mockStream, times(isCloses)).close();
+    
+    // seek forces a reopen, should close underlying stream
+    brs.seek(1);
+    brs.getInputStream();
+    verify(brs, times(++brisOpens)).openInputStream();
+    verify(brs, times(brisCloses)).close();
+    verify(mockStream, times(++isCloses)).close();
+
+    // verify that the underlying stream isn't closed after a seek
+    // ie. the state was correctly updated
+    brs.getInputStream();
+    verify(brs, times(brisOpens)).openInputStream();
+    verify(brs, times(brisCloses)).close();
+    verify(mockStream, times(isCloses)).close();
+
+    // seeking to same location should be a no-op
+    brs.seek(1);
+    brs.getInputStream();
+    verify(brs, times(brisOpens)).openInputStream();
+    verify(brs, times(brisCloses)).close();
+    verify(mockStream, times(isCloses)).close();
+
+    // close should of course close
+    brs.close();
+    verify(brs, times(++brisCloses)).close();
+    verify(mockStream, times(++isCloses)).close();
+    
+    // it's already closed, underlying stream should not close
+    brs.close();
+    verify(brs, times(++brisCloses)).close();
+    verify(mockStream, times(isCloses)).close();
+    
+    // it's closed, don't reopen it
+    boolean errored = false;
+    try {
+      brs.getInputStream();
+    } catch (IOException e) {
+      errored = true;
+      assertEquals("Stream closed", e.getMessage());
+    } finally {
+      assertTrue("Read a closed steam", errored);
+    }
+    verify(brs, times(brisOpens)).openInputStream();
+    verify(brs, times(brisCloses)).close();
+    verify(mockStream, times(isCloses)).close();
+  }
 }

Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java?rev=1332461&r1=1332460&r2=1332461&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java Mon Apr 30 23:01:07 2012
@@ -34,6 +34,7 @@ import org.apache.hadoop.fs.CommonConfig
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates;
@@ -143,10 +144,10 @@ public class TestDecommission {
     String downnode, int numDatanodes) throws IOException {
     boolean isNodeDown = (downnode != null);
     // need a raw stream
-    assertTrue("Not HDFS:"+fileSys.getUri(), 
-    fileSys instanceof DistributedFileSystem);
-    DFSClient.DFSDataInputStream dis = (DFSClient.DFSDataInputStream)
-      ((DistributedFileSystem)fileSys).open(name);
+    assertTrue("Not HDFS:"+fileSys.getUri(),
+        fileSys instanceof DistributedFileSystem);
+    HdfsDataInputStream dis = (HdfsDataInputStream)
+        ((DistributedFileSystem)fileSys).open(name);
     Collection<LocatedBlock> dinfo = dis.getAllBlocks();
     for (LocatedBlock blk : dinfo) { // for each block
       int hasdown = 0;

Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java?rev=1332461&r1=1332460&r2=1332461&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java Mon Apr 30 23:01:07 2012
@@ -31,6 +31,7 @@ import static org.apache.hadoop.hdfs.DFS
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_REPLICATION_MIN_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_KEY;
+import static org.junit.Assume.assumeTrue;
 
 import java.io.BufferedReader;
 import java.io.File;
@@ -53,6 +54,7 @@ import org.apache.hadoop.fs.FsServerDefa
 import org.apache.hadoop.fs.ParentNotDirectoryException;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
@@ -68,8 +70,6 @@ import org.apache.hadoop.hdfs.server.nam
 import org.apache.hadoop.io.IOUtils;
 import org.apache.log4j.Level;
 
-import static org.junit.Assume.assumeTrue;
-
 /**
  * This class tests various cases during file creation.
  */
@@ -99,6 +99,11 @@ public class TestFileCreation extends ju
     return stm;
   }
 
+  public static HdfsDataOutputStream create(DistributedFileSystem dfs,
+      Path name, int repl) throws IOException {
+    return (HdfsDataOutputStream)createFile(dfs, name, repl);
+  }
+
   //
   // writes to file but does not close it
   //
@@ -494,7 +499,7 @@ public class TestFileCreation extends ju
 
     // create cluster
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
-    FileSystem fs = null;
+    DistributedFileSystem fs = null;
     try {
       cluster.waitActive();
       fs = cluster.getFileSystem();
@@ -502,21 +507,17 @@ public class TestFileCreation extends ju
 
       // create a new file.
       Path file1 = new Path("/filestatus.dat");
-      FSDataOutputStream stm = createFile(fs, file1, 1);
+      HdfsDataOutputStream stm = create(fs, file1, 1);
       System.out.println("testFileCreationNamenodeRestart: "
                          + "Created file " + file1);
-      int actualRepl = ((DFSOutputStream)(stm.getWrappedStream())).
-                        getNumCurrentReplicas();
-      assertTrue(file1 + " should be replicated to 1 datanodes.",
-                 actualRepl == 1);
+      assertEquals(file1 + " should be replicated to 1 datanode.", 1,
+          stm.getCurrentBlockReplication());
 
       // write two full blocks.
       writeFile(stm, numBlocks * blockSize);
       stm.hflush();
-      actualRepl = ((DFSOutputStream)(stm.getWrappedStream())).
-                        getNumCurrentReplicas();
-      assertTrue(file1 + " should still be replicated to 1 datanodes.",
-                 actualRepl == 1);
+      assertEquals(file1 + " should still be replicated to 1 datanode.", 1,
+          stm.getCurrentBlockReplication());
 
       // rename file while keeping it open.
       Path fileRenamed = new Path("/filestatusRenamed.dat");
@@ -849,11 +850,10 @@ public class TestFileCreation extends ju
       // create a new file.
       final String f = DIR + "foo";
       final Path fpath = new Path(f);
-      FSDataOutputStream out = TestFileCreation.createFile(dfs, fpath, DATANODE_NUM);
+      HdfsDataOutputStream out = create(dfs, fpath, DATANODE_NUM);
       out.write("something".getBytes());
       out.hflush();
-      int actualRepl = ((DFSOutputStream)(out.getWrappedStream())).
-                        getNumCurrentReplicas();
+      int actualRepl = out.getCurrentBlockReplication();
       assertTrue(f + " should be replicated to " + DATANODE_NUM + " datanodes.",
                  actualRepl == DATANODE_NUM);
 

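The TestFileCreation hunks above replace casts to the internal DFSOutputStream with the public HdfsDataOutputStream API, and getNumCurrentReplicas() with getCurrentBlockReplication(). A sketch of probing pipeline replication through the public type (class and method names here are illustrative; the cast mirrors the test helper above):

    import java.io.IOException;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;
    import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;

    public class ReplicationProbeSketch {
      // Query the live pipeline width through the public stream type
      // instead of casting the wrapped stream to DFSOutputStream.
      static int currentReplication(DistributedFileSystem dfs, Path p)
          throws IOException {
        HdfsDataOutputStream out = (HdfsDataOutputStream) dfs.create(p);
        try {
          out.write(1);
          out.hflush();  // flush so a block is allocated first
          return out.getCurrentBlockReplication();
        } finally {
          out.close();
        }
      }
    }
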
Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHftpFileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHftpFileSystem.java?rev=1332461&r1=1332460&r2=1332461&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHftpFileSystem.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHftpFileSystem.java Mon Apr 30 23:01:07 2012
@@ -19,6 +19,7 @@
 package org.apache.hadoop.hdfs;
 
 import java.io.IOException;
+import java.io.InputStream;
 import java.net.URISyntaxException;
 import java.net.URI;
 import java.net.URL;
@@ -234,6 +235,45 @@ public class TestHftpFileSystem {
     assertEquals('7', in.read());
   }
 
+  @Test
+  public void testReadClosedStream() throws IOException {
+    final Path testFile = new Path("/testfile+2");
+    FSDataOutputStream os = hdfs.create(testFile, true);
+    os.writeBytes("0123456789");
+    os.close();
+
+    // ByteRangeInputStream delays opens until reads.  Make sure it doesn't
+    // open a closed stream that has never been opened
+    FSDataInputStream in = hftpFs.open(testFile);
+    in.close();
+    checkClosedStream(in);
+    checkClosedStream(in.getWrappedStream());
+    
+    // force the stream to connect and then close it
+    in = hftpFs.open(testFile);
+    int ch = in.read(); 
+    assertEquals('0', ch);
+    in.close();
+    checkClosedStream(in);
+    checkClosedStream(in.getWrappedStream());
+    
+    // make sure seeking doesn't automagically reopen the stream
+    in.seek(4);
+    checkClosedStream(in);
+    checkClosedStream(in.getWrappedStream());
+  }
+  
+  private void checkClosedStream(InputStream is) {
+    IOException ioe = null;
+    try {
+      is.read();
+    } catch (IOException e) {
+      ioe = e;
+    }
+    assertNotNull("No exception on closed read", ioe);
+    assertEquals("Stream closed", ioe.getMessage());
+  }
+  
   public void resetFileSystem() throws IOException {
     // filesystem caching has a quirk/bug that it caches based on the user's
     // given uri.  the result is if a filesystem is instantiated with no port,

Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery2.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery2.java?rev=1332461&r1=1332460&r2=1332461&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery2.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery2.java Mon Apr 30 23:01:07 2012
@@ -36,6 +36,7 @@ import org.apache.hadoop.fs.FSDataOutput
 import org.apache.hadoop.fs.FileAlreadyExistsException;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
 import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
@@ -49,6 +50,7 @@ import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.log4j.Level;
 import org.junit.AfterClass;
+import org.junit.Assert;
 import org.junit.BeforeClass;
 import org.junit.Test;
 
@@ -90,7 +92,7 @@ public class TestLeaseRecovery2 {
 
     cluster = new MiniDFSCluster.Builder(conf).numDataNodes(5).build();
     cluster.waitActive();
-    dfs = (DistributedFileSystem)cluster.getFileSystem();
+    dfs = cluster.getFileSystem();
   }
   
   /**
@@ -406,17 +408,26 @@ public class TestLeaseRecovery2 {
    */
   @Test
   public void testHardLeaseRecoveryAfterNameNodeRestart() throws Exception {
-    hardLeaseRecoveryRestartHelper(false);
+    hardLeaseRecoveryRestartHelper(false, -1);
   }
-  
+
+  @Test
+  public void testHardLeaseRecoveryAfterNameNodeRestart2() throws Exception {
+    hardLeaseRecoveryRestartHelper(false, 1535);
+  }
+
   @Test
   public void testHardLeaseRecoveryWithRenameAfterNameNodeRestart()
       throws Exception {
-    hardLeaseRecoveryRestartHelper(true);
+    hardLeaseRecoveryRestartHelper(true, -1);
   }
   
-  public void hardLeaseRecoveryRestartHelper(boolean doRename)
+  public void hardLeaseRecoveryRestartHelper(boolean doRename, int size)
       throws Exception {
+    if (size < 0) {
+      size = AppendTestUtil.nextInt(FILE_SIZE + 1);
+    }
+
     //create a file
     String fileStr = "/hardLeaseRecovery";
     AppendTestUtil.LOG.info("filestr=" + fileStr);
@@ -426,7 +437,6 @@ public class TestLeaseRecovery2 {
     assertTrue(dfs.dfs.exists(fileStr));
 
     // write bytes into the file.
-    int size = AppendTestUtil.nextInt(FILE_SIZE);
     AppendTestUtil.LOG.info("size=" + size);
     stm.write(buffer, 0, size);
     
@@ -440,6 +450,11 @@ public class TestLeaseRecovery2 {
     AppendTestUtil.LOG.info("hflush");
     stm.hflush();
     
+    // check visible length
+    final HdfsDataInputStream in = (HdfsDataInputStream)dfs.open(filePath);
+    Assert.assertEquals(size, in.getVisibleLength());
+    in.close();
+    
     if (doRename) {
       fileStr += ".renamed";
       Path renamedPath = new Path(fileStr);
@@ -463,14 +478,11 @@ public class TestLeaseRecovery2 {
     // Make sure lease recovery begins.
     Thread.sleep(HdfsServerConstants.NAMENODE_LEASE_RECHECK_INTERVAL * 2);
     
-    assertEquals("lease holder should now be the NN", HdfsServerConstants.NAMENODE_LEASE_HOLDER,
-        NameNodeAdapter.getLeaseHolderForPath(cluster.getNameNode(), fileStr));
+    checkLease(fileStr, size);
     
     cluster.restartNameNode(false);
     
-    assertEquals("lease holder should still be the NN after restart",
-        HdfsServerConstants.NAMENODE_LEASE_HOLDER,
-        NameNodeAdapter.getLeaseHolderForPath(cluster.getNameNode(), fileStr));
+    checkLease(fileStr, size);
     
     // Let the DNs send heartbeats again.
     for (DataNode dn : cluster.getDataNodes()) {
@@ -492,12 +504,12 @@ public class TestLeaseRecovery2 {
     assertEquals(size, locatedBlocks.getFileLength());
 
     // make sure that the client can't write data anymore.
-    stm.write('b');
     try {
+      stm.write('b');
       stm.hflush();
       fail("Should not be able to flush after we've lost the lease");
     } catch (IOException e) {
-      LOG.info("Expceted exception on hflush", e);
+      LOG.info("Expceted exception on write/hflush", e);
     }
     
     try {
@@ -512,4 +524,16 @@ public class TestLeaseRecovery2 {
         "File size is good. Now validating sizes from datanodes...");
     AppendTestUtil.checkFullFile(dfs, filePath, size, buffer, fileStr);
   }
+  
+  static void checkLease(String f, int size) {
+    final String holder = NameNodeAdapter.getLeaseHolderForPath(
+        cluster.getNameNode(), f); 
+    if (size == 0) {
+      assertEquals("lease holder should null, file is closed", null, holder);
+    } else {
+      assertEquals("lease holder should now be the NN",
+          HdfsServerConstants.NAMENODE_LEASE_HOLDER, holder);
+    }
+    
+  }
 }

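The new assertion above checks HdfsDataInputStream.getVisibleLength() right after hflush(): a reader opened while the file is still under lease should see the flushed size. A sketch of the reader side (names are illustrative; the cast mirrors the test):

    import java.io.IOException;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;
    import org.apache.hadoop.hdfs.client.HdfsDataInputStream;

    public class VisibleLengthSketch {
      // Bytes acknowledged by the write pipeline are visible to readers
      // even though the file has not been closed yet.
      static long visibleLength(DistributedFileSystem dfs, Path p)
          throws IOException {
        HdfsDataInputStream in = (HdfsDataInputStream) dfs.open(p);
        try {
          return in.getVisibleLength();
        } finally {
          in.close();
        }
      }
    }
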
Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReadWhileWriting.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReadWhileWriting.java?rev=1332461&r1=1332460&r2=1332461&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReadWhileWriting.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReadWhileWriting.java Mon Apr 30 23:01:07 2012
@@ -28,7 +28,7 @@ import org.apache.hadoop.fs.CommonConfig
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hdfs.DFSClient.DFSDataInputStream;
+import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
 import org.apache.hadoop.hdfs.protocol.RecoveryInProgressException;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.ipc.RemoteException;
@@ -147,7 +147,7 @@ public class TestReadWhileWriting {
     
     final FileSystem fs = DFSTestUtil.getFileSystemAs(ugi, conf);
     
-    final DFSDataInputStream in = (DFSDataInputStream)fs.open(p);
+    final HdfsDataInputStream in = (HdfsDataInputStream)fs.open(p);
 
     //Check visible length
     Assert.assertTrue(in.getVisibleLength() >= expectedsize);


