hadoop-hdfs-commits mailing list archives

From szets...@apache.org
Subject svn commit: r1240450 [1/2] - in /hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs: ./ src/main/java/ src/main/java/org/apache/hadoop/hdfs/ src/main/java/org/apache/hadoop/hdfs/security/token/delegation/ src/main/java/org/apache/had...
Date Sat, 04 Feb 2012 03:40:54 GMT
Author: szetszwo
Date: Sat Feb  4 03:40:45 2012
New Revision: 1240450

URL: http://svn.apache.org/viewvc?rev=1240450&view=rev
Log:
Merge r1239398 through r1240449 from 0.23.

Added:
    hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DatanodeUtil.java
      - copied unchanged from r1240449, hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DatanodeUtil.java
    hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/ssl-client.xml
      - copied unchanged from r1240449, hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/ssl-client.xml
Modified:
    hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/   (props changed)
    hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
    hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/   (props changed)
    hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
    hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
    hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
    hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HftpFileSystem.java
    hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HsftpFileSystem.java
    hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSecretManager.java
    hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoUnderConstruction.java
    hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
    hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/JspHelper.java
    hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java
    hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
    hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java
    hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInfo.java
    hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
    hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
    hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
    hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileUnderConstruction.java
    hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DelegationTokenFetcher.java
    hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/native/   (props changed)
    hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/   (props changed)
    hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/   (props changed)
    hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary/   (props changed)
    hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs/   (props changed)
    hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
    hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAbandonBlock.java
    hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientReportBadBlock.java
    hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
    hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend.java
    hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend3.java
    hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java
    hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHftpFileSystem.java
    hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java
    hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java
    hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java

Propchange: hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Sat Feb  4 03:40:45 2012
@@ -1,5 +1,5 @@
-/hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs:1227776-1239397
-/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs:1161777,1161781,1161992,1162188,1162421,1162491,1162499,1162613,1162928,1162954,1162979,1163050,1163069,1163081,1163490,1163768,1164255,1164301,1164339,1165826,1166402,1166466,1167383,1167662,1170085,1170379,1170459,1170996,1171136,1171297,1171379,1171611,1171711,1172916,1173402,1173468,1173488,1175113,1176178,1176550,1176719,1176729,1176733,1177100,1177161,1177487,1177531,1177757,1177859,1177864,1177905,1179169,1179856,1179861,1180757,1182189,1182205,1182214,1183081,1183098,1183175,1183554,1186508,1186896,1187140,1187505,1188282,1188286,1188300,1188436,1188487,1189028,1189355,1189360,1189546,1189901,1189932,1189982,1190077,1190127,1190620,1190708,1195575,1195656,1195731,1195754,1196113,1196129,1196171,1197329,1198903,1199396,1200731,1204114,1204117,1204122,1204124,1204129,1204131,1204177,1204366,1204370,1204376,1204388,1204544,1204707,1204709,1204825,1205146,1205260,1205626,1205697,1206178,1206786,1206830,1207585,1207694,
 1208140,1208153,1208313,1210208,1210657,1210719,1210746,1211206,1211249,1211769,1212021,1212062,1212073,1212084,1212299,1212606,1213040,1213143,1213537,1213586,1213592-1213593,1213808,1213813,1213954,1213985,1214027,1214033,1214046,1214102-1214103,1214128,1215364,1215366,1220315,1220510,1221106,1221348,1225114,1225192,1225456,1225489,1225591,1226211,1226239,1226350,1227091,1227165,1227423,1227887,1227964,1229347,1229877,1229897,1230398,1231569,1231572,1231627,1231640,1233584,1233605,1234555,1235135,1235137,1235956,1236456,1238700,1238779,1238969
+/hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs:1227776-1240449
+/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs:1161777,1161781,1161992,1162188,1162421,1162491,1162499,1162613,1162928,1162954,1162979,1163050,1163069,1163081,1163490,1163768,1164255,1164301,1164339,1165826,1166402,1166466,1167383,1167662,1170085,1170379,1170459,1170996,1171136,1171297,1171379,1171611,1171711,1172916,1173402,1173468,1173488,1175113,1176178,1176550,1176719,1176729,1176733,1177100,1177161,1177487,1177531,1177757,1177859,1177864,1177905,1179169,1179856,1179861,1180757,1182189,1182205,1182214,1183081,1183098,1183175,1183554,1186508,1186896,1187140,1187505,1188282,1188286,1188300,1188436,1188487,1189028,1189355,1189360,1189546,1189901,1189932,1189982,1190077,1190127,1190620,1190708,1195575,1195656,1195731,1195754,1196113,1196129,1196171,1197329,1198903,1199396,1200731,1204114,1204117,1204122,1204124,1204129,1204131,1204177,1204366,1204370,1204376,1204388,1204544,1204707,1204709,1204825,1205146,1205260,1205626,1205697,1206178,1206786,1206830,1207585,1207694,
 1208140,1208153,1208313,1210208,1210657,1210719,1210746,1211206,1211249,1211769,1212021,1212062,1212073,1212084,1212299,1212606,1213040,1213143,1213537,1213586,1213592-1213593,1213808,1213813,1213954,1213985,1214027,1214033,1214046,1214102-1214103,1214128,1215364,1215366,1220315,1220510,1221106,1221348,1225114,1225192,1225456,1225489,1225591,1226211,1226239,1226350,1227091,1227165,1227423,1227887,1227964,1229347,1229877,1229897,1230398,1231569,1231572,1231627,1231640,1233584,1233605,1234555,1235135,1235137,1235956,1236456,1238700,1238779,1238969,1239752,1240020
 /hadoop/core/branches/branch-0.19/hdfs:713112
 /hadoop/hdfs/branches/HDFS-1052:987665-1095512
 /hadoop/hdfs/branches/HDFS-265:796829-820463

Modified: hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1240450&r1=1240449&r2=1240450&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original)
+++ hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Sat Feb  4 03:40:45 2012
@@ -208,6 +208,12 @@ Release 0.23.1 - UNRELEASED
 
     HDFS-2397. Undeprecate SecondaryNameNode (eli)
 
+    HDFS-2814. NamenodeMXBean does not account for svn revision in the version 
+    information. (Hitesh Shah via jitendra)
+
+    HDFS-2784. Update hftp and hdfs for host-based token support.
+    (Kihwal Lee via jitendra)
+
   OPTIMIZATIONS
 
     HDFS-2130. Switch default checksum to CRC32C. (todd)
@@ -231,6 +237,8 @@ Release 0.23.1 - UNRELEASED
     HDFS-2864. Remove some redundant methods and the constant METADATA_VERSION
     from FSDataset.  (szetszwo)
 
+    HDFS-2879. Change FSDataset to package private.  (szetszwo)
+
   BUG FIXES
 
     HDFS-2541. For a sufficiently large value of blocks, the DN Scanner 
@@ -313,6 +321,9 @@ Release 0.23.1 - UNRELEASED
     HDFS-2835. Fix findbugs and javadoc issue with GetConf.java.
     (suresh)
 
+    HDFS-2889. getNumCurrentReplicas is package private but should be public on
+    0.23 (see HDFS-2408). (Gregory Chanan via atm)
+
 Release 0.23.0 - 2011-11-01 
 
   INCOMPATIBLE CHANGES
@@ -1654,8 +1665,13 @@ Release 0.22.1 - Unreleased
 
   OPTIMIZATIONS
 
+    HDFS-2718. Optimize OP_ADD in edits loading. (shv)
+
   BUG FIXES
 
+   HDFS-2877. If locking of a storage dir fails, it will remove the other
+   NN's lock file on exit. (todd)
+
 Release 0.22.0 - 2011-11-29
 
   INCOMPATIBLE CHANGES

Propchange: hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Sat Feb  4 03:40:45 2012
@@ -1,5 +1,5 @@
-/hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java:1227776-1239397
-/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java:1161777,1161781,1161992,1162188,1162421,1162491,1162499,1162613,1162928,1162954,1162979,1163050,1163069,1163081,1163490,1163768,1164255,1164301,1164339,1165826,1166402,1166466,1167383,1167662,1170085,1170379,1170459,1170996,1171136,1171297,1171379,1171611,1171711,1172916,1173402,1173468,1173488,1175113,1176178,1176550,1176719,1176729,1176733,1177100,1177161,1177487,1177531,1177757,1177859,1177864,1177905,1179169,1179856,1179861,1180757,1182189,1182205,1182214,1183081,1183098,1183175,1183554,1186508,1186896,1187140,1187505,1188282,1188286,1188300,1188436,1188487,1189028,1189355,1189360,1189546,1189901,1189932,1189982,1190077,1190127,1190620,1190708,1195575,1195656,1195731,1195754,1196113,1196129,1196171,1197329,1198903,1199396,1200731,1204114,1204117,1204122,1204124,1204129,1204131,1204177,1204366,1204370,1204376,1204388,1204544,1204707,1205146,1205260,1205697,1206786,1206830,1207694,1208140,1208153,1208313,12
 10208,1210657,1210719,1210746,1211206,1211249,1211769,1212021,1212062,1212073,1212084,1212299,1212606,1213040,1213143,1213537,1213586,1213592-1213593,1213808,1213813,1213954,1213985,1214027,1214033,1214046,1214102-1214103,1214128,1215364,1215366,1220315,1220510,1221106,1221348,1225114,1225192,1225456,1225489,1225591,1226211,1226239,1226350,1227091,1227165,1227423,1227887,1227964,1229347,1229877,1229897,1230398,1231569,1231572,1231627,1231640,1233584,1233605,1234555,1235135,1235137,1235956,1236456,1238700,1238779,1238969
+/hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java:1227776-1240449
+/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java:1161777,1161781,1161992,1162188,1162421,1162491,1162499,1162613,1162928,1162954,1162979,1163050,1163069,1163081,1163490,1163768,1164255,1164301,1164339,1165826,1166402,1166466,1167383,1167662,1170085,1170379,1170459,1170996,1171136,1171297,1171379,1171611,1171711,1172916,1173402,1173468,1173488,1175113,1176178,1176550,1176719,1176729,1176733,1177100,1177161,1177487,1177531,1177757,1177859,1177864,1177905,1179169,1179856,1179861,1180757,1182189,1182205,1182214,1183081,1183098,1183175,1183554,1186508,1186896,1187140,1187505,1188282,1188286,1188300,1188436,1188487,1189028,1189355,1189360,1189546,1189901,1189932,1189982,1190077,1190127,1190620,1190708,1195575,1195656,1195731,1195754,1196113,1196129,1196171,1197329,1198903,1199396,1200731,1204114,1204117,1204122,1204124,1204129,1204131,1204177,1204366,1204370,1204376,1204388,1204544,1204707,1205146,1205260,1205697,1206786,1206830,1207694,1208140,1208153,1208313,12
 10208,1210657,1210719,1210746,1211206,1211249,1211769,1212021,1212062,1212073,1212084,1212299,1212606,1213040,1213143,1213537,1213586,1213592-1213593,1213808,1213813,1213954,1213985,1214027,1214033,1214046,1214102-1214103,1214128,1215364,1215366,1220315,1220510,1221106,1221348,1225114,1225192,1225456,1225489,1225591,1226211,1226239,1226350,1227091,1227165,1227423,1227887,1227964,1229347,1229877,1229897,1230398,1231569,1231572,1231627,1231640,1233584,1233605,1234555,1235135,1235137,1235956,1236456,1238700,1238779,1238969,1239752,1240020
 /hadoop/core/branches/branch-0.19/hdfs/src/java:713112
 /hadoop/core/trunk/src/hdfs:776175-785643,785929-786278
 /hadoop/hdfs/branches/HDFS-1052/src/java:987665-1095512

Modified: hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java?rev=1240450&r1=1240449&r2=1240450&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java (original)
+++ hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java Sat Feb  4 03:40:45 2012
@@ -631,7 +631,7 @@ public class DFSClient implements java.i
                DelegationTokenIdentifier.stringifyToken(delToken));
       ClientProtocol nn = 
         DFSUtil.createNamenode
-           (NameNode.getAddress(token.getService().toString()),
+           (SecurityUtil.getTokenServiceAddr(delToken),
             conf, UserGroupInformation.getCurrentUser());
       try {
         return nn.renewDelegationToken(delToken);
@@ -649,7 +649,7 @@ public class DFSClient implements java.i
       LOG.info("Cancelling " + 
                DelegationTokenIdentifier.stringifyToken(delToken));
       ClientProtocol nn = DFSUtil.createNamenode(
-          NameNode.getAddress(token.getService().toString()), conf,
+          SecurityUtil.getTokenServiceAddr(delToken), conf,
           UserGroupInformation.getCurrentUser());
       try {
         nn.cancelDelegationToken(delToken);
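
Both hunks above replace hand parsing of token.getService().toString() with SecurityUtil.getTokenServiceAddr(delToken), part of the host-based token work in HDFS-2784. A minimal sketch of what such a resolution step does, in plain JDK types; the parsing below is an illustrative assumption, not the actual SecurityUtil code:

    import java.net.InetSocketAddress;

    // Sketch only: turns a token service string ("host:port") into an
    // address, the job SecurityUtil.getTokenServiceAddr(...) centralizes
    // instead of each call site parsing the string itself.
    public class TokenServiceAddrSketch {
      static InetSocketAddress toAddr(String service) {
        int colon = service.lastIndexOf(':');
        String host = service.substring(0, colon);
        int port = Integer.parseInt(service.substring(colon + 1));
        return InetSocketAddress.createUnresolved(host, port); // no DNS lookup here
      }

      public static void main(String[] args) {
        InetSocketAddress addr = toAddr("nn.example.com:8020");
        System.out.println(addr.getHostString() + ":" + addr.getPort());
      }
    }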

Modified: hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java?rev=1240450&r1=1240449&r2=1240450&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java (original)
+++ hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java Sat Feb  4 03:40:45 2012
@@ -1538,7 +1538,7 @@ class DFSOutputStream extends FSOutputSu
    * write pipeline have failed. 
    * @return the number of valid replicas of the current block
    */
-  synchronized int getNumCurrentReplicas() throws IOException {
+  public synchronized int getNumCurrentReplicas() throws IOException {
     dfsClient.checkOpen();
     isClosed();
     if (streamer == null) {
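
This hunk widens getNumCurrentReplicas() from package-private to public (HDFS-2889), so code outside org.apache.hadoop.hdfs can poll the write pipeline. A hedged usage sketch, assuming the caller already holds an FSDataOutputStream obtained from the filesystem; the reflective lookup is illustrative, not code from this commit:

    import java.io.OutputStream;
    import java.lang.reflect.Method;
    import org.apache.hadoop.fs.FSDataOutputStream;

    // Sketch: query how many replicas of the block currently being
    // written are still alive. getWrappedStream() returns the underlying
    // DFSOutputStream; reflection avoids a compile-time dependency on it.
    public class ReplicaCountSketch {
      static int currentReplicas(FSDataOutputStream out) throws Exception {
        OutputStream wrapped = out.getWrappedStream();
        Method m = wrapped.getClass().getMethod("getNumCurrentReplicas");
        return (Integer) m.invoke(wrapped);
      }
    }

Class.getMethod(...) only returns public methods, so before this hunk the lookup above would have thrown NoSuchMethodException on 0.23.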

Modified: hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java?rev=1240450&r1=1240449&r2=1240450&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java (original)
+++ hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java Sat Feb  4 03:40:45 2012
@@ -108,45 +108,10 @@ public class DistributedFileSystem exten
 
     InetSocketAddress namenode = NameNode.getAddress(uri.getAuthority());
     this.dfs = new DFSClient(namenode, conf, statistics);
-    this.uri = URI.create(HdfsConstants.HDFS_URI_SCHEME + "://" + uri.getAuthority());
+    this.uri = URI.create(uri.getScheme()+"://"+uri.getAuthority());
     this.workingDir = getHomeDirectory();
   }
 
-  /** Permit paths which explicitly specify the default port. */
-  @Override
-  protected void checkPath(Path path) {
-    URI thisUri = this.getUri();
-    URI thatUri = path.toUri();
-    String thatAuthority = thatUri.getAuthority();
-    if (thatUri.getScheme() != null
-        && thatUri.getScheme().equalsIgnoreCase(thisUri.getScheme())
-        && thatUri.getPort() == NameNode.DEFAULT_PORT
-        && (thisUri.getPort() == -1 || 
-            thisUri.getPort() == NameNode.DEFAULT_PORT)
-        && thatAuthority.substring(0,thatAuthority.indexOf(":"))
-        .equalsIgnoreCase(thisUri.getAuthority()))
-      return;
-    super.checkPath(path);
-  }
-
-  /** Normalize paths that explicitly specify the default port. */
-  @Override
-  public Path makeQualified(Path path) {
-    URI thisUri = this.getUri();
-    URI thatUri = path.toUri();
-    String thatAuthority = thatUri.getAuthority();
-    if (thatUri.getScheme() != null
-        && thatUri.getScheme().equalsIgnoreCase(thisUri.getScheme())
-        && thatUri.getPort() == NameNode.DEFAULT_PORT
-        && thisUri.getPort() == -1
-        && thatAuthority.substring(0,thatAuthority.indexOf(":"))
-        .equalsIgnoreCase(thisUri.getAuthority())) {
-      path = new Path(thisUri.getScheme(), thisUri.getAuthority(),
-                      thatUri.getPath());
-    }
-    return super.makeQualified(path);
-  }
-
   @Override
   public Path getWorkingDirectory() {
     return workingDir;
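
The initialize() hunk stops hard-coding HdfsConstants.HDFS_URI_SCHEME and reuses the scheme of the URI the filesystem was created with, while the removed checkPath/makeQualified overrides drop the default-port special cases. A tiny self-contained sketch of the new construction; the URI value is illustrative:

    import java.net.URI;

    public class SchemePreserveSketch {
      public static void main(String[] args) {
        URI name = URI.create("hdfs://nn.example.com:8020/user/alice");
        // same construction as the new initialize(): keep the caller's scheme
        URI fsUri = URI.create(name.getScheme() + "://" + name.getAuthority());
        System.out.println(fsUri);  // hdfs://nn.example.com:8020
      }
    }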

Modified: hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HftpFileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HftpFileSystem.java?rev=1240450&r1=1240449&r2=1240450&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HftpFileSystem.java (original)
+++ hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HftpFileSystem.java Sat Feb  4 03:40:45 2012
@@ -59,6 +59,7 @@ import org.apache.hadoop.security.UserGr
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.TokenIdentifier;
 import org.apache.hadoop.security.token.TokenRenewer;
+import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSelector;
 import org.apache.hadoop.util.Progressable;
 import org.apache.hadoop.util.ServletUtil;
 import org.xml.sax.Attributes;
@@ -89,17 +90,20 @@ public class HftpFileSystem extends File
 
   public static final Text TOKEN_KIND = new Text("HFTP delegation");
 
-  private String nnHttpUrl;
-  private Text hdfsServiceName;
+  protected UserGroupInformation ugi;
   private URI hftpURI;
+
   protected InetSocketAddress nnAddr;
-  protected UserGroupInformation ugi; 
+  protected InetSocketAddress nnSecureAddr;
 
   public static final String HFTP_TIMEZONE = "UTC";
   public static final String HFTP_DATE_FORMAT = "yyyy-MM-dd'T'HH:mm:ssZ";
+
   private Token<?> delegationToken;
   private Token<?> renewToken;
-  
+  private static final HftpDelegationTokenSelector hftpTokenSelector =
+      new HftpDelegationTokenSelector();
+
   public static final SimpleDateFormat getDateFormat() {
     final SimpleDateFormat df = new SimpleDateFormat(HFTP_DATE_FORMAT);
     df.setTimeZone(TimeZone.getTimeZone(HFTP_TIMEZONE));
@@ -115,11 +119,8 @@ public class HftpFileSystem extends File
 
   @Override
   protected int getDefaultPort() {
-    return getDefaultSecurePort();
-
-    //TODO: un-comment the following once HDFS-7510 is committed. 
-//    return getConf().getInt(DFSConfigKeys.DFS_NAMENODE_HTTP_PORT_KEY,
-//        DFSConfigKeys.DFS_NAMENODE_HTTP_PORT_DEFAULT);
+    return getConf().getInt(DFSConfigKeys.DFS_NAMENODE_HTTP_PORT_KEY,
+        DFSConfigKeys.DFS_NAMENODE_HTTP_PORT_DEFAULT);
   }
 
   protected int getDefaultSecurePort() {
@@ -127,16 +128,22 @@ public class HftpFileSystem extends File
         DFSConfigKeys.DFS_NAMENODE_HTTPS_PORT_DEFAULT);
   }
 
-  @Override
-  public String getCanonicalServiceName() {
-    return SecurityUtil.buildDTServiceName(hftpURI, getDefaultPort());
+  protected InetSocketAddress getNamenodeAddr(URI uri) {
+    // use authority so user supplied uri can override port
+    return NetUtils.createSocketAddr(uri.getAuthority(), getDefaultPort());
   }
-  
-  private String buildUri(String schema, String host, int port) {
-    StringBuilder sb = new StringBuilder(schema);
-    return sb.append(host).append(":").append(port).toString();
+
+  protected InetSocketAddress getNamenodeSecureAddr(URI uri) {
+    // must only use the host and the configured https port
+    return NetUtils.createSocketAddrForHost(uri.getHost(), getDefaultSecurePort());
   }
 
+  @Override
+  public String getCanonicalServiceName() {
+    // unlike other filesystems, hftp's service is the secure port, not the
+    // actual port in the uri
+    return SecurityUtil.buildTokenService(nnSecureAddr).toString();
+  }
 
   @Override
   public void initialize(final URI name, final Configuration conf)
@@ -144,95 +151,51 @@ public class HftpFileSystem extends File
     super.initialize(name, conf);
     setConf(conf);
     this.ugi = UserGroupInformation.getCurrentUser(); 
-    nnAddr = NetUtils.createSocketAddr(name.toString());
-    
-    // in case we open connection to hftp of a different cluster
-    // we need to know this cluster https port
-    // if it is not set we assume it is the same cluster or same port
-    int urlPort = conf.getInt("dfs.hftp.https.port", -1);
-    if(urlPort == -1)
-      urlPort = conf.getInt(DFSConfigKeys.DFS_HTTPS_PORT_KEY, 
-          DFSConfigKeys.DFS_HTTPS_PORT_DEFAULT);
-
-    String normalizedNN = NetUtils.normalizeHostName(name.getHost());
-    nnHttpUrl = buildUri("https://", normalizedNN ,urlPort);
-    LOG.debug("using url to get DT:" + nnHttpUrl);
-    try {
-      hftpURI = new URI(buildUri("hftp://", normalizedNN, urlPort));
-    } catch (URISyntaxException ue) {
-      throw new IOException("bad uri for hdfs", ue);
-    }
-
-    // if one uses RPC port different from the Default one,  
-    // one should specify what is the setvice name for this delegation token
-    // otherwise it is hostname:RPC_PORT
-    String key = DelegationTokenSelector.SERVICE_NAME_KEY
-        + SecurityUtil.buildDTServiceName(name,
-            DFSConfigKeys.DFS_HTTPS_PORT_DEFAULT);
-    if(LOG.isDebugEnabled()) {
-      LOG.debug("Trying to find DT for " + name + " using key=" + key + 
-          "; conf=" + conf.get(key, ""));
-    }
-    String nnServiceName = conf.get(key);
-    int nnPort = NameNode.DEFAULT_PORT;
-    if (nnServiceName != null) { // get the real port
-      nnPort = NetUtils.createSocketAddr(nnServiceName, 
-          NameNode.DEFAULT_PORT).getPort();
-    }
+    this.nnAddr = getNamenodeAddr(name);
+    this.nnSecureAddr = getNamenodeSecureAddr(name);
     try {
-      URI hdfsURI = new URI("hdfs://" + normalizedNN + ":" + nnPort);
-      hdfsServiceName = new Text(SecurityUtil.buildDTServiceName(hdfsURI, 
-                                                                 nnPort));
-    } catch (URISyntaxException ue) {
-      throw new IOException("bad uri for hdfs", ue);
+      this.hftpURI = new URI(name.getScheme(), name.getAuthority(),
+                             null, null, null);
+    } catch (URISyntaxException e) {
+      throw new IllegalArgumentException(e);
     }
 
     if (UserGroupInformation.isSecurityEnabled()) {
-      //try finding a token for this namenode (esp applicable for tasks
-      //using hftp). If there exists one, just set the delegationField
-      String hftpServiceName = getCanonicalServiceName();
-      for (Token<? extends TokenIdentifier> t : ugi.getTokens()) {
-        Text kind = t.getKind();
-        if (DelegationTokenIdentifier.HDFS_DELEGATION_KIND.equals(kind)) {
-          if (t.getService().equals(hdfsServiceName)) {
-            setDelegationToken(t);
-            break;
-          }
-        } else if (TOKEN_KIND.equals(kind)) {
-          if (hftpServiceName
-              .equals(normalizeService(t.getService().toString()))) {
-            setDelegationToken(t);
-            break;
-          }
-        }
-      }
-      
-      //since we don't already have a token, go get one over https
-      if (delegationToken == null) {
-        setDelegationToken(getDelegationToken(null));
+      initDelegationToken();
+    }
+  }
+
+  protected void initDelegationToken() throws IOException {
+    // look for hftp token, then try hdfs
+    Token<?> token = selectHftpDelegationToken();
+    if (token == null) {
+      token = selectHdfsDelegationToken();
+    }  
+
+    // if we don't already have a token, go get one over https
+    boolean createdToken = false;
+    if (token == null) {
+      token = getDelegationToken(null);
+      createdToken = (token != null);
+    }
+
+    // we already had a token or getDelegationToken() didn't fail.
+    if (token != null) {
+      setDelegationToken(token);
+      if (createdToken) {
         dtRenewer.addRenewAction(this);
+        LOG.debug("Created new DT for " + token.getService());
+      } else {
+        LOG.debug("Found existing DT for " + token.getService());
       }
     }
   }
 
-  private String normalizeService(String service) {
-    int colonIndex = service.indexOf(':');
-    if (colonIndex == -1) {
-      throw new IllegalArgumentException("Invalid service for hftp token: " + 
-                                         service);
-    }
-    String hostname = 
-        NetUtils.normalizeHostName(service.substring(0, colonIndex));
-    String port = service.substring(colonIndex + 1);
-    return hostname + ":" + port;
-  }
-
-  //TODO: un-comment the following once HDFS-7510 is committed. 
-//  protected Token<DelegationTokenIdentifier> selectHftpDelegationToken() {
-//    Text serviceName = SecurityUtil.buildTokenService(nnSecureAddr);
-//    return hftpTokenSelector.selectToken(serviceName, ugi.getTokens());      
-//  }
-  
+  protected Token<DelegationTokenIdentifier> selectHftpDelegationToken() {
+    Text serviceName = SecurityUtil.buildTokenService(nnSecureAddr);
+    return hftpTokenSelector.selectToken(serviceName, ugi.getTokens());
+  }
+
   protected Token<DelegationTokenIdentifier> selectHdfsDelegationToken() {
     return  DelegationTokenSelector.selectHdfsDelegationToken(
         nnAddr, ugi, getConf());
@@ -245,13 +208,17 @@ public class HftpFileSystem extends File
   }
 
   @Override
-  public <T extends TokenIdentifier> void setDelegationToken(Token<T> token) {
+  public synchronized <T extends TokenIdentifier> void setDelegationToken(Token<T> token) {
     renewToken = token;
     // emulate the 203 usage of the tokens
     // by setting the kind and service as if they were hdfs tokens
     delegationToken = new Token<T>(token);
+    // NOTE: the remote nn must be configured to use hdfs
     delegationToken.setKind(DelegationTokenIdentifier.HDFS_DELEGATION_KIND);
-    delegationToken.setService(hdfsServiceName);
+    // no need to change service because we aren't exactly sure what it
+    // should be.  we can guess, but it might be wrong if the local conf
+    // value is incorrect.  the service is a client side field, so the remote
+    // end does not care about the value
   }
 
   @Override
@@ -262,6 +229,7 @@ public class HftpFileSystem extends File
       ugi.reloginFromKeytab();
       return ugi.doAs(new PrivilegedExceptionAction<Token<?>>() {
         public Token<?> run() throws IOException {
+          final String nnHttpUrl = DFSUtil.createUri("https", nnSecureAddr).toString();
           Credentials c;
           try {
             c = DelegationTokenFetcher.getDTfromRemote(nnHttpUrl, renewer);
@@ -291,12 +259,7 @@ public class HftpFileSystem extends File
 
   @Override
   public URI getUri() {
-    try {
-      return new URI("hftp", null, nnAddr.getHostName(), nnAddr.getPort(),
-                     null, null, null);
-    } catch (URISyntaxException e) {
-      return null;
-    } 
+    return hftpURI;
   }
 
   /**
@@ -722,11 +685,12 @@ public class HftpFileSystem extends File
     public long renew(Token<?> token, 
                       Configuration conf) throws IOException {
       // update the kerberos credentials, if they are coming from a keytab
-      UserGroupInformation.getLoginUser().checkTGTAndReloginFromKeytab();
+      UserGroupInformation.getLoginUser().reloginFromKeytab();
       // use https to renew the token
+      InetSocketAddress serviceAddr = SecurityUtil.getTokenServiceAddr(token);
       return 
         DelegationTokenFetcher.renewDelegationToken
-        ("https://" + token.getService().toString(), 
+        (DFSUtil.createUri("https", serviceAddr).toString(), 
          (Token<DelegationTokenIdentifier>) token);
     }
 
@@ -737,10 +701,18 @@ public class HftpFileSystem extends File
       // update the kerberos credentials, if they are coming from a keytab
       UserGroupInformation.getLoginUser().checkTGTAndReloginFromKeytab();
       // use https to cancel the token
+      InetSocketAddress serviceAddr = SecurityUtil.getTokenServiceAddr(token);
       DelegationTokenFetcher.cancelDelegationToken
-        ("https://" + token.getService().toString(), 
+        (DFSUtil.createUri("https", serviceAddr).toString(), 
          (Token<DelegationTokenIdentifier>) token);
+    }    
+  }
+  
+  private static class HftpDelegationTokenSelector
+  extends AbstractDelegationTokenSelector<DelegationTokenIdentifier> {
+
+    public HftpDelegationTokenSelector() {
+      super(TOKEN_KIND);
     }
-    
   }
 }
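
getCanonicalServiceName() now keys tokens on the NameNode's secure (https) address rather than whatever port appeared in the hftp URI, and initDelegationToken() tries an existing hftp token, then an hdfs token, before fetching a new one over https. A self-contained illustration of the service naming; the 50470 constant is an assumed stand-in for DFSConfigKeys.DFS_NAMENODE_HTTPS_PORT_DEFAULT, and the string form mirrors SecurityUtil.buildTokenService(...) only loosely:

    import java.net.InetSocketAddress;

    public class HftpServiceNameSketch {
      static final int ASSUMED_HTTPS_PORT = 50470; // stand-in for the configured default

      // Mirrors getNamenodeSecureAddr(): only the host survives; the port
      // always comes from the https configuration, never from the URI.
      static String canonicalService(String uriHost) {
        InetSocketAddress secure =
            InetSocketAddress.createUnresolved(uriHost, ASSUMED_HTTPS_PORT);
        return secure.getHostString() + ":" + secure.getPort();
      }

      public static void main(String[] args) {
        // hftp://nn.example.com:50070/... still maps to the https service
        System.out.println(canonicalService("nn.example.com")); // nn.example.com:50470
      }
    }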

Modified: hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HsftpFileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HsftpFileSystem.java?rev=1240450&r1=1240449&r2=1240450&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HsftpFileSystem.java (original)
+++ hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HsftpFileSystem.java Sat Feb  4 03:40:45 2012
@@ -21,6 +21,7 @@ package org.apache.hadoop.hdfs;
 import java.io.FileInputStream;
 import java.io.IOException;
 import java.net.HttpURLConnection;
+import java.net.InetSocketAddress;
 import java.net.URI;
 import java.net.URISyntaxException;
 import java.net.URL;
@@ -121,6 +122,16 @@ public class HsftpFileSystem extends Hft
   }
 
   @Override
+  protected int getDefaultPort() {
+    return getDefaultSecurePort();
+  }
+
+  @Override
+  protected InetSocketAddress getNamenodeSecureAddr(URI uri) {
+    return getNamenodeAddr(uri);
+  }
+
+  @Override
   protected HttpURLConnection openConnection(String path, String query)
       throws IOException {
     query = addDelegationTokenParam(query);
@@ -161,16 +172,6 @@ public class HsftpFileSystem extends Hft
     return (HttpURLConnection) conn;
   }
 
-  @Override
-  public URI getUri() {
-    try {
-      return new URI("hsftp", null, nnAddr.getHostName(), nnAddr.getPort(),
-          null, null, null);
-    } catch (URISyntaxException e) {
-      return null;
-    }
-  }
-
   /**
    * Dummy hostname verifier that is used to bypass hostname checking
    */

Modified: hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSecretManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSecretManager.java?rev=1240450&r1=1240449&r2=1240450&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSecretManager.java (original)
+++ hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSecretManager.java Sat Feb  4 03:40:45 2012
@@ -31,6 +31,7 @@ import org.apache.hadoop.hdfs.server.nam
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.security.Credentials;
+import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSecretManager;
@@ -296,8 +297,7 @@ public class DelegationTokenSecretManage
     }
 
     final InetSocketAddress addr = namenode.getNameNodeAddress();
-    final String s = addr.getAddress().getHostAddress() + ":" + addr.getPort();
-    token.setService(new Text(s));
+    SecurityUtil.setTokenService(token, addr);
     final Credentials c = new Credentials();
     c.addToken(new Text(ugi.getShortUserName()), token);
     return c;

Modified: hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoUnderConstruction.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoUnderConstruction.java?rev=1240450&r1=1240449&r2=1240450&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoUnderConstruction.java (original)
+++ hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoUnderConstruction.java Sat Feb  4 03:40:45 2012
@@ -157,9 +157,6 @@ public class BlockInfoUnderConstruction 
   BlockInfo convertToCompleteBlock() throws IOException {
     assert getBlockUCState() != BlockUCState.COMPLETE :
       "Trying to convert a COMPLETE block";
-    if(getBlockUCState() != BlockUCState.COMMITTED)
-      throw new IOException(
-          "Cannot complete block: block has not been COMMITTED by the client");
     return new BlockInfo(this);
   }
 

Modified: hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java?rev=1240450&r1=1240449&r2=1240450&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java (original)
+++ hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java Sat Feb  4 03:40:45 2012
@@ -441,15 +441,23 @@ public class BlockManager {
    */
   private BlockInfo completeBlock(final INodeFile fileINode,
       final int blkIndex) throws IOException {
+    return completeBlock(fileINode, blkIndex, false);
+  }
+
+  public BlockInfo completeBlock(final INodeFile fileINode, 
+      final int blkIndex, final boolean force) throws IOException {
     if(blkIndex < 0)
       return null;
     BlockInfo curBlock = fileINode.getBlocks()[blkIndex];
     if(curBlock.isComplete())
       return curBlock;
     BlockInfoUnderConstruction ucBlock = (BlockInfoUnderConstruction)curBlock;
-    if(ucBlock.numNodes() < minReplication)
+    if(!force && ucBlock.numNodes() < minReplication)
       throw new IOException("Cannot complete block: " +
           "block does not satisfy minimal replication requirement.");
+    if(!force && ucBlock.getBlockUCState() != BlockUCState.COMMITTED)
+      throw new IOException(
+          "Cannot complete block: block has not been COMMITTED by the client");
     BlockInfo completeBlock = ucBlock.convertToCompleteBlock();
     // replace penultimate block in file
     fileINode.setBlock(blkIndex, completeBlock);

Modified: hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/JspHelper.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/JspHelper.java?rev=1240450&r1=1240449&r2=1240450&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/JspHelper.java (original)
+++ hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/JspHelper.java Sat Feb  4 03:40:45 2012
@@ -62,6 +62,7 @@ import org.apache.hadoop.http.HtmlQuotin
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.AccessControlException;
+import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
 import org.apache.hadoop.security.authentication.util.KerberosName;
@@ -492,7 +493,7 @@ public class JspHelper {
     return UserGroupInformation.createRemoteUser(strings[0]);
   }
 
-  private static String getNNServiceAddress(ServletContext context,
+  private static InetSocketAddress getNNServiceAddress(ServletContext context,
       HttpServletRequest request) {
     String namenodeAddressInUrl = request.getParameter(NAMENODE_ADDRESS);
     InetSocketAddress namenodeAddress = null;
@@ -503,8 +504,7 @@ public class JspHelper {
           context); 
     }
     if (namenodeAddress != null) {
-      return (namenodeAddress.getAddress().getHostAddress() + ":" 
-          + namenodeAddress.getPort());
+      return namenodeAddress;
     }
     return null;
   }
@@ -547,9 +547,9 @@ public class JspHelper {
         Token<DelegationTokenIdentifier> token = 
           new Token<DelegationTokenIdentifier>();
         token.decodeFromUrlString(tokenString);
-        String serviceAddress = getNNServiceAddress(context, request);
+        InetSocketAddress serviceAddress = getNNServiceAddress(context, request);
         if (serviceAddress != null) {
-          token.setService(new Text(serviceAddress));
+          SecurityUtil.setTokenService(token, serviceAddress);
           token.setKind(DelegationTokenIdentifier.HDFS_DELEGATION_KIND);
         }
         ByteArrayInputStream buf = new ByteArrayInputStream(token

Modified: hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java?rev=1240450&r1=1240449&r2=1240450&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java (original)
+++ hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java Sat Feb  4 03:40:45 2012
@@ -599,8 +599,12 @@ public abstract class Storage extends St
      * @throws IOException if locking fails.
      */
     FileLock tryLock() throws IOException {
+      boolean deletionHookAdded = false;
       File lockF = new File(root, STORAGE_FILE_LOCK);
-      lockF.deleteOnExit();
+      if (!lockF.exists()) {
+        lockF.deleteOnExit();
+        deletionHookAdded = true;
+      }
       RandomAccessFile file = new RandomAccessFile(lockF, "rws");
       FileLock res = null;
       try {
@@ -613,6 +617,12 @@ public abstract class Storage extends St
         file.close();
         throw e;
       }
+      if (res != null && !deletionHookAdded) {
+        // If the file existed prior to our startup, we didn't
+        // call deleteOnExit above. But since we successfully locked
+        // the dir, we can take care of cleaning it up.
+        lockF.deleteOnExit();
+      }
       return res;
     }
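
The tryLock() hunk fixes HDFS-2877: previously deleteOnExit() was registered unconditionally, so a process that failed to lock a shared storage directory would still delete the owning NameNode's lock file when it exited. A minimal sketch of the corrected pattern, assuming an illustrative in_use.lock file name:

    import java.io.File;
    import java.io.IOException;
    import java.io.RandomAccessFile;
    import java.nio.channels.FileLock;

    // Register deleteOnExit() only for a lock file this process created,
    // or once the lock is actually held, so a failed locker cannot remove
    // the owner's lock file on exit.
    public class SafeLockSketch {
      static FileLock tryLock(File dir) throws IOException {
        File lockF = new File(dir, "in_use.lock");
        boolean hookAdded = false;
        if (!lockF.exists()) {          // we are about to create the file
          lockF.deleteOnExit();
          hookAdded = true;
        }
        RandomAccessFile file = new RandomAccessFile(lockF, "rws");
        FileLock res = file.getChannel().tryLock(); // null if another process holds it
        if (res == null) {
          file.close();
          return null;
        }
        if (!hookAdded) {
          // file pre-existed, but we now own the lock: safe to clean up on exit
          lockF.deleteOnExit();
        }
        return res;
      }
    }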
 

Modified: hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java?rev=1240450&r1=1240449&r2=1240450&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java (original)
+++ hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java Sat Feb  4 03:40:45 2012
@@ -222,7 +222,7 @@ class BlockReceiver implements Closeable
       cleanupBlock();
       
       // check if there is a disk error
-      IOException cause = FSDataset.getCauseIfDiskError(ioe);
+      IOException cause = DatanodeUtil.getCauseIfDiskError(ioe);
       DataNode.LOG.warn("IOException in BlockReceiver constructor. Cause is ",
           cause);
       

Modified: hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java?rev=1240450&r1=1240449&r2=1240450&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java (original)
+++ hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java Sat Feb  4 03:40:45 2012
@@ -75,7 +75,7 @@ import org.apache.hadoop.util.Reflection
  *
  ***************************************************/
 @InterfaceAudience.Private
-public class FSDataset implements FSDatasetInterface {
+class FSDataset implements FSDatasetInterface {
 
   /**
    * A node type that can be built into a tree reflecting the
@@ -373,7 +373,7 @@ public class FSDataset implements FSData
      */
     File createTmpFile(Block b) throws IOException {
       File f = new File(tmpDir, b.getBlockName());
-      return FSDataset.createTmpFile(b, f);
+      return DatanodeUtil.createTmpFile(b, f);
     }
 
     /**
@@ -382,7 +382,7 @@ public class FSDataset implements FSData
      */
     File createRbwFile(Block b) throws IOException {
       File f = new File(rbwDir, b.getBlockName());
-      return FSDataset.createTmpFile(b, f);
+      return DatanodeUtil.createTmpFile(b, f);
     }
 
     File addBlock(Block b, File f) throws IOException {
@@ -536,15 +536,15 @@ public class FSDataset implements FSData
     }
 
     /** Return storage directory corresponding to the volume */
-    public File getDir() {
+    File getDir() {
       return currentDir.getParentFile();
     }
     
-    public File getCurrentDir() {
+    File getCurrentDir() {
       return currentDir;
     }
     
-    public File getRbwDir(String bpid) throws IOException {
+    File getRbwDir(String bpid) throws IOException {
       BlockPoolSlice bp = getBlockPoolSlice(bpid);
       return bp.getRbwDir();
     }
@@ -1085,26 +1085,6 @@ public class FSDataset implements FSData
     return new MetaDataInputStream(new FileInputStream(checksumFile),
                                                     checksumFile.length());
   }
-
-  static File createTmpFile(Block b, File f) throws IOException {
-    if (f.exists()) {
-      throw new IOException("Unexpected problem in creating temporary file for "+
-                            b + ".  File " + f + " should not be present, but is.");
-    }
-    // Create the zero-length temp file
-    //
-    boolean fileCreated = false;
-    try {
-      fileCreated = f.createNewFile();
-    } catch (IOException ioe) {
-      throw (IOException)new IOException(DISK_ERROR +f).initCause(ioe);
-    }
-    if (!fileCreated) {
-      throw new IOException("Unexpected problem in creating temporary file for "+
-                            b + ".  File " + f + " should be creatable, but is already present.");
-    }
-    return f;
-  }
     
   private final DataNode datanode;
   final FSVolumeSet volumes;
@@ -1246,7 +1226,7 @@ public class FSDataset implements FSData
   /**
    * Get File name for a given block.
    */
-  public File getBlockFile(ExtendedBlock b) throws IOException {
+  private File getBlockFile(ExtendedBlock b) throws IOException {
     return getBlockFile(b.getBlockPoolId(), b.getLocalBlock());
   }
   
@@ -1320,7 +1300,7 @@ public class FSDataset implements FSData
    * @throws ReplicaNotFoundException if no entry is in the map or 
    *                        there is a generation stamp mismatch
    */
-  private ReplicaInfo getReplicaInfo(ExtendedBlock b)
+  ReplicaInfo getReplicaInfo(ExtendedBlock b)
       throws ReplicaNotFoundException {
     ReplicaInfo info = volumeMap.get(b.getBlockPoolId(), b.getLocalBlock());
     if (info == null) {
@@ -1456,19 +1436,6 @@ public class FSDataset implements FSData
     }
   }
 
-  private final static String DISK_ERROR = "Possible disk error on file creation: ";
-  /** Get the cause of an I/O exception if caused by a possible disk error
-   * @param ioe an I/O exception
-   * @return cause if the I/O exception is caused by a possible disk error;
-   *         null otherwise.
-   */ 
-  static IOException getCauseIfDiskError(IOException ioe) {
-    if (ioe.getMessage()!=null && ioe.getMessage().startsWith(DISK_ERROR)) {
-      return (IOException)ioe.getCause();
-    } else {
-      return null;
-    }
-  }
 
   @Override  // FSDatasetInterface
   public synchronized ReplicaInPipelineInterface append(ExtendedBlock b,

Modified: hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInfo.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInfo.java?rev=1240450&r1=1240449&r2=1240450&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInfo.java (original)
+++ hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInfo.java Sat Feb  4 03:40:45 2012
@@ -162,7 +162,7 @@ abstract public class ReplicaInfo extend
    * be recovered (especially on Windows) on datanode restart.
    */
   private void unlinkFile(File file, Block b) throws IOException {
-    File tmpFile = FSDataset.createTmpFile(b, FSDataset.getUnlinkTmpFile(file));
+    File tmpFile = DatanodeUtil.createTmpFile(b, FSDataset.getUnlinkTmpFile(file));
     try {
       FileInputStream in = new FileInputStream(file);
       try {

Modified: hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java?rev=1240450&r1=1240449&r2=1240450&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java (original)
+++ hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java Sat Feb  4 03:40:45 2012
@@ -262,22 +262,28 @@ public class FSDirectory implements Clos
                             short replication,
                             long modificationTime,
                             long atime,
-                            long preferredBlockSize) 
+                            long preferredBlockSize,
+                            String clientName,
+                            String clientMachine)
       throws UnresolvedLinkException {
     INode newNode;
-    long diskspace = UNKNOWN_DISK_SPACE;
     assert hasWriteLock();
     if (blocks == null)
       newNode = new INodeDirectory(permissions, modificationTime);
-    else {
+    else if(blocks.length == 0 || blocks[blocks.length-1].getBlockUCState()
+        == BlockUCState.UNDER_CONSTRUCTION) {
+      newNode = new INodeFileUnderConstruction(
+          permissions, blocks.length, replication,
+          preferredBlockSize, modificationTime, clientName, 
+          clientMachine, null);
+    } else {
       newNode = new INodeFile(permissions, blocks.length, replication,
                               modificationTime, atime, preferredBlockSize);
-      diskspace = ((INodeFile)newNode).diskspaceConsumed(blocks);
     }
     writeLock();
     try {
       try {
-        newNode = addNode(path, newNode, diskspace);
+        newNode = addNode(path, newNode, UNKNOWN_DISK_SPACE);
         if(newNode != null && blocks != null) {
           int nrBlocks = blocks.length;
           // Add file->block mapping
@@ -296,6 +302,74 @@ public class FSDirectory implements Clos
 
   }
 
+  /**
+   * Update files in-memory data structures with new block information.
+   * @throws IOException 
+   */
+  void updateFile(INodeFile file,
+                  String path,
+                  PermissionStatus permissions,
+                  BlockInfo[] blocks, 
+                  short replication,
+                  long mtime,
+                  long atime,
+                  long preferredBlockSize) throws IOException {
+
+    // Update the salient file attributes.
+    file.setAccessTime(atime);
+    file.setModificationTimeForce(mtime);
+
+    // Update its block list
+    BlockInfo[] oldBlocks = file.getBlocks();
+
+    // Are we only updating the last block's gen stamp.
+    boolean isGenStampUpdate = oldBlocks.length == blocks.length;
+
+    // First, update blocks in common
+    BlockInfo oldBlock = null;
+    for (int i = 0; i < oldBlocks.length && i < blocks.length; i++) {
+      oldBlock = oldBlocks[i];
+      Block newBlock = blocks[i];
+
+      boolean isLastBlock = i == oldBlocks.length - 1;
+      if (oldBlock.getBlockId() != newBlock.getBlockId() ||
+          (oldBlock.getGenerationStamp() != newBlock.getGenerationStamp() && 
+              !(isGenStampUpdate && isLastBlock))) {
+        throw new IOException("Mismatched block IDs or generation stamps, " + 
+            "attempting to replace block " + oldBlock + " with " + newBlock +
+            " as block # " + i + "/" + blocks.length + " of " + path);
+      }
+
+      oldBlock.setNumBytes(newBlock.getNumBytes());
+      oldBlock.setGenerationStamp(newBlock.getGenerationStamp());
+    }
+
+    if (blocks.length < oldBlocks.length) {
+      // We're removing a block from the file, e.g. abandonBlock(...)
+      if (!file.isUnderConstruction()) {
+        throw new IOException("Trying to remove a block from file " +
+            path + " which is not under construction.");
+      }
+      if (blocks.length != oldBlocks.length - 1) {
+        throw new IOException("Trying to remove more than one block from file "
+            + path);
+      }
+      unprotectedRemoveBlock(path,
+          (INodeFileUnderConstruction)file, oldBlocks[oldBlocks.length - 1]);
+    } else if (blocks.length > oldBlocks.length) {
+      // We're adding blocks
+      // First complete last old Block
+      getBlockManager().completeBlock(file, oldBlocks.length-1, true);
+      // Add the new blocks
+      for (int i = oldBlocks.length; i < blocks.length; i++) {
+        // addBlock();
+        BlockInfo newBI = blocks[i];
+        getBlockManager().addINode(newBI, file);
+        file.addBlock(newBI);
+      }
+    }
+  }
+
   INodeDirectory addToParent(byte[] src, INodeDirectory parentINode,
       INode newNode, boolean propagateModTime) throws UnresolvedLinkException {
     // NOTE: This does not update space counts for parents
@@ -417,28 +491,33 @@ public class FSDirectory implements Clos
 
     writeLock();
     try {
-      // modify file-> block and blocksMap
-      fileNode.removeLastBlock(block);
-      getBlockManager().removeBlockFromMap(block);
-
+      unprotectedRemoveBlock(path, fileNode, block);
       // write modified block locations to log
       fsImage.getEditLog().logOpenFile(path, fileNode);
-      if(NameNode.stateChangeLog.isDebugEnabled()) {
-        NameNode.stateChangeLog.debug("DIR* FSDirectory.removeBlock: "
-            +path+" with "+block
-            +" block is removed from the file system");
-      }
-
-      // update space consumed
-      INode[] pathINodes = getExistingPathINodes(path);
-      updateCount(pathINodes, pathINodes.length-1, 0,
-          -fileNode.getPreferredBlockSize()*fileNode.getReplication(), true);
     } finally {
       writeUnlock();
     }
     return true;
   }
 
+  void unprotectedRemoveBlock(String path, INodeFileUnderConstruction fileNode, 
+      Block block) throws IOException {
+    // modify file-> block and blocksMap
+    fileNode.removeLastBlock(block);
+    getBlockManager().removeBlockFromMap(block);
+
+    if(NameNode.stateChangeLog.isDebugEnabled()) {
+      NameNode.stateChangeLog.debug("DIR* FSDirectory.removeBlock: "
+          +path+" with "+block
+          +" block is removed from the file system");
+    }
+
+    // update space consumed
+    INode[] pathINodes = getExistingPathINodes(path);
+    updateCount(pathINodes, pathINodes.length-1, 0,
+        -fileNode.getPreferredBlockSize()*fileNode.getReplication(), true);
+  }
+
   /**
    * @see #unprotectedRenameTo(String, String, long)
    * @deprecated Use {@link #renameTo(String, String, Rename...)} instead.

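The reconciliation rule in updateFile() above is easy to misread: a block's ID may never change in place, and a generation-stamp change is tolerated only on the final block, and only when the old and new block lists are the same length. A minimal standalone sketch of that per-block acceptance test (illustrative only, with hypothetical parameter names; not the committed code):

    // Sketch of the check applied to each common block in updateFile();
    // sameLength mirrors isGenStampUpdate, lastBlock mirrors isLastBlock.
    static boolean blockUpdateAllowed(long oldId, long newId,
                                      long oldGenStamp, long newGenStamp,
                                      boolean sameLength, boolean lastBlock) {
      if (oldId != newId) {
        return false;  // block identity must be stable
      }
      // A gen stamp bump is only legal in the "update the last block's
      // gen stamp" case: equal list lengths and the final block.
      return oldGenStamp == newGenStamp || (sameLength && lastBlock);
    }

Every other combination corresponds to the IOException thrown inside the loop.
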
Modified: hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java?rev=1240450&r1=1240449&r2=1240450&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java (original)
+++ hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java Sat Feb  4 03:40:45 2012
@@ -187,31 +187,53 @@ public class FSEditLogLoader {
                   " clientMachine " + addCloseOp.clientMachine);
             }
 
-            fsDir.unprotectedDelete(addCloseOp.path, addCloseOp.mtime);
-
-            // add to the file tree
-            INodeFile node = (INodeFile)fsDir.unprotectedAddFile(
-                addCloseOp.path, permissions,
-                blocks, replication,
-                addCloseOp.mtime, addCloseOp.atime, blockSize);
-            if (addCloseOp.opCode == FSEditLogOpCodes.OP_ADD) {
-              //
-              // Replace current node with a INodeUnderConstruction.
-              // Recreate in-memory lease record.
-              //
-              INodeFileUnderConstruction cons = new INodeFileUnderConstruction(
-                                        node.getLocalNameBytes(),
-                                        node.getReplication(),
-                                        node.getModificationTime(),
-                                        node.getPreferredBlockSize(),
-                                        node.getBlocks(),
-                                        node.getPermissionStatus(),
-                                        addCloseOp.clientName,
-                                        addCloseOp.clientMachine,
-                                        null);
-              fsDir.replaceNode(addCloseOp.path, node, cons);
-              fsNamesys.leaseManager.addLease(cons.getClientName(),
-                                              addCloseOp.path);
+            // There are four cases here:
+            // 1. OP_ADD to create a new file
+            // 2. OP_ADD to update file blocks
+            // 3. OP_ADD to open file for append
+            // 4. OP_CLOSE to close the file
+
+            // See if the file already exists
+            INodeFile oldFile = fsDir.getFileINode(addCloseOp.path);
+            if (oldFile == null) { // OP_ADD for a new file
+              assert addCloseOp.opCode == FSEditLogOpCodes.OP_ADD : 
+                "Expected opcode OP_ADD, but got " + addCloseOp.opCode;
+              fsDir.unprotectedAddFile(
+                  addCloseOp.path, permissions, blocks, replication,
+                  addCloseOp.mtime, addCloseOp.atime, blockSize,
+                  addCloseOp.clientName, addCloseOp.clientMachine);
+            } else {
+              fsDir.updateFile(oldFile,
+                  addCloseOp.path, permissions, blocks, replication,
+                  addCloseOp.mtime, addCloseOp.atime, blockSize);
+              if(addCloseOp.opCode == FSEditLogOpCodes.OP_CLOSE) {  // OP_CLOSE
+                assert oldFile.isUnderConstruction() : 
+                  "File is not under construction: " + addCloseOp.path;
+                fsNamesys.getBlockManager().completeBlock(
+                    oldFile, blocks.length-1, true);
+                INodeFile newFile =
+                  ((INodeFileUnderConstruction)oldFile).convertToInodeFile();
+                fsDir.replaceNode(addCloseOp.path, oldFile, newFile);
+              } else if(! oldFile.isUnderConstruction()) {  // OP_ADD for append
+                INodeFileUnderConstruction cons = new INodeFileUnderConstruction(
+                    oldFile.getLocalNameBytes(),
+                    oldFile.getReplication(), 
+                    oldFile.getModificationTime(),
+                    oldFile.getPreferredBlockSize(),
+                    oldFile.getBlocks(),
+                    oldFile.getPermissionStatus(),
+                    addCloseOp.clientName,
+                    addCloseOp.clientMachine,
+                    null);
+                fsDir.replaceNode(addCloseOp.path, oldFile, cons);
+              }
+            }
+            // Update file lease
+            if(addCloseOp.opCode == FSEditLogOpCodes.OP_ADD) {
+              fsNamesys.leaseManager.addLease(addCloseOp.clientName, addCloseOp.path);
+            } else {  // OP_CLOSE
+              fsNamesys.leaseManager.removeLease(
+                  ((INodeFileUnderConstruction)oldFile).getClientName(), addCloseOp.path);
             }
             break;
           }

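The four replay cases enumerated in the new comment reduce to a dispatch on three facts: does the file exist, is the opcode OP_ADD, and is the inode under construction. A hedged summary in plain Java (the boolean parameters stand in for the checks made in the diff; this restates the control flow, it is not the committed code):

    // Illustrative summary of the OP_ADD/OP_CLOSE replay dispatch above.
    static String classifyReplayOp(boolean fileExists, boolean isAdd,
                                   boolean underConstruction) {
      if (!fileExists) {
        return "case 1: OP_ADD creates the file and adds a lease";
      }
      if (!isAdd) {
        return "case 4: OP_CLOSE completes the last block, converts the "
            + "inode back to an INodeFile and removes the lease";
      }
      if (!underConstruction) {
        return "case 3: OP_ADD for append swaps in an "
            + "INodeFileUnderConstruction and re-adds the lease";
      }
      return "case 2: OP_ADD that only updates the file's blocks";
    }
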
Modified: hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java?rev=1240450&r1=1240449&r2=1240450&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java (original)
+++ hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java Sat Feb  4 03:40:45 2012
@@ -4249,7 +4249,7 @@ public class FSNamesystem implements Nam
    */
   @Override // NameNodeMXBean
   public String getVersion() {
-    return VersionInfo.getVersion();
+    return VersionInfo.getVersion() + ", r" + VersionInfo.getRevision();
   }
 
   @Override // NameNodeMXBean

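The MXBean change only appends the build revision to the reported version. Assuming VersionInfo returns version 0.23.1 and revision 1240450 (hypothetical values), getVersion() now yields

    0.23.1, r1240450

where it previously yielded just 0.23.1.
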
Modified: hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileUnderConstruction.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileUnderConstruction.java?rev=1240450&r1=1240449&r2=1240450&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileUnderConstruction.java (original)
+++ hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileUnderConstruction.java Sat Feb  4 03:40:45 2012
@@ -41,8 +41,20 @@ public class INodeFileUnderConstruction 
                              String clientName,
                              String clientMachine,
                              DatanodeDescriptor clientNode) {
-    super(permissions.applyUMask(UMASK), 0, replication, modTime, modTime,
-        preferredBlockSize);
+    this(permissions, 0, replication, preferredBlockSize, modTime,
+        clientName, clientMachine, clientNode);
+  }
+
+  INodeFileUnderConstruction(PermissionStatus permissions,
+                             int nrBlocks,
+                             short replication,
+                             long preferredBlockSize,
+                             long modTime,
+                             String clientName,
+                             String clientMachine,
+                             DatanodeDescriptor clientNode) {
+    super(permissions.applyUMask(UMASK), nrBlocks, replication,
+        modTime, modTime, preferredBlockSize);
     this.clientName = clientName;
     this.clientMachine = clientMachine;
     this.clientNode = clientNode;

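The existing constructor now telescopes into a new package-private one that also accepts an nrBlocks count, so edit-log replay can size the inode's block list up front while existing callers implicitly keep passing 0. The same pattern in miniature (hypothetical class, not the HDFS type):

    class FileMeta {
      final int nrBlocks;
      FileMeta() { this(0); }        // legacy entry point delegates ...
      FileMeta(int nrBlocks) {       // ... to the more general form
        this.nrBlocks = nrBlocks;
      }
    }
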
Modified: hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DelegationTokenFetcher.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DelegationTokenFetcher.java?rev=1240450&r1=1240449&r2=1240450&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DelegationTokenFetcher.java (original)
+++ hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DelegationTokenFetcher.java Sat Feb  4 03:40:45 2012
@@ -25,6 +25,7 @@ import java.io.InputStream;
 import java.io.InputStreamReader;
 import java.io.PrintStream;
 import java.net.HttpURLConnection;
+import java.net.InetSocketAddress;
 import java.net.URL;
 import java.net.URLConnection;
 import java.security.PrivilegedExceptionAction;
@@ -49,6 +50,7 @@ import org.apache.hadoop.hdfs.server.nam
 import org.apache.hadoop.hdfs.server.namenode.RenewDelegationTokenServlet;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.Text;
+import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.Credentials;
 import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.UserGroupInformation;
@@ -201,7 +203,8 @@ public class DelegationTokenFetcher {
   static public Credentials getDTfromRemote(String nnAddr, 
       String renewer) throws IOException {
     DataInputStream dis = null;
-
+    InetSocketAddress serviceAddr = NetUtils.createSocketAddr(nnAddr);
+    
     try {
       StringBuffer url = new StringBuffer();
       if (renewer != null) {
@@ -221,9 +224,7 @@ public class DelegationTokenFetcher {
       ts.readFields(dis);
       for(Token<?> token: ts.getAllTokens()) {
         token.setKind(HftpFileSystem.TOKEN_KIND);
-        token.setService(new Text(SecurityUtil.buildDTServiceName
-                                   (remoteURL.toURI(), 
-                                    DFSConfigKeys.DFS_HTTPS_PORT_DEFAULT)));
+        SecurityUtil.setTokenService(token, serviceAddr);
       }
       return ts;
     } catch (Exception e) {

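The fetcher now derives the token service from the NameNode address it was handed, instead of rebuilding it from the HTTPS URL, which keeps the service name consistent with the hadoop.security.token.service.use_ip resolver setting. A hedged sketch of the new path, with a hypothetical address (token being a Token<?> as in the loop above):

    // Name the token service directly from the NN address.
    InetSocketAddress serviceAddr = NetUtils.createSocketAddr("nn.example.com:8020");
    SecurityUtil.setTokenService(token, serviceAddr);  // e.g. "10.1.2.3:8020" when use_ip=true
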
Propchange: hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/native/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Sat Feb  4 03:40:45 2012
@@ -1,5 +1,5 @@
-/hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/native:1227776-1239397
-/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native:1161777,1161781,1162188,1162421,1162491,1162499,1162613,1162928,1162954,1162979,1163050,1163069,1163081,1163490,1163768,1164255,1164301,1164339,1166402,1167383,1170379,1170459,1171297,1172916,1173402,1176550,1177487,1177531,1177859,1177864,1182189,1182205,1182214,1189932,1189982,1195575,1196113,1196129,1204114,1204117,1204122,1204124,1204129,1204131,1204177,1204370,1204376,1204388,1205260,1205697,1206786,1206830,1207694,1208153,1208313,1212021,1212062,1212073,1212084,1213537,1213586,1213592-1213593,1213954,1214046,1220510,1221348,1225114,1225192,1225456,1225489,1225591,1226211,1226239,1226350,1227091,1227165,1227423,1227964,1229347,1230398,1231569,1231572,1231627,1231640,1233605,1234555,1235135,1235137,1235956,1236456,1238779
+/hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/native:1227776-1240449
+/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native:1161777,1161781,1162188,1162421,1162491,1162499,1162613,1162928,1162954,1162979,1163050,1163069,1163081,1163490,1163768,1164255,1164301,1164339,1166402,1167383,1170379,1170459,1171297,1172916,1173402,1176550,1177487,1177531,1177859,1177864,1182189,1182205,1182214,1189932,1189982,1195575,1196113,1196129,1204114,1204117,1204122,1204124,1204129,1204131,1204177,1204370,1204376,1204388,1205260,1205697,1206786,1206830,1207694,1208153,1208313,1212021,1212062,1212073,1212084,1213537,1213586,1213592-1213593,1213954,1214046,1220510,1221348,1225114,1225192,1225456,1225489,1225591,1226211,1226239,1226350,1227091,1227165,1227423,1227964,1229347,1230398,1231569,1231572,1231627,1231640,1233605,1234555,1235135,1235137,1235956,1236456,1238779,1239752
 /hadoop/core/branches/branch-0.19/mapred/src/c++/libhdfs:713112
 /hadoop/core/trunk/src/c++/libhdfs:776175-784663
 /hadoop/hdfs/branches/HDFS-1052/src/c++/libhdfs:987665-1095512

Propchange: hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Sat Feb  4 03:40:45 2012
@@ -1,5 +1,5 @@
-/hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode:1227776-1239397
-/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode:1161777,1161781,1162188,1162421,1162491,1162499,1162613,1162928,1162954,1162979,1163050,1163069,1163081,1163490,1163768,1164255,1164301,1164339,1166402,1167383,1170379,1170459,1171297,1172916,1173402,1176550,1177487,1177531,1177859,1177864,1182189,1182205,1182214,1189932,1189982,1195575,1196113,1196129,1204114,1204117,1204122,1204124,1204129,1204131,1204177,1204370,1204376,1204388,1205260,1205697,1206786,1206830,1207694,1208153,1208313,1212021,1212062,1212073,1212084,1213537,1213586,1213592-1213593,1213954,1214046,1220510,1221348,1225114,1225192,1225456,1225489,1225591,1226211,1226239,1226350,1227091,1227165,1227423,1227964,1229347,1230398,1231569,1231572,1231627,1231640,1233605,1234555,1235135,1235137,1235956,1236456,1238779
+/hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode:1227776-1240449
+/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode:1161777,1161781,1162188,1162421,1162491,1162499,1162613,1162928,1162954,1162979,1163050,1163069,1163081,1163490,1163768,1164255,1164301,1164339,1166402,1167383,1170379,1170459,1171297,1172916,1173402,1176550,1177487,1177531,1177859,1177864,1182189,1182205,1182214,1189932,1189982,1195575,1196113,1196129,1204114,1204117,1204122,1204124,1204129,1204131,1204177,1204370,1204376,1204388,1205260,1205697,1206786,1206830,1207694,1208153,1208313,1212021,1212062,1212073,1212084,1213537,1213586,1213592-1213593,1213954,1214046,1220510,1221348,1225114,1225192,1225456,1225489,1225591,1226211,1226239,1226350,1227091,1227165,1227423,1227964,1229347,1230398,1231569,1231572,1231627,1231640,1233605,1234555,1235135,1235137,1235956,1236456,1238779,1239752
 /hadoop/core/branches/branch-0.19/hdfs/src/webapps/datanode:713112
 /hadoop/core/trunk/src/webapps/datanode:776175-784663
 /hadoop/hdfs/branches/HDFS-1052/src/webapps/datanode:987665-1095512

Propchange: hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Sat Feb  4 03:40:45 2012
@@ -1,5 +1,5 @@
-/hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs:1227776-1239397
-/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs:1161777,1161781,1162188,1162421,1162491,1162499,1162613,1162928,1162954,1162979,1163050,1163069,1163081,1163490,1163768,1164255,1164301,1164339,1166402,1167383,1170379,1170459,1171297,1172916,1173402,1176550,1177487,1177531,1177859,1177864,1182189,1182205,1182214,1189932,1189982,1195575,1196113,1196129,1204114,1204117,1204122,1204124,1204129,1204131,1204177,1204370,1204376,1204388,1205260,1205697,1206786,1206830,1207694,1208153,1208313,1212021,1212062,1212073,1212084,1213537,1213586,1213592-1213593,1213954,1214046,1220510,1221348,1225114,1225192,1225456,1225489,1225591,1226211,1226239,1226350,1227091,1227165,1227423,1227964,1229347,1230398,1231569,1231572,1231627,1231640,1233605,1234555,1235135,1235137,1235956,1236456,1238779
+/hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs:1227776-1240449
+/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs:1161777,1161781,1162188,1162421,1162491,1162499,1162613,1162928,1162954,1162979,1163050,1163069,1163081,1163490,1163768,1164255,1164301,1164339,1166402,1167383,1170379,1170459,1171297,1172916,1173402,1176550,1177487,1177531,1177859,1177864,1182189,1182205,1182214,1189932,1189982,1195575,1196113,1196129,1204114,1204117,1204122,1204124,1204129,1204131,1204177,1204370,1204376,1204388,1205260,1205697,1206786,1206830,1207694,1208153,1208313,1212021,1212062,1212073,1212084,1213537,1213586,1213592-1213593,1213954,1214046,1220510,1221348,1225114,1225192,1225456,1225489,1225591,1226211,1226239,1226350,1227091,1227165,1227423,1227964,1229347,1230398,1231569,1231572,1231627,1231640,1233605,1234555,1235135,1235137,1235956,1236456,1238779,1239752
 /hadoop/core/branches/branch-0.19/hdfs/src/webapps/hdfs:713112
 /hadoop/core/trunk/src/webapps/hdfs:776175-784663
 /hadoop/hdfs/branches/HDFS-1052/src/webapps/hdfs:987665-1095512

Propchange: hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Sat Feb  4 03:40:45 2012
@@ -1,5 +1,5 @@
-/hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary:1227776-1239397
-/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary:1161777,1161781,1162188,1162421,1162491,1162499,1162613,1162928,1162954,1162979,1163050,1163069,1163081,1163490,1163768,1164255,1164301,1164339,1166402,1167383,1170379,1170459,1171297,1172916,1173402,1176550,1177487,1177531,1177859,1177864,1182189,1182205,1182214,1189932,1189982,1195575,1196113,1196129,1204114,1204117,1204122,1204124,1204129,1204131,1204177,1204370,1204376,1204388,1205260,1205697,1206786,1206830,1207694,1208153,1208313,1212021,1212062,1212073,1212084,1213537,1213586,1213592-1213593,1213954,1214046,1220510,1221348,1225114,1225192,1225456,1225489,1225591,1226211,1226239,1226350,1227091,1227165,1227423,1227964,1229347,1230398,1231569,1231572,1231627,1231640,1233605,1234555,1235135,1235137,1235956,1236456,1238779
+/hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary:1227776-1240449
+/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary:1161777,1161781,1162188,1162421,1162491,1162499,1162613,1162928,1162954,1162979,1163050,1163069,1163081,1163490,1163768,1164255,1164301,1164339,1166402,1167383,1170379,1170459,1171297,1172916,1173402,1176550,1177487,1177531,1177859,1177864,1182189,1182205,1182214,1189932,1189982,1195575,1196113,1196129,1204114,1204117,1204122,1204124,1204129,1204131,1204177,1204370,1204376,1204388,1205260,1205697,1206786,1206830,1207694,1208153,1208313,1212021,1212062,1212073,1212084,1213537,1213586,1213592-1213593,1213954,1214046,1220510,1221348,1225114,1225192,1225456,1225489,1225591,1226211,1226239,1226350,1227091,1227165,1227423,1227964,1229347,1230398,1231569,1231572,1231627,1231640,1233605,1234555,1235135,1235137,1235956,1236456,1238779,1239752
 /hadoop/core/branches/branch-0.19/hdfs/src/webapps/secondary:713112
 /hadoop/core/trunk/src/webapps/secondary:776175-784663
 /hadoop/hdfs/branches/HDFS-1052/src/webapps/secondary:987665-1095512

Propchange: hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Sat Feb  4 03:40:45 2012
@@ -1,5 +1,5 @@
-/hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs:1227776-1239397
-/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs:1161777,1161781,1162188,1162421,1162491,1162499,1162613,1162928,1162954,1162979,1163050,1163069,1163081,1163490,1163768,1164255,1164301,1164339,1166402,1167383,1167662,1170379,1170459,1171297,1172916,1173402,1176550,1177487,1177531,1177859,1177864,1182189,1182205,1182214,1189932,1189982,1195575,1196113,1196129,1204114,1204117,1204122,1204124,1204129,1204131,1204177,1204370,1204376,1204388,1205260,1205697,1206786,1206830,1207694,1208153,1208313,1212021,1212062,1212073,1212084,1213537,1213586,1213592-1213593,1213954,1214046,1220510,1221348,1225114,1225192,1225456,1225489,1225591,1226211,1226239,1226350,1227091,1227165,1227423,1227964,1229347,1230398,1231569,1231572,1231627,1231640,1233605,1234555,1235135,1235137,1235956,1236456,1238779
+/hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs:1227776-1240449
+/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs:1161777,1161781,1162188,1162421,1162491,1162499,1162613,1162928,1162954,1162979,1163050,1163069,1163081,1163490,1163768,1164255,1164301,1164339,1166402,1167383,1167662,1170379,1170459,1171297,1172916,1173402,1176550,1177487,1177531,1177859,1177864,1182189,1182205,1182214,1189932,1189982,1195575,1196113,1196129,1204114,1204117,1204122,1204124,1204129,1204131,1204177,1204370,1204376,1204388,1205260,1205697,1206786,1206830,1207694,1208153,1208313,1212021,1212062,1212073,1212084,1213537,1213586,1213592-1213593,1213954,1214046,1220510,1221348,1225114,1225192,1225456,1225489,1225591,1226211,1226239,1226350,1227091,1227165,1227423,1227964,1229347,1230398,1231569,1231572,1231627,1231640,1233605,1234555,1235135,1235137,1235956,1236456,1238779,1239752
 /hadoop/core/branches/branch-0.19/hdfs/src/test/hdfs:713112
 /hadoop/core/trunk/src/test/hdfs:776175-785643
 /hadoop/hdfs/branches/HDFS-1052/src/test/hdfs:987665-1095512

Modified: hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java?rev=1240450&r1=1240449&r2=1240450&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java (original)
+++ hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java Sat Feb  4 03:40:45 2012
@@ -882,6 +882,8 @@ public class MiniDFSCluster {
       if(dn == null)
         throw new IOException("Cannot start DataNode in "
             + dnConf.get(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY));
+      //NOTE: the following is true if and only if:
+      //      hadoop.security.token.service.use_ip=true
       //since the HDFS does things based on IP:port, we need to add the mapping
       //for IP:port to rackId
       String ipAddr = dn.getSelfAddr().getAddress().getHostAddress();

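The new NOTE makes the rack-mapping assumption explicit: the IP:port keying only holds under the default resolver mode. For reference, the setting it names is a core-site.xml property (illustrative snippet; true is the default):

    <property>
      <name>hadoop.security.token.service.use_ip</name>
      <value>true</value>
    </property>
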
Modified: hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAbandonBlock.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAbandonBlock.java?rev=1240450&r1=1240449&r2=1240450&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAbandonBlock.java (original)
+++ hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAbandonBlock.java Sat Feb  4 03:40:45 2012
@@ -72,12 +72,20 @@ public class TestAbandonBlock {
 
     // Now abandon the last block
     DFSClient dfsclient = DFSClientAdapter.getDFSClient((DistributedFileSystem)fs);
-    LocatedBlocks blocks = dfsclient.getNamenode().getBlockLocations(src, 0, 1);
+    LocatedBlocks blocks =
+      dfsclient.getNamenode().getBlockLocations(src, 0, Integer.MAX_VALUE);
+    int originalNumBlocks = blocks.locatedBlockCount();
     LocatedBlock b = blocks.getLastLocatedBlock();
     dfsclient.getNamenode().abandonBlock(b.getBlock(), src, dfsclient.clientName);
 
     // And close the file
     fout.close();
+
+    // Restart the NameNode and check that the block has been abandoned
+    cluster.restartNameNode();
+    blocks = dfsclient.getNamenode().getBlockLocations(src, 0, Integer.MAX_VALUE);
+    assert originalNumBlocks == blocks.locatedBlockCount() + 1 :
+      "Block " + b + " has not been abandoned.";
   }
 
   @Test

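The post-restart check above uses a bare Java assert, which only fires when the JVM runs with -ea. An equivalent JUnit-style check (a suggestion, not part of this commit) would fail unconditionally:

    Assert.assertEquals("Block " + b + " has not been abandoned after restart",
        originalNumBlocks - 1, blocks.locatedBlockCount());
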
Modified: hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientReportBadBlock.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientReportBadBlock.java?rev=1240450&r1=1240449&r2=1240450&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientReportBadBlock.java (original)
+++ hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientReportBadBlock.java Sat Feb  4 03:40:45 2012
@@ -18,6 +18,7 @@
 package org.apache.hadoop.hdfs;
 
 import java.io.ByteArrayOutputStream;
+import java.io.File;
 import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.io.PrintStream;
@@ -35,16 +36,15 @@ import org.apache.hadoop.hdfs.protocol.E
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
-import org.apache.hadoop.hdfs.server.datanode.FSDataset;
+import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.namenode.NamenodeFsck;
 import org.apache.hadoop.hdfs.tools.DFSck;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.util.ToolRunner;
-
-import org.junit.Before;
 import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
 import org.junit.Test;
-import junit.framework.Assert;
 
 /**
  * Class is used to test client reporting corrupted block replica to name node.
@@ -305,9 +305,9 @@ public class TestClientReportBadBlock {
    */
   private static void corruptBlock(final ExtendedBlock block, final DataNode dn)
       throws FileNotFoundException, IOException {
-    final FSDataset data = (FSDataset) dn.getFSDataset();
-    final RandomAccessFile raFile = new RandomAccessFile(
-        data.getBlockFile(block), "rw");
+    final File f = DataNodeTestUtils.getBlockFile(
+        dn, block.getBlockPoolId(), block.getLocalBlock());
+    final RandomAccessFile raFile = new RandomAccessFile(f, "rw");
     final byte[] bytes = new byte[(int) BLOCK_SIZE];
     for (int i = 0; i < BLOCK_SIZE; i++) {
       bytes[i] = (byte) (i);

Modified: hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java?rev=1240450&r1=1240449&r2=1240450&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java (original)
+++ hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java Sat Feb  4 03:40:45 2012
@@ -46,7 +46,6 @@ import org.apache.hadoop.fs.permission.F
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
-import org.apache.hadoop.hdfs.server.datanode.FSDataset;
 import org.apache.hadoop.hdfs.tools.DFSAdmin;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.security.UserGroupInformation;
@@ -1164,9 +1163,9 @@ public class TestDFSShell extends TestCa
     String poolId = cluster.getNamesystem().getBlockPoolId();
     Iterable<Block>[] blocks = cluster.getAllBlockReports(poolId);
     for(int i = 0; i < blocks.length; i++) {
-      FSDataset ds = (FSDataset)datanodes.get(i).getFSDataset();
+      DataNode dn = datanodes.get(i);
       for(Block b : blocks[i]) {
-        files.add(DataNodeTestUtils.getBlockFile(ds, poolId, b.getBlockId()));
+        files.add(DataNodeTestUtils.getFile(dn, poolId, b.getBlockId()));
       }        
     }
     return files;


