hadoop-hdfs-commits mailing list archives

From: szets...@apache.org
Subject: svn commit: r1297334 - in /hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs: ./ src/main/java/ src/main/java/org/apache/hadoop/hdfs/ src/test/java/org/apache/hadoop/hdfs/
Date: Tue, 06 Mar 2012 03:14:22 GMT
Author: szetszwo
Date: Tue Mar  6 03:14:21 2012
New Revision: 1297334

URL: http://svn.apache.org/viewvc?rev=1297334&view=rev
Log:
svn merge -c 1297328 from trunk for HDFS-3032.
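
In plain terms, HDFS-3032 changes the retry policy of DFSClient.renewLease(): a failed renewal is retried only while the time since the last successful renewal is still within the lease soft-limit; beyond that point the client closes all files being written, since the NameNode may by then legally hand the lease to another client. The following minimal, self-contained Java sketch models that policy; the names are hypothetical (not the Hadoop API), and the 60-second constant is an assumption matching HdfsConstants.LEASE_SOFTLIMIT_PERIOD.

    import java.io.IOException;

    // Sketch only; models the HDFS-3032 retry policy, not DFSClient itself.
    class SoftLimitRenewal {
      // Assumption: 60 s soft limit, as in HdfsConstants.LEASE_SOFTLIMIT_PERIOD.
      static final long SOFT_LIMIT_MS = 60 * 1000;

      interface LeaseRpc { void renewLease(String client) throws IOException; }

      private volatile long lastRenewalMs = System.currentTimeMillis();

      /** Returns true if renewed, false if the lease was given up. */
      boolean renewOnce(LeaseRpc namenode, String client) throws IOException {
        try {
          namenode.renewLease(client);          // may fail transiently
          lastRenewalMs = System.currentTimeMillis();
          return true;
        } catch (IOException e) {
          final long elapsed = System.currentTimeMillis() - lastRenewalMs;
          if (elapsed > SOFT_LIMIT_MS) {
            abortOpenWriters();                 // lease may be lost; stop retrying
            return false;
          }
          throw e;                              // caller retries on the next cycle
        }
      }

      void abortOpenWriters() { /* close every file being written */ }
    }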

Modified:
    hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/   (props changed)
    hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
    hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/   (props changed)
    hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
    hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/LeaseRenewer.java
    hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLease.java

Propchange: hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Tue Mar  6 03:14:21 2012
@@ -1,5 +1,5 @@
 /hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs:1227776-1294021
-/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs:1161777,1161781,1161992,1162188,1162421,1162491,1162499,1162613,1162928,1162954,1162979,1163050,1163069,1163081,1163490,1163768,1164255,1164301,1164339,1165826,1166402,1166466,1167383,1167662,1170085,1170379,1170459,1170996,1171136,1171297,1171379,1171611,1171711,1172916,1173402,1173468,1173488,1175113,1176178,1176550,1176719,1176729,1176733,1177100,1177161,1177487,1177531,1177757,1177859,1177864,1177905,1179169,1179856,1179861,1180757,1182189,1182205,1182214,1183081,1183098,1183175,1183554,1186508,1186896,1187140,1187505,1188282,1188286,1188300,1188436,1188487,1189028,1189355,1189360,1189546,1189613,1189901,1189932,1189982,1190077,1190127,1190620,1190708,1195575,1195656,1195731,1195754,1196113,1196129,1196171,1197329,1198903,1199396,1200731,1204114,1204117,1204122,1204124,1204129,1204131,1204177,1204366,1204370,1204376,1204388,1204544,1204707,1204709,1204825,1205146,1205260,1205626,1205697,1206178,1206786,1206830,1207585,1207694,1208140,1208153,1208313,1210208,1210657,1210719,1210746,1211206,1211249,1211769,1212021,1212062,1212073,1212084,1212299,1212606,1213040,1213143,1213537,1213586,1213592-1213593,1213808,1213813,1213954,1213985,1214027,1214033,1214046,1214102-1214103,1214128,1215364,1215366,1220315,1220510,1221106,1221348,1225114,1225192,1225456,1225489,1225591,1226211,1226239,1226350,1227091,1227165,1227423,1227887,1227964,1229347,1229877,1229897,1230398,1231569,1231572,1231627,1231640,1233584,1233605,1234555,1235135,1235137,1235956,1236456,1238700,1238779,1238969,1239752,1240020,1240653,1240897,1240928,1241007,1241519,1242087,1242891,1243065,1243104,1243654,1243690,1244766,1245751,1245762,1292419,1292626,1293419,1293487,1295061,1295227,1295929
+/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs:1161777,1161781,1161992,1162188,1162421,1162491,1162499,1162613,1162928,1162954,1162979,1163050,1163069,1163081,1163490,1163768,1164255,1164301,1164339,1165826,1166402,1166466,1167383,1167662,1170085,1170379,1170459,1170996,1171136,1171297,1171379,1171611,1171711,1172916,1173402,1173468,1173488,1175113,1176178,1176550,1176719,1176729,1176733,1177100,1177161,1177487,1177531,1177757,1177859,1177864,1177905,1179169,1179856,1179861,1180757,1182189,1182205,1182214,1183081,1183098,1183175,1183554,1186508,1186896,1187140,1187505,1188282,1188286,1188300,1188436,1188487,1189028,1189355,1189360,1189546,1189613,1189901,1189932,1189982,1190077,1190127,1190620,1190708,1195575,1195656,1195731,1195754,1196113,1196129,1196171,1197329,1198903,1199396,1200731,1204114,1204117,1204122,1204124,1204129,1204131,1204177,1204366,1204370,1204376,1204388,1204544,1204707,1204709,1204825,1205146,1205260,1205626,1205697,1206178,1206786,1206830,1207585,1207694,1208140,1208153,1208313,1210208,1210657,1210719,1210746,1211206,1211249,1211769,1212021,1212062,1212073,1212084,1212299,1212606,1213040,1213143,1213537,1213586,1213592-1213593,1213808,1213813,1213954,1213985,1214027,1214033,1214046,1214102-1214103,1214128,1215364,1215366,1220315,1220510,1221106,1221348,1225114,1225192,1225456,1225489,1225591,1226211,1226239,1226350,1227091,1227165,1227423,1227887,1227964,1229347,1229877,1229897,1230398,1231569,1231572,1231627,1231640,1233584,1233605,1234555,1235135,1235137,1235956,1236456,1238700,1238779,1238969,1239752,1240020,1240653,1240897,1240928,1241007,1241519,1242087,1242891,1243065,1243104,1243654,1243690,1244766,1245751,1245762,1292419,1292626,1293419,1293487,1295061,1295227,1295929,1297328
 /hadoop/core/branches/branch-0.19/hdfs:713112
 /hadoop/hdfs/branches/HDFS-1052:987665-1095512
 /hadoop/hdfs/branches/HDFS-265:796829-820463

Modified: hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1297334&r1=1297333&r2=1297334&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original)
+++ hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Tue Mar  6 03:14:21 2012
@@ -217,6 +217,9 @@ Release 0.23.2 - UNRELEASED
     HDFS-3012. Exception while renewing delegation token. (Bobby Evans via
     jitendra)
 
+    HDFS-3032. Change DFSClient.renewLease() so that it only retries up to the
+    lease soft-limit.  (Kihwal Lee via szetszwo)
+
 Release 0.23.1 - 2012-02-17 
 
   INCOMPATIBLE CHANGES

Propchange: hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Tue Mar  6 03:14:21 2012
@@ -1,5 +1,5 @@
 /hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/main/java:1227776-1294021
-/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java:1161777,1161781,1161992,1162188,1162421,1162491,1162499,1162613,1162928,1162954,1162979,1163050,1163069,1163081,1163490,1163768,1164255,1164301,1164339,1165826,1166402,1166466,1167383,1167662,1170085,1170379,1170459,1170996,1171136,1171297,1171379,1171611,1171711,1172916,1173402,1173468,1173488,1175113,1176178,1176550,1176719,1176729,1176733,1177100,1177161,1177487,1177531,1177757,1177859,1177864,1177905,1179169,1179856,1179861,1180757,1182189,1182205,1182214,1183081,1183098,1183175,1183554,1186508,1186896,1187140,1187505,1188282,1188286,1188300,1188436,1188487,1189028,1189355,1189360,1189546,1189613,1189901,1189932,1189982,1190077,1190127,1190620,1190708,1195575,1195656,1195731,1195754,1196113,1196129,1196171,1197329,1198903,1199396,1200731,1204114,1204117,1204122,1204124,1204129,1204131,1204177,1204366,1204370,1204376,1204388,1204544,1204707,1205146,1205260,1205697,1206786,1206830,1207694,1208140,1208153,1208313,1210208,1210657,1210719,1210746,1211206,1211249,1211769,1212021,1212062,1212073,1212084,1212299,1212606,1213040,1213143,1213537,1213586,1213592-1213593,1213808,1213813,1213954,1213985,1214027,1214033,1214046,1214102-1214103,1214128,1215364,1215366,1220315,1220510,1221106,1221348,1225114,1225192,1225456,1225489,1225591,1226211,1226239,1226350,1227091,1227165,1227423,1227887,1227964,1229347,1229877,1229897,1230398,1231569,1231572,1231627,1231640,1233584,1233605,1234555,1235135,1235137,1235956,1236456,1238700,1238779,1238969,1239752,1240020,1240653,1240897,1240928,1241007,1241519,1242087,1242891,1243065,1243104,1243654,1244766,1245751,1245762,1292419,1293419,1293487,1295061,1295227,1295929
+/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java:1161777,1161781,1161992,1162188,1162421,1162491,1162499,1162613,1162928,1162954,1162979,1163050,1163069,1163081,1163490,1163768,1164255,1164301,1164339,1165826,1166402,1166466,1167383,1167662,1170085,1170379,1170459,1170996,1171136,1171297,1171379,1171611,1171711,1172916,1173402,1173468,1173488,1175113,1176178,1176550,1176719,1176729,1176733,1177100,1177161,1177487,1177531,1177757,1177859,1177864,1177905,1179169,1179856,1179861,1180757,1182189,1182205,1182214,1183081,1183098,1183175,1183554,1186508,1186896,1187140,1187505,1188282,1188286,1188300,1188436,1188487,1189028,1189355,1189360,1189546,1189613,1189901,1189932,1189982,1190077,1190127,1190620,1190708,1195575,1195656,1195731,1195754,1196113,1196129,1196171,1197329,1198903,1199396,1200731,1204114,1204117,1204122,1204124,1204129,1204131,1204177,1204366,1204370,1204376,1204388,1204544,1204707,1205146,1205260,1205697,1206786,1206830,1207694,1208140,1208153,1208313,1210208,1210657,1210719,1210746,1211206,1211249,1211769,1212021,1212062,1212073,1212084,1212299,1212606,1213040,1213143,1213537,1213586,1213592-1213593,1213808,1213813,1213954,1213985,1214027,1214033,1214046,1214102-1214103,1214128,1215364,1215366,1220315,1220510,1221106,1221348,1225114,1225192,1225456,1225489,1225591,1226211,1226239,1226350,1227091,1227165,1227423,1227887,1227964,1229347,1229877,1229897,1230398,1231569,1231572,1231627,1231640,1233584,1233605,1234555,1235135,1235137,1235956,1236456,1238700,1238779,1238969,1239752,1240020,1240653,1240897,1240928,1241007,1241519,1242087,1242891,1243065,1243104,1243654,1244766,1245751,1245762,1292419,1293419,1293487,1295061,1295227,1295929,1297328
 /hadoop/core/branches/branch-0.19/hdfs/src/java:713112
 /hadoop/core/trunk/src/hdfs:776175-785643,785929-786278
 /hadoop/hdfs/branches/HDFS-1052/src/java:987665-1095512

Modified: hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java?rev=1297334&r1=1297333&r2=1297334&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java (original)
+++ hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java Tue Mar  6 03:14:21 2012
@@ -18,6 +18,31 @@
  */
 package org.apache.hadoop.hdfs;
 
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_SIZE_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_SIZE_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_BLOCK_WRITE_LOCATEFOLLOWINGBLOCK_RETRIES_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_BLOCK_WRITE_LOCATEFOLLOWINGBLOCK_RETRIES_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_BLOCK_WRITE_RETRIES_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_BLOCK_WRITE_RETRIES_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_CACHED_CONN_RETRY_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_CACHED_CONN_RETRY_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_READ_PREFETCH_SIZE_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_SOCKET_CACHE_CAPACITY_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_SOCKET_CACHE_CAPACITY_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADER;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADER_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_SOCKET_WRITE_TIMEOUT_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_KEY;
+
 import java.io.BufferedOutputStream;
 import java.io.Closeable;
 import java.io.DataInputStream;
@@ -59,7 +84,6 @@ import org.apache.hadoop.fs.ParentNotDir
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.UnresolvedLinkException;
 import org.apache.hadoop.fs.permission.FsPermission;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.*;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks;
 import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException;
@@ -83,6 +107,7 @@ import org.apache.hadoop.hdfs.protocol.p
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
+import org.apache.hadoop.hdfs.security.token.block.InvalidBlockTokenException;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.common.UpgradeStatusReport;
@@ -105,7 +130,6 @@ import org.apache.hadoop.security.token.
 import org.apache.hadoop.security.token.TokenRenewer;
 import org.apache.hadoop.util.DataChecksum;
 import org.apache.hadoop.util.Progressable;
-import org.apache.hadoop.hdfs.security.token.block.InvalidBlockTokenException;
 
 /********************************************************
  * DFSClient can connect to a Hadoop Filesystem and 
@@ -127,6 +151,7 @@ public class DFSClient implements java.i
   private final InetSocketAddress nnAddress;
   final UserGroupInformation ugi;
   volatile boolean clientRunning = true;
+  volatile long lastLeaseRenewal;
   private volatile FsServerDefaults serverDefaults;
   private volatile long serverDefaultsLastUpdate;
   final String clientName;
@@ -351,6 +376,12 @@ public class DFSClient implements java.i
   void putFileBeingWritten(final String src, final DFSOutputStream out) {
     synchronized(filesBeingWritten) {
       filesBeingWritten.put(src, out);
+      // Update the last lease renewal time only when there were no
+      // writes. Once there is one write stream open, the lease renewer
+      // thread keeps it updated well within anyone's expiration time.
+      if (lastLeaseRenewal == 0) {
+        updateLastLeaseRenewal();
+      }
     }
   }
 
@@ -358,6 +389,9 @@ public class DFSClient implements java.i
   void removeFileBeingWritten(final String src) {
     synchronized(filesBeingWritten) {
       filesBeingWritten.remove(src);
+      if (filesBeingWritten.isEmpty()) {
+        lastLeaseRenewal = 0;
+      }
     }
   }
 
@@ -373,6 +407,19 @@ public class DFSClient implements java.i
     return clientRunning;
   }
 
+  long getLastLeaseRenewal() {
+    return lastLeaseRenewal;
+  }
+
+  void updateLastLeaseRenewal() {
+    synchronized(filesBeingWritten) {
+      if (filesBeingWritten.isEmpty()) {
+        return;
+      }
+      lastLeaseRenewal = System.currentTimeMillis();
+    }
+  }
+
   /**
    * Renew leases.
    * @return true if lease was renewed. May return false if this
@@ -380,8 +427,24 @@ public class DFSClient implements java.i
    **/
   boolean renewLease() throws IOException {
     if (clientRunning && !isFilesBeingWrittenEmpty()) {
-      namenode.renewLease(clientName);
-      return true;
+      try {
+        namenode.renewLease(clientName);
+        updateLastLeaseRenewal();
+        return true;
+      } catch (IOException e) {
+        // Abort if the lease has already expired. 
+        final long elapsed = System.currentTimeMillis() - getLastLeaseRenewal();
+        if (elapsed > HdfsConstants.LEASE_SOFTLIMIT_PERIOD) {
+          LOG.warn("Failed to renew lease for " + clientName + " for "
+              + (elapsed/1000) + " seconds (>= soft-limit ="
+              + (HdfsConstants.LEASE_SOFTLIMIT_PERIOD/1000) + " seconds.) "
+              + "Closing all files being written ...", e);
+          closeAllFilesBeingWritten(true);
+        } else {
+          // Let the lease renewer handle it and retry.
+          throw e;
+        }
+      }
     }
     return false;
   }
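
Note how the lastLeaseRenewal field is kept meaningful only while writers exist: putFileBeingWritten() seeds it on the empty-to-non-empty transition, removeFileBeingWritten() zeroes it when the last writer closes, and updateLastLeaseRenewal() declines to stamp the clock when no files are open. A stripped-down sketch of that invariant, using hypothetical names:

    import java.util.HashMap;
    import java.util.Map;

    class WriterClock {
      private final Map<String, Object> filesBeingWritten =
          new HashMap<String, Object>();
      private volatile long lastLeaseRenewal;    // 0 means "no open writers"

      void addWriter(String src, Object out) {
        synchronized (filesBeingWritten) {
          filesBeingWritten.put(src, out);
          if (lastLeaseRenewal == 0) {           // first writer starts the clock
            touch();
          }
        }
      }

      void removeWriter(String src) {
        synchronized (filesBeingWritten) {
          filesBeingWritten.remove(src);
          if (filesBeingWritten.isEmpty()) {
            lastLeaseRenewal = 0;                // no writers, no lease to protect
          }
        }
      }

      void touch() {                             // called after a successful renewal
        synchronized (filesBeingWritten) {
          if (!filesBeingWritten.isEmpty()) {
            lastLeaseRenewal = System.currentTimeMillis();
          }
        }
      }
    }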

Modified: hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/LeaseRenewer.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/LeaseRenewer.java?rev=1297334&r1=1297333&r2=1297334&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/LeaseRenewer.java (original)
+++ hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/LeaseRenewer.java Tue Mar  6 03:14:21 2012
@@ -430,7 +430,8 @@ class LeaseRenewer {
     for(long lastRenewed = System.currentTimeMillis();
         clientsRunning() && !Thread.interrupted();
         Thread.sleep(getSleepPeriod())) {
-      if (System.currentTimeMillis() - lastRenewed >= getRenewalTime()) {
+      final long elapsed = System.currentTimeMillis() - lastRenewed;
+      if (elapsed >= getRenewalTime()) {
         try {
           renew();
           if (LOG.isDebugEnabled()) {
@@ -440,7 +441,7 @@ class LeaseRenewer {
           lastRenewed = System.currentTimeMillis();
         } catch (SocketTimeoutException ie) {
           LOG.warn("Failed to renew lease for " + clientsString() + " for "
-              + (getRenewalTime()/1000) + " seconds.  Aborting ...", ie);
+              + (elapsed/1000) + " seconds.  Aborting ...", ie);
           synchronized (this) {
             for(DFSClient c : dfsclients) {
               c.abort();
@@ -449,8 +450,7 @@ class LeaseRenewer {
           break;
         } catch (IOException ie) {
           LOG.warn("Failed to renew lease for " + clientsString() + " for "
-              + (getRenewalTime()/1000) + " seconds.  Will retry shortly ...",
-              ie);
+              + (elapsed/1000) + " seconds.  Will retry shortly ...", ie);
         }
       }
 

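The LeaseRenewer hunks above carry a companion logging fix: the warnings used to report getRenewalTime()/1000, the configured renewal interval, as the number of seconds renewal had been failing, whereas they now report the elapsed time actually measured since the last successful renewal. A compilable sketch of the corrected loop shape, again with hypothetical names:

    import java.io.IOException;

    class RenewerLoop {
      interface Renewable { void renew() throws IOException; }

      static void run(Renewable r, long renewalMs, long sleepMs)
          throws InterruptedException {
        long lastRenewed = System.currentTimeMillis();
        while (!Thread.interrupted()) {
          final long elapsed = System.currentTimeMillis() - lastRenewed;
          if (elapsed >= renewalMs) {
            try {
              r.renew();
              lastRenewed = System.currentTimeMillis();
            } catch (IOException e) {
              // Report how long renewal has really been failing, not renewalMs.
              System.err.println("Failed to renew lease for "
                  + (elapsed / 1000) + " seconds.  Will retry shortly ...");
            }
          }
          Thread.sleep(sleepMs);
        }
      }
    }
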
Modified: hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLease.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLease.java?rev=1297334&r1=1297333&r2=1297334&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLease.java (original)
+++ hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLease.java Tue Mar  6 03:14:21 2012
@@ -18,29 +18,113 @@
 package org.apache.hadoop.hdfs;
 
 import java.io.DataOutputStream;
+import java.io.IOException;
 import java.security.PrivilegedExceptionAction;
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
+import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
+import org.apache.hadoop.ipc.RemoteException;
+import org.apache.hadoop.security.token.SecretManager.InvalidToken;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.junit.Assert;
 import org.junit.Test;
 import org.mockito.Mockito;
+import static org.mockito.Matchers.anyString;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.spy;
+import static org.mockito.Mockito.when;
+import static org.mockito.Mockito.doThrow;
+import static org.mockito.Mockito.doNothing;
 
 public class TestLease {
   static boolean hasLease(MiniDFSCluster cluster, Path src) {
     return NameNodeAdapter.getLeaseManager(cluster.getNamesystem()
         ).getLeaseByPath(src.toString()) != null;
   }
-  
-  final Path dir = new Path("/test/lease/");
+
+  static final String dirString = "/test/lease";
+  final Path dir = new Path(dirString);
+  static final Log LOG = LogFactory.getLog(TestLease.class);
+  Configuration conf = new HdfsConfiguration();
+
+  @Test
+  public void testLeaseAbort() throws Exception {
+    MiniDFSCluster cluster =
+        new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
+    try {
+      cluster.waitActive();
+      NamenodeProtocols preSpyNN = cluster.getNameNodeRpc();
+      NamenodeProtocols spyNN = spy(preSpyNN);
+
+      DFSClient dfs = new DFSClient(null, spyNN, conf, null);
+      byte[] buf = new byte[1024];
+
+      FSDataOutputStream c_out = createFsOut(dfs, dirString + "c");
+      c_out.write(buf, 0, 1024);
+      c_out.close();
+
+      DFSInputStream c_in = dfs.open(dirString + "c");
+      FSDataOutputStream d_out = createFsOut(dfs, dirString + "d");
+
+      // stub the renew method.
+      doThrow(new RemoteException(InvalidToken.class.getName(),
+          "Your token is worthless")).when(spyNN).renewLease(anyString());
+
+      // We don't need to wait for the lease renewer thread to act;
+      // call renewLease() manually and
+      // make it look like the lease has already expired.
+      dfs.lastLeaseRenewal = System.currentTimeMillis() - 300000;
+      dfs.renewLease();
+
+      // this should not work.
+      try {
+        d_out.write(buf, 0, 1024);
+        d_out.close();
+        Assert.fail("Write did not fail even after the fatal lease renewal failure");
+      } catch (IOException e) {
+        LOG.info("Write failed as expected. ", e);
+      }
+
+      // unstub
+      doNothing().when(spyNN).renewLease(anyString());
+
+      // existing input streams should work
+      try {
+        int num = c_in.read(buf, 0, 1);
+        if (num != 1) {
+          Assert.fail("Failed to read 1 byte");
+        }
+        c_in.close();
+      } catch (IOException e) {
+         LOG.error("Read failed with ", e);
+         Assert.fail("Read after lease renewal failure failed");
+      }
+
+      // new file writes should work.
+      try {
+        c_out = createFsOut(dfs, dirString + "c");
+        c_out.write(buf, 0, 1024);
+        c_out.close();
+      } catch (IOException e) {
+        LOG.error("Write failed with ", e);
+        Assert.fail("Write failed");
+      }
+    } finally {
+      cluster.shutdown();
+    }
+  }
 
   @Test
   public void testLease() throws Exception {
-    Configuration conf = new HdfsConfiguration();
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
     try {
       FileSystem fs = cluster.getFileSystem();
@@ -94,6 +178,11 @@ public class TestLease {
     Assert.assertTrue(c3.leaserenewer != c5.leaserenewer);
   }
   
+  private FSDataOutputStream createFsOut(DFSClient dfs, String path) 
+      throws IOException {
+    return new FSDataOutputStream(dfs.create(path, true), null);
+  }
+
   static final ClientProtocol mcp = Mockito.mock(ClientProtocol.class);
   static public DFSClient createDFSClientAs(UserGroupInformation ugi, 
       final Configuration conf) throws Exception {
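
The new testLeaseAbort() above drives this code path without waiting on real timers: it wraps the NameNode RPC object in a Mockito spy so that renewLease() can be forced to throw, backdates dfs.lastLeaseRenewal by 300 seconds so the failure appears to have persisted past the soft limit, and then re-stubs the spy to confirm that existing readers and fresh writers still work. The stubbing idiom in isolation, against a hypothetical interface standing in for NamenodeProtocols:

    import static org.mockito.Matchers.anyString;
    import static org.mockito.Mockito.doNothing;
    import static org.mockito.Mockito.doThrow;
    import static org.mockito.Mockito.mock;

    import java.io.IOException;

    class StubbingSketch {
      interface LeaseRpc { void renewLease(String client) throws IOException; }

      public static void main(String[] args) throws IOException {
        LeaseRpc rpc = mock(LeaseRpc.class);

        // Force every renewal to fail, as the test's doThrow(...) stub does.
        doThrow(new IOException("Your token is worthless"))
            .when(rpc).renewLease(anyString());
        try {
          rpc.renewLease("client-1");
        } catch (IOException expected) {
          System.out.println("renewal failed as stubbed");
        }

        // "Unstub": re-stubbing replaces the earlier behaviour.
        doNothing().when(rpc).renewLease(anyString());
        rpc.renewLease("client-1");              // now succeeds silently
      }
    }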


