hadoop-hdfs-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From szets...@apache.org
Subject svn commit: r1098781 - in /hadoop/hdfs/trunk: ./ src/java/org/apache/hadoop/hdfs/ src/test/hdfs/org/apache/hadoop/hdfs/
Date Mon, 02 May 2011 20:38:52 GMT
Author: szetszwo
Date: Mon May  2 20:38:51 2011
New Revision: 1098781

URL: http://svn.apache.org/viewvc?rev=1098781&view=rev
Log:
HDFS-1870. Move and rename DFSClient.LeaseChecker to a separate class LeaseRenewer.

Added:
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/LeaseRenewer.java
Modified:
    hadoop/hdfs/trunk/CHANGES.txt
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DFSClient.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DFSOutputStream.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestFileAppend4.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestLeaseRecovery2.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestReadWhileWriting.java

Modified: hadoop/hdfs/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/CHANGES.txt?rev=1098781&r1=1098780&r2=1098781&view=diff
==============================================================================
--- hadoop/hdfs/trunk/CHANGES.txt (original)
+++ hadoop/hdfs/trunk/CHANGES.txt Mon May  2 20:38:51 2011
@@ -380,6 +380,9 @@ Trunk (unreleased changes)
     HDFS-1741. Provide a minimal pom file to allow integration of HDFS into Sonar
     analysis (cos)
 
+    HDFS-1870. Move and rename DFSClient.LeaseChecker to a separate class
+    LeaseRenewer.  (szetszwo)
+
   OPTIMIZATIONS
 
     HDFS-1458. Improve checkpoint performance by avoiding unnecessary image

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DFSClient.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DFSClient.java?rev=1098781&r1=1098780&r2=1098781&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DFSClient.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DFSClient.java Mon May  2 20:38:51 2011
@@ -31,21 +31,17 @@ import java.io.IOException;
 import java.io.OutputStream;
 import java.net.InetSocketAddress;
 import java.net.Socket;
-import java.net.SocketTimeoutException;
 import java.util.EnumSet;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.Random;
-import java.util.SortedMap;
-import java.util.TreeMap;
 import java.util.concurrent.TimeUnit;
 
 import javax.net.SocketFactory;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.BlockLocation;
@@ -64,7 +60,6 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.UnresolvedLinkException;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
-import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks;
@@ -73,6 +68,7 @@ import org.apache.hadoop.hdfs.protocol.D
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.FSConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
@@ -101,7 +97,6 @@ import org.apache.hadoop.security.Access
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.SecretManager.InvalidToken;
 import org.apache.hadoop.security.token.Token;
-import org.apache.hadoop.util.Daemon;
 import org.apache.hadoop.util.Progressable;
 import org.apache.hadoop.util.StringUtils;
 
@@ -122,7 +117,7 @@ public class DFSClient implements FSCons
   public static final long SERVER_DEFAULTS_VALIDITY_PERIOD = 60 * 60 * 1000L; // 1 hour
   static final int TCP_WINDOW_SIZE = 128 * 1024; // 128 KB
   final ClientProtocol namenode;
-  private final ClientProtocol rpcNamenode;
+  final ClientProtocol rpcNamenode;
   final UserGroupInformation ugi;
   volatile boolean clientRunning = true;
   private volatile FsServerDefaults serverDefaults;
@@ -138,7 +133,7 @@ public class DFSClient implements FSCons
   final DataTransferProtocol.ReplaceDatanodeOnFailure dtpReplaceDatanodeOnFailure;
   final FileSystem.Statistics stats;
   final int hdfsTimeout;    // timeout value for a DFS operation.
-  final LeaseChecker leasechecker;
+  final LeaseRenewer leaserenewer;
 
   /**
    * The locking hierarchy is to first acquire lock on DFSClient object, followed by 
@@ -254,7 +249,7 @@ public class DFSClient implements FSCons
 
     // The hdfsTimeout is currently the same as the ipc timeout 
     this.hdfsTimeout = Client.getTimeout(conf);
-    this.leasechecker = new LeaseChecker(hdfsTimeout);
+    this.leaserenewer = new LeaseRenewer(this, hdfsTimeout);
 
     this.ugi = UserGroupInformation.getCurrentUser();
     
@@ -320,10 +315,10 @@ public class DFSClient implements FSCons
    */
   public synchronized void close() throws IOException {
     if(clientRunning) {
-      leasechecker.close();
+      leaserenewer.close();
       clientRunning = false;
       try {
-        leasechecker.interruptAndJoin();
+        leaserenewer.interruptAndJoin();
       } catch (InterruptedException ie) {
       }
   
@@ -638,7 +633,7 @@ public class DFSClient implements FSCons
         flag, createParent, replication, blockSize, progress, buffersize,
         conf.getInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, 
                     DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_DEFAULT));
-    leasechecker.put(src, result);
+    leaserenewer.put(src, result);
     return result;
   }
   
@@ -685,7 +680,7 @@ public class DFSClient implements FSCons
           flag, createParent, replication, blockSize, progress, buffersize,
           bytesPerChecksum);
     }
-    leasechecker.put(src, result);
+    leaserenewer.put(src, result);
     return result;
   }
   
@@ -764,7 +759,7 @@ public class DFSClient implements FSCons
           + src + " on client " + clientName);
     }
     OutputStream result = callAppend(stat, src, buffersize, progress);
-    leasechecker.put(src, result);
+    leaserenewer.put(src, result);
     return result;
   }
 
@@ -1364,211 +1359,6 @@ public class DFSClient implements FSCons
     }
   }
 
-  /** Lease management*/
-  class LeaseChecker {
-    static final long LEASE_RENEWER_GRACE_DEFAULT = 60*1000L;
-    static final long LEASE_RENEWER_SLEEP_DEFAULT = 1000L;
-    /** A map from src -> DFSOutputStream of files that are currently being
-     * written by this client.
-     */
-    private final SortedMap<String, OutputStream> pendingCreates
-        = new TreeMap<String, OutputStream>();
-    /** The time in milliseconds that the map became empty. */
-    private long emptyTime = Long.MAX_VALUE;
-    /** A fixed lease renewal time period in milliseconds */
-    private final long renewal;
-
-    /** A daemon for renewing lease */
-    private Daemon daemon = null;
-    /** Only the daemon with currentId should run. */
-    private int currentId = 0;
-
-    /** 
-     * A period in milliseconds that the lease renewer thread should run
-     * after the map became empty.
-     * If the map is empty for a time period longer than the grace period,
-     * the renewer should terminate.  
-     */
-    private long gracePeriod;
-    /**
-     * The time period in milliseconds
-     * that the renewer sleeps for each iteration. 
-     */
-    private volatile long sleepPeriod;
-
-    private LeaseChecker(final long timeout) {
-      this.renewal = (timeout > 0 && timeout < LEASE_SOFTLIMIT_PERIOD)? 
-          timeout/2: LEASE_SOFTLIMIT_PERIOD/2;
-      setGraceSleepPeriod(LEASE_RENEWER_GRACE_DEFAULT);
-    }
-
-    /** Set the grace period and adjust the sleep period accordingly. */
-    void setGraceSleepPeriod(final long gracePeriod) {
-      if (gracePeriod < 100L) {
-        throw new HadoopIllegalArgumentException(gracePeriod
-            + " = gracePeriod < 100ms is too small.");
-      }
-      synchronized(this) {
-        this.gracePeriod = gracePeriod;
-      }
-      final long half = gracePeriod/2;
-      this.sleepPeriod = half < LEASE_RENEWER_SLEEP_DEFAULT?
-          half: LEASE_RENEWER_SLEEP_DEFAULT;
-    }
-
-    /** Is the daemon running? */
-    synchronized boolean isRunning() {
-      return daemon != null && daemon.isAlive();
-    }
-
-    /** Is the empty period longer than the grace period? */  
-    private synchronized boolean isRenewerExpired() {
-      return emptyTime != Long.MAX_VALUE
-          && System.currentTimeMillis() - emptyTime > gracePeriod;
-    }
-
-    synchronized void put(String src, OutputStream out) {
-      if (clientRunning) {
-        if (daemon == null || isRenewerExpired()) {
-          //start a new deamon with a new id.
-          final int id = ++currentId;
-          daemon = new Daemon(new Runnable() {
-            @Override
-            public void run() {
-              try {
-                LeaseChecker.this.run(id);
-              } catch(InterruptedException e) {
-                if (LOG.isDebugEnabled()) {
-                  LOG.debug(LeaseChecker.this.getClass().getSimpleName()
-                      + " is interrupted.", e);
-                }
-              }
-            }
-          });
-          daemon.start();
-        }
-        pendingCreates.put(src, out);
-        emptyTime = Long.MAX_VALUE;
-      }
-    }
-    
-    synchronized void remove(String src) {
-      pendingCreates.remove(src);
-      if (pendingCreates.isEmpty() && emptyTime == Long.MAX_VALUE) {
-        //discover the first time that the map is empty.
-        emptyTime = System.currentTimeMillis();
-      }
-    }
-    
-    void interruptAndJoin() throws InterruptedException {
-      Daemon daemonCopy = null;
-      synchronized (this) {
-        if (isRunning()) {
-          daemon.interrupt();
-          daemonCopy = daemon;
-        }
-      }
-     
-      if (daemonCopy != null) {
-        if(LOG.isDebugEnabled()) {
-          LOG.debug("Wait for lease checker to terminate");
-        }
-        daemonCopy.join();
-      }
-    }
-
-    void close() {
-      while (true) {
-        String src;
-        OutputStream out;
-        synchronized (this) {
-          if (pendingCreates.isEmpty()) {
-            return;
-          }
-          src = pendingCreates.firstKey();
-          out = pendingCreates.remove(src);
-        }
-        if (out != null) {
-          try {
-            out.close();
-          } catch (IOException ie) {
-            LOG.error("Exception closing file " + src+ " : " + ie, ie);
-          }
-        }
-      }
-    }
-
-    /**
-     * Abort all open files. Release resources held. Ignore all errors.
-     */
-    synchronized void abort() {
-      clientRunning = false;
-      while (!pendingCreates.isEmpty()) {
-        String src = pendingCreates.firstKey();
-        DFSOutputStream out = (DFSOutputStream)pendingCreates.remove(src);
-        if (out != null) {
-          try {
-            out.abort();
-          } catch (IOException ie) {
-            LOG.error("Exception aborting file " + src+ ": ", ie);
-          }
-        }
-      }
-      RPC.stopProxy(rpcNamenode); // close connections to the namenode
-    }
-
-    private void renew() throws IOException {
-      synchronized(this) {
-        if (pendingCreates.isEmpty()) {
-          return;
-        }
-      }
-      namenode.renewLease(clientName);
-    }
-
-    /**
-     * Periodically check in with the namenode and renew all the leases
-     * when the lease period is half over.
-     */
-    private void run(final int id) throws InterruptedException {
-      for(long lastRenewed = System.currentTimeMillis();
-          clientRunning && !Thread.interrupted();
-          Thread.sleep(sleepPeriod)) {
-        if (System.currentTimeMillis() - lastRenewed >= renewal) {
-          try {
-            renew();
-            lastRenewed = System.currentTimeMillis();
-          } catch (SocketTimeoutException ie) {
-            LOG.warn("Failed to renew lease for " + clientName + " for "
-                + (renewal/1000) + " seconds.  Aborting ...", ie);
-            abort();
-            break;
-          } catch (IOException ie) {
-            LOG.warn("Failed to renew lease for " + clientName + " for "
-                + (renewal/1000) + " seconds.  Will retry shortly ...", ie);
-          }
-        }
-
-        synchronized(this) {
-          if (id != currentId || isRenewerExpired()) {
-            //no longer the current daemon or expired
-            return;
-          }
-        }
-      }
-    }
-
-    /** {@inheritDoc} */
-    public String toString() {
-      String s = getClass().getSimpleName();
-      if (LOG.isTraceEnabled()) {
-        return s + "@" + DFSClient.this + ": "
-               + StringUtils.stringifyException(new Throwable("for testing"));
-      }
-      return s;
-    }
-  }
-
   /**
    * The Hdfs implementation of {@link FSDataInputStream}
    */

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DFSOutputStream.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DFSOutputStream.java?rev=1098781&r1=1098780&r2=1098781&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DFSOutputStream.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DFSOutputStream.java Mon May  2 20:38:51
2011
@@ -1639,7 +1639,7 @@ class DFSOutputStream extends FSOutputSu
       ExtendedBlock lastBlock = streamer.getBlock();
       closeThreads(false);
       completeFile(lastBlock);
-      dfsClient.leasechecker.remove(src);
+      dfsClient.leaserenewer.remove(src);
     } finally {
       closed = true;
     }

Added: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/LeaseRenewer.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/LeaseRenewer.java?rev=1098781&view=auto
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/LeaseRenewer.java (added)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/LeaseRenewer.java Mon May  2 20:38:51
2011
@@ -0,0 +1,241 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import java.io.IOException;
+import java.io.OutputStream;
+import java.net.SocketTimeoutException;
+import java.util.SortedMap;
+import java.util.TreeMap;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.HadoopIllegalArgumentException;
+import org.apache.hadoop.hdfs.protocol.FSConstants;
+import org.apache.hadoop.ipc.RPC;
+import org.apache.hadoop.util.Daemon;
+import org.apache.hadoop.util.StringUtils;
+
+public class LeaseRenewer {
+  private static final Log LOG = LogFactory.getLog(LeaseRenewer.class);
+
+  static final long LEASE_RENEWER_GRACE_DEFAULT = 60*1000L;
+  static final long LEASE_RENEWER_SLEEP_DEFAULT = 1000L;
+  /** A map from src -> DFSOutputStream of files that are currently being
+   * written by this client.
+   */
+  private final SortedMap<String, OutputStream> pendingCreates
+      = new TreeMap<String, OutputStream>();
+  /** The time in milliseconds that the map became empty. */
+  private long emptyTime = Long.MAX_VALUE;
+  /** A fixed lease renewal time period in milliseconds */
+  private final long renewal;
+
+  /** A daemon for renewing lease */
+  private Daemon daemon = null;
+  /** Only the daemon with currentId should run. */
+  private int currentId = 0;
+
+  /** 
+   * A period in milliseconds that the lease renewer thread should run
+   * after the map became empty.
+   * If the map is empty for a time period longer than the grace period,
+   * the renewer should terminate.  
+   */
+  private long gracePeriod;
+  /**
+   * The time period in milliseconds
+   * that the renewer sleeps for each iteration. 
+   */
+  private volatile long sleepPeriod;
+
+  private final DFSClient dfsclient;
+
+  LeaseRenewer(final DFSClient dfsclient, final long timeout) {
+    this.dfsclient = dfsclient;
+    this.renewal = (timeout > 0 && timeout < FSConstants.LEASE_SOFTLIMIT_PERIOD)?

+        timeout/2: FSConstants.LEASE_SOFTLIMIT_PERIOD/2;
+    setGraceSleepPeriod(LEASE_RENEWER_GRACE_DEFAULT);
+  }
+
+  /** Set the grace period and adjust the sleep period accordingly. */
+  void setGraceSleepPeriod(final long gracePeriod) {
+    if (gracePeriod < 100L) {
+      throw new HadoopIllegalArgumentException(gracePeriod
+          + " = gracePeriod < 100ms is too small.");
+    }
+    synchronized(this) {
+      this.gracePeriod = gracePeriod;
+    }
+    final long half = gracePeriod/2;
+    this.sleepPeriod = half < LEASE_RENEWER_SLEEP_DEFAULT?
+        half: LEASE_RENEWER_SLEEP_DEFAULT;
+  }
+
+  /** Is the daemon running? */
+  synchronized boolean isRunning() {
+    return daemon != null && daemon.isAlive();
+  }
+
+  /** Is the empty period longer than the grace period? */  
+  private synchronized boolean isRenewerExpired() {
+    return emptyTime != Long.MAX_VALUE
+        && System.currentTimeMillis() - emptyTime > gracePeriod;
+  }
+
+  synchronized void put(String src, OutputStream out) {
+    if (dfsclient.clientRunning) {
+      if (daemon == null || isRenewerExpired()) {
+        //start a new daemon with a new id.
+        final int id = ++currentId;
+        daemon = new Daemon(new Runnable() {
+          @Override
+          public void run() {
+            try {
+              LeaseRenewer.this.run(id);
+            } catch(InterruptedException e) {
+              if (LOG.isDebugEnabled()) {
+                LOG.debug(LeaseRenewer.this.getClass().getSimpleName()
+                    + " is interrupted.", e);
+              }
+            }
+          }
+        });
+        daemon.start();
+      }
+      pendingCreates.put(src, out);
+      emptyTime = Long.MAX_VALUE;
+    }
+  }
+  
+  synchronized void remove(String src) {
+    pendingCreates.remove(src);
+    if (pendingCreates.isEmpty() && emptyTime == Long.MAX_VALUE) {
+      //discover the first time that the map is empty.
+      emptyTime = System.currentTimeMillis();
+    }
+  }
+  
+  void interruptAndJoin() throws InterruptedException {
+    Daemon daemonCopy = null;
+    synchronized (this) {
+      if (isRunning()) {
+        daemon.interrupt();
+        daemonCopy = daemon;
+      }
+    }
+   
+    if (daemonCopy != null) {
+      if(LOG.isDebugEnabled()) {
+        LOG.debug("Wait for lease checker to terminate");
+      }
+      daemonCopy.join();
+    }
+  }
+
+  void close() {
+    while (true) {
+      String src;
+      OutputStream out;
+      synchronized (this) {
+        if (pendingCreates.isEmpty()) {
+          return;
+        }
+        src = pendingCreates.firstKey();
+        out = pendingCreates.remove(src);
+      }
+      if (out != null) {
+        try {
+          out.close();
+        } catch (IOException ie) {
+          LOG.error("Exception closing file " + src+ " : " + ie, ie);
+        }
+      }
+    }
+  }
+
+  /**
+   * Abort all open files. Release resources held. Ignore all errors.
+   */
+  synchronized void abort() {
+    dfsclient.clientRunning = false;
+    while (!pendingCreates.isEmpty()) {
+      String src = pendingCreates.firstKey();
+      DFSOutputStream out = (DFSOutputStream)pendingCreates.remove(src);
+      if (out != null) {
+        try {
+          out.abort();
+        } catch (IOException ie) {
+          LOG.error("Exception aborting file " + src+ ": ", ie);
+        }
+      }
+    }
+    RPC.stopProxy(dfsclient.rpcNamenode); // close connections to the namenode
+  }
+
+  private void renew() throws IOException {
+    synchronized(this) {
+      if (pendingCreates.isEmpty()) {
+        return;
+      }
+    }
+    dfsclient.namenode.renewLease(dfsclient.clientName);
+  }
+
+  /**
+   * Periodically check in with the namenode and renew all the leases
+   * when the lease period is half over.
+   */
+  private void run(final int id) throws InterruptedException {
+    for(long lastRenewed = System.currentTimeMillis();
+        dfsclient.clientRunning && !Thread.interrupted();
+        Thread.sleep(sleepPeriod)) {
+      if (System.currentTimeMillis() - lastRenewed >= renewal) {
+        try {
+          renew();
+          lastRenewed = System.currentTimeMillis();
+        } catch (SocketTimeoutException ie) {
+          LOG.warn("Failed to renew lease for " + dfsclient.clientName + " for "
+              + (renewal/1000) + " seconds.  Aborting ...", ie);
+          abort();
+          break;
+        } catch (IOException ie) {
+          LOG.warn("Failed to renew lease for " + dfsclient.clientName + " for "
+              + (renewal/1000) + " seconds.  Will retry shortly ...", ie);
+        }
+      }
+
+      synchronized(this) {
+        if (id != currentId || isRenewerExpired()) {
+          //no longer the current daemon or expired
+          return;
+        }
+      }
+    }
+  }
+
+  /** {@inheritDoc} */
+  public String toString() {
+    String s = getClass().getSimpleName();
+    if (LOG.isTraceEnabled()) {
+      return s + "@" + dfsclient + ": "
+             + StringUtils.stringifyException(new Throwable("for testing"));
+    }
+    return s;
+  }
+}

Modified: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestDistributedFileSystem.java?rev=1098781&r1=1098780&r2=1098781&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
(original)
+++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
Mon May  2 20:38:51 2011
@@ -118,78 +118,78 @@ public class TestDistributedFileSystem {
 
       {
         DistributedFileSystem dfs = (DistributedFileSystem)cluster.getFileSystem();
-        dfs.dfs.leasechecker.setGraceSleepPeriod(grace);
-        assertFalse(dfs.dfs.leasechecker.isRunning());
+        dfs.dfs.leaserenewer.setGraceSleepPeriod(grace);
+        assertFalse(dfs.dfs.leaserenewer.isRunning());
   
         {
           //create a file
           final FSDataOutputStream out = dfs.create(filepaths[0]);
-          assertTrue(dfs.dfs.leasechecker.isRunning());
+          assertTrue(dfs.dfs.leaserenewer.isRunning());
           //write something
           out.writeLong(millis);
-          assertTrue(dfs.dfs.leasechecker.isRunning());
+          assertTrue(dfs.dfs.leaserenewer.isRunning());
           //close
           out.close();
           Thread.sleep(grace/4*3);
           //within grace period
-          assertTrue(dfs.dfs.leasechecker.isRunning());
+          assertTrue(dfs.dfs.leaserenewer.isRunning());
           for(int i = 0; i < 3; i++) {
-            if (dfs.dfs.leasechecker.isRunning()) {
+            if (dfs.dfs.leaserenewer.isRunning()) {
               Thread.sleep(grace/2);
             }
           }
           //passed grace period
-          assertFalse(dfs.dfs.leasechecker.isRunning());
+          assertFalse(dfs.dfs.leaserenewer.isRunning());
         }
 
         {
           //create file1
           final FSDataOutputStream out1 = dfs.create(filepaths[1]);
-          assertTrue(dfs.dfs.leasechecker.isRunning());
+          assertTrue(dfs.dfs.leaserenewer.isRunning());
           //create file2
           final FSDataOutputStream out2 = dfs.create(filepaths[2]);
-          assertTrue(dfs.dfs.leasechecker.isRunning());
+          assertTrue(dfs.dfs.leaserenewer.isRunning());
 
           //write something to file1
           out1.writeLong(millis);
-          assertTrue(dfs.dfs.leasechecker.isRunning());
+          assertTrue(dfs.dfs.leaserenewer.isRunning());
           //close file1
           out1.close();
-          assertTrue(dfs.dfs.leasechecker.isRunning());
+          assertTrue(dfs.dfs.leaserenewer.isRunning());
 
           //write something to file2
           out2.writeLong(millis);
-          assertTrue(dfs.dfs.leasechecker.isRunning());
+          assertTrue(dfs.dfs.leaserenewer.isRunning());
           //close file2
           out2.close();
           Thread.sleep(grace/4*3);
           //within grace period
-          assertTrue(dfs.dfs.leasechecker.isRunning());
+          assertTrue(dfs.dfs.leaserenewer.isRunning());
         }
 
         {
           //create file3
           final FSDataOutputStream out3 = dfs.create(filepaths[3]);
-          assertTrue(dfs.dfs.leasechecker.isRunning());
+          assertTrue(dfs.dfs.leaserenewer.isRunning());
           Thread.sleep(grace/4*3);
+          //passed previous grace period, should still be running
-          assertTrue(dfs.dfs.leasechecker.isRunning());
+          assertTrue(dfs.dfs.leaserenewer.isRunning());
           //write something to file3
           out3.writeLong(millis);
-          assertTrue(dfs.dfs.leasechecker.isRunning());
+          assertTrue(dfs.dfs.leaserenewer.isRunning());
           //close file3
           out3.close();
-          assertTrue(dfs.dfs.leasechecker.isRunning());
+          assertTrue(dfs.dfs.leaserenewer.isRunning());
           Thread.sleep(grace/4*3);
           //within grace period
-          assertTrue(dfs.dfs.leasechecker.isRunning());
+          assertTrue(dfs.dfs.leaserenewer.isRunning());
           for(int i = 0; i < 3; i++) {
-            if (dfs.dfs.leasechecker.isRunning()) {
+            if (dfs.dfs.leaserenewer.isRunning()) {
               Thread.sleep(grace/2);
             }
           }
           //passed grace period
-          assertFalse(dfs.dfs.leasechecker.isRunning());
+          assertFalse(dfs.dfs.leaserenewer.isRunning());
         }
 
         dfs.close();
@@ -218,15 +218,15 @@ public class TestDistributedFileSystem {
 
       {
         DistributedFileSystem dfs = (DistributedFileSystem)cluster.getFileSystem();
-        assertFalse(dfs.dfs.leasechecker.isRunning());
+        assertFalse(dfs.dfs.leaserenewer.isRunning());
 
         //open and check the file
         FSDataInputStream in = dfs.open(filepaths[0]);
-        assertFalse(dfs.dfs.leasechecker.isRunning());
+        assertFalse(dfs.dfs.leaserenewer.isRunning());
         assertEquals(millis, in.readLong());
-        assertFalse(dfs.dfs.leasechecker.isRunning());
+        assertFalse(dfs.dfs.leaserenewer.isRunning());
         in.close();
-        assertFalse(dfs.dfs.leasechecker.isRunning());
+        assertFalse(dfs.dfs.leaserenewer.isRunning());
         dfs.close();
       }
       

Modified: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestFileAppend4.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestFileAppend4.java?rev=1098781&r1=1098780&r2=1098781&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestFileAppend4.java (original)
+++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestFileAppend4.java Mon May  2
20:38:51 2011
@@ -184,7 +184,7 @@ public class TestFileAppend4 {
       // has not been completed in the NN.
       // Lose the leases
       LOG.info("Killing lease checker");
-      client.leasechecker.interruptAndJoin();
+      client.leaserenewer.interruptAndJoin();
  
       FileSystem fs1 = cluster.getFileSystem();
       FileSystem fs2 = AppendTestUtil.createHdfsWithDifferentUsername(
@@ -255,7 +255,7 @@ public class TestFileAppend4 {
       // has not been completed in the NN.
       // Lose the leases
       LOG.info("Killing lease checker");
-      client.leasechecker.interruptAndJoin();
+      client.leaserenewer.interruptAndJoin();
  
       FileSystem fs1 = cluster.getFileSystem();
       FileSystem fs2 = AppendTestUtil.createHdfsWithDifferentUsername(

Modified: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestLeaseRecovery2.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestLeaseRecovery2.java?rev=1098781&r1=1098780&r2=1098781&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestLeaseRecovery2.java (original)
+++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestLeaseRecovery2.java Mon May
 2 20:38:51 2011
@@ -119,7 +119,7 @@ public class TestLeaseRecovery2 {
     
     // kill the lease renewal thread
     AppendTestUtil.LOG.info("leasechecker.interruptAndJoin()");
-    dfs.dfs.leasechecker.interruptAndJoin();
+    dfs.dfs.leaserenewer.interruptAndJoin();
 
     // set the hard limit to be 1 second 
     cluster.setLeasePeriod(LONG_LEASE_PERIOD, SHORT_LEASE_PERIOD);
@@ -182,7 +182,7 @@ public class TestLeaseRecovery2 {
     AppendTestUtil.LOG.info("hflush");
     stm.hflush();
     AppendTestUtil.LOG.info("leasechecker.interruptAndJoin()");
-    dfs.dfs.leasechecker.interruptAndJoin();
+    dfs.dfs.leaserenewer.interruptAndJoin();
 
     // set the soft limit to be 1 second so that the
     // namenode triggers lease recovery on next attempt to write-for-open.

Modified: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestReadWhileWriting.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestReadWhileWriting.java?rev=1098781&r1=1098780&r2=1098781&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestReadWhileWriting.java (original)
+++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestReadWhileWriting.java Mon May
 2 20:38:51 2011
@@ -86,7 +86,7 @@ public class TestReadWhileWriting {
       //   of data can be read successfully.
       checkFile(p, half, conf);
       AppendTestUtil.LOG.info("leasechecker.interruptAndJoin()");
-      ((DistributedFileSystem)fs).dfs.leasechecker.interruptAndJoin();
+      ((DistributedFileSystem)fs).dfs.leaserenewer.interruptAndJoin();
 
       //c. On M1, append another half block of data.  Close file on M1.
       {



Mime
View raw message