hadoop-common-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From szets...@apache.org
Subject svn commit: r722258 - in /hadoop/core/branches/branch-0.18: ./ src/hdfs/org/apache/hadoop/dfs/ src/test/org/apache/hadoop/dfs/
Date Mon, 01 Dec 2008 22:03:56 GMT
Author: szetszwo
Date: Mon Dec  1 14:03:56 2008
New Revision: 722258

URL: http://svn.apache.org/viewvc?rev=722258&view=rev
Log:
HADOOP-4257 should also be committed to 0.18. (szetszwo)

Added:
    hadoop/core/branches/branch-0.18/src/test/org/apache/hadoop/dfs/AppendTestUtil.java
Modified:
    hadoop/core/branches/branch-0.18/CHANGES.txt
    hadoop/core/branches/branch-0.18/src/hdfs/org/apache/hadoop/dfs/DFSClient.java
    hadoop/core/branches/branch-0.18/src/hdfs/org/apache/hadoop/dfs/DataNode.java
    hadoop/core/branches/branch-0.18/src/hdfs/org/apache/hadoop/dfs/FSDataset.java
    hadoop/core/branches/branch-0.18/src/hdfs/org/apache/hadoop/dfs/INode.java
    hadoop/core/branches/branch-0.18/src/test/org/apache/hadoop/dfs/TestLeaseRecovery2.java

Modified: hadoop/core/branches/branch-0.18/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/core/branches/branch-0.18/CHANGES.txt?rev=722258&r1=722257&r2=722258&view=diff
==============================================================================
--- hadoop/core/branches/branch-0.18/CHANGES.txt (original)
+++ hadoop/core/branches/branch-0.18/CHANGES.txt Mon Dec  1 14:03:56 2008
@@ -49,6 +49,9 @@
    HADOOP-4542. TestDistributedUpgrade used to succeed for wrong reasons.
     (Raghu Angadi)
 
+    HADOOP-4257. The DFS client should pick only one datanode as the candidate
+    to initiate lease recovery.  (Tsz Wo (Nicholas), SZE via dhruba)
+
 Release 0.18.2 - 2008-11-03
 
   BUG FIXES

Modified: hadoop/core/branches/branch-0.18/src/hdfs/org/apache/hadoop/dfs/DFSClient.java
URL: http://svn.apache.org/viewvc/hadoop/core/branches/branch-0.18/src/hdfs/org/apache/hadoop/dfs/DFSClient.java?rev=722258&r1=722257&r2=722258&view=diff
==============================================================================
--- hadoop/core/branches/branch-0.18/src/hdfs/org/apache/hadoop/dfs/DFSClient.java (original)
+++ hadoop/core/branches/branch-0.18/src/hdfs/org/apache/hadoop/dfs/DFSClient.java Mon Dec  1 14:03:56 2008
@@ -65,8 +65,8 @@
   final UnixUserGroupInformation ugi;
   volatile boolean clientRunning = true;
   Random r = new Random();
-  String clientName;
-  Daemon leaseChecker;
+  final String clientName;
+  final Daemon leaseChecker;
   private Configuration conf;
   private long defaultBlockSize;
   private short defaultReplication;

Modified: hadoop/core/branches/branch-0.18/src/hdfs/org/apache/hadoop/dfs/DataNode.java
URL: http://svn.apache.org/viewvc/hadoop/core/branches/branch-0.18/src/hdfs/org/apache/hadoop/dfs/DataNode.java?rev=722258&r1=722257&r2=722258&view=diff
==============================================================================
--- hadoop/core/branches/branch-0.18/src/hdfs/org/apache/hadoop/dfs/DataNode.java (original)
+++ hadoop/core/branches/branch-0.18/src/hdfs/org/apache/hadoop/dfs/DataNode.java Mon Dec  1 14:03:56 2008
@@ -1870,8 +1870,10 @@
           IOUtils.closeStream(checksumIn);
           checksumIn = null;
           if (corruptChecksumOk) {
-            // Just fill the array with zeros.
-            Arrays.fill(buf, checksumOff, checksumLen, (byte) 0);
+            if (checksumOff < checksumLen) {
+              // Just fill the array with zeros.
+              Arrays.fill(buf, checksumOff, checksumLen, (byte) 0);
+            }
           } else {
             throw e;
           }
@@ -3146,7 +3148,8 @@
 
   /** {@inheritDoc} */
   public void updateBlock(Block oldblock, Block newblock, boolean finalize) throws IOException {
-    LOG.info("oldblock=" + oldblock + ", newblock=" + newblock);
+    LOG.info("oldblock=" + oldblock + ", newblock=" + newblock
+        + ", datanode=" + dnRegistration.getName());
     data.updateBlock(oldblock, newblock);
     if (finalize) {
       data.finalizeBlock(newblock);

Modified: hadoop/core/branches/branch-0.18/src/hdfs/org/apache/hadoop/dfs/FSDataset.java
URL: http://svn.apache.org/viewvc/hadoop/core/branches/branch-0.18/src/hdfs/org/apache/hadoop/dfs/FSDataset.java?rev=722258&r1=722257&r2=722258&view=diff
==============================================================================
--- hadoop/core/branches/branch-0.18/src/hdfs/org/apache/hadoop/dfs/FSDataset.java (original)
+++ hadoop/core/branches/branch-0.18/src/hdfs/org/apache/hadoop/dfs/FSDataset.java Mon Dec  1 14:03:56 2008
@@ -827,7 +827,7 @@
     
     //rename meta file to a tmp file
     File tmpMetaFile = new File(oldMetaFile.getParent(),
-        oldMetaFile.getName()+"_tmp");
+        oldMetaFile.getName()+"_tmp" + newblock.getGenerationStamp());
     if (!oldMetaFile.renameTo(tmpMetaFile)){
       throw new IOException("Cannot rename block meta file to " + tmpMetaFile);
     }

Modified: hadoop/core/branches/branch-0.18/src/hdfs/org/apache/hadoop/dfs/INode.java
URL: http://svn.apache.org/viewvc/hadoop/core/branches/branch-0.18/src/hdfs/org/apache/hadoop/dfs/INode.java?rev=722258&r1=722257&r2=722258&view=diff
==============================================================================
--- hadoop/core/branches/branch-0.18/src/hdfs/org/apache/hadoop/dfs/INode.java (original)
+++ hadoop/core/branches/branch-0.18/src/hdfs/org/apache/hadoop/dfs/INode.java Mon Dec  1 14:03:56 2008
@@ -1061,7 +1061,8 @@
         DatanodeDescriptor primary = targets[primaryNodeIndex = j]; 
         primary.addBlockToBeRecovered(blocks[blocks.length - 1], targets);
         NameNode.stateChangeLog.info("BLOCK* " + blocks[blocks.length - 1]
-          + " recovery started.");
+          + " recovery started, primary=" + primary);
+        return;
       }
     }
   }

Added: hadoop/core/branches/branch-0.18/src/test/org/apache/hadoop/dfs/AppendTestUtil.java
URL: http://svn.apache.org/viewvc/hadoop/core/branches/branch-0.18/src/test/org/apache/hadoop/dfs/AppendTestUtil.java?rev=722258&view=auto
==============================================================================
--- hadoop/core/branches/branch-0.18/src/test/org/apache/hadoop/dfs/AppendTestUtil.java (added)
+++ hadoop/core/branches/branch-0.18/src/test/org/apache/hadoop/dfs/AppendTestUtil.java Mon Dec  1 14:03:56 2008
@@ -0,0 +1,119 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.dfs;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.util.Random;
+
+import junit.framework.TestCase;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.security.UnixUserGroupInformation;
+import org.apache.hadoop.security.UserGroupInformation;
+
+/** Utilities for append-related tests */ 
+class AppendTestUtil {
+  /** For specifying the random number generator seed,
+   *  change the following value:
+   */
+  static final Long RANDOM_NUMBER_GENERATOR_SEED = null;
+
+  static final Log LOG = LogFactory.getLog(AppendTestUtil.class);
+
+  private static final Random SEED = new Random();
+  static {
+    final long seed = RANDOM_NUMBER_GENERATOR_SEED == null?
+        SEED.nextLong(): RANDOM_NUMBER_GENERATOR_SEED;
+    LOG.info("seed=" + seed);
+    SEED.setSeed(seed);
+  }
+
+  private static final ThreadLocal<Random> RANDOM = new ThreadLocal<Random>() {
+    protected Random initialValue() {
+      final Random r =  new Random();
+      synchronized(SEED) { 
+        final long seed = SEED.nextLong();
+        r.setSeed(seed);
+        LOG.info(Thread.currentThread().getName() + ": seed=" + seed);
+      }
+      return r;
+    }
+  };
+  
+  static int nextInt() {return RANDOM.get().nextInt();}
+  static int nextInt(int n) {return RANDOM.get().nextInt(n);}
+  static int nextLong() {return RANDOM.get().nextInt();}
+
+  static byte[] randomBytes(long seed, int size) {
+    LOG.info("seed=" + seed + ", size=" + size);
+    final byte[] b = new byte[size];
+    final Random rand = new Random(seed);
+    rand.nextBytes(b);
+    return b;
+  }
+
+  static void sleep(long ms) {
+    try {
+      Thread.sleep(ms);
+    } catch (InterruptedException e) {
+      LOG.info("ms=" + ms, e);
+    }
+  }
+
+  static FileSystem createHdfsWithDifferentUsername(Configuration conf
+      ) throws IOException {
+    Configuration conf2 = new Configuration(conf);
+    String username = UserGroupInformation.getCurrentUGI().getUserName()+"_XXX";
+    UnixUserGroupInformation.saveToConf(conf2,
+        UnixUserGroupInformation.UGI_PROPERTY_NAME,
+        new UnixUserGroupInformation(username, new String[]{"supergroup"}));
+    return FileSystem.get(conf2);
+  }
+
+  static void write(OutputStream out, int offset, int length) throws IOException {
+    final byte[] bytes = new byte[length];
+    for(int i = 0; i < length; i++) {
+      bytes[i] = (byte)(offset + i);
+    }
+    out.write(bytes);
+  }
+  
+  static void check(FileSystem fs, Path p, long length) throws IOException {
+    int i = -1;
+    try {
+      final FileStatus status = fs.getFileStatus(p);
+      TestCase.assertEquals(length, status.getLen());
+      InputStream in = fs.open(p);
+      for(i++; i < length; i++) {
+        TestCase.assertEquals((byte)i, (byte)in.read());  
+      }
+      i = -(int)length;
+      TestCase.assertEquals(-1, in.read()); //EOF  
+      in.close();
+    } catch(IOException ioe) {
+      throw new IOException("p=" + p + ", length=" + length + ", i=" + i, ioe);
+    }
+  }
+}

Modified: hadoop/core/branches/branch-0.18/src/test/org/apache/hadoop/dfs/TestLeaseRecovery2.java
URL: http://svn.apache.org/viewvc/hadoop/core/branches/branch-0.18/src/test/org/apache/hadoop/dfs/TestLeaseRecovery2.java?rev=722258&r1=722257&r2=722258&view=diff
==============================================================================
--- hadoop/core/branches/branch-0.18/src/test/org/apache/hadoop/dfs/TestLeaseRecovery2.java (original)
+++ hadoop/core/branches/branch-0.18/src/test/org/apache/hadoop/dfs/TestLeaseRecovery2.java Mon Dec  1 14:03:56 2008
@@ -18,13 +18,15 @@
 package org.apache.hadoop.dfs;
 
 import java.io.IOException;
-import java.util.Random;
 
 import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.security.UnixUserGroupInformation;
+import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.log4j.Level;
 
 public class TestLeaseRecovery2 extends junit.framework.TestCase {
@@ -34,36 +36,21 @@
     ((Log4JLogger)FSNamesystem.LOG).getLogger().setLevel(Level.ALL);
   }
 
-  static final int BLOCK_SIZE = 64;
-  static final int FILE_SIZE = 1024;
+  static final long BLOCK_SIZE = 1024;
+  static final int FILE_SIZE = 1024*16;
   static final short REPLICATION_NUM = (short)3;
-  static final Random RANDOM = new Random();
   static byte[] buffer = new byte[FILE_SIZE];
 
-  static void checkMetaInfo(Block b, InterDatanodeProtocol idp
-      ) throws IOException {
-    TestInterDatanodeProtocol.checkMetaInfo(b, idp, null);
-  }
-  
-  static int min(Integer... x) {
-    int m = x[0];
-    for(int i = 1; i < x.length; i++) {
-      if (x[i] < m) {
-        m = x[i];
-      }
-    }
-    return m;
-  }
-
-  /**
-   */
   public void testBlockSynchronization() throws Exception {
     final long softLease = 1000;
     final long hardLease = 60 * 60 *1000;
     final short repl = 3;
-    Configuration conf = new Configuration();
+    final Configuration conf = new Configuration();
+    final int bufferSize = conf.getInt("io.file.buffer.size", 4096);
     conf.setLong("dfs.block.size", BLOCK_SIZE);
-    conf.setInt("io.bytes.per.checksum", 16);
+    conf.setInt("dfs.heartbeat.interval", 1);
+  //  conf.setInt("io.bytes.per.checksum", 16);
+
     MiniDFSCluster cluster = null;
     byte[] actual = new byte[FILE_SIZE];
 
@@ -74,19 +61,23 @@
       //create a file
       DistributedFileSystem dfs = (DistributedFileSystem)cluster.getFileSystem();
       // create a random file name
-      String filestr = "/foo" + RANDOM.nextInt();
+      String filestr = "/foo" + AppendTestUtil.nextInt();
+      System.out.println("filestr=" + filestr);
       Path filepath = new Path(filestr);
       FSDataOutputStream stm = dfs.create(filepath, true,
-                                 dfs.getConf().getInt("io.file.buffer.size", 4096),
-                                 (short)repl, (long)BLOCK_SIZE);
+          bufferSize, repl, BLOCK_SIZE);
       assertTrue(dfs.dfs.exists(filestr));
 
       // write random number of bytes into it.
-      int size = RANDOM.nextInt(FILE_SIZE);
+      int size = AppendTestUtil.nextInt(FILE_SIZE);
+      System.out.println("size=" + size);
       stm.write(buffer, 0, size);
 
       // sync file
+      AppendTestUtil.LOG.info("sync");
       stm.sync();
+      AppendTestUtil.LOG.info("leasechecker.interrupt()");
+      dfs.dfs.leaseChecker.interrupt();
 
       // set the soft limit to be 1 second so that the
       // namenode triggers lease recovery on next attempt to write-for-open.
@@ -94,30 +85,44 @@
 
       // try to re-open the file before closing the previous handle. This
       // should fail but will trigger lease recovery.
-      String oldClientName = dfs.dfs.clientName;
-      dfs.dfs.clientName += "_1";
-      while (true) {
-        try {
-          FSDataOutputStream newstm = dfs.create(filepath, false,
-            dfs.getConf().getInt("io.file.buffer.size", 4096),
-            (short)repl, (long)BLOCK_SIZE);
-          assertTrue("Creation of an existing file should never succeed.", false);
-        } catch (IOException e) {
-          if (e.getMessage().contains("file exists")) {
-            break;
+      {
+        Configuration conf2 = new Configuration(conf);
+        String username = UserGroupInformation.getCurrentUGI().getUserName()+"_1";
+        UnixUserGroupInformation.saveToConf(conf2,
+            UnixUserGroupInformation.UGI_PROPERTY_NAME,
+            new UnixUserGroupInformation(username, new String[]{"supergroup"}));
+        FileSystem dfs2 = FileSystem.get(conf2);
+  
+        boolean done = false;
+        for(int i = 0; i < 10 && !done; i++) {
+          AppendTestUtil.LOG.info("i=" + i);
+          try {
+            dfs2.create(filepath, false, bufferSize, repl, BLOCK_SIZE);
+            fail("Creation of an existing file should never succeed.");
+          } catch (IOException ioe) {
+            final String message = ioe.getMessage();
+            if (message.contains("file exists")) {
+              AppendTestUtil.LOG.info("done", ioe);
+              done = true;
+            }
+            else if (message.contains(AlreadyBeingCreatedException.class.getSimpleName())) {
+              AppendTestUtil.LOG.info("GOOD! got " + message);
+            }
+            else {
+              AppendTestUtil.LOG.warn("UNEXPECTED IOException", ioe);
+            }
+          }
+
+          if (!done) {
+            AppendTestUtil.LOG.info("sleep " + 5000 + "ms");
+            try {Thread.sleep(5000);} catch (InterruptedException e) {}
           }
-          e.printStackTrace();
-        }
-        try {
-          Thread.sleep(1000);
-        } catch (InterruptedException e) {
         }
+        assertTrue(done);
       }
-      System.out.println("Lease for file " +  filepath + " is recovered. " +
-                         "validating its contents now...");
 
-      // revert back  client identity
-      dfs.dfs.clientName = oldClientName;
+      AppendTestUtil.LOG.info("Lease for file " +  filepath + " is recovered. "
+          + "Validating its contents now...");
 
       // verify that file-size matches
       assertTrue("File should be " + size + " bytes, but is actually " +



Mime
View raw message