hadoop-common-commits mailing list archives

From: dhr...@apache.org
Subject: svn commit: r566886 - in /lucene/hadoop/trunk: CHANGES.txt src/java/org/apache/hadoop/dfs/FSDirectory.java src/java/org/apache/hadoop/dfs/FSNamesystem.java src/test/org/apache/hadoop/dfs/TestFileCreation.java
Date: Thu, 16 Aug 2007 23:53:53 GMT
Author: dhruba
Date: Thu Aug 16 16:53:50 2007
New Revision: 566886

URL: http://svn.apache.org/viewvc?view=rev&rev=566886
Log:
HADOOP-1708.  Make files appear in namespace as soon as they are created.
 (Dhruba Borthakur via dhruba)

Added:
    lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestFileCreation.java   (with props)
Modified:
    lucene/hadoop/trunk/CHANGES.txt
    lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/FSDirectory.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/FSNamesystem.java
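For readers of the change, the client-visible effect is that a newly created file is listed in the DFS namespace before any data has been written or the output stream has been closed. The sketch below condenses the new TestFileCreation test added by this commit (same client API and MiniDFSCluster setup as the test further down; the block size, replication factor and file name are illustrative values, not part of this commit):

    // Sketch only; assumes the imports used in TestFileCreation below
    // (Configuration, MiniDFSCluster, FileSystem, Path, FSDataOutputStream)
    // and a JUnit TestCase for assertTrue().
    Configuration conf = new Configuration();
    MiniDFSCluster cluster = new MiniDFSCluster(conf, 1, true, null);
    FileSystem fs = cluster.getFileSystem();
    try {
      Path file1 = new Path("filestatus.dat");
      // create the file but leave the output stream open
      FSDataOutputStream stm = fs.create(file1, true,
          conf.getInt("io.file.buffer.size", 4096), (short) 1, 8192L);
      // with HADOOP-1708 the file is already visible in the namespace;
      // before this change it only appeared once the stream was closed
      assertTrue(!fs.getFileStatus(file1).isDir());
      stm.close();
    } finally {
      fs.close();
      cluster.shutdown();
    }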

Modified: lucene/hadoop/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/CHANGES.txt?view=diff&rev=566886&r1=566885&r2=566886
==============================================================================
--- lucene/hadoop/trunk/CHANGES.txt (original)
+++ lucene/hadoop/trunk/CHANGES.txt Thu Aug 16 16:53:50 2007
@@ -28,6 +28,9 @@
     HADOOP-1610.  Add metrics for failed tasks.
     (Devaraj Das via tomwhite)
 
+    HADOOP-1708.  Make files appear in namespace as soon as they are
+    created.  (Dhruba Borthakur via dhruba)
+
   OPTIMIZATIONS
 
     HADOOP-1565.  Reduce memory usage of NameNode by replacing 

Modified: lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/FSDirectory.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/FSDirectory.java?view=diff&rev=566886&r1=566885&r2=566886
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/FSDirectory.java (original)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/FSDirectory.java Thu Aug 16 16:53:50 2007
@@ -174,6 +174,14 @@
     }
 
     /**
+     * Set the blocks of this file
+     * @param blockList the new list of blocks
+     */
+    void setBlocks(Block[] blockList) {
+      this.blocks = blockList;
+    }
+
+    /**
      * Get children iterator
      * @return Iterator of children
      */
@@ -539,6 +547,43 @@
                               new INode(path, blocks, replication,
                                         modificationTime),
                               modificationTime);
+  }
+
+  /**
+   * Add blocks to the file.
+   */
+  boolean addBlocks(String path, Block[] blocks) throws IOException {
+    waitForReady();
+
+    synchronized (rootDir) {
+      INode fileNode = rootDir.getNode(path);
+      if (fileNode == null) {
+        throw new IOException("Unknown file: " + path);
+      }
+      if (fileNode.getBlocks() != null &&
+          fileNode.getBlocks().length != 0) {
+        throw new IOException("Cannot add new blocks to " +
+                              "already existing file.");
+      }
+
+      // associate the new list of blocks with this file
+      fileNode.setBlocks(blocks);
+      for (int i = 0; i < blocks.length; i++) {
+        namesystem.blocksMap.addINode(blocks[i], fileNode);
+      }
+
+      // create two transactions. The first one deletes the empty
+      // file and the second transaction recreates the same file
+      // with the appropriate set of blocks.
+      fsImage.getEditLog().logDelete(path, fileNode.getModificationTime());
+
+      // re-add create file record to log
+      fsImage.getEditLog().logCreateFile(fileNode);
+      NameNode.stateChangeLog.debug("DIR* FSDirectory.addBlocks: "
+                                    +path+" with "+blocks.length
+                                    +" blocks is added to the file system");
+    }
+    return true;
   }
 
   /**

Modified: lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/FSNamesystem.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/FSNamesystem.java?view=diff&rev=566886&r1=566885&r2=566886
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/FSNamesystem.java (original)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/FSNamesystem.java Thu Aug 16 16:53:50 2007
@@ -820,6 +820,15 @@
                                    +ie.getMessage());
       throw ie;
     }
+
+    //
+    // Now we can add the name to the filesystem. This file has no
+    // blocks associated with it.
+    //
+    if (!dir.addFile(src, new Block[0], replication)) {
+      throw new IOException("DIR* NameSystem.startFile: " +
+                            "Unable to add file to namespace.");
+    }
   }
 
   /**
@@ -953,11 +962,13 @@
     FileUnderConstruction pendingFile = pendingCreates.get(src);
 
     Block[] fileBlocks =  dir.getFileBlocks(src);
-    if (fileBlocks!= null || pendingFile == null) {    
+    if ((fileBlocks != null && fileBlocks.length > 0) ||
+         pendingFile == null) {    
       NameNode.stateChangeLog.warn("DIR* NameSystem.completeFile: "
                                    + "failed to complete " + src
                                    + " because dir.getFileBlocks() is " + 
-                                   ((fileBlocks == null) ? "null":"non-null") + 
+                                   ((fileBlocks == null) ? 
+                                    "null":fileBlocks.length) + 
                                    " and pendingFile is " + 
                                    ((pendingFile == null) ? "null" : 
                                      ("from " + pendingFile.getClientMachine()))
@@ -986,9 +997,9 @@
     }
         
     //
-    // Now we can add the (name,blocks) tuple to the filesystem
+    // add blocks to the file
     //
-    if (!dir.addFile(src, pendingBlocks, pendingFile.getReplication())) {
+    if (!dir.addBlocks(src, pendingBlocks)) {
       return OPERATION_FAILED;
     }
 

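Taken together, the two changes above split file creation on the NameNode into two steps: startFile now enters the name into the namespace immediately with an empty block list, and completeFile later attaches the accumulated blocks through the new FSDirectory.addBlocks call. In outline (simplified; only calls that appear in the diffs above):

    // startFile(): register the name right away, with no blocks yet
    dir.addFile(src, new Block[0], replication);

    // ... the client writes data; allocated blocks collect in pendingCreates ...

    // completeFile(): attach the pending blocks to the existing file entry.
    // addBlocks() logs a delete followed by a create so that the edit log
    // ends up describing the file together with its final block list.
    dir.addBlocks(src, pendingBlocks);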
Added: lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestFileCreation.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestFileCreation.java?view=auto&rev=566886
==============================================================================
--- lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestFileCreation.java (added)
+++ lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestFileCreation.java Thu Aug 16 16:53:50 2007
@@ -0,0 +1,127 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.dfs;
+
+import junit.framework.TestCase;
+import java.io.*;
+import java.util.Random;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FsShell;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.fs.FSDataOutputStream;
+
+/**
+ * This class tests that a file appears in the namespace as soon as it is created.
+ */
+public class TestFileCreation extends TestCase {
+  static final long seed = 0xDEADBEEFL;
+  static final int blockSize = 8192;
+  static final int fileSize = 16384;
+
+  private static String TEST_ROOT_DIR =
+    new Path(System.getProperty("test.build.data","/tmp"))
+    .toString().replace(' ', '+');
+  
+  //
+  // creates a file but does not close it
+  //
+  private FSDataOutputStream createFile(FileSystem fileSys, Path name, int repl)
+    throws IOException {
+    // create the file; data is written and the stream closed by the caller
+    FSDataOutputStream stm = fileSys.create(name, true,
+                                            fileSys.getConf().getInt("io.file.buffer.size", 4096),
+                                            (short)repl, (long)blockSize);
+    return stm;
+  }
+
+  //
+  // writes to file but does not close it
+  //
+  private void writeFile(FSDataOutputStream stm) throws IOException {
+    byte[] buffer = new byte[fileSize];
+    Random rand = new Random(seed);
+    rand.nextBytes(buffer);
+    stm.write(buffer);
+  }
+
+  private void checkFile(FileSystem fileSys, Path name, int repl)
+    throws IOException {
+    boolean done = false;
+    while (!done) {
+      try {
+        Thread.sleep(1000);
+      } catch (InterruptedException e) {}
+      done = true;
+      String[][] locations = fileSys.getFileCacheHints(name, 0, fileSize);
+      for (int idx = 0; idx < locations.length; idx++) {
+        if (locations[idx].length < repl) {
+          done = false;
+          break;
+        }
+      }
+    }
+  }
+
+
+  /**
+   * Tests various options of file creation.
+   */
+  public void testFileCreation() throws IOException {
+    Configuration conf = new Configuration();
+    MiniDFSCluster cluster = new MiniDFSCluster(conf, 1, true, null);
+    FileSystem fs = cluster.getFileSystem();
+    try {
+
+      //
+      // check that / exists
+      //
+      Path path = new Path("/");
+      System.out.println("Path : \"" + path.toString() + "\"");
+      System.out.println(fs.isDirectory(path));
+      System.out.println(fs.getFileStatus(path).isDir()); 
+      assertTrue("/ should be a directory", 
+                 fs.getFileStatus(path).isDir() == true);
+      
+      // create a new file in the home directory. Do not close it.
+      //
+      Path file1 = new Path("filestatus.dat");
+      FSDataOutputStream stm = createFile(fs, file1, 1);
+      System.out.println("Created file filestatus.dat with one "
+                         + " replicas.");
+
+      // verify that file exists in FS namespace
+      assertTrue(file1 + " should be a file", 
+                  fs.getFileStatus(file1).isDir() == false);
+      System.out.println("Path : \"" + file1 + "\"");
+
+      // write to file
+      writeFile(stm);
+
+      // close file. This makes all file data visible to clients.
+      stm.close();
+      checkFile(fs, file1, 1);
+
+    } finally {
+      fs.close();
+      cluster.shutdown();
+    }
+  }
+}

Propchange: lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestFileCreation.java
------------------------------------------------------------------------------
    svn:eol-style = native

Propchange: lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestFileCreation.java
------------------------------------------------------------------------------
    svn:keywords = Id Revision HeadURL


