hadoop-hdfs-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From dhr...@apache.org
Subject svn commit: r800238 - in /hadoop/hdfs/trunk: CHANGES.txt src/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java src/test/hdfs/org/apache/hadoop/hdfs/TestSetTimes.java
Date Mon, 03 Aug 2009 07:43:49 GMT
Author: dhruba
Date: Mon Aug  3 07:43:48 2009
New Revision: 800238

URL: http://svn.apache.org/viewvc?rev=800238&view=rev
Log:
HDFS-504. Update the modification time of a file when the file 
is closed. (Chun Zhang via dhruba)


Modified:
    hadoop/hdfs/trunk/CHANGES.txt
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestSetTimes.java

Modified: hadoop/hdfs/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/CHANGES.txt?rev=800238&r1=800237&r2=800238&view=diff
==============================================================================
--- hadoop/hdfs/trunk/CHANGES.txt (original)
+++ hadoop/hdfs/trunk/CHANGES.txt Mon Aug  3 07:43:48 2009
@@ -67,6 +67,9 @@
 
     HDFS-511. Remove redundant block searches in BlockManager. (shv)
 
+    HDFS-504. Update the modification time of a file when the file 
+    is closed. (Chun Zhang via dhruba)
+
   BUG FIXES
     HDFS-76. Better error message to users when commands fail because of 
     lack of quota. Allow quota to be set even if the limit is lower than

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java?rev=800238&r1=800237&r2=800238&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java Mon Aug  3 07:43:48 2009
@@ -306,8 +306,10 @@
    */
   void closeFile(String path, INodeFile file) {
     waitForReady();
+    long now = FSNamesystem.now();
     synchronized (rootDir) {
       // file is closed
+      file.setModificationTimeForce(now);
       fsImage.getEditLog().logCloseFile(path, file);
       if (NameNode.stateChangeLog.isDebugEnabled()) {
         NameNode.stateChangeLog.debug("DIR* FSDirectory.closeFile: "

Modified: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestSetTimes.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestSetTimes.java?rev=800238&r1=800237&r2=800238&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestSetTimes.java (original)
+++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestSetTimes.java Mon Aug  3 07:43:48 2009
@@ -183,6 +183,64 @@
     }
   }
 
+  /**
+   * Tests mod time change at close in DFS.
+   */
+  public void testTimesAtClose() throws IOException {
+    Configuration conf = new Configuration();
+    final int MAX_IDLE_TIME = 2000; // 2s
+    int replicas = 1;
+
+    // parameter initialization
+    conf.setInt("ipc.client.connection.maxidletime", MAX_IDLE_TIME);
+    conf.setInt("heartbeat.recheck.interval", 1000);
+    conf.setInt("dfs.heartbeat.interval", 1);
+    conf.setInt("dfs.datanode.handler.count", 50);
+    MiniDFSCluster cluster = new MiniDFSCluster(conf, numDatanodes, true, null);
+    cluster.waitActive();
+    InetSocketAddress addr = new InetSocketAddress("localhost",
+                                                     cluster.getNameNodePort());
+    DFSClient client = new DFSClient(addr, conf);
+    DatanodeInfo[] info = client.datanodeReport(DatanodeReportType.LIVE);
+    assertEquals("Number of Datanodes ", numDatanodes, info.length);
+    FileSystem fileSys = cluster.getFileSystem();
+    assertTrue(fileSys instanceof DistributedFileSystem);
+
+    try {
+      // create a new file and write to it
+      Path file1 = new Path("/simple.dat");
+      FSDataOutputStream stm = writeFile(fileSys, file1, replicas);
+      System.out.println("Created and wrote file simple.dat");
+      FileStatus statBeforeClose = fileSys.getFileStatus(file1);
+      long mtimeBeforeClose = statBeforeClose.getModificationTime();
+      String mdateBeforeClose = dateForm.format(new Date(
+                                                     mtimeBeforeClose));
+      System.out.println("mtime on " + file1 + " before close is "
+                  + mdateBeforeClose + " (" + mtimeBeforeClose + ")");
+      assertTrue(mtimeBeforeClose != 0);
+
+      //close file after writing
+      stm.close();
+      System.out.println("Closed file.");
+      FileStatus statAfterClose = fileSys.getFileStatus(file1);
+      long mtimeAfterClose = statAfterClose.getModificationTime();
+      String mdateAfterClose = dateForm.format(new Date(mtimeAfterClose));
+      System.out.println("mtime on " + file1 + " after close is "
+                  + mdateAfterClose + " (" + mtimeAfterClose + ")");
+      assertTrue(mtimeAfterClose != 0);
+      assertTrue(mtimeBeforeClose != mtimeAfterClose);
+
+      cleanupFile(fileSys, file1);
+    } catch (IOException e) {
+      info = client.datanodeReport(DatanodeReportType.ALL);
+      printDatanodeReport(info);
+      throw e;
+    } finally {
+      fileSys.close();
+      cluster.shutdown();
+    }
+  }
+
   public static void main(String[] args) throws Exception {
     new TestSetTimes().testTimes();
   }



Mime
View raw message