hadoop-common-commits mailing list archives

From: szets...@apache.org
Subject: svn commit: r816751 - in /hadoop/common/trunk: CHANGES.txt src/java/org/apache/hadoop/fs/FileContext.java src/test/core/org/apache/hadoop/fs/TestFileContextDeleteOnExit.java
Date: Fri, 18 Sep 2009 19:13:30 GMT
Author: szetszwo
Date: Fri Sep 18 19:13:29 2009
New Revision: 816751

URL: http://svn.apache.org/viewvc?rev=816751&view=rev
Log:
HADOOP-6270. Support deleteOnExit in FileContext.  Contributed by Suresh Srinivas
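
For context, a minimal usage sketch of the new API (not part of this commit; the
class name and the path below are illustrative), assuming a FileContext backed by
the local file system:

    import java.util.EnumSet;

    import org.apache.hadoop.fs.CreateFlag;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileContext;
    import org.apache.hadoop.fs.Path;

    public class DeleteOnExitUsage {
      public static void main(String[] args) throws Exception {
        // Obtain a FileContext for the local file system.
        FileContext fc = FileContext.getLocalFSFileContext();
        Path tmp = new Path("/tmp/scratch-file");   // illustrative path

        // Create a file, then register it for deletion at JVM shutdown.
        FSDataOutputStream out = fc.create(tmp, EnumSet.of(CreateFlag.CREATE));
        out.write(new byte[] {1, 2, 3});
        out.close();

        // deleteOnExit returns false if the path does not exist; otherwise the
        // path is tracked by the shutdown hook that FileContext installs.
        boolean registered = fc.deleteOnExit(tmp);
        System.out.println("registered for delete-on-exit: " + registered);
      }
    }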

Added:
    hadoop/common/trunk/src/test/core/org/apache/hadoop/fs/TestFileContextDeleteOnExit.java
Modified:
    hadoop/common/trunk/CHANGES.txt
    hadoop/common/trunk/src/java/org/apache/hadoop/fs/FileContext.java

Modified: hadoop/common/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/CHANGES.txt?rev=816751&r1=816750&r2=816751&view=diff
==============================================================================
--- hadoop/common/trunk/CHANGES.txt (original)
+++ hadoop/common/trunk/CHANGES.txt Fri Sep 18 19:13:29 2009
@@ -571,6 +571,9 @@
 
     HADOOP-6268. Add ivy jar to .gitignore. (Todd Lipcon via cdouglas)
 
+    HADOOP-6270. Support deleteOnExit in FileContext.  (Suresh Srinivas via
+    szetszwo)
+
   OPTIMIZATIONS
 
     HADOOP-5595. NameNode does not need to run a replicator to choose a

Modified: hadoop/common/trunk/src/java/org/apache/hadoop/fs/FileContext.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/src/java/org/apache/hadoop/fs/FileContext.java?rev=816751&r1=816750&r2=816751&view=diff
==============================================================================
--- hadoop/common/trunk/src/java/org/apache/hadoop/fs/FileContext.java (original)
+++ hadoop/common/trunk/src/java/org/apache/hadoop/fs/FileContext.java Fri Sep 18 19:13:29 2009
@@ -25,15 +25,23 @@
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.EnumSet;
+import java.util.IdentityHashMap;
 import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.TreeSet;
+import java.util.Map.Entry;
 import java.util.regex.Pattern;
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.classification.InterfaceAudience.LimitedPrivate.Project;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.util.Progressable;
-import org.apache.hadoop.classification.*;
-import org.apache.hadoop.classification.InterfaceAudience.LimitedPrivate.*;
 
 /**
  * The FileContext class provides an interface to the application writer for
@@ -124,6 +132,18 @@
 
 public final class FileContext {
   
+  public static final Log LOG = LogFactory.getLog(FileContext.class);
+  
+  /**
+   * List of files that should be deleted on JVM shutdown
+   */
+  final static Map<FileContext, Set<Path>> deleteOnExit = 
+    new IdentityHashMap<FileContext, Set<Path>>();
+
+  /** JVM shutdown hook thread */
+  final static FileContextFinalizer finalizer = 
+    new FileContextFinalizer();
+  
   /**
    * The FileContext is defined by.
    *  1) defaultFS (slash)
@@ -180,6 +200,28 @@
   }
 
   /**
+   * Delete all the paths that were marked as delete-on-exit.
+   */
+  static void processDeleteOnExit() {
+    synchronized (deleteOnExit) {
+      Set<Entry<FileContext, Set<Path>>> set = deleteOnExit.entrySet();
+      for (Entry<FileContext, Set<Path>> entry : set) {
+        FileContext fc = entry.getKey();
+        Set<Path> paths = entry.getValue();
+        for (Path path : paths) {
+          try {
+            fc.delete(path, true);
+          }
+          catch (IOException e) {
+            LOG.warn("Ignoring failure to deleteOnExit for path " + path);
+          }
+        }
+      }
+      deleteOnExit.clear();
+    }
+  }
+  
+  /**
    * Pathnames with scheme and relative path are illegal.
    * @param path to be checked
    * @throws IllegalArgumentException if of type scheme:foo/bar
@@ -886,6 +928,32 @@
     return getFSofPath(absF).listStatus(absF);
   }
 
+  /**
+   * Mark a path to be deleted on JVM shutdown.
+   * 
+   * @param f the existing path to delete.
+   * @return  true if deleteOnExit is successful, otherwise false.
+   * @throws IOException
+   */
+  public boolean deleteOnExit(Path f) throws IOException {
+    if (!exists(f)) {
+      return false;
+    }
+    synchronized (deleteOnExit) {
+      if (deleteOnExit.isEmpty() && !finalizer.isAlive()) {
+        Runtime.getRuntime().addShutdownHook(finalizer);
+      }
+      
+      Set<Path> set = deleteOnExit.get(this);
+      if (set == null) {
+        set = new TreeSet<Path>();
+        deleteOnExit.put(this, set);
+      }
+      set.add(f);
+    }
+    return true;
+  }
+  
   private final Util util;
   public Util util() {
     return util;
@@ -1420,4 +1488,13 @@
     return (srcUri.getAuthority().equals(dstUri.getAuthority()) && srcUri
         .getAuthority().equals(dstUri.getAuthority()));
   }
-}
\ No newline at end of file
+  
+  /**
+   * Deletes all the paths in deleteOnExit on JVM shutdown
+   */
+  static class FileContextFinalizer extends Thread {
+    public synchronized void run() {
+      processDeleteOnExit();
+    }
+  }
+}
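
The change itself boils down to a small, self-contained pattern; the sketch below
restates it outside Hadoop (plain java.io.File, invented class name; non-recursive
delete, unlike the patch) purely to illustrate the mechanism: paths are registered
in a static set, a shutdown hook is installed on first registration, and the hook
deletes whatever is still registered when the JVM exits.

    import java.io.File;
    import java.util.Set;
    import java.util.TreeSet;

    class ShutdownCleanupSketch {
      /** Paths still awaiting deletion at JVM shutdown. */
      private static final Set<File> pending = new TreeSet<File>();

      /** Hook thread that attempts to delete every registered path on shutdown. */
      private static final Thread hook = new Thread() {
        @Override
        public void run() {
          synchronized (pending) {
            for (File f : pending) {
              if (!f.delete()) {
                System.err.println("Ignoring failure to delete " + f);
              }
            }
            pending.clear();
          }
        }
      };

      static void deleteOnExit(File f) {
        synchronized (pending) {
          // Install the hook lazily, the first time anything is registered.
          if (pending.isEmpty()) {
            Runtime.getRuntime().addShutdownHook(hook);
          }
          pending.add(f);
        }
      }
    }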

Added: hadoop/common/trunk/src/test/core/org/apache/hadoop/fs/TestFileContextDeleteOnExit.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/src/test/core/org/apache/hadoop/fs/TestFileContextDeleteOnExit.java?rev=816751&view=auto
==============================================================================
--- hadoop/common/trunk/src/test/core/org/apache/hadoop/fs/TestFileContextDeleteOnExit.java (added)
+++ hadoop/common/trunk/src/test/core/org/apache/hadoop/fs/TestFileContextDeleteOnExit.java Fri Sep 18 19:13:29 2009
@@ -0,0 +1,109 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs;
+
+import java.io.IOException;
+import java.util.EnumSet;
+import java.util.Set;
+
+import junit.framework.Assert;
+
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+/**
+ * Tests {@link FileContext.#deleteOnExit(Path)} functionality.
+ */
+public class TestFileContextDeleteOnExit {
+  private static String TEST_ROOT_DIR =
+    System.getProperty("test.build.data", "/tmp") + "/test";
+  
+  private static byte[] data = new byte[1024 * 2]; // two blocks of data
+  {
+    for (int i = 0; i < data.length; i++) {
+      data[i] = (byte) (i % 10);
+    }
+  }
+  
+  private FileContext fc;
+  
+  @Before
+  public void setup() throws IOException {
+    fc = FileContext.getLocalFSFileContext();
+  }
+  
+  @After
+  public void tearDown() throws IOException {
+    fc.delete(getTestRootPath(), true);
+  }
+  
+  private Path getTestRootPath() {
+    return fc.makeQualified(new Path(TEST_ROOT_DIR));
+  }
+  
+  private Path getTestPath(String pathString) {
+    return fc.makeQualified(new Path(TEST_ROOT_DIR, pathString));
+  }
+  
+  private void createFile(FileContext fc, Path path) throws IOException {
+    FSDataOutputStream out = fc.create(path, EnumSet.of(CreateFlag.CREATE));
+    out.write(data, 0, data.length);
+    out.close();
+  }
+  
+  private void checkDeleteOnExitData(int size, FileContext fc, Path... paths) {
+    Assert.assertEquals(size, FileContext.deleteOnExit.size());
+    Set<Path> set = FileContext.deleteOnExit.get(fc);
+    Assert.assertEquals(paths.length, (set == null ? 0 : set.size()));
+    for (Path path : paths) {
+      Assert.assertTrue(set.contains(path));
+    }
+  }
+  
+  @Test
+  public void testDeleteOnExit() throws Exception {
+    // Create deleteOnExit entries
+    Path file1 = getTestPath("file1");
+    createFile(fc, file1);
+    fc.deleteOnExit(file1);
+    checkDeleteOnExitData(1, fc, file1);
+    
+    // Ensure shutdown hook is added
+    Assert.assertTrue(Runtime.getRuntime().removeShutdownHook(FileContext.finalizer));
+    
+    Path file2 = getTestPath("dir1/file2");
+    createFile(fc, file2);
+    fc.deleteOnExit(file2);
+    checkDeleteOnExitData(1, fc, file1, file2);
+    
+    Path dir = getTestPath("dir3/dir4/dir5/dir6");
+    createFile(fc, dir);
+    fc.deleteOnExit(dir);
+    checkDeleteOnExitData(1, fc, file1, file2, dir);
+    
+    // trigger deleteOnExit and ensure the registered
+    // paths are cleaned up
+    FileContext.finalizer.start();
+    FileContext.finalizer.join();
+    checkDeleteOnExitData(0, fc, new Path[0]);
+    Assert.assertFalse(fc.exists(file1));
+    Assert.assertFalse(fc.exists(file2));
+    Assert.assertFalse(fc.exists(dir));
+  }
+}


