hadoop-common-commits mailing list archives

From: tomwh...@apache.org
Subject: svn commit: r632385 - in /hadoop/core/trunk: CHANGES.txt src/java/org/apache/hadoop/fs/DU.java src/test/org/apache/hadoop/fs/TestDU.java src/test/org/apache/hadoop/util/TestShell.java
Date: Fri, 29 Feb 2008 16:48:11 GMT
Author: tomwhite
Date: Fri Feb 29 08:48:04 2008
New Revision: 632385

URL: http://svn.apache.org/viewvc?rev=632385&view=rev
Log:
HADOOP-2845.  Fix dfsadmin disk utilization report on Solaris.  Contributed by Martin Traverso.
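
For context: on Solaris, "du -s" reports sizes in 512-byte blocks, whereas GNU du on Linux defaults to 1K blocks, so interpreting the first output field as kilobytes doubled the reported usage on Solaris. The POSIX "-k" flag forces kilobyte units on both platforms. A rough sketch of the parsing this change relies on (parseExecResult's body is not part of this diff, so the details below are illustrative only):

    // illustrative sketch of DU.parseExecResult: read the first
    // tab-delimited field of "du -sk <dir>" output and convert KB to bytes
    String line = lines.readLine();
    if (line == null) {
      throw new IOException("Expected output from du, got end of stream");
    }
    String[] tokens = line.split("\t");
    this.used = Long.parseLong(tokens[0]) * 1024L;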

Added:
    hadoop/core/trunk/src/test/org/apache/hadoop/util/TestShell.java
Modified:
    hadoop/core/trunk/CHANGES.txt
    hadoop/core/trunk/src/java/org/apache/hadoop/fs/DU.java
    hadoop/core/trunk/src/test/org/apache/hadoop/fs/TestDU.java

Modified: hadoop/core/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/CHANGES.txt?rev=632385&r1=632384&r2=632385&view=diff
==============================================================================
--- hadoop/core/trunk/CHANGES.txt (original)
+++ hadoop/core/trunk/CHANGES.txt Fri Feb 29 08:48:04 2008
@@ -91,6 +91,9 @@
 
     HADOOP-2891.  DFSClient.close() closes all open files. (dhruba)
 
+    HADOOP-2845.  Fix dfsadmin disk utilization report on Solaris.
+    (Martin Traverso via tomwhite)
+
 Release 0.16.1 - Unreleased
 
   IMPROVEMENTS

Modified: hadoop/core/trunk/src/java/org/apache/hadoop/fs/DU.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/java/org/apache/hadoop/fs/DU.java?rev=632385&r1=632384&r2=632385&view=diff
==============================================================================
--- hadoop/core/trunk/src/java/org/apache/hadoop/fs/DU.java (original)
+++ hadoop/core/trunk/src/java/org/apache/hadoop/fs/DU.java Fri Feb 29 08:48:04 2008
@@ -17,14 +17,14 @@
  */
 package org.apache.hadoop.fs;
 
-import java.io.File;
-import java.io.IOException;
-import java.io.BufferedReader;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.dfs.FSConstants;
 import org.apache.hadoop.util.Shell;
 
+import java.io.BufferedReader;
+import java.io.File;
+import java.io.IOException;
+
 /** Filesystem disk space usage statistics.  Uses the unix 'du' program*/
 public class DU extends Shell {
   private String  dirPath;
@@ -61,12 +61,12 @@
   
   public String toString() {
     return
-      "du -s " + dirPath +"\n" +
+      "du -sk " + dirPath +"\n" +
       used + "\t" + dirPath;
   }
 
   protected String[] getExecString() {
-    return new String[] {"du","-s", dirPath};
+    return new String[] {"du","-sk", dirPath};
   }
   
   protected void parseExecResult(BufferedReader lines) throws IOException {
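
For reference, a caller might use the class along these lines (a minimal sketch; the DU(File, long) constructor form matches the one exercised by TestDU below, with the interval given in milliseconds):

    import java.io.File;
    import org.apache.hadoop.fs.DU;

    File dir = new File("/tmp/data");
    DU du = new DU(dir, 10 * 60 * 1000L); // re-run "du" at most every 10 minutes
    long usedBytes = du.getUsed();        // may throw IOException; parsed from "du -sk"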

Modified: hadoop/core/trunk/src/test/org/apache/hadoop/fs/TestDU.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/test/org/apache/hadoop/fs/TestDU.java?rev=632385&r1=632384&r2=632385&view=diff
==============================================================================
--- hadoop/core/trunk/src/test/org/apache/hadoop/fs/TestDU.java (original)
+++ hadoop/core/trunk/src/test/org/apache/hadoop/fs/TestDU.java Fri Feb 29 08:48:04 2008
@@ -17,65 +17,68 @@
  */
 package org.apache.hadoop.fs;
 
+import junit.framework.TestCase;
+
 import java.io.File;
 import java.io.IOException;
 import java.io.RandomAccessFile;
-
-import junit.framework.TestCase;
+import java.util.Random;
 
 /** This test makes sure that "DU" does not get to run on each call to getUsed */ 
 public class TestDU extends TestCase {
   final static private File DU_DIR = new File(
       System.getProperty("test.build.data","/tmp"), "dutmp");
-  final static private File DU_FILE1 = new File(DU_DIR, "tmp1");
-  final static private File DU_FILE2 = new File(DU_DIR, "tmp2");
-  
-  /** create a file of more than 1K size */
-  private void createFile( File newFile ) throws IOException {
+
+  public void setUp() throws IOException {
+      FileUtil.fullyDelete(DU_DIR);
+      assertTrue(DU_DIR.mkdirs());
+  }
+
+  public void tearDown() throws IOException {
+      FileUtil.fullyDelete(DU_DIR);
+  }
+    
+  private void createFile(File newFile, int size) throws IOException {
+    // write random data so that filesystems with compression enabled (e.g., ZFS)
+    // can't compress the file
+    Random random = new Random();
+    byte[] data = new byte[size];
+    random.nextBytes(data);
+
     newFile.createNewFile();
-    RandomAccessFile file = new RandomAccessFile(newFile, "rw");
-    file.seek(1024);
-    file.writeBytes("du test du test");
+    RandomAccessFile file = new RandomAccessFile(newFile, "rws");
+
+    file.write(data);
+      
     file.getFD().sync();
     file.close();
   }
   
-  /** delete a file */
-  private void rmFile(File file) {
-    if(file.exists()) {
-      assertTrue(file.delete());
-    }
-  }
+  /*
+   * Find a number that is a multiple of the block size in this file system
+   */
+  private int getBlockSize() throws IOException, InterruptedException {
+    File file = new File(DU_DIR, "small");
+    createFile(file, 128); // this is an arbitrary number. It has to be big enough for the filesystem to report
+                           // any usage at all. For instance, NFS reports 0 blocks if the file is <= 64 bytes
 
-  /* interval is in a unit of minutes */
-  private void testDU(long interval) throws IOException {
-    rmFile(DU_FILE1);
-    rmFile(DU_FILE2);
-    DU_DIR.delete();
-    assertTrue(DU_DIR.mkdirs());
-    try {
-      createFile(DU_FILE1);
-      DU du = new DU(DU_DIR, interval*60000);
-      long oldUsedSpace = du.getUsed();
-      assertTrue(oldUsedSpace>0); // make sure that du is called
-      createFile(DU_FILE2);
-      if(interval>0) {
-        assertEquals( oldUsedSpace, du.getUsed());  // du does not get called
-      } else {
-        assertTrue( oldUsedSpace < du.getUsed());   // du gets called again
-      }
-    } finally {
-      rmFile(DU_FILE1);
-      rmFile(DU_FILE2);
-      DU_DIR.delete();
-    }
-  }
+    Thread.sleep(5000); // let the metadata updater catch up
 
-  public void testDU() throws Exception {
-    testDU(Long.MIN_VALUE/60000);  // test a negative interval
-    testDU(0L);  // test a zero interval
-    testDU(10L); // interval equal to 10mins
-    testDU(System.currentTimeMillis()/60000+60); // test a very big interval
+    DU du = new DU(file, 0);
+    return (int) du.getUsed();
   }
+
+  public void testDU() throws IOException, InterruptedException {
+    int blockSize = getBlockSize();
+
+    File file = new File(DU_DIR, "data");
+    createFile(file, 2 * blockSize);
+
+    Thread.sleep(5000); // let the metadata updater catch up
     
+    DU du = new DU(file, 0);
+    long size = du.getUsed();
+
+    assertEquals(2 * blockSize, size);
+  }
 }
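
The rewritten test is filesystem-agnostic: instead of assuming 1K blocks, it first probes the block size with a small file, then asserts that two blocks' worth of incompressible random data occupy exactly two blocks. Condensed, the invariant it checks is (a restatement of the test above, not new API):

    int blockSize = getBlockSize();       // usage reported for a 128-byte file
    File file = new File(DU_DIR, "data");
    createFile(file, 2 * blockSize);      // random bytes defeat ZFS-style compression
    DU du = new DU(file, 0);              // interval 0: every getUsed() re-runs "du"
    assertEquals(2 * blockSize, du.getUsed());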

Added: hadoop/core/trunk/src/test/org/apache/hadoop/util/TestShell.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/test/org/apache/hadoop/util/TestShell.java?rev=632385&view=auto
==============================================================================
--- hadoop/core/trunk/src/test/org/apache/hadoop/util/TestShell.java (added)
+++ hadoop/core/trunk/src/test/org/apache/hadoop/util/TestShell.java Fri Feb 29 08:48:04 2008
@@ -0,0 +1,67 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.util;
+
+import junit.framework.TestCase;
+
+import java.io.BufferedReader;
+import java.io.IOException;
+
+public class TestShell extends TestCase {
+
+  private static class Command extends Shell {
+    private int runCount = 0;
+
+    private Command(long interval) {
+      super(interval);
+    }
+
+    protected String[] getExecString() {
+      return new String[] {"echo", "hello"};
+    }
+
+    protected void parseExecResult(BufferedReader lines) throws IOException {
+      ++runCount;
+    }
+
+    public int getRunCount() {
+      return runCount;
+    }
+  }
+
+  public void testInterval() throws IOException {
+    testInterval(Long.MIN_VALUE / 60000);  // test a negative interval
+    testInterval(0L);  // test a zero interval
+    testInterval(10L); // interval equal to 10mins
+    testInterval(System.currentTimeMillis() / 60000 + 60); // test a very big interval
+  }
+
+  private void testInterval(long interval) throws IOException {
+    Command command = new Command(interval);
+
+    command.run();
+    assertEquals(1, command.getRunCount());
+
+    command.run();
+    if (interval > 0) {
+      assertEquals(1, command.getRunCount());
+    } else {
+      assertEquals(2, command.getRunCount());
+    }
+  }
+}
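
The interval contract this test pins down: Shell caches the result of its last execution and only re-runs the command once the configured number of milliseconds has elapsed, so a zero or negative interval always re-executes. A hypothetical sketch of that guard (Shell's actual field names are not shown in this commit):

    // inside Shell.run(): skip the fork while the cached result is fresh
    if (lastTime + interval > System.currentTimeMillis()) {
      return; // parseExecResult state from the previous run is reused
    }
    runCommand(); // fork the process, then hand stdout to parseExecResult
    lastTime = System.currentTimeMillis();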


