hadoop-common-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From cutt...@apache.org
Subject svn commit: r417300 - in /lucene/hadoop/trunk: CHANGES.txt conf/hadoop-default.xml src/java/org/apache/hadoop/dfs/FSDataset.java
Date Mon, 26 Jun 2006 21:59:14 GMT
Author: cutting
Date: Mon Jun 26 14:59:13 2006
New Revision: 417300

URL: http://svn.apache.org/viewvc?rev=417300&view=rev
Log:
HADOOP-296.  Permit specification of the amount of reserved space on a DFS datanode.  Contributed by Johan Oskarson.

Modified:
    lucene/hadoop/trunk/CHANGES.txt
    lucene/hadoop/trunk/conf/hadoop-default.xml
    lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/FSDataset.java

Modified: lucene/hadoop/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/CHANGES.txt?rev=417300&r1=417299&r2=417300&view=diff
==============================================================================
--- lucene/hadoop/trunk/CHANGES.txt (original)
+++ lucene/hadoop/trunk/CHANGES.txt Mon Jun 26 14:59:13 2006
@@ -66,6 +66,10 @@
     "dfs", "fsck", "job", and "distcp" commands currently support
     this, with more to be added.  (Hairong Kuang via cutting)
 
+16. HADOOP-296.  Permit specification of the amount of reserved space
+    on a DFS datanode.  One may specify both the percentage free and
+    the number of bytes.  (Johan Oskarson via cutting)
+
 
 Release 0.3.2 - 2006-06-09
 

Modified: lucene/hadoop/trunk/conf/hadoop-default.xml
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/conf/hadoop-default.xml?rev=417300&r1=417299&r2=417300&view=diff
==============================================================================
--- lucene/hadoop/trunk/conf/hadoop-default.xml (original)
+++ lucene/hadoop/trunk/conf/hadoop-default.xml Mon Jun 26 14:59:13 2006
@@ -103,6 +103,20 @@
 </property>
 
 <property>
+  <name>dfs.datanode.du.reserved</name>
+  <value>0</value>
+  <description>Reserved space in bytes. Always leave this much space free for non dfs use
+  </description>
+</property>
+
+<property>
+  <name>dfs.datanode.du.pct</name>
+  <value>0.98f</value>
+  <description>When calculating remaining space, only use this percentage of the real available space
+  </description>
+</property>
+
+<property>
   <name>dfs.name.dir</name>
   <value>/tmp/hadoop/dfs/name</value>
   <description>Determines where on the local filesystem the DFS name node
@@ -160,6 +174,7 @@
   before we signal failure to the application.
   </description>
 </property>
+
 
 <!-- map/reduce properties -->
 

Modified: lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/FSDataset.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/FSDataset.java?rev=417300&r1=417299&r2=417300&view=diff
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/FSDataset.java (original)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/FSDataset.java Mon Jun 26 14:59:13 2006
@@ -30,8 +30,9 @@
  * @author Mike Cafarella
  ***************************************************/
 class FSDataset implements FSConstants {
-    static final double USABLE_DISK_PCT = 0.98;
 
+		static final double USABLE_DISK_PCT_DEFAULT = 0.98f; 
+	
   /**
      * A node type that can be built into a tree reflecting the
      * hierarchy of blocks on the local disk.
@@ -202,6 +203,7 @@
     DF diskUsage;
     File data = null, tmp = null;
     long reserved = 0;
+    double usableDiskPct = USABLE_DISK_PCT_DEFAULT;
     FSDir dirTree;
     TreeSet ongoingCreates = new TreeSet();
 
@@ -209,6 +211,8 @@
      * An FSDataset has a directory where it loads its data files.
      */
     public FSDataset(File dir, Configuration conf) throws IOException {
+    		this.reserved = conf.getLong("dfs.datanode.du.reserved", 0);
+    		this.usableDiskPct = conf.getFloat("dfs.datanode.du.pct", (float) USABLE_DISK_PCT_DEFAULT);
         diskUsage = new DF( dir.getCanonicalPath(), conf); 
         this.data = new File(dir, "data");
         if (! data.exists()) {
@@ -233,7 +237,7 @@
      * Return how many bytes can still be stored in the FSDataset
      */
     public long getRemaining() throws IOException {
-        return ((long) Math.round(USABLE_DISK_PCT * diskUsage.getAvailable())) - reserved;
+        return ((long) Math.round(usableDiskPct * diskUsage.getAvailable())) - reserved;
     }
 
     /**



Mime
View raw message