hadoop-hdfs-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From dhr...@apache.org
Subject svn commit: r827928 - in /hadoop/hdfs/trunk: CHANGES.txt src/contrib/raid/src/java/org/apache/hadoop/raid/DistRaid.java src/contrib/raid/src/java/org/apache/hadoop/raid/RaidNode.java src/contrib/raid/src/test/org/apache/hadoop/raid/TestRaidPurge.java
Date Wed, 21 Oct 2009 08:57:19 GMT
Author: dhruba
Date: Wed Oct 21 08:57:18 2009
New Revision: 827928

URL: http://svn.apache.org/viewvc?rev=827928&view=rev
Log:
HDFS-695. RaidNode should read in configuration from hdfs-site.xml.
(dhruba)


Modified:
    hadoop/hdfs/trunk/CHANGES.txt
    hadoop/hdfs/trunk/src/contrib/raid/src/java/org/apache/hadoop/raid/DistRaid.java
    hadoop/hdfs/trunk/src/contrib/raid/src/java/org/apache/hadoop/raid/RaidNode.java
    hadoop/hdfs/trunk/src/contrib/raid/src/test/org/apache/hadoop/raid/TestRaidPurge.java

Modified: hadoop/hdfs/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/CHANGES.txt?rev=827928&r1=827927&r2=827928&view=diff
==============================================================================
--- hadoop/hdfs/trunk/CHANGES.txt (original)
+++ hadoop/hdfs/trunk/CHANGES.txt Wed Oct 21 08:57:18 2009
@@ -22,6 +22,9 @@
     HDFS-646. Fix test-patch failure by adding test-contrib ant target.
     (gkesavan)
 
+    HDFS-695. RaidNode should read in configuration from hdfs-site.xml.
+    (dhruba)
+
 Release 0.21.0 - Unreleased
 
   INCOMPATIBLE CHANGES

Modified: hadoop/hdfs/trunk/src/contrib/raid/src/java/org/apache/hadoop/raid/DistRaid.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/contrib/raid/src/java/org/apache/hadoop/raid/DistRaid.java?rev=827928&r1=827927&r2=827928&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/contrib/raid/src/java/org/apache/hadoop/raid/DistRaid.java (original)
+++ hadoop/hdfs/trunk/src/contrib/raid/src/java/org/apache/hadoop/raid/DistRaid.java Wed Oct 21 08:57:18 2009
@@ -39,6 +39,8 @@
   static final String JOB_DIR_LABEL = NAME + ".job.dir";
   static final String OP_LIST_LABEL = NAME + ".op.list";
   static final String OP_COUNT_LABEL = NAME + ".op.count";
+  static final int   OP_LIST_BLOCK_SIZE = 32 * 1024 * 1024; // block size of control file
+  static final short OP_LIST_REPLICATION = 10; // replication factor of control file
 
   private static final long OP_PER_MAP = 100;
   private static final int MAX_MAPS_PER_NODE = 20;
@@ -287,6 +289,10 @@
     jobconf.set(JOB_DIR_LABEL, jobdir.toString());
     Path log = new Path(jobdir, "_logs");
 
+    // The control file should have small size blocks. This helps
+    // in spreading out the load from mappers that will be spawned.
+    jobconf.setInt("dfs.blocks.size",  OP_LIST_BLOCK_SIZE);
+
     FileOutputFormat.setOutputPath(jobconf, log);
     LOG.info("log=" + log);
 
@@ -314,6 +320,7 @@
       if (opWriter != null) {
         opWriter.close();
       }
+      fs.setReplication(opList, OP_LIST_REPLICATION); // increase replication for control file
     }
     raidPolicyPathPairList.clear();
     

Modified: hadoop/hdfs/trunk/src/contrib/raid/src/java/org/apache/hadoop/raid/RaidNode.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/contrib/raid/src/java/org/apache/hadoop/raid/RaidNode.java?rev=827928&r1=827927&r2=827928&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/contrib/raid/src/java/org/apache/hadoop/raid/RaidNode.java (original)
+++ hadoop/hdfs/trunk/src/contrib/raid/src/java/org/apache/hadoop/raid/RaidNode.java Wed Oct 21 08:57:18 2009
@@ -19,6 +19,7 @@
 package org.apache.hadoop.raid;
 
 import java.io.IOException;
+import java.io.FileNotFoundException;
 import java.util.Collection;
 import java.util.List;
 import java.util.LinkedList;
@@ -56,6 +57,14 @@
  * A {@link RaidNode} that implements 
  */
 public class RaidNode implements RaidProtocol {
+
+  static{
+    Configuration.addDefaultResource("hdfs-default.xml");
+    Configuration.addDefaultResource("hdfs-site.xml");
+    Configuration.addDefaultResource("mapred-default.xml");
+    Configuration.addDefaultResource("mapred-site.xml");
+  }
+
   public static final Log LOG = LogFactory.getLog( "org.apache.hadoop.raid.RaidNode");
   public static final long SLEEP_TIME = 10000L; // 10 seconds
   public static final int DEFAULT_PORT = 60000;
@@ -712,7 +721,7 @@
           ins[i].seek(blockSize * (startBlock + i));
         }
 
-        generateParity(ins,out,blockSize,bufs,xor);
+        generateParity(ins,out,blockSize,bufs,xor, reporter);
         
         // close input file handles
         for (int i = 0; i < ins.length; i++) {
@@ -769,7 +778,7 @@
   }
   
   private static void generateParity(FSDataInputStream[] ins, FSDataOutputStream fout, 
-      long parityBlockSize, byte[] bufs, byte[] xor) throws IOException {
+      long parityBlockSize, byte[] bufs, byte[] xor, Reporter reporter) throws IOException {
     
     int bufSize;
     if ((bufs == null) || (bufs.length == 0)){
@@ -796,6 +805,11 @@
 
       // read all remaining blocks and xor them into the buffer
       for (int i = 1; i < ins.length; i++) {
+
+        // report progress to Map-reduce framework
+        if (reporter != null) {
+          reporter.progress();
+        }
         
         int actualRead = readInputUntilEnd(ins[i], bufs, toRead);
         
@@ -911,7 +925,7 @@
     byte[] bufs = new byte[bufSize];
     byte[] xor = new byte[bufSize];
    
-    generateParity(ins,fout,corruptBlockSize,bufs,xor);
+    generateParity(ins,fout,corruptBlockSize,bufs,xor,null);
     
     // close all files
     fout.close();
@@ -1055,12 +1069,17 @@
                         info.getName() + " has already been procesed.");
                 continue;
               }
-              LOG.info("Purging obsolete parity files for policy " + 
-                        info.getName() + " " + destp);
 
               FileSystem srcFs = info.getSrcPath().getFileSystem(conf);
-              FileStatus stat = destFs.getFileStatus(destp);
+              FileStatus stat = null;
+              try {
+                stat = destFs.getFileStatus(destp);
+              } catch (FileNotFoundException e) {
+                // do nothing, leave stat = null;
+              }
               if (stat != null) {
+                LOG.info("Purging obsolete parity files for policy " + 
+                          info.getName() + " " + destp);
                 recursePurge(srcFs, destFs, destinationPrefix, stat);
               }
 

Modified: hadoop/hdfs/trunk/src/contrib/raid/src/test/org/apache/hadoop/raid/TestRaidPurge.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/contrib/raid/src/test/org/apache/hadoop/raid/TestRaidPurge.java?rev=827928&r1=827927&r2=827928&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/contrib/raid/src/test/org/apache/hadoop/raid/TestRaidPurge.java (original)
+++ hadoop/hdfs/trunk/src/contrib/raid/src/test/org/apache/hadoop/raid/TestRaidPurge.java Wed Oct 21 08:57:18 2009
@@ -60,7 +60,6 @@
       "test-raid.xml").getAbsolutePath();
   final static long RELOAD_INTERVAL = 1000;
   final static Log LOG = LogFactory.getLog("org.apache.hadoop.raid.TestRaidNode");
-  final Random rand = new Random();
 
   {
     ((Log4JLogger)RaidNode.LOG).getLogger().setLevel(Level.ALL);



Mime
View raw message