From: cutting@apache.org
To: hadoop-commits@lucene.apache.org
Reply-To: hadoop-dev@lucene.apache.org
Subject: svn commit: r472681 - in /lucene/hadoop/trunk: CHANGES.txt src/java/org/apache/hadoop/dfs/FSDataset.java
Date: Wed, 08 Nov 2006 22:59:51 -0000
Message-Id: <20061108225951.9D8551A984D@eris.apache.org>

Author: cutting
Date: Wed Nov  8 14:59:50 2006
New Revision: 472681

URL: http://svn.apache.org/viewvc?view=rev&rev=472681
Log:
HADOOP-604.  Fix some synchronization issues and a NullPointerException
in DFS datanode.  Contributed by Raghu.

Modified:
    lucene/hadoop/trunk/CHANGES.txt
    lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/FSDataset.java

Modified: lucene/hadoop/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/CHANGES.txt?view=diff&rev=472681&r1=472680&r2=472681
==============================================================================
--- lucene/hadoop/trunk/CHANGES.txt (original)
+++ lucene/hadoop/trunk/CHANGES.txt Wed Nov  8 14:59:50 2006
@@ -25,6 +25,9 @@
  7. HADOOP-382.  Extend unit tests to run multiple datanodes.
     (Milind Bhandarkar via cutting)
 
+ 8. HADOOP-604.  Fix some synchronization issues and a
+    NullPointerException in DFS datanode.  (Raghu Angadi via cutting)
+
 
 Release 0.8.0 - 2006-11-03
 

Modified: lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/FSDataset.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/FSDataset.java?view=diff&rev=472681&r1=472680&r2=472681
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/FSDataset.java (original)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/FSDataset.java Wed Nov  8 14:59:50 2006
@@ -34,7 +34,7 @@
  ***************************************************/
 class FSDataset implements FSConstants {
-
+
   /**
    * A node type that can be built into a tree reflecting the
    * hierarchy of blocks on the local disk.
@@ -274,7 +274,7 @@
         this.volumes = volumes;
     }
 
-    FSVolume getNextVolume(long blockSize) throws IOException {
+    synchronized FSVolume getNextVolume(long blockSize) throws IOException {
       int startVolume = curVolume;
       while (true) {
         FSVolume volume = volumes[curVolume];
@@ -302,25 +302,25 @@
       return remaining;
     }
 
-    void getBlockInfo(TreeSet blockSet) {
+    synchronized void getBlockInfo(TreeSet blockSet) {
       for (int idx = 0; idx < volumes.length; idx++) {
         volumes[idx].getBlockInfo(blockSet);
       }
     }
 
-    void getVolumeMap(HashMap volumeMap) {
+    synchronized void getVolumeMap(HashMap volumeMap) {
       for (int idx = 0; idx < volumes.length; idx++) {
         volumes[idx].getVolumeMap(volumeMap);
       }
     }
 
-    void getBlockMap(HashMap blockMap) {
+    synchronized void getBlockMap(HashMap blockMap) {
       for (int idx = 0; idx < volumes.length; idx++) {
         volumes[idx].getBlockMap(blockMap);
       }
     }
 
-    void checkDirs() throws DiskErrorException {
+    synchronized void checkDirs() throws DiskErrorException {
       for (int idx = 0; idx < volumes.length; idx++) {
         volumes[idx].checkDirs();
       }
@@ -391,10 +391,11 @@
     /**
      * Get a stream of data from the indicated block.
      */
-    public InputStream getBlockData(Block b) throws IOException {
+    public synchronized InputStream getBlockData(Block b) throws IOException {
         if (! isValidBlock(b)) {
             throw new IOException("Block " + b + " is not valid.");
         }
+        // File should be opened with the lock.
        return new FileInputStream(getFile(b));
     }
 
@@ -414,7 +415,7 @@
         // Serialize access to /tmp, and check if file already there.
         //
         File f = null;
-        synchronized (ongoingCreates) {
+        synchronized ( this ) {
             //
             // Is it already in the create process?
             //
@@ -422,11 +423,12 @@
                 throw new IOException("Block " + b +
                         " has already been started (though not completed), and thus cannot be created.");
             }
-
-            FSVolume v = volumes.getNextVolume(blockSize);
-
-            // create temporary file to hold block in the designated volume
-            f = v.createTmpFile(b);
+            FSVolume v = null;
+            synchronized ( volumes ) {
+                v = volumes.getNextVolume(blockSize);
+                // create temporary file to hold block in the designated volume
+                f = v.createTmpFile(b);
+            }
             ongoingCreates.put(b, f);
             volumeMap.put(b, v);
         }
@@ -450,8 +452,7 @@
     /**
      * Complete the block write!
      */
-    public void finalizeBlock(Block b) throws IOException {
-        synchronized (ongoingCreates) {
+    public synchronized void finalizeBlock(Block b) throws IOException {
             File f = ongoingCreates.get(b);
             if (f == null || ! f.exists()) {
                 throw new IOException("No temporary file " + f + " for block " + b);
@@ -460,10 +461,12 @@
             b.setNumBytes(finalLen);
             FSVolume v = volumeMap.get(b);
-            File dest = v.addBlock(b, f);
+            File dest = null;
+            synchronized ( volumes ) {
+                dest = v.addBlock(b, f);
+            }
             blockMap.put(b, dest);
             ongoingCreates.remove(b);
-        }
     }
 
     /**
      *
@@ -495,12 +498,15 @@
      */
     public void invalidate(Block invalidBlks[]) throws IOException {
       for (int i = 0; i < invalidBlks.length; i++) {
-        File f = getFile(invalidBlks[i]);
-        if (!f.delete()) {
-            throw new IOException("Unexpected error trying to delete block "
-                + invalidBlks[i] + " at file " + f);
-        }
-        blockMap.remove(invalidBlks[i]);
+        synchronized ( this ) {
+          File f = getFile(invalidBlks[i]);
+          if (!f.delete()) {
+              throw new IOException("Unexpected error trying to delete block "
+                  + invalidBlks[i] + " at file " + f);
+          }
+          blockMap.remove(invalidBlks[i]);
+          volumeMap.remove(invalidBlks[i]);
+        }
         DataNode.LOG.info("Deleting block " + invalidBlks[i]);
       }
     }
@@ -508,7 +514,7 @@
     /**
      * Turn the block identifier into a filename.
      */
-    File getFile(Block b) {
+    synchronized File getFile(Block b) {
         return blockMap.get(b);
     }
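
The locking scheme the patch settles on has two levels: FSDataset methods
that touch the shared maps (ongoingCreates, volumeMap, blockMap) synchronize
on the FSDataset instance, while FSVolumeSet methods synchronize on the
volume set, and nested sections always take the dataset lock before the
volumes lock. Below is a minimal sketch of that pattern, assuming simplified
stand-in classes (Dataset, VolumeSet, startBlock, and the long block ids are
illustrative names, not the actual Hadoop code). It also shows why doing the
map lookup and the delete under one monitor removes the NullPointerException:
previously another thread could unmap a block between getFile() and
f.delete().

import java.io.File;
import java.io.IOException;
import java.util.HashMap;

// Hypothetical, simplified model of the two-level locking in this patch.
// Dataset stands in for FSDataset and VolumeSet for FSVolumeSet; these
// are not the real Hadoop classes.
class VolumeSet {
    private int curVolume = 0;

    // Volume-set state is guarded by the VolumeSet monitor, mirroring the
    // synchronized keyword added to getNextVolume(), getBlockInfo(), etc.
    synchronized File createTmpFile(long blockId) throws IOException {
        curVolume = (curVolume + 1) % 4;  // stand-in for round-robin choice
        return File.createTempFile("blk_" + blockId, ".tmp");
    }
}

class Dataset {
    private final VolumeSet volumes = new VolumeSet();
    private final HashMap<Long, File> ongoingCreates = new HashMap<Long, File>();
    private final HashMap<Long, File> blockMap = new HashMap<Long, File>();

    // Lock order is fixed: the Dataset monitor is taken first, the
    // VolumeSet monitor second, never the other way around. A fixed order
    // is what keeps the nested synchronized blocks deadlock-free.
    public synchronized File startBlock(long blockId) throws IOException {
        if (ongoingCreates.containsKey(blockId)) {
            throw new IOException("Block " + blockId + " already being created");
        }
        File f;
        synchronized (volumes) {          // inner lock, dataset lock held
            f = volumes.createTmpFile(blockId);
        }
        ongoingCreates.put(blockId, f);
        return f;
    }

    // Doing the lookup and the delete under one monitor closes the race
    // where another thread removes the mapping between the get() and the
    // delete() -- the unguarded version could fetch null from the map and
    // then fail with a NullPointerException on f.delete().
    public synchronized void invalidate(long blockId) throws IOException {
        File f = blockMap.get(blockId);
        if (f == null || !f.delete()) {
            throw new IOException("Cannot delete block " + blockId);
        }
        blockMap.remove(blockId);
    }
}

Note also that invalidate() in the patch takes the dataset lock once per
block inside the loop rather than around the whole loop, so readers and
writers can interleave between deletions of a large batch.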