Reply-To: mapreduce-dev@hadoop.apache.org
Subject: svn commit: r1398581 [9/9] - in /hadoop/common/branches/MR-3902/hadoop-mapreduce-project: ./ bin/ conf/ hadoop-mapreduce-client/ hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/ hadoop-mapreduce-client/hadoop-...
Date: Tue, 16 Oct 2012 00:03:53 -0000
To: mapreduce-commits@hadoop.apache.org
From: sseth@apache.org
Message-Id: <20121016000359.D09CA2388C7D@eris.apache.org>

Modified: hadoop/common/branches/MR-3902/hadoop-mapreduce-project/src/contrib/index/src/test/org/apache/hadoop/contrib/index/mapred/TestIndexUpdater.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/MR-3902/hadoop-mapreduce-project/src/contrib/index/src/test/org/apache/hadoop/contrib/index/mapred/TestIndexUpdater.java?rev=1398581&r1=1398580&r2=1398581&view=diff
==============================================================================
--- hadoop/common/branches/MR-3902/hadoop-mapreduce-project/src/contrib/index/src/test/org/apache/hadoop/contrib/index/mapred/TestIndexUpdater.java (original)
+++ hadoop/common/branches/MR-3902/hadoop-mapreduce-project/src/contrib/index/src/test/org/apache/hadoop/contrib/index/mapred/TestIndexUpdater.java Tue Oct 16 00:02:55 2012
@@ -1,258 +1,258 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.contrib.index.mapred;
-
-import java.io.File;
-import java.io.IOException;
-import java.text.NumberFormat;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.contrib.index.lucene.FileSystemDirectory;
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.contrib.index.mapred;
+
+import java.io.File;
+import java.io.IOException;
+import java.text.NumberFormat;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.contrib.index.lucene.FileSystemDirectory;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.PathFilter;
-import org.apache.hadoop.mapred.MiniMRCluster;
-import org.apache.lucene.document.Document;
-import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.IndexWriter;
-import org.apache.lucene.index.KeepOnlyLastCommitDeletionPolicy;
-import org.apache.lucene.index.MultiReader;
-import org.apache.lucene.index.Term;
-import org.apache.lucene.search.Hits;
-import org.apache.lucene.search.IndexSearcher;
-import org.apache.lucene.search.TermQuery;
-import org.apache.lucene.store.Directory;
-
-import junit.framework.TestCase;
-
-public class TestIndexUpdater extends TestCase {
-
-  private static final NumberFormat NUMBER_FORMAT = NumberFormat.getInstance();
-  static {
-    NUMBER_FORMAT.setMinimumIntegerDigits(5);
-    NUMBER_FORMAT.setGroupingUsed(false);
-  }
-
-  // however, "we only allow 0 or 1 reducer in local mode" - from
-  // LocalJobRunner
-  private Configuration conf;
-  private Path localInputPath = new Path(System.getProperty("build.test") + "/sample/data.txt");
-  private Path inputPath = new Path("/myexample/data.txt");
-  private Path outputPath = new Path("/myoutput");
-  private Path indexPath = new Path("/myindex");
-  private int initNumShards = 3;
-  private int numMapTasks = 5;
-
-  private int numDataNodes = 3;
-  private int numTaskTrackers = 3;
-
-  private int numRuns = 3;
-  private int numDocsPerRun = 10; // num of docs in local input path
-
-  private FileSystem fs;
-  private MiniDFSCluster dfsCluster;
-  private MiniMRCluster mrCluster;
-
-  public TestIndexUpdater() throws IOException {
-    super();
-    if (System.getProperty("hadoop.log.dir") == null) {
-      String base = new File(".").getPath(); // getAbsolutePath();
-      System.setProperty("hadoop.log.dir", new Path(base).toString() + "/logs");
-    }
-    conf = new Configuration();
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.PathFilter;
+import org.apache.hadoop.mapred.MiniMRCluster;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.KeepOnlyLastCommitDeletionPolicy;
+import org.apache.lucene.index.MultiReader;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.search.Hits;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.TermQuery;
+import org.apache.lucene.store.Directory;
+
+import junit.framework.TestCase;
+
+public class TestIndexUpdater extends TestCase {
+
+  private static final NumberFormat NUMBER_FORMAT = NumberFormat.getInstance();
+  static {
+    NUMBER_FORMAT.setMinimumIntegerDigits(5);
+    NUMBER_FORMAT.setGroupingUsed(false);
+  }
+
+  // however, "we only allow 0 or 1 reducer in local mode" - from
+  // LocalJobRunner
+  private Configuration conf;
+  private Path localInputPath = new Path(System.getProperty("build.test") + "/sample/data.txt");
+  private Path inputPath = new Path("/myexample/data.txt");
+  private Path outputPath = new Path("/myoutput");
+  private Path indexPath = new Path("/myindex");
+  private int initNumShards = 3;
+  private int numMapTasks = 5;
+
+  private int numDataNodes = 3;
+  private int numTaskTrackers = 3;
+
+  private int numRuns = 3;
+  private int numDocsPerRun = 10; // num of docs in local input path
+
+  private FileSystem fs;
+  private MiniDFSCluster dfsCluster;
+  private MiniMRCluster mrCluster;
+
+  public TestIndexUpdater() throws IOException {
+    super();
+    if (System.getProperty("hadoop.log.dir") == null) {
+      String base = new File(".").getPath(); // getAbsolutePath();
+      System.setProperty("hadoop.log.dir", new Path(base).toString() + "/logs");
+    }
+    conf = new Configuration();
     //See MAPREDUCE-947 for more details. Setting to false prevents the creation of _SUCCESS.
conf.setBoolean("mapreduce.fileoutputcommitter.marksuccessfuljobs", false); - } - - protected void setUp() throws Exception { - super.setUp(); - try { - dfsCluster = - new MiniDFSCluster(conf, numDataNodes, true, (String[]) null); - - fs = dfsCluster.getFileSystem(); - if (fs.exists(inputPath)) { - fs.delete(inputPath, true); - } - fs.copyFromLocalFile(localInputPath, inputPath); - - if (fs.exists(outputPath)) { - // do not create, mapred will create - fs.delete(outputPath, true); - } - - if (fs.exists(indexPath)) { - fs.delete(indexPath, true); - } - - mrCluster = - new MiniMRCluster(numTaskTrackers, fs.getUri().toString(), 1); - - } catch (IOException e) { - if (dfsCluster != null) { - dfsCluster.shutdown(); - dfsCluster = null; - } - - if (fs != null) { - fs.close(); - fs = null; - } - - if (mrCluster != null) { - mrCluster.shutdown(); - mrCluster = null; - } - - throw e; - } - - } - - protected void tearDown() throws Exception { - if (dfsCluster != null) { - dfsCluster.shutdown(); - dfsCluster = null; - } - - if (fs != null) { - fs.close(); - fs = null; - } - - if (mrCluster != null) { - mrCluster.shutdown(); - mrCluster = null; - } - - super.tearDown(); - } - - public void testIndexUpdater() throws IOException { - IndexUpdateConfiguration iconf = new IndexUpdateConfiguration(conf); - // max field length, compound file and number of segments will be checked - // later - iconf.setIndexMaxFieldLength(2); - iconf.setIndexUseCompoundFile(true); - iconf.setIndexMaxNumSegments(1); - iconf.setMaxRAMSizeInBytes(20480); - - long versionNumber = -1; - long generation = -1; - - for (int i = 0; i < numRuns; i++) { - if (fs.exists(outputPath)) { - fs.delete(outputPath, true); - } - - Shard[] shards = new Shard[initNumShards + i]; - for (int j = 0; j < shards.length; j++) { - shards[j] = - new Shard(versionNumber, new Path(indexPath, - NUMBER_FORMAT.format(j)).toString(), generation); - } - run(i + 1, shards); - } - } - - private void run(int numRuns, Shard[] shards) throws IOException { - IIndexUpdater updater = new IndexUpdater(); - updater.run(conf, new Path[] { inputPath }, outputPath, numMapTasks, - shards); - - // verify the done files - Path[] doneFileNames = new Path[shards.length]; - int count = 0; - FileStatus[] fileStatus = fs.listStatus(outputPath); - for (int i = 0; i < fileStatus.length; i++) { - FileStatus[] doneFiles = fs.listStatus(fileStatus[i].getPath()); - for (int j = 0; j < doneFiles.length; j++) { - doneFileNames[count++] = doneFiles[j].getPath(); - } - } - assertEquals(shards.length, count); - for (int i = 0; i < count; i++) { - assertTrue(doneFileNames[i].getName().startsWith( - IndexUpdateReducer.DONE.toString())); - } - - // verify the index - IndexReader[] readers = new IndexReader[shards.length]; - for (int i = 0; i < shards.length; i++) { - Directory dir = - new FileSystemDirectory(fs, new Path(shards[i].getDirectory()), - false, conf); - readers[i] = IndexReader.open(dir); - } - - IndexReader reader = new MultiReader(readers); - IndexSearcher searcher = new IndexSearcher(reader); - Hits hits = searcher.search(new TermQuery(new Term("content", "apache"))); - - assertEquals(numRuns * numDocsPerRun, hits.length()); - - int[] counts = new int[numDocsPerRun]; - for (int i = 0; i < hits.length(); i++) { - Document doc = hits.doc(i); - counts[Integer.parseInt(doc.get("id"))]++; - } - - for (int i = 0; i < numDocsPerRun; i++) { - assertEquals(numRuns, counts[i]); - } - - // max field length is 2, so "dot" is also indexed but not "org" - hits = searcher.search(new TermQuery(new 
Term("content", "dot"))); - assertEquals(numRuns, hits.length()); - - hits = searcher.search(new TermQuery(new Term("content", "org"))); - assertEquals(0, hits.length()); - - searcher.close(); - reader.close(); - - // open and close an index writer with KeepOnlyLastCommitDeletionPolicy - // to remove earlier checkpoints - for (int i = 0; i < shards.length; i++) { - Directory dir = - new FileSystemDirectory(fs, new Path(shards[i].getDirectory()), - false, conf); - IndexWriter writer = - new IndexWriter(dir, false, null, - new KeepOnlyLastCommitDeletionPolicy()); - writer.close(); - } - - // verify the number of segments, must be done after an writer with - // KeepOnlyLastCommitDeletionPolicy so that earlier checkpoints are removed - for (int i = 0; i < shards.length; i++) { - PathFilter cfsFilter = new PathFilter() { - public boolean accept(Path path) { - return path.getName().endsWith(".cfs"); - } - }; - FileStatus[] cfsFiles = - fs.listStatus(new Path(shards[i].getDirectory()), cfsFilter); - assertEquals(1, cfsFiles.length); - } - } - -} + } + + protected void setUp() throws Exception { + super.setUp(); + try { + dfsCluster = + new MiniDFSCluster(conf, numDataNodes, true, (String[]) null); + + fs = dfsCluster.getFileSystem(); + if (fs.exists(inputPath)) { + fs.delete(inputPath, true); + } + fs.copyFromLocalFile(localInputPath, inputPath); + + if (fs.exists(outputPath)) { + // do not create, mapred will create + fs.delete(outputPath, true); + } + + if (fs.exists(indexPath)) { + fs.delete(indexPath, true); + } + + mrCluster = + new MiniMRCluster(numTaskTrackers, fs.getUri().toString(), 1); + + } catch (IOException e) { + if (dfsCluster != null) { + dfsCluster.shutdown(); + dfsCluster = null; + } + + if (fs != null) { + fs.close(); + fs = null; + } + + if (mrCluster != null) { + mrCluster.shutdown(); + mrCluster = null; + } + + throw e; + } + + } + + protected void tearDown() throws Exception { + if (dfsCluster != null) { + dfsCluster.shutdown(); + dfsCluster = null; + } + + if (fs != null) { + fs.close(); + fs = null; + } + + if (mrCluster != null) { + mrCluster.shutdown(); + mrCluster = null; + } + + super.tearDown(); + } + + public void testIndexUpdater() throws IOException { + IndexUpdateConfiguration iconf = new IndexUpdateConfiguration(conf); + // max field length, compound file and number of segments will be checked + // later + iconf.setIndexMaxFieldLength(2); + iconf.setIndexUseCompoundFile(true); + iconf.setIndexMaxNumSegments(1); + iconf.setMaxRAMSizeInBytes(20480); + + long versionNumber = -1; + long generation = -1; + + for (int i = 0; i < numRuns; i++) { + if (fs.exists(outputPath)) { + fs.delete(outputPath, true); + } + + Shard[] shards = new Shard[initNumShards + i]; + for (int j = 0; j < shards.length; j++) { + shards[j] = + new Shard(versionNumber, new Path(indexPath, + NUMBER_FORMAT.format(j)).toString(), generation); + } + run(i + 1, shards); + } + } + + private void run(int numRuns, Shard[] shards) throws IOException { + IIndexUpdater updater = new IndexUpdater(); + updater.run(conf, new Path[] { inputPath }, outputPath, numMapTasks, + shards); + + // verify the done files + Path[] doneFileNames = new Path[shards.length]; + int count = 0; + FileStatus[] fileStatus = fs.listStatus(outputPath); + for (int i = 0; i < fileStatus.length; i++) { + FileStatus[] doneFiles = fs.listStatus(fileStatus[i].getPath()); + for (int j = 0; j < doneFiles.length; j++) { + doneFileNames[count++] = doneFiles[j].getPath(); + } + } + assertEquals(shards.length, count); + for (int i = 0; i < 
+    for (int i = 0; i < count; i++) {
+      assertTrue(doneFileNames[i].getName().startsWith(
+          IndexUpdateReducer.DONE.toString()));
+    }
+
+    // verify the index
+    IndexReader[] readers = new IndexReader[shards.length];
+    for (int i = 0; i < shards.length; i++) {
+      Directory dir =
+          new FileSystemDirectory(fs, new Path(shards[i].getDirectory()),
+              false, conf);
+      readers[i] = IndexReader.open(dir);
+    }
+
+    IndexReader reader = new MultiReader(readers);
+    IndexSearcher searcher = new IndexSearcher(reader);
+    Hits hits = searcher.search(new TermQuery(new Term("content", "apache")));
+
+    assertEquals(numRuns * numDocsPerRun, hits.length());
+
+    int[] counts = new int[numDocsPerRun];
+    for (int i = 0; i < hits.length(); i++) {
+      Document doc = hits.doc(i);
+      counts[Integer.parseInt(doc.get("id"))]++;
+    }
+
+    for (int i = 0; i < numDocsPerRun; i++) {
+      assertEquals(numRuns, counts[i]);
+    }
+
+    // max field length is 2, so "dot" is also indexed but not "org"
+    hits = searcher.search(new TermQuery(new Term("content", "dot")));
+    assertEquals(numRuns, hits.length());
+
+    hits = searcher.search(new TermQuery(new Term("content", "org")));
+    assertEquals(0, hits.length());
+
+    searcher.close();
+    reader.close();
+
+    // open and close an index writer with KeepOnlyLastCommitDeletionPolicy
+    // to remove earlier checkpoints
+    for (int i = 0; i < shards.length; i++) {
+      Directory dir =
+          new FileSystemDirectory(fs, new Path(shards[i].getDirectory()),
+              false, conf);
+      IndexWriter writer =
+          new IndexWriter(dir, false, null,
+              new KeepOnlyLastCommitDeletionPolicy());
+      writer.close();
+    }
+
+    // verify the number of segments, must be done after an writer with
+    // KeepOnlyLastCommitDeletionPolicy so that earlier checkpoints are removed
+    for (int i = 0; i < shards.length; i++) {
+      PathFilter cfsFilter = new PathFilter() {
+        public boolean accept(Path path) {
+          return path.getName().endsWith(".cfs");
+        }
+      };
+      FileStatus[] cfsFiles =
+          fs.listStatus(new Path(shards[i].getDirectory()), cfsFilter);
+      assertEquals(1, cfsFiles.length);
+    }
+  }
+
+}
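A note on the hunk above: every line changes because the file is being rewritten with Unix line endings; the three unprefixed context lines (the MiniDFSCluster import, the MAPREDUCE-947 comment, and the setBoolean call) are the only ones that already had them. That marksuccessfuljobs setting matters for the test's done-file check: left at its default of true, FileOutputCommitter writes an empty _SUCCESS marker into the job output directory, and the fs.listStatus() walk in run() would count it alongside the per-shard done files. A minimal sketch of that interaction, assuming the 0.2x-era property name shown in the diff (the class name and probe below are illustrative, not part of the commit):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class SuccessMarkerSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Default behaviour: a successful job commit leaves an empty
        // _SUCCESS file in the output directory. TestIndexUpdater sets
        // this to false so only IndexUpdateReducer's done files remain.
        conf.setBoolean("mapreduce.fileoutputcommitter.marksuccessfuljobs", true);

        FileSystem fs = FileSystem.get(conf);
        Path outputPath = new Path("/myoutput"); // same path the test uses
        // After a job has committed, the marker can be probed like this:
        System.out.println("_SUCCESS present: "
            + fs.exists(new Path(outputPath, "_SUCCESS")));
      }
    }

Disabling the marker keeps the test's assertEquals(shards.length, count) exact without having to filter _SUCCESS out of the listing.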
Propchange: hadoop/common/branches/MR-3902/hadoop-mapreduce-project/src/contrib/vaidya/
------------------------------------------------------------------------------
  Merged /hadoop/common/branches/HDFS-3077/hadoop-mapreduce-project/src/contrib/vaidya:r1363593-1396941

Propchange: hadoop/common/branches/MR-3902/hadoop-mapreduce-project/src/examples/
------------------------------------------------------------------------------
  Merged /hadoop/common/branches/HDFS-3077/hadoop-mapreduce-project/src/examples:r1363593-1396941

Propchange: hadoop/common/branches/MR-3902/hadoop-mapreduce-project/src/java/
------------------------------------------------------------------------------
  Merged /hadoop/common/branches/HDFS-3077/hadoop-mapreduce-project/src/java:r1363593-1396941

Modified: hadoop/common/branches/MR-3902/hadoop-mapreduce-project/src/java/mapred-default.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/MR-3902/hadoop-mapreduce-project/src/java/mapred-default.xml?rev=1398581&r1=1398580&r2=1398581&view=diff
==============================================================================
--- hadoop/common/branches/MR-3902/hadoop-mapreduce-project/src/java/mapred-default.xml (original)
+++ hadoop/common/branches/MR-3902/hadoop-mapreduce-project/src/java/mapred-default.xml Tue Oct 16 00:02:55 2012
@@ -701,7 +701,7 @@
 <property>
   <name>mapreduce.job.maxtaskfailures.per.tracker</name>
-  <value>4</value>
+  <value>3</value>
   <description>The number of task-failures on a tasktracker of a given job
                after which new tasks of that job aren't assigned to it.
   </description>
 </property>
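The mapred-default.xml hunk above lowers the per-job tracker blacklisting threshold from 4 to 3 task failures. Individual jobs can still override the default through the old-API JobConf setter; a minimal sketch (illustrative, not part of this commit):

    import org.apache.hadoop.mapred.JobConf;

    public class TrackerFailureLimitSketch {
      public static void main(String[] args) {
        JobConf job = new JobConf();
        // Per-job override of the new default (3): after this many task
        // failures on a single tasktracker, the job stops scheduling
        // its tasks on that tracker.
        job.setMaxTaskFailuresPerTracker(4);
        System.out.println("per-tracker failure limit: "
            + job.getMaxTaskFailuresPerTracker());
      }
    }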
Propchange: hadoop/common/branches/MR-3902/hadoop-mapreduce-project/src/test/mapred/
------------------------------------------------------------------------------
  Merged /hadoop/common/branches/HDFS-3077/hadoop-mapreduce-project/src/test/mapred:r1363593-1396941

Propchange: hadoop/common/branches/MR-3902/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/fs/
------------------------------------------------------------------------------
  Merged /hadoop/common/branches/HDFS-3077/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/fs:r1363593-1396941

Propchange: hadoop/common/branches/MR-3902/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/hdfs/
------------------------------------------------------------------------------
  Merged /hadoop/common/branches/HDFS-3077/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/hdfs:r1363593-1396941

Propchange: hadoop/common/branches/MR-3902/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/ipc/
------------------------------------------------------------------------------
  Merged /hadoop/common/branches/HDFS-3077/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/ipc:r1363593-1396941

Modified: hadoop/common/branches/MR-3902/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestJobQueueInformation.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/MR-3902/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestJobQueueInformation.java?rev=1398581&r1=1398580&r2=1398581&view=diff
==============================================================================
--- hadoop/common/branches/MR-3902/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestJobQueueInformation.java (original)
+++ hadoop/common/branches/MR-3902/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestJobQueueInformation.java Tue Oct 16 00:02:55 2012
@@ -98,7 +98,7 @@ public class TestJobQueueInformation ext
     dfsCluster.shutdown();
   }
 
-  public void testJobQueues() throws IOException {
+  public void testJobQueues() throws Exception {
     JobClient jc = new JobClient(mrCluster.createJobConf());
     String expectedQueueInfo = "Maximum Tasks Per Job :: 10";
     JobQueueInfo[] queueInfos = jc.getQueues();

Modified: hadoop/common/branches/MR-3902/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestSetupAndCleanupFailure.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/MR-3902/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestSetupAndCleanupFailure.java?rev=1398581&r1=1398580&r2=1398581&view=diff
==============================================================================
--- hadoop/common/branches/MR-3902/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestSetupAndCleanupFailure.java (original)
+++ hadoop/common/branches/MR-3902/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestSetupAndCleanupFailure.java Tue Oct 16 00:02:55 2012
@@ -149,7 +149,7 @@ public class TestSetupAndCleanupFailure
   private void testSetupAndCleanupKill(MiniMRCluster mr,
                                        MiniDFSCluster dfs,
                                        boolean commandLineKill)
-      throws IOException {
+      throws Exception {
     // launch job with waiting setup/cleanup
     RunningJob job = launchJobWithWaitingSetupAndCleanup(mr);
 
@@ -223,7 +223,7 @@ public class TestSetupAndCleanupFailure
   // Also Tests the command-line kill for setup/cleanup attempts.
   // tests the setup/cleanup attempts getting killed if
   // they were running on a lost tracker
-  public void testWithDFS() throws IOException {
+  public void testWithDFS() throws Exception {
     MiniDFSCluster dfs = null;
     MiniMRCluster mr = null;
     FileSystem fileSys = null;

Modified: hadoop/common/branches/MR-3902/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/UtilsForTests.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/MR-3902/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/UtilsForTests.java?rev=1398581&r1=1398580&r2=1398581&view=diff
==============================================================================
--- hadoop/common/branches/MR-3902/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/UtilsForTests.java (original)
+++ hadoop/common/branches/MR-3902/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/UtilsForTests.java Tue Oct 16 00:02:55 2012
@@ -449,7 +449,7 @@ public class UtilsForTests {
   static void signalTasks(MiniDFSCluster dfs, FileSystem fileSys,
                           String mapSignalFile,
                           String reduceSignalFile, int replication)
-      throws IOException {
+      throws Exception {
     writeFile(dfs.getNameNode(), fileSys.getConf(), new Path(mapSignalFile),
               (short)replication);
     writeFile(dfs.getNameNode(), fileSys.getConf(), new Path(reduceSignalFile),
@@ -462,7 +462,7 @@ public class UtilsForTests {
   static void signalTasks(MiniDFSCluster dfs, FileSystem fileSys,
                           boolean isMap, String mapSignalFile,
                           String reduceSignalFile)
-      throws IOException {
+      throws Exception {
     // signal the maps to complete
     writeFile(dfs.getNameNode(), fileSys.getConf(),
               isMap
@@ -483,7 +483,7 @@ public class UtilsForTests {
   }
 
   static void writeFile(NameNode namenode, Configuration conf, Path name,
-                        short replication) throws IOException {
+                        short replication) throws Exception {
     FileSystem fileSys = FileSystem.get(conf);
     SequenceFile.Writer writer =
       SequenceFile.createWriter(fileSys, conf, name,

Propchange: hadoop/common/branches/MR-3902/hadoop-mapreduce-project/src/webapps/job/
------------------------------------------------------------------------------
  Merged /hadoop/common/branches/HDFS-3077/hadoop-mapreduce-project/src/webapps/job:r1363593-1396941
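The UtilsForTests hunks widen the throws clauses of the signal-file helpers from IOException to Exception. For orientation: these helpers support a simple rendezvous pattern in which the test writes small marker files to the shared file system and tasks poll for them before making progress. A sketch of the polling side, with hypothetical helper and path names (only writeFile/signalTasks above are from the commit):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class SignalFileWaitSketch {
      // Hypothetical counterpart to UtilsForTests.signalTasks: block until
      // the signal file appears on the shared file system, polling once a
      // second.
      static void waitForSignal(FileSystem fs, Path signalFile)
          throws Exception {
        while (!fs.exists(signalFile)) {
          Thread.sleep(1000);
        }
      }

      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);
        waitForSignal(fs, new Path("/tmp/map-signal")); // hypothetical path
      }
    }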