Subject: svn commit: r787781 - in /hadoop/hdfs/trunk: ./ src/java/org/apache/hadoop/hdfs/server/namenode/ src/test/hdfs/org/apache/hadoop/hdfs/ src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/ src/test/hdfs/org/apache/hadoop/hdfs/tools/offlineImageViewer/
Date: Tue, 23 Jun 2009 18:38:43 -0000
From: rangadi@apache.org
To: hdfs-commits@hadoop.apache.org
Reply-To: hdfs-dev@hadoop.apache.org
Message-Id: <20090623183844.0E11B23888E6@eris.apache.org>
X-Mailer: svnmailer-1.0.8

Author: rangadi
Date: Tue Jun 23 18:38:42 2009
New Revision: 787781

URL: http://svn.apache.org/viewvc?rev=787781&view=rev
Log:
HDFS-396. NameNode image and edits directories are specified as URIs.
(Luca Telloli via rangadi)

Modified:
    hadoop/hdfs/trunk/CHANGES.txt
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/BackupStorage.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/MiniDFSCluster.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/CreateEditsLog.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestStartup.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java

Modified: hadoop/hdfs/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/CHANGES.txt?rev=787781&r1=787780&r2=787781&view=diff
==============================================================================
--- hadoop/hdfs/trunk/CHANGES.txt (original)
+++ hadoop/hdfs/trunk/CHANGES.txt Tue Jun 23 18:38:42 2009
@@ -12,6 +12,9 @@
     HDFS-381. Remove blocks from DataNode maps when corresponding file
     is deleted. (Suresh Srinivas via rangadi)
 
+    HDFS-396. NameNode image and edits directories are specified as URIs.
+    (Luca Telloli via rangadi)
+
   BUG FIXES
 
     HDFS-76. Better error message to users when commands fail because of
     lack of quota. Allow quota to be set even if the limit is lower than
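For context on what this change means for deployments: dfs.name.dir, dfs.name.edits.dir, fs.checkpoint.dir and fs.checkpoint.edits.dir values are now parsed as URIs, and only file:// URIs are admitted as local storage directories. A minimal sketch of the new configuration style follows (the property values and paths are illustrative, not taken from the patch):

    import java.io.File;
    import org.apache.hadoop.conf.Configuration;

    public class NameDirUriExample {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Local storage directories are now given as file:// URIs.
        conf.set("dfs.name.dir", "file:///data/dfs/name");
        conf.set("dfs.name.edits.dir", "file:///data/dfs/edits");
        // A bare path still works: the patch falls back to file:// plus
        // the absolute path, logging a warning for name/edits dirs.
        conf.set("fs.checkpoint.dir", new File("/data/dfs/checkpoint").getPath());
      }
    }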
Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/BackupStorage.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/BackupStorage.java?rev=787781&r1=787780&r2=787781&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/BackupStorage.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/BackupStorage.java Tue Jun 23 18:38:42 2009
@@ -20,6 +20,7 @@
 import java.io.DataInputStream;
 import java.io.File;
 import java.io.IOException;
+import java.net.URI;
 import java.util.Collection;
 import java.util.Iterator;
 
@@ -66,12 +67,12 @@
    * Read VERSION and fstime files if exist.
    * Do not load image or edits.
    * 
-   * @param imageDirs list of image directories.
-   * @param editsDirs list of edits directories.
+   * @param imageDirs list of image directories as URI.
+   * @param editsDirs list of edits directories URI.
    * @throws IOException if the node should shutdown.
    */
-  void recoverCreateRead(Collection<File> imageDirs,
-                         Collection<File> editsDirs) throws IOException {
+  void recoverCreateRead(Collection<URI> imageDirs,
+                         Collection<URI> editsDirs) throws IOException {
     setStorageDirectories(imageDirs, editsDirs);
     this.checkpointTime = 0L;
     for(Iterator<StorageDirectory> it = dirIterator(); it.hasNext();) {

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java?rev=787781&r1=787780&r2=787781&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java Tue Jun 23 18:38:42 2009
@@ -18,6 +18,7 @@
 package org.apache.hadoop.hdfs.server.namenode;
 
 import java.io.*;
+import java.net.URI;
 import java.util.*;
 
 import org.apache.hadoop.conf.Configuration;
@@ -85,8 +86,8 @@
       directoryMetrics.setTag("sessionId", conf.get("session.id"));
   }
 
-  void loadFSImage(Collection<File> dataDirs,
-                   Collection<File> editsDirs,
+  void loadFSImage(Collection<URI> dataDirs,
+                   Collection<URI> editsDirs,
                    StartupOption startOpt) throws IOException {
     // format before starting up if requested
     if (startOpt == StartupOption.FORMAT) {
Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java?rev=787781&r1=787780&r2=787781&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java Tue Jun 23 18:38:42 2009
@@ -28,6 +28,7 @@
 import java.io.FileOutputStream;
 import java.io.IOException;
 import java.io.RandomAccessFile;
+import java.net.URI;
 import java.nio.ByteBuffer;
 import java.text.SimpleDateFormat;
 import java.util.ArrayList;
@@ -57,6 +58,7 @@
 import org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType;
 import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption;
 import org.apache.hadoop.hdfs.server.namenode.BlocksMap.BlockInfo;
+import org.apache.hadoop.hdfs.server.namenode.JournalStream.JournalType;
 import org.apache.hadoop.hdfs.server.protocol.CheckpointCommand;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeCommand;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
@@ -127,12 +129,13 @@
    * list of failed (and thus removed) storages
    */
   protected List<StorageDirectory> removedStorageDirs = new ArrayList<StorageDirectory>();
-  
+
   /**
-   * Directories for importing an image from a checkpoint.
+   * URIs for importing an image from a checkpoint. In the default case,
+   * URIs will represent directories.
    */
-  private Collection<File> checkpointDirs;
-  private Collection<File> checkpointEditsDirs;
+  private Collection<URI> checkpointDirs;
+  private Collection<URI> checkpointEditsDirs;
 
   /**
    * Can fs-image be rolled?
@@ -158,8 +161,10 @@
   }
 
   /**
+   * @throws IOException 
    */
-  FSImage(Collection<File> fsDirs, Collection<File> fsEditsDirs) {
+  FSImage(Collection<URI> fsDirs, Collection<URI> fsEditsDirs) 
+      throws IOException {
     this();
     setStorageDirectories(fsDirs, fsEditsDirs);
   }
@@ -170,11 +175,12 @@
 
   /**
    * Represents an Image (image and edit file).
+   * @throws IOException 
    */
-  FSImage(File imageDir) {
+  FSImage(URI imageDir) throws IOException {
     this();
-    ArrayList<File> dirs = new ArrayList<File>(1);
-    ArrayList<File> editsDirs = new ArrayList<File>(1);
+    ArrayList<URI> dirs = new ArrayList<URI>(1);
+    ArrayList<URI> editsDirs = new ArrayList<URI>(1);
     dirs.add(imageDir);
     editsDirs.add(imageDir);
     setStorageDirectories(dirs, editsDirs);
@@ -197,14 +203,16 @@
     return restoreFailedStorage;
   }
 
-  void setStorageDirectories(Collection<File> fsNameDirs,
-                             Collection<File> fsEditsDirs) {
+  void setStorageDirectories(Collection<URI> fsNameDirs,
+                             Collection<URI> fsEditsDirs) throws IOException {
     this.storageDirs = new ArrayList<StorageDirectory>();
     this.removedStorageDirs = new ArrayList<StorageDirectory>();
+
    // Add all name dirs with appropriate NameNodeDirType 
-    for (File dirName : fsNameDirs) {
+    for (URI dirName : fsNameDirs) {
+      checkSchemeConsistency(dirName);
       boolean isAlsoEdits = false;
-      for (File editsDirName : fsEditsDirs) {
+      for (URI editsDirName : fsEditsDirs) {
         if (editsDirName.compareTo(dirName) == 0) {
           isAlsoEdits = true;
           fsEditsDirs.remove(editsDirName);
@@ -214,18 +222,49 @@
       NameNodeDirType dirType = (isAlsoEdits) ?
                           NameNodeDirType.IMAGE_AND_EDITS :
                           NameNodeDirType.IMAGE;
-      this.addStorageDir(new StorageDirectory(dirName, dirType));
+      // Add to the list of storage directories, only if the 
+      // URI is of type file://
+      if(dirName.getScheme().compareTo(JournalType.FILE.name().toLowerCase()) 
+          == 0){
+        this.addStorageDir(new StorageDirectory(new File(dirName.getPath()), 
+            dirType));
+      }
     }
 
     // Add edits dirs if they are different from name dirs
-    for (File dirName : fsEditsDirs) {
-      this.addStorageDir(new StorageDirectory(dirName,
+    for (URI dirName : fsEditsDirs) {
+      checkSchemeConsistency(dirName);
+      // Add to the list of storage directories, only if the 
+      // URI is of type file://
+      if(dirName.getScheme().compareTo(JournalType.FILE.name().toLowerCase()) 
+          == 0)
+        this.addStorageDir(new StorageDirectory(new File(dirName.getPath()), 
                     NameNodeDirType.EDITS));
     }
   }
 
-  void setCheckpointDirectories(Collection<File> dirs,
-                                Collection<File> editsDirs) {
+  /*
+   * Checks the consistency of a URI, in particular if the scheme 
+   * is specified and is supported by a concrete implementation 
+   */
+  static void checkSchemeConsistency(URI u) throws IOException {
+    String scheme = u.getScheme();
+    // the URI should have a proper scheme
+    if(scheme == null)
+      throw new IOException("Undefined scheme for " + u);
+    else {
+      try {
+        // the scheme should be enumerated as JournalType
+        JournalType.valueOf(scheme.toUpperCase());
+      } catch (IllegalArgumentException iae){
+        throw new IOException("Unknown scheme " + scheme + 
+            ". It should correspond to a JournalType enumeration value");
+      }
+    }
+  };
+
+  void setCheckpointDirectories(Collection<URI> dirs,
+                                Collection<URI> editsDirs) {
     checkpointDirs = dirs;
     checkpointEditsDirs = editsDirs;
   }
@@ -235,7 +274,7 @@
   }
 
   List<StorageDirectory> getRemovedStorageDirs() {
-	  return this.removedStorageDirs;
+    return this.removedStorageDirs;
   }
 
   File getEditFile(StorageDirectory sd) {
@@ -256,21 +295,39 @@
     return list;
   }
 
-  Collection<File> getDirectories(NameNodeDirType dirType) {
-    ArrayList<File> list = new ArrayList<File>();
+  Collection<URI> getDirectories(NameNodeDirType dirType) 
+      throws IOException {
+    ArrayList<URI> list = new ArrayList<URI>();
     Iterator<StorageDirectory> it = (dirType == null) ? 
                                     dirIterator() : dirIterator(dirType);
     for ( ;it.hasNext(); ) {
-      list.add(it.next().getRoot());
+      StorageDirectory sd = it.next();
+      try {
+        list.add(new URI("file://" + sd.getRoot().getAbsolutePath()));
+      } catch (Exception e) {
+        throw new IOException("Exception while processing " +
+            "StorageDirectory " + sd.getRoot().getAbsolutePath() + ". The" +
+            " full error message is " + e.getMessage());
+      }
     }
     return list;
   }
 
-  Collection<File> getImageDirectories() {
+  /**
+   * Retrieve current directories of type IMAGE
+   * @return Collection of URI representing image directories 
+   * @throws IOException in case of URI processing error
+   */
+  Collection<URI> getImageDirectories() throws IOException {
     return getDirectories(NameNodeDirType.IMAGE);
   }
 
-  Collection<File> getEditsDirectories() {
+  /**
+   * Retrieve current directories of type EDITS
+   * @return Collection of URI representing edits directories
+   * @throws IOException in case of URI processing error
+   */
+  Collection<URI> getEditsDirectories() throws IOException {
     return getDirectories(NameNodeDirType.EDITS);
   }
 
@@ -300,8 +357,8 @@
    * @throws IOException
    * @return true if the image needs to be saved or false otherwise
    */
-  boolean recoverTransitionRead(Collection<File> dataDirs,
-                                Collection<File> editsDirs,
+  boolean recoverTransitionRead(Collection<URI> dataDirs,
+                                Collection<URI> editsDirs,
                                 StartupOption startOpt
                                 ) throws IOException {
     assert startOpt != StartupOption.FORMAT :
@@ -740,7 +797,7 @@
         }
       }
     }
-    // if there are some edit log streams to remove
+    // if there are some edit log streams to remove 
     if(propagate && al != null) 
       editLog.processIOError(al, false);
@@ -1667,7 +1724,7 @@
       return; //nothing to restore
 
     LOG.info("FSImage.attemptRestoreRemovedStorage: check removed(failed) " +
-        "storarge. removedStorages size = " + removedStorageDirs.size());
+             "storarge. removedStorages size = " + removedStorageDirs.size());
     for(Iterator<StorageDirectory> it = this.removedStorageDirs.iterator(); it.hasNext();) {
       StorageDirectory sd = it.next();
       File root = sd.getRoot();
@@ -1823,31 +1880,63 @@
           + FSConstants.LAYOUT_VERSION + " is initialized.");
   }
 
-  static Collection<File> getCheckpointDirs(Configuration conf,
-                                            String defaultName) {
+  /**
+   * Retrieve checkpoint dirs from configuration.
+   *
+   * @param conf, the Configuration
+   * @param defaultValue, a default value for the attribute, if null
+   * @return a Collection of URIs representing the values in 
+   * fs.checkpoint.dir configuration property
+   */
+  static Collection<URI> getCheckpointDirs(Configuration conf,
+                                           String defaultValue) {
     Collection<String> dirNames = conf.getStringCollection("fs.checkpoint.dir");
-    if (dirNames.size() == 0 && defaultName != null) {
-      dirNames.add(defaultName);
+    if (dirNames.size() == 0 && defaultValue != null) {
+      dirNames.add(defaultValue);
     }
-    Collection<File> dirs = new ArrayList<File>(dirNames.size());
+    Collection<URI> dirs = new ArrayList<URI>(dirNames.size());
     for(String name : dirNames) {
-      dirs.add(new File(name));
+      try {
+        // process value as URI 
+        URI u = new URI(name);
+        // if scheme is undefined, then assume it's file://
+        if(u.getScheme() == null)
+          u = new URI("file://" + new File(name).getAbsolutePath());
+        // check that scheme is not null (trivial) and supported
+        checkSchemeConsistency(u);
+        dirs.add(u);
+      } catch (Exception e) {
+        LOG.error("Error while processing URI: " + name + 
+            ". The error message was: " + e.getMessage());
+      }
     }
     return dirs;
   }
 
-  static Collection<File> getCheckpointEditsDirs(Configuration conf,
-                                                 String defaultName) {
+  static Collection<URI> getCheckpointEditsDirs(Configuration conf,
+                                                String defaultName) {
     Collection<String> dirNames = 
-      conf.getStringCollection("fs.checkpoint.edits.dir");
-    if (dirNames.size() == 0 && defaultName != null) {
-      dirNames.add(defaultName);
-    }
-    Collection<File> dirs = new ArrayList<File>(dirNames.size());
-    for(String name : dirNames) {
-      dirs.add(new File(name));
-    }
-    return dirs;
+                conf.getStringCollection("fs.checkpoint.edits.dir");
+    if (dirNames.size() == 0 && defaultName != null) {
+      dirNames.add(defaultName);
+    }
+    Collection<URI> dirs = new ArrayList<URI>(dirNames.size());
+    for(String name : dirNames) {
+      try {
+        // process value as URI 
+        URI u = new URI(name);
+        // if scheme is undefined, then assume it's file://
+        if(u.getScheme() == null)
+          u = new URI("file://" + new File(name).getAbsolutePath());
+        // check that scheme is not null (trivial) and supported
+        checkSchemeConsistency(u);
+        dirs.add(u);
+      } catch (Exception e) {
+        LOG.error("Error while processing URI: " + name + 
+            ". The error message was: " + e.getMessage());
+      }
+    }
+    return dirs;
   }
 
   static private final DeprecatedUTF8 U_STR = new DeprecatedUTF8();
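A note on checkSchemeConsistency above: the scheme is validated by round-tripping it through the JournalType enum. A standalone sketch of the same idea, using a stand-in enum rather than the real JournalStream.JournalType (which this patch only references):

    import java.io.IOException;
    import java.net.URI;

    public class SchemeCheckSketch {
      // Stand-in for JournalStream.JournalType; the real enum lives in
      // org.apache.hadoop.hdfs.server.namenode.
      enum JournalType { FILE }

      static void checkScheme(URI u) throws IOException {
        String scheme = u.getScheme();
        if (scheme == null)   // e.g. a bare path such as /data/dfs/name
          throw new IOException("Undefined scheme for " + u);
        try {
          JournalType.valueOf(scheme.toUpperCase()); // "file" -> FILE
        } catch (IllegalArgumentException iae) {
          throw new IOException("Unknown scheme " + scheme);
        }
      }

      public static void main(String[] args) throws IOException {
        checkScheme(URI.create("file:///data/dfs/name")); // accepted
        try {
          checkScheme(URI.create("hdfs://host/name"));    // rejected
        } catch (IOException expected) {
          System.out.println(expected.getMessage());      // Unknown scheme hdfs
        }
      }
    }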
Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java?rev=787781&r1=787780&r2=787781&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java Tue Jun 23 18:38:42 2009
@@ -69,6 +69,7 @@
 import java.io.PrintWriter;
 import java.io.DataOutputStream;
 import java.net.InetAddress;
+import java.net.URI;
 import java.util.*;
 import java.util.Map.Entry;
 
@@ -301,11 +302,11 @@
     }
   }
 
-  public static Collection<File> getNamespaceDirs(Configuration conf) {
+  public static Collection<URI> getNamespaceDirs(Configuration conf) {
     return getStorageDirs(conf, "dfs.name.dir");
   }
 
-  public static Collection<File> getStorageDirs(Configuration conf,
+  public static Collection<URI> getStorageDirs(Configuration conf,
                                                 String propertyName) {
     Collection<String> dirNames = conf.getStringCollection(propertyName);
     StartupOption startOpt = NameNode.getStartupOption(conf);
@@ -331,14 +332,28 @@
                 "of the file system meta-data.");
     } else if (dirNames.isEmpty())
       dirNames.add("/tmp/hadoop/dfs/name");
-    Collection<File> dirs = new ArrayList<File>(dirNames.size());
+    Collection<URI> dirs = new ArrayList<URI>(dirNames.size());
     for(String name : dirNames) {
-      dirs.add(new File(name));
+      try {
+        URI u = new URI(name);
+        // If the scheme was not declared, default to file://
+        // and use the absolute path of the file, then warn the user 
+        if(u.getScheme() == null) {
+          u = new URI("file://" + new File(name).getAbsolutePath());
+          LOG.warn("Scheme is undefined for " + name);
+          LOG.warn("Please check your file system configuration in " +
+              "hdfs-site.xml");
+        }
+        dirs.add(u);
+      } catch (Exception e) {
+        LOG.error("Error while processing URI: " + name + 
+            ". The error message was: " + e.getMessage());
+      }
     }
     return dirs;
   }
 
-  public static Collection<File> getNamespaceEditsDirs(Configuration conf) {
+  public static Collection<URI> getNamespaceEditsDirs(Configuration conf) {
     return getStorageDirs(conf, "dfs.name.edits.dir");
   }
 
@@ -3687,7 +3702,7 @@
     boolean regAllowed = getEditLog().checkBackupRegistration(registration);
     if(!regAllowed)
       throw new IOException("Registration is not allowed. " +
-          "Another node is registered as a backup.");
+                            "Another node is registered as a backup.");
   }
 
   /**
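The fallback in getStorageDirs above is the piece most likely to surprise tool authors: a scheme-less value is quietly rewritten into a file:// URI built from its absolute path. A minimal standalone version of that normalization (the helper name is mine; the patch does this inline and also logs warnings):

    import java.io.File;
    import java.net.URI;
    import java.net.URISyntaxException;

    public class UriFallbackSketch {
      // Normalize a configured directory name the way the patch does:
      // treat a scheme-less value as a local path under file://.
      static URI toStorageUri(String name) throws URISyntaxException {
        URI u = new URI(name);
        if (u.getScheme() == null)
          u = new URI("file://" + new File(name).getAbsolutePath());
        return u;
      }

      public static void main(String[] args) throws URISyntaxException {
        // Assuming a Unix-style absolute path:
        System.out.println(toStorageUri("/data/dfs/name"));        // file:///data/dfs/name
        System.out.println(toStorageUri("file:///data/dfs/name")); // unchanged
      }
    }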
Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java?rev=787781&r1=787780&r2=787781&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java Tue Jun 23 18:38:42 2009
@@ -1030,11 +1030,11 @@
   private static boolean format(Configuration conf,
                                 boolean isConfirmationNeeded
                                 ) throws IOException {
-    Collection<File> dirsToFormat = FSNamesystem.getNamespaceDirs(conf);
-    Collection<File> editDirsToFormat = 
+    Collection<URI> dirsToFormat = FSNamesystem.getNamespaceDirs(conf);
+    Collection<URI> editDirsToFormat = 
                  FSNamesystem.getNamespaceEditsDirs(conf);
-    for(Iterator<File> it = dirsToFormat.iterator(); it.hasNext();) {
-      File curDir = it.next();
+    for(Iterator<URI> it = dirsToFormat.iterator(); it.hasNext();) {
+      File curDir = new File(it.next().getPath());
       if (!curDir.exists())
         continue;
       if (isConfirmationNeeded) {
@@ -1056,8 +1056,8 @@
   private static boolean finalize(Configuration conf,
                                boolean isConfirmationNeeded
                                ) throws IOException {
-    Collection<File> dirsToFormat = FSNamesystem.getNamespaceDirs(conf);
-    Collection<File> editDirsToFormat = 
+    Collection<URI> dirsToFormat = FSNamesystem.getNamespaceDirs(conf);
+    Collection<URI> editDirsToFormat = 
                  FSNamesystem.getNamespaceEditsDirs(conf);
     FSNamesystem nsys = new FSNamesystem(new FSImage(dirsToFormat,
                                          editDirsToFormat), conf);

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java?rev=787781&r1=787780&r2=787781&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java Tue Jun 23 18:38:42 2009
@@ -79,8 +79,8 @@
   private int infoPort;
   private String infoBindAddress;
 
-  private Collection<File> checkpointDirs;
-  private Collection<File> checkpointEditsDirs;
+  private Collection<URI> checkpointDirs;
+  private Collection<URI> checkpointEditsDirs;
   private long checkpointPeriod;	// in seconds
   private long checkpointSize;    // size (in MB) of current Edit Log
 
@@ -489,10 +489,10 @@
    * @param editsDirs
    * @throws IOException
    */
-  void recoverCreate(Collection<File> dataDirs,
-                     Collection<File> editsDirs) throws IOException {
-    Collection<File> tempDataDirs = new ArrayList<File>(dataDirs);
-    Collection<File> tempEditsDirs = new ArrayList<File>(editsDirs);
+  void recoverCreate(Collection<URI> dataDirs,
+                     Collection<URI> editsDirs) throws IOException {
+    Collection<URI> tempDataDirs = new ArrayList<URI>(dataDirs);
+    Collection<URI> tempEditsDirs = new ArrayList<URI>(editsDirs);
     this.storageDirs = new ArrayList<StorageDirectory>();
     setStorageDirectories(tempDataDirs, tempEditsDirs);
     for (Iterator<StorageDirectory> it = 
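From here on, the patch repeatedly converts a file:// storage URI back to a java.io.File via URI.getPath() for local filesystem work, as format() above does. A tiny sketch of the pattern (paths illustrative):

    import java.io.File;
    import java.net.URI;

    public class UriToFileSketch {
      public static void main(String[] args) {
        URI nameDir = URI.create("file:///data/dfs/name");
        // getPath() strips the scheme; this is safe here because only
        // file:// URIs are ever turned into StorageDirectory entries.
        File curDir = new File(nameDir.getPath());
        System.out.println(curDir);                      // /data/dfs/name
        System.out.println(new File(curDir, "current")); // /data/dfs/name/current
      }
    }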
Modified: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/MiniDFSCluster.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/MiniDFSCluster.java?rev=787781&r1=787780&r2=787781&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/MiniDFSCluster.java (original)
+++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/MiniDFSCluster.java Tue Jun 23 18:38:42 2009
@@ -20,6 +20,7 @@
 import java.io.File;
 import java.io.IOException;
 import java.net.InetSocketAddress;
+import java.net.URI;
 import java.util.ArrayList;
 import java.util.Collection;
 import java.nio.channels.FileChannel;
@@ -704,14 +705,14 @@
   /**
    * Get the directories where the namenode stores its image.
    */
-  public Collection<File> getNameDirs() {
+  public Collection<URI> getNameDirs() {
     return FSNamesystem.getNamespaceDirs(conf);
   }
 
   /**
    * Get the directories where the namenode stores its edits.
    */
-  public Collection<File> getNameEditsDirs() {
+  public Collection<URI> getNameEditsDirs() {
     return FSNamesystem.getNamespaceEditsDirs(conf);
   }

Modified: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/CreateEditsLog.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/CreateEditsLog.java?rev=787781&r1=787780&r2=787781&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/CreateEditsLog.java (original)
+++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/CreateEditsLog.java Tue Jun 23 18:38:42 2009
@@ -19,6 +19,8 @@
 
 import java.io.File;
 import java.io.IOException;
+import java.net.URI;
+import java.net.URISyntaxException;
 
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.permission.PermissionStatus;
@@ -52,7 +54,7 @@
 public class CreateEditsLog {
   static final String BASE_PATH = "/createdViaInjectingInEditsLog";
   static final String EDITS_DIR = "/tmp/EditsLogOut";
-  static String edits_dir = EDITS_DIR;
+  static String edits_dir = "file:// " + EDITS_DIR; // process as URI
   static final public long BLOCK_GENERATION_STAMP =
     GenerationStamp.FIRST_VALID_STAMP;
 
@@ -134,7 +136,8 @@
    * @param args
    * @throws IOException 
    */
-  public static void main(String[] args) throws IOException {
+  public static void main(String[] args) 
+      throws IOException {
@@ -194,8 +197,14 @@
         System.exit(-1);
       }
     }
-    
-    FSImage fsImage = new FSImage(new File(edits_dir));
+
+    FSImage fsImage = null;
+    try {
+      fsImage = new FSImage(new URI(edits_dir));
+    } catch (URISyntaxException use) {
+      throw new IOException("Error while processing URI: " + edits_dir + 
+          ". The full error message was: " + use.getMessage());
+    }
     FileNameGenerator nameGenerator = new FileNameGenerator(BASE_PATH, 100);
Modified: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java?rev=787781&r1=787780&r2=787781&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java (original)
+++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java Tue Jun 23 18:38:42 2009
@@ -75,7 +75,7 @@
   }
 
   static String getBackupNodeDir(StartupOption t, int i) {
-    return BASE_DIR + "name" + t.getName() + i;
+    return BASE_DIR + "name" + t.getName() + i + "/";
   }
 
   BackupNode startBackupNode(Configuration conf,

Modified: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java?rev=787781&r1=787780&r2=787781&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java (original)
+++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java Tue Jun 23 18:38:42 2009
@@ -19,6 +19,7 @@
 import junit.framework.TestCase;
 import java.io.*;
+import java.net.URI;
 import java.util.Collection;
 import java.util.List;
 import java.util.Iterator;
@@ -107,7 +108,7 @@
   /*
    * Verify that namenode does not startup if one namedir is bad.
    */
-  private void testNamedirError(Configuration conf, Collection<File> namedirs) 
+  private void testNamedirError(Configuration conf, Collection<URI> namedirs) 
     throws IOException {
     System.out.println("Starting testNamedirError");
     MiniDFSCluster cluster = null;
@@ -119,7 +120,7 @@
     //
     // Remove one namedir & Restart cluster. This should fail.
     //
-    File first = namedirs.iterator().next();
+    File first = new File(namedirs.iterator().next().getPath());
     removeOneNameDir(first);
     try {
       cluster = new MiniDFSCluster(conf, 0, false, null);
@@ -477,12 +478,13 @@
 
     // Remove current image and import a checkpoint.
     System.out.println("Import a checkpoint with existing primary image.");
-    List<File> nameDirs = (List<File>)FSNamesystem.getNamespaceDirs(conf);
-    List<File> nameEditsDirs = (List<File>)FSNamesystem.
+    List<URI> nameDirs = (List<URI>)FSNamesystem.getNamespaceDirs(conf);
+    List<URI> nameEditsDirs = (List<URI>)FSNamesystem.
                                   getNamespaceEditsDirs(conf);
-    long fsimageLength = new File(new File(nameDirs.get(0), "current"), 
+    long fsimageLength = new File(new File(nameDirs.get(0).getPath(), "current"), 
                                         NameNodeFile.IMAGE.getName()).length();
-    for(File dir : nameDirs) {
+    for(URI uri : nameDirs) {
+      File dir = new File(uri.getPath());
       if(dir.exists())
         if(!(FileUtil.fullyDelete(dir)))
           throw new IOException("Cannot remove directory: " + dir);
@@ -490,7 +492,8 @@
         throw new IOException("Cannot create directory " + dir);
     }
 
-    for(File dir : nameEditsDirs) {
+    for(URI uri : nameEditsDirs) {
+      File dir = new File(uri.getPath());
       if(dir.exists())
         if(!(FileUtil.fullyDelete(dir)))
           throw new IOException("Cannot remove directory: " + dir);
@@ -512,26 +515,30 @@
     // recover failed checkpoint
     nn = startNameNode(conf, primaryDirs, primaryEditsDirs,
                         StartupOption.REGULAR);
-    Collection<File> secondaryDirs = FSImage.getCheckpointDirs(conf, null);
-    for(File dir : secondaryDirs) {
+    Collection<URI> secondaryDirs = FSImage.getCheckpointDirs(conf, null);
+    for(URI uri : secondaryDirs) {
+      File dir = new File(uri.getPath());
       Storage.rename(new File(dir, "current"),
                      new File(dir, "lastcheckpoint.tmp"));
     }
     secondary = startSecondaryNameNode(conf);
     secondary.shutdown();
-    for(File dir : secondaryDirs) {
+    for(URI uri : secondaryDirs) {
+      File dir = new File(uri.getPath());
       assertTrue(new File(dir, "current").exists()); 
      assertFalse(new File(dir, "lastcheckpoint.tmp").exists());
     }
 
     // complete failed checkpoint
-    for(File dir : secondaryDirs) {
+    for(URI uri : secondaryDirs) {
+      File dir = new File(uri.getPath());
       Storage.rename(new File(dir, "previous.checkpoint"),
                      new File(dir, "lastcheckpoint.tmp"));
     }
     secondary = startSecondaryNameNode(conf);
     secondary.shutdown();
-    for(File dir : secondaryDirs) {
+    for(URI uri : secondaryDirs) {
+      File dir = new File(uri.getPath());
       assertTrue(new File(dir, "current").exists()); 
       assertTrue(new File(dir, "previous.checkpoint").exists());
       assertFalse(new File(dir, "lastcheckpoint.tmp").exists());
@@ -574,7 +581,7 @@
   public void testCheckpoint() throws IOException {
     Path file1 = new Path("checkpoint.dat");
     Path file2 = new Path("checkpoint2.dat");
-    Collection<File> namedirs = null;
+    Collection<URI> namedirs = null;
 
     Configuration conf = new Configuration();
     conf.set("dfs.secondary.http.address", "0.0.0.0:0");
@@ -690,8 +697,9 @@
       writeFile(fs, file, replication);
       checkFile(fs, file, replication);
       // verify that the edits file is NOT empty
-      Collection<File> editsDirs = cluster.getNameEditsDirs();
-      for(File ed : editsDirs) {
+      Collection<URI> editsDirs = cluster.getNameEditsDirs();
+      for(URI uri : editsDirs) {
+        File ed = new File(uri.getPath());
         assertTrue(new File(ed, "current/edits").length() > Integer.SIZE/Byte.SIZE);
       }
@@ -703,7 +711,8 @@
         throw new IOException(e);
       }
       // verify that the edits file is empty
-      for(File ed : editsDirs) {
+      for(URI uri : editsDirs) {
+        File ed = new File(uri.getPath());
         assertTrue(new File(ed, "current/edits").length() == Integer.SIZE/Byte.SIZE);
       }
Modified: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java?rev=787781&r1=787780&r2=787781&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java (original)
+++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java Tue Jun 23 18:38:42 2009
@@ -19,6 +19,7 @@
 import junit.framework.TestCase;
 import java.io.*;
+import java.net.URI;
 import java.util.Iterator;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
@@ -92,8 +93,8 @@
       fileSys = cluster.getFileSystem();
       final FSNamesystem namesystem = cluster.getNamesystem();
 
-      for (Iterator<File> it = cluster.getNameDirs().iterator(); it.hasNext(); ) {
-        File dir = it.next();
+      for (Iterator<URI> it = cluster.getNameDirs().iterator(); it.hasNext(); ) {
+        File dir = new File(it.next().getPath());
         System.out.println(dir);
       }

Modified: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestStartup.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestStartup.java?rev=787781&r1=787780&r2=787781&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestStartup.java (original)
+++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestStartup.java Tue Jun 23 18:38:42 2009
@@ -2,6 +2,7 @@
 
 import java.io.File;
 import java.io.IOException;
+import java.net.URI;
 import java.util.Iterator;
 import java.util.List;
 import java.util.Random;
@@ -126,11 +127,11 @@
    */
   private void corruptNameNodeFiles() throws IOException {
     // now corrupt/delete the directrory
-    List<File> nameDirs = (List<File>)FSNamesystem.getNamespaceDirs(config);
-    List<File> nameEditsDirs = (List<File>)FSNamesystem.getNamespaceEditsDirs(config);
+    List<URI> nameDirs = (List<URI>)FSNamesystem.getNamespaceDirs(config);
+    List<URI> nameEditsDirs = (List<URI>)FSNamesystem.getNamespaceEditsDirs(config);
 
     // get name dir and its length, then delete and recreate the directory
-    File dir = nameDirs.get(0); // has only one
+    File dir = new File(nameDirs.get(0).getPath()); // has only one
     this.fsimageLength = new File(new File(dir, "current"),
         NameNodeFile.IMAGE.getName()).length();
 
@@ -142,7 +143,7 @@
     if (!dir.mkdirs())
       throw new IOException("Cannot create directory " + dir);
 
-    dir = nameEditsDirs.get(0); //has only one
+    dir = new File( nameEditsDirs.get(0).getPath()); //has only one
     this.editsLength = new File(new File(dir, "current"),
         NameNodeFile.EDITS.getName()).length();

Modified: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java?rev=787781&r1=787780&r2=787781&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java (original)
+++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java Tue Jun 23 18:38:42 2009
@@ -28,6 +28,7 @@
 import java.io.IOException;
 import java.io.InputStream;
 import java.io.OutputStream;
+import java.net.URI;
 import java.util.HashMap;
 import java.util.Set;
 
@@ -124,8 +125,8 @@
       cluster.getNameNode().saveNamespace();
 
       // Determine location of fsimage file
-      File [] files = cluster.getNameDirs().toArray(new File[0]);
-      orig = new File(files[0], "current/fsimage");
+      URI [] files = cluster.getNameDirs().toArray(new URI[0]);
+      orig = new File(files[0].getPath(), "current/fsimage");
 
       if(!orig.exists())
         fail("Didn't generate or can't find fsimage.");