Subject: svn commit: r1565519 [1/2] - in /hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs: ./ src/main/java/ src/main/java/org/apache/hadoop/hdfs/ src/main/java/org/apache/hadoop/hdfs/protocol/ src/main/java/org/apache/hadoop/hdfs/server/blockm...
Date: Fri, 07 Feb 2014 02:43:08 -0000
From: szetszwo@apache.org
To: hdfs-commits@hadoop.apache.org
Reply-To: hdfs-dev@hadoop.apache.org

Author: szetszwo
Date: Fri Feb 7 02:43:04 2014
New Revision: 1565519

URL: http://svn.apache.org/r1565519
Log:
Merge r1555021 through r1565516 from trunk.
Added:
    hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHttpPolicy.java
      - copied unchanged from r1565516, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHttpPolicy.java
    hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeOptionParsing.java
      - copied, changed from r1565516, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeOptionParsing.java
    hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/hadoop-2-reserved.tgz
      - copied unchanged from r1565516, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/hadoop-2-reserved.tgz
Modified:
    hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/   (props changed)
    hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
    hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/   (props changed)
    hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocalLegacy.java
    hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
    hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
    hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
    hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
    hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/NameNodeProxies.java
    hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/PeerCache.java
    hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
    hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
    hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java
    hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
    hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/SecureDataNodeStarter.java
    hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
    hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
    hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
    hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
    hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CacheAdmin.java
    hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
    hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsUserGuide.apt.vm
    hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/site/xdoc/HdfsSnapshots.xml
    hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java
    hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferKeepalive.java
    hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithNodeGroup.java
    hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyWithNodeGroup.java
    hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeHttpServer.java
    hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHASafeMode.java
    hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestHttpsFileSystem.java

Propchange: hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs:r1563385-1565516

Modified: hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1565519&r1=1565518&r2=1565519&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original)
+++ hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Fri Feb 7 02:43:04 2014
@@ -308,6 +308,9 @@ Release 2.4.0 - UNRELEASED
     HDFS-5746. Add ShortCircuitSharedMemorySegment (cmccabe)
+    HDFS-4911. Reduce PeerCache timeout to be commensurate with
+    dfs.datanode.socket.reuse.keepalive (cmccabe)
+
   OPTIMIZATIONS
     HDFS-5790. LeaseManager.findPath is very slow when many leases need recovery
@@ -324,6 +327,27 @@ Release 2.4.0 - UNRELEASED
     HDFS-5856. DataNode.checkDiskError might throw NPE. (Josh Elser via suresh)
+    HDFS-5828. BlockPlacementPolicyWithNodeGroup can place multiple replicas on
+    the same node group when dfs.namenode.avoid.write.stale.datanode is true.
+    (Buddy via junping_du)
+
+    HDFS-5767. NFS implementation assumes userName userId mapping to be unique,
+    which is not true sometimes (Yongjun Zhang via brandonli)
+
+    HDFS-5791. TestHttpsFileSystem should use a random port to avoid binding
+    error during testing (Haohui Mai via brandonli)
+
+    HDFS-5709. Improve NameNode upgrade with existing reserved paths and path
+    components. (Andrew Wang via atm)
+
+    HDFS-5881. Fix skip() of the short-circuit local reader(legacy). (kihwal)
+
+    HDFS-5895. HDFS cacheadmin -listPools has exit_code of 1 when the command
+    returns 0 result. (Tassapol Athiapinya via cnauroth)
+
+    HDFS-5807. TestBalancerWithNodeGroup.testBalancerWithNodeGroup fails
+    intermittently. (Chen He via kihwal)
+
 Release 2.3.0 - UNRELEASED
   INCOMPATIBLE CHANGES
@@ -853,6 +877,15 @@ Release 2.3.0 - UNRELEASED
     HDFS-5842. Cannot create hftp filesystem when using a proxy user ugi and a doAs on a secure cluster. (jing9)
+    HDFS-5399. Revisit SafeModeException and corresponding retry policies.
+    (Jing Zhao via todd)
+
+    HDFS-5876. SecureDataNodeStarter does not pick up configuration in
+    hdfs-site.xml.
(Haohui Mai via jing9) + + HDFS-5873. dfs.http.policy should have higher precedence over dfs.https.enable. + (Haohui Mai via jing9) + BREAKDOWN OF HDFS-2832 SUBTASKS AND RELATED JIRAS HDFS-4985. Add storage type to the protocol and expose it in block report Propchange: hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/ ------------------------------------------------------------------------------ Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java:r1563385-1565516 Modified: hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocalLegacy.java URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocalLegacy.java?rev=1565519&r1=1565518&r2=1565519&view=diff ============================================================================== --- hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocalLegacy.java (original) +++ hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocalLegacy.java Fri Feb 7 02:43:04 2014 @@ -629,7 +629,7 @@ class BlockReaderLocalLegacy implements skipBuf = new byte[bytesPerChecksum]; } int ret = read(skipBuf, 0, (int)(n - remaining)); - return ret; + return (remaining + ret); } // optimize for big gap: discard the current buffer, skip to @@ -660,9 +660,9 @@ class BlockReaderLocalLegacy implements int ret = read(skipBuf, 0, myOffsetFromChunkBoundary); if (ret == -1) { // EOS - return toskip; + return (toskip + remaining); } else { - return (toskip + ret); + return (toskip + remaining + ret); } } Modified: hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java?rev=1565519&r1=1565518&r2=1565519&view=diff ============================================================================== --- hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java (original) +++ hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java Fri Feb 7 02:43:04 2014 @@ -36,6 +36,8 @@ import static org.apache.hadoop.hdfs.DFS import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_FAILOVER_SLEEPTIME_BASE_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_FAILOVER_SLEEPTIME_MAX_DEFAULT; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_FAILOVER_SLEEPTIME_MAX_KEY; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_RETRY_MAX_ATTEMPTS_DEFAULT; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_RETRY_MAX_ATTEMPTS_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_DEFAULT; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_READ_PREFETCH_SIZE_KEY; @@ -260,6 +262,7 @@ public class DFSClient implements java.i public static class Conf { final int hdfsTimeout; // timeout value for a DFS operation. 
final int maxFailoverAttempts; + final int maxRetryAttempts; final int failoverSleepBaseMillis; final int failoverSleepMaxMillis; final int maxBlockAcquireFailures; @@ -305,6 +308,9 @@ public class DFSClient implements java.i maxFailoverAttempts = conf.getInt( DFS_CLIENT_FAILOVER_MAX_ATTEMPTS_KEY, DFS_CLIENT_FAILOVER_MAX_ATTEMPTS_DEFAULT); + maxRetryAttempts = conf.getInt( + DFS_CLIENT_RETRY_MAX_ATTEMPTS_KEY, + DFS_CLIENT_RETRY_MAX_ATTEMPTS_DEFAULT); failoverSleepBaseMillis = conf.getInt( DFS_CLIENT_FAILOVER_SLEEPTIME_BASE_KEY, DFS_CLIENT_FAILOVER_SLEEPTIME_BASE_DEFAULT); Modified: hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java?rev=1565519&r1=1565518&r2=1565519&view=diff ============================================================================== --- hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java (original) +++ hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java Fri Feb 7 02:43:04 2014 @@ -82,9 +82,11 @@ public class DFSConfigKeys extends Commo public static final int DFS_CLIENT_FAILOVER_CONNECTION_RETRIES_DEFAULT = 0; public static final String DFS_CLIENT_FAILOVER_CONNECTION_RETRIES_ON_SOCKET_TIMEOUTS_KEY = "dfs.client.failover.connection.retries.on.timeouts"; public static final int DFS_CLIENT_FAILOVER_CONNECTION_RETRIES_ON_SOCKET_TIMEOUTS_DEFAULT = 0; + public static final String DFS_CLIENT_RETRY_MAX_ATTEMPTS_KEY = "dfs.client.retry.max.attempts"; + public static final int DFS_CLIENT_RETRY_MAX_ATTEMPTS_DEFAULT = 10; public static final String DFS_CLIENT_SOCKET_CACHE_EXPIRY_MSEC_KEY = "dfs.client.socketcache.expiryMsec"; - public static final long DFS_CLIENT_SOCKET_CACHE_EXPIRY_MSEC_DEFAULT = 2 * 60 * 1000; + public static final long DFS_CLIENT_SOCKET_CACHE_EXPIRY_MSEC_DEFAULT = 3000; public static final String DFS_CLIENT_WRITE_EXCLUDE_NODES_CACHE_EXPIRY_INTERVAL = "dfs.client.write.exclude.nodes.cache.expiry.interval.millis"; public static final long DFS_CLIENT_WRITE_EXCLUDE_NODES_CACHE_EXPIRY_INTERVAL_DEFAULT = 10 * 60 * 1000; // 10 minutes, in ms public static final String DFS_NAMENODE_BACKUP_ADDRESS_KEY = "dfs.namenode.backup.address"; @@ -213,7 +215,7 @@ public class DFSConfigKeys extends Commo public static final String DFS_DATANODE_SYNCONCLOSE_KEY = "dfs.datanode.synconclose"; public static final boolean DFS_DATANODE_SYNCONCLOSE_DEFAULT = false; public static final String DFS_DATANODE_SOCKET_REUSE_KEEPALIVE_KEY = "dfs.datanode.socket.reuse.keepalive"; - public static final int DFS_DATANODE_SOCKET_REUSE_KEEPALIVE_DEFAULT = 1000; + public static final int DFS_DATANODE_SOCKET_REUSE_KEEPALIVE_DEFAULT = 4000; public static final String DFS_NAMENODE_DATANODE_REGISTRATION_IP_HOSTNAME_CHECK_KEY = "dfs.namenode.datanode.registration.ip-hostname-check"; public static final boolean DFS_NAMENODE_DATANODE_REGISTRATION_IP_HOSTNAME_CHECK_DEFAULT = true; @@ -576,6 +578,8 @@ public class DFSConfigKeys extends Commo public static final String DFS_HTTP_CLIENT_RETRY_POLICY_SPEC_DEFAULT = "10000,6,60000,10"; //t1,n1,t2,n2,... 
public static final String DFS_HTTP_CLIENT_FAILOVER_MAX_ATTEMPTS_KEY = "dfs.http.client.failover.max.attempts"; public static final int DFS_HTTP_CLIENT_FAILOVER_MAX_ATTEMPTS_DEFAULT = 15; + public static final String DFS_HTTP_CLIENT_RETRY_MAX_ATTEMPTS_KEY = "dfs.http.client.retry.max.attempts"; + public static final int DFS_HTTP_CLIENT_RETRY_MAX_ATTEMPTS_DEFAULT = 10; public static final String DFS_HTTP_CLIENT_FAILOVER_SLEEPTIME_BASE_KEY = "dfs.http.client.failover.sleep.base.millis"; public static final int DFS_HTTP_CLIENT_FAILOVER_SLEEPTIME_BASE_DEFAULT = 500; public static final String DFS_HTTP_CLIENT_FAILOVER_SLEEPTIME_MAX_KEY = "dfs.http.client.failover.sleep.max.millis"; Modified: hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java?rev=1565519&r1=1565518&r2=1565519&view=diff ============================================================================== --- hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java (original) +++ hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java Fri Feb 7 02:43:04 2014 @@ -1345,6 +1345,14 @@ implements ByteBufferReadable, CanSetDro pos += blockReader.skip(diff); if (pos == targetPos) { done = true; + } else { + // The range was already checked. If the block reader returns + // something unexpected instead of throwing an exception, it is + // most likely a bug. + String errMsg = "BlockReader failed to seek to " + + targetPos + ". Instead, it seeked to " + pos + "."; + DFSClient.LOG.warn(errMsg); + throw new IOException(errMsg); } } catch (IOException e) {//make following read to retry if(DFSClient.LOG.isDebugEnabled()) { Modified: hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java?rev=1565519&r1=1565518&r2=1565519&view=diff ============================================================================== --- hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java (original) +++ hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java Fri Feb 7 02:43:04 2014 @@ -262,6 +262,47 @@ public class DFSUtil { } /** + * Checks if a string is a valid path component. For instance, components + * cannot contain a ":" or "/", and cannot be equal to a reserved component + * like ".snapshot". + *

+ * The primary use of this method is for validating paths when loading the + * FSImage. During normal NN operation, paths are sometimes allowed to + * contain reserved components. + * + * @return If component is valid + */ + public static boolean isValidNameForComponent(String component) { + if (component.equals(".") || + component.equals("..") || + component.indexOf(":") >= 0 || + component.indexOf("/") >= 0) { + return false; + } + return !isReservedPathComponent(component); + } + + + /** + * Returns if the component is reserved. + * + *

+ * Note that some components are only reserved under certain directories, e.g. + * "/.reserved" is reserved, while "/hadoop/.reserved" is not. + * + * @param component + * @return if the component is reserved + */ + public static boolean isReservedPathComponent(String component) { + for (String reserved : HdfsConstants.RESERVED_PATH_COMPONENTS) { + if (component.equals(reserved)) { + return true; + } + } + return false; + } + + /** * Converts a byte array to a string using UTF8 encoding. */ public static String bytes2String(byte[] bytes) { @@ -312,7 +353,25 @@ public class DFSUtil { } return result.toString(); } - + + /** + * Converts a list of path components into a path using Path.SEPARATOR. + * + * @param components Path components + * @return Combined path as a UTF-8 string + */ + public static String strings2PathString(String[] components) { + if (components.length == 0) { + return ""; + } + if (components.length == 1) { + if (components[0] == null || components[0].isEmpty()) { + return Path.SEPARATOR; + } + } + return Joiner.on(Path.SEPARATOR).join(components); + } + /** * Given a list of path components returns a byte array */ @@ -1508,31 +1567,34 @@ public class DFSUtil { * configuration settings. */ public static HttpConfig.Policy getHttpPolicy(Configuration conf) { - String httpPolicy = conf.get(DFSConfigKeys.DFS_HTTP_POLICY_KEY, - DFSConfigKeys.DFS_HTTP_POLICY_DEFAULT); - - HttpConfig.Policy policy = HttpConfig.Policy.fromString(httpPolicy); - - if (policy == HttpConfig.Policy.HTTP_ONLY) { - boolean httpsEnabled = conf.getBoolean( - DFSConfigKeys.DFS_HTTPS_ENABLE_KEY, + String policyStr = conf.get(DFSConfigKeys.DFS_HTTP_POLICY_KEY); + if (policyStr == null) { + boolean https = conf.getBoolean(DFSConfigKeys.DFS_HTTPS_ENABLE_KEY, DFSConfigKeys.DFS_HTTPS_ENABLE_DEFAULT); - boolean hadoopSslEnabled = conf.getBoolean( + boolean hadoopSsl = conf.getBoolean( CommonConfigurationKeys.HADOOP_SSL_ENABLED_KEY, CommonConfigurationKeys.HADOOP_SSL_ENABLED_DEFAULT); - if (hadoopSslEnabled) { + if (hadoopSsl) { LOG.warn(CommonConfigurationKeys.HADOOP_SSL_ENABLED_KEY - + " is deprecated. Please use " - + DFSConfigKeys.DFS_HTTPS_ENABLE_KEY + "."); - policy = HttpConfig.Policy.HTTPS_ONLY; - } else if (httpsEnabled) { + + " is deprecated. Please use " + DFSConfigKeys.DFS_HTTP_POLICY_KEY + + "."); + } + if (https) { LOG.warn(DFSConfigKeys.DFS_HTTPS_ENABLE_KEY - + " is deprecated. Please use " - + DFSConfigKeys.DFS_HTTPS_ENABLE_KEY + "."); - policy = HttpConfig.Policy.HTTP_AND_HTTPS; + + " is deprecated. Please use " + DFSConfigKeys.DFS_HTTP_POLICY_KEY + + "."); } + + return (hadoopSsl || https) ? 
HttpConfig.Policy.HTTP_AND_HTTPS + : HttpConfig.Policy.HTTP_ONLY; + } + + HttpConfig.Policy policy = HttpConfig.Policy.fromString(policyStr); + if (policy == null) { + throw new HadoopIllegalArgumentException("Unregonized value '" + + policyStr + "' for " + DFSConfigKeys.DFS_HTTP_POLICY_KEY); } conf.set(DFSConfigKeys.DFS_HTTP_POLICY_KEY, policy.name()); Modified: hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/NameNodeProxies.java URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/NameNodeProxies.java?rev=1565519&r1=1565518&r2=1565519&view=diff ============================================================================== --- hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/NameNodeProxies.java (original) +++ hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/NameNodeProxies.java Fri Feb 7 02:43:04 2014 @@ -24,6 +24,8 @@ import static org.apache.hadoop.hdfs.DFS import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_FAILOVER_SLEEPTIME_BASE_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_FAILOVER_SLEEPTIME_MAX_DEFAULT; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_FAILOVER_SLEEPTIME_MAX_KEY; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_RETRY_MAX_ATTEMPTS_KEY; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_RETRY_MAX_ATTEMPTS_DEFAULT; import java.io.IOException; import java.lang.reflect.Constructor; @@ -144,9 +146,10 @@ public class NameNodeProxies { .createFailoverProxyProvider(conf, failoverProxyProviderClass, xface, nameNodeUri); Conf config = new Conf(conf); - T proxy = (T) RetryProxy.create(xface, failoverProxyProvider, RetryPolicies - .failoverOnNetworkException(RetryPolicies.TRY_ONCE_THEN_FAIL, - config.maxFailoverAttempts, config.failoverSleepBaseMillis, + T proxy = (T) RetryProxy.create(xface, failoverProxyProvider, + RetryPolicies.failoverOnNetworkException( + RetryPolicies.TRY_ONCE_THEN_FAIL, config.maxFailoverAttempts, + config.maxRetryAttempts, config.failoverSleepBaseMillis, config.failoverSleepMaxMillis)); Text dtService = HAUtil.buildTokenServiceForLogicalUri(nameNodeUri); @@ -192,11 +195,14 @@ public class NameNodeProxies { int maxFailoverAttempts = config.getInt( DFS_CLIENT_FAILOVER_MAX_ATTEMPTS_KEY, DFS_CLIENT_FAILOVER_MAX_ATTEMPTS_DEFAULT); + int maxRetryAttempts = config.getInt( + DFS_CLIENT_RETRY_MAX_ATTEMPTS_KEY, + DFS_CLIENT_RETRY_MAX_ATTEMPTS_DEFAULT); InvocationHandler dummyHandler = new LossyRetryInvocationHandler( numResponseToDrop, failoverProxyProvider, RetryPolicies.failoverOnNetworkException( - RetryPolicies.TRY_ONCE_THEN_FAIL, - Math.max(numResponseToDrop + 1, maxFailoverAttempts), delay, + RetryPolicies.TRY_ONCE_THEN_FAIL, maxFailoverAttempts, + Math.max(numResponseToDrop + 1, maxRetryAttempts), delay, maxCap)); T proxy = (T) Proxy.newProxyInstance( Modified: hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/PeerCache.java URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/PeerCache.java?rev=1565519&r1=1565518&r2=1565519&view=diff ============================================================================== --- 
hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/PeerCache.java (original) +++ hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/PeerCache.java Fri Feb 7 02:43:04 2014 @@ -18,6 +18,7 @@ package org.apache.hadoop.hdfs; +import java.io.IOException; import java.util.Iterator; import java.util.List; import java.util.Map.Entry; @@ -25,6 +26,7 @@ import java.util.Map.Entry; import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Preconditions; import com.google.common.collect.LinkedListMultimap; + import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hdfs.protocol.DatanodeID; @@ -118,6 +120,11 @@ class PeerCache { return instance; } + @VisibleForTesting + public static synchronized void setInstance(int c, long e) { + instance = new PeerCache(c, e); + } + private boolean isDaemonStarted() { return (daemon == null)? false: true; } @@ -171,8 +178,17 @@ class PeerCache { while (iter.hasNext()) { Value candidate = iter.next(); iter.remove(); - if (!candidate.getPeer().isClosed()) { - return candidate.getPeer(); + long ageMs = Time.monotonicNow() - candidate.getTime(); + Peer peer = candidate.getPeer(); + if (ageMs >= expiryPeriod) { + try { + peer.close(); + } catch (IOException e) { + LOG.warn("got IOException closing stale peer " + peer + + ", which is " + ageMs + " ms old"); + } + } else if (!peer.isClosed()) { + return peer; } } return null; Modified: hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java?rev=1565519&r1=1565518&r2=1565519&view=diff ============================================================================== --- hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java (original) +++ hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java Fri Feb 7 02:43:04 2014 @@ -25,10 +25,9 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.hdfs.HdfsConfiguration; -import org.apache.hadoop.hdfs.server.datanode.DataNode; import org.apache.hadoop.hdfs.server.datanode.DataNodeLayoutVersion; -import org.apache.hadoop.hdfs.server.namenode.NameNode; import org.apache.hadoop.hdfs.server.namenode.NameNodeLayoutVersion; +import org.apache.hadoop.hdfs.server.namenode.FSDirectory; /************************************ * Some handy constants @@ -143,6 +142,16 @@ public class HdfsConstants { = DataNodeLayoutVersion.CURRENT_LAYOUT_VERSION; /** + * Path components that are reserved in HDFS. + *

+ * .reserved is only reserved under root ("/"). + */ + public static final String[] RESERVED_PATH_COMPONENTS = new String[] { + HdfsConstants.DOT_SNAPSHOT_DIR, + FSDirectory.DOT_RESERVED_STRING + }; + + /** * A special path component contained in the path for a snapshot file/dir */ public static final String DOT_SNAPSHOT_DIR = ".snapshot"; Modified: hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java?rev=1565519&r1=1565518&r2=1565519&view=diff ============================================================================== --- hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java (original) +++ hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java Fri Feb 7 02:43:04 2014 @@ -317,7 +317,7 @@ public class BlockPlacementPolicyDefault // We need to additionally exclude the nodes that were added to the // result list in the successful calls to choose*() above. for (DatanodeStorageInfo resultStorage : results) { - oldExcludedNodes.add(resultStorage.getDatanodeDescriptor()); + addToExcludedNodes(resultStorage.getDatanodeDescriptor(), oldExcludedNodes); } // Set numOfReplicas, since it can get out of sync with the result list // if the NotEnoughReplicasException was thrown in chooseRandom(). Modified: hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java?rev=1565519&r1=1565518&r2=1565519&view=diff ============================================================================== --- hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java (original) +++ hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java Fri Feb 7 02:43:04 2014 @@ -79,7 +79,8 @@ public final class HdfsServerConstants { INITIALIZESHAREDEDITS("-initializeSharedEdits"), RECOVER ("-recover"), FORCE("-force"), - NONINTERACTIVE("-nonInteractive"); + NONINTERACTIVE("-nonInteractive"), + RENAMERESERVED("-renameReserved"); private final String name; Modified: hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java?rev=1565519&r1=1565518&r2=1565519&view=diff ============================================================================== --- hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java (original) +++ hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java Fri Feb 7 02:43:04 2014 @@ -362,13 +362,13 @@ public class DataNode 
extends Configured .setConf(conf).setACL(new AccessControlList(conf.get(DFS_ADMIN, " "))); HttpConfig.Policy policy = DFSUtil.getHttpPolicy(conf); - InetSocketAddress infoSocAddr = DataNode.getInfoAddr(conf); - String infoHost = infoSocAddr.getHostName(); if (policy.isHttpEnabled()) { if (secureResources == null) { + InetSocketAddress infoSocAddr = DataNode.getInfoAddr(conf); int port = infoSocAddr.getPort(); - builder.addEndpoint(URI.create("http://" + infoHost + ":" + port)); + builder.addEndpoint(URI.create("http://" + + NetUtils.getHostPortString(infoSocAddr))); if (port == 0) { builder.setFindPort(true); } @@ -381,7 +381,7 @@ public class DataNode extends Configured if (policy.isHttpsEnabled()) { InetSocketAddress secInfoSocAddr = NetUtils.createSocketAddr(conf.get( - DFS_DATANODE_HTTPS_ADDRESS_KEY, infoHost + ":" + 0)); + DFS_DATANODE_HTTPS_ADDRESS_KEY, DFS_DATANODE_HTTPS_ADDRESS_DEFAULT)); Configuration sslConf = DFSUtil.loadSslConfiguration(conf); DFSUtil.loadSslConfToHttpServerBuilder(builder, sslConf); @@ -390,7 +390,8 @@ public class DataNode extends Configured if (port == 0) { builder.setFindPort(true); } - builder.addEndpoint(URI.create("https://" + infoHost + ":" + port)); + builder.addEndpoint(URI.create("https://" + + NetUtils.getHostPortString(secInfoSocAddr))); } this.infoServer = builder.build(); Modified: hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/SecureDataNodeStarter.java URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/SecureDataNodeStarter.java?rev=1565519&r1=1565518&r2=1565519&view=diff ============================================================================== --- hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/SecureDataNodeStarter.java (original) +++ hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/SecureDataNodeStarter.java Fri Feb 7 02:43:04 2014 @@ -25,6 +25,7 @@ import org.apache.commons.daemon.DaemonC import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.DFSUtil; +import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; import org.apache.hadoop.http.HttpConfig; import org.apache.hadoop.http.HttpServer2; @@ -62,7 +63,9 @@ public class SecureDataNodeStarter imple @Override public void init(DaemonContext context) throws Exception { System.err.println("Initializing secure datanode resources"); - Configuration conf = new Configuration(); + // Create a new HdfsConfiguration object to ensure that the configuration in + // hdfs-site.xml is picked up. 
+ Configuration conf = new HdfsConfiguration(); // Stash command-line arguments for regular datanode args = context.getArguments(); Modified: hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java?rev=1565519&r1=1565518&r2=1565519&view=diff ============================================================================== --- hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java (original) +++ hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java Fri Feb 7 02:43:04 2014 @@ -17,6 +17,7 @@ */ package org.apache.hadoop.hdfs.server.namenode; +import static org.apache.hadoop.hdfs.server.namenode.FSImageFormat.renameReservedPathsOnUpgrade; import static org.apache.hadoop.util.Time.now; import java.io.FilterInputStream; @@ -320,8 +321,10 @@ public class FSEditLogLoader { switch (op.opCode) { case OP_ADD: { AddCloseOp addCloseOp = (AddCloseOp)op; + final String path = + renameReservedPathsOnUpgrade(addCloseOp.path, logVersion); if (FSNamesystem.LOG.isDebugEnabled()) { - FSNamesystem.LOG.debug(op.opCode + ": " + addCloseOp.path + + FSNamesystem.LOG.debug(op.opCode + ": " + path + " numblocks : " + addCloseOp.blocks.length + " clientHolder " + addCloseOp.clientName + " clientMachine " + addCloseOp.clientMachine); @@ -332,9 +335,9 @@ public class FSEditLogLoader { // 3. OP_ADD to open file for append // See if the file already exists (persistBlocks call) - final INodesInPath iip = fsDir.getLastINodeInPath(addCloseOp.path); + final INodesInPath iip = fsDir.getLastINodeInPath(path); final INodeFile oldFile = INodeFile.valueOf( - iip.getINode(0), addCloseOp.path, true); + iip.getINode(0), path, true); INodeFile newFile = oldFile; if (oldFile == null) { // this is OP_ADD on a new file (case 1) // versions > 0 support per file replication @@ -347,10 +350,10 @@ public class FSEditLogLoader { inodeId = getAndUpdateLastInodeId(addCloseOp.inodeId, logVersion, lastInodeId); newFile = fsDir.unprotectedAddFile(inodeId, - addCloseOp.path, addCloseOp.permissions, replication, + path, addCloseOp.permissions, replication, addCloseOp.mtime, addCloseOp.atime, addCloseOp.blockSize, true, addCloseOp.clientName, addCloseOp.clientMachine); - fsNamesys.leaseManager.addLease(addCloseOp.clientName, addCloseOp.path); + fsNamesys.leaseManager.addLease(addCloseOp.clientName, path); // add the op into retry cache if necessary if (toAddRetryCache) { @@ -366,11 +369,11 @@ public class FSEditLogLoader { FSNamesystem.LOG.debug("Reopening an already-closed file " + "for append"); } - LocatedBlock lb = fsNamesys.prepareFileForWrite(addCloseOp.path, + LocatedBlock lb = fsNamesys.prepareFileForWrite(path, oldFile, addCloseOp.clientName, addCloseOp.clientMachine, null, false, iip.getLatestSnapshotId(), false); - newFile = INodeFile.valueOf(fsDir.getINode(addCloseOp.path), - addCloseOp.path, true); + newFile = INodeFile.valueOf(fsDir.getINode(path), + path, true); // add the op into retry cache is necessary if (toAddRetryCache) { @@ -391,16 +394,17 @@ public class FSEditLogLoader { } case OP_CLOSE: { AddCloseOp addCloseOp = (AddCloseOp)op; - + final String path = + renameReservedPathsOnUpgrade(addCloseOp.path, logVersion); 
if (FSNamesystem.LOG.isDebugEnabled()) { - FSNamesystem.LOG.debug(op.opCode + ": " + addCloseOp.path + + FSNamesystem.LOG.debug(op.opCode + ": " + path + " numblocks : " + addCloseOp.blocks.length + " clientHolder " + addCloseOp.clientName + " clientMachine " + addCloseOp.clientMachine); } - final INodesInPath iip = fsDir.getLastINodeInPath(addCloseOp.path); - final INodeFile file = INodeFile.valueOf(iip.getINode(0), addCloseOp.path); + final INodesInPath iip = fsDir.getLastINodeInPath(path); + final INodeFile file = INodeFile.valueOf(iip.getINode(0), path); // Update the salient file attributes. file.setAccessTime(addCloseOp.atime, Snapshot.CURRENT_STATE_ID); @@ -414,24 +418,26 @@ public class FSEditLogLoader { // could show up twice in a row. But after that version, this // should be fixed, so we should treat it as an error. throw new IOException( - "File is not under construction: " + addCloseOp.path); + "File is not under construction: " + path); } // One might expect that you could use removeLease(holder, path) here, // but OP_CLOSE doesn't serialize the holder. So, remove by path. if (file.isUnderConstruction()) { - fsNamesys.leaseManager.removeLeaseWithPrefixPath(addCloseOp.path); + fsNamesys.leaseManager.removeLeaseWithPrefixPath(path); file.toCompleteFile(file.getModificationTime()); } break; } case OP_UPDATE_BLOCKS: { UpdateBlocksOp updateOp = (UpdateBlocksOp)op; + final String path = + renameReservedPathsOnUpgrade(updateOp.path, logVersion); if (FSNamesystem.LOG.isDebugEnabled()) { - FSNamesystem.LOG.debug(op.opCode + ": " + updateOp.path + + FSNamesystem.LOG.debug(op.opCode + ": " + path + " numblocks : " + updateOp.blocks.length); } - INodeFile oldFile = INodeFile.valueOf(fsDir.getINode(updateOp.path), - updateOp.path); + INodeFile oldFile = INodeFile.valueOf(fsDir.getINode(path), + path); // Update in-memory data structures updateBlocks(fsDir, updateOp, oldFile); @@ -442,7 +448,7 @@ public class FSEditLogLoader { } case OP_ADD_BLOCK: { AddBlockOp addBlockOp = (AddBlockOp) op; - String path = addBlockOp.getPath(); + String path = renameReservedPathsOnUpgrade(addBlockOp.getPath(), logVersion); if (FSNamesystem.LOG.isDebugEnabled()) { FSNamesystem.LOG.debug(op.opCode + ": " + path + " new block id : " + addBlockOp.getLastBlock().getBlockId()); @@ -456,14 +462,20 @@ public class FSEditLogLoader { SetReplicationOp setReplicationOp = (SetReplicationOp)op; short replication = fsNamesys.getBlockManager().adjustReplication( setReplicationOp.replication); - fsDir.unprotectedSetReplication(setReplicationOp.path, + fsDir.unprotectedSetReplication( + renameReservedPathsOnUpgrade(setReplicationOp.path, logVersion), replication, null); break; } case OP_CONCAT_DELETE: { ConcatDeleteOp concatDeleteOp = (ConcatDeleteOp)op; - fsDir.unprotectedConcat(concatDeleteOp.trg, concatDeleteOp.srcs, - concatDeleteOp.timestamp); + String trg = renameReservedPathsOnUpgrade(concatDeleteOp.trg, logVersion); + String[] srcs = new String[concatDeleteOp.srcs.length]; + for (int i=0; i removedINodes = new ChunkedArrayList(); + final String snapshotRoot = + renameReservedPathsOnUpgrade(deleteSnapshotOp.snapshotRoot, + logVersion); fsNamesys.getSnapshotManager().deleteSnapshot( - deleteSnapshotOp.snapshotRoot, deleteSnapshotOp.snapshotName, + snapshotRoot, deleteSnapshotOp.snapshotName, collectedBlocks, removedINodes); fsNamesys.removeBlocksAndUpdateSafemodeTotal(collectedBlocks); collectedBlocks.clear(); @@ -645,8 +675,11 @@ public class FSEditLogLoader { } case OP_RENAME_SNAPSHOT: { RenameSnapshotOp 
renameSnapshotOp = (RenameSnapshotOp) op; + final String snapshotRoot = + renameReservedPathsOnUpgrade(renameSnapshotOp.snapshotRoot, + logVersion); fsNamesys.getSnapshotManager().renameSnapshot( - renameSnapshotOp.snapshotRoot, renameSnapshotOp.snapshotOldName, + snapshotRoot, renameSnapshotOp.snapshotOldName, renameSnapshotOp.snapshotNewName); if (toAddRetryCache) { @@ -657,14 +690,19 @@ public class FSEditLogLoader { } case OP_ALLOW_SNAPSHOT: { AllowSnapshotOp allowSnapshotOp = (AllowSnapshotOp) op; + final String snapshotRoot = + renameReservedPathsOnUpgrade(allowSnapshotOp.snapshotRoot, logVersion); fsNamesys.getSnapshotManager().setSnapshottable( - allowSnapshotOp.snapshotRoot, false); + snapshotRoot, false); break; } case OP_DISALLOW_SNAPSHOT: { DisallowSnapshotOp disallowSnapshotOp = (DisallowSnapshotOp) op; + final String snapshotRoot = + renameReservedPathsOnUpgrade(disallowSnapshotOp.snapshotRoot, + logVersion); fsNamesys.getSnapshotManager().resetSnapshottable( - disallowSnapshotOp.snapshotRoot); + snapshotRoot); break; } case OP_SET_GENSTAMP_V2: { Modified: hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java?rev=1565519&r1=1565518&r2=1565519&view=diff ============================================================================== --- hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java (original) +++ hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java Fri Feb 7 02:43:04 2014 @@ -32,12 +32,13 @@ import java.security.DigestOutputStream; import java.security.MessageDigest; import java.util.ArrayList; import java.util.Arrays; +import java.util.Collection; import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.TreeMap; import org.apache.commons.logging.Log; -import org.apache.hadoop.HadoopIllegalArgumentException; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; @@ -45,13 +46,15 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.PathIsNotDirectoryException; import org.apache.hadoop.fs.UnresolvedLinkException; import org.apache.hadoop.fs.permission.PermissionStatus; +import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.hdfs.protocol.HdfsConstants; +import org.apache.hadoop.hdfs.protocol.LayoutFlags; import org.apache.hadoop.hdfs.protocol.LayoutVersion; import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature; -import org.apache.hadoop.hdfs.protocol.LayoutFlags; import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo; import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction; import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption; import org.apache.hadoop.hdfs.server.common.InconsistentFSStateException; import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature; import org.apache.hadoop.hdfs.server.namenode.snapshot.FileDiffList; @@ -67,6 +70,10 @@ import org.apache.hadoop.hdfs.server.nam import org.apache.hadoop.hdfs.util.ReadOnlyList; import 
org.apache.hadoop.io.MD5Hash; import org.apache.hadoop.io.Text; +import org.apache.hadoop.util.StringUtils; + +import com.google.common.annotations.VisibleForTesting; +import com.google.common.base.Preconditions; /** * Contains inner classes for reading or writing the on-disk format for @@ -411,7 +418,8 @@ public class FSImageFormat { } /** - * load fsimage files assuming only local names are stored + * load fsimage files assuming only local names are stored. Used when + * snapshots are not supported by the layout version. * * @param numFiles number of files expected to be read * @param in image input stream @@ -527,6 +535,8 @@ public class FSImageFormat { */ private int loadDirectory(DataInput in, Counter counter) throws IOException { String parentPath = FSImageSerialization.readString(in); + // Rename .snapshot paths if we're doing an upgrade + parentPath = renameReservedPathsOnUpgrade(parentPath, getLayoutVersion()); final INodeDirectory parent = INodeDirectory.valueOf( namesystem.dir.rootDir.getNode(parentPath, true), parentPath); return loadChildren(parent, in, counter); @@ -586,11 +596,9 @@ public class FSImageFormat { */ private void addToParent(INodeDirectory parent, INode child) { FSDirectory fsDir = namesystem.dir; - if (parent == fsDir.rootDir && FSDirectory.isReservedName(child)) { - throw new HadoopIllegalArgumentException("File name \"" - + child.getLocalName() + "\" is reserved. Please " - + " change the name of the existing file or directory to another " - + "name before upgrading to this release."); + if (parent == fsDir.rootDir) { + child.setLocalName(renameReservedRootComponentOnUpgrade( + child.getLocalNameBytes(), getLayoutVersion())); } // NOTE: This does not update space counts for parents if (!parent.addChild(child)) { @@ -627,7 +635,9 @@ public class FSImageFormat { public INode loadINodeWithLocalName(boolean isSnapshotINode, DataInput in, boolean updateINodeMap, Counter counter) throws IOException { - final byte[] localName = FSImageSerialization.readLocalName(in); + byte[] localName = FSImageSerialization.readLocalName(in); + localName = + renameReservedComponentOnUpgrade(localName, getLayoutVersion()); INode inode = loadINode(localName, isSnapshotINode, in, counter); if (updateINodeMap && NameNodeLayoutVersion.supports( @@ -944,7 +954,156 @@ public class FSImageFormat { return snapshotMap.get(in.readInt()); } } - + + @VisibleForTesting + public static TreeMap renameReservedMap = + new TreeMap(); + + /** + * Use the default key-value pairs that will be used to determine how to + * rename reserved paths on upgrade. + */ + @VisibleForTesting + public static void useDefaultRenameReservedPairs() { + renameReservedMap.clear(); + for (String key: HdfsConstants.RESERVED_PATH_COMPONENTS) { + renameReservedMap.put( + key, + key + "." + HdfsConstants.NAMENODE_LAYOUT_VERSION + "." + + "UPGRADE_RENAMED"); + } + } + + /** + * Set the key-value pairs that will be used to determine how to rename + * reserved paths on upgrade. 
+ */ + @VisibleForTesting + public static void setRenameReservedPairs(String renameReserved) { + // Clear and set the default values + useDefaultRenameReservedPairs(); + // Overwrite with provided values + setRenameReservedMapInternal(renameReserved); + } + + private static void setRenameReservedMapInternal(String renameReserved) { + Collection pairs = + StringUtils.getTrimmedStringCollection(renameReserved); + for (String p : pairs) { + String[] pair = StringUtils.split(p, '/', '='); + Preconditions.checkArgument(pair.length == 2, + "Could not parse key-value pair " + p); + String key = pair[0]; + String value = pair[1]; + Preconditions.checkArgument(DFSUtil.isReservedPathComponent(key), + "Unknown reserved path " + key); + Preconditions.checkArgument(DFSUtil.isValidNameForComponent(value), + "Invalid rename path for " + key + ": " + value); + LOG.info("Will rename reserved path " + key + " to " + value); + renameReservedMap.put(key, value); + } + } + + /** + * When upgrading from an old version, the filesystem could contain paths + * that are now reserved in the new version (e.g. .snapshot). This renames + * these new reserved paths to a user-specified value to avoid collisions + * with the reserved name. + * + * @param path Old path potentially containing a reserved path + * @return New path with reserved path components renamed to user value + */ + static String renameReservedPathsOnUpgrade(String path, + final int layoutVersion) { + final String oldPath = path; + // If any known LVs aren't supported, we're doing an upgrade + if (!NameNodeLayoutVersion.supports(Feature.ADD_INODE_ID, layoutVersion)) { + String[] components = INode.getPathNames(path); + // Only need to worry about the root directory + if (components.length > 1) { + components[1] = DFSUtil.bytes2String( + renameReservedRootComponentOnUpgrade( + DFSUtil.string2Bytes(components[1]), + layoutVersion)); + path = DFSUtil.strings2PathString(components); + } + } + if (!NameNodeLayoutVersion.supports(Feature.SNAPSHOT, layoutVersion)) { + String[] components = INode.getPathNames(path); + // Special case the root path + if (components.length == 0) { + return path; + } + for (int i=0; i getNamespaceDirs(Configuration conf) { return getStorageDirs(conf, DFS_NAMENODE_NAME_DIR_KEY); } Modified: hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java?rev=1565519&r1=1565518&r2=1565519&view=diff ============================================================================== --- hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java (original) +++ hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java Fri Feb 7 02:43:04 2014 @@ -212,7 +212,9 @@ public class NameNode implements NameNod + StartupOption.CLUSTERID.getName() + " cid ] [" + StartupOption.FORCE.getName() + "] [" + StartupOption.NONINTERACTIVE.getName() + "] ] | [" - + StartupOption.UPGRADE.getName() + "] | [" + + StartupOption.UPGRADE.getName() + + " [" + StartupOption.CLUSTERID.getName() + " cid]" + + " [" + StartupOption.RENAMERESERVED.getName() + "] ] | [" + StartupOption.ROLLBACK.getName() + "] | [" + StartupOption.FINALIZE.getName() + "] | [" + StartupOption.IMPORT.getName() + "] | [" @@ 
-1056,7 +1058,8 @@ public class NameNode implements NameNod out.println(USAGE + "\n"); } - private static StartupOption parseArguments(String args[]) { + @VisibleForTesting + static StartupOption parseArguments(String args[]) { int argsLen = (args == null) ? 0 : args.length; StartupOption startOpt = StartupOption.REGULAR; for(int i=0; i < argsLen; i++) { @@ -1103,11 +1106,33 @@ public class NameNode implements NameNod startOpt = StartupOption.CHECKPOINT; } else if (StartupOption.UPGRADE.getName().equalsIgnoreCase(cmd)) { startOpt = StartupOption.UPGRADE; - // might be followed by two args - if (i + 2 < argsLen - && args[i + 1].equalsIgnoreCase(StartupOption.CLUSTERID.getName())) { - i += 2; - startOpt.setClusterId(args[i]); + /* Can be followed by CLUSTERID with a required parameter or + * RENAMERESERVED with an optional parameter + */ + while (i + 1 < argsLen) { + String flag = args[i + 1]; + if (flag.equalsIgnoreCase(StartupOption.CLUSTERID.getName())) { + if (i + 2 < argsLen) { + i += 2; + startOpt.setClusterId(args[i]); + } else { + LOG.fatal("Must specify a valid cluster ID after the " + + StartupOption.CLUSTERID.getName() + " flag"); + return null; + } + } else if (flag.equalsIgnoreCase(StartupOption.RENAMERESERVED + .getName())) { + if (i + 2 < argsLen) { + FSImageFormat.setRenameReservedPairs(args[i + 2]); + i += 2; + } else { + FSImageFormat.useDefaultRenameReservedPairs(); + i += 1; + } + } else { + LOG.fatal("Unknown upgrade flag " + flag); + return null; + } } } else if (StartupOption.ROLLINGUPGRADE.getName().equalsIgnoreCase(cmd)) { startOpt = StartupOption.ROLLINGUPGRADE; Modified: hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CacheAdmin.java URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CacheAdmin.java?rev=1565519&r1=1565518&r2=1565519&view=diff ============================================================================== --- hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CacheAdmin.java (original) +++ hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CacheAdmin.java Fri Feb 7 02:43:04 2014 @@ -962,9 +962,8 @@ public class CacheAdmin extends Configur if (numResults > 0) { System.out.print(listing); } - // If there are no results, we return 1 (failure exit code); - // otherwise we return 0 (success exit code). - return (numResults == 0) ? 
1 : 0; + // If list pools succeed, we return 0 (success exit code) + return 0; } } Modified: hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java?rev=1565519&r1=1565518&r2=1565519&view=diff ============================================================================== --- hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java (original) +++ hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java Fri Feb 7 02:43:04 2014 @@ -188,6 +188,9 @@ public class WebHdfsFileSystem extends F int maxFailoverAttempts = conf.getInt( DFSConfigKeys.DFS_HTTP_CLIENT_FAILOVER_MAX_ATTEMPTS_KEY, DFSConfigKeys.DFS_HTTP_CLIENT_FAILOVER_MAX_ATTEMPTS_DEFAULT); + int maxRetryAttempts = conf.getInt( + DFSConfigKeys.DFS_HTTP_CLIENT_RETRY_MAX_ATTEMPTS_KEY, + DFSConfigKeys.DFS_HTTP_CLIENT_RETRY_MAX_ATTEMPTS_DEFAULT); int failoverSleepBaseMillis = conf.getInt( DFSConfigKeys.DFS_HTTP_CLIENT_FAILOVER_SLEEPTIME_BASE_KEY, DFSConfigKeys.DFS_HTTP_CLIENT_FAILOVER_SLEEPTIME_BASE_DEFAULT); @@ -197,7 +200,7 @@ public class WebHdfsFileSystem extends F this.retryPolicy = RetryPolicies .failoverOnNetworkException(RetryPolicies.TRY_ONCE_THEN_FAIL, - maxFailoverAttempts, failoverSleepBaseMillis, + maxFailoverAttempts, maxRetryAttempts, failoverSleepBaseMillis, failoverSleepMaxMillis); } Modified: hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsUserGuide.apt.vm URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsUserGuide.apt.vm?rev=1565519&r1=1565518&r2=1565519&view=diff ============================================================================== --- hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsUserGuide.apt.vm (original) +++ hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsUserGuide.apt.vm Fri Feb 7 02:43:04 2014 @@ -435,7 +435,7 @@ HDFS Users Guide state it was in before the upgrade. HDFS upgrade is described in more detail in {{{http://wiki.apache.org/hadoop/Hadoop_Upgrade}Hadoop Upgrade}} Wiki page. HDFS can have one such backup at a time. Before upgrading, - administrators need to remove existing backupusing bin/hadoop dfsadmin + administrators need to remove existing backup using bin/hadoop dfsadmin <<<-finalizeUpgrade>>> command. The following briefly describes the typical upgrade procedure: @@ -459,6 +459,33 @@ HDFS Users Guide * start the cluster with rollback option. (<<>>). + When upgrading to a new version of HDFS, it is necessary to rename or + delete any paths that are reserved in the new version of HDFS. If the + NameNode encounters a reserved path during upgrade, it will print an + error like the following: + + <<< /.reserved is a reserved path and .snapshot is a + reserved path component in this version of HDFS. Please rollback and delete + or rename this path, or upgrade with the -renameReserved [key-value pairs] + option to automatically rename these paths during upgrade.>>> + + Specifying <<<-upgrade -renameReserved [optional key-value pairs]>>> causes + the NameNode to automatically rename any reserved paths found during + startup. 
Modified: hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsUserGuide.apt.vm
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsUserGuide.apt.vm?rev=1565519&r1=1565518&r2=1565519&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsUserGuide.apt.vm (original)
+++ hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsUserGuide.apt.vm Fri Feb 7 02:43:04 2014
@@ -435,7 +435,7 @@ HDFS Users Guide
   state it was in before the upgrade. HDFS upgrade is described in more
   detail in {{{http://wiki.apache.org/hadoop/Hadoop_Upgrade}Hadoop Upgrade}}
   Wiki page. HDFS can have one such backup at a time. Before upgrading,
-  administrators need to remove existing backupusing bin/hadoop dfsadmin
+  administrators need to remove existing backup using bin/hadoop dfsadmin
   <<<-finalizeUpgrade>>> command. The following briefly describes the
   typical upgrade procedure:
 
@@ -459,6 +459,33 @@ HDFS Users Guide
 
     * start the cluster with rollback option. (<<>>).
 
+  When upgrading to a new version of HDFS, it is necessary to rename or
+  delete any paths that are reserved in the new version of HDFS. If the
+  NameNode encounters a reserved path during upgrade, it will print an
+  error like the following:
+
+  <<< /.reserved is a reserved path and .snapshot is a
+  reserved path component in this version of HDFS. Please rollback and delete
+  or rename this path, or upgrade with the -renameReserved [key-value pairs]
+  option to automatically rename these paths during upgrade.>>>
+
+  Specifying <<<-upgrade -renameReserved [optional key-value pairs]>>> causes
+  the NameNode to automatically rename any reserved paths found during
+  startup. For example, to rename all paths named <<<.snapshot>>> to
+  <<<.my-snapshot>>> and <<<.reserved>>> to <<<.my-reserved>>>, a user would
+  specify <<<-upgrade -renameReserved
+  .snapshot=.my-snapshot,.reserved=.my-reserved>>>.
+
+  If no key-value pairs are specified with <<<-renameReserved>>>, the
+  NameNode will then suffix reserved paths with
+  <<<..UPGRADE_RENAMED>>>, e.g.
+  <<<.snapshot.-51.UPGRADE_RENAMED>>>.
+
+  There are some caveats to this renaming process. It's recommended,
+  if possible, to first <<>> before upgrading.
+  This is because data inconsistency can result if an edit log operation
+  refers to the destination of an automatically renamed file.
+
 * File Permissions and Security
 
   The file permissions are designed to be similar to file permissions on
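As a rough illustration of the key-value syntax described in the guide text above, rename pairs can be split on commas and equals signs. The helper below is hypothetical and is not the FSImageFormat parser:

  // Hypothetical helper, not the FSImageFormat parser: splits a spec such as
  // ".snapshot=.my-snapshot,.reserved=.my-reserved" into rename pairs.
  import java.util.HashMap;
  import java.util.Map;

  public class RenameReservedPairsSketch {
    static Map<String, String> parsePairs(String spec) {
      Map<String, String> renames = new HashMap<String, String>();
      for (String pair : spec.split(",")) {
        String[] kv = pair.split("=", 2);
        if (kv.length != 2 || kv[0].isEmpty() || kv[1].isEmpty()) {
          throw new IllegalArgumentException("Malformed pair: " + pair);
        }
        renames.put(kv[0], kv[1]); // reserved name -> replacement name
      }
      return renames;
    }

    public static void main(String[] args) {
      System.out.println(
          parsePairs(".snapshot=.my-snapshot,.reserved=.my-reserved"));
    }
  }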

Modified: hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/site/xdoc/HdfsSnapshots.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/site/xdoc/HdfsSnapshots.xml?rev=1565519&r1=1565518&r2=1565519&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/site/xdoc/HdfsSnapshots.xml (original)
+++ hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/site/xdoc/HdfsSnapshots.xml Fri Feb 7 02:43:04 2014
@@ -20,7 +20,7 @@
   xsi:schemaLocation="http://maven.apache.org/XDOC/2.0 http://maven.apache.org/xsd/xdoc-2.0.xsd">
 
-    HFDS Snapshots
+    HDFS Snapshots
 
@@ -99,15 +99,22 @@
     Copying a file from snapshot s0: hdfs dfs -cp /foo/.snapshot/s0/bar /tmp
-
-    Note that the name ".snapshot" is now a reserved file name in HDFS
-    so that users cannot create a file/directory with ".snapshot" as the name.
-    If ".snapshot" is used in a previous version of HDFS, it must be renamed before upgrade;
-    otherwise, upgrade will fail.
-
+
+
+    The HDFS snapshot feature introduces a new reserved path name used to
+    interact with snapshots: .snapshot. When upgrading from an
+    older version of HDFS, existing paths named .snapshot need
+    to first be renamed or deleted to avoid conflicting with the reserved path.
+    See the upgrade section in
+    the HDFS user guide
+    for more information.
+
+

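For illustration, a hypothetical pre-upgrade check (not part of this patch) that uses the public FileSystem API to locate existing paths named .snapshot, which would conflict with the reserved name after upgrade:

  // Hypothetical pre-upgrade check, not part of this patch: walks the
  // namespace and reports paths whose final component is ".snapshot".
  import java.io.IOException;
  import java.util.ArrayDeque;
  import java.util.Deque;

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.FileStatus;
  import org.apache.hadoop.fs.FileSystem;
  import org.apache.hadoop.fs.Path;

  public class FindReservedPathsSketch {
    public static void main(String[] args) throws IOException {
      FileSystem fs = FileSystem.get(new Configuration());
      Deque<Path> toVisit = new ArrayDeque<Path>();
      toVisit.add(new Path("/"));
      while (!toVisit.isEmpty()) {
        for (FileStatus status : fs.listStatus(toVisit.remove())) {
          Path p = status.getPath();
          if (".snapshot".equals(p.getName())) {
            System.out.println("Conflicting path: " + p);
          }
          if (status.isDirectory()) {
            toVisit.add(p);  // descend into subdirectories
          }
        }
      }
    }
  }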
Modified: hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java?rev=1565519&r1=1565518&r2=1565519&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java (original)
+++ hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java Fri Feb 7 02:43:04 2014
@@ -27,6 +27,7 @@ import java.io.File;
 import java.io.FileOutputStream;
 import java.io.FileReader;
 import java.io.IOException;
+import java.util.ArrayList;
 import java.util.Iterator;
 import java.util.LinkedList;
 import java.util.TreeMap;
@@ -43,7 +44,9 @@ import org.apache.hadoop.hdfs.protocol.D
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
+import org.apache.hadoop.hdfs.server.namenode.FSImageFormat;
 import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil;
+import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.log4j.Logger;
 import org.junit.Test;
@@ -67,6 +70,7 @@ public class TestDFSUpgradeFromImage {
   private static final String HADOOP_DFS_DIR_TXT = "hadoop-dfs-dir.txt";
   private static final String HADOOP22_IMAGE = "hadoop-22-dfs-dir.tgz";
   private static final String HADOOP1_BBW_IMAGE = "hadoop1-bbw.tgz";
+  private static final String HADOOP2_RESERVED_IMAGE = "hadoop-2-reserved.tgz";
 
   private static class ReferenceFileInfo {
     String path;
@@ -320,6 +324,87 @@ public class TestDFSUpgradeFromImage {
       assertEquals("Upgrade did not fail with bad MD5", 1, md5failures);
     }
   }
+
+  /**
+   * Test upgrade from 2.0 image with a variety of .snapshot and .reserved
+   * paths to test renaming on upgrade
+   */
+  @Test
+  public void testUpgradeFromRel2ReservedImage() throws IOException {
+    unpackStorage(HADOOP2_RESERVED_IMAGE);
+    MiniDFSCluster cluster = null;
+    // Try it once without setting the upgrade flag to ensure it fails
+    try {
+      cluster =
+          new MiniDFSCluster.Builder(new Configuration())
+              .format(false)
+              .startupOption(StartupOption.UPGRADE)
+              .numDataNodes(0).build();
+    } catch (IllegalArgumentException e) {
+      GenericTestUtils.assertExceptionContains(
+          "reserved path component in this version",
+          e);
+    } finally {
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
+    // Try it again with a custom rename string
+    try {
+      FSImageFormat.setRenameReservedPairs(
+          ".snapshot=.user-snapshot," +
+          ".reserved=.my-reserved");
+      cluster =
+          new MiniDFSCluster.Builder(new Configuration())
+              .format(false)
+              .startupOption(StartupOption.UPGRADE)
+              .numDataNodes(0).build();
+      // Make sure the paths were renamed as expected
+      DistributedFileSystem dfs = cluster.getFileSystem();
+      ArrayList<Path> toList = new ArrayList<Path>();
+      ArrayList<String> found = new ArrayList<String>();
+      toList.add(new Path("/"));
+      while (!toList.isEmpty()) {
+        Path p = toList.remove(0);
+        FileStatus[] statuses = dfs.listStatus(p);
+        for (FileStatus status: statuses) {
+          final String path = status.getPath().toUri().getPath();
+          System.out.println("Found path " + path);
+          found.add(path);
+          if (status.isDirectory()) {
+            toList.add(status.getPath());
+          }
+        }
+      }
+      String[] expected = new String[] {
+        "/edits",
+        "/edits/.reserved",
+        "/edits/.user-snapshot",
+        "/edits/.user-snapshot/editsdir",
+        "/edits/.user-snapshot/editsdir/editscontents",
+        "/edits/.user-snapshot/editsdir/editsdir2",
+        "/image",
+        "/image/.reserved",
+        "/image/.user-snapshot",
+        "/image/.user-snapshot/imagedir",
+        "/image/.user-snapshot/imagedir/imagecontents",
+        "/image/.user-snapshot/imagedir/imagedir2",
+        "/.my-reserved",
+        "/.my-reserved/edits-touch",
+        "/.my-reserved/image-touch"
+      };
+
+      for (String s: expected) {
+        assertTrue("Did not find expected path " + s, found.contains(s));
+      }
+      assertEquals("Found an unexpected path while listing filesystem",
+          found.size(), expected.length);
+    } finally {
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
+  }
 
   static void recoverAllLeases(DFSClient dfs, Path path) throws IOException {