Subject: svn commit: r1541342 [1/2] - in /hadoop/common/branches/HDFS-2832/hadoop-hdfs-project: hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/ hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/service/security/ hadoop-hdfs-nfs/src/test/...
Date: Wed, 13 Nov 2013 01:14:38 -0000
To: hdfs-commits@hadoop.apache.org
From: szetszwo@apache.org
Message-Id: <20131113011440.25E3C23889D5@eris.apache.org>

Author: szetszwo
Date: Wed Nov 13 01:14:37 2013
New Revision: 1541342

URL: http://svn.apache.org/r1541342
Log:
Merge r1535792 through r1541341 from trunk.
Modified:
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/   (props changed)
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSKerberosAuthenticationHandler.java
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSWithKerberos.java
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/service/security/TestDelegationTokenManagerService.java
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestWrites.java
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/   (props changed)
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/HdfsBlockLocation.java
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CachePool.java
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectoryWithSnapshot.java
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAbandonBlock.java
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestConnCache.java
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientExcludedNodes.java
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSOutputStream.java
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileInputStreamCache.java
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReport.java
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestCachingStrategy.java
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestFsDatasetCache.java
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestMultipleNNDataBlockScanner.java
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/TestAvailableSpaceVolumeChoosingPolicy.java
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPathBasedCacheRequests.java
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestXMLUtils.java
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestURLConnectionFactory.java
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/net/TestNetworkTopology.java

Propchange: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs:r1540910-1541341

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSKerberosAuthenticationHandler.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSKerberosAuthenticationHandler.java?rev=1541342&r1=1541341&r2=1541342&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSKerberosAuthenticationHandler.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSKerberosAuthenticationHandler.java Wed Nov 13 01:14:37 2013
@@ -18,7 +18,6 @@ package org.apache.hadoop.fs.http.server;
 
-import junit.framework.Assert;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.http.client.HttpFSFileSystem;
 import org.apache.hadoop.fs.http.client.HttpFSKerberosAuthenticator;
@@ -36,6 +35,7 @@ import org.apache.hadoop.test.TestDir;
 import org.apache.hadoop.test.TestDirHelper;
 import org.json.simple.JSONObject;
 import org.json.simple.parser.JSONParser;
+import org.junit.Assert;
 import org.junit.Test;
 import org.mockito.Mockito;

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSWithKerberos.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSWithKerberos.java?rev=1541342&r1=1541341&r2=1541342&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSWithKerberos.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSWithKerberos.java Wed Nov 13 01:14:37 2013
@@ -17,7 +17,6 @@ */
 package org.apache.hadoop.fs.http.server;
 
-import junit.framework.Assert;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.fs.DelegationTokenRenewer;
@@ -40,6 +39,7 @@ import org.apache.hadoop.test.TestJettyH
 import org.json.simple.JSONObject;
 import org.json.simple.parser.JSONParser;
 import org.junit.After;
+import org.junit.Assert;
 import org.junit.Test;
 import org.mortbay.jetty.Server;
 import org.mortbay.jetty.webapp.WebAppContext;

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/service/security/TestDelegationTokenManagerService.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/service/security/TestDelegationTokenManagerService.java?rev=1541342&r1=1541341&r2=1541342&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/service/security/TestDelegationTokenManagerService.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/service/security/TestDelegationTokenManagerService.java Wed Nov 13 01:14:37 2013
@@ -18,7 +18,6 @@ package org.apache.hadoop.lib.service.security;
 
-import junit.framework.Assert;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.http.server.HttpFSServerWebApp;
 import org.apache.hadoop.lib.server.Server;
@@ -30,6 +29,7 @@ import org.apache.hadoop.test.HTestCase;
 import org.apache.hadoop.test.TestDir;
 import org.apache.hadoop.test.TestDirHelper;
 import org.apache.hadoop.util.StringUtils;
+import org.junit.Assert;
 import org.junit.Test;
 
 import java.net.InetAddress;

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestWrites.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestWrites.java?rev=1541342&r1=1541341&r2=1541342&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestWrites.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestWrites.java Wed Nov 13 01:14:37 2013
@@ -26,8 +26,6 @@ import java.nio.ByteBuffer;
 import java.util.Arrays;
 import java.util.concurrent.ConcurrentNavigableMap;
 
-import junit.framework.Assert;
-
 import org.apache.hadoop.hdfs.DFSClient;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
@@ -49,6 +47,7 @@ import org.apache.hadoop.nfs.nfs3.respon
 import org.apache.hadoop.nfs.nfs3.response.READ3Response;
 import org.apache.hadoop.oncrpc.XDR;
 import org.apache.hadoop.oncrpc.security.SecurityHandler;
+import org.junit.Assert;
 import org.junit.Test;
 import org.mockito.Mockito;
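All four test files above make the same mechanical substitution, tracked as HDFS-5495: junit.framework.Assert is the JUnit 3 class, deprecated under JUnit 4, which exposes the same static methods on org.junit.Assert. A minimal sketch of the migrated style (the test class and assertion here are illustrative, not taken from the patch):

    // Illustrative JUnit 4 test; callers that already use Assert's static
    // methods only need the import line to change.
    import static org.junit.Assert.assertEquals;  // was junit.framework.Assert

    import org.junit.Test;

    public class ExampleMigratedTest {
      @Test
      public void testArithmetic() {
        assertEquals("unexpected sum", 4, 2 + 2);
      }
    }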
Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1541342&r1=1541341&r2=1541342&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Wed Nov 13 01:14:37 2013
@@ -190,6 +190,8 @@ Trunk (Unreleased)
 
     HDFS-5326. add modifyDirective to cacheAdmin. (cmccabe)
 
+    HDFS-5450. Better API for getting the cached blocks locations. (wang)
+
   OPTIMIZATIONS
 
     HDFS-5349. DNA_CACHE and DNA_UNCACHE should be by blockId only. (cmccabe)
@@ -468,6 +470,9 @@ Release 2.3.0 - UNRELEASED
 
     HDFS-5467. Remove tab characters in hdfs-default.xml.
     (Shinichi Yamashita via Andrew Wang)
 
+    HDFS-5495. Remove further JUnit3 usages from HDFS.
+    (Jarek Jarcec Cecho via wang)
+
   OPTIMIZATIONS
 
     HDFS-5239. Allow FSNamesystem lock fairness to be configurable (daryn)
@@ -526,6 +531,9 @@ Release 2.3.0 - UNRELEASED
 
     HDFS-5488. Clean up TestHftpURLTimeout. (Haohui Mai via jing9)
 
+    HDFS-5425. Renaming underconstruction file with snapshots can make NN
+    failure on restart. (jing9 and Vinay)
+
 Release 2.2.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

Propchange: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java:r1540910-1541341

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/HdfsBlockLocation.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/HdfsBlockLocation.java?rev=1541342&r1=1541341&r2=1541342&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/HdfsBlockLocation.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/HdfsBlockLocation.java Wed Nov 13 01:14:37 2013
@@ -37,8 +37,7 @@ public class HdfsBlockLocation extends B
   public HdfsBlockLocation(BlockLocation loc, LocatedBlock block)
       throws IOException {
     // Initialize with data from passed in BlockLocation
-    super(loc.getNames(), loc.getHosts(), loc.getTopologyPaths(),
-        loc.getOffset(), loc.getLength(), loc.isCorrupt());
+    super(loc);
     this.block = block;
   }
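The HdfsBlockLocation change above swaps a field-by-field super(...) call for BlockLocation's copy constructor, so the subclass no longer has to be touched every time BlockLocation gains a field (such as the cached hosts introduced by HDFS-5450). A simplified sketch of that pattern, not the real org.apache.hadoop.fs.BlockLocation:

    // Hypothetical, trimmed-down stand-in for BlockLocation.
    class Location {
      private final String[] hosts;
      private final String[] cachedHosts;  // field added later
      private final long offset;
      private final long length;

      Location(String[] hosts, String[] cachedHosts, long offset, long length) {
        this.hosts = hosts;
        this.cachedHosts = cachedHosts;
        this.offset = offset;
        this.length = length;
      }

      // Copy constructor: a subclass calling super(loc) picks up newly
      // added fields (cachedHosts here) without any change of its own.
      Location(Location loc) {
        this(loc.hosts, loc.cachedHosts, loc.offset, loc.length);
      }
    }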
Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java?rev=1541342&r1=1541341&r2=1541342&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java Wed Nov 13 01:14:37 2013
@@ -436,7 +436,13 @@ public class DFSUtil {
             locations[hCnt].getNetworkLocation());
         racks[hCnt] = node.toString();
       }
-      blkLocations[idx] = new BlockLocation(xferAddrs, hosts, racks,
+      DatanodeInfo[] cachedLocations = blk.getCachedLocations();
+      String[] cachedHosts = new String[cachedLocations.length];
+      for (int i=0; i

     List<PathBasedCacheEntry> entries = entriesByPath.get(path);
     if (entries == null || !entries.remove(existing)) {
-      throw new IdNotFoundException("removeInternal: failed to locate entry " +
+      throw new InvalidRequestException("Failed to locate entry " +
           existing.getEntryId() + " by path " + existing.getPath());
     }
     if (entries.size() == 0) {
@@ -413,32 +433,17 @@ throws IOException {
     assert namesystem.hasWriteLock();
     try {
-      // Check for invalid IDs.
-      if (id <= 0) {
-        throw new IdNotFoundException("removeDirective " + id + ": invalid " +
-            "non-positive directive ID.");
-      }
-      // Find the entry.
-      PathBasedCacheEntry existing = entriesById.get(id);
-      if (existing == null) {
-        throw new IdNotFoundException("removeDirective " + id +
-            ": id not found.");
-      }
-      if ((pc != null) &&
-          (!pc.checkPermission(existing.getPool(), FsAction.WRITE))) {
-        throw new AccessControlException("removeDirective " + id +
-            ": write permission denied on pool " +
-            existing.getPool().getPoolName());
-      }
+      PathBasedCacheEntry existing = getById(id);
+      checkWritePermission(pc, existing.getPool());
       removeInternal(existing);
     } catch (IOException e) {
-      LOG.warn("removeDirective " + id + " failed.", e);
+      LOG.warn("removeDirective of " + id + " failed: ", e);
       throw e;
     }
     if (monitor != null) {
       monitor.kick();
     }
-    LOG.info("removeDirective " + id + ": succeeded.");
+    LOG.info("removeDirective of " + id + " successful.");
   }
 
   public BatchedListEntries<PathBasedCacheDirective>
@@ -449,18 +454,13 @@
     final int NUM_PRE_ALLOCATED_ENTRIES = 16;
     String filterPath = null;
     if (filter.getId() != null) {
-      throw new IOException("we currently don't support filtering by ID");
+      throw new IOException("Filtering by ID is unsupported.");
     }
     if (filter.getPath() != null) {
-      filterPath = filter.getPath().toUri().getPath();
-      if (!DFSUtil.isValidName(filterPath)) {
-        throw new IOException("listPathBasedCacheDirectives: invalid " +
-            "path name '" + filterPath + "'");
-      }
+      filterPath = validatePath(filter);
     }
     if (filter.getReplication() != null) {
-      throw new IOException("we currently don't support filtering " +
-          "by replication");
+      throw new IOException("Filtering by replication is unsupported.");
     }
     ArrayList<PathBasedCacheDirective> replies =
         new ArrayList<PathBasedCacheDirective>(NUM_PRE_ALLOCATED_ENTRIES);
@@ -481,8 +481,15 @@
           !directive.getPath().toUri().getPath().equals(filterPath)) {
         continue;
       }
-      if ((pc == null) ||
-          (pc.checkPermission(curEntry.getPool(), FsAction.READ))) {
+      boolean hasPermission = true;
+      if (pc != null) {
+        try {
+          pc.checkPermission(curEntry.getPool(), FsAction.READ);
+        } catch (AccessControlException e) {
+          hasPermission = false;
+        }
+      }
+      if (hasPermission) {
         replies.add(cur.getValue().toDirective());
         numReplies++;
       }
@@ -505,12 +512,13 @@
     String poolName = info.getPoolName();
     CachePool pool = cachePools.get(poolName);
     if (pool != null) {
-      throw new IOException("cache pool " + poolName + " already exists.");
+      throw new InvalidRequestException("Cache pool " + poolName +
+          " already exists.");
     }
     pool = CachePool.createFromInfoAndDefaults(info);
     cachePools.put(pool.getPoolName(), pool);
-    LOG.info("created new cache pool " + pool);
-    return pool.getInfo(true);
+    LOG.info("Created new cache pool " + pool);
+    return pool.getInfo(null);
   }
 
   /**
@@ -528,7 +536,8 @@
     String poolName = info.getPoolName();
     CachePool pool = cachePools.get(poolName);
     if (pool == null) {
-      throw new IOException("cache pool " + poolName + " does not exist.");
+ poolName + " does not exist."); + throw new InvalidRequestException("Cache pool " + poolName + + " does not exist."); } StringBuilder bld = new StringBuilder(); String prefix = ""; @@ -575,7 +584,8 @@ public final class CacheManager { CachePoolInfo.validateName(poolName); CachePool pool = cachePools.remove(poolName); if (pool == null) { - throw new IOException("can't remove non-existent cache pool " + poolName); + throw new InvalidRequestException( + "Cannot remove non-existent cache pool " + poolName); } // Remove entries using this pool @@ -607,11 +617,7 @@ public final class CacheManager { if (numListed++ >= maxListCachePoolsResponses) { return new BatchedListEntries(results, true); } - if (pc == null) { - results.add(cur.getValue().getInfo(true)); - } else { - results.add(cur.getValue().getInfo(pc)); - } + results.add(cur.getValue().getInfo(pc)); } return new BatchedListEntries(results, false); } @@ -755,7 +761,7 @@ public final class CacheManager { Counter counter = prog.getCounter(Phase.SAVING_CHECKPOINT, step); out.writeInt(cachePools.size()); for (CachePool pool: cachePools.values()) { - pool.getInfo(true).writeTo(out); + pool.getInfo(null).writeTo(out); counter.increment(); } prog.endStep(Phase.SAVING_CHECKPOINT, step); Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CachePool.java URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CachePool.java?rev=1541342&r1=1541341&r2=1541342&view=diff ============================================================================== --- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CachePool.java (original) +++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CachePool.java Wed Nov 13 01:14:37 2013 @@ -27,6 +27,7 @@ import org.apache.hadoop.classification. import org.apache.hadoop.fs.permission.FsAction; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hdfs.protocol.CachePoolInfo; +import org.apache.hadoop.security.AccessControlException; import org.apache.hadoop.security.UserGroupInformation; import com.google.common.base.Preconditions; @@ -162,7 +163,7 @@ public final class CachePool { } /** - * Get information about this cache pool. + * Get either full or partial information about this CachePool. * * @param fullInfo * If true, only the name will be returned (i.e., what you @@ -170,7 +171,7 @@ public final class CachePool { * @return * Cache pool information. */ - public CachePoolInfo getInfo(boolean fullInfo) { + private CachePoolInfo getInfo(boolean fullInfo) { CachePoolInfo info = new CachePoolInfo(poolName); if (!fullInfo) { return info; @@ -181,8 +182,25 @@ public final class CachePool { setWeight(weight); } + /** + * Returns a CachePoolInfo describing this CachePool based on the permissions + * of the calling user. Unprivileged users will see only minimal descriptive + * information about the pool. 
Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CachePool.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CachePool.java?rev=1541342&r1=1541341&r2=1541342&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CachePool.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CachePool.java Wed Nov 13 01:14:37 2013
@@ -27,6 +27,7 @@ import org.apache.hadoop.classification.
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
+import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.UserGroupInformation;
 
 import com.google.common.base.Preconditions;
@@ -162,7 +163,7 @@ public final class CachePool {
   }
 
   /**
-   * Get information about this cache pool.
+   * Get either full or partial information about this CachePool.
    *
    * @param fullInfo
    *          If true, only the name will be returned (i.e., what you
@@ -170,7 +171,7 @@ public final class CachePool {
   * @return
   *          Cache pool information.
   */
-  public CachePoolInfo getInfo(boolean fullInfo) {
+  private CachePoolInfo getInfo(boolean fullInfo) {
     CachePoolInfo info = new CachePoolInfo(poolName);
     if (!fullInfo) {
       return info;
@@ -181,8 +182,25 @@ setWeight(weight);
   }
 
+  /**
+   * Returns a CachePoolInfo describing this CachePool based on the permissions
+   * of the calling user. Unprivileged users will see only minimal descriptive
+   * information about the pool.
+   *
+   * @param pc Permission checker to be used to validate the user's permissions,
+   *          or null
+   * @return CachePoolInfo describing this CachePool
+   */
   public CachePoolInfo getInfo(FSPermissionChecker pc) {
-    return getInfo(pc.checkPermission(this, FsAction.READ));
+    boolean hasPermission = true;
+    if (pc != null) {
+      try {
+        pc.checkPermission(this, FsAction.READ);
+      } catch (AccessControlException e) {
+        hasPermission = false;
+      }
+    }
+    return getInfo(hasPermission);
   }
 
   public String toString() {

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java?rev=1541342&r1=1541341&r2=1541342&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java Wed Nov 13 01:14:37 2013
@@ -30,7 +30,6 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
@@ -84,7 +83,6 @@ import org.apache.hadoop.hdfs.server.nam
 import org.apache.hadoop.hdfs.server.namenode.startupprogress.Step;
 import org.apache.hadoop.hdfs.util.ChunkedArrayList;
 import org.apache.hadoop.hdfs.util.Holder;
-import org.apache.jasper.tagplugins.jstl.core.Remove;
 
 import com.google.common.base.Joiner;

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java?rev=1541342&r1=1541341&r2=1541342&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java Wed Nov 13 01:14:37 2013
@@ -824,7 +824,12 @@ public class FSImageFormat {
     final INodesInPath iip = fsDir.getLastINodeInPath(path);
     INodeFile oldnode = INodeFile.valueOf(iip.getINode(0), path);
     cons.setLocalName(oldnode.getLocalNameBytes());
-    cons.setParent(oldnode.getParent());
+    INodeReference parentRef = oldnode.getParentReference();
+    if (parentRef != null) {
+      cons.setParentReference(parentRef);
+    } else {
+      cons.setParent(oldnode.getParent());
+    }
 
     if (oldnode instanceof INodeFileWithSnapshot) {
       cons = new INodeFileUnderConstructionWithSnapshot(cons,

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java?rev=1541342&r1=1541341&r2=1541342&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java Wed Nov 13 01:14:37 2013
@@ -2557,7 +2557,7 @@ public class FSNamesystem implements Nam
     final INode[] inodes = analyzeFileState(
         src, fileId, clientName, previous, onRetryBlock).getINodes();
     final INodeFileUnderConstruction pendingFile =
-        (INodeFileUnderConstruction) inodes[inodes.length - 1];
+        (INodeFileUnderConstruction) inodes[inodes.length - 1].asFile();
 
     if (onRetryBlock[0] != null && onRetryBlock[0].getLocations().length > 0) {
       // This is a retry. Just return the last block if having locations.
@@ -2595,7 +2595,7 @@ public class FSNamesystem implements Nam
         analyzeFileState(src, fileId, clientName, previous, onRetryBlock);
     INode[] inodes = inodesInPath.getINodes();
     final INodeFileUnderConstruction pendingFile =
-        (INodeFileUnderConstruction) inodes[inodes.length - 1];
+        (INodeFileUnderConstruction) inodes[inodes.length - 1].asFile();
 
     if (onRetryBlock[0] != null) {
       if (onRetryBlock[0].getLocations().length > 0) {

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java?rev=1541342&r1=1541341&r2=1541342&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java Wed Nov 13 01:14:37 2013
@@ -261,24 +261,27 @@ class FSPermissionChecker {
    *
    * @param pool CachePool being accessed
    * @param access type of action being performed on the cache pool
-   * @return if the pool can be accessed
+   * @throws AccessControlException if pool cannot be accessed
    */
-  public boolean checkPermission(CachePool pool, FsAction access) {
+  public void checkPermission(CachePool pool, FsAction access)
+      throws AccessControlException {
     FsPermission mode = pool.getMode();
     if (isSuperUser()) {
-      return true;
+      return;
     }
     if (user.equals(pool.getOwnerName())
        && mode.getUserAction().implies(access)) {
-      return true;
+      return;
    }
    if (groups.contains(pool.getGroupName())
        && mode.getGroupAction().implies(access)) {
-      return true;
    }
    if (mode.getOtherAction().implies(access)) {
-      return true;
+      return;
    }
-    return false;
+    throw new AccessControlException("Permission denied while accessing pool " +
+        pool.getPoolName() + ": user " + user + " does not have " +
+        access.toString() + " permissions.");
  }
}
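checkPermission changes from returning a boolean to returning void and throwing, so the denial message (pool name, user, requested action) is composed in exactly one place. Callers that merely probe for access, such as the listing paths earlier in this diff, now wrap the call:

    // Probe-style caller after this change; pc and pool as in the hunks above.
    boolean hasPermission = true;
    try {
      pc.checkPermission(pool, FsAction.READ);  // returns silently on success
    } catch (AccessControlException e) {
      hasPermission = false;  // degrade to partial info instead of failing
    }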
Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectoryWithSnapshot.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectoryWithSnapshot.java?rev=1541342&r1=1541341&r2=1541342&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectoryWithSnapshot.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectoryWithSnapshot.java Wed Nov 13 01:14:37 2013
@@ -595,7 +595,15 @@ public class INodeDirectoryWithSnapshot
   public void replaceChild(final INode oldChild, final INode newChild,
       final INodeMap inodeMap) {
     super.replaceChild(oldChild, newChild, inodeMap);
-    diffs.replaceChild(ListType.CREATED, oldChild, newChild);
+    if (oldChild.getParentReference() != null && !newChild.isReference()) {
+      // oldChild is referred by a Reference node. Thus we are replacing the
+      // referred inode, e.g.,
+      // INodeFileWithSnapshot -> INodeFileUnderConstructionWithSnapshot
+      // in this case, we do not need to update the diff list
+      return;
+    } else {
+      diffs.replaceChild(ListType.CREATED, oldChild, newChild);
+    }
   }
 
   /**

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java?rev=1541342&r1=1541341&r2=1541342&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java Wed Nov 13 01:14:37 2013
@@ -395,6 +395,7 @@ public class JsonUtil {
     m.put("startOffset", locatedblock.getStartOffset());
     m.put("block", toJsonMap(locatedblock.getBlock()));
     m.put("locations", toJsonArray(locatedblock.getLocations()));
+    m.put("cachedLocations", toJsonArray(locatedblock.getCachedLocations()));
     return m;
   }
 
@@ -409,8 +410,11 @@
         (Object[])m.get("locations"));
     final long startOffset = (Long)m.get("startOffset");
     final boolean isCorrupt = (Boolean)m.get("isCorrupt");
+    final DatanodeInfo[] cachedLocations = toDatanodeInfoArray(
+        (Object[])m.get("cachedLocations"));
 
-    final LocatedBlock locatedblock = new LocatedBlock(b, locations, startOffset, isCorrupt);
+    final LocatedBlock locatedblock = new LocatedBlock(b, locations,
+        startOffset, isCorrupt, cachedLocations);
     locatedblock.setBlockToken(toBlockToken((Map)m.get("blockToken")));
     return locatedblock;
   }
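With the JsonUtil changes above, the serialized form of a located block carries a cachedLocations array alongside locations, and the parser feeds it straight into the LocatedBlock constructor, so cached-replica information survives a WebHDFS JSON round trip. Roughly, the map now has this shape (field names from the hunks; the nested datanode objects are elided):

    {
      "blockToken"      : { ... },
      "block"           : { ... },
      "startOffset"     : 0,
      "isCorrupt"       : false,
      "locations"       : [ { datanode }, ... ],
      "cachedLocations" : [ { datanode }, ... ]
    }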
Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto?rev=1541342&r1=1541341&r2=1541342&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto Wed Nov 13 01:14:37 2013
@@ -407,23 +407,23 @@ message ListPathBasedCacheDirectivesResp
   required bool hasMore = 2;
 }
 
-message AddCachePoolRequestProto {
-  required string poolName = 1;
+message CachePoolInfoProto {
+  optional string poolName = 1;
   optional string ownerName = 2;
   optional string groupName = 3;
   optional int32 mode = 4;
   optional int32 weight = 5;
 }
 
+message AddCachePoolRequestProto {
+  required CachePoolInfoProto info = 1;
+}
+
 message AddCachePoolResponseProto { // void response
 }
 
 message ModifyCachePoolRequestProto {
-  required string poolName = 1;
-  optional string ownerName = 2;
-  optional string groupName = 3;
-  optional int32 mode = 4;
-  optional int32 weight = 5;
+  required CachePoolInfoProto info = 1;
 }
 
 message ModifyCachePoolResponseProto { // void response
@@ -446,11 +446,7 @@ message ListCachePoolsResponseProto {
 }
 
 message ListCachePoolsResponseElementProto {
-  required string poolName = 1;
-  required string ownerName = 2;
-  required string groupName = 3;
-  required int32 mode = 4;
-  required int32 weight = 5;
+  required CachePoolInfoProto info = 1;
 }
 
 message GetFileLinkInfoRequestProto {
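Factoring the five pool fields into CachePoolInfoProto removes the duplication between the add, modify, and list messages. With protobuf's generated Java classes (outer class and package names elided here), a request would be assembled through the nested builder; a hedged sketch:

    // Sketch only; assumes the classes generated from the .proto above.
    CachePoolInfoProto info = CachePoolInfoProto.newBuilder()
        .setPoolName("pool1")   // optional in the new message
        .setMode(0755)          // int32 permission bits
        .build();
    AddCachePoolRequestProto req = AddCachePoolRequestProto.newBuilder()
        .setInfo(info)
        .build();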
Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAbandonBlock.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAbandonBlock.java?rev=1541342&r1=1541341&r2=1541342&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAbandonBlock.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAbandonBlock.java Wed Nov 13 01:14:37 2013
@@ -21,8 +21,6 @@ import static org.junit.Assert.fail;
 
 import java.io.IOException;
 
-import junit.framework.Assert;
-
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -33,6 +31,7 @@ import org.apache.hadoop.hdfs.protocol.L
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
 import org.junit.After;
+import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestConnCache.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestConnCache.java?rev=1541342&r1=1541341&r2=1541342&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestConnCache.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestConnCache.java Wed Nov 13 01:14:37 2013
@@ -22,8 +22,6 @@ import static org.junit.Assert.assertEqu
 
 import java.io.IOException;
 import java.net.InetSocketAddress;
 
-import junit.framework.Assert;
-
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.Path;
@@ -32,6 +30,7 @@ import org.apache.hadoop.hdfs.protocol.E
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
 import org.apache.hadoop.hdfs.net.Peer;
 import org.apache.hadoop.security.token.Token;
+import org.junit.Assert;
 import org.junit.Test;
 import org.mockito.Matchers;
 import org.mockito.Mockito;

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientExcludedNodes.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientExcludedNodes.java?rev=1541342&r1=1541341&r2=1541342&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientExcludedNodes.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientExcludedNodes.java Wed Nov 13 01:14:37 2013
@@ -23,8 +23,6 @@
 import java.io.IOException;
 import java.io.OutputStream;
 import java.util.List;
 
-import junit.framework.Assert;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
@@ -33,6 +31,7 @@ import org.apache.hadoop.hdfs.MiniDFSClu
 import org.apache.hadoop.util.ThreadUtil;
 
 import org.junit.After;
+import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSOutputStream.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSOutputStream.java?rev=1541342&r1=1541341&r2=1541342&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSOutputStream.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSOutputStream.java Wed Nov 13 01:14:37 2013
@@ -20,12 +20,11 @@ package org.apache.hadoop.hdfs;
 
 import java.io.IOException;
 import java.util.concurrent.atomic.AtomicReference;
 
-import junit.framework.Assert;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.Path;
 import org.junit.AfterClass;
+import org.junit.Assert;
 import org.junit.BeforeClass;
 import org.junit.Test;
 import org.mockito.internal.util.reflection.Whitebox;

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileInputStreamCache.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileInputStreamCache.java?rev=1541342&r1=1541341&r2=1541342&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileInputStreamCache.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileInputStreamCache.java Wed Nov 13 01:14:37 2013
@@ -17,13 +17,13 @@
  */
 package org.apache.hadoop.hdfs;
 
-import junit.framework.Assert;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.net.unix.TemporarySocketDirectory;
+import org.junit.Assert;
 import org.junit.Test;
 
 import java.io.FileInputStream;

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReport.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReport.java?rev=1541342&r1=1541341&r2=1541342&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReport.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReport.java Wed Nov 13 01:14:37 2013
@@ -32,8 +32,6 @@
 import java.util.Random;
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.TimeoutException;
 
-import junit.framework.Assert;
-
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.commons.logging.impl.Log4JLogger;
@@ -64,6 +62,7 @@ import org.apache.hadoop.test.GenericTes
 import org.apache.hadoop.util.Time;
 import org.apache.log4j.Level;
 import org.junit.After;
+import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
 import org.mockito.Mockito;

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestCachingStrategy.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestCachingStrategy.java?rev=1541342&r1=1541341&r2=1541342&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestCachingStrategy.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestCachingStrategy.java Wed Nov 13 01:14:37 2013
@@ -22,8 +22,6 @@
 import java.util.Arrays;
 import java.util.Map;
 import java.util.TreeMap;
 
-import junit.framework.Assert;
-
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -39,6 +37,7 @@ import org.apache.hadoop.hdfs.server.nam
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.nativeio.NativeIO;
 import org.apache.hadoop.io.nativeio.NativeIO.POSIX.CacheTracker;
+import org.junit.Assert;
 import org.junit.BeforeClass;
 import org.junit.Test;

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestFsDatasetCache.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestFsDatasetCache.java?rev=1541342&r1=1541341&r2=1541342&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestFsDatasetCache.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestFsDatasetCache.java Wed Nov 13 01:14:37 2013
@@ -17,9 +17,9 @@
  */
 package org.apache.hadoop.hdfs.server.datanode;
 
-import static junit.framework.Assert.assertTrue;
 import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
 import static org.junit.Assume.assumeTrue;
 import static org.mockito.Matchers.any;
 import static org.mockito.Matchers.anyInt;
@@ -49,6 +49,7 @@ import org.apache.hadoop.hdfs.protocolPB
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetCache.PageRounder;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.MappableBlock;
+import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.MappableBlock.Mlocker;
 import org.apache.hadoop.hdfs.server.namenode.FSImage;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.protocol.BlockIdCommand;
@@ -87,6 +88,8 @@ public class TestFsDatasetCache {
   private static DatanodeProtocolClientSideTranslatorPB spyNN;
   private static PageRounder rounder = new PageRounder();
 
+  private Mlocker mlocker;
+
   @Before
   public void setUp() throws Exception {
     assumeTrue(!Path.WINDOWS);
@@ -110,6 +113,8 @@
     fsd = dn.getFSDataset();
 
     spyNN = DataNodeTestUtils.spyOnBposToNN(dn, nn);
+    // Save the current mlocker and replace it at the end of the test
+    mlocker = MappableBlock.mlocker;
   }
 
   @After
@@ -120,6 +125,8 @@
     if (cluster != null) {
       cluster.shutdown();
     }
+    // Restore the original mlocker
+    MappableBlock.mlocker = mlocker;
   }
 
   private static void setHeartbeatResponse(DatanodeCommand[] cmds)

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestMultipleNNDataBlockScanner.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestMultipleNNDataBlockScanner.java?rev=1541342&r1=1541341&r2=1541342&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestMultipleNNDataBlockScanner.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestMultipleNNDataBlockScanner.java Wed Nov 13 01:14:37 2013
@@ -20,8 +20,6 @@ package org.apache.hadoop.hdfs.server.da
 
 import java.io.IOException;
 
-import junit.framework.Assert;
-
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.commons.logging.impl.Log4JLogger;
@@ -34,12 +32,10 @@ import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.MiniDFSNNTopology;
-import org.apache.hadoop.hdfs.server.datanode.BlockPoolSliceScanner;
 import static org.apache.hadoop.hdfs.server.datanode.DataBlockScanner.SLEEP_PERIOD_MS;
 import org.apache.log4j.Level;
-import org.apache.log4j.Logger;
+import org.junit.Assert;
 import org.junit.Test;
-import org.junit.Ignore;
 import static org.junit.Assert.fail;

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/TestAvailableSpaceVolumeChoosingPolicy.java
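TestFsDatasetCache above saves the static MappableBlock.mlocker hook in setUp and restores it in tearDown, so a stubbed mlock implementation cannot leak into tests that run later in the same JVM. The general save-and-restore shape, with generic placeholder names standing in for the Hadoop types:

    // Generic illustration of the pattern; Hook and Target are placeholders.
    import org.junit.After;
    import org.junit.Before;

    public class StaticHookTest {
      interface Hook { void run(); }
      static class Target { static Hook hook = () -> { /* real work */ }; }

      private Hook saved;

      @Before
      public void setUp() {
        saved = Target.hook;      // remember the real implementation
        Target.hook = () -> { };  // install a no-op stub
      }

      @After
      public void tearDown() {
        Target.hook = saved;      // restore for subsequent tests
      }
    }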
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/TestAvailableSpaceVolumeChoosingPolicy.java?rev=1541342&r1=1541341&r2=1541342&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/TestAvailableSpaceVolumeChoosingPolicy.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/TestAvailableSpaceVolumeChoosingPolicy.java Wed Nov 13 01:14:37 2013
@@ -23,12 +23,11 @@ import static org.apache.hadoop.hdfs.DFS
 
 import java.util.ArrayList;
 import java.util.List;
 
-import junit.framework.Assert;
-
 import org.apache.hadoop.conf.Configurable;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.ReflectionUtils;
+import org.junit.Assert;
 import org.junit.Test;
 import org.mockito.Mockito;

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPathBasedCacheRequests.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPathBasedCacheRequests.java?rev=1541342&r1=1541341&r2=1541342&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPathBasedCacheRequests.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPathBasedCacheRequests.java Wed Nov 13 01:14:37 2013
@@ -17,42 +17,47 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_SIZE_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CACHEREPORT_INTERVAL_MSEC_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_SIZE_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_CACHING_ENABLED_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CACHEREPORT_INTERVAL_MSEC_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_PATH_BASED_CACHE_REFRESH_INTERVAL_MS;
-import static junit.framework.Assert.assertTrue;
-import static junit.framework.Assert.fail;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
 
 import java.io.IOException;
+import java.nio.MappedByteBuffer;
 import java.security.PrivilegedExceptionAction;
 import java.util.ArrayList;
 import java.util.Iterator;
 import java.util.LinkedList;
 import java.util.List;
 
-import junit.framework.Assert;
-
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.BlockLocation;
+import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileSystemTestHelper;
-import org.apache.hadoop.fs.IdNotFoundException;
+import org.apache.hadoop.fs.InvalidRequestException;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.RemoteIterator;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
 import org.apache.hadoop.hdfs.protocol.PathBasedCacheDirective;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor.CachedBlocksList.Type;
+import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.MappableBlock;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
 import org.apache.hadoop.io.nativeio.NativeIO;
 import org.apache.hadoop.security.AccessControlException;
@@ -77,6 +82,15 @@ public class TestPathBasedCacheRequests
   static private DistributedFileSystem dfs;
   static private NamenodeProtocols proto;
 
+  static {
+    MappableBlock.mlocker = new MappableBlock.Mlocker() {
+      @Override
+      public void mlock(MappedByteBuffer mmap, long length) throws IOException {
+        // Stubbed out for testing
+      }
+    };
+  }
+
   @Before
   public void setup() throws Exception {
     conf = new HdfsConfiguration();
@@ -187,15 +201,15 @@
       fail("expected to get an exception when " +
           "removing a non-existent pool.");
     } catch (IOException ioe) {
-      GenericTestUtils.assertExceptionContains("can't remove " +
+      GenericTestUtils.assertExceptionContains("Cannot remove " +
          "non-existent cache pool", ioe);
     }
     try {
       dfs.removeCachePool(poolName);
-      Assert.fail("expected to get an exception when " +
+      fail("expected to get an exception when " +
          "removing a non-existent pool.");
     } catch (IOException ioe) {
-      GenericTestUtils.assertExceptionContains("can't remove " +
+      GenericTestUtils.assertExceptionContains("Cannot remove " +
          "non-existent cache pool", ioe);
     }
     try {
@@ -272,18 +286,18 @@
     try {
       proto.removeCachePool("pool99");
-      Assert.fail("expected to get an exception when " +
+      fail("expected to get an exception when " +
          "removing a non-existent pool.");
     } catch (IOException ioe) {
-      GenericTestUtils.assertExceptionContains("can't remove non-existent",
+      GenericTestUtils.assertExceptionContains("Cannot remove non-existent",
          ioe);
     }
     try {
       proto.removeCachePool(poolName);
-      Assert.fail("expected to get an exception when " +
+      fail("expected to get an exception when " +
          "removing a non-existent pool.");
     } catch (IOException ioe) {
-      GenericTestUtils.assertExceptionContains("can't remove non-existent",
+      GenericTestUtils.assertExceptionContains("Cannot remove non-existent",
          ioe);
     }
 
@@ -351,8 +365,8 @@
           setPool("no_such_pool").
           build());
       fail("expected an error when adding to a non-existent pool.");
-    } catch (IdNotFoundException ioe) {
-      GenericTestUtils.assertExceptionContains("no such pool as", ioe);
+    } catch (InvalidRequestException ioe) {
+      GenericTestUtils.assertExceptionContains("Unknown pool", ioe);
     }
 
     try {
@@ -364,7 +378,7 @@
          "mode 0 (no permissions for anyone).");
     } catch (AccessControlException e) {
       GenericTestUtils.
-          assertExceptionContains("permission denied for pool", e);
+          assertExceptionContains("Permission denied while accessing pool", e);
     }
 
     try {
@@ -384,10 +398,10 @@
           setReplication((short)1).
           setPool("").
           build());
-      Assert.fail("expected an error when adding a PathBasedCache " +
+      fail("expected an error when adding a PathBasedCache " +
          "directive with an empty pool name.");
-    } catch (IdNotFoundException e) {
-      GenericTestUtils.assertExceptionContains("pool name was empty", e);
+    } catch (InvalidRequestException e) {
+      GenericTestUtils.assertExceptionContains("Invalid empty pool name", e);
     }
 
     long deltaId = addAsUnprivileged(delta);
@@ -405,7 +419,7 @@
     validateListAll(iter, alphaId, alphaId2, betaId, deltaId, relativeId );
     iter = dfs.listPathBasedCacheDirectives(
         new PathBasedCacheDirective.Builder().setPool("pool3").build());
-    Assert.assertFalse(iter.hasNext());
+    assertFalse(iter.hasNext());
     iter = dfs.listPathBasedCacheDirectives(
         new PathBasedCacheDirective.Builder().setPool("pool1").build());
     validateListAll(iter, alphaId, alphaId2, deltaId, relativeId );
@@ -416,27 +430,27 @@
     dfs.removePathBasedCacheDirective(betaId);
     iter = dfs.listPathBasedCacheDirectives(
         new PathBasedCacheDirective.Builder().setPool("pool2").build());
-    Assert.assertFalse(iter.hasNext());
+    assertFalse(iter.hasNext());
 
     try {
       dfs.removePathBasedCacheDirective(betaId);
-      Assert.fail("expected an error when removing a non-existent ID");
-    } catch (IdNotFoundException e) {
-      GenericTestUtils.assertExceptionContains("id not found", e);
+      fail("expected an error when removing a non-existent ID");
+    } catch (InvalidRequestException e) {
+      GenericTestUtils.assertExceptionContains("No directive with ID", e);
     }
 
     try {
       proto.removePathBasedCacheDirective(-42l);
-      Assert.fail("expected an error when removing a negative ID");
-    } catch (IdNotFoundException e) {
+      fail("expected an error when removing a negative ID");
+    } catch (InvalidRequestException e) {
       GenericTestUtils.assertExceptionContains(
-          "invalid non-positive directive ID", e);
+          "Invalid negative ID", e);
     }
     try {
       proto.removePathBasedCacheDirective(43l);
-      Assert.fail("expected an error when removing a non-existent ID");
-    } catch (IdNotFoundException e) {
-      GenericTestUtils.assertExceptionContains("id not found", e);
+      fail("expected an error when removing a non-existent ID");
+    } catch (InvalidRequestException e) {
+      GenericTestUtils.assertExceptionContains("No directive with ID", e);
     }
 
     dfs.removePathBasedCacheDirective(alphaId);
@@ -529,6 +543,14 @@
     assertFalse("Unexpected # of cache directives found", dit.hasNext());
   }
 
+  /**
+   * Wait for the NameNode to have an expected number of cached blocks
+   * and replicas.
@@ -529,6 +543,14 @@ public class TestPathBasedCacheRequests
     assertFalse("Unexpected # of cache directives found", dit.hasNext());
   }
 
+  /**
+   * Wait for the NameNode to have an expected number of cached blocks
+   * and replicas.
+   * @param nn NameNode
+   * @param expectedCachedBlocks expected number of cached blocks
+   * @param expectedCachedReplicas expected number of cached replicas
+   * @throws Exception
+   */
   private static void waitForCachedBlocks(NameNode nn,
       final int expectedCachedBlocks, final int expectedCachedReplicas)
       throws Exception {
@@ -569,6 +591,37 @@ public class TestPathBasedCacheRequests
     }, 500, 60000);
   }
 
+  private static void checkNumCachedReplicas(final DistributedFileSystem dfs,
+      final List<Path> paths, final int expectedBlocks,
+      final int expectedReplicas)
+      throws Exception {
+    int numCachedBlocks = 0;
+    int numCachedReplicas = 0;
+    for (Path p: paths) {
+      final FileStatus f = dfs.getFileStatus(p);
+      final long len = f.getLen();
+      final long blockSize = f.getBlockSize();
+      // round it up to full blocks
+      final long numBlocks = (len + blockSize - 1) / blockSize;
+      BlockLocation[] locs = dfs.getFileBlockLocations(p, 0, len);
+      assertEquals("Unexpected number of block locations for path " + p,
+          numBlocks, locs.length);
+      for (BlockLocation l: locs) {
+        if (l.getCachedHosts().length > 0) {
+          numCachedBlocks++;
+        }
+        numCachedReplicas += l.getCachedHosts().length;
+      }
+    }
+    LOG.info("Found " + numCachedBlocks + " of " + expectedBlocks + " blocks");
+    LOG.info("Found " + numCachedReplicas + " of " + expectedReplicas +
+        " replicas");
+    assertEquals("Unexpected number of cached blocks", expectedBlocks,
+        numCachedBlocks);
+    assertEquals("Unexpected number of cached replicas", expectedReplicas,
+        numCachedReplicas);
+  }
+
   private static final long BLOCK_SIZE = 512;
   private static final int NUM_DATANODES = 4;
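The "round it up to full blocks" line in checkNumCachedReplicas is a ceiling division done in integer arithmetic, so a file whose length is not a multiple of the block size still counts its trailing partial block. A standalone illustration with assumed sample numbers:

    public class CeilDiv {
      public static void main(String[] args) {
        long len = 1300, blockSize = 512;
        // Same expression as in checkNumCachedReplicas above.
        long numBlocks = (len + blockSize - 1) / blockSize;
        System.out.println(numBlocks); // 3: two full blocks plus a partial one
      }
    }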
@@ -745,4 +798,110 @@ public class TestPathBasedCacheRequests
     }
   }
 
+  /**
+   * Tests stepping the cache replication factor up and down, checking the
+   * number of cached replicas and blocks as well as the advertised locations.
+   * @throws Exception
+   */
+  @Test(timeout=120000)
+  public void testReplicationFactor() throws Exception {
+    Assume.assumeTrue(canTestDatanodeCaching());
+    HdfsConfiguration conf = createCachingConf();
+    MiniDFSCluster cluster =
+      new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATANODES).build();
+
+    try {
+      cluster.waitActive();
+      DistributedFileSystem dfs = cluster.getFileSystem();
+      NameNode namenode = cluster.getNameNode();
+      // Create the pool
+      final String pool = "friendlyPool";
+      dfs.addCachePool(new CachePoolInfo(pool));
+      // Create some test files
+      final List<Path> paths = new LinkedList<Path>();
+      paths.add(new Path("/foo/bar"));
+      paths.add(new Path("/foo/baz"));
+      paths.add(new Path("/foo2/bar2"));
+      paths.add(new Path("/foo2/baz2"));
+      dfs.mkdir(new Path("/foo"), FsPermission.getDirDefault());
+      dfs.mkdir(new Path("/foo2"), FsPermission.getDirDefault());
+      final int numBlocksPerFile = 2;
+      for (Path path : paths) {
+        FileSystemTestHelper.createFile(dfs, path, numBlocksPerFile,
+            (int)BLOCK_SIZE, (short)3, false);
+      }
+      waitForCachedBlocks(namenode, 0, 0);
+      checkNumCachedReplicas(dfs, paths, 0, 0);
+      // cache the /foo directory at replication 1
+      long id = dfs.addPathBasedCacheDirective(
+          new PathBasedCacheDirective.Builder().
+            setPath(new Path("/foo")).
+            setReplication((short)1).
+            setPool(pool).
+            build());
+      waitForCachedBlocks(namenode, 4, 4);
+      checkNumCachedReplicas(dfs, paths, 4, 4);
+      // step up the replication factor
+      for (int i=2; i<=3; i++) {
+        dfs.modifyPathBasedCacheDirective(
+            new PathBasedCacheDirective.Builder().
+              setId(id).
+              setReplication((short)i).
+              build());
+        waitForCachedBlocks(namenode, 4, 4*i);
+        checkNumCachedReplicas(dfs, paths, 4, 4*i);
+      }
+      // step it down
+      for (int i=2; i>=1; i--) {
+        dfs.modifyPathBasedCacheDirective(
+            new PathBasedCacheDirective.Builder().
+              setId(id).
+              setReplication((short)i).
+              build());
+        waitForCachedBlocks(namenode, 4, 4*i);
+        checkNumCachedReplicas(dfs, paths, 4, 4*i);
+      }
+      // remove the directive and watch the cached counts drop to 0
+      dfs.removePathBasedCacheDirective(id);
+      waitForCachedBlocks(namenode, 0, 0);
+      checkNumCachedReplicas(dfs, paths, 0, 0);
+    } finally {
+      cluster.shutdown();
+    }
+  }
+
+  @Test(timeout=60000)
+  public void testListCachePoolPermissions() throws Exception {
+    final UserGroupInformation myUser = UserGroupInformation
+        .createRemoteUser("myuser");
+    final DistributedFileSystem myDfs =
+        (DistributedFileSystem)DFSTestUtil.getFileSystemAs(myUser, conf);
+    final String poolName = "poolparty";
+    dfs.addCachePool(new CachePoolInfo(poolName)
+        .setMode(new FsPermission((short)0700)));
+    // Should only see partial info
+    RemoteIterator<CachePoolInfo> it = myDfs.listCachePools();
+    CachePoolInfo info = it.next();
+    assertFalse(it.hasNext());
+    assertEquals("Expected pool name", poolName, info.getPoolName());
+    assertNull("Unexpected owner name", info.getOwnerName());
+    assertNull("Unexpected group name", info.getGroupName());
+    assertNull("Unexpected mode", info.getMode());
+    assertNull("Unexpected weight", info.getWeight());
+    // Modify the pool so myuser is now the owner
+    dfs.modifyCachePool(new CachePoolInfo(poolName)
+        .setOwnerName(myUser.getShortUserName())
+        .setWeight(99));
+    // Should see full info
+    it = myDfs.listCachePools();
+    info = it.next();
+    assertFalse(it.hasNext());
+    assertEquals("Expected pool name", poolName, info.getPoolName());
+    assertEquals("Mismatched owner name", myUser.getShortUserName(),
+        info.getOwnerName());
+    assertNotNull("Expected group name", info.getGroupName());
+    assertEquals("Mismatched mode", (short) 0700,
+        info.getMode().toShort());
+    assertEquals("Mismatched weight", 99, (int)info.getWeight());
+  }
 }
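The expected counts in testReplicationFactor follow from its setup: the directive targets /foo only, which holds two of the four test files, each created with two blocks, so four blocks are cached, and each replication factor i yields 4*i cached replicas. The arithmetic, spelled out in a standalone snippet:

    public class ExpectedCounts {
      public static void main(String[] args) {
        int filesUnderFoo = 2;  // /foo/bar and /foo/baz; /foo2 is not cached
        int blocksPerFile = 2;  // numBlocksPerFile in the test
        int cachedBlocks = filesUnderFoo * blocksPerFile;  // 4
        for (int replication = 1; replication <= 3; replication++) {
          System.out.println(cachedBlocks + " blocks, "
              + cachedBlocks * replication + " cached replicas");
        }
      }
    }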
Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java?rev=1541342&r1=1541341&r2=1541342&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java Wed Nov 13 01:14:37 2013
@@ -29,6 +29,7 @@ import static org.mockito.Mockito.spy;
 
 import java.io.File;
 import java.io.IOException;
+import java.util.EnumSet;
 import java.util.List;
 import java.util.Random;
 
@@ -40,9 +41,12 @@ import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.Options.Rename;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSOutputStream;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.client.HdfsDataOutputStream.SyncFlag;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
@@ -102,6 +106,7 @@ public class TestRenameWithSnapshots {
 
   @Before
   public void setUp() throws Exception {
+    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCKSIZE);
     cluster = new MiniDFSCluster.Builder(conf).numDataNodes(REPL).format(true)
         .build();
     cluster.waitActive();
@@ -2289,4 +2294,49 @@ public class TestRenameWithSnapshots {
     assertEquals(0, diff.getChildrenDiff().getList(ListType.DELETED).size());
     assertEquals(0, diff.getChildrenDiff().getList(ListType.CREATED).size());
   }
+
+  /**
+   * Renaming an under-construction file that is present in a snapshot should
+   * not fail NameNode restart after a checkpoint. Unit test for HDFS-5425.
+   */
+  @Test
+  public void testRenameUCFileInSnapshot() throws Exception {
+    final Path test = new Path("/test");
+    final Path foo = new Path(test, "foo");
+    final Path bar = new Path(foo, "bar");
+    hdfs.mkdirs(foo);
+    // create a file and keep it under construction
+    hdfs.create(bar);
+    SnapshotTestHelper.createSnapshot(hdfs, test, "s0");
+    // rename bar --> bar2
+    final Path bar2 = new Path(foo, "bar2");
+    hdfs.rename(bar, bar2);
+
+    // save namespace and restart
+    restartClusterAndCheckImage(true);
+  }
+
+  /**
+   * Similar to testRenameUCFileInSnapshot, but renames first and then appends
+   * to the file without closing it. Unit test for HDFS-5425.
+   */
+  @Test
+  public void testAppendFileAfterRenameInSnapshot() throws Exception {
+    final Path test = new Path("/test");
+    final Path foo = new Path(test, "foo");
+    final Path bar = new Path(foo, "bar");
+    DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPL, SEED);
+    SnapshotTestHelper.createSnapshot(hdfs, test, "s0");
+    // rename bar --> bar2
+    final Path bar2 = new Path(foo, "bar2");
+    hdfs.rename(bar, bar2);
+    // append to the file and keep it under construction
+    FSDataOutputStream out = hdfs.append(bar2);
+    out.writeByte(0);
+    ((DFSOutputStream) out.getWrappedStream()).hsync(
+        EnumSet.of(SyncFlag.UPDATE_LENGTH));
+
+    // save namespace and restart
+    restartClusterAndCheckImage(true);
+  }
 }

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestXMLUtils.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestXMLUtils.java?rev=1541342&r1=1541341&r2=1541342&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestXMLUtils.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestXMLUtils.java Wed Nov 13 01:14:37 2013
@@ -17,9 +17,8 @@
  */
 package org.apache.hadoop.hdfs.util;
 
-import junit.framework.Assert;
-
 import org.apache.hadoop.hdfs.util.XMLUtils.UnmanglingError;
+import org.junit.Assert;
 import org.junit.Test;
 
 public class TestXMLUtils {
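The TestXMLUtils hunk swaps the JUnit 3-era junit.framework.Assert for the JUnit 4 org.junit.Assert; the call sites are source-compatible, so only the import changes. A minimal example of the post-migration form:

    import org.junit.Assert;
    import org.junit.Test;

    public class AssertImportExample {
      @Test
      public void testSomething() {
        // Same assertEquals call shape as with the JUnit 3 class, now
        // resolved from org.junit.Assert.
        Assert.assertEquals("values should match", 4, 2 + 2);
      }
    }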