Subject: svn commit: r1539898 [3/3] - in /hadoop/common/branches/HDFS-2832/hadoop-hdfs-project: hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/ hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/ hadoop-hdfs-nfs/src/test/java/org/apache/had...
Date: Fri, 08 Nov 2013 01:44:26 -0000
To: hdfs-commits@hadoop.apache.org
From: arp@apache.org
Message-Id: <20131108014427.E6BDF2388B4E@eris.apache.org>

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPathBasedCacheRequests.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPathBasedCacheRequests.java?rev=1539898&r1=1539897&r2=1539898&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPathBasedCacheRequests.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPathBasedCacheRequests.java Fri Nov 8 01:44:24 2013
@@ -42,6 +42,7 @@ import org.apache.commons.logging.LogFac
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileSystemTestHelper;
+import org.apache.hadoop.fs.IdNotFoundException;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.RemoteIterator;
 import org.apache.hadoop.fs.permission.FsPermission;
@@ -49,17 +50,12 @@ import org.apache.hadoop.hdfs.DFSConfigK
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.hdfs.protocol.AddPathBasedCacheDirectiveException.InvalidPathNameError;
-import org.apache.hadoop.hdfs.protocol.AddPathBasedCacheDirectiveException.InvalidPoolNameError;
-import org.apache.hadoop.hdfs.protocol.AddPathBasedCacheDirectiveException.PoolWritePermissionDeniedError;
 import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
-import org.apache.hadoop.hdfs.protocol.PathBasedCacheDescriptor;
 import org.apache.hadoop.hdfs.protocol.PathBasedCacheDirective;
-import org.apache.hadoop.hdfs.protocol.RemovePathBasedCacheDescriptorException.InvalidIdException;
-import org.apache.hadoop.hdfs.protocol.RemovePathBasedCacheDescriptorException.NoSuchIdException;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor.CachedBlocksList.Type;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
 import org.apache.hadoop.io.nativeio.NativeIO;
+import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.GSet;
@@ -86,7 +82,7 @@ public class TestPathBasedCacheRequests
     conf = new HdfsConfiguration();
     // set low limits here for testing purposes
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_LIST_CACHE_POOLS_NUM_RESPONSES, 2);
-    conf.setInt(DFSConfigKeys.DFS_NAMENODE_LIST_CACHE_DESCRIPTORS_NUM_RESPONSES, 2);
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_LIST_CACHE_DIRECTIVES_NUM_RESPONSES, 2);
     cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
     cluster.waitActive();
     dfs = cluster.getFileSystem();
@@ -296,21 +292,21 @@ public class TestPathBasedCacheRequests
   }

   private static void validateListAll(
-      RemoteIterator<PathBasedCacheDescriptor> iter,
-      PathBasedCacheDescriptor... descriptors) throws Exception {
-    for (PathBasedCacheDescriptor descriptor: descriptors) {
+      RemoteIterator<PathBasedCacheDirective> iter,
+      Long... ids) throws Exception {
+    for (Long id: ids) {
       assertTrue("Unexpectedly few elements", iter.hasNext());
-      assertEquals("Unexpected descriptor", descriptor, iter.next());
+      assertEquals("Unexpected directive ID", id, iter.next().getId());
     }
     assertFalse("Unexpectedly many list elements", iter.hasNext());
   }

-  private static PathBasedCacheDescriptor addAsUnprivileged(
+  private static long addAsUnprivileged(
      final PathBasedCacheDirective directive) throws Exception {
    return unprivilegedUser
-        .doAs(new PrivilegedExceptionAction<PathBasedCacheDescriptor>() {
+        .doAs(new PrivilegedExceptionAction<Long>() {
          @Override
-          public PathBasedCacheDescriptor run() throws IOException {
+          public Long run() throws IOException {
            DistributedFileSystem myDfs =
                (DistributedFileSystem) FileSystem.get(conf);
            return myDfs.addPathBasedCacheDirective(directive);
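[Reviewer note: addAsUnprivileged() above funnels every add through UserGroupInformation.doAs, so permission failures come from the NameNode's pool checks rather than the test's own identity. A minimal standalone sketch of that pattern, not the committed code; the user name, group, and path are illustrative:

    import java.security.PrivilegedExceptionAction;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.security.UserGroupInformation;

    public class DoAsSketch {
      public static void main(String[] args) throws Exception {
        final Configuration conf = new Configuration();
        // "theuser" and its group list are illustrative test identities.
        UserGroupInformation unprivileged = UserGroupInformation
            .createUserForTesting("theuser", new String[] { "users" });
        // Every FileSystem call inside run() executes as "theuser", so the
        // NameNode's permission checks see the unprivileged identity.
        boolean exists = unprivileged.doAs(new PrivilegedExceptionAction<Boolean>() {
          @Override
          public Boolean run() throws Exception {
            FileSystem fs = FileSystem.get(conf);
            return fs.exists(new Path("/some/test/path"));
          }
        });
        System.out.println("exists = " + exists);
      }
    }
]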
@@ -342,12 +338,12 @@ public class TestPathBasedCacheRequests
         setPool("pool1").
         build();

-    PathBasedCacheDescriptor alphaD = addAsUnprivileged(alpha);
-    PathBasedCacheDescriptor alphaD2 = addAsUnprivileged(alpha);
-    assertFalse("Expected to get unique descriptors when re-adding an "
+    long alphaId = addAsUnprivileged(alpha);
+    long alphaId2 = addAsUnprivileged(alpha);
+    assertFalse("Expected to get unique directives when re-adding an "
         + "existing PathBasedCacheDirective",
-        alphaD.getEntryId() == alphaD2.getEntryId());
-    PathBasedCacheDescriptor betaD = addAsUnprivileged(beta);
+        alphaId == alphaId2);
+    long betaId = addAsUnprivileged(beta);

     try {
       addAsUnprivileged(new PathBasedCacheDirective.Builder().
           setPool("no_such_pool").
           build());
       fail("expected an error when adding to a non-existent pool.");
-    } catch (IOException ioe) {
-      assertTrue(ioe instanceof InvalidPoolNameError);
+    } catch (IdNotFoundException ioe) {
+      GenericTestUtils.assertExceptionContains("no such pool as", ioe);
     }

     try {
@@ -366,8 +362,9 @@ public class TestPathBasedCacheRequests
           build());
       fail("expected an error when adding to a pool with "
           + "mode 0 (no permissions for anyone).");
-    } catch (IOException ioe) {
-      assertTrue(ioe instanceof PoolWritePermissionDeniedError);
+    } catch (AccessControlException e) {
+      GenericTestUtils.
+          assertExceptionContains("permission denied for pool", e);
     }

     try {
@@ -378,7 +375,7 @@ public class TestPathBasedCacheRequests
       fail("expected an error when adding a malformed path "
           + "to the cache directives.");
     } catch (IllegalArgumentException e) {
-      // expected
+      GenericTestUtils.assertExceptionContains("is not a valid DFS filename", e);
     }

     try {
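[Reviewer note: the rewritten catch blocks above stop asserting on the removed nested exception types (InvalidPoolNameError, PoolWritePermissionDeniedError, etc.) and instead match on the exception message via GenericTestUtils. A self-contained sketch of that assertion idiom; the message string is illustrative, and the real tests catch IdNotFoundException and AccessControlException rather than a bare IOException:

    import java.io.IOException;

    import org.apache.hadoop.test.GenericTestUtils;

    public class AssertExceptionContainsSketch {
      public static void main(String[] args) {
        try {
          // Stand-in for an RPC call that rejects an unknown directive ID.
          throw new IOException("id not found: 43");
        } catch (IOException e) {
          // Passes when the message contains the substring; otherwise it
          // throws an AssertionError carrying the original stack trace.
          GenericTestUtils.assertExceptionContains("id not found", e);
        }
      }
    }
]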
@@ -389,59 +386,74 @@ public class TestPathBasedCacheRequests
           build());
       Assert.fail("expected an error when adding a PathBasedCache "
           + "directive with an empty pool name.");
-    } catch (IOException ioe) {
-      Assert.assertTrue(ioe instanceof InvalidPoolNameError);
+    } catch (IdNotFoundException e) {
+      GenericTestUtils.assertExceptionContains("pool name was empty", e);
     }

-    PathBasedCacheDescriptor deltaD = addAsUnprivileged(delta);
+    long deltaId = addAsUnprivileged(delta);

     // We expect the following to succeed, because DistributedFileSystem
     // qualifies the path.
-    PathBasedCacheDescriptor relativeD = addAsUnprivileged(
+    long relativeId = addAsUnprivileged(
         new PathBasedCacheDirective.Builder().
             setPath(new Path("relative")).
             setPool("pool1").
             build());

-    RemoteIterator<PathBasedCacheDescriptor> iter;
-    iter = dfs.listPathBasedCacheDescriptors(null, null);
-    validateListAll(iter, alphaD, alphaD2, betaD, deltaD, relativeD);
-    iter = dfs.listPathBasedCacheDescriptors("pool3", null);
+    RemoteIterator<PathBasedCacheDirective> iter;
+    iter = dfs.listPathBasedCacheDirectives(null);
+    validateListAll(iter, alphaId, alphaId2, betaId, deltaId, relativeId);
+    iter = dfs.listPathBasedCacheDirectives(
+        new PathBasedCacheDirective.Builder().setPool("pool3").build());
     Assert.assertFalse(iter.hasNext());
-    iter = dfs.listPathBasedCacheDescriptors("pool1", null);
-    validateListAll(iter, alphaD, alphaD2, deltaD, relativeD);
-    iter = dfs.listPathBasedCacheDescriptors("pool2", null);
-    validateListAll(iter, betaD);
-
-    dfs.removePathBasedCacheDescriptor(betaD);
-    iter = dfs.listPathBasedCacheDescriptors("pool2", null);
+    iter = dfs.listPathBasedCacheDirectives(
+        new PathBasedCacheDirective.Builder().setPool("pool1").build());
+    validateListAll(iter, alphaId, alphaId2, deltaId, relativeId);
+    iter = dfs.listPathBasedCacheDirectives(
+        new PathBasedCacheDirective.Builder().setPool("pool2").build());
+    validateListAll(iter, betaId);
+
+    dfs.removePathBasedCacheDirective(betaId);
+    iter = dfs.listPathBasedCacheDirectives(
+        new PathBasedCacheDirective.Builder().setPool("pool2").build());
     Assert.assertFalse(iter.hasNext());

     try {
-      dfs.removePathBasedCacheDescriptor(betaD);
+      dfs.removePathBasedCacheDirective(betaId);
       Assert.fail("expected an error when removing a non-existent ID");
-    } catch (IOException ioe) {
-      Assert.assertTrue(ioe instanceof NoSuchIdException);
+    } catch (IdNotFoundException e) {
+      GenericTestUtils.assertExceptionContains("id not found", e);
     }

     try {
-      proto.removePathBasedCacheDescriptor(-42l);
+      proto.removePathBasedCacheDirective(-42l);
       Assert.fail("expected an error when removing a negative ID");
-    } catch (IOException ioe) {
-      Assert.assertTrue(ioe instanceof InvalidIdException);
+    } catch (IdNotFoundException e) {
+      GenericTestUtils.assertExceptionContains(
+          "invalid non-positive directive ID", e);
     }

     try {
-      proto.removePathBasedCacheDescriptor(43l);
+      proto.removePathBasedCacheDirective(43l);
       Assert.fail("expected an error when removing a non-existent ID");
-    } catch (IOException ioe) {
-      Assert.assertTrue(ioe instanceof NoSuchIdException);
+    } catch (IdNotFoundException e) {
+      GenericTestUtils.assertExceptionContains("id not found", e);
     }

-    dfs.removePathBasedCacheDescriptor(alphaD);
-    dfs.removePathBasedCacheDescriptor(alphaD2);
-    dfs.removePathBasedCacheDescriptor(deltaD);
-    dfs.removePathBasedCacheDescriptor(relativeD);
-    iter = dfs.listPathBasedCacheDescriptors(null, null);
+    dfs.removePathBasedCacheDirective(alphaId);
+    dfs.removePathBasedCacheDirective(alphaId2);
+    dfs.removePathBasedCacheDirective(deltaId);
+
+    dfs.modifyPathBasedCacheDirective(new PathBasedCacheDirective.Builder().
+        setId(relativeId).
+        setReplication((short)555).
+        build());
+    iter = dfs.listPathBasedCacheDirectives(null);
+    assertTrue(iter.hasNext());
+    PathBasedCacheDirective modified = iter.next();
+    assertEquals(relativeId, modified.getId().longValue());
+    assertEquals((short)555, modified.getReplication().shortValue());
+    dfs.removePathBasedCacheDirective(relativeId);
+    iter = dfs.listPathBasedCacheDirectives(null);
     assertFalse(iter.hasNext());
   }

@@ -481,16 +493,16 @@ public class TestPathBasedCacheRequests
           new PathBasedCacheDirective.Builder().
             setPath(new Path(entryPrefix + i)).setPool(pool).build());
     }
-    RemoteIterator<PathBasedCacheDescriptor> dit
-        = dfs.listPathBasedCacheDescriptors(null, null);
+    RemoteIterator<PathBasedCacheDirective> dit
+        = dfs.listPathBasedCacheDirectives(null);
     for (int i=0; i
-    RemoteIterator<PathBasedCacheDescriptor> entries =
-      nnRpc.listPathBasedCacheDescriptors(0, null, null);
+    RemoteIterator<PathBasedCacheDirective> entries =
+      nnRpc.listPathBasedCacheDirectives(0, null);
     for (int i=0; i
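[Reviewer note: taken together, the hunks above replace descriptor objects with plain directive IDs: add returns an ID, and list/modify/remove are keyed by an ID or by a directive used as a filter. A standalone sketch of that workflow against a MiniDFSCluster, using only calls that appear in the updated test; pool and path names are illustrative, and the API shown is the one on this branch at this revision:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.RemoteIterator;
    import org.apache.hadoop.hdfs.DistributedFileSystem;
    import org.apache.hadoop.hdfs.HdfsConfiguration;
    import org.apache.hadoop.hdfs.MiniDFSCluster;
    import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
    import org.apache.hadoop.hdfs.protocol.PathBasedCacheDirective;

    public class DirectiveIdWorkflowSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new HdfsConfiguration();
        MiniDFSCluster cluster =
            new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
        try {
          cluster.waitActive();
          DistributedFileSystem dfs = cluster.getFileSystem();
          dfs.addCachePool(new CachePoolInfo("pool1"));

          // add now returns a directive ID instead of a descriptor object.
          long id = dfs.addPathBasedCacheDirective(
              new PathBasedCacheDirective.Builder().
                  setPath(new Path("/cached-file")).
                  setPool("pool1").
                  build());

          // modify is addressed by ID; the test above relies on unset
          // fields (here, the path) keeping their old values.
          dfs.modifyPathBasedCacheDirective(
              new PathBasedCacheDirective.Builder().
                  setId(id).
                  setReplication((short)2).
                  build());

          // list takes an optional directive acting as a filter
          // (null means "no filter").
          RemoteIterator<PathBasedCacheDirective> it =
              dfs.listPathBasedCacheDirectives(
                  new PathBasedCacheDirective.Builder().setPool("pool1").build());
          while (it.hasNext()) {
            PathBasedCacheDirective d = it.next();
            System.out.println(d.getId() + " " + d.getPath()
                + " " + d.getReplication());
          }

          // remove is addressed by the same ID.
          dfs.removePathBasedCacheDirective(id);
        } finally {
          cluster.shutdown();
        }
      }
    }
]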
Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java?rev=1539898&r1=1539897&r2=1539898&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java Fri Nov 8 01:44:24 2013
     LightWeightCache<CacheEntry, CacheEntry> cacheSet =
         (LightWeightCache<CacheEntry, CacheEntry>) fsn0.getRetryCache().getCacheSet();
-    assertEquals(19, cacheSet.size());
+    assertEquals(20, cacheSet.size());
     Map<CacheEntry, CacheEntry> oldEntries =
         new HashMap<CacheEntry, CacheEntry>();
@@ -172,7 +171,7 @@ public class TestRetryCacheWithHA {
     FSNamesystem fsn1 = cluster.getNamesystem(1);
     cacheSet = (LightWeightCache<CacheEntry, CacheEntry>) fsn1
         .getRetryCache().getCacheSet();
-    assertEquals(19, cacheSet.size());
+    assertEquals(20, cacheSet.size());
     iter = cacheSet.iterator();
     while (iter.hasNext()) {
       CacheEntry entry = iter.next();
@@ -740,35 +739,34 @@ public class TestRetryCacheWithHA {

   /** addPathBasedCacheDirective */
   class AddPathBasedCacheDirectiveOp extends AtMostOnceOp {
-    private String pool;
-    private String path;
-    private PathBasedCacheDescriptor descriptor;
+    private PathBasedCacheDirective directive;
+    private Long result;

-    AddPathBasedCacheDirectiveOp(DFSClient client, String pool, String path) {
+    AddPathBasedCacheDirectiveOp(DFSClient client,
+        PathBasedCacheDirective directive) {
       super("addPathBasedCacheDirective", client);
-      this.pool = pool;
-      this.path = path;
+      this.directive = directive;
     }

     @Override
     void prepare() throws Exception {
-      dfs.addCachePool(new CachePoolInfo(pool));
+      dfs.addCachePool(new CachePoolInfo(directive.getPool()));
     }

     @Override
     void invoke() throws Exception {
-      descriptor = client.addPathBasedCacheDirective(
-          new PathBasedCacheDirective.Builder().
-              setPath(new Path(path)).
-              setPool(pool).
-              build());
+      result = client.addPathBasedCacheDirective(directive);
     }

     @Override
     boolean checkNamenodeBeforeReturn() throws Exception {
       for (int i = 0; i < CHECKTIMES; i++) {
-        RemoteIterator<PathBasedCacheDescriptor> iter =
-          dfs.listPathBasedCacheDescriptors(pool, new Path(path));
+        RemoteIterator<PathBasedCacheDirective> iter =
+          dfs.listPathBasedCacheDirectives(
+              new PathBasedCacheDirective.Builder().
+                  setPool(directive.getPool()).
+                  setPath(directive.getPath()).
+                  build());
         if (iter.hasNext()) {
           return true;
         }
@@ -779,43 +777,99 @@ public class TestRetryCacheWithHA {

     @Override
     Object getResult() {
-      return descriptor;
+      return result;
     }
   }

-  /** removePathBasedCacheDescriptor */
-  class RemovePathBasedCacheDescriptorOp extends AtMostOnceOp {
-    private String pool;
-    private String path;
-    private PathBasedCacheDescriptor descriptor;
+  /** modifyPathBasedCacheDirective */
+  class ModifyPathBasedCacheDirectiveOp extends AtMostOnceOp {
+    private final PathBasedCacheDirective directive;
+    private final short newReplication;
+    private long id;

-    RemovePathBasedCacheDescriptorOp(DFSClient client, String pool,
-        String path) {
-      super("removePathBasedCacheDescriptor", client);
-      this.pool = pool;
-      this.path = path;
+    ModifyPathBasedCacheDirectiveOp(DFSClient client,
+        PathBasedCacheDirective directive, short newReplication) {
+      super("modifyPathBasedCacheDirective", client);
+      this.directive = directive;
+      this.newReplication = newReplication;
     }

     @Override
     void prepare() throws Exception {
-      dfs.addCachePool(new CachePoolInfo(pool));
-      descriptor = dfs.addPathBasedCacheDirective(
+      dfs.addCachePool(new CachePoolInfo(directive.getPool()));
+      id = client.addPathBasedCacheDirective(directive);
+    }
+
+    @Override
+    void invoke() throws Exception {
+      client.modifyPathBasedCacheDirective(
           new PathBasedCacheDirective.Builder().
-              setPath(new Path(path)).
-              setPool(pool).
+              setId(id).
+              setReplication(newReplication).
              build());
     }

     @Override
+    boolean checkNamenodeBeforeReturn() throws Exception {
+      for (int i = 0; i < CHECKTIMES; i++) {
+        RemoteIterator<PathBasedCacheDirective> iter =
+            dfs.listPathBasedCacheDirectives(
+                new PathBasedCacheDirective.Builder().
+                    setPool(directive.getPool()).
+                    setPath(directive.getPath()).
+                    build());
+        while (iter.hasNext()) {
+          PathBasedCacheDirective result = iter.next();
+          if ((result.getId() == id) &&
+              (result.getReplication().shortValue() == newReplication)) {
+            return true;
+          }
+        }
+        Thread.sleep(1000);
+      }
+      return false;
+    }
+
+    @Override
+    Object getResult() {
+      return null;
+    }
+  }
+
+  /** removePathBasedCacheDirective */
+  class RemovePathBasedCacheDirectiveOp extends AtMostOnceOp {
+    private PathBasedCacheDirective directive;
+    private long id;
+
+    RemovePathBasedCacheDirectiveOp(DFSClient client, String pool,
+        String path) {
+      super("removePathBasedCacheDirective", client);
+      this.directive = new PathBasedCacheDirective.Builder().
+          setPool(pool).
+          setPath(new Path(path)).
+          build();
+    }
+
+    @Override
+    void prepare() throws Exception {
+      dfs.addCachePool(new CachePoolInfo(directive.getPool()));
+      id = dfs.addPathBasedCacheDirective(directive);
+    }
+
+    @Override
     void invoke() throws Exception {
-      client.removePathBasedCacheDescriptor(descriptor.getEntryId());
+      client.removePathBasedCacheDirective(id);
     }

     @Override
     boolean checkNamenodeBeforeReturn() throws Exception {
       for (int i = 0; i < CHECKTIMES; i++) {
-        RemoteIterator<PathBasedCacheDescriptor> iter =
-          dfs.listPathBasedCacheDescriptors(pool, new Path(path));
+        RemoteIterator<PathBasedCacheDirective> iter =
+            dfs.listPathBasedCacheDirectives(
+                new PathBasedCacheDirective.Builder().
+                    setPool(directive.getPool()).
+                    setPath(directive.getPath()).
+                    build());
         if (!iter.hasNext()) {
           return true;
         }
@@ -1020,14 +1074,30 @@ public class TestRetryCacheWithHA {
   @Test (timeout=60000)
   public void testAddPathBasedCacheDirective() throws Exception {
     DFSClient client = genClientWithDummyHandler();
-    AtMostOnceOp op = new AddPathBasedCacheDirectiveOp(client, "pool", "/path");
+    AtMostOnceOp op = new AddPathBasedCacheDirectiveOp(client,
+        new PathBasedCacheDirective.Builder().
+            setPool("pool").
+            setPath(new Path("/path")).
+            build());
+    testClientRetryWithFailover(op);
+  }
+
+  @Test (timeout=60000)
+  public void testModifyPathBasedCacheDirective() throws Exception {
+    DFSClient client = genClientWithDummyHandler();
+    AtMostOnceOp op = new ModifyPathBasedCacheDirectiveOp(client,
+        new PathBasedCacheDirective.Builder().
+            setPool("pool").
+            setPath(new Path("/path")).
+            setReplication((short)1).build(),
+        (short)555);
     testClientRetryWithFailover(op);
   }

   @Test (timeout=60000)
   public void testRemovePathBasedCacheDescriptor() throws Exception {
     DFSClient client = genClientWithDummyHandler();
-    AtMostOnceOp op = new RemovePathBasedCacheDescriptorOp(client, "pool",
+    AtMostOnceOp op = new RemovePathBasedCacheDirectiveOp(client, "pool",
         "/path");
     testClientRetryWithFailover(op);
   }
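[Reviewer note: with modifyPathBasedCacheDirective now in the retry cache (hence the 19 -> 20 size assertions earlier in this file), the new ModifyPathBasedCacheDirectiveOp verifies that a retried modify is applied exactly once by polling the NameNode until the new replication is visible. A condensed, self-contained sketch of that polling idiom; the CHECKTIMES value here is illustrative:

    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.RemoteIterator;
    import org.apache.hadoop.hdfs.DistributedFileSystem;
    import org.apache.hadoop.hdfs.protocol.PathBasedCacheDirective;

    public class DirectivePollSketch {
      // Mirrors checkNamenodeBeforeReturn(): the modify may land just before
      // or just after a failover, so poll rather than assert immediately.
      static boolean directiveModified(DistributedFileSystem dfs, String pool,
          Path path, long id, short newReplication) throws Exception {
        final int CHECKTIMES = 5; // illustrative; the test defines its own
        for (int i = 0; i < CHECKTIMES; i++) {
          RemoteIterator<PathBasedCacheDirective> iter =
              dfs.listPathBasedCacheDirectives(
                  new PathBasedCacheDirective.Builder().
                      setPool(pool).
                      setPath(path).
                      build());
          while (iter.hasNext()) {
            PathBasedCacheDirective d = iter.next();
            // getId()/getReplication() return boxed values; compare by value.
            if (d.getId().longValue() == id
                && d.getReplication().shortValue() == newReplication) {
              return true;
            }
          }
          Thread.sleep(1000); // let the standby catch up before re-checking
        }
        return false;
      }
    }
]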
Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotBlocksMap.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotBlocksMap.java?rev=1539898&r1=1539897&r2=1539898&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotBlocksMap.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotBlocksMap.java Fri Nov 8 01:44:24 2013
@@ -32,6 +32,7 @@ import org.apache.hadoop.hdfs.DFSConfigK
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
@@ -274,4 +275,76 @@ public class TestSnapshotBlocksMap {
         "s2/bar");
     DFSTestUtil.readFile(hdfs, new Path(bar2SnapshotPath));
   }
+
+  /**
+   * Make sure we delete 0-sized block when deleting an INodeFileUCWithSnapshot
+   */
+  @Test
+  public void testDeletionWithZeroSizeBlock() throws Exception {
+    final Path foo = new Path("/foo");
+    final Path bar = new Path(foo, "bar");
+    DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPLICATION, 0L);
+
+    SnapshotTestHelper.createSnapshot(hdfs, foo, "s0");
+    hdfs.append(bar);
+
+    INodeFile barNode = fsdir.getINode4Write(bar.toString()).asFile();
+    BlockInfo[] blks = barNode.getBlocks();
+    assertEquals(1, blks.length);
+    assertEquals(BLOCKSIZE, blks[0].getNumBytes());
+    ExtendedBlock previous = new ExtendedBlock(fsn.getBlockPoolId(), blks[0]);
+    cluster.getNameNodeRpc()
+        .addBlock(bar.toString(), hdfs.getClient().getClientName(), previous,
+            null, barNode.getId(), null);
+
+    SnapshotTestHelper.createSnapshot(hdfs, foo, "s1");
+
+    barNode = fsdir.getINode4Write(bar.toString()).asFile();
+    blks = barNode.getBlocks();
+    assertEquals(2, blks.length);
+    assertEquals(BLOCKSIZE, blks[0].getNumBytes());
+    assertEquals(0, blks[1].getNumBytes());
+
+    hdfs.delete(bar, true);
+    final Path sbar = SnapshotTestHelper.getSnapshotPath(foo, "s1",
+        bar.getName());
+    barNode = fsdir.getINode(sbar.toString()).asFile();
+    blks = barNode.getBlocks();
+    assertEquals(1, blks.length);
+    assertEquals(BLOCKSIZE, blks[0].getNumBytes());
+  }
+
+  /** Make sure we delete 0-sized block when deleting an INodeFileUC */
+  @Test
+  public void testDeletionWithZeroSizeBlock2() throws Exception {
+    final Path foo = new Path("/foo");
+    final Path subDir = new Path(foo, "sub");
+    final Path bar = new Path(subDir, "bar");
+    DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPLICATION, 0L);
+
+    hdfs.append(bar);
+
+    INodeFile barNode = fsdir.getINode4Write(bar.toString()).asFile();
+    BlockInfo[] blks = barNode.getBlocks();
+    assertEquals(1, blks.length);
+    ExtendedBlock previous = new ExtendedBlock(fsn.getBlockPoolId(), blks[0]);
+    cluster.getNameNodeRpc()
+        .addBlock(bar.toString(), hdfs.getClient().getClientName(), previous,
+            null, barNode.getId(), null);
+
+    SnapshotTestHelper.createSnapshot(hdfs, foo, "s1");
+
+    barNode = fsdir.getINode4Write(bar.toString()).asFile();
+    blks = barNode.getBlocks();
+    assertEquals(2, blks.length);
+    assertEquals(BLOCKSIZE, blks[0].getNumBytes());
+    assertEquals(0, blks[1].getNumBytes());
+
+    hdfs.delete(subDir, true);
+    final Path sbar = SnapshotTestHelper.getSnapshotPath(foo, "s1", "sub/bar");
+    barNode = fsdir.getINode(sbar.toString()).asFile();
+    blks = barNode.getBlocks();
+    assertEquals(1, blks.length);
+    assertEquals(BLOCKSIZE, blks[0].getNumBytes());
+  }
 }
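[Reviewer note: the non-obvious step in both new tests is creating a 0-byte last block: after append, the test calls addBlock on the NameNode RPC directly, so a block is allocated but no DataNode ever writes to it. A sketch of just that step, with the test class's fields passed in as parameters; this is an extraction for illustration, not the committed code:

    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;
    import org.apache.hadoop.hdfs.MiniDFSCluster;
    import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
    import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
    import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
    import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
    import org.apache.hadoop.hdfs.server.namenode.INodeFile;

    public class ZeroSizeBlockSketch {
      // Leaves the appended file under construction with a trailing 0-byte
      // block -- exactly the state the two tests then delete and verify.
      static void openZeroSizeLastBlock(MiniDFSCluster cluster,
          DistributedFileSystem hdfs, FSNamesystem fsn, FSDirectory fsdir,
          Path bar) throws Exception {
        hdfs.append(bar); // reopen the file; it is now under construction
        INodeFile barNode = fsdir.getINode4Write(bar.toString()).asFile();
        BlockInfo[] blks = barNode.getBlocks();
        ExtendedBlock previous =
            new ExtendedBlock(fsn.getBlockPoolId(), blks[0]);
        // Ask the NameNode directly for the next block; since no data is
        // ever written to it, its length stays 0.
        cluster.getNameNodeRpc().addBlock(bar.toString(),
            hdfs.getClient().getClientName(), previous,
            null /* excluded nodes */, barNode.getId(),
            null /* favored nodes */);
      }
    }
]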
Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored?rev=1539898&r1=1539897&r2=1539898&view=diff
==============================================================================
Binary files - no diff available.

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored.xml?rev=1539898&r1=1539897&r2=1539898&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored.xml (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored.xml Fri Nov 8 01:44:24 2013
(XML element tags in this section were lost in transit; the surviving record
values are shown one per line with their diff markers.)
@@ -843,6 +843,7 @@
     OP_ADD_PATH_BASED_CACHE_DIRECTIVE
     63
+    1
     /bar
     1
     poolparty
@@ -851,10 +852,20 @@
-    OP_REMOVE_PATH_BASED_CACHE_DESCRIPTOR
+    OP_MODIFY_PATH_BASED_CACHE_DIRECTIVE
     64
     1
+    2
+
+    -2
+
+
+
+    OP_REMOVE_PATH_BASED_CACHE_DIRECTIVE
+
+    65
+    1
     27ac79f0-d378-4933-824b-c2a188968d97
     78
@@ -862,7 +873,7 @@
     OP_REMOVE_CACHE_POOL
-    65
+    66
     poolparty
     27ac79f0-d378-4933-824b-c2a188968d97
     79
@@ -871,7 +882,7 @@
     OP_ADD
-    66
+    67
     0
     16393
     /hard-lease-recovery-test
@@ -893,21 +904,21 @@
     OP_ALLOCATE_BLOCK_ID
-    67
+    68
     1073741834

     OP_SET_GENSTAMP_V2
-    68
+    69
     1010

     OP_UPDATE_BLOCKS
-    69
+    70
     /hard-lease-recovery-test
     1073741834
@@ -921,7 +932,7 @@
     OP_UPDATE_BLOCKS
-    70
+    71
     /hard-lease-recovery-test
     1073741834
@@ -935,14 +946,14 @@
     OP_SET_GENSTAMP_V2
-    71
+    72
     1011

     OP_REASSIGN_LEASE
-    72
+    73
     DFSClient_NONMAPREDUCE_-134124999_1
     /hard-lease-recovery-test
     HDFS_NameNode
@@ -951,14 +962,14 @@
     OP_SET_GENSTAMP_V2
-    73
+    74
     1012

     OP_REASSIGN_LEASE
-    74
+    75
     HDFS_NameNode
     /hard-lease-recovery-test
     HDFS_NameNode
@@ -967,7 +978,7 @@
     OP_CLOSE
-    75
+    76
     0
     0
     /hard-lease-recovery-test
@@ -992,7 +1003,7 @@
     OP_END_LOG_SEGMENT
-    76
+    77

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testCacheAdminConf.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testCacheAdminConf.xml?rev=1539898&r1=1539897&r2=1539898&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testCacheAdminConf.xml (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testCacheAdminConf.xml Fri Nov 8 01:44:24 2013
@@ -358,5 +358,20 @@
+
+    <test>
+      <description>Testing the help usage</description>
+      <test-commands>
+        <cache-admin-command>-help -addPool</cache-admin-command>
+      </test-commands>
+      <cleanup-commands>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>SubstringComparator</type>
+          <expected-output>Add a new cache pool.</expected-output>
+        </comparator>
+      </comparators>
+    </test>
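[Reviewer note: the new CLI test drives "-help -addPool" through the cacheadmin test harness and expects the usage line shown. The same check can be scripted against the tool class directly -- a sketch assuming org.apache.hadoop.hdfs.tools.CacheAdmin is the Tool behind the hdfs cacheadmin command, as on this branch:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.HdfsConfiguration;
    import org.apache.hadoop.hdfs.tools.CacheAdmin;
    import org.apache.hadoop.util.ToolRunner;

    public class CacheAdminHelpSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new HdfsConfiguration();
        // Equivalent of the CLI test's "-help -addPool" command; expected
        // to print usage text containing "Add a new cache pool." and exit 0.
        int ret = ToolRunner.run(conf, new CacheAdmin(conf),
            new String[] { "-help", "-addPool" });
        System.exit(ret);
      }
    }
]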