From: jitendra@apache.org
To: common-commits@hadoop.apache.org
Date: Tue, 10 Oct 2017 17:40:44 -0000
Message-Id: <974b11e7bd284848963c3fa4fc054eac@git.apache.org>
Subject: [1/2] hadoop git commit: HDFS-11575. Supporting HDFS NFS gateway with Federated HDFS. Contributed by Mukul Kumar Singh.

Repository: hadoop
Updated Branches:
  refs/heads/trunk ec8bf9e48 -> d6602b5f3
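The core of this change is that an NFS3 file handle now carries both the file id and the id of the namenode that owns it, so a single gateway can serve several federated namespaces. Below is a minimal sketch of the new construction pattern, assuming only the two calls visible throughout the diff (Nfs3Utils.getNamenodeId and the two-argument FileHandle constructor); the wrapper class and method names here are illustrative, not part of the commit.

import org.apache.hadoop.hdfs.nfs.conf.NfsConfiguration;
import org.apache.hadoop.hdfs.nfs.nfs3.Nfs3Utils;
import org.apache.hadoop.nfs.nfs3.FileHandle;

public class FileHandleSketch {
  // Builds a federation-aware handle: namenodeId identifies which
  // namenode of the cluster owns fileId, resolved from the gateway
  // configuration.
  public static FileHandle handleFor(long fileId, NfsConfiguration config) {
    int namenodeId = Nfs3Utils.getNamenodeId(config);
    return new FileHandle(fileId, namenodeId);
  }
}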
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d6602b5f/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestRpcProgramNfs3.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestRpcProgramNfs3.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestRpcProgramNfs3.java
index f308763..30ecc0b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestRpcProgramNfs3.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestRpcProgramNfs3.java
@@ -186,7 +186,8 @@ public class TestRpcProgramNfs3 {
   public void testGetattr() throws Exception {
     HdfsFileStatus status = nn.getRpcServer().getFileInfo("/tmp/bar");
     long dirId = status.getFileId();
-    FileHandle handle = new FileHandle(dirId);
+    int namenodeId = Nfs3Utils.getNamenodeId(config);
+    FileHandle handle = new FileHandle(dirId, namenodeId);
     XDR xdr_req = new XDR();
     GETATTR3Request req = new GETATTR3Request(handle);
     req.serialize(xdr_req);
@@ -209,8 +210,9 @@ public class TestRpcProgramNfs3 {
   public void testSetattr() throws Exception {
     HdfsFileStatus status = nn.getRpcServer().getFileInfo(testdir);
     long dirId = status.getFileId();
+    int namenodeId = Nfs3Utils.getNamenodeId(config);
     XDR xdr_req = new XDR();
-    FileHandle handle = new FileHandle(dirId);
+    FileHandle handle = new FileHandle(dirId, namenodeId);
     SetAttr3 symAttr = new SetAttr3(0, 1, 0, 0, null, null,
         EnumSet.of(SetAttrField.UID));
     SETATTR3Request req = new SETATTR3Request(handle, symAttr, false, null);
@@ -234,7 +236,8 @@ public class TestRpcProgramNfs3 {
   public void testLookup() throws Exception {
     HdfsFileStatus status = nn.getRpcServer().getFileInfo(testdir);
     long dirId = status.getFileId();
-    FileHandle handle = new FileHandle(dirId);
+    int namenodeId = Nfs3Utils.getNamenodeId(config);
+    FileHandle handle = new FileHandle(dirId, namenodeId);
     LOOKUP3Request lookupReq = new LOOKUP3Request(handle, "bar");
     XDR xdr_req = new XDR();
     lookupReq.serialize(xdr_req);
@@ -257,7 +260,8 @@ public class TestRpcProgramNfs3 {
   public void testAccess() throws Exception {
     HdfsFileStatus status = nn.getRpcServer().getFileInfo("/tmp/bar");
     long dirId = status.getFileId();
-    FileHandle handle = new FileHandle(dirId);
+    int namenodeId = Nfs3Utils.getNamenodeId(config);
+    FileHandle handle = new FileHandle(dirId, namenodeId);
     XDR xdr_req = new XDR();
     ACCESS3Request req = new ACCESS3Request(handle);
     req.serialize(xdr_req);
@@ -281,8 +285,9 @@ public class TestRpcProgramNfs3 {
     // Create a symlink first.
     HdfsFileStatus status = nn.getRpcServer().getFileInfo(testdir);
     long dirId = status.getFileId();
+    int namenodeId = Nfs3Utils.getNamenodeId(config);
     XDR xdr_req = new XDR();
-    FileHandle handle = new FileHandle(dirId);
+    FileHandle handle = new FileHandle(dirId, namenodeId);
     SYMLINK3Request req = new SYMLINK3Request(handle, "fubar", new SetAttr3(),
         "bar");
     req.serialize(xdr_req);
@@ -316,7 +321,8 @@ public class TestRpcProgramNfs3 {
   public void testRead() throws Exception {
     HdfsFileStatus status = nn.getRpcServer().getFileInfo("/tmp/bar");
     long dirId = status.getFileId();
-    FileHandle handle = new FileHandle(dirId);
+    int namenodeId = Nfs3Utils.getNamenodeId(config);
+    FileHandle handle = new FileHandle(dirId, namenodeId);
 
     READ3Request readReq = new READ3Request(handle, 0, 5);
     XDR xdr_req = new XDR();
@@ -373,7 +379,8 @@ public class TestRpcProgramNfs3 {
     final HdfsFileStatus status = nn.getRpcServer().getFileInfo(fileName);
     final long dirId = status.getFileId();
-    final FileHandle handle = new FileHandle(dirId);
+    final int namenodeId = Nfs3Utils.getNamenodeId(config);
+    final FileHandle handle = new FileHandle(dirId, namenodeId);
 
     final WRITE3Request writeReq = new WRITE3Request(handle, 0, buffer.length,
         WriteStableHow.DATA_SYNC, ByteBuffer.wrap(buffer));
@@ -390,7 +397,8 @@ public class TestRpcProgramNfs3 {
       throws Exception {
     final HdfsFileStatus status = nn.getRpcServer().getFileInfo(fileName);
     final long dirId = status.getFileId();
-    final FileHandle handle = new FileHandle(dirId);
+    final int namenodeId = Nfs3Utils.getNamenodeId(config);
+    final FileHandle handle = new FileHandle(dirId, namenodeId);
 
     final READ3Request readReq = new READ3Request(handle, 0, len);
     final XDR xdr_req = new XDR();
@@ -422,7 +430,8 @@ public class TestRpcProgramNfs3 {
   private void commit(String fileName, int len) throws Exception {
     final HdfsFileStatus status = nn.getRpcServer().getFileInfo(fileName);
     final long dirId = status.getFileId();
-    final FileHandle handle = new FileHandle(dirId);
+    final int namenodeId = Nfs3Utils.getNamenodeId(config);
+    final FileHandle handle = new FileHandle(dirId, namenodeId);
     final XDR xdr_req = new XDR();
     final COMMIT3Request req = new COMMIT3Request(handle, 0, len);
     req.serialize(xdr_req);
@@ -439,7 +448,8 @@ public class TestRpcProgramNfs3 {
   public void testWrite() throws Exception {
     HdfsFileStatus status = nn.getRpcServer().getFileInfo("/tmp/bar");
     long dirId = status.getFileId();
-    FileHandle handle = new FileHandle(dirId);
+    int namenodeId = Nfs3Utils.getNamenodeId(config);
+    FileHandle handle = new FileHandle(dirId, namenodeId);
 
     byte[] buffer = new byte[10];
     for (int i = 0; i < 10; i++) {
@@ -469,8 +479,9 @@ public class TestRpcProgramNfs3 {
   public void testCreate() throws Exception {
     HdfsFileStatus status = nn.getRpcServer().getFileInfo(testdir);
     long dirId = status.getFileId();
+    int namenodeId = Nfs3Utils.getNamenodeId(config);
     XDR xdr_req = new XDR();
-    FileHandle handle = new FileHandle(dirId);
+    FileHandle handle = new FileHandle(dirId, namenodeId);
     CREATE3Request req = new CREATE3Request(handle, "fubar",
         Nfs3Constant.CREATE_UNCHECKED, new SetAttr3(), 0);
     req.serialize(xdr_req);
@@ -493,8 +504,9 @@ public class TestRpcProgramNfs3 {
   public void testMkdir() throws Exception { //FixME
     HdfsFileStatus status = nn.getRpcServer().getFileInfo(testdir);
     long dirId = status.getFileId();
+    int namenodeId = Nfs3Utils.getNamenodeId(config);
     XDR xdr_req = new XDR();
-    FileHandle handle = new FileHandle(dirId);
+    FileHandle handle = new FileHandle(dirId, namenodeId);
     MKDIR3Request req = new MKDIR3Request(handle, "fubar1", new SetAttr3());
     req.serialize(xdr_req);
@@ -520,8 +532,9 @@ public class TestRpcProgramNfs3 {
   public void testSymlink() throws Exception {
     HdfsFileStatus status = nn.getRpcServer().getFileInfo(testdir);
     long dirId = status.getFileId();
+    int namenodeId = Nfs3Utils.getNamenodeId(config);
     XDR xdr_req = new XDR();
-    FileHandle handle = new FileHandle(dirId);
+    FileHandle handle = new FileHandle(dirId, namenodeId);
     SYMLINK3Request req = new SYMLINK3Request(handle, "fubar", new SetAttr3(),
         "bar");
     req.serialize(xdr_req);
@@ -544,8 +557,9 @@ public class TestRpcProgramNfs3 {
   public void testRemove() throws Exception {
     HdfsFileStatus status = nn.getRpcServer().getFileInfo(testdir);
     long dirId = status.getFileId();
+    int namenodeId = Nfs3Utils.getNamenodeId(config);
     XDR xdr_req = new XDR();
-    FileHandle handle = new FileHandle(dirId);
+    FileHandle handle = new FileHandle(dirId, namenodeId);
     REMOVE3Request req = new REMOVE3Request(handle, "bar");
     req.serialize(xdr_req);
@@ -567,8 +581,9 @@ public class TestRpcProgramNfs3 {
   public void testRmdir() throws Exception {
     HdfsFileStatus status = nn.getRpcServer().getFileInfo(testdir);
     long dirId = status.getFileId();
+    int namenodeId = Nfs3Utils.getNamenodeId(config);
     XDR xdr_req = new XDR();
-    FileHandle handle = new FileHandle(dirId);
+    FileHandle handle = new FileHandle(dirId, namenodeId);
     RMDIR3Request req = new RMDIR3Request(handle, "foo");
     req.serialize(xdr_req);
@@ -590,8 +605,9 @@ public class TestRpcProgramNfs3 {
   public void testRename() throws Exception {
     HdfsFileStatus status = nn.getRpcServer().getFileInfo(testdir);
     long dirId = status.getFileId();
+    int namenodeId = Nfs3Utils.getNamenodeId(config);
     XDR xdr_req = new XDR();
-    FileHandle handle = new FileHandle(dirId);
+    FileHandle handle = new FileHandle(dirId, namenodeId);
     RENAME3Request req = new RENAME3Request(handle, "bar", handle, "fubar");
     req.serialize(xdr_req);
@@ -613,7 +629,8 @@ public class TestRpcProgramNfs3 {
   public void testReaddir() throws Exception {
     HdfsFileStatus status = nn.getRpcServer().getFileInfo(testdir);
     long dirId = status.getFileId();
-    FileHandle handle = new FileHandle(dirId);
+    int namenodeId = Nfs3Utils.getNamenodeId(config);
+    FileHandle handle = new FileHandle(dirId, namenodeId);
     XDR xdr_req = new XDR();
     READDIR3Request req = new READDIR3Request(handle, 0, 0, 100);
     req.serialize(xdr_req);
@@ -636,7 +653,8 @@ public class TestRpcProgramNfs3 {
   public void testReaddirplus() throws Exception {
     HdfsFileStatus status = nn.getRpcServer().getFileInfo(testdir);
     long dirId = status.getFileId();
-    FileHandle handle = new FileHandle(dirId);
+    int namenodeId = Nfs3Utils.getNamenodeId(config);
+    FileHandle handle = new FileHandle(dirId, namenodeId);
     XDR xdr_req = new XDR();
     READDIRPLUS3Request req = new READDIRPLUS3Request(handle, 0, 0, 3, 2);
     req.serialize(xdr_req);
@@ -659,7 +677,8 @@ public class TestRpcProgramNfs3 {
   public void testFsstat() throws Exception {
     HdfsFileStatus status = nn.getRpcServer().getFileInfo("/tmp/bar");
     long dirId = status.getFileId();
-    FileHandle handle = new FileHandle(dirId);
+    int namenodeId = Nfs3Utils.getNamenodeId(config);
+    FileHandle handle = new FileHandle(dirId, namenodeId);
     XDR xdr_req = new XDR();
     FSSTAT3Request req = new FSSTAT3Request(handle);
     req.serialize(xdr_req);
@@ -682,7 +701,8 @@ public class TestRpcProgramNfs3 {
   public void testFsinfo() throws Exception {
     HdfsFileStatus status = nn.getRpcServer().getFileInfo("/tmp/bar");
     long dirId = status.getFileId();
-    FileHandle handle = new FileHandle(dirId);
+    int namenodeId = Nfs3Utils.getNamenodeId(config);
+    FileHandle handle = new FileHandle(dirId, namenodeId);
     XDR xdr_req = new XDR();
     FSINFO3Request req = new FSINFO3Request(handle);
     req.serialize(xdr_req);
@@ -705,7 +725,8 @@ public class TestRpcProgramNfs3 {
   public void testPathconf() throws Exception {
     HdfsFileStatus status = nn.getRpcServer().getFileInfo("/tmp/bar");
     long dirId = status.getFileId();
-    FileHandle handle = new FileHandle(dirId);
+    int namenodeId = Nfs3Utils.getNamenodeId(config);
+    FileHandle handle = new FileHandle(dirId, namenodeId);
     XDR xdr_req = new XDR();
     PATHCONF3Request req = new PATHCONF3Request(handle);
     req.serialize(xdr_req);
@@ -728,7 +749,8 @@ public class TestRpcProgramNfs3 {
   public void testCommit() throws Exception {
     HdfsFileStatus status = nn.getRpcServer().getFileInfo("/tmp/bar");
     long dirId = status.getFileId();
-    FileHandle handle = new FileHandle(dirId);
+    int namenodeId = Nfs3Utils.getNamenodeId(config);
+    FileHandle handle = new FileHandle(dirId, namenodeId);
     XDR xdr_req = new XDR();
     COMMIT3Request req = new COMMIT3Request(handle, 0, 5);
     req.serialize(xdr_req);
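Every hunk above applies the same mechanical change: resolve the namenode id from the gateway configuration, then fold it into the file handle. The new test below exercises the gateway against a ViewFs mount of two federated namespaces. As a hedged sketch of the mount-table wiring it depends on (the individual calls appear in the test's setup; the wrapper class, method, and link names are illustrative):

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FsConstants;
import org.apache.hadoop.fs.viewfs.ConfigUtil;

public class ViewFsMountSketch {
  // Makes viewfs:// the default filesystem and maps one mount point
  // to each federated namespace.
  public static FileSystem mount(Configuration conf, URI ns1, URI ns2)
      throws Exception {
    conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY,
        FsConstants.VIEWFS_URI.toString());
    ConfigUtil.addLink(conf, "/hdfs1", ns1);  // e.g. a path on namenode 1
    ConfigUtil.addLink(conf, "/hdfs2", ns2);  // e.g. a path on namenode 2
    return FileSystem.get(conf);
  }
}

Each ViewFs link becomes one NFS export, which is what the testNumExports case below asserts.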
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d6602b5f/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestViewfsWithNfs3.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestViewfsWithNfs3.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestViewfsWithNfs3.java
new file mode 100644
index 0000000..a5997b4
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestViewfsWithNfs3.java
@@ -0,0 +1,330 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.nfs.nfs3;
+
+import org.apache.hadoop.crypto.key.JavaKeyStoreProvider;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FileSystemTestHelper;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
+import org.apache.hadoop.fs.FsConstants;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.viewfs.ConfigUtil;
+import org.apache.hadoop.hdfs.DFSTestUtil;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.MiniDFSNNTopology;
+import org.apache.hadoop.hdfs.client.HdfsAdmin;
+import org.apache.hadoop.hdfs.nfs.conf.NfsConfigKeys;
+import org.apache.hadoop.hdfs.nfs.conf.NfsConfiguration;
+import org.apache.hadoop.hdfs.nfs.mount.RpcProgramMountd;
+import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
+import org.apache.hadoop.hdfs.server.namenode.NameNode;
+import org.apache.hadoop.nfs.nfs3.FileHandle;
+import org.apache.hadoop.nfs.nfs3.Nfs3Constant;
+import org.apache.hadoop.nfs.nfs3.Nfs3Status;
+import org.apache.hadoop.nfs.nfs3.request.GETATTR3Request;
+import org.apache.hadoop.nfs.nfs3.request.RENAME3Request;
+import org.apache.hadoop.nfs.nfs3.response.GETATTR3Response;
+import org.apache.hadoop.nfs.nfs3.request.WRITE3Request;
+import org.apache.hadoop.nfs.nfs3.response.RENAME3Response;
+import org.apache.hadoop.nfs.nfs3.response.WRITE3Response;
+import org.apache.hadoop.oncrpc.XDR;
+import org.apache.hadoop.oncrpc.security.SecurityHandler;
+import org.apache.hadoop.security.authorize.DefaultImpersonationProvider;
+import org.apache.hadoop.security.authorize.ProxyUsers;
+import org.junit.BeforeClass;
+import org.junit.AfterClass;
+import org.junit.Test;
+import org.junit.Assert;
+import org.mockito.Mockito;
+
+import java.io.File;
+import java.net.InetSocketAddress;
+import java.nio.ByteBuffer;
+
+import static org.junit.Assert.assertEquals;
+
+
+/**
+ * Tests for {@link RpcProgramNfs3} with
+ * {@link org.apache.hadoop.fs.viewfs.ViewFileSystem}.
+ */
+public class TestViewfsWithNfs3 {
+  private static DistributedFileSystem hdfs1;
+  private static DistributedFileSystem hdfs2;
+  private static MiniDFSCluster cluster = null;
+  private static NfsConfiguration config = new NfsConfiguration();
+  private static HdfsAdmin dfsAdmin1;
+  private static HdfsAdmin dfsAdmin2;
+  private static FileSystem viewFs;
+
+  private static NameNode nn1;
+  private static NameNode nn2;
+  private static Nfs3 nfs;
+  private static RpcProgramNfs3 nfsd;
+  private static RpcProgramMountd mountd;
+  private static SecurityHandler securityHandler;
+  private static FileSystemTestHelper fsHelper;
+  private static File testRootDir;
+
+  @BeforeClass
+  public static void setup() throws Exception {
+    String currentUser = System.getProperty("user.name");
+
+    config.set("fs.permissions.umask-mode", "u=rwx,g=,o=");
+    config.set(DefaultImpersonationProvider.getTestProvider()
+        .getProxySuperuserGroupConfKey(currentUser), "*");
+    config.set(DefaultImpersonationProvider.getTestProvider()
+        .getProxySuperuserIpConfKey(currentUser), "*");
+    fsHelper = new FileSystemTestHelper();
+    // Set up java key store
+    String testRoot = fsHelper.getTestRootDir();
+    testRootDir = new File(testRoot).getAbsoluteFile();
+    final Path jksPath = new Path(testRootDir.toString(), "test.jks");
+    config.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_KEY_PROVIDER_PATH,
+        JavaKeyStoreProvider.SCHEME_NAME + "://file" + jksPath.toUri());
+    ProxyUsers.refreshSuperUserGroupsConfiguration(config);
+
+    cluster =
+        new MiniDFSCluster.Builder(config).nnTopology(
+            MiniDFSNNTopology.simpleFederatedTopology(2))
+            .numDataNodes(2)
+            .build();
+    cluster.waitActive();
+    hdfs1 = cluster.getFileSystem(0);
+    hdfs2 = cluster.getFileSystem(1);
+
+    nn1 = cluster.getNameNode(0);
+    nn2 = cluster.getNameNode(1);
+    nn2.getServiceRpcAddress();
+    dfsAdmin1 = new HdfsAdmin(cluster.getURI(0), config);
+    dfsAdmin2 = new HdfsAdmin(cluster.getURI(1), config);
+
+    // Use ephemeral ports in case tests are running in parallel
+    config.setInt("nfs3.mountd.port", 0);
+    config.setInt("nfs3.server.port", 0);
+    config.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY,
+        FsConstants.VIEWFS_URI.toString());
+    // Start NFS with allowed.hosts set to "* rw"
+    config.set("dfs.nfs.exports.allowed.hosts", "* rw");
+
+    Path base1 = new Path("/user1");
+    Path base2 = new Path("/user2");
+    hdfs1.delete(base1, true);
+    hdfs2.delete(base2, true);
+    hdfs1.mkdirs(base1);
+    hdfs2.mkdirs(base2);
+    ConfigUtil.addLink(config, "/hdfs1", hdfs1.makeQualified(base1).toUri());
+    ConfigUtil.addLink(config, "/hdfs2", hdfs2.makeQualified(base2).toUri());
+
+
+    viewFs = FileSystem.get(config);
+    config.setStrings(NfsConfigKeys.DFS_NFS_EXPORT_POINT_KEY,
+        "/hdfs1", "/hdfs2");
+
+    nfs = new Nfs3(config);
+    nfs.startServiceInternal(false);
+    nfsd = (RpcProgramNfs3) nfs.getRpcProgram();
+    mountd = (RpcProgramMountd) nfs.getMountd().getRpcProgram();
+
+    // Mock SecurityHandler which returns system user.name
+    securityHandler = Mockito.mock(SecurityHandler.class);
+    Mockito.when(securityHandler.getUser()).thenReturn(currentUser);
+    viewFs.delete(new Path("/hdfs2/dir2"), true);
+    viewFs.mkdirs(new Path("/hdfs2/dir2"));
+    DFSTestUtil.createFile(viewFs, new Path("/hdfs1/file1"), 0, (short) 1, 0);
+    DFSTestUtil.createFile(viewFs, new Path("/hdfs1/file2"), 0, (short) 1, 0);
+    DFSTestUtil.createFile(viewFs, new Path("/hdfs1/write1"), 0, (short) 1, 0);
+    DFSTestUtil.createFile(viewFs, new Path("/hdfs2/write2"), 0, (short) 1, 0);
+    DFSTestUtil.createFile(viewFs, new Path("/hdfs1/renameMultiNN"),
Path("/hdfs1/renameMultiNN"), + 0, (short) 1, 0); + DFSTestUtil.createFile(viewFs, new Path("/hdfs1/renameSingleNN"), + 0, (short) 1, 0); + } + + @AfterClass + public static void shutdown() throws Exception { + if (cluster != null) { + cluster.shutdown(); + } + } + + @Test + public void testNumExports() throws Exception { + Assert.assertEquals(mountd.getExports().size(), + viewFs.getChildFileSystems().length); + } + + @Test + public void testPaths() throws Exception { + Assert.assertEquals(hdfs1.resolvePath(new Path("/user1/file1")), + viewFs.resolvePath(new Path("/hdfs1/file1"))); + Assert.assertEquals(hdfs1.resolvePath(new Path("/user1/file2")), + viewFs.resolvePath(new Path("/hdfs1/file2"))); + Assert.assertEquals(hdfs2.resolvePath(new Path("/user2/dir2")), + viewFs.resolvePath(new Path("/hdfs2/dir2"))); + } + + @Test + public void testFileStatus() throws Exception { + HdfsFileStatus status = nn1.getRpcServer().getFileInfo("/user1/file1"); + FileStatus st = viewFs.getFileStatus(new Path("/hdfs1/file1")); + Assert.assertEquals(st.isDirectory(), status.isDirectory()); + + HdfsFileStatus status2 = nn2.getRpcServer().getFileInfo("/user2/dir2"); + FileStatus st2 = viewFs.getFileStatus(new Path("/hdfs2/dir2")); + Assert.assertEquals(st2.isDirectory(), status2.isDirectory()); + } + + // Test for getattr + private void testNfsGetAttrResponse(long fileId, int namenodeId, + int expectedStatus) { + FileHandle handle = new FileHandle(fileId, namenodeId); + XDR xdrReq = new XDR(); + GETATTR3Request req = new GETATTR3Request(handle); + req.serialize(xdrReq); + GETATTR3Response response = nfsd.getattr(xdrReq.asReadOnlyWrap(), + securityHandler, new InetSocketAddress("localhost", 1234)); + Assert.assertEquals("Incorrect return code", + expectedStatus, response.getStatus()); + } + + @Test (timeout = 60000) + public void testNfsAccessNN1() throws Exception { + HdfsFileStatus status = nn1.getRpcServer().getFileInfo("/user1/file1"); + int namenodeId = Nfs3Utils.getNamenodeId(config, hdfs1.getUri()); + testNfsGetAttrResponse(status.getFileId(), namenodeId, Nfs3Status.NFS3_OK); + } + + @Test (timeout = 60000) + public void testNfsAccessNN2() throws Exception { + HdfsFileStatus status = nn2.getRpcServer().getFileInfo("/user2/dir2"); + int namenodeId = Nfs3Utils.getNamenodeId(config, hdfs2.getUri()); + testNfsGetAttrResponse(status.getFileId(), namenodeId, Nfs3Status.NFS3_OK); + } + + @Test (timeout = 60000) + public void testWrongNfsAccess() throws Exception { + DFSTestUtil.createFile(viewFs, new Path("/hdfs1/file3"), 0, (short) 1, 0); + HdfsFileStatus status = nn1.getRpcServer().getFileInfo("/user1/file3"); + int namenodeId = Nfs3Utils.getNamenodeId(config, hdfs2.getUri()); + testNfsGetAttrResponse(status.getFileId(), namenodeId, + Nfs3Status.NFS3ERR_IO); + } + + // Test for write + private void testNfsWriteResponse(long dirId, int namenodeId) + throws Exception { + FileHandle handle = new FileHandle(dirId, namenodeId); + + byte[] buffer = new byte[10]; + for (int i = 0; i < 10; i++) { + buffer[i] = (byte) i; + } + + WRITE3Request writeReq = new WRITE3Request(handle, 0, 10, + Nfs3Constant.WriteStableHow.DATA_SYNC, ByteBuffer.wrap(buffer)); + XDR xdrReq = new XDR(); + writeReq.serialize(xdrReq); + + // Attempt by a priviledged user should pass. 
+    WRITE3Response response = nfsd.write(xdrReq.asReadOnlyWrap(),
+        null, 1, securityHandler,
+        new InetSocketAddress("localhost", 1234));
+    Assert.assertEquals("Incorrect response:", null, response);
+  }
+
+  @Test (timeout = 60000)
+  public void testNfsWriteNN1() throws Exception {
+    HdfsFileStatus status = nn1.getRpcServer().getFileInfo("/user1/write1");
+    int namenodeId = Nfs3Utils.getNamenodeId(config, hdfs1.getUri());
+    testNfsWriteResponse(status.getFileId(), namenodeId);
+  }
+
+  @Test (timeout = 60000)
+  public void testNfsWriteNN2() throws Exception {
+    HdfsFileStatus status = nn2.getRpcServer().getFileInfo("/user2/write2");
+    int namenodeId = Nfs3Utils.getNamenodeId(config, hdfs2.getUri());
+    testNfsWriteResponse(status.getFileId(), namenodeId);
+  }
+
+  // Test for rename
+  private void testNfsRename(FileHandle fromDirHandle, String fromFileName,
+                             FileHandle toDirHandle, String toFileName,
+                             int expectedStatus) throws Exception {
+    XDR xdrReq = new XDR();
+    RENAME3Request req = new RENAME3Request(fromDirHandle, fromFileName,
+        toDirHandle, toFileName);
+    req.serialize(xdrReq);
+
+    // Attempt by a privileged user should pass.
+    RENAME3Response response = nfsd.rename(xdrReq.asReadOnlyWrap(),
+        securityHandler, new InetSocketAddress("localhost", 1234));
+    assertEquals(expectedStatus, response.getStatus());
+  }
+
+  @Test (timeout = 60000)
+  public void testNfsRenameMultiNN() throws Exception {
+    HdfsFileStatus fromFileStatus = nn1.getRpcServer().getFileInfo("/user1");
+    int fromNNId = Nfs3Utils.getNamenodeId(config, hdfs1.getUri());
+    FileHandle fromHandle =
+        new FileHandle(fromFileStatus.getFileId(), fromNNId);
+
+    HdfsFileStatus toFileStatus = nn2.getRpcServer().getFileInfo("/user2");
+    int toNNId = Nfs3Utils.getNamenodeId(config, hdfs2.getUri());
+    FileHandle toHandle = new FileHandle(toFileStatus.getFileId(), toNNId);
+
+    HdfsFileStatus statusBeforeRename =
+        nn1.getRpcServer().getFileInfo("/user1/renameMultiNN");
+    Assert.assertEquals(statusBeforeRename.isDirectory(), false);
+
+    testNfsRename(fromHandle, "renameMultiNN",
+        toHandle, "renameMultiNNFail", Nfs3Status.NFS3ERR_INVAL);
+
+    HdfsFileStatus statusAfterRename =
+        nn2.getRpcServer().getFileInfo("/user2/renameMultiNNFail");
+    Assert.assertEquals(statusAfterRename, null);
+
+    statusAfterRename = nn1.getRpcServer().getFileInfo("/user1/renameMultiNN");
+    Assert.assertEquals(statusAfterRename.isDirectory(), false);
+  }
+
+  @Test (timeout = 60000)
+  public void testNfsRenameSingleNN() throws Exception {
+    HdfsFileStatus fromFileStatus = nn1.getRpcServer().getFileInfo("/user1");
+    int fromNNId = Nfs3Utils.getNamenodeId(config, hdfs1.getUri());
+    FileHandle fromHandle =
+        new FileHandle(fromFileStatus.getFileId(), fromNNId);
+
+    HdfsFileStatus statusBeforeRename =
+        nn1.getRpcServer().getFileInfo("/user1/renameSingleNN");
+    Assert.assertEquals(statusBeforeRename.isDirectory(), false);
+
+    testNfsRename(fromHandle, "renameSingleNN",
+        fromHandle, "renameSingleNNSuccess", Nfs3Status.NFS3_OK);
+
+    HdfsFileStatus statusAfterRename =
+        nn1.getRpcServer().getFileInfo("/user1/renameSingleNNSuccess");
+    Assert.assertEquals(statusAfterRename.isDirectory(), false);
+
+    statusAfterRename =
+        nn1.getRpcServer().getFileInfo("/user1/renameSingleNN");
+    Assert.assertEquals(statusAfterRename, null);
+  }
+}
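The rename tests above pin down the federation semantics: a rename is valid only within a single namespace, and a cross-namenode attempt fails with NFS3ERR_INVAL rather than moving data between namespaces. A short illustrative sketch of building per-namespace handles the way those tests do (assumes a gateway configuration and the URIs of two mounted namespaces; the wrapper class and variable names are illustrative):

import java.net.URI;
import org.apache.hadoop.hdfs.nfs.conf.NfsConfiguration;
import org.apache.hadoop.hdfs.nfs.nfs3.Nfs3Utils;
import org.apache.hadoop.nfs.nfs3.FileHandle;

public class CrossNamenodeHandles {
  // Resolve a distinct namenode id per namespace URI, then bind each
  // file id to the namespace that owns it. Handles with different
  // namenode ids must never be mixed in one rename request.
  public static FileHandle[] handles(NfsConfiguration config,
      URI ns1, URI ns2, long fileIdOnNs1, long fileIdOnNs2) {
    int nn1Id = Nfs3Utils.getNamenodeId(config, ns1);
    int nn2Id = Nfs3Utils.getNamenodeId(config, ns2);
    return new FileHandle[] {
        new FileHandle(fileIdOnNs1, nn1Id),
        new FileHandle(fileIdOnNs2, nn2Id)};
  }
}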
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d6602b5f/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestWrites.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestWrites.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestWrites.java
index 9c327c4..f7a92fa 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestWrites.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestWrites.java
@@ -481,6 +481,7 @@ public class TestWrites {
     cluster = new MiniDFSCluster.Builder(config).numDataNodes(1).build();
     cluster.waitActive();
     client = new DFSClient(DFSUtilClient.getNNAddress(config), config);
+    int namenodeId = Nfs3Utils.getNamenodeId(config);
 
     // Use ephemeral port in case tests are running in parallel
     config.setInt("nfs3.mountd.port", 0);
@@ -492,7 +493,7 @@ public class TestWrites {
     nfsd = (RpcProgramNfs3) nfs3.getRpcProgram();
 
     HdfsFileStatus status = client.getFileInfo("/");
-    FileHandle rootHandle = new FileHandle(status.getFileId());
+    FileHandle rootHandle = new FileHandle(status.getFileId(), namenodeId);
     // Create file1
     CREATE3Request createReq = new CREATE3Request(rootHandle, "file1",
         Nfs3Constant.CREATE_UNCHECKED, new SetAttr3(), 0);
@@ -598,8 +599,9 @@ public class TestWrites {
 
     DFSClient dfsClient = new DFSClient(DFSUtilClient.getNNAddress(config),
         config);
+    int namenodeId = Nfs3Utils.getNamenodeId(config);
     HdfsFileStatus status = dfsClient.getFileInfo("/");
-    FileHandle rootHandle = new FileHandle(status.getFileId());
+    FileHandle rootHandle = new FileHandle(status.getFileId(), namenodeId);
     CREATE3Request createReq =
         new CREATE3Request(rootHandle,
             "out-of-order-write" + System.currentTimeMillis(),
@@ -674,8 +676,9 @@ public class TestWrites {
 
     DFSClient dfsClient = new DFSClient(DFSUtilClient.getNNAddress(config),
         config);
+    int namenodeId = Nfs3Utils.getNamenodeId(config);
     HdfsFileStatus status = dfsClient.getFileInfo("/");
-    FileHandle rootHandle = new FileHandle(status.getFileId());
+    FileHandle rootHandle = new FileHandle(status.getFileId(), namenodeId);
     CREATE3Request createReq =
         new CREATE3Request(rootHandle,
             "overlapping-writes" + System.currentTimeMillis(),
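For the single-namespace setup in TestWrites, the same pattern degenerates to the default namenode from the configuration. A hedged sketch of obtaining the root handle the way these hunks do (the individual calls appear verbatim in the diff above; the wrapper class and method are illustrative):

import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.DFSUtilClient;
import org.apache.hadoop.hdfs.nfs.conf.NfsConfiguration;
import org.apache.hadoop.hdfs.nfs.nfs3.Nfs3Utils;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.nfs.nfs3.FileHandle;

public class RootHandleSketch {
  // With a single (default) namespace, the namenode id still gets baked
  // into the handle; it simply always resolves to the same value.
  public static FileHandle rootHandle(NfsConfiguration config)
      throws Exception {
    DFSClient client =
        new DFSClient(DFSUtilClient.getNNAddress(config), config);
    HdfsFileStatus status = client.getFileInfo("/");
    return new FileHandle(status.getFileId(), Nfs3Utils.getNamenodeId(config));
  }
}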