Subject: svn commit: r1603664 - in /hadoop/common/branches/fs-encryption/hadoop-hdfs-project: hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/conf/ hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/mount/ hadoop-hdfs-nfs/src/main/java/org/apache...
Date: Wed, 18 Jun 2014 23:15:10 -0000
From: wang@apache.org
To: hdfs-commits@hadoop.apache.org
Reply-To: hdfs-dev@hadoop.apache.org
Message-Id: <20140618231511.583AB23889F7@eris.apache.org>

Author: wang
Date: Wed Jun 18 23:15:04 2014
New Revision: 1603664

URL: http://svn.apache.org/r1603664
Log:
Merge trunk r1603663 to branch.
Added:
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestReaddir.java
      - copied unchanged from r1603663, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestReaddir.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeleteRace.java
      - copied unchanged from r1603663, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeleteRace.java
Removed:
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/TestReaddir.java
Modified:
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/   (props changed)
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/conf/NfsConfigKeys.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/conf/NfsConfiguration.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/mount/RpcProgramMountd.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/Nfs3.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestWrites.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/pom.xml
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/   (props changed)
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/Federation.apt.vm
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsNfsGateway.apt.vm
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsUserGuide.apt.vm
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/site/xdoc/HdfsRollingUpgrade.xml
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfo.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCacheDirectives.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java

Propchange: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs:r1602934-1603663

Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/conf/NfsConfigKeys.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/conf/NfsConfigKeys.java?rev=1603664&r1=1603663&r2=1603664&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/conf/NfsConfigKeys.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/conf/NfsConfigKeys.java Wed Jun 18 23:15:04 2014
@@ -51,7 +51,8 @@ public class NfsConfigKeys {
   public static final String DFS_NFS_KEYTAB_FILE_KEY = "nfs.keytab.file";
   public static final String DFS_NFS_KERBEROS_PRINCIPAL_KEY = "nfs.kerberos.principal";
   public static final String DFS_NFS_REGISTRATION_PORT_KEY = "nfs.registration.port";
-  public static final int DFS_NFS_REGISTRATION_PORT_DEFAULT = 40; // Currently unassigned.
-  public static final String DFS_NFS_ALLOW_INSECURE_PORTS_KEY = "nfs.allow.insecure.ports";
-  public static final boolean DFS_NFS_ALLOW_INSECURE_PORTS_DEFAULT = true;
+  public static final int DFS_NFS_REGISTRATION_PORT_DEFAULT = 40; // Currently unassigned.
+  public static final String DFS_NFS_PORT_MONITORING_DISABLED_KEY = "nfs.port.monitoring.disabled";
+  public static final boolean DFS_NFS_PORT_MONITORING_DISABLED_DEFAULT = true;
+
 }
\ No newline at end of file

Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/conf/NfsConfiguration.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/conf/NfsConfiguration.java?rev=1603664&r1=1603663&r2=1603664&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/conf/NfsConfiguration.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/conf/NfsConfiguration.java Wed Jun 18 23:15:04 2014
@@ -49,6 +49,8 @@ public class NfsConfiguration extends Hd
         new DeprecationDelta("dfs.nfs3.stream.timeout",
             NfsConfigKeys.DFS_NFS_STREAM_TIMEOUT_KEY),
         new DeprecationDelta("dfs.nfs3.export.point",
-            NfsConfigKeys.DFS_NFS_EXPORT_POINT_KEY) });
+            NfsConfigKeys.DFS_NFS_EXPORT_POINT_KEY),
+        new DeprecationDelta("nfs.allow.insecure.ports",
+            NfsConfigKeys.DFS_NFS_PORT_MONITORING_DISABLED_KEY) });
   }
 }
\ No newline at end of file
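Side note: the DeprecationDelta added above means a value set under the legacy key
"nfs.allow.insecure.ports" is read back through the new "nfs.port.monitoring.disabled"
key. A hedged sketch of that behavior (illustrative usage, not code from this commit):

    // Illustrative only; assumes nothing beyond the deprecation mapping above.
    NfsConfiguration conf = new NfsConfiguration();
    conf.set("nfs.allow.insecure.ports", "false");  // legacy key, e.g. from an old hdfs-site.xml
    boolean monitoringDisabled = conf.getBoolean(
        NfsConfigKeys.DFS_NFS_PORT_MONITORING_DISABLED_KEY,
        NfsConfigKeys.DFS_NFS_PORT_MONITORING_DISABLED_DEFAULT);
    // monitoringDisabled == false: the legacy value carries over, so the
    // gateway keeps enforcing privileged-port checking.

Both keys share the same polarity (true = permissive), which is what lets the rename
reuse the raw configured value.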
Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/mount/RpcProgramMountd.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/mount/RpcProgramMountd.java?rev=1603664&r1=1603663&r2=1603664&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/mount/RpcProgramMountd.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/mount/RpcProgramMountd.java Wed Jun 18 23:15:04 2014
@@ -194,7 +194,13 @@ public class RpcProgramMountd extends Rp
     if (mntproc == MNTPROC.NULL) {
       out = nullOp(out, xid, client);
     } else if (mntproc == MNTPROC.MNT) {
-      out = mnt(xdr, out, xid, client);
+      // Only do port monitoring for MNT
+      if (!doPortMonitoring(info.remoteAddress())) {
+        out = MountResponse.writeMNTResponse(Nfs3Status.NFS3ERR_ACCES, out,
+            xid, null);
+      } else {
+        out = mnt(xdr, out, xid, client);
+      }
     } else if (mntproc == MNTPROC.DUMP) {
       out = dump(out, xid, client);
     } else if (mntproc == MNTPROC.UMNT) {

Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/Nfs3.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/Nfs3.java?rev=1603664&r1=1603663&r2=1603664&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/Nfs3.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/Nfs3.java Wed Jun 18 23:15:04 2014
@@ -61,8 +61,8 @@ public class Nfs3 extends Nfs3Base {
     StringUtils.startupShutdownMessage(Nfs3.class, args, LOG);
     NfsConfiguration conf = new NfsConfiguration();
     boolean allowInsecurePorts = conf.getBoolean(
-        NfsConfigKeys.DFS_NFS_ALLOW_INSECURE_PORTS_KEY,
-        NfsConfigKeys.DFS_NFS_ALLOW_INSECURE_PORTS_DEFAULT);
+        NfsConfigKeys.DFS_NFS_PORT_MONITORING_DISABLED_KEY,
+        NfsConfigKeys.DFS_NFS_PORT_MONITORING_DISABLED_DEFAULT);
     final Nfs3 nfsServer = new Nfs3(conf, registrationSocket,
         allowInsecurePorts);
     nfsServer.startServiceInternal(true);
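Side note: both hunks above depend on doPortMonitoring(SocketAddress), which lives in
the shared org.apache.hadoop.oncrpc.RpcProgram base class and is not shown in this
diff. A hedged sketch of what that check is expected to do — reject requests whose
source port is unprivileged when monitoring is on; treat the details as illustrative,
not the exact implementation:

    // Sketch only; assumes allowInsecurePorts holds the value of
    // nfs.port.monitoring.disabled read at startup (see Nfs3.java above).
    protected boolean doPortMonitoring(SocketAddress remoteAddress) {
      if (!allowInsecurePorts && remoteAddress instanceof InetSocketAddress) {
        int port = ((InetSocketAddress) remoteAddress).getPort();
        if (port > 1023) {
          // Only root can bind ports <= 1023 on the client side, so an
          // unprivileged source port means the request did not originate
          // from a root-owned process.
          LOG.warn("Connection attempt from unprivileged port: " + port);
          return false;
        }
      }
      return true;
    }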
Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java?rev=1603664&r1=1603663&r2=1603664&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java Wed Jun 18 23:15:04 2014
@@ -23,6 +23,7 @@ import java.io.IOException;
 import java.net.DatagramSocket;
 import java.net.InetAddress;
 import java.net.InetSocketAddress;
+import java.net.SocketAddress;
 import java.nio.ByteBuffer;
 import java.util.EnumSet;
@@ -230,15 +231,15 @@ public class RpcProgramNfs3 extends RpcP
   }
 
   @Override
-  public GETATTR3Response getattr(XDR xdr, SecurityHandler securityHandler,
-      InetAddress client) {
+  public GETATTR3Response getattr(XDR xdr, RpcInfo info) {
     GETATTR3Response response = new GETATTR3Response(Nfs3Status.NFS3_OK);
 
-    if (!checkAccessPrivilege(client, AccessPrivilege.READ_ONLY)) {
+    if (!checkAccessPrivilege(info, AccessPrivilege.READ_ONLY)) {
       response.setStatus(Nfs3Status.NFS3ERR_ACCES);
       return response;
     }
 
+    SecurityHandler securityHandler = getSecurityHandler(info);
     DFSClient dfsClient = clientCache.getDfsClient(securityHandler.getUser());
     if (dfsClient == null) {
       response.setStatus(Nfs3Status.NFS3ERR_SERVERFAULT);
@@ -322,9 +323,9 @@ public class RpcProgramNfs3 extends RpcP
   }
 
   @Override
-  public SETATTR3Response setattr(XDR xdr, SecurityHandler securityHandler,
-      InetAddress client) {
+  public SETATTR3Response setattr(XDR xdr, RpcInfo info) {
     SETATTR3Response response = new SETATTR3Response(Nfs3Status.NFS3_OK);
+    SecurityHandler securityHandler = getSecurityHandler(info);
     DFSClient dfsClient = clientCache.getDfsClient(securityHandler.getUser());
     if (dfsClient == null) {
       response.setStatus(Nfs3Status.NFS3ERR_SERVERFAULT);
@@ -370,7 +371,7 @@ public class RpcProgramNfs3 extends RpcP
     }
 
     // check the write access privilege
-    if (!checkAccessPrivilege(client, AccessPrivilege.READ_WRITE)) {
+    if (!checkAccessPrivilege(info, AccessPrivilege.READ_WRITE)) {
       return new SETATTR3Response(Nfs3Status.NFS3ERR_ACCES, new WccData(
           preOpWcc, preOpAttr));
     }
@@ -398,15 +399,15 @@ public class RpcProgramNfs3 extends RpcP
   }
 
   @Override
-  public LOOKUP3Response lookup(XDR xdr, SecurityHandler securityHandler,
-      InetAddress client) {
+  public LOOKUP3Response lookup(XDR xdr, RpcInfo info) {
     LOOKUP3Response response = new LOOKUP3Response(Nfs3Status.NFS3_OK);
 
-    if (!checkAccessPrivilege(client, AccessPrivilege.READ_ONLY)) {
+    if (!checkAccessPrivilege(info, AccessPrivilege.READ_ONLY)) {
       response.setStatus(Nfs3Status.NFS3ERR_ACCES);
       return response;
     }
 
+    SecurityHandler securityHandler = getSecurityHandler(info);
     DFSClient dfsClient = clientCache.getDfsClient(securityHandler.getUser());
     if (dfsClient == null) {
       response.setStatus(Nfs3Status.NFS3ERR_SERVERFAULT);
@@ -460,15 +461,15 @@ public class RpcProgramNfs3 extends RpcP
   }
 
   @Override
-  public ACCESS3Response access(XDR xdr, SecurityHandler securityHandler,
-      InetAddress client) {
+  public ACCESS3Response access(XDR xdr, RpcInfo info) {
     ACCESS3Response response = new ACCESS3Response(Nfs3Status.NFS3_OK);
 
-    if (!checkAccessPrivilege(client, AccessPrivilege.READ_ONLY)) {
+    if (!checkAccessPrivilege(info, AccessPrivilege.READ_ONLY)) {
       response.setStatus(Nfs3Status.NFS3ERR_ACCES);
       return response;
     }
 
+    SecurityHandler securityHandler = getSecurityHandler(info);
     DFSClient dfsClient = clientCache.getDfsClient(securityHandler.getUser());
     if (dfsClient == null) {
       response.setStatus(Nfs3Status.NFS3ERR_SERVERFAULT);
@@ -519,15 +520,16 @@ public class RpcProgramNfs3 extends RpcP
     }
   }
 
-  public READLINK3Response readlink(XDR xdr, SecurityHandler securityHandler,
-      InetAddress client) {
+  @Override
+  public READLINK3Response readlink(XDR xdr, RpcInfo info) {
     READLINK3Response response = new READLINK3Response(Nfs3Status.NFS3_OK);
 
-    if (!checkAccessPrivilege(client, AccessPrivilege.READ_ONLY)) {
+    if (!checkAccessPrivilege(info, AccessPrivilege.READ_ONLY)) {
       response.setStatus(Nfs3Status.NFS3ERR_ACCES);
       return response;
     }
 
+    SecurityHandler securityHandler = getSecurityHandler(info);
     DFSClient dfsClient = clientCache.getDfsClient(securityHandler.getUser());
     if (dfsClient == null) {
       response.setStatus(Nfs3Status.NFS3ERR_SERVERFAULT);
@@ -591,12 +593,19 @@ public class RpcProgramNfs3 extends RpcP
   }
 
   @Override
-  public READ3Response read(XDR xdr, SecurityHandler securityHandler,
-      InetAddress client) {
+  public READ3Response read(XDR xdr, RpcInfo info) {
+    SecurityHandler securityHandler = getSecurityHandler(info);
+    SocketAddress remoteAddress = info.remoteAddress();
+    return read(xdr, securityHandler, remoteAddress);
+  }
+
+  @VisibleForTesting
+  READ3Response read(XDR xdr, SecurityHandler securityHandler,
+      SocketAddress remoteAddress) {
     READ3Response response = new READ3Response(Nfs3Status.NFS3_OK);
     final String userName = securityHandler.getUser();
 
-    if (!checkAccessPrivilege(client, AccessPrivilege.READ_ONLY)) {
+    if (!checkAccessPrivilege(remoteAddress, AccessPrivilege.READ_ONLY)) {
       response.setStatus(Nfs3Status.NFS3ERR_ACCES);
       return response;
     }
@@ -715,8 +724,17 @@ public class RpcProgramNfs3 extends RpcP
   }
 
   @Override
-  public WRITE3Response write(XDR xdr, Channel channel, int xid,
-      SecurityHandler securityHandler, InetAddress client) {
+  public WRITE3Response write(XDR xdr, RpcInfo info) {
+    SecurityHandler securityHandler = getSecurityHandler(info);
+    RpcCall rpcCall = (RpcCall) info.header();
+    int xid = rpcCall.getXid();
+    SocketAddress remoteAddress = info.remoteAddress();
+    return write(xdr, info.channel(), xid, securityHandler, remoteAddress);
+  }
+
+  @VisibleForTesting
+  WRITE3Response write(XDR xdr, Channel channel, int xid,
+      SecurityHandler securityHandler, SocketAddress remoteAddress) {
     WRITE3Response response = new WRITE3Response(Nfs3Status.NFS3_OK);
 
     DFSClient dfsClient = clientCache.getDfsClient(securityHandler.getUser());
@@ -758,7 +776,7 @@ public class RpcProgramNfs3 extends RpcP
       return new WRITE3Response(Nfs3Status.NFS3ERR_STALE);
     }
 
-    if (!checkAccessPrivilege(client, AccessPrivilege.READ_WRITE)) {
+    if (!checkAccessPrivilege(remoteAddress, AccessPrivilege.READ_WRITE)) {
       return new WRITE3Response(Nfs3Status.NFS3ERR_ACCES, new WccData(
           Nfs3Utils.getWccAttr(preOpAttr), preOpAttr), 0, stableHow,
           Nfs3Constant.WRITE_COMMIT_VERF);
@@ -791,8 +809,15 @@ public class RpcProgramNfs3 extends RpcP
   }
 
   @Override
-  public CREATE3Response create(XDR xdr, SecurityHandler securityHandler,
-      InetAddress client) {
+  public CREATE3Response create(XDR xdr, RpcInfo info) {
+    SecurityHandler securityHandler = getSecurityHandler(info);
+    SocketAddress remoteAddress = info.remoteAddress();
+    return create(xdr, securityHandler, remoteAddress);
+  }
+
+  @VisibleForTesting
+  CREATE3Response create(XDR xdr, SecurityHandler securityHandler,
+      SocketAddress remoteAddress) {
     CREATE3Response response = new CREATE3Response(Nfs3Status.NFS3_OK);
     DFSClient dfsClient = clientCache.getDfsClient(securityHandler.getUser());
     if (dfsClient == null) {
@@ -838,7 +863,7 @@ public class RpcProgramNfs3 extends RpcP
       return new CREATE3Response(Nfs3Status.NFS3ERR_STALE);
     }
 
-    if (!checkAccessPrivilege(client, AccessPrivilege.READ_WRITE)) {
+    if (!checkAccessPrivilege(remoteAddress, AccessPrivilege.READ_WRITE)) {
       return new CREATE3Response(Nfs3Status.NFS3ERR_ACCES, null,
           preOpDirAttr, new WccData(Nfs3Utils.getWccAttr(preOpDirAttr),
               preOpDirAttr));
@@ -922,9 +947,9 @@ public class RpcProgramNfs3 extends RpcP
   }
 
   @Override
-  public MKDIR3Response mkdir(XDR xdr, SecurityHandler securityHandler,
-      InetAddress client) {
+  public MKDIR3Response mkdir(XDR xdr, RpcInfo info) {
     MKDIR3Response response = new MKDIR3Response(Nfs3Status.NFS3_OK);
+    SecurityHandler securityHandler = getSecurityHandler(info);
     DFSClient dfsClient = clientCache.getDfsClient(securityHandler.getUser());
     if (dfsClient == null) {
       response.setStatus(Nfs3Status.NFS3ERR_SERVERFAULT);
@@ -960,7 +985,7 @@ public class RpcProgramNfs3 extends RpcP
       return new MKDIR3Response(Nfs3Status.NFS3ERR_STALE);
     }
 
-    if (!checkAccessPrivilege(client, AccessPrivilege.READ_WRITE)) {
+    if (!checkAccessPrivilege(info, AccessPrivilege.READ_WRITE)) {
      return new MKDIR3Response(Nfs3Status.NFS3ERR_ACCES, null, preOpDirAttr,
           new WccData(Nfs3Utils.getWccAttr(preOpDirAttr), preOpDirAttr));
     }
@@ -1012,15 +1037,15 @@ public class RpcProgramNfs3 extends RpcP
     }
   }
 
-  public READDIR3Response mknod(XDR xdr,
-      SecurityHandler securityHandler, InetAddress client) {
+  @Override
+  public READDIR3Response mknod(XDR xdr, RpcInfo info) {
     return new READDIR3Response(Nfs3Status.NFS3ERR_NOTSUPP);
   }
 
   @Override
-  public REMOVE3Response remove(XDR xdr,
-      SecurityHandler securityHandler, InetAddress client) {
+  public REMOVE3Response remove(XDR xdr, RpcInfo info) {
     REMOVE3Response response = new REMOVE3Response(Nfs3Status.NFS3_OK);
+    SecurityHandler securityHandler = getSecurityHandler(info);
     DFSClient dfsClient = clientCache.getDfsClient(securityHandler.getUser());
     if (dfsClient == null) {
       response.setStatus(Nfs3Status.NFS3ERR_SERVERFAULT);
@@ -1093,9 +1118,9 @@ public class RpcProgramNfs3 extends RpcP
   }
 
   @Override
-  public RMDIR3Response rmdir(XDR xdr, SecurityHandler securityHandler,
-      InetAddress client) {
+  public RMDIR3Response rmdir(XDR xdr, RpcInfo info) {
     RMDIR3Response response = new RMDIR3Response(Nfs3Status.NFS3_OK);
+    SecurityHandler securityHandler = getSecurityHandler(info);
     DFSClient dfsClient = clientCache.getDfsClient(securityHandler.getUser());
     if (dfsClient == null) {
       response.setStatus(Nfs3Status.NFS3ERR_SERVERFAULT);
@@ -1129,7 +1154,7 @@ public class RpcProgramNfs3 extends RpcP
     WccData errWcc = new WccData(Nfs3Utils.getWccAttr(preOpDirAttr),
         preOpDirAttr);
 
-    if (!checkAccessPrivilege(client, AccessPrivilege.READ_WRITE)) {
+    if (!checkAccessPrivilege(info, AccessPrivilege.READ_WRITE)) {
       return new RMDIR3Response(Nfs3Status.NFS3ERR_ACCES, errWcc);
     }
@@ -1175,9 +1200,9 @@ public class RpcProgramNfs3 extends RpcP
   }
 
   @Override
-  public RENAME3Response rename(XDR xdr, SecurityHandler securityHandler,
-      InetAddress client) {
+  public RENAME3Response rename(XDR xdr, RpcInfo info) {
     RENAME3Response response = new RENAME3Response(Nfs3Status.NFS3_OK);
+    SecurityHandler securityHandler = getSecurityHandler(info);
     DFSClient dfsClient = clientCache.getDfsClient(securityHandler.getUser());
     if (dfsClient == null) {
       response.setStatus(Nfs3Status.NFS3ERR_SERVERFAULT);
@@ -1221,7 +1246,7 @@ public class RpcProgramNfs3 extends RpcP
      return new RENAME3Response(Nfs3Status.NFS3ERR_STALE);
     }
 
-    if (!checkAccessPrivilege(client, AccessPrivilege.READ_WRITE)) {
+    if (!checkAccessPrivilege(info, AccessPrivilege.READ_WRITE)) {
       WccData fromWcc = new WccData(Nfs3Utils.getWccAttr(fromPreOpAttr),
           fromPreOpAttr);
       WccData toWcc = new WccData(Nfs3Utils.getWccAttr(toPreOpAttr),
@@ -1263,15 +1288,15 @@ public class RpcProgramNfs3 extends RpcP
   }
 
   @Override
-  public SYMLINK3Response symlink(XDR xdr, SecurityHandler securityHandler,
-      InetAddress client) {
+  public SYMLINK3Response symlink(XDR xdr, RpcInfo info) {
     SYMLINK3Response response = new SYMLINK3Response(Nfs3Status.NFS3_OK);
 
-    if (!checkAccessPrivilege(client, AccessPrivilege.READ_WRITE)) {
+    if (!checkAccessPrivilege(info, AccessPrivilege.READ_WRITE)) {
       response.setStatus(Nfs3Status.NFS3ERR_ACCES);
       return response;
     }
 
+    SecurityHandler securityHandler = getSecurityHandler(info);
     DFSClient dfsClient = clientCache.getDfsClient(securityHandler.getUser());
     if (dfsClient == null) {
       response.setStatus(Nfs3Status.NFS3ERR_SERVERFAULT);
@@ -1322,8 +1347,8 @@ public class RpcProgramNfs3 extends RpcP
     }
   }
 
-  public READDIR3Response link(XDR xdr, SecurityHandler securityHandler,
-      InetAddress client) {
+  @Override
+  public READDIR3Response link(XDR xdr, RpcInfo info) {
     return new READDIR3Response(Nfs3Status.NFS3ERR_NOTSUPP);
   }
@@ -1351,11 +1376,16 @@ public class RpcProgramNfs3 extends RpcP
   }
 
   @Override
+  public READDIR3Response readdir(XDR xdr, RpcInfo info) {
+    SecurityHandler securityHandler = getSecurityHandler(info);
+    SocketAddress remoteAddress = info.remoteAddress();
+    return readdir(xdr, securityHandler, remoteAddress);
+  }
   public READDIR3Response readdir(XDR xdr, SecurityHandler securityHandler,
-      InetAddress client) {
+      SocketAddress remoteAddress) {
     READDIR3Response response = new READDIR3Response(Nfs3Status.NFS3_OK);
 
-    if (!checkAccessPrivilege(client, AccessPrivilege.READ_ONLY)) {
+    if (!checkAccessPrivilege(remoteAddress, AccessPrivilege.READ_ONLY)) {
       response.setStatus(Nfs3Status.NFS3ERR_ACCES);
       return response;
     }
@@ -1491,9 +1521,17 @@ public class RpcProgramNfs3 extends RpcP
         dirStatus.getModificationTime(), dirList);
   }
 
-  public READDIRPLUS3Response readdirplus(XDR xdr,
-      SecurityHandler securityHandler, InetAddress client) {
-    if (!checkAccessPrivilege(client, AccessPrivilege.READ_ONLY)) {
+  @Override
+  public READDIRPLUS3Response readdirplus(XDR xdr, RpcInfo info) {
+    SecurityHandler securityHandler = getSecurityHandler(info);
+    SocketAddress remoteAddress = info.remoteAddress();
+    return readdirplus(xdr, securityHandler, remoteAddress);
+  }
+
+  @VisibleForTesting
+  READDIRPLUS3Response readdirplus(XDR xdr, SecurityHandler securityHandler,
+      SocketAddress remoteAddress) {
+    if (!checkAccessPrivilege(remoteAddress, AccessPrivilege.READ_ONLY)) {
       return new READDIRPLUS3Response(Nfs3Status.NFS3ERR_ACCES);
     }
@@ -1643,15 +1681,15 @@ public class RpcProgramNfs3 extends RpcP
   }
 
   @Override
-  public FSSTAT3Response fsstat(XDR xdr, SecurityHandler securityHandler,
-      InetAddress client) {
+  public FSSTAT3Response fsstat(XDR xdr, RpcInfo info) {
     FSSTAT3Response response = new FSSTAT3Response(Nfs3Status.NFS3_OK);
 
-    if (!checkAccessPrivilege(client, AccessPrivilege.READ_ONLY)) {
+    if (!checkAccessPrivilege(info, AccessPrivilege.READ_ONLY)) {
      response.setStatus(Nfs3Status.NFS3ERR_ACCES);
      return response;
     }
 
+    SecurityHandler securityHandler = getSecurityHandler(info);
     DFSClient dfsClient = clientCache.getDfsClient(securityHandler.getUser());
     if (dfsClient == null) {
       response.setStatus(Nfs3Status.NFS3ERR_SERVERFAULT);
@@ -1711,15 +1749,15 @@ public class RpcProgramNfs3 extends RpcP
   }
 
   @Override
-  public FSINFO3Response fsinfo(XDR xdr, SecurityHandler securityHandler,
-      InetAddress client) {
+  public FSINFO3Response fsinfo(XDR xdr, RpcInfo info) {
     FSINFO3Response response = new FSINFO3Response(Nfs3Status.NFS3_OK);
 
-    if (!checkAccessPrivilege(client, AccessPrivilege.READ_ONLY)) {
+    if (!checkAccessPrivilege(info, AccessPrivilege.READ_ONLY)) {
       response.setStatus(Nfs3Status.NFS3ERR_ACCES);
       return response;
     }
 
+    SecurityHandler securityHandler = getSecurityHandler(info);
     DFSClient dfsClient = clientCache.getDfsClient(securityHandler.getUser());
     if (dfsClient == null) {
       response.setStatus(Nfs3Status.NFS3ERR_SERVERFAULT);
@@ -1769,15 +1807,15 @@ public class RpcProgramNfs3 extends RpcP
   }
 
   @Override
-  public PATHCONF3Response pathconf(XDR xdr, SecurityHandler securityHandler,
-      InetAddress client) {
+  public PATHCONF3Response pathconf(XDR xdr, RpcInfo info) {
     PATHCONF3Response response = new PATHCONF3Response(Nfs3Status.NFS3_OK);
 
-    if (!checkAccessPrivilege(client, AccessPrivilege.READ_ONLY)) {
+    if (!checkAccessPrivilege(info, AccessPrivilege.READ_ONLY)) {
       response.setStatus(Nfs3Status.NFS3ERR_ACCES);
       return response;
     }
 
+    SecurityHandler securityHandler = getSecurityHandler(info);
     DFSClient dfsClient = clientCache.getDfsClient(securityHandler.getUser());
     if (dfsClient == null) {
       response.setStatus(Nfs3Status.NFS3ERR_SERVERFAULT);
@@ -1816,9 +1854,11 @@ public class RpcProgramNfs3 extends RpcP
   }
 
   @Override
-  public COMMIT3Response commit(XDR xdr, Channel channel, int xid,
-      SecurityHandler securityHandler, InetAddress client) {
+  public COMMIT3Response commit(XDR xdr, RpcInfo info) {
+    //Channel channel, int xid,
+    //  SecurityHandler securityHandler, InetAddress client) {
     COMMIT3Response response = new COMMIT3Response(Nfs3Status.NFS3_OK);
+    SecurityHandler securityHandler = getSecurityHandler(info);
     DFSClient dfsClient = clientCache.getDfsClient(securityHandler.getUser());
     if (dfsClient == null) {
       response.setStatus(Nfs3Status.NFS3ERR_SERVERFAULT);
@@ -1849,7 +1889,7 @@ public class RpcProgramNfs3 extends RpcP
       return new COMMIT3Response(Nfs3Status.NFS3ERR_STALE);
     }
 
-    if (!checkAccessPrivilege(client, AccessPrivilege.READ_WRITE)) {
+    if (!checkAccessPrivilege(info, AccessPrivilege.READ_WRITE)) {
       return new COMMIT3Response(Nfs3Status.NFS3ERR_ACCES, new WccData(
           Nfs3Utils.getWccAttr(preOpAttr), preOpAttr),
           Nfs3Constant.WRITE_COMMIT_VERF);
@@ -1859,8 +1899,10 @@ public class RpcProgramNfs3 extends RpcP
          : (request.getOffset() + request.getCount());
 
       // Insert commit as an async request
-      writeManager.handleCommit(dfsClient, handle, commitOffset, channel, xid,
-          preOpAttr);
+      RpcCall rpcCall = (RpcCall) info.header();
+      int xid = rpcCall.getXid();
+      writeManager.handleCommit(dfsClient, handle, commitOffset,
+          info.channel(), xid, preOpAttr);
       return null;
     } catch (IOException e) {
       LOG.warn("Exception ", e);
@@ -1885,11 +1927,16 @@ public class RpcProgramNfs3 extends RpcP
       return null;
     }
   }
+
+  private SecurityHandler getSecurityHandler(RpcInfo info) {
+    RpcCall rpcCall = (RpcCall) info.header();
+    return getSecurityHandler(rpcCall.getCredential(), rpcCall.getVerifier());
+  }
 
   @Override
   public void handleInternal(ChannelHandlerContext ctx, RpcInfo info) {
     RpcCall rpcCall = (RpcCall) info.header();
-    final NFSPROC3 nfsproc3 = NFSPROC3.fromValue(rpcCall.getProcedure());
+    final NFSPROC3 nfsproc3 = NFSPROC3.fromValue(rpcCall.getProcedure());
     int xid = rpcCall.getXid();
     byte[] data = new byte[info.data().readableBytes()];
     info.data().readBytes(data);
@@ -1897,9 +1944,8 @@ public class RpcProgramNfs3 extends RpcP
     XDR out = new XDR();
     InetAddress client = ((InetSocketAddress) info.remoteAddress())
         .getAddress();
-    Channel channel = info.channel();
-
     Credentials credentials = rpcCall.getCredential();
+
     // Ignore auth only for NFSPROC3_NULL, especially for Linux clients.
     if (nfsproc3 != NFSPROC3.NULL) {
       if (credentials.getFlavor() != AuthFlavor.AUTH_SYS
@@ -1937,27 +1983,24 @@ public class RpcProgramNfs3 extends RpcP
       }
     }
 
-    SecurityHandler securityHandler = getSecurityHandler(credentials,
-        rpcCall.getVerifier());
-
     NFS3Response response = null;
     if (nfsproc3 == NFSPROC3.NULL) {
       response = nullProcedure();
     } else if (nfsproc3 == NFSPROC3.GETATTR) {
-      response = getattr(xdr, securityHandler, client);
+      response = getattr(xdr, info);
     } else if (nfsproc3 == NFSPROC3.SETATTR) {
-      response = setattr(xdr, securityHandler, client);
+      response = setattr(xdr, info);
     } else if (nfsproc3 == NFSPROC3.LOOKUP) {
-      response = lookup(xdr, securityHandler, client);
+      response = lookup(xdr, info);
     } else if (nfsproc3 == NFSPROC3.ACCESS) {
-      response = access(xdr, securityHandler, client);
+      response = access(xdr, info);
     } else if (nfsproc3 == NFSPROC3.READLINK) {
-      response = readlink(xdr, securityHandler, client);
+      response = readlink(xdr, info);
     } else if (nfsproc3 == NFSPROC3.READ) {
       if (LOG.isDebugEnabled()) {
         LOG.debug(Nfs3Utils.READ_RPC_START + xid);
      }
-      response = read(xdr, securityHandler, client);
+      response = read(xdr, info);
       if (LOG.isDebugEnabled() && (nfsproc3 == NFSPROC3.READ)) {
         LOG.debug(Nfs3Utils.READ_RPC_END + xid);
       }
@@ -1965,36 +2008,36 @@ public class RpcProgramNfs3 extends RpcP
       if (LOG.isDebugEnabled()) {
         LOG.debug(Nfs3Utils.WRITE_RPC_START + xid);
       }
-      response = write(xdr, channel, xid, securityHandler, client);
+      response = write(xdr, info);
       // Write end debug trace is in Nfs3Utils.writeChannel
     } else if (nfsproc3 == NFSPROC3.CREATE) {
-      response = create(xdr, securityHandler, client);
+      response = create(xdr, info);
     } else if (nfsproc3 == NFSPROC3.MKDIR) {
-      response = mkdir(xdr, securityHandler, client);
+      response = mkdir(xdr, info);
     } else if (nfsproc3 == NFSPROC3.SYMLINK) {
-      response = symlink(xdr, securityHandler, client);
+      response = symlink(xdr, info);
    } else if (nfsproc3 == NFSPROC3.MKNOD) {
-      response = mknod(xdr, securityHandler, client);
+      response = mknod(xdr, info);
    } else if (nfsproc3 == NFSPROC3.REMOVE) {
-      response = remove(xdr, securityHandler, client);
+      response = remove(xdr, info);
    } else if (nfsproc3 == NFSPROC3.RMDIR) {
-      response = rmdir(xdr, securityHandler, client);
+      response = rmdir(xdr, info);
    } else if (nfsproc3 == NFSPROC3.RENAME) {
-      response = rename(xdr, securityHandler, client);
+      response = rename(xdr, info);
    } else if (nfsproc3 == NFSPROC3.LINK) {
-      response = link(xdr, securityHandler, client);
+      response = link(xdr, info);
    } else if (nfsproc3 == NFSPROC3.READDIR) {
-      response = readdir(xdr, securityHandler, client);
+      response = readdir(xdr, info);
    } else if (nfsproc3 == NFSPROC3.READDIRPLUS) {
-      response = readdirplus(xdr, securityHandler, client);
+      response = readdirplus(xdr, info);
    } else if (nfsproc3 == NFSPROC3.FSSTAT) {
-      response = fsstat(xdr, securityHandler, client);
+      response = fsstat(xdr, info);
    } else if (nfsproc3 == NFSPROC3.FSINFO) {
-      response = fsinfo(xdr, securityHandler, client);
+      response = fsinfo(xdr, info);
    } else if (nfsproc3 == NFSPROC3.PATHCONF) {
-      response = pathconf(xdr, securityHandler, client);
+      response = pathconf(xdr,info);
    } else if (nfsproc3 == NFSPROC3.COMMIT) {
-      response = commit(xdr, channel, xid, securityHandler, client);
+      response = commit(xdr, info);
    } else {
      // Invalid procedure
      RpcAcceptedReply.getInstance(xid,
@@ -2027,8 +2070,21 @@ public class RpcProgramNfs3 extends RpcP
     return nfsproc3 == null || nfsproc3.isIdempotent();
   }
 
-  private boolean checkAccessPrivilege(final InetAddress client,
+  private boolean checkAccessPrivilege(RpcInfo info,
+      final AccessPrivilege expected) {
+    SocketAddress remoteAddress = info.remoteAddress();
+    return checkAccessPrivilege(remoteAddress, expected);
+  }
+
+  private boolean checkAccessPrivilege(SocketAddress remoteAddress,
       final AccessPrivilege expected) {
+    // Port monitoring
+    if (!doPortMonitoring(remoteAddress)) {
+      return false;
+    }
+
+    // Check export table
+    InetAddress client = ((InetSocketAddress) remoteAddress).getAddress();
     AccessPrivilege access = exports.getAccessPrivilege(client);
     if (access == AccessPrivilege.NONE) {
       return false;
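Side note: the TestWrites changes below swap InetAddress.getLocalHost() for
new InetSocketAddress("localhost", 1234), matching the new SocketAddress-based
@VisibleForTesting overloads above. Port 1234 is unprivileged (> 1023), yet the calls
still clear checkAccessPrivilege(), because port monitoring is off by default
(nfs.port.monitoring.disabled defaults to true), leaving only the export-table check:

    // Hedged illustration of what the tests rely on; not code from the commit.
    SocketAddress testClient = new InetSocketAddress("localhost", 1234); // port > 1023
    // With the default nfs.port.monitoring.disabled=true, doPortMonitoring()
    // accepts any source port, so CREATE/WRITE/READ proceed straight to the
    // export-table privilege check.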
Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestWrites.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestWrites.java?rev=1603664&r1=1603663&r2=1603664&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestWrites.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestWrites.java Wed Jun 18 23:15:04 2014
@@ -22,7 +22,7 @@ import static org.junit.Assert.assertTru
 import static org.junit.Assert.fail;
 
 import java.io.IOException;
-import java.net.InetAddress;
+import java.net.InetSocketAddress;
 import java.nio.ByteBuffer;
 import java.util.Arrays;
 import java.util.concurrent.ConcurrentNavigableMap;
@@ -318,7 +318,7 @@ public class TestWrites {
     XDR createXdr = new XDR();
     createReq.serialize(createXdr);
     CREATE3Response createRsp = nfsd.create(createXdr.asReadOnlyWrap(),
-        securityHandler, InetAddress.getLocalHost());
+        securityHandler, new InetSocketAddress("localhost", 1234));
     FileHandle handle = createRsp.getObjHandle();
 
     // Test DATA_SYNC
@@ -331,7 +331,7 @@ public class TestWrites {
     XDR writeXdr = new XDR();
     writeReq.serialize(writeXdr);
     nfsd.write(writeXdr.asReadOnlyWrap(), null, 1, securityHandler,
-        InetAddress.getLocalHost());
+        new InetSocketAddress("localhost", 1234));
 
     waitWrite(nfsd, handle, 60000);
 
@@ -340,7 +340,7 @@ public class TestWrites {
     XDR readXdr = new XDR();
     readReq.serialize(readXdr);
     READ3Response readRsp = nfsd.read(readXdr.asReadOnlyWrap(),
-        securityHandler, InetAddress.getLocalHost());
+        securityHandler, new InetSocketAddress("localhost", 1234));
 
     assertTrue(Arrays.equals(buffer, readRsp.getData().array()));
 
@@ -352,7 +352,7 @@ public class TestWrites {
     XDR createXdr2 = new XDR();
     createReq2.serialize(createXdr2);
     CREATE3Response createRsp2 = nfsd.create(createXdr2.asReadOnlyWrap(),
-        securityHandler, InetAddress.getLocalHost());
+        securityHandler, new InetSocketAddress("localhost", 1234));
     FileHandle handle2 = createRsp2.getObjHandle();
 
     WRITE3Request writeReq2 = new WRITE3Request(handle2, 0, 10,
@@ -360,7 +360,7 @@ public class TestWrites {
     XDR writeXdr2 = new XDR();
     writeReq2.serialize(writeXdr2);
     nfsd.write(writeXdr2.asReadOnlyWrap(), null, 1, securityHandler,
-        InetAddress.getLocalHost());
+        new InetSocketAddress("localhost", 1234));
 
     waitWrite(nfsd, handle2, 60000);
 
@@ -369,7 +369,7 @@ public class TestWrites {
     XDR readXdr2 = new XDR();
     readReq2.serialize(readXdr2);
     READ3Response readRsp2 = nfsd.read(readXdr2.asReadOnlyWrap(),
-        securityHandler, InetAddress.getLocalHost());
+        securityHandler, new InetSocketAddress("localhost", 1234));
 
     assertTrue(Arrays.equals(buffer, readRsp2.getData().array()));
 
     // FILE_SYNC should sync the file size

Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1603664&r1=1603663&r2=1603664&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Wed Jun 18 23:15:04 2014
@@ -443,6 +443,17 @@ Release 2.5.0 - UNRELEASED
     HDFS-6499. Use NativeIO#renameTo instead of File#renameTo in
     FileJournalManager. (Yongjun Zhang via atm)
 
+    HDFS-6518. TestCacheDirectives#testExceedsCapacity should
+    take FSN read lock when accessing pendingCached list.
+    (wang)
+
+    HDFS-6528. Add XAttrs to TestOfflineImageViewer. (Stephen Chu via wang)
+
+    HDFS-6545. Finalizing rolling upgrade can make NN unavailable for a long
+    duration. (kihwal)
+
+    HDFS-6530. Fix Balancer documentation. (szetszwo)
+
   OPTIMIZATIONS
 
     HDFS-6214. Webhdfs has poor throughput for files >2GB (daryn)
@@ -636,6 +647,24 @@ Release 2.5.0 - UNRELEASED
     HDFS-6375. Listing extended attributes with the search permission.
     (Charles Lamb via wang)
 
+    HDFS-6539. test_native_mini_dfs is skipped in hadoop-hdfs/pom.xml
+    (decstery via cmccabe)
+
+    HDFS-6527. Edit log corruption due to defered INode removal. (kihwal and
+    jing9 via jing9)
+
+    HDFS-6552. add DN storage to a BlockInfo will not replace the different
+    storage from same DN. (Amir Langer via Arpit Agarwal)
+
+    HDFS-6551. Rename with OVERWRITE option may throw NPE when the target
+    file/directory is a reference INode. (jing9)
+
+    HDFS-6439. NFS should not reject NFS requests to the NULL procedure whether
+    port monitoring is enabled or not. (brandonli)
+
+    HDFS-6559. Fix wrong option "dfsadmin -rollingUpgrade start" in the
+    document. (Akira Ajisaka via Arpit Agarwal)
+
   BREAKDOWN OF HDFS-2006 SUBTASKS AND RELATED JIRAS
 
     HDFS-6299. Protobuf for XAttr and client-side implementation. (Yi Liu via
     umamahesh)

Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/pom.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/pom.xml?rev=1603664&r1=1603663&r2=1603664&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/pom.xml (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/pom.xml Wed Jun 18 23:15:04 2014
@@ -409,7 +409,7 @@ http://maven.apache.org/xsd/maven-4.0.0.
-
+

Propchange: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java:r1602934-1603663

Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java?rev=1603664&r1=1603663&r2=1603664&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java Wed Jun 18 23:15:04 2014
@@ -203,7 +203,7 @@ public class BlockInfo extends Block imp
     } else {
       // The block is on the DN but belongs to a different storage.
       // Update our state.
-      removeStorage(storage);
+      removeStorage(getStorageInfo(idx));
       added = false;      // Just updating storage. Return false.
     }
   }
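Side note: the one-line BlockInfo change above is HDFS-6552 from the CHANGES list.
When a DataNode reports a block under a different storage than the one already
recorded, the old entry at slot idx must be unlinked; removing the incoming `storage`
(which was never added) left the stale entry in place, so the replacement never
happened. A hedged sketch of the intent — surrounding code paraphrased, not the
actual BlockInfo internals:

    // Sketch only: idx is the slot already holding a storage of the same DataNode.
    DatanodeStorageInfo stale = getStorageInfo(idx);
    if (stale != storage) {
      removeStorage(stale);   // fixed: unlink the previously recorded storage
      // before the fix, removeStorage(storage) targeted the not-yet-added
      // argument, leaving `stale` attached to the block's storage list
      added = false;          // just updating storage; report no new replica
    }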
Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java?rev=1603664&r1=1603663&r2=1603664&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java Wed Jun 18 23:15:04 2014
@@ -45,7 +45,6 @@ import org.apache.hadoop.fs.XAttr;
 import org.apache.hadoop.fs.XAttrSetFlag;
 import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.AclStatus;
-import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
@@ -899,9 +898,10 @@ public class FSDirectory implements Clos
 
     boolean undoRemoveDst = false;
     INode removedDst = null;
+    long removedNum = 0;
     try {
       if (dstInode != null) { // dst exists remove it
-        if (removeLastINode(dstIIP) != -1) {
+        if ((removedNum = removeLastINode(dstIIP)) != -1) {
           removedDst = dstIIP.getLastINode();
           undoRemoveDst = true;
         }
@@ -941,13 +941,15 @@ public class FSDirectory implements Clos
       long filesDeleted = -1;
       if (removedDst != null) {
         undoRemoveDst = false;
-        BlocksMapUpdateInfo collectedBlocks = new BlocksMapUpdateInfo();
-        List removedINodes = new ChunkedArrayList();
-        filesDeleted = removedDst.cleanSubtree(Snapshot.CURRENT_STATE_ID,
-            dstIIP.getLatestSnapshotId(), collectedBlocks, removedINodes, true)
-            .get(Quota.NAMESPACE);
-        getFSNamesystem().removePathAndBlocks(src, collectedBlocks,
-            removedINodes);
+        if (removedNum > 0) {
+          BlocksMapUpdateInfo collectedBlocks = new BlocksMapUpdateInfo();
+          List removedINodes = new ChunkedArrayList();
+          filesDeleted = removedDst.cleanSubtree(Snapshot.CURRENT_STATE_ID,
+              dstIIP.getLatestSnapshotId(), collectedBlocks, removedINodes,
+              true).get(Quota.NAMESPACE);
+          getFSNamesystem().removePathAndBlocks(src, collectedBlocks,
+              removedINodes);
+        }
       }
 
       if (snapshottableDirs.size() > 0) {

Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java?rev=1603664&r1=1603663&r2=1603664&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java Wed Jun 18 23:15:04 2014
@@ -3073,6 +3073,13 @@ public class FSNamesystem implements Nam
           + (lease != null ? lease.toString()
               : "Holder " + holder + " does not have any open files."));
     }
+    // No further modification is allowed on a deleted file.
+    // A file is considered deleted, if it has no parent or is marked
+    // as deleted in the snapshot feature.
+    if (file.getParent() == null || (file.isWithSnapshot() &&
+        file.getFileWithSnapshotFeature().isCurrentFileDeleted())) {
+      throw new FileNotFoundException(src);
+    }
     String clientName = file.getFileUnderConstructionFeature().getClientName();
     if (holder != null && !clientName.equals(holder)) {
       throw new LeaseExpiredException("Lease mismatch on " + ident +
@@ -3507,6 +3514,7 @@ public class FSNamesystem implements Nam
     getEditLog().logSync();
     removeBlocks(collectedBlocks); // Incremental deletion of blocks
     collectedBlocks.clear();
+
     dir.writeLock();
     try {
       dir.removeFromInodeMap(removedINodes);
@@ -7694,14 +7702,20 @@ public class FSNamesystem implements Nam
       returnInfo = finalizeRollingUpgradeInternal(now());
       getEditLog().logFinalizeRollingUpgrade(returnInfo.getFinalizeTime());
-      getFSImage().saveNamespace(this);
+      if (haEnabled) {
+        // roll the edit log to make sure the standby NameNode can tail
+        getFSImage().rollEditLog();
+      }
       getFSImage().renameCheckpoint(NameNodeFile.IMAGE_ROLLBACK,
           NameNodeFile.IMAGE);
     } finally {
       writeUnlock();
     }
 
-    // getEditLog().logSync() is not needed since it does saveNamespace
+    if (!haEnabled) {
+      // Sync not needed for ha since the edit was rolled after logging.
+      getEditLog().logSync();
+    }
 
     if (auditLog.isInfoEnabled() && isExternalInvocation()) {
       logAuditEvent(true, "finalizeRollingUpgrade", null, null, null);
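Side note: the FSDirectory hunk above is HDFS-6551. Hedging on the exact contract,
removeLastINode() appears to distinguish two non-failure outcomes: -1 means nothing
was removed, while 0 means the last inode was detached without actually freeing
namespace (the reference-INode/snapshot case). Cleaning the destination subtree in
the 0 case is what could trip the NPE, hence the extra removedNum > 0 guard:

    // Hedged paraphrase of the guarded flow; not the full method.
    long removedNum = removeLastINode(dstIIP);  // -1: no-op; 0: reference case; >0: removed
    if (removedNum != -1) {
      removedDst = dstIIP.getLastINode();
      undoRemoveDst = true;
    }
    // ... rename proceeds ...
    if (removedDst != null && removedNum > 0) {
      // Only now is it safe to collect blocks and clean the subtree.
    }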
Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/Federation.apt.vm
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/Federation.apt.vm?rev=1603664&r1=1603663&r2=1603664&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/Federation.apt.vm (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/Federation.apt.vm Wed Jun 18 23:15:04 2014
@@ -287,13 +287,14 @@ HDFS Federation
   Policy could be:
 
-  * <<>> - this is the policy. This balances the storage at
+  * <<>> - this is the policy. This balances the storage at
     the datanode level. This is similar to balancing policy from prior releases.
 
   * <<>> - this balances the storage at the block pool level.
     Balancing at block pool level balances storage at the datanode level also.
 
-  Note that Balander only balances the data and does not balance the namespace.
+  Note that Balancer only balances the data and does not balance the namespace.
+  For the complete command usage, see {{{../hadoop-common/CommandsManual.html#balancer}balancer}}.
 
 ** Decommissioning

Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsNfsGateway.apt.vm
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsNfsGateway.apt.vm?rev=1603664&r1=1603663&r2=1603664&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsNfsGateway.apt.vm (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsNfsGateway.apt.vm Wed Jun 18 23:15:04 2014
@@ -322,6 +322,22 @@ HDFS NFS Gateway
   Then the users can access HDFS as part of the local file system except that,
   hard link and random write are not supported yet.
 
+* {Allow mounts from unprivileged clients}
+
+  In environments where root access on client machines is not generally
+  available, some measure of security can be obtained by ensuring that only NFS
+  clients originating from privileged ports can connect to the NFS server. This
+  feature is referred to as "port monitoring." This feature is not enabled by default
+  in the HDFS NFS Gateway, but can be optionally enabled by setting the
+  following config in hdfs-site.xml on the NFS Gateway machine:
+
+-------------------------------------------------------------------
+<property>
+  <name>nfs.port.monitoring.disabled</name>
+  <value>false</value>
+</property>
+-------------------------------------------------------------------
+
 * {User authentication and mapping}
 
   NFS gateway in this release uses AUTH_UNIX style authentication. When the user on NFS client
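Side note: end to end, the property documented above flows from hdfs-site.xml into
the gateway at startup — the Nfs3.java hunk earlier in this commit is the consumer.
A condensed, hedged recap connecting the documentation to the code path:

    // Mirrors the Nfs3.java change above; shown only to tie doc and code together.
    NfsConfiguration conf = new NfsConfiguration();           // reads hdfs-site.xml
    boolean allowInsecurePorts = conf.getBoolean(
        NfsConfigKeys.DFS_NFS_PORT_MONITORING_DISABLED_KEY,   // "nfs.port.monitoring.disabled"
        NfsConfigKeys.DFS_NFS_PORT_MONITORING_DISABLED_DEFAULT); // default true
    // false => RpcProgramMountd answers MNT calls from source ports > 1023
    // with NFS3ERR_ACCES.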
Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsUserGuide.apt.vm
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsUserGuide.apt.vm?rev=1603664&r1=1603663&r2=1603664&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsUserGuide.apt.vm (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsUserGuide.apt.vm Wed Jun 18 23:15:04 2014
@@ -77,7 +77,7 @@ HDFS Users Guide
    * <<>>: a utility to fetch DelegationToken and store it
      in a file on the local system.
 
-   * Rebalancer: tool to balance the cluster when the data is
+   * Balancer: tool to balance the cluster when the data is
      unevenly distributed among DataNodes.
 
    * Upgrade and rollback: after a software upgrade, it is possible
@@ -316,7 +316,7 @@ HDFS Users Guide
    For command usage, see
    {{{../hadoop-common/CommandsManual.html#namenode}namenode}}.
 
-* Rebalancer
+* Balancer
 
    HDFS data might not always be be placed uniformly across the DataNode.
    One common reason is addition of new DataNodes to an existing cluster.
@@ -338,7 +338,7 @@ HDFS Users Guide
    Due to multiple competing considerations, data might not be
    uniformly placed across the DataNodes. HDFS provides a tool for
    administrators that analyzes block placement and rebalanaces data
    across the DataNode.
 
-   A brief administrator's guide for rebalancer as a PDF is attached to
+   A brief administrator's guide for balancer is available at
    {{{https://issues.apache.org/jira/browse/HADOOP-1652}HADOOP-1652}}.
 
    For command usage, see
    {{{../hadoop-common/CommandsManual.html#balancer}balancer}}.

Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/site/xdoc/HdfsRollingUpgrade.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/site/xdoc/HdfsRollingUpgrade.xml?rev=1603664&r1=1603663&r2=1603664&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/site/xdoc/HdfsRollingUpgrade.xml (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/site/xdoc/HdfsRollingUpgrade.xml Wed Jun 18 23:15:04 2014
@@ -217,7 +217,7 @@
   dfsadmin -rollingUpgrade
-  hdfs dfsadmin -rollingUpgrade <query|start|finalize>
+  hdfs dfsadmin -rollingUpgrade <query|prepare|finalize>
   Execute a rolling upgrade action.
   Options:
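Side note: the TestDFSShell changes below extend the preserve-flag matrix of cp. Per
the test comments, the flags are t(imestamps), o(wnership), p(ermission), x(attrs),
and the new a(cl); the "-ptoa" case additionally verifies that -a implies preserving
permissions, since ACLs extend the permission model. A hedged usage sketch in the
same style as the tests (paths are placeholders):

    // Illustrative only; mirrors the pattern used by the tests below.
    String[] argv = { "-cp", "-ptopa", "/src/file", "/dst/file" };
    int ret = ToolRunner.run(new FsShell(conf), argv);   // 0 on success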
String[] { "-cp", "-ptop", src.toUri().toString(), target2.toUri().toString() }; ret = ToolRunner.run(shell, argv); - assertEquals("cp -p is not working", SUCCESS, ret); - targetStatus = fs.getFileStatus(target1); + assertEquals("cp -ptop is not working", SUCCESS, ret); + targetStatus = fs.getFileStatus(target2); assertEquals(mtime, targetStatus.getModificationTime()); assertEquals(atime, targetStatus.getAccessTime()); assertEquals(owner, targetStatus.getOwner()); assertEquals(group, targetStatus.getGroup()); - assertTrue(perm.equals(targetStatus.getPermission())); + targetPerm = targetStatus.getPermission(); + assertTrue(perm.equals(targetPerm)); xattrs = fs.getXAttrs(target2); assertTrue(xattrs.isEmpty()); - + acls = fs.getAclStatus(target2).getEntries(); + assertTrue(acls.isEmpty()); + assertFalse(targetPerm.getAclBit()); + // -ptopx Path target3 = new Path(hdfsTestDir, "targetfile3"); argv = new String[] { "-cp", "-ptopx", src.toUri().toString(), target3.toUri().toString() }; ret = ToolRunner.run(shell, argv); - assertEquals("cp -p is not working", SUCCESS, ret); - targetStatus = fs.getFileStatus(target1); + assertEquals("cp -ptopx is not working", SUCCESS, ret); + targetStatus = fs.getFileStatus(target3); assertEquals(mtime, targetStatus.getModificationTime()); assertEquals(atime, targetStatus.getAccessTime()); assertEquals(owner, targetStatus.getOwner()); assertEquals(group, targetStatus.getGroup()); - assertTrue(perm.equals(targetStatus.getPermission())); + targetPerm = targetStatus.getPermission(); + assertTrue(perm.equals(targetPerm)); xattrs = fs.getXAttrs(target3); assertEquals(xattrs.size(), 2); assertArrayEquals(new byte[]{0x31, 0x32, 0x33}, xattrs.get("user.a1")); assertArrayEquals(new byte[]{0x31, 0x31, 0x31}, xattrs.get("trusted.a1")); + acls = fs.getAclStatus(target3).getEntries(); + assertTrue(acls.isEmpty()); + assertFalse(targetPerm.getAclBit()); + + // -ptopa + Path target4 = new Path(hdfsTestDir, "targetfile4"); + argv = new String[] { "-cp", "-ptopa", src.toUri().toString(), + target4.toUri().toString() }; + ret = ToolRunner.run(shell, argv); + assertEquals("cp -ptopa is not working", SUCCESS, ret); + targetStatus = fs.getFileStatus(target4); + assertEquals(mtime, targetStatus.getModificationTime()); + assertEquals(atime, targetStatus.getAccessTime()); + assertEquals(owner, targetStatus.getOwner()); + assertEquals(group, targetStatus.getGroup()); + targetPerm = targetStatus.getPermission(); + assertTrue(perm.equals(targetPerm)); + xattrs = fs.getXAttrs(target4); + assertTrue(xattrs.isEmpty()); + acls = fs.getAclStatus(target4).getEntries(); + assertFalse(acls.isEmpty()); + assertTrue(targetPerm.getAclBit()); + assertEquals(fs.getAclStatus(src), fs.getAclStatus(target4)); + + // -ptoa (verify -pa option will preserve permissions also) + Path target5 = new Path(hdfsTestDir, "targetfile5"); + argv = new String[] { "-cp", "-ptoa", src.toUri().toString(), + target5.toUri().toString() }; + ret = ToolRunner.run(shell, argv); + assertEquals("cp -ptoa is not working", SUCCESS, ret); + targetStatus = fs.getFileStatus(target5); + assertEquals(mtime, targetStatus.getModificationTime()); + assertEquals(atime, targetStatus.getAccessTime()); + assertEquals(owner, targetStatus.getOwner()); + assertEquals(group, targetStatus.getGroup()); + targetPerm = targetStatus.getPermission(); + assertTrue(perm.equals(targetPerm)); + xattrs = fs.getXAttrs(target5); + assertTrue(xattrs.isEmpty()); + acls = fs.getAclStatus(target5).getEntries(); + assertFalse(acls.isEmpty()); + 
+      assertTrue(targetPerm.getAclBit());
+      assertEquals(fs.getAclStatus(src), fs.getAclStatus(target5));
     } finally {
       if (null != shell) {
         shell.close();
@@ -1709,6 +1778,90 @@ public class TestDFSShell {
     }
   }
 
+  // Verify cp -pa option will preserve both ACL and sticky bit.
+  @Test (timeout = 120000)
+  public void testCopyCommandsPreserveAclAndStickyBit() throws Exception {
+    Configuration conf = new Configuration();
+    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
+        .format(true).build();
+    FsShell shell = null;
+    FileSystem fs = null;
+    final String testdir =
+        "/tmp/TestDFSShell-testCopyCommandsPreserveAclAndStickyBit-"
+        + counter.getAndIncrement();
+    final Path hdfsTestDir = new Path(testdir);
+    try {
+      fs = cluster.getFileSystem();
+      fs.mkdirs(hdfsTestDir);
+      Path src = new Path(hdfsTestDir, "srcfile");
+      fs.create(src).close();
+
+      fs.setAcl(src, Lists.newArrayList(
+          aclEntry(ACCESS, USER, ALL),
+          aclEntry(ACCESS, USER, "foo", ALL),
+          aclEntry(ACCESS, GROUP, READ_EXECUTE),
+          aclEntry(ACCESS, GROUP, "bar", READ_EXECUTE),
+          aclEntry(ACCESS, OTHER, EXECUTE)));
+      // set sticky bit
+      fs.setPermission(src,
+          new FsPermission(ALL, READ_EXECUTE, EXECUTE, true));
+
+      FileStatus status = fs.getFileStatus(src);
+      final long mtime = status.getModificationTime();
+      final long atime = status.getAccessTime();
+      final String owner = status.getOwner();
+      final String group = status.getGroup();
+      final FsPermission perm = status.getPermission();
+
+      shell = new FsShell(conf);
+
+      // -p preserves sticky bit and doesn't preserve ACL
+      Path target1 = new Path(hdfsTestDir, "targetfile1");
+      String[] argv = new String[] { "-cp", "-p", src.toUri().toString(),
+          target1.toUri().toString() };
+      int ret = ToolRunner.run(shell, argv);
+      assertEquals("cp is not working", SUCCESS, ret);
+      FileStatus targetStatus = fs.getFileStatus(target1);
+      assertEquals(mtime, targetStatus.getModificationTime());
+      assertEquals(atime, targetStatus.getAccessTime());
+      assertEquals(owner, targetStatus.getOwner());
+      assertEquals(group, targetStatus.getGroup());
+      FsPermission targetPerm = targetStatus.getPermission();
+      assertTrue(perm.equals(targetPerm));
+      List<AclEntry> acls = fs.getAclStatus(target1).getEntries();
+      assertTrue(acls.isEmpty());
+      assertFalse(targetPerm.getAclBit());
+
+      // -ptopa preserves both sticky bit and ACL
+      Path target2 = new Path(hdfsTestDir, "targetfile2");
+      argv = new String[] { "-cp", "-ptopa", src.toUri().toString(),
+          target2.toUri().toString() };
+      ret = ToolRunner.run(shell, argv);
+      assertEquals("cp -ptopa is not working", SUCCESS, ret);
+      targetStatus = fs.getFileStatus(target2);
+      assertEquals(mtime, targetStatus.getModificationTime());
+      assertEquals(atime, targetStatus.getAccessTime());
+      assertEquals(owner, targetStatus.getOwner());
+      assertEquals(group, targetStatus.getGroup());
+      targetPerm = targetStatus.getPermission();
+      assertTrue(perm.equals(targetPerm));
+      acls = fs.getAclStatus(target2).getEntries();
+      assertFalse(acls.isEmpty());
+      assertTrue(targetPerm.getAclBit());
+      assertEquals(fs.getAclStatus(src), fs.getAclStatus(target2));
+    } finally {
+      if (null != shell) {
+        shell.close();
+      }
+      if (null != fs) {
+        fs.delete(hdfsTestDir, true);
+        fs.close();
+      }
+      cluster.shutdown();
+    }
+  }
+
   // force Copy Option is -f
   @Test (timeout = 30000)
   public void testCopyCommandsWithForceOption() throws Exception {
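[Reviewer sketch] The assertions above rest on two public APIs: FileSystem#setAcl/#getAclStatus for the ACL round trip, and the FsShell "-cp -p[topxa]" flag family, where each letter after -p adds one preserved attribute (t=timestamps, o=ownership, p=permission, x=XATTRs, a=ACLs). Below is a minimal standalone sketch of the same round trip written against the public AclEntry.Builder API instead of the AclTestHelpers.aclEntry test helper; the class name, paths, and the "foo" user are illustrative, not taken from this commit:

  import java.util.Arrays;
  import java.util.List;

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.FileSystem;
  import org.apache.hadoop.fs.FsShell;
  import org.apache.hadoop.fs.Path;
  import org.apache.hadoop.fs.permission.AclEntry;
  import org.apache.hadoop.fs.permission.AclEntryScope;
  import org.apache.hadoop.fs.permission.AclEntryType;
  import org.apache.hadoop.fs.permission.FsAction;
  import org.apache.hadoop.util.ToolRunner;

  public class AclPreserveSketch {                       // hypothetical class
    public static void main(String[] args) throws Exception {
      Configuration conf = new Configuration();
      // ACLs must be enabled on the NameNode, as the test does through
      // DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY.
      conf.setBoolean("dfs.namenode.acls.enabled", true);
      FileSystem fs = FileSystem.get(conf);

      Path src = new Path("/demo/src");                  // illustrative path
      Path dst = new Path("/demo/dst");                  // illustrative path
      fs.create(src).close();

      // Public-API equivalent of the test's aclEntry(...) helper calls.
      // The three unnamed base entries (USER, GROUP, OTHER) are mandatory
      // in a setAcl spec, which is why the test always carries them
      // alongside the named "foo" and "bar" entries.
      List<AclEntry> spec = Arrays.asList(
          new AclEntry.Builder().setScope(AclEntryScope.ACCESS)
              .setType(AclEntryType.USER).setPermission(FsAction.ALL).build(),
          new AclEntry.Builder().setScope(AclEntryScope.ACCESS)
              .setType(AclEntryType.USER).setName("foo")
              .setPermission(FsAction.ALL).build(),
          new AclEntry.Builder().setScope(AclEntryScope.ACCESS)
              .setType(AclEntryType.GROUP)
              .setPermission(FsAction.READ_EXECUTE).build(),
          new AclEntry.Builder().setScope(AclEntryScope.ACCESS)
              .setType(AclEntryType.OTHER)
              .setPermission(FsAction.EXECUTE).build());
      fs.setAcl(src, spec);

      // Copy while preserving timestamps, ownership, permission, and ACLs.
      int ret = ToolRunner.run(new FsShell(conf),
          new String[] { "-cp", "-ptopa", src.toString(), dst.toString() });
      System.out.println("exit=" + ret + " acl=" + fs.getAclStatus(dst));
    }
  }
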
Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfo.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfo.java?rev=1603664&r1=1603663&r2=1603664&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfo.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfo.java Wed Jun 18 23:15:04 2014
@@ -29,6 +29,8 @@ import org.apache.commons.logging.LogFac
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.server.common.GenerationStamp;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
+import org.junit.Assert;
 import org.junit.Test;
 
 /**
@@ -42,6 +44,34 @@ public class TestBlockInfo {
   private static final Log LOG = LogFactory
       .getLog("org.apache.hadoop.hdfs.TestBlockInfo");
 
+
+  @Test
+  public void testAddStorage() throws Exception {
+    BlockInfo blockInfo = new BlockInfo(3);
+
+    final DatanodeStorageInfo storage = DFSTestUtil.createDatanodeStorageInfo("storageID", "127.0.0.1");
+
+    boolean added = blockInfo.addStorage(storage);
+
+    Assert.assertTrue(added);
+    Assert.assertEquals(storage, blockInfo.getStorageInfo(0));
+  }
+
+
+  @Test
+  public void testReplaceStorageIfDifferentOneAlreadyExistedFromSameDataNode() throws Exception {
+    BlockInfo blockInfo = new BlockInfo(3);
+
+    final DatanodeStorageInfo storage1 = DFSTestUtil.createDatanodeStorageInfo("storageID1", "127.0.0.1");
+    final DatanodeStorageInfo storage2 = new DatanodeStorageInfo(storage1.getDatanodeDescriptor(), new DatanodeStorage("storageID2"));
+
+    blockInfo.addStorage(storage1);
+    boolean added = blockInfo.addStorage(storage2);
+
+    Assert.assertFalse(added);
+    Assert.assertEquals(storage2, blockInfo.getStorageInfo(0));
+  }
+
   @Test
   public void testBlockListMoveToHead() throws Exception {
     LOG.info("BlockInfo moveToHead tests...");
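[Reviewer sketch] The second new test encodes a subtle contract: when a block is already recorded under a different storage of the same DataNode, addStorage is expected to update that entry in place and return false (no new replica slot was consumed), which is exactly what the assertFalse/assertEquals pair checks. A toy model of that contract, independent of the real BlockInfo triplet bookkeeping (class and method names here are illustrative, not Hadoop APIs):

  import java.util.ArrayList;
  import java.util.List;

  /** Toy model of the replace-on-same-node contract tested above. */
  class ToyBlockInfo {                                   // hypothetical class
    static final class Replica {
      final String datanodeId;
      String storageId;
      Replica(String d, String s) { datanodeId = d; storageId = s; }
    }

    private final List<Replica> replicas = new ArrayList<>();

    /**
     * Returns true if a new replica entry was added, false if an existing
     * entry from the same datanode was updated in place instead.
     */
    boolean addStorage(String datanodeId, String storageId) {
      for (Replica r : replicas) {
        if (r.datanodeId.equals(datanodeId)) {
          r.storageId = storageId;  // same node, different storage: replace
          return false;
        }
      }
      replicas.add(new Replica(datanodeId, storageId));
      return true;
    }
  }

Mirroring the test: adding ("dn1", "storageID1") returns true, then adding ("dn1", "storageID2") returns false and leaves a single entry now pointing at storageID2.
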
Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCacheDirectives.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCacheDirectives.java?rev=1603664&r1=1603663&r2=1603664&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCacheDirectives.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCacheDirectives.java Wed Jun 18 23:15:04 2014
@@ -1408,12 +1408,17 @@ public class TestCacheDirectives {
    */
   private void checkPendingCachedEmpty(MiniDFSCluster cluster)
       throws Exception {
-    final DatanodeManager datanodeManager =
-        cluster.getNamesystem().getBlockManager().getDatanodeManager();
-    for (DataNode dn : cluster.getDataNodes()) {
-      DatanodeDescriptor descriptor =
-          datanodeManager.getDatanode(dn.getDatanodeId());
-      Assert.assertTrue(descriptor.getPendingCached().isEmpty());
+    cluster.getNamesystem().readLock();
+    try {
+      final DatanodeManager datanodeManager =
+          cluster.getNamesystem().getBlockManager().getDatanodeManager();
+      for (DataNode dn : cluster.getDataNodes()) {
+        DatanodeDescriptor descriptor =
+            datanodeManager.getDatanode(dn.getDatanodeId());
+        Assert.assertTrue(descriptor.getPendingCached().isEmpty());
+      }
+    } finally {
+      cluster.getNamesystem().readUnlock();
     }
   }
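[Reviewer sketch] The TestCacheDirectives change above is purely about lock discipline: reads of per-datanode state must happen under the namesystem read lock, and the unlock must sit in a finally block so a thrown AssertionError cannot leak the lock. The general shape of the idiom, sketched here with a plain ReentrantReadWriteLock stand-in rather than FSNamesystem itself (the class and field are illustrative):

  import java.util.concurrent.locks.ReentrantReadWriteLock;

  public class ReadLockIdiom {                           // hypothetical class
    private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();

    /** Stand-in for the state the namesystem lock guards. */
    private int pendingCached = 0;

    int readPendingCached() {
      lock.readLock().lock();      // plays the role of FSNamesystem#readLock()
      try {
        // All reads of guarded state happen inside the try block, so any
        // exception (including a failed assertion) still releases the lock.
        return pendingCached;
      } finally {
        lock.readLock().unlock();  // plays the role of FSNamesystem#readUnlock()
      }
    }
  }
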
Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java?rev=1603664&r1=1603663&r2=1603664&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java Wed Jun 18 23:15:04 2014
@@ -171,8 +171,6 @@ public class TestRenameWithSnapshots {
   private static boolean existsInDiffReport(List<DiffReportEntry> entries,
       DiffType type, String relativePath) {
     for (DiffReportEntry entry : entries) {
-      System.out.println("DiffEntry is:" + entry.getType() + "\""
-          + new String(entry.getRelativePath()) + "\"");
       if ((entry.getType() == type)
           && ((new String(entry.getRelativePath())).compareTo(relativePath) == 0)) {
         return true;
@@ -2374,4 +2372,46 @@ public class TestRenameWithSnapshots {
     // save namespace and restart
     restartClusterAndCheckImage(true);
   }
+
+  @Test
+  public void testRenameWithOverWrite() throws Exception {
+    final Path root = new Path("/");
+    final Path foo = new Path(root, "foo");
+    final Path file1InFoo = new Path(foo, "file1");
+    final Path file2InFoo = new Path(foo, "file2");
+    final Path file3InFoo = new Path(foo, "file3");
+    DFSTestUtil.createFile(hdfs, file1InFoo, 1L, REPL, SEED);
+    DFSTestUtil.createFile(hdfs, file2InFoo, 1L, REPL, SEED);
+    DFSTestUtil.createFile(hdfs, file3InFoo, 1L, REPL, SEED);
+    final Path bar = new Path(root, "bar");
+    hdfs.mkdirs(bar);
+
+    SnapshotTestHelper.createSnapshot(hdfs, root, "s0");
+    // move file1 from foo to bar
+    final Path fileInBar = new Path(bar, "file1");
+    hdfs.rename(file1InFoo, fileInBar);
+    // rename bar to newDir
+    final Path newDir = new Path(root, "newDir");
+    hdfs.rename(bar, newDir);
+    // move file2 from foo to newDir
+    final Path file2InNewDir = new Path(newDir, "file2");
+    hdfs.rename(file2InFoo, file2InNewDir);
+    // move file3 from foo to newDir and rename it to file1, this will overwrite
+    // the original file1
+    final Path file1InNewDir = new Path(newDir, "file1");
+    hdfs.rename(file3InFoo, file1InNewDir, Rename.OVERWRITE);
+    SnapshotTestHelper.createSnapshot(hdfs, root, "s1");
+
+    SnapshotDiffReport report = hdfs.getSnapshotDiffReport(root, "s0", "s1");
+    LOG.info("DiffList is \n\"" + report.toString() + "\"");
+    List<DiffReportEntry> entries = report.getDiffList();
+    assertEquals(7, entries.size());
+    assertTrue(existsInDiffReport(entries, DiffType.MODIFY, ""));
+    assertTrue(existsInDiffReport(entries, DiffType.MODIFY, foo.getName()));
+    assertTrue(existsInDiffReport(entries, DiffType.DELETE, bar.getName()));
+    assertTrue(existsInDiffReport(entries, DiffType.CREATE, newDir.getName()));
+    assertTrue(existsInDiffReport(entries, DiffType.DELETE, "foo/file1"));
+    assertTrue(existsInDiffReport(entries, DiffType.DELETE, "foo/file2"));
+    assertTrue(existsInDiffReport(entries, DiffType.DELETE, "foo/file3"));
+  }
 }

Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java?rev=1603664&r1=1603663&r2=1603664&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java Wed Jun 18 23:15:04 2014
@@ -138,6 +138,13 @@ public class TestOfflineImageViewer {
       hdfs.mkdirs(new Path("/snapshot/1"));
       hdfs.delete(snapshot, true);
 
+      // Set XAttrs so the fsimage contains XAttr ops
+      final Path xattr = new Path("/xattr");
+      hdfs.mkdirs(xattr);
+      hdfs.setXAttr(xattr, "user.a1", new byte[]{ 0x31, 0x32, 0x33 });
+      hdfs.setXAttr(xattr, "user.a2", new byte[]{ 0x37, 0x38, 0x39 });
+      writtenFiles.put(xattr.toString(), hdfs.getFileStatus(xattr));
+
       // Write results to the fsimage file
       hdfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER, false);
       hdfs.saveNamespace();
@@ -210,8 +217,8 @@ public class TestOfflineImageViewer {
     matcher = p.matcher(output.getBuffer());
     assertTrue(matcher.find() && matcher.groupCount() == 1);
     int totalDirs = Integer.parseInt(matcher.group(1));
-    // totalDirs includes root directory and empty directory
-    assertEquals(NUM_DIRS + 2, totalDirs);
+    // totalDirs includes root directory, empty directory, and xattr directory
+    assertEquals(NUM_DIRS + 3, totalDirs);
 
     FileStatus maxFile = Collections.max(writtenFiles.values(),
         new Comparator<FileStatus>() {
@@ -264,7 +271,7 @@ public class TestOfflineImageViewer {
     // verify the number of directories
     FileStatus[] statuses = webhdfs.listStatus(new Path("/"));
-    assertEquals(NUM_DIRS + 1, statuses.length); // contains empty directory
+    assertEquals(NUM_DIRS + 2, statuses.length); // contains empty and xattr directory
 
     // verify the number of files in the directory
     statuses = webhdfs.listStatus(new Path("/dir0"));
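[Reviewer sketch] For context on the TestOfflineImageViewer hunk: the xattr bytes written there are just ASCII digits ({0x31,0x32,0x33} is "123", {0x37,0x38,0x39} is "789"), set so that the saved fsimage exercises the viewer's XAttr handling. A minimal sketch of the same setXAttr calls against a plain FileSystem handle ("/xattr" is the path the test uses; the class name and everything else is illustrative):

  import java.nio.charset.StandardCharsets;

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.FileSystem;
  import org.apache.hadoop.fs.Path;

  public class XAttrSketch {                             // hypothetical class
    public static void main(String[] args) throws Exception {
      FileSystem fs = FileSystem.get(new Configuration());
      Path dir = new Path("/xattr");  // same path the test writes to
      fs.mkdirs(dir);
      // Identical bytes to the test's literal arrays.
      fs.setXAttr(dir, "user.a1", "123".getBytes(StandardCharsets.UTF_8));
      fs.setXAttr(dir, "user.a2", "789".getBytes(StandardCharsets.UTF_8));
      // Expect [user.a1, user.a2]; values come back as raw byte arrays.
      System.out.println(fs.getXAttrs(dir).keySet());
    }
  }
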