Subject: svn commit: r1354316 - in /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs: ./ src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/ src/test/java/org/apache/hadoop/hdfs/ser...
Date: Wed, 27 Jun 2012 02:51:40 -0000
To: hdfs-commits@hadoop.apache.org
From: szetszwo@apache.org

Author: szetszwo
Date: Wed Jun 27 02:51:39 2012
New Revision: 1354316

URL: http://svn.apache.org/viewvc?rev=1354316&view=rev
Log:
HDFS-3551. WebHDFS CREATE should use client location for HTTP redirection.
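For context, WebHDFS CREATE is a two-step exchange: the client first sends a PUT to the namenode without data, the namenode answers with a 307 redirect naming a datanode, and the client then streams the file to that datanode. With this patch the redirect target is a datanode chosen near the client's address when one is available, instead of a random node. A minimal client-side sketch of that flow follows (illustrative only, not code from this commit; the host "namenode", port 50070, and the file path are assumed for the example):

// Illustrative sketch only -- not code from this commit. It walks the
// two-step WebHDFS CREATE exchange that this change affects. The host
// "namenode", port 50070 and the path /foo are assumptions.
import java.io.OutputStream;
import java.net.HttpURLConnection;
import java.net.URL;

public class WebHdfsCreateSketch {
  public static void main(String[] args) throws Exception {
    final URL create = new URL(
        "http://namenode:50070/webhdfs/v1/foo?op=CREATE&overwrite=true");

    // Step 1: ask the namenode where to write. Disable automatic redirect
    // following so the Location header (the chosen datanode) is visible.
    final HttpURLConnection nn = (HttpURLConnection)create.openConnection();
    nn.setRequestMethod("PUT");
    nn.setInstanceFollowRedirects(false);
    final String location = nn.getHeaderField("Location");
    nn.disconnect();

    // Step 2: stream the file data to the datanode named by the redirect.
    final HttpURLConnection dn =
        (HttpURLConnection)new URL(location).openConnection();
    dn.setRequestMethod("PUT");
    dn.setDoOutput(true);
    final OutputStream out = dn.getOutputStream();
    out.write(1);
    out.close();
    System.out.println("HTTP " + dn.getResponseCode()); // expect 201 Created
    dn.disconnect();
  }
}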
Added:
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/web/
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/web/resources/
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/web/resources/TestWebHdfsDataLocality.java
Modified:
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/Host2NodesMap.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/WebHdfsTestUtil.java

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1354316&r1=1354315&r2=1354316&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Wed Jun 27 02:51:39 2012
@@ -381,6 +381,9 @@ Branch-2 ( Unreleased changes )
 
     HDFS-3428. Move DelegationTokenRenewer to common (tucu)
 
+    HDFS-3551. WebHDFS CREATE should use client location for HTTP redirection.
+    (szetszwo)
+
 Release 2.0.0-alpha - 05-23-2012
 
   INCOMPATIBLE CHANGES

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java?rev=1354316&r1=1354315&r2=1354316&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java Wed Jun 27 02:51:39 2012
@@ -1053,4 +1053,8 @@ public class DatanodeManager {
     }
   }
 
+  @Override
+  public String toString() {
+    return getClass().getSimpleName() + ": " + host2DatanodeMap;
+  }
 }

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/Host2NodesMap.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/Host2NodesMap.java?rev=1354316&r1=1354315&r2=1354316&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/Host2NodesMap.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/Host2NodesMap.java Wed Jun 27 02:51:39 2012
@@ -17,7 +17,9 @@
  */
 package org.apache.hadoop.hdfs.server.blockmanagement;
 
+import java.util.Arrays;
 import java.util.HashMap;
+import java.util.Map;
 
 import java.util.concurrent.locks.ReadWriteLock;
 import java.util.concurrent.locks.ReentrantReadWriteLock;
@@ -156,4 +158,14 @@ class Host2NodesMap {
       hostmapLock.readLock().unlock();
     }
   }
+
+  @Override
+  public String toString() {
+    final StringBuilder b = new StringBuilder(getClass().getSimpleName())
+        .append("[");
+    for(Map.Entry<String, DatanodeDescriptor[]> e : map.entrySet()) {
+      b.append("\n  " + e.getKey() + " => " + Arrays.asList(e.getValue()));
+    }
+    return b.append("\n]").toString();
+  }
 }

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java?rev=1354316&r1=1354315&r2=1354316&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java Wed Jun 27 02:51:39 2012
@@ -56,6 +56,7 @@ import org.apache.hadoop.hdfs.protocol.H
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
 import org.apache.hadoop.hdfs.server.common.JspHelper;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
@@ -115,6 +116,11 @@ public class NamenodeWebHdfsMethods {
     return REMOTE_ADDRESS.get();
   }
 
+  /** Set the remote client address. */
+  static void setRemoteAddress(String remoteAddress) {
+    REMOTE_ADDRESS.set(remoteAddress);
+  }
+
   private @Context ServletContext context;
   private @Context HttpServletRequest request;
   private @Context HttpServletResponse response;
@@ -134,12 +140,26 @@ public class NamenodeWebHdfsMethods {
     response.setContentType(null);
   }
 
-  private static DatanodeInfo chooseDatanode(final NameNode namenode,
+  static DatanodeInfo chooseDatanode(final NameNode namenode,
       final String path, final HttpOpParam.Op op, final long openOffset,
-      Configuration conf) throws IOException {
-    if (op == GetOpParam.Op.OPEN
+      final long blocksize, Configuration conf) throws IOException {
+    final BlockManager bm = namenode.getNamesystem().getBlockManager();
+
+    if (op == PutOpParam.Op.CREATE) {
+      //choose a datanode near to client
+      final DatanodeDescriptor clientNode = bm.getDatanodeManager(
+          ).getDatanodeByHost(getRemoteAddress());
+      if (clientNode != null) {
+        final DatanodeDescriptor[] datanodes = bm.getBlockPlacementPolicy(
+            ).chooseTarget(path, 1, clientNode, null, blocksize);
+        if (datanodes.length > 0) {
+          return datanodes[0];
+        }
+      }
+    } else if (op == GetOpParam.Op.OPEN
         || op == GetOpParam.Op.GETFILECHECKSUM
         || op == PostOpParam.Op.APPEND) {
+      //choose a datanode containing a replica
       final NamenodeProtocols np = namenode.getRpcServer();
       final HdfsFileStatus status = np.getFileInfo(path);
       if (status == null) {
@@ -158,14 +178,13 @@ public class NamenodeWebHdfsMethods {
         final LocatedBlocks locations = np.getBlockLocations(path, offset, 1);
         final int count = locations.locatedBlockCount();
         if (count > 0) {
-          return JspHelper.bestNode(locations.get(0), conf);
+          return JspHelper.bestNode(locations.get(0).getLocations(), false, conf);
         }
       }
     }
 
-    return (DatanodeDescriptor)namenode.getNamesystem().getBlockManager(
-        ).getDatanodeManager().getNetworkTopology().chooseRandom(
-        NodeBase.ROOT);
+    return (DatanodeDescriptor)bm.getDatanodeManager().getNetworkTopology(
+        ).chooseRandom(NodeBase.ROOT);
   }
 
   private Token generateDelegationToken(
@@ -183,9 +202,11 @@ public class NamenodeWebHdfsMethods {
       final UserGroupInformation ugi, final DelegationParam delegation,
       final UserParam username, final DoAsParam doAsUser,
       final String path, final HttpOpParam.Op op, final long openOffset,
+      final long blocksize,
       final Param<?,?>... parameters) throws URISyntaxException, IOException {
     final Configuration conf = (Configuration)context.getAttribute(JspHelper.CURRENT_CONF);
-    final DatanodeInfo dn = chooseDatanode(namenode, path, op, openOffset, conf);
+    final DatanodeInfo dn = chooseDatanode(namenode, path, op, openOffset,
+        blocksize, conf);
 
     final String delegationQuery;
     if (!UserGroupInformation.isSecurityEnabled()) {
@@ -356,7 +377,7 @@ public class NamenodeWebHdfsMethods {
     case CREATE:
     {
       final URI uri = redirectURI(namenode, ugi, delegation, username, doAsUser,
-          fullpath, op.getValue(), -1L,
+          fullpath, op.getValue(), -1L, blockSize.getValue(conf),
           permission, overwrite, bufferSize, replication, blockSize);
       return Response.temporaryRedirect(uri).type(MediaType.APPLICATION_OCTET_STREAM).build();
     }
@@ -502,7 +523,7 @@ public class NamenodeWebHdfsMethods {
     case APPEND:
     {
       final URI uri = redirectURI(namenode, ugi, delegation, username, doAsUser,
-          fullpath, op.getValue(), -1L, bufferSize);
+          fullpath, op.getValue(), -1L, -1L, bufferSize);
       return Response.temporaryRedirect(uri).type(MediaType.APPLICATION_OCTET_STREAM).build();
     }
     default:
@@ -598,7 +619,7 @@ public class NamenodeWebHdfsMethods {
     case OPEN:
     {
       final URI uri = redirectURI(namenode, ugi, delegation, username, doAsUser,
-          fullpath, op.getValue(), offset.getValue(), offset, length, bufferSize);
+          fullpath, op.getValue(), offset.getValue(), -1L, offset, length, bufferSize);
      return Response.temporaryRedirect(uri).type(MediaType.APPLICATION_OCTET_STREAM).build();
     }
     case GET_BLOCK_LOCATIONS:
@@ -634,7 +655,7 @@ public class NamenodeWebHdfsMethods {
     case GETFILECHECKSUM:
     {
       final URI uri = redirectURI(namenode, ugi, delegation, username, doAsUser,
-          fullpath, op.getValue(), -1L);
+          fullpath, op.getValue(), -1L, -1L);
       return Response.temporaryRedirect(uri).type(MediaType.APPLICATION_OCTET_STREAM).build();
     }
     case GETDELEGATIONTOKEN:

Added: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/web/resources/TestWebHdfsDataLocality.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/web/resources/TestWebHdfsDataLocality.java?rev=1354316&view=auto
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/web/resources/TestWebHdfsDataLocality.java (added)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/web/resources/TestWebHdfsDataLocality.java Wed Jun 27 02:51:39 2012
@@ -0,0 +1,140 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode.web.resources;
+
+import java.util.Arrays;
+import java.util.List;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.commons.logging.impl.Log4JLogger;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
+import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager;
+import org.apache.hadoop.hdfs.server.datanode.DataNode;
+import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
+import org.apache.hadoop.hdfs.server.namenode.LeaseManager;
+import org.apache.hadoop.hdfs.server.namenode.NameNode;
+import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
+import org.apache.hadoop.hdfs.web.WebHdfsTestUtil;
+import org.apache.hadoop.hdfs.web.resources.GetOpParam;
+import org.apache.hadoop.hdfs.web.resources.PostOpParam;
+import org.apache.hadoop.hdfs.web.resources.PutOpParam;
+import org.apache.log4j.Level;
+import org.junit.Assert;
+import org.junit.Test;
+
+/**
+ * Test WebHDFS which provides data locality using HTTP redirection.
+ */
+public class TestWebHdfsDataLocality {
+  static final Log LOG = LogFactory.getLog(TestWebHdfsDataLocality.class);
+  {
+    ((Log4JLogger)NameNode.stateChangeLog).getLogger().setLevel(Level.OFF);
+    ((Log4JLogger)LeaseManager.LOG).getLogger().setLevel(Level.OFF);
+    ((Log4JLogger)LogFactory.getLog(FSNamesystem.class)).getLogger().setLevel(Level.OFF);
+  }
+
+  private static final String RACK0 = "/rack0";
+  private static final String RACK1 = "/rack1";
+  private static final String RACK2 = "/rack2";
+
+  @Test
+  public void testDataLocality() throws Exception {
+    final Configuration conf = WebHdfsTestUtil.createConf();
+    final String[] racks = {RACK0, RACK0, RACK1, RACK1, RACK2, RACK2};
+    final int nDataNodes = racks.length;
+    LOG.info("nDataNodes=" + nDataNodes + ", racks=" + Arrays.asList(racks));
+
+    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
+        .numDataNodes(nDataNodes)
+        .racks(racks)
+        .build();
+    try {
+      cluster.waitActive();
+
+      final DistributedFileSystem dfs = cluster.getFileSystem();
+      final NameNode namenode = cluster.getNameNode();
+      final DatanodeManager dm = namenode.getNamesystem().getBlockManager(
+          ).getDatanodeManager();
+      LOG.info("dm=" + dm);
+
+      final long blocksize = DFSConfigKeys.DFS_BLOCK_SIZE_DEFAULT;
+      final String f = "/foo";
+
+      { //test CREATE
+        for(int i = 0; i < nDataNodes; i++) {
+          //set client address to a particular datanode
+          final DataNode dn = cluster.getDataNodes().get(i);
+          final String ipAddr = dm.getDatanode(dn.getDatanodeId()).getIpAddr();
+          NamenodeWebHdfsMethods.setRemoteAddress(ipAddr);
+
+          //The chosen datanode must be the same as the client address
+          final DatanodeInfo chosen = NamenodeWebHdfsMethods.chooseDatanode(
+              namenode, f, PutOpParam.Op.CREATE, -1L, blocksize, conf);
+          Assert.assertEquals(ipAddr, chosen.getIpAddr());
+        }
+      }
+
+      //create a file with one replica.
+      final Path p = new Path(f);
+      final FSDataOutputStream out = dfs.create(p, (short)1);
+      out.write(1);
+      out.close();
+
+      //get replica location.
+      final LocatedBlocks locatedblocks = NameNodeAdapter.getBlockLocations(
+          namenode, f, 0, 1);
+      final List<LocatedBlock> lb = locatedblocks.getLocatedBlocks();
+      Assert.assertEquals(1, lb.size());
+      final DatanodeInfo[] locations = lb.get(0).getLocations();
+      Assert.assertEquals(1, locations.length);
+      final DatanodeInfo expected = locations[0];
+
+      //For GETFILECHECKSUM, OPEN and APPEND,
+      //the chosen datanode must be the same as the replica location.
+
+      { //test GETFILECHECKSUM
+        final DatanodeInfo chosen = NamenodeWebHdfsMethods.chooseDatanode(
+            namenode, f, GetOpParam.Op.GETFILECHECKSUM, -1L, blocksize, conf);
+        Assert.assertEquals(expected, chosen);
+      }
+
+      { //test OPEN
+        final DatanodeInfo chosen = NamenodeWebHdfsMethods.chooseDatanode(
+            namenode, f, GetOpParam.Op.OPEN, 0, blocksize, conf);
+        Assert.assertEquals(expected, chosen);
+      }
+
+      { //test APPEND
+        final DatanodeInfo chosen = NamenodeWebHdfsMethods.chooseDatanode(
+            namenode, f, PostOpParam.Op.APPEND, -1L, blocksize, conf);
+        Assert.assertEquals(expected, chosen);
+      }
+    } finally {
+      cluster.shutdown();
+    }
+  }
+}
\ No newline at end of file

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/WebHdfsTestUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/WebHdfsTestUtil.java?rev=1354316&r1=1354315&r2=1354316&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/WebHdfsTestUtil.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/WebHdfsTestUtil.java Wed Jun 27 02:51:39 2012
@@ -40,6 +40,12 @@ import org.junit.Assert;
 public class WebHdfsTestUtil {
   public static final Log LOG = LogFactory.getLog(WebHdfsTestUtil.class);
 
+  public static Configuration createConf() {
+    final Configuration conf = new Configuration();
+    conf.setBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);
+    return conf;
+  }
+
   public static WebHdfsFileSystem getWebHdfsFileSystem(final Configuration conf
       ) throws IOException, URISyntaxException {
     final String uri = WebHdfsFileSystem.SCHEME + "://"
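As a usage note, the createConf() helper added above simply returns a Configuration with WebHDFS enabled, ready to hand to a MiniDFSCluster. A minimal sketch of how a test can combine it with the webhdfs FileSystem follows (illustrative only, not code from this commit; the class name and the /sketch path are invented for the example):

// Illustrative sketch only -- not code from this commit. It exercises the
// new WebHdfsTestUtil.createConf() helper with a one-datanode MiniDFSCluster,
// mirroring the pattern in TestWebHdfsDataLocality above. The class name
// and the /sketch path are assumptions.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.web.WebHdfsTestUtil;

public class WebHdfsConfSketch {
  public static void main(String[] args) throws Exception {
    final Configuration conf = WebHdfsTestUtil.createConf();
    final MiniDFSCluster cluster =
        new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    try {
      cluster.waitActive();
      // Build a webhdfs:// FileSystem from the same configuration and use
      // it through the ordinary FileSystem API.
      final FileSystem webhdfs = WebHdfsTestUtil.getWebHdfsFileSystem(conf);
      webhdfs.mkdirs(new Path("/sketch"));
    } finally {
      cluster.shutdown();
    }
  }
}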