Subject: svn commit: r1593948 [2/4] - in /hadoop/common/branches/HDFS-2006/hadoop-hdfs-project: hadoop-hdfs-httpfs/ hadoop-hdfs-nfs/ hadoop-hdfs/ hadoop-hdfs/src/main/java/ hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/ hadoop-hdfs/src/main/java/org/apache/h...
Date: Mon, 12 May 2014 12:44:05 -0000
To: hdfs-commits@hadoop.apache.org
From: umamahesh@apache.org
Message-Id: <20140512124409.8A9FC2388B6C@eris.apache.org>

Modified: hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/JspHelper.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/JspHelper.java?rev=1593948&r1=1593947&r2=1593948&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/JspHelper.java (original)
+++ hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/JspHelper.java Mon May 12 12:43:59 2014
@@ -18,58 +18,16 @@
 package org.apache.hadoop.hdfs.server.common;
 
-import static org.apache.hadoop.fs.CommonConfigurationKeys.DEFAULT_HADOOP_HTTP_STATIC_USER;
-import static org.apache.hadoop.fs.CommonConfigurationKeys.HADOOP_HTTP_STATIC_USER;
-
-import java.io.ByteArrayInputStream;
-import java.io.DataInputStream;
-import java.io.IOException;
-import java.io.UnsupportedEncodingException;
-import java.net.InetAddress;
-import java.net.InetSocketAddress;
-import java.net.Socket;
-import java.net.URL;
-import java.net.URLEncoder;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.Comparator;
-import java.util.HashMap;
-import java.util.List;
-
-import javax.servlet.ServletContext;
-import javax.servlet.http.HttpServletRequest;
-import javax.servlet.jsp.JspWriter;
-
 import org.apache.commons.logging.Log; import
org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hdfs.BlockReader; -import org.apache.hadoop.hdfs.BlockReaderFactory; -import org.apache.hadoop.hdfs.ClientContext; -import org.apache.hadoop.hdfs.DFSClient; -import org.apache.hadoop.hdfs.DFSUtil; -import org.apache.hadoop.hdfs.RemotePeerFactory; -import org.apache.hadoop.hdfs.net.Peer; -import org.apache.hadoop.hdfs.net.TcpPeerServer; -import org.apache.hadoop.hdfs.protocol.DatanodeID; -import org.apache.hadoop.hdfs.protocol.DatanodeInfo; -import org.apache.hadoop.hdfs.protocol.ExtendedBlock; -import org.apache.hadoop.hdfs.protocol.LocatedBlock; -import org.apache.hadoop.hdfs.protocol.LocatedBlocks; -import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier; -import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey; import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier; -import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor; -import org.apache.hadoop.hdfs.server.datanode.CachingStrategy; import org.apache.hadoop.hdfs.server.namenode.NameNode; import org.apache.hadoop.hdfs.server.namenode.NameNodeHttpServer; import org.apache.hadoop.hdfs.web.resources.DelegationParam; import org.apache.hadoop.hdfs.web.resources.DoAsParam; import org.apache.hadoop.hdfs.web.resources.UserParam; -import org.apache.hadoop.http.HtmlQuoting; -import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.security.AccessControlException; import org.apache.hadoop.security.SecurityUtil; @@ -78,484 +36,26 @@ import org.apache.hadoop.security.UserGr import org.apache.hadoop.security.authentication.util.KerberosName; import org.apache.hadoop.security.authorize.ProxyUsers; import org.apache.hadoop.security.token.Token; -import org.apache.hadoop.util.VersionInfo; -import com.google.common.base.Charsets; +import javax.servlet.ServletContext; +import javax.servlet.http.HttpServletRequest; +import java.io.ByteArrayInputStream; +import java.io.DataInputStream; +import java.io.IOException; +import java.net.InetSocketAddress; + +import static org.apache.hadoop.fs.CommonConfigurationKeys.DEFAULT_HADOOP_HTTP_STATIC_USER; +import static org.apache.hadoop.fs.CommonConfigurationKeys.HADOOP_HTTP_STATIC_USER; @InterfaceAudience.Private public class JspHelper { public static final String CURRENT_CONF = "current.conf"; public static final String DELEGATION_PARAMETER_NAME = DelegationParam.NAME; public static final String NAMENODE_ADDRESS = "nnaddr"; - static final String SET_DELEGATION = "&" + DELEGATION_PARAMETER_NAME + - "="; private static final Log LOG = LogFactory.getLog(JspHelper.class); /** Private constructor for preventing creating JspHelper object. */ - private JspHelper() {} - - // data structure to count number of blocks on datanodes. 
- private static class NodeRecord extends DatanodeInfo { - int frequency; - - public NodeRecord(DatanodeInfo info, int count) { - super(info); - this.frequency = count; - } - - @Override - public boolean equals(Object obj) { - // Sufficient to use super equality as datanodes are uniquely identified - // by DatanodeID - return (this == obj) || super.equals(obj); - } - @Override - public int hashCode() { - // Super implementation is sufficient - return super.hashCode(); - } - } - - // compare two records based on their frequency - private static class NodeRecordComparator implements Comparator { - - @Override - public int compare(NodeRecord o1, NodeRecord o2) { - if (o1.frequency < o2.frequency) { - return -1; - } else if (o1.frequency > o2.frequency) { - return 1; - } - return 0; - } - } - - /** - * convenience method for canonicalizing host name. - * @param addr name:port or name - * @return canonicalized host name - */ - public static String canonicalize(String addr) { - // default port 1 is supplied to allow addr without port. - // the port will be ignored. - return NetUtils.createSocketAddr(addr, 1).getAddress() - .getCanonicalHostName(); - } - - /** - * A helper class that generates the correct URL for different schema. - * - */ - public static final class Url { - public static String authority(String scheme, DatanodeID d) { - String fqdn = (d.getIpAddr() != null && !d.getIpAddr().isEmpty())? - canonicalize(d.getIpAddr()): - d.getHostName(); - if (scheme.equals("http")) { - return fqdn + ":" + d.getInfoPort(); - } else if (scheme.equals("https")) { - return fqdn + ":" + d.getInfoSecurePort(); - } else { - throw new IllegalArgumentException("Unknown scheme:" + scheme); - } - } - - public static String url(String scheme, DatanodeID d) { - return scheme + "://" + authority(scheme, d); - } - } - - public static DatanodeInfo bestNode(LocatedBlocks blks, Configuration conf) - throws IOException { - HashMap map = - new HashMap(); - for (LocatedBlock block : blks.getLocatedBlocks()) { - DatanodeInfo[] nodes = block.getLocations(); - for (DatanodeInfo node : nodes) { - NodeRecord record = map.get(node); - if (record == null) { - map.put(node, new NodeRecord(node, 1)); - } else { - record.frequency++; - } - } - } - NodeRecord[] nodes = map.values().toArray(new NodeRecord[map.size()]); - Arrays.sort(nodes, new NodeRecordComparator()); - return bestNode(nodes, false); - } - - public static DatanodeInfo bestNode(LocatedBlock blk, Configuration conf) - throws IOException { - DatanodeInfo[] nodes = blk.getLocations(); - return bestNode(nodes, true); - } - - private static DatanodeInfo bestNode(DatanodeInfo[] nodes, boolean doRandom) - throws IOException { - if (nodes == null || nodes.length == 0) { - throw new IOException("No nodes contain this block"); - } - int l = 0; - while (l < nodes.length && !nodes[l].isDecommissioned()) { - ++l; - } - - if (l == 0) { - throw new IOException("No active nodes contain this block"); - } - - int index = doRandom ? 
DFSUtil.getRandom().nextInt(l) : 0; - return nodes[index]; - } - - public static void streamBlockInAscii(InetSocketAddress addr, String poolId, - long blockId, Token blockToken, long genStamp, - long blockSize, long offsetIntoBlock, long chunkSizeToView, - JspWriter out, final Configuration conf, DFSClient.Conf dfsConf, - final DataEncryptionKey encryptionKey) - throws IOException { - if (chunkSizeToView == 0) return; - int amtToRead = (int)Math.min(chunkSizeToView, blockSize - offsetIntoBlock); - - BlockReader blockReader = new BlockReaderFactory(dfsConf). - setInetSocketAddress(addr). - setBlock(new ExtendedBlock(poolId, blockId, 0, genStamp)). - setFileName(BlockReaderFactory.getFileName(addr, poolId, blockId)). - setBlockToken(blockToken). - setStartOffset(offsetIntoBlock). - setLength(amtToRead). - setVerifyChecksum(true). - setClientName("JspHelper"). - setClientCacheContext(ClientContext.getFromConf(conf)). - setDatanodeInfo(new DatanodeInfo( - new DatanodeID(addr.getAddress().getHostAddress(), - addr.getHostName(), poolId, addr.getPort(), 0, 0, 0))). - setCachingStrategy(CachingStrategy.newDefaultStrategy()). - setConfiguration(conf). - setRemotePeerFactory(new RemotePeerFactory() { - @Override - public Peer newConnectedPeer(InetSocketAddress addr) - throws IOException { - Peer peer = null; - Socket sock = NetUtils.getDefaultSocketFactory(conf).createSocket(); - try { - sock.connect(addr, HdfsServerConstants.READ_TIMEOUT); - sock.setSoTimeout(HdfsServerConstants.READ_TIMEOUT); - peer = TcpPeerServer.peerFromSocketAndKey(sock, encryptionKey); - } finally { - if (peer == null) { - IOUtils.closeSocket(sock); - } - } - return peer; - } - }). - build(); - - final byte[] buf = new byte[amtToRead]; - try { - int readOffset = 0; - int retries = 2; - while (amtToRead > 0) { - int numRead = amtToRead; - try { - blockReader.readFully(buf, readOffset, amtToRead); - } catch (IOException e) { - retries--; - if (retries == 0) - throw new IOException("Could not read data from datanode"); - continue; - } - amtToRead -= numRead; - readOffset += numRead; - } - } finally { - blockReader.close(); - } - out.print(HtmlQuoting.quoteHtmlChars(new String(buf, Charsets.UTF_8))); - } - - public static void addTableHeader(JspWriter out) throws IOException { - out.print(""); - out.print(""); - } - public static void addTableRow(JspWriter out, String[] columns) throws IOException { - out.print(""); - for (int i = 0; i < columns.length; i++) { - out.print(""); - } - out.print(""); - } - public static void addTableRow(JspWriter out, String[] columns, int row) throws IOException { - out.print(""); - - for (int i = 0; i < columns.length; i++) { - if (row/2*2 == row) {//even - out.print(""); - } else { - out.print(""); - - } - } - out.print(""); - } - public static void addTableFooter(JspWriter out) throws IOException { - out.print("
"+columns[i]+"
"+columns[i]+"
"+columns[i]+"
"); - } - - public static void sortNodeList(final List nodes, - String field, String order) { - - class NodeComapare implements Comparator { - static final int - FIELD_NAME = 1, - FIELD_LAST_CONTACT = 2, - FIELD_BLOCKS = 3, - FIELD_CAPACITY = 4, - FIELD_USED = 5, - FIELD_PERCENT_USED = 6, - FIELD_NONDFS_USED = 7, - FIELD_REMAINING = 8, - FIELD_PERCENT_REMAINING = 9, - FIELD_ADMIN_STATE = 10, - FIELD_DECOMMISSIONED = 11, - FIELD_BLOCKPOOL_USED = 12, - FIELD_PERBLOCKPOOL_USED = 13, - FIELD_FAILED_VOLUMES = 14, - SORT_ORDER_ASC = 1, - SORT_ORDER_DSC = 2; - - int sortField = FIELD_NAME; - int sortOrder = SORT_ORDER_ASC; - - public NodeComapare(String field, String order) { - if (field.equals("lastcontact")) { - sortField = FIELD_LAST_CONTACT; - } else if (field.equals("capacity")) { - sortField = FIELD_CAPACITY; - } else if (field.equals("used")) { - sortField = FIELD_USED; - } else if (field.equals("nondfsused")) { - sortField = FIELD_NONDFS_USED; - } else if (field.equals("remaining")) { - sortField = FIELD_REMAINING; - } else if (field.equals("pcused")) { - sortField = FIELD_PERCENT_USED; - } else if (field.equals("pcremaining")) { - sortField = FIELD_PERCENT_REMAINING; - } else if (field.equals("blocks")) { - sortField = FIELD_BLOCKS; - } else if (field.equals("adminstate")) { - sortField = FIELD_ADMIN_STATE; - } else if (field.equals("decommissioned")) { - sortField = FIELD_DECOMMISSIONED; - } else if (field.equals("bpused")) { - sortField = FIELD_BLOCKPOOL_USED; - } else if (field.equals("pcbpused")) { - sortField = FIELD_PERBLOCKPOOL_USED; - } else if (field.equals("volfails")) { - sortField = FIELD_FAILED_VOLUMES; - } else { - sortField = FIELD_NAME; - } - - if (order.equals("DSC")) { - sortOrder = SORT_ORDER_DSC; - } else { - sortOrder = SORT_ORDER_ASC; - } - } - - @Override - public int compare(DatanodeDescriptor d1, - DatanodeDescriptor d2) { - int ret = 0; - switch (sortField) { - case FIELD_LAST_CONTACT: - ret = (int) (d2.getLastUpdate() - d1.getLastUpdate()); - break; - case FIELD_CAPACITY: - long dlong = d1.getCapacity() - d2.getCapacity(); - ret = (dlong < 0) ? -1 : ((dlong > 0) ? 1 : 0); - break; - case FIELD_USED: - dlong = d1.getDfsUsed() - d2.getDfsUsed(); - ret = (dlong < 0) ? -1 : ((dlong > 0) ? 1 : 0); - break; - case FIELD_NONDFS_USED: - dlong = d1.getNonDfsUsed() - d2.getNonDfsUsed(); - ret = (dlong < 0) ? -1 : ((dlong > 0) ? 1 : 0); - break; - case FIELD_REMAINING: - dlong = d1.getRemaining() - d2.getRemaining(); - ret = (dlong < 0) ? -1 : ((dlong > 0) ? 1 : 0); - break; - case FIELD_PERCENT_USED: - double ddbl =((d1.getDfsUsedPercent())- - (d2.getDfsUsedPercent())); - ret = (ddbl < 0) ? -1 : ((ddbl > 0) ? 1 : 0); - break; - case FIELD_PERCENT_REMAINING: - ddbl =((d1.getRemainingPercent())- - (d2.getRemainingPercent())); - ret = (ddbl < 0) ? -1 : ((ddbl > 0) ? 1 : 0); - break; - case FIELD_BLOCKS: - ret = d1.numBlocks() - d2.numBlocks(); - break; - case FIELD_ADMIN_STATE: - ret = d1.getAdminState().toString().compareTo( - d2.getAdminState().toString()); - break; - case FIELD_DECOMMISSIONED: - ret = DFSUtil.DECOM_COMPARATOR.compare(d1, d2); - break; - case FIELD_NAME: - ret = d1.getHostName().compareTo(d2.getHostName()); - break; - case FIELD_BLOCKPOOL_USED: - dlong = d1.getBlockPoolUsed() - d2.getBlockPoolUsed(); - ret = (dlong < 0) ? -1 : ((dlong > 0) ? 1 : 0); - break; - case FIELD_PERBLOCKPOOL_USED: - ddbl = d1.getBlockPoolUsedPercent() - d2.getBlockPoolUsedPercent(); - ret = (ddbl < 0) ? -1 : ((ddbl > 0) ? 
1 : 0); - break; - case FIELD_FAILED_VOLUMES: - int dint = d1.getVolumeFailures() - d2.getVolumeFailures(); - ret = (dint < 0) ? -1 : ((dint > 0) ? 1 : 0); - break; - default: - throw new IllegalArgumentException("Invalid sortField"); - } - return (sortOrder == SORT_ORDER_DSC) ? -ret : ret; - } - } - - Collections.sort(nodes, new NodeComapare(field, order)); - } - - public static void printPathWithLinks(String dir, JspWriter out, - int namenodeInfoPort, - String tokenString, - String nnAddress - ) throws IOException { - try { - String[] parts = dir.split(Path.SEPARATOR); - StringBuilder tempPath = new StringBuilder(dir.length()); - out.print("" + Path.SEPARATOR - + ""); - tempPath.append(Path.SEPARATOR); - for (int i = 0; i < parts.length-1; i++) { - if (!parts[i].equals("")) { - tempPath.append(parts[i]); - out.print("" + HtmlQuoting.quoteHtmlChars(parts[i]) + "" + Path.SEPARATOR); - tempPath.append(Path.SEPARATOR); - } - } - if(parts.length > 0) { - out.print(HtmlQuoting.quoteHtmlChars(parts[parts.length-1])); - } - } - catch (UnsupportedEncodingException ex) { - ex.printStackTrace(); - } - } - - public static void printGotoForm(JspWriter out, - int namenodeInfoPort, - String tokenString, - String file, - String nnAddress) throws IOException { - out.print("
"); - out.print("Goto : "); - out.print(""); - out.print(""); - out.print(""); - if (UserGroupInformation.isSecurityEnabled()) { - out.print(""); - } - out.print(""); - out.print("
"); - } - - public static void createTitle(JspWriter out, - HttpServletRequest req, - String file) throws IOException{ - if(file == null) file = ""; - int start = Math.max(0,file.length() - 100); - if(start != 0) - file = "..." + file.substring(start, file.length()); - out.print("HDFS:" + file + ""); - } - - /** Convert a String to chunk-size-to-view. */ - public static int string2ChunkSizeToView(String s, int defaultValue) { - int n = s == null? 0: Integer.parseInt(s); - return n > 0? n: defaultValue; - } - - /** Return a table containing version information. */ - public static String getVersionTable() { - return "
" - + "\n " - + "\n " - + "\n
Version:" + VersionInfo.getVersion() + ", " + VersionInfo.getRevision() + "
Compiled:" + VersionInfo.getDate() + " by " + VersionInfo.getUser() + " from " + VersionInfo.getBranch() + "
"; - } - - /** - * Validate filename. - * @return null if the filename is invalid. - * Otherwise, return the validated filename. - */ - public static String validatePath(String p) { - return p == null || p.length() == 0? - null: new Path(p).toUri().getPath(); - } - - /** - * Validate a long value. - * @return null if the value is invalid. - * Otherwise, return the validated Long object. - */ - public static Long validateLong(String value) { - return value == null? null: Long.parseLong(value); - } - - /** - * Validate a URL. - * @return null if the value is invalid. - * Otherwise, return the validated URL String. - */ - public static String validateURL(String value) { - try { - return URLEncoder.encode(new URL(value).toString(), "UTF-8"); - } catch (IOException e) { - return null; - } - } - - /** - * If security is turned off, what is the default web user? - * @param conf the configuration to look in - * @return the remote user that was configuration - */ - public static UserGroupInformation getDefaultWebUser(Configuration conf - ) throws IOException { - return UserGroupInformation.createRemoteUser(getDefaultWebUserName(conf)); - } + private JspHelper() {} private static String getDefaultWebUserName(Configuration conf ) throws IOException { @@ -736,56 +236,4 @@ public class JspHelper { return username; } - /** - * Returns the url parameter for the given token string. - * @param tokenString - * @return url parameter - */ - public static String getDelegationTokenUrlParam(String tokenString) { - if (tokenString == null ) { - return ""; - } - if (UserGroupInformation.isSecurityEnabled()) { - return SET_DELEGATION + tokenString; - } else { - return ""; - } - } - - /** - * Returns the url parameter for the given string, prefixed with - * paramSeparator. - * - * @param name parameter name - * @param val parameter value - * @param paramSeparator URL parameter prefix, i.e. either '?' or '&' - * @return url parameter - */ - public static String getUrlParam(String name, String val, String paramSeparator) { - return val == null ? "" : paramSeparator + name + "=" + val; - } - - /** - * Returns the url parameter for the given string, prefixed with '?' if - * firstParam is true, prefixed with '&' if firstParam is false. - * - * @param name parameter name - * @param val parameter value - * @param firstParam true if this is the first parameter in the list, false otherwise - * @return url parameter - */ - public static String getUrlParam(String name, String val, boolean firstParam) { - return getUrlParam(name, val, firstParam ? "?" : "&"); - } - - /** - * Returns the url parameter for the given string, prefixed with '&'. 
-   *
-   * @param name parameter name
-   * @param val parameter value
-   * @return url parameter
-   */
-  public static String getUrlParam(String name, String val) {
-    return getUrlParam(name, val, false);
-  }
 }

Modified: hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java?rev=1593948&r1=1593947&r2=1593948&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java (original)
+++ hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java Mon May 12 12:43:59 2014
@@ -145,7 +145,11 @@ class BPOfferService {
       return null;
     }
   }
-
+
+  boolean hasBlockPoolId() {
+    return getNamespaceInfo() != null;
+  }
+
   synchronized NamespaceInfo getNamespaceInfo() {
     return bpNSInfo;
   }
@@ -679,4 +683,17 @@ class BPOfferService {
     return true;
   }
 
+  /*
+   * Let the actor retry initialization until all namenodes of the cluster
+   * have failed.
+   */
+  boolean shouldRetryInit() {
+    if (hasBlockPoolId()) {
+      // One of the namenodes registered successfully. Let's continue
+      // retrying for the others.
+      return true;
+    }
+    return isAlive();
+  }
+
 }
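The shouldRetryInit() hunk above and the BPServiceActor changes below implement one retry policy: keep re-attempting initialization as long as at least one namenode of the nameservice could still succeed, and give up only once every namenode has failed. A minimal standalone sketch of that loop shape, assuming hypothetical connect(), shouldRetryInit() and sleepQuietly() stand-ins rather than the real Hadoop methods:

    import java.io.IOException;

    class RetryingActor {
        enum RunningState { CONNECTING, INIT_FAILED, RUNNING, EXITED, FAILED }

        private volatile RunningState runningState = RunningState.CONNECTING;

        void runInitLoop() {
            while (true) {
                try {
                    connect();                  // handshake + registration
                    break;                      // success: leave the init loop
                } catch (IOException ioe) {
                    runningState = RunningState.INIT_FAILED;
                    if (shouldRetryInit()) {
                        sleepQuietly(5000);     // back off, then try again
                    } else {
                        runningState = RunningState.FAILED;  // permanent failure
                        return;
                    }
                }
            }
            runningState = RunningState.RUNNING;
        }

        // Illustrative stand-ins only.
        void connect() throws IOException { }
        boolean shouldRetryInit() { return true; }
        void sleepQuietly(long ms) {
            try { Thread.sleep(ms); } catch (InterruptedException ignored) { }
        }
    }

Tracking the state in a single volatile enum, as the patch does, lets isAlive() distinguish a still-connecting actor from one that has failed for good, without extra locking.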
Modified: hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java?rev=1593948&r1=1593947&r2=1593948&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java (original)
+++ hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java Mon May 12 12:43:59 2014
@@ -90,8 +90,13 @@ class BPServiceActor implements Runnable
   Thread bpThread;
   DatanodeProtocolClientSideTranslatorPB bpNamenode;
   private volatile long lastHeartbeat = 0;
-  private volatile boolean initialized = false;
-
+
+  static enum RunningState {
+    CONNECTING, INIT_FAILED, RUNNING, EXITED, FAILED;
+  }
+
+  private volatile RunningState runningState = RunningState.CONNECTING;
+
   /**
    * Between block reports (which happen on the order of once an hour) the
    * DN reports smaller incremental changes to its block list. This map,
@@ -118,17 +123,12 @@ class BPServiceActor implements Runnable
     this.dnConf = dn.getDnConf();
   }
 
-  /**
-   * returns true if BP thread has completed initialization of storage
-   * and has registered with the corresponding namenode
-   * @return true if initialized
-   */
-  boolean isInitialized() {
-    return initialized;
-  }
-
   boolean isAlive() {
-    return shouldServiceRun && bpThread.isAlive();
+    if (!shouldServiceRun || !bpThread.isAlive()) {
+      return false;
+    }
+    return runningState == BPServiceActor.RunningState.RUNNING
+        || runningState == BPServiceActor.RunningState.CONNECTING;
   }
 
   @Override
@@ -805,19 +805,30 @@ class BPServiceActor implements Runnable
     LOG.info(this + " starting to offer service");
 
     try {
-      // init stuff
-      try {
-        // setup storage
-        connectToNNAndHandshake();
-      } catch (IOException ioe) {
-        // Initial handshake, storage recovery or registration failed
-        // End BPOfferService thread
-        LOG.fatal("Initialization failed for block pool " + this, ioe);
-        return;
+      while (true) {
+        // init stuff
+        try {
+          // setup storage
+          connectToNNAndHandshake();
+          break;
+        } catch (IOException ioe) {
+          // Initial handshake, storage recovery or registration failed
+          runningState = RunningState.INIT_FAILED;
+          if (shouldRetryInit()) {
+            // Retry until all namenodes of the BPOS have failed initialization
+            LOG.error("Initialization failed for " + this + " "
+                + ioe.getLocalizedMessage());
+            sleepAndLogInterrupts(5000, "initializing");
+          } else {
+            runningState = RunningState.FAILED;
+            LOG.fatal("Initialization failed for " + this + ". Exiting. ", ioe);
+            return;
+          }
+        }
       }
-      initialized = true; // bp is initialized;
-
+
+      runningState = RunningState.RUNNING;
+
       while (shouldRun()) {
         try {
           offerService();
@@ -826,14 +837,20 @@ class BPServiceActor implements Runnable
           sleepAndLogInterrupts(5000, "offering service");
         }
       }
+      runningState = RunningState.EXITED;
     } catch (Throwable ex) {
       LOG.warn("Unexpected exception in block pool " + this, ex);
+      runningState = RunningState.FAILED;
     } finally {
       LOG.warn("Ending block pool service for: " + this);
       cleanUp();
     }
   }
 
+  private boolean shouldRetryInit() {
+    return shouldRun() && bpos.shouldRetryInit();
+  }
+
   private boolean shouldRun() {
     return shouldServiceRun && dn.shouldRun();
   }

Modified: hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolManager.java?rev=1593948&r1=1593947&r2=1593948&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolManager.java (original)
+++ hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolManager.java Mon May 12 12:43:59 2014
@@ -88,7 +88,11 @@ class BlockPoolManager {
 
   synchronized void remove(BPOfferService t) {
     offerServices.remove(t);
-    bpByBlockPoolId.remove(t.getBlockPoolId());
+    if (t.hasBlockPoolId()) {
+      // It's possible that the block pool never successfully registered
+      // with any NN, so it was never added to this map
+      bpByBlockPoolId.remove(t.getBlockPoolId());
+    }
 
     boolean removed = false;
     for (Iterator<BPOfferService> it = bpByNameserviceId.values().iterator();
Modified: hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java?rev=1593948&r1=1593947&r2=1593948&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java (original)
+++ hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java Mon May 12 12:43:59 2014
@@ -847,19 +847,24 @@ public class DataNode extends Configured
    */
   void shutdownBlockPool(BPOfferService bpos) {
     blockPoolManager.remove(bpos);
+    if (bpos.hasBlockPoolId()) {
+      // Possible that this is shutting down before successfully
+      // registering anywhere. If that's the case, we wouldn't have
+      // a block pool id
+      String bpId = bpos.getBlockPoolId();
+      if (blockScanner != null) {
+        blockScanner.removeBlockPool(bpId);
+      }
 
-    String bpId = bpos.getBlockPoolId();
-    if (blockScanner != null) {
-      blockScanner.removeBlockPool(bpId);
-    }
-
-    if (data != null) {
-      data.shutdownBlockPool(bpId);
-    }
+      if (data != null) {
+        data.shutdownBlockPool(bpId);
+      }
 
-    if (storage != null) {
-      storage.removeBlockPoolStorage(bpId);
+      if (storage != null) {
+        storage.removeBlockPoolStorage(bpId);
+      }
     }
+  }
 
   /**
@@ -880,10 +885,10 @@ public class DataNode extends Configured
           + " should have retrieved namespace info before initBlockPool.");
     }
 
+    setClusterId(nsInfo.clusterID, nsInfo.getBlockPoolID());
+
     // Register the new block pool with the BP manager.
     blockPoolManager.addBlockPool(bpos);
-
-    setClusterId(nsInfo.clusterID, nsInfo.getBlockPoolID());
 
     // In the case that this is the first block pool to connect, initialize
     // the dataset, block scanners, etc.
@@ -1067,6 +1072,7 @@ public class DataNode extends Configured
       Token<BlockTokenIdentifier> token) throws IOException {
     checkBlockLocalPathAccess();
     checkBlockToken(block, token, BlockTokenSecretManager.AccessMode.READ);
+    Preconditions.checkNotNull(data, "Storage not yet initialized");
     BlockLocalPathInfo info = data.getBlockLocalPathInfo(block);
     if (LOG.isDebugEnabled()) {
       if (info != null) {
@@ -2427,6 +2433,7 @@ public class DataNode extends Configured
    */
   @Override // DataNodeMXBean
   public String getVolumeInfo() {
+    Preconditions.checkNotNull(data, "Storage not yet initialized");
     return JSON.toString(data.getVolumeInfoMap());
   }
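Both DataNode hunks above guard lazily initialized state the same way: verify the field with Guava's Preconditions before dereferencing it, so a caller that arrives before the first block pool has registered gets a descriptive failure instead of an anonymous NullPointerException deeper in the call chain. A minimal sketch of that guard, with a hypothetical volumeMap field standing in for the DataNode's dataset:

    import com.google.common.base.Preconditions;
    import java.util.Map;

    class LazyStorage {
        // Stays null until the first block pool finishes registration.
        private volatile Map<String, Long> volumeMap;

        String volumeInfo() {
            // Fails fast with a readable message if called too early.
            Preconditions.checkNotNull(volumeMap, "Storage not yet initialized");
            return volumeMap.toString();
        }
    }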
Modified: hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java?rev=1593948&r1=1593947&r2=1593948&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java (original)
+++ hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java Mon May 12 12:43:59 2014
@@ -221,11 +221,16 @@ public class DataStorage extends Storage
     // Each storage directory is treated individually.
     // During startup some of them can upgrade or rollback
     // while others could be uptodate for the regular startup.
-    for(int idx = 0; idx < getNumStorageDirs(); idx++) {
-      doTransition(datanode, getStorageDir(idx), nsInfo, startOpt);
-      createStorageID(getStorageDir(idx));
+    try {
+      for (int idx = 0; idx < getNumStorageDirs(); idx++) {
+        doTransition(datanode, getStorageDir(idx), nsInfo, startOpt);
+        createStorageID(getStorageDir(idx));
+      }
+    } catch (IOException e) {
+      unlockAll();
+      throw e;
     }
-
+
     // 3. Update all storages. Some of them might have just been formatted.
     this.writeAll();

Modified: hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetUtil.java?rev=1593948&r1=1593947&r2=1593948&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetUtil.java (original)
+++ hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetUtil.java Mon May 12 12:43:59 2014
@@ -50,7 +50,7 @@ public class FsDatasetUtil {
   }
 
   /** Find the corresponding meta data file from a given block file */
-  static File findMetaFile(final File blockFile) throws IOException {
+  public static File findMetaFile(final File blockFile) throws IOException {
     final String prefix = blockFile.getName() + "_";
     final File parent = blockFile.getParentFile();
     final File[] matches = parent.listFiles(new FilenameFilter() {

Modified: hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java?rev=1593948&r1=1593947&r2=1593948&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java (original)
+++ hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java Mon May 12 12:43:59 2014
@@ -284,7 +284,7 @@ public class FSDirectory implements Clos
       while (!ready) {
         try {
           cond.await(5000, TimeUnit.MILLISECONDS);
-        } catch (InterruptedException ie) {
+        } catch (InterruptedException ignored) {
         }
       }
     } finally {
@@ -525,7 +525,7 @@ public class FSDirectory implements Clos
     }
 
     // update space consumed
-    final INodesInPath iip = rootDir.getINodesInPath4Write(path, true);
+    final INodesInPath iip = getINodesInPath4Write(path, true);
     updateCount(iip, 0, -fileNode.getBlockDiskspace(), true);
     return true;
   }
@@ -597,7 +597,7 @@ public class FSDirectory implements Clos
       throws QuotaExceededException, UnresolvedLinkException,
       FileAlreadyExistsException, SnapshotAccessControlException, IOException {
     assert hasWriteLock();
-    INodesInPath srcIIP = rootDir.getINodesInPath4Write(src, false);
+    INodesInPath srcIIP = getINodesInPath4Write(src, false);
     final INode srcInode = srcIIP.getLastINode();
 
     // check the validation of the source
@@ -731,9 +728,8 @@ public
class FSDirectory implements Clos } else { withCount.getReferredINode().setLocalName(dstChildName); int dstSnapshotId = dstIIP.getLatestSnapshotId(); - final INodeReference.DstReference ref = new INodeReference.DstReference( + toDst = new INodeReference.DstReference( dstParent.asDirectory(), withCount, dstSnapshotId); - toDst = ref; } added = addLastINodeNoQuotaCheck(dstIIP, toDst); @@ -772,14 +771,12 @@ public class FSDirectory implements Clos } else if (!srcChildIsReference) { // src must be in snapshot // the withCount node will no longer be used thus no need to update // its reference number here - final INode originalChild = withCount.getReferredINode(); - srcChild = originalChild; + srcChild = withCount.getReferredINode(); srcChild.setLocalName(srcChildName); } else { withCount.removeReference(oldSrcChild.asReference()); - final INodeReference originalRef = new INodeReference.DstReference( + srcChild = new INodeReference.DstReference( srcParent, withCount, srcRefDstSnapshot); - srcChild = originalRef; withCount.getReferredINode().setLocalName(srcChildName); } @@ -822,8 +819,8 @@ public class FSDirectory implements Clos } } } - String error = null; - final INodesInPath srcIIP = rootDir.getINodesInPath4Write(src, false); + final String error; + final INodesInPath srcIIP = getINodesInPath4Write(src, false); final INode srcInode = srcIIP.getLastINode(); // validate source if (srcInode == null) { @@ -861,7 +858,7 @@ public class FSDirectory implements Clos + error); throw new IOException(error); } - INodesInPath dstIIP = rootDir.getINodesInPath4Write(dst, false); + INodesInPath dstIIP = getINodesInPath4Write(dst, false); if (dstIIP.getINodes().length == 1) { error = "rename destination cannot be the root"; NameNode.stateChangeLog.warn("DIR* FSDirectory.unprotectedRenameTo: " @@ -966,7 +963,7 @@ public class FSDirectory implements Clos // src and dst file/dir are in the same directory, and the dstParent has // been replaced when we removed the src. Refresh the dstIIP and // dstParent. 
- dstIIP = rootDir.getINodesInPath4Write(dst, false); + dstIIP = getINodesInPath4Write(dst, false); } boolean undoRemoveDst = false; @@ -989,9 +986,8 @@ public class FSDirectory implements Clos } else { withCount.getReferredINode().setLocalName(dstChildName); int dstSnapshotId = dstIIP.getLatestSnapshotId(); - final INodeReference.DstReference ref = new INodeReference.DstReference( + toDst = new INodeReference.DstReference( dstIIP.getINode(-2).asDirectory(), withCount, dstSnapshotId); - toDst = ref; } // add src as dst to complete rename @@ -1052,14 +1048,12 @@ public class FSDirectory implements Clos } else if (!srcChildIsReference) { // src must be in snapshot // the withCount node will no longer be used thus no need to update // its reference number here - final INode originalChild = withCount.getReferredINode(); - srcChild = originalChild; + srcChild = withCount.getReferredINode(); srcChild.setLocalName(srcChildName); } else { withCount.removeReference(oldSrcChild.asReference()); - final INodeReference originalRef = new INodeReference.DstReference( + srcChild = new INodeReference.DstReference( srcParent, withCount, srcRefDstSnapshot); - srcChild = originalRef; withCount.getReferredINode().setLocalName(srcChildName); } @@ -1123,7 +1117,7 @@ public class FSDirectory implements Clos UnresolvedLinkException, SnapshotAccessControlException { assert hasWriteLock(); - final INodesInPath iip = rootDir.getINodesInPath4Write(src, true); + final INodesInPath iip = getINodesInPath4Write(src, true); final INode inode = iip.getLastINode(); if (inode == null || !inode.isFile()) { return null; @@ -1164,27 +1158,13 @@ public class FSDirectory implements Clos FileNotFoundException, IOException { readLock(); try { - return INodeFile.valueOf(rootDir.getNode(path, false), path + return INodeFile.valueOf(getNode(path, false), path ).getPreferredBlockSize(); } finally { readUnlock(); } } - boolean exists(String src) throws UnresolvedLinkException { - src = normalizePath(src); - readLock(); - try { - INode inode = rootDir.getNode(src, false); - if (inode == null) { - return false; - } - return !inode.isFile() || inode.asFile().getBlocks() != null; - } finally { - readUnlock(); - } - } - void setPermission(String src, FsPermission permission) throws FileNotFoundException, UnresolvedLinkException, QuotaExceededException, SnapshotAccessControlException { @@ -1201,7 +1181,7 @@ public class FSDirectory implements Clos throws FileNotFoundException, UnresolvedLinkException, QuotaExceededException, SnapshotAccessControlException { assert hasWriteLock(); - final INodesInPath inodesInPath = rootDir.getINodesInPath4Write(src, true); + final INodesInPath inodesInPath = getINodesInPath4Write(src, true); final INode inode = inodesInPath.getLastINode(); if (inode == null) { throw new FileNotFoundException("File does not exist: " + src); @@ -1226,7 +1206,7 @@ public class FSDirectory implements Clos throws FileNotFoundException, UnresolvedLinkException, QuotaExceededException, SnapshotAccessControlException { assert hasWriteLock(); - final INodesInPath inodesInPath = rootDir.getINodesInPath4Write(src, true); + final INodesInPath inodesInPath = getINodesInPath4Write(src, true); INode inode = inodesInPath.getLastINode(); if (inode == null) { throw new FileNotFoundException("File does not exist: " + src); @@ -1273,7 +1253,7 @@ public class FSDirectory implements Clos } // do the move - final INodesInPath trgIIP = rootDir.getINodesInPath4Write(target, true); + final INodesInPath trgIIP = getINodesInPath4Write(target, true); 
final INode[] trgINodes = trgIIP.getINodes(); final INodeFile trgInode = trgIIP.getLastINode().asFile(); INodeDirectory trgParent = trgINodes[trgINodes.length-2].asDirectory(); @@ -1342,7 +1322,7 @@ public class FSDirectory implements Clos final long filesRemoved; writeLock(); try { - final INodesInPath inodesInPath = rootDir.getINodesInPath4Write( + final INodesInPath inodesInPath = getINodesInPath4Write( normalizePath(src), false); if (!deleteAllowed(inodesInPath, src) ) { filesRemoved = -1; @@ -1392,7 +1372,7 @@ public class FSDirectory implements Clos boolean isNonEmptyDirectory(String path) throws UnresolvedLinkException { readLock(); try { - final INodesInPath inodesInPath = rootDir.getLastINodeInPath(path, false); + final INodesInPath inodesInPath = getLastINodeInPath(path, false); final INode inode = inodesInPath.getINode(0); if (inode == null || !inode.isDirectory()) { //not found or not a directory @@ -1421,7 +1401,7 @@ public class FSDirectory implements Clos BlocksMapUpdateInfo collectedBlocks = new BlocksMapUpdateInfo(); List removedINodes = new ChunkedArrayList(); - final INodesInPath inodesInPath = rootDir.getINodesInPath4Write( + final INodesInPath inodesInPath = getINodesInPath4Write( normalizePath(src), false); long filesRemoved = -1; if (deleteAllowed(inodesInPath, src)) { @@ -1546,7 +1526,7 @@ public class FSDirectory implements Clos if (srcs.endsWith(HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR)) { return getSnapshotsListing(srcs, startAfter); } - final INodesInPath inodesInPath = rootDir.getLastINodeInPath(srcs, true); + final INodesInPath inodesInPath = getLastINodeInPath(srcs, true); final int snapshot = inodesInPath.getPathSnapshotId(); final INode targetNode = inodesInPath.getINode(0); if (targetNode == null) @@ -1599,7 +1579,7 @@ public class FSDirectory implements Clos throws UnresolvedLinkException, IOException { Preconditions.checkState(hasReadLock()); Preconditions.checkArgument( - src.endsWith(HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR), + src.endsWith(HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR), "%s does not end with %s", src, HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR); final String dirPath = normalizePath(src.substring(0, @@ -1636,7 +1616,7 @@ public class FSDirectory implements Clos if (srcs.endsWith(HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR)) { return getFileInfo4DotSnapshot(srcs); } - final INodesInPath inodesInPath = rootDir.getLastINodeInPath(srcs, resolveLink); + final INodesInPath inodesInPath = getLastINodeInPath(srcs, resolveLink); final INode i = inodesInPath.getINode(0); return i == null? null: createFileStatus(HdfsFileStatus.EMPTY_NAME, i, inodesInPath.getPathSnapshotId()); @@ -1663,7 +1643,7 @@ public class FSDirectory implements Clos private INode getINode4DotSnapshot(String src) throws UnresolvedLinkException { Preconditions.checkArgument( - src.endsWith(HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR), + src.endsWith(HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR), "%s does not end with %s", src, HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR); final String dirPath = normalizePath(src.substring(0, @@ -1678,21 +1658,6 @@ public class FSDirectory implements Clos return null; } - /** - * Get the blocks associated with the file. - */ - Block[] getFileBlocks(String src) throws UnresolvedLinkException { - waitForReady(); - readLock(); - try { - final INode i = rootDir.getNode(src, false); - return i != null && i.isFile()? 
i.asFile().getBlocks(): null; - } finally { - readUnlock(); - } - } - - INodesInPath getExistingPathINodes(byte[][] components) throws UnresolvedLinkException { return INodesInPath.resolve(rootDir, components); @@ -1712,12 +1677,12 @@ public class FSDirectory implements Clos throws UnresolvedLinkException { readLock(); try { - return rootDir.getLastINodeInPath(src, true); + return getLastINodeInPath(src, true); } finally { readUnlock(); } } - + /** * Get {@link INode} associated with the file / directory. */ @@ -1725,7 +1690,7 @@ public class FSDirectory implements Clos ) throws UnresolvedLinkException, SnapshotAccessControlException { readLock(); try { - return rootDir.getINodesInPath4Write(src, true); + return getINodesInPath4Write(src, true); } finally { readUnlock(); } @@ -1739,7 +1704,7 @@ public class FSDirectory implements Clos SnapshotAccessControlException { readLock(); try { - return rootDir.getINode4Write(src, true); + return getINode4Write(src, true); } finally { readUnlock(); } @@ -1754,12 +1719,8 @@ public class FSDirectory implements Clos String srcs = normalizePath(src); readLock(); try { - if (srcs.startsWith("/") && !srcs.endsWith("/") - && rootDir.getINode4Write(srcs, false) == null) { - return true; - } else { - return false; - } + return srcs.startsWith("/") && !srcs.endsWith("/") + && getINode4Write(srcs, false) == null; } finally { readUnlock(); } @@ -1772,7 +1733,7 @@ public class FSDirectory implements Clos src = normalizePath(src); readLock(); try { - INode node = rootDir.getNode(src, false); + INode node = getNode(src, false); return node != null && node.isDirectory(); } finally { readUnlock(); @@ -1788,7 +1749,7 @@ public class FSDirectory implements Clos src = normalizePath(src); readLock(); try { - INode node = rootDir.getINode4Write(src, false); + INode node = getINode4Write(src, false); return node != null && node.isDirectory(); } finally { readUnlock(); @@ -1809,7 +1770,7 @@ public class FSDirectory implements Clos UnresolvedLinkException, SnapshotAccessControlException { writeLock(); try { - final INodesInPath iip = rootDir.getINodesInPath4Write(path, false); + final INodesInPath iip = getINodesInPath4Write(path, false); if (iip.getLastINode() == null) { throw new FileNotFoundException("Path not found: " + path); } @@ -2012,7 +1973,7 @@ public class FSDirectory implements Clos // create directories beginning from the first null index for(; i < inodes.length; i++) { - pathbuilder.append(Path.SEPARATOR + names[i]); + pathbuilder.append(Path.SEPARATOR).append(names[i]); unprotectedMkdir(namesystem.allocateNewInodeId(), iip, i, components[i], (i < lastInodeIndex) ? parentPermissions : permissions, null, now); @@ -2141,7 +2102,7 @@ public class FSDirectory implements Clos return; } int i = 0; - for(; src[i] == dst[i]; i++); + while(src[i] == dst[i]) { i++; } // src[i - 1] is the last common ancestor. 
final Quota.Counts delta = src[src.length - 1].computeQuotaUsage(); @@ -2302,7 +2263,7 @@ public class FSDirectory implements Clos counts.get(Quota.NAMESPACE), counts.get(Quota.DISKSPACE), checkQuota); boolean isRename = (child.getParent() != null); final INodeDirectory parent = inodes[pos-1].asDirectory(); - boolean added = false; + boolean added; try { added = parent.addChild(child, true, iip.getLatestSnapshotId()); } catch (QuotaExceededException e) { @@ -2381,7 +2342,7 @@ public class FSDirectory implements Clos String srcs = normalizePath(src); readLock(); try { - INode targetNode = rootDir.getNode(srcs, false); + INode targetNode = getNode(srcs, false); if (targetNode == null) { throw new FileNotFoundException("File does not exist: " + srcs); } @@ -2478,7 +2439,7 @@ public class FSDirectory implements Clos } String srcs = normalizePath(src); - final INodesInPath iip = rootDir.getINodesInPath4Write(srcs, true); + final INodesInPath iip = getINodesInPath4Write(srcs, true); INodeDirectory dirNode = INodeDirectory.valueOf(iip.getLastINode(), srcs); if (dirNode.isRoot() && nsQuota == HdfsConstants.QUOTA_RESET) { throw new IllegalArgumentException("Cannot clear namespace quota on root."); @@ -2666,7 +2627,7 @@ public class FSDirectory implements Clos blocksize = fileNode.getPreferredBlockSize(); final boolean inSnapshot = snapshot != Snapshot.CURRENT_STATE_ID; - final boolean isUc = inSnapshot ? false : fileNode.isUnderConstruction(); + final boolean isUc = !inSnapshot && fileNode.isUnderConstruction(); final long fileSize = !inSnapshot && isUc ? fileNode.computeFileSizeNotIncludingLastUcBlock() : size; loc = getFSNamesystem().getBlockManager().createLocatedBlocks( @@ -2761,7 +2722,7 @@ public class FSDirectory implements Clos private List unprotectedModifyAclEntries(String src, List aclSpec) throws IOException { assert hasWriteLock(); - INodesInPath iip = rootDir.getINodesInPath4Write(normalizePath(src), true); + INodesInPath iip = getINodesInPath4Write(normalizePath(src), true); INode inode = resolveLastINode(src, iip); int snapshotId = iip.getLatestSnapshotId(); List existingAcl = AclStorage.readINodeLogicalAcl(inode); @@ -2784,7 +2745,7 @@ public class FSDirectory implements Clos private List unprotectedRemoveAclEntries(String src, List aclSpec) throws IOException { assert hasWriteLock(); - INodesInPath iip = rootDir.getINodesInPath4Write(normalizePath(src), true); + INodesInPath iip = getINodesInPath4Write(normalizePath(src), true); INode inode = resolveLastINode(src, iip); int snapshotId = iip.getLatestSnapshotId(); List existingAcl = AclStorage.readINodeLogicalAcl(inode); @@ -2807,7 +2768,7 @@ public class FSDirectory implements Clos private List unprotectedRemoveDefaultAcl(String src) throws IOException { assert hasWriteLock(); - INodesInPath iip = rootDir.getINodesInPath4Write(normalizePath(src), true); + INodesInPath iip = getINodesInPath4Write(normalizePath(src), true); INode inode = resolveLastINode(src, iip); int snapshotId = iip.getLatestSnapshotId(); List existingAcl = AclStorage.readINodeLogicalAcl(inode); @@ -2829,7 +2790,7 @@ public class FSDirectory implements Clos private void unprotectedRemoveAcl(String src) throws IOException { assert hasWriteLock(); - INodesInPath iip = rootDir.getINodesInPath4Write(normalizePath(src), true); + INodesInPath iip = getINodesInPath4Write(normalizePath(src), true); INode inode = resolveLastINode(src, iip); int snapshotId = iip.getLatestSnapshotId(); AclStorage.removeINodeAcl(inode, snapshotId); @@ -2854,7 +2815,7 @@ public class 
FSDirectory implements Clos } assert hasWriteLock(); - INodesInPath iip = rootDir.getINodesInPath4Write(normalizePath(src), true); + INodesInPath iip = getINodesInPath4Write(normalizePath(src), true); INode inode = resolveLastINode(src, iip); int snapshotId = iip.getLatestSnapshotId(); List existingAcl = AclStorage.readINodeLogicalAcl(inode); @@ -2874,7 +2835,7 @@ public class FSDirectory implements Clos getINode4DotSnapshot(srcs) != null) { return new AclStatus.Builder().owner("").group("").build(); } - INodesInPath iip = rootDir.getLastINodeInPath(srcs, true); + INodesInPath iip = getLastINodeInPath(srcs, true); INode inode = resolveLastINode(src, iip); int snapshotId = iip.getPathSnapshotId(); List acl = AclStorage.readINodeAcl(inode, snapshotId); @@ -2906,7 +2867,7 @@ public class FSDirectory implements Clos XAttr unprotectedRemoveXAttr(String src, XAttr xAttr) throws IOException { assert hasWriteLock(); - INodesInPath iip = rootDir.getINodesInPath4Write(normalizePath(src), true); + INodesInPath iip = getINodesInPath4Write(normalizePath(src), true); INode inode = resolveLastINode(src, iip); int snapshotId = iip.getLatestSnapshotId(); List existingXAttrs = XAttrStorage.readINodeXAttrs(inode); @@ -2949,7 +2910,7 @@ public class FSDirectory implements Clos void unprotectedSetXAttr(String src, XAttr xAttr, EnumSet flag) throws IOException { assert hasWriteLock(); - INodesInPath iip = rootDir.getINodesInPath4Write(normalizePath(src), true); + INodesInPath iip = getINodesInPath4Write(normalizePath(src), true); INode inode = resolveLastINode(src, iip); int snapshotId = iip.getLatestSnapshotId(); List existingXAttrs = XAttrStorage.readINodeXAttrs(inode); @@ -2988,7 +2949,7 @@ public class FSDirectory implements Clos String srcs = normalizePath(src); readLock(); try { - INodesInPath iip = rootDir.getLastINodeInPath(srcs, true); + INodesInPath iip = getLastINodeInPath(srcs, true); INode inode = resolveLastINode(src, iip); int snapshotId = iip.getPathSnapshotId(); return XAttrStorage.readINodeXAttrs(inode, snapshotId); @@ -3096,7 +3057,18 @@ public class FSDirectory implements Clos static byte[][] getPathComponentsForReservedPath(String src) { return !isReservedName(src) ? null : INode.getPathComponents(src); } - + + /** Check if a given inode name is reserved */ + public static boolean isReservedName(INode inode) { + return CHECK_RESERVED_FILE_NAMES + && Arrays.equals(inode.getLocalNameBytes(), DOT_RESERVED); + } + + /** Check if a given path is reserved */ + public static boolean isReservedName(String src) { + return src.startsWith(DOT_RESERVED_PATH_PREFIX); + } + /** * Resolve the path of /.reserved/.inodes//... to a regular path * @@ -3119,7 +3091,7 @@ public class FSDirectory implements Clos return src; } final String inodeId = DFSUtil.bytes2String(pathComponents[3]); - long id = 0; + final long id; try { id = Long.parseLong(inodeId); } catch (NumberFormatException e) { @@ -3156,15 +3128,53 @@ public class FSDirectory implements Clos } return path.toString(); } - - /** Check if a given inode name is reserved */ - public static boolean isReservedName(INode inode) { - return CHECK_RESERVED_FILE_NAMES - && Arrays.equals(inode.getLocalNameBytes(), DOT_RESERVED); + + /** @return the {@link INodesInPath} containing only the last inode. 
*/
+  private INodesInPath getLastINodeInPath(String path, boolean resolveLink
+      ) throws UnresolvedLinkException {
+    return INodesInPath.resolve(rootDir, INode.getPathComponents(path), 1,
+        resolveLink);
   }
-
-  /** Check if a given path is reserved */
-  public static boolean isReservedName(String src) {
-    return src.startsWith(DOT_RESERVED_PATH_PREFIX);
+
+  /** @return the {@link INodesInPath} containing all inodes in the path. */
+  INodesInPath getINodesInPath(String path, boolean resolveLink
+      ) throws UnresolvedLinkException {
+    final byte[][] components = INode.getPathComponents(path);
+    return INodesInPath.resolve(rootDir, components, components.length,
+        resolveLink);
+  }
+
+  /** @return the last inode in the path. */
+  INode getNode(String path, boolean resolveLink)
+      throws UnresolvedLinkException {
+    return getLastINodeInPath(path, resolveLink).getINode(0);
+  }
+
+  /**
+   * @return the INode of the last component in src, or null if the last
+   *         component does not exist.
+   * @throws UnresolvedLinkException if symlink can't be resolved
+   * @throws SnapshotAccessControlException if path is in RO snapshot
+   */
+  private INode getINode4Write(String src, boolean resolveLink)
+      throws UnresolvedLinkException, SnapshotAccessControlException {
+    return getINodesInPath4Write(src, resolveLink).getLastINode();
+  }
+
+  /**
+   * @return the INodesInPath of the components in src
+   * @throws UnresolvedLinkException if symlink can't be resolved
+   * @throws SnapshotAccessControlException if path is in RO snapshot
+   */
+  private INodesInPath getINodesInPath4Write(String src, boolean resolveLink)
+      throws UnresolvedLinkException, SnapshotAccessControlException {
+    final byte[][] components = INode.getPathComponents(src);
+    INodesInPath inodesInPath = INodesInPath.resolve(rootDir, components,
+        components.length, resolveLink);
+    if (inodesInPath.isSnapshot()) {
+      throw new SnapshotAccessControlException(
+          "Modification on a read-only snapshot is disallowed");
+    }
+    return inodesInPath;
   }
 }
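The new FSDirectory helpers above centralize path resolution that call sites previously performed by reaching through rootDir, which puts the read-only-snapshot rejection in exactly one place. A standalone sketch of that resolve-then-guard idiom; Resolved, resolve() and the snapshot test are illustrative stand-ins, not the real INodesInPath API:

    import java.io.IOException;

    class Directory {
        static class Resolved {
            final String path;
            final boolean inSnapshot;
            Resolved(String path, boolean inSnapshot) {
                this.path = path;
                this.inSnapshot = inSnapshot;
            }
        }

        // Read-side resolution: snapshot paths may be inspected freely.
        Resolved resolve(String src) {
            return new Resolved(src, src.contains("/.snapshot/"));
        }

        // Write-side resolution: resolve once, then reject read-only paths,
        // so no caller can forget the check.
        Resolved resolveForWrite(String src) throws IOException {
            Resolved r = resolve(src);
            if (r.inSnapshot) {
                throw new IOException(
                    "Modification on a read-only snapshot is disallowed");
            }
            return r;
        }
    }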
Modified: hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java?rev=1593948&r1=1593947&r2=1593948&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java (original)
+++ hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java Mon May 12 12:43:59 2014
@@ -1200,7 +1200,7 @@ public class FSEditLog implements LogsPu
    * Finalize the current log segment.
    * Transitions from IN_SEGMENT state to BETWEEN_LOG_SEGMENTS state.
    */
-  synchronized void endCurrentLogSegment(boolean writeEndTxn) {
+  public synchronized void endCurrentLogSegment(boolean writeEndTxn) {
     LOG.info("Ending log segment " + curSegmentTxId);
     Preconditions.checkState(isSegmentOpen(), "Bad state: %s", state);

Modified: hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java?rev=1593948&r1=1593947&r2=1593948&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java (original)
+++ hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java Mon May 12 12:43:59 2014
@@ -542,7 +542,7 @@ public class FSImage implements Closeabl
   }
 
   @VisibleForTesting
-  void setEditLogForTesting(FSEditLog newLog) {
+  public void setEditLogForTesting(FSEditLog newLog) {
     editLog = newLog;
   }

Modified: hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java?rev=1593948&r1=1593947&r2=1593948&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java (original)
+++ hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java Mon May 12 12:43:59 2014
@@ -494,7 +494,7 @@ public class FSImageFormat {
     // Rename .snapshot paths if we're doing an upgrade
     parentPath = renameReservedPathsOnUpgrade(parentPath, getLayoutVersion());
     final INodeDirectory parent = INodeDirectory.valueOf(
-        namesystem.dir.rootDir.getNode(parentPath, true), parentPath);
+        namesystem.dir.getNode(parentPath, true), parentPath);
     return loadChildren(parent, in, counter);
   }

Modified: hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java?rev=1593948&r1=1593947&r2=1593948&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java (original)
+++ hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java Mon May 12 12:43:59 2014
@@ -2670,9 +2670,9 @@ public class FSNamesystem implements Nam
     checkOperation(OperationCategory.READ);
     src = FSDirectory.resolvePath(src, pathComponents, dir);
     LocatedBlock[] onRetryBlock = new LocatedBlock[1];
-    final INode[] inodes = analyzeFileState(
-        src, fileId, clientName, previous, onRetryBlock).getINodes();
-    final INodeFile pendingFile = inodes[inodes.length - 1].asFile();
+    final INodeFile pendingFile = analyzeFileState(
+        src,
fileId, clientName, previous, onRetryBlock); + src = pendingFile.getFullPathName(); if (onRetryBlock[0] != null && onRetryBlock[0].getLocations().length > 0) { // This is a retry. Just return the last block if having locations. @@ -2706,10 +2706,8 @@ public class FSNamesystem implements Nam // Run the full analysis again, since things could have changed // while chooseTarget() was executing. LocatedBlock[] onRetryBlock = new LocatedBlock[1]; - INodesInPath inodesInPath = + final INodeFile pendingFile = analyzeFileState(src, fileId, clientName, previous, onRetryBlock); - INode[] inodes = inodesInPath.getINodes(); - final INodeFile pendingFile = inodes[inodes.length - 1].asFile(); if (onRetryBlock[0] != null) { if (onRetryBlock[0].getLocations().length > 0) { @@ -2731,6 +2729,7 @@ public class FSNamesystem implements Nam // allocate new block, record block locations in INode. newBlock = createNewBlock(); + INodesInPath inodesInPath = INodesInPath.fromINode(pendingFile); saveAllocatedBlock(src, inodesInPath, newBlock, targets); dir.persistNewBlock(src, pendingFile); @@ -2744,7 +2743,7 @@ public class FSNamesystem implements Nam return makeLocatedBlock(newBlock, targets, offset); } - INodesInPath analyzeFileState(String src, + INodeFile analyzeFileState(String src, long fileId, String clientName, ExtendedBlock previous, @@ -2761,9 +2760,20 @@ public class FSNamesystem implements Nam checkFsObjectLimit(); Block previousBlock = ExtendedBlock.getLocalBlock(previous); - final INodesInPath iip = dir.getINodesInPath4Write(src); - final INodeFile pendingFile - = checkLease(src, fileId, clientName, iip.getLastINode()); + INode inode; + if (fileId == INodeId.GRANDFATHER_INODE_ID) { + // Older clients may not have given us an inode ID to work with. + // In this case, we have to try to resolve the path and hope it + // hasn't changed or been deleted since the file was opened for write. + final INodesInPath iip = dir.getINodesInPath4Write(src); + inode = iip.getLastINode(); + } else { + // Newer clients pass the inode ID, so we can just get the inode + // directly. 
+ inode = dir.getInode(fileId); + if (inode != null) src = inode.getFullPathName(); + } + final INodeFile pendingFile = checkLease(src, clientName, inode, fileId); BlockInfo lastBlockInFile = pendingFile.getLastBlock(); if (!Block.matchingIdAndGenStamp(previousBlock, lastBlockInFile)) { // The block that the client claims is the current last block @@ -2821,7 +2831,7 @@ public class FSNamesystem implements Nam onRetryBlock[0] = makeLocatedBlock(lastBlockInFile, ((BlockInfoUnderConstruction)lastBlockInFile).getExpectedStorageLocations(), offset); - return iip; + return pendingFile; } else { // Case 3 throw new IOException("Cannot allocate block in " + src + ": " + @@ -2834,7 +2844,7 @@ public class FSNamesystem implements Nam if (!checkFileProgress(pendingFile, false)) { throw new NotReplicatedYetException("Not replicated yet: " + src); } - return iip; + return pendingFile; } LocatedBlock makeLocatedBlock(Block blk, DatanodeStorageInfo[] locs, @@ -2847,8 +2857,9 @@ public class FSNamesystem implements Nam } /** @see ClientProtocol#getAdditionalDatanode */ - LocatedBlock getAdditionalDatanode(String src, final ExtendedBlock blk, - final DatanodeInfo[] existings, final String[] storageIDs, + LocatedBlock getAdditionalDatanode(String src, long fileId, + final ExtendedBlock blk, final DatanodeInfo[] existings, + final String[] storageIDs, final Set<Node> excludes, final int numAdditionalNodes, final String clientName ) throws IOException { @@ -2868,7 +2879,17 @@ public class FSNamesystem implements Nam src = FSDirectory.resolvePath(src, pathComponents, dir); //check lease - final INodeFile file = checkLease(src, clientName); + final INode inode; + if (fileId == INodeId.GRANDFATHER_INODE_ID) { + // Older clients may not have given us an inode ID to work with. + // In this case, we have to try to resolve the path and hope it + // hasn't changed or been deleted since the file was opened for write. + inode = dir.getINode(src); + } else { + inode = dir.getInode(fileId); + if (inode != null) src = inode.getFullPathName(); + } + final INodeFile file = checkLease(src, clientName, inode, fileId); clientnode = file.getFileUnderConstructionFeature().getClientNode(); preferredblocksize = file.getPreferredBlockSize(); @@ -2892,7 +2913,7 @@ public class FSNamesystem implements Nam /** * The client would like to let go of the given block */ - boolean abandonBlock(ExtendedBlock b, String src, String holder) + boolean abandonBlock(ExtendedBlock b, long fileId, String src, String holder) throws LeaseExpiredException, FileNotFoundException, UnresolvedLinkException, IOException { if(NameNode.stateChangeLog.isDebugEnabled()) { @@ -2904,13 +2925,24 @@ public class FSNamesystem implements Nam writeLock(); try { checkOperation(OperationCategory.WRITE); - checkNameNodeSafeMode("Cannot abandon block " + b + " for fle" + src); + checkNameNodeSafeMode("Cannot abandon block " + b + " for file " + src); src = FSDirectory.resolvePath(src, pathComponents, dir); + final INode inode; + if (fileId == INodeId.GRANDFATHER_INODE_ID) { + // Older clients may not have given us an inode ID to work with. + // In this case, we have to try to resolve the path and hope it + // hasn't changed or been deleted since the file was opened for write.
+ inode = dir.getINode(src); + } else { + inode = dir.getInode(fileId); + if (inode != null) src = inode.getFullPathName(); + } + final INodeFile file = checkLease(src, holder, inode, fileId); + // // Remove the block from the pending creates list // - INodeFile file = checkLease(src, holder); boolean removed = dir.removeBlock(src, file, ExtendedBlock.getLocalBlock(b)); if (!removed) { @@ -2928,39 +2960,39 @@ public class FSNamesystem implements Nam return true; } - - /** make sure that we still have the lease on this file. */ - private INodeFile checkLease(String src, String holder) - throws LeaseExpiredException, UnresolvedLinkException, - FileNotFoundException { - return checkLease(src, INodeId.GRANDFATHER_INODE_ID, holder, - dir.getINode(src)); - } - - private INodeFile checkLease(String src, long fileId, String holder, - INode inode) throws LeaseExpiredException, FileNotFoundException { + + private INodeFile checkLease(String src, String holder, INode inode, + long fileId) + throws LeaseExpiredException, FileNotFoundException { assert hasReadLock(); - if (inode == null || !inode.isFile()) { + final String ident = src + " (inode " + fileId + ")"; + if (inode == null) { Lease lease = leaseManager.getLease(holder); throw new LeaseExpiredException( - "No lease on " + src + ": File does not exist. " + "No lease on " + ident + ": File does not exist. " + (lease != null ? lease.toString() : "Holder " + holder + " does not have any open files.")); } + if (!inode.isFile()) { + Lease lease = leaseManager.getLease(holder); + throw new LeaseExpiredException( + "No lease on " + ident + ": INode is not a regular file. " + + (lease != null ? lease.toString() + : "Holder " + holder + " does not have any open files.")); + } final INodeFile file = inode.asFile(); if (!file.isUnderConstruction()) { Lease lease = leaseManager.getLease(holder); throw new LeaseExpiredException( - "No lease on " + src + ": File is not open for writing. " + "No lease on " + ident + ": File is not open for writing. " + (lease != null ? lease.toString() : "Holder " + holder + " does not have any open files.")); } String clientName = file.getFileUnderConstructionFeature().getClientName(); if (holder != null && !clientName.equals(holder)) { - throw new LeaseExpiredException("Lease mismatch on " + src + " owned by " - + clientName + " but is accessed by " + holder); + throw new LeaseExpiredException("Lease mismatch on " + ident + + " owned by " + clientName + " but is accessed by " + holder); } - INodeId.checkId(fileId, file); return file; } @@ -3003,10 +3035,20 @@ public class FSNamesystem implements Nam String holder, Block last, long fileId) throws SafeModeException, UnresolvedLinkException, IOException { assert hasWriteLock(); - final INodesInPath iip = dir.getLastINodeInPath(src); final INodeFile pendingFile; try { - pendingFile = checkLease(src, fileId, holder, iip.getINode(0)); + final INode inode; + if (fileId == INodeId.GRANDFATHER_INODE_ID) { + // Older clients may not have given us an inode ID to work with. + // In this case, we have to try to resolve the path and hope it + // hasn't changed or been deleted since the file was opened for write. 
+ final INodesInPath iip = dir.getLastINodeInPath(src); + inode = iip.getINode(0); + } else { + inode = dir.getInode(fileId); + if (inode != null) src = inode.getFullPathName(); + } + pendingFile = checkLease(src, holder, inode, fileId); } catch (LeaseExpiredException lee) { final INode inode = dir.getINode(src); if (inode != null @@ -3021,9 +3063,9 @@ public class FSNamesystem implements Nam final Block realLastBlock = inode.asFile().getLastBlock(); if (Block.matchingIdAndGenStamp(last, realLastBlock)) { NameNode.stateChangeLog.info("DIR* completeFile: " + - "request from " + holder + " to complete " + src + - " which is already closed. But, it appears to be an RPC " + - "retry. Returning success"); + "request from " + holder + " to complete inode " + fileId + + " (" + src + ") which is already closed. But, it appears to be " + + "an RPC retry. Returning success"); return true; } } @@ -3043,7 +3085,7 @@ public class FSNamesystem implements Nam } finalizeINodeFileUnderConstruction(src, pendingFile, - iip.getLatestSnapshotId()); + Snapshot.CURRENT_STATE_ID); return true; } @@ -3684,12 +3726,14 @@ public class FSNamesystem implements Nam /** Persist all metadata about this file. * @param src The string representation of the path + * @param fileId The inode ID that we're fsyncing. Older clients will pass + * INodeId.GRANDFATHER_INODE_ID here. * @param clientName The string representation of the client * @param lastBlockLength The length of the last block * under construction reported from client. * @throws IOException if path does not exist */ - void fsync(String src, String clientName, long lastBlockLength) + void fsync(String src, long fileId, String clientName, long lastBlockLength) throws IOException, UnresolvedLinkException { NameNode.stateChangeLog.info("BLOCK* fsync: " + src + " for " + clientName); checkOperation(OperationCategory.WRITE); @@ -3699,7 +3743,17 @@ public class FSNamesystem implements Nam checkOperation(OperationCategory.WRITE); checkNameNodeSafeMode("Cannot fsync file " + src); src = FSDirectory.resolvePath(src, pathComponents, dir); - INodeFile pendingFile = checkLease(src, clientName); + final INode inode; + if (fileId == INodeId.GRANDFATHER_INODE_ID) { + // Older clients may not have given us an inode ID to work with. + // In this case, we have to try to resolve the path and hope it + // hasn't changed or been deleted since the file was opened for write. + inode = dir.getINode(src); + } else { + inode = dir.getInode(fileId); + if (inode != null) src = inode.getFullPathName(); + } + final INodeFile pendingFile = checkLease(src, clientName, inode, fileId); if (lastBlockLength > 0) { pendingFile.getFileUnderConstructionFeature().updateLengthOfLastBlock( pendingFile, lastBlockLength); @@ -5509,7 +5563,7 @@ public class FSNamesystem implements Nam dir.waitForReady(); readLock(); try { - pc.checkPermission(path, dir, doCheckOwner, ancestorAccess, + pc.checkPermission(path, dir, doCheckOwner, ancestorAccess, parentAccess, access, subAccess, resolveLink); } finally { readUnlock(); @@ -7917,6 +7971,8 @@ public class FSNamesystem implements Nam } sb.append(trackingId); } + sb.append("\t").append("proto="); + sb.append(NamenodeWebHdfsMethods.isWebHdfsInvocation() ?
"webhdfs" : "rpc"); logAuditMessage(sb.toString()); } } Modified: hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java?rev=1593948&r1=1593947&r2=1593948&view=diff ============================================================================== --- hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java (original) +++ hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java Mon May 12 12:43:59 2014 @@ -144,7 +144,7 @@ class FSPermissionChecker { * Guarded by {@link FSNamesystem#readLock()} * Caller of this method must hold that lock. */ - void checkPermission(String path, INodeDirectory root, boolean doCheckOwner, + void checkPermission(String path, FSDirectory dir, boolean doCheckOwner, FsAction ancestorAccess, FsAction parentAccess, FsAction access, FsAction subAccess, boolean resolveLink) throws AccessControlException, UnresolvedLinkException { @@ -159,7 +159,7 @@ class FSPermissionChecker { } // check if (parentAccess != null) && file exists, then check sb // If resolveLink, the check is performed on the link target. - final INodesInPath inodesInPath = root.getINodesInPath(path, resolveLink); + final INodesInPath inodesInPath = dir.getINodesInPath(path, resolveLink); final int snapshotId = inodesInPath.getPathSnapshotId(); final INode[] inodes = inodesInPath.getINodes(); int ancestorIndex = inodes.length - 2; Modified: hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java?rev=1593948&r1=1593947&r2=1593948&view=diff ============================================================================== --- hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java (original) +++ hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java Mon May 12 12:43:59 2014 @@ -403,53 +403,6 @@ public class INodeDirectory extends INod : ReadOnlyList.Util.asReadOnlyList(children); } - /** @return the {@link INodesInPath} containing only the last inode. */ - INodesInPath getLastINodeInPath(String path, boolean resolveLink - ) throws UnresolvedLinkException { - return INodesInPath.resolve(this, getPathComponents(path), 1, resolveLink); - } - - /** @return the {@link INodesInPath} containing all inodes in the path. */ - INodesInPath getINodesInPath(String path, boolean resolveLink - ) throws UnresolvedLinkException { - final byte[][] components = getPathComponents(path); - return INodesInPath.resolve(this, components, components.length, resolveLink); - } - - /** @return the last inode in the path. */ - INode getNode(String path, boolean resolveLink) - throws UnresolvedLinkException { - return getLastINodeInPath(path, resolveLink).getINode(0); - } - - /** - * @return the INode of the last component in src, or null if the last - * component does not exist. 
- * @throws UnresolvedLinkException if symlink can't be resolved - * @throws SnapshotAccessControlException if path is in RO snapshot - */ - INode getINode4Write(String src, boolean resolveLink) - throws UnresolvedLinkException, SnapshotAccessControlException { - return getINodesInPath4Write(src, resolveLink).getLastINode(); - } - - /** - * @return the INodesInPath of the components in src - * @throws UnresolvedLinkException if symlink can't be resolved - * @throws SnapshotAccessControlException if path is in RO snapshot - */ - INodesInPath getINodesInPath4Write(String src, boolean resolveLink) - throws UnresolvedLinkException, SnapshotAccessControlException { - final byte[][] components = INode.getPathComponents(src); - INodesInPath inodesInPath = INodesInPath.resolve(this, components, - components.length, resolveLink); - if (inodesInPath.isSnapshot()) { - throw new SnapshotAccessControlException( - "Modification on a read-only snapshot is disallowed"); - } - return inodesInPath; - } - /** * Given a child's name, return the index of the next child * Modified: hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodesInPath.java URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodesInPath.java?rev=1593948&r1=1593947&r2=1593948&view=diff ============================================================================== --- hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodesInPath.java (original) +++ hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodesInPath.java Mon May 12 12:43:59 2014 @@ -46,6 +46,28 @@ public class INodesInPath { : Arrays.equals(HdfsConstants.DOT_SNAPSHOT_DIR_BYTES, pathComponent); } + static INodesInPath fromINode(INode inode) { + int depth = 0, index; + INode tmp = inode; + while (tmp != null) { + depth++; + tmp = tmp.getParent(); + } + final byte[][] path = new byte[depth][]; + final INode[] inodes = new INode[depth]; + final INodesInPath iip = new INodesInPath(path, depth); + tmp = inode; + index = depth; + while (tmp != null) { + index--; + path[index] = tmp.getKey(); + inodes[index] = tmp; + tmp = tmp.getParent(); + } + iip.setINodes(inodes); + return iip; + } + /** * Given some components, create a path name. * @param components The path components @@ -341,6 +363,11 @@ public class INodesInPath { private void addNode(INode node) { inodes[numNonNull++] = node; } + + private void setINodes(INode inodes[]) { + this.inodes = inodes; + this.numNonNull = this.inodes.length; + } void setINode(int i, INode inode) { inodes[i >= 0? i: inodes.length + i] = inode; @@ -417,4 +444,4 @@ public class INodesInPath { + ", this=" + toString(false)); } } -} \ No newline at end of file +}
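
The change repeated across analyzeFileState, getAdditionalDatanode, abandonBlock, completeFileInternal and fsync above is the same one: resolve the target file by inode ID when the client supplies one, and fall back to path resolution only for older clients that send INodeId.GRANDFATHER_INODE_ID. The stand-alone sketch below shows that control flow in isolation; the Inode and Dir classes, the resolve helper, the sample paths, and the literal sentinel value 0 are all invented scaffolding for illustration, not NameNode code.

    import java.util.HashMap;
    import java.util.Map;

    /** Stand-alone sketch; nothing here is real NameNode code. */
    public class InodeResolutionSketch {
      // Stand-in for INodeId.GRANDFATHER_INODE_ID, the sentinel that
      // pre-inode-ID clients send (the literal 0 is an assumption).
      static final long GRANDFATHER_INODE_ID = 0;

      static class Inode {
        final long id;
        final String fullPath;
        Inode(long id, String fullPath) { this.id = id; this.fullPath = fullPath; }
      }

      // Minimal stand-in for the two FSDirectory lookups used in the diff:
      // getINode(path) and getInode(fileId).
      static class Dir {
        final Map<String, Inode> byPath = new HashMap<String, Inode>();
        final Map<Long, Inode> byId = new HashMap<Long, Inode>();
        void add(Inode i) { byPath.put(i.fullPath, i); byId.put(i.id, i); }
        Inode getINode(String path) { return byPath.get(path); }
        Inode getInode(long id) { return byId.get(id); }
      }

      /**
       * The control flow repeated in the patch: prefer the inode ID, fall
       * back to the path for old clients, and refresh src from the inode
       * when the ID lookup succeeds.  src is passed as a one-element array
       * to imitate the in-place reassignment in the real methods.
       */
      static Inode resolve(Dir dir, long fileId, String[] src) {
        final Inode inode;
        if (fileId == GRANDFATHER_INODE_ID) {
          // Older client: resolve by path and hope it hasn't changed
          // since the file was opened for write.
          inode = dir.getINode(src[0]);
        } else {
          // Newer client: the inode ID survives renames of the open file.
          inode = dir.getInode(fileId);
          if (inode != null) src[0] = inode.fullPath;
        }
        return inode;  // callers then run a checkLease-style validation
      }

      public static void main(String[] args) {
        Dir dir = new Dir();
        dir.add(new Inode(1001, "/user/alice/data"));

        String[] src = { "/user/alice/stale-name" };     // client's stale path
        Inode viaId = resolve(dir, 1001, src);
        System.out.println(viaId.id + " -> " + src[0]);  // 1001 -> /user/alice/data

        String[] old = { "/user/alice/data" };
        Inode viaPath = resolve(dir, GRANDFATHER_INODE_ID, old);
        System.out.println(viaPath.fullPath);            // /user/alice/data
      }
    }

The src refresh (src = inode.getFullPathName() in the patch) is what makes the ID branch robust: the inode ID keeps identifying the file even if it is renamed while open for write, and refreshing src keeps the later lease checks, audit logging and exception messages pointing at the file's current name.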
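
Relatedly, the new INodesInPath.fromINode in the last diff rebuilds a root-to-leaf view for an inode that was found by ID rather than by parsing a path: one pass up the getParent() chain to measure the depth, then a second pass filling the path and inode arrays from the leaf backwards. A minimal sketch of that two-pass walk, with Node and pathOf as invented stand-ins for INode and fromINode:

    import java.util.Arrays;

    public class FromINodeSketch {
      static class Node {
        final String name;
        final Node parent;
        Node(String name, Node parent) { this.name = name; this.parent = parent; }
      }

      /** Root-to-leaf names for the given node, leaf last. */
      static String[] pathOf(Node leaf) {
        int depth = 0;
        for (Node n = leaf; n != null; n = n.parent) depth++;  // pass 1: count
        String[] path = new String[depth];
        int index = depth;
        for (Node n = leaf; n != null; n = n.parent) {
          path[--index] = n.name;                              // pass 2: fill backwards
        }
        return path;
      }

      public static void main(String[] args) {
        Node root = new Node("", null);
        Node user = new Node("user", root);
        Node file = new Node("data.txt", new Node("alice", user));
        System.out.println(Arrays.toString(pathOf(file)));
        // [, user, alice, data.txt] -- the same ordering fromINode produces
      }
    }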