From: szetszwo@apache.org
To: common-commits@hadoop.apache.org
Reply-To: common-dev@hadoop.apache.org
Subject: hadoop git commit: HDFS-7439. Add BlockOpResponseProto's message to the exception messages. Contributed by Takanobu Asanuma
Date: Mon, 2 Mar 2015 07:06:47 +0000 (UTC)

Repository: hadoop
Updated Branches:
  refs/heads/branch-2 c40293c82 -> a5f3156b3


HDFS-7439. Add BlockOpResponseProto's message to the exception messages. Contributed by Takanobu Asanuma

Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a5f3156b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a5f3156b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a5f3156b

Branch: refs/heads/branch-2
Commit: a5f3156b3003139b1759a0cbd07a9a6750eb2fd7
Parents: c40293c
Author: Tsz-Wo Nicholas Sze
Authored: Mon Mar 2 15:03:58 2015 +0800
Committer: Tsz-Wo Nicholas Sze
Committed: Mon Mar 2 15:05:06 2015 +0800

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |  3 +++
 .../java/org/apache/hadoop/hdfs/DFSClient.java  | 26 ++++++--------------
 .../org/apache/hadoop/hdfs/DFSOutputStream.java | 15 ++++-------
 .../apache/hadoop/hdfs/RemoteBlockReader2.java  | 24 ++++++------------
 .../datatransfer/DataTransferProtoUtil.java     | 26 ++++++++++++++++++++
 .../hadoop/hdfs/server/balancer/Dispatcher.java |  9 +++----
 .../hdfs/server/datanode/DataXceiver.java       | 14 +++--------
 7 files changed, 55 insertions(+), 62 deletions(-)
----------------------------------------------------------------------
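The change replaces six hand-rolled copies of the same response-status
check with a single helper, DataTransferProtoUtil.checkBlockOpStatus, and
folds the BlockOpResponseProto's "message" field into the exception text.
A minimal standalone sketch of the pattern being factored out, using
stand-in types rather than the generated protobuf classes (Status and
Response below are simplified assumptions, and IOException stands in for
both exception types the real helper can throw):

  import java.io.IOException;

  class BlockOpStatusCheckSketch {
    // Stand-in for DataTransferProtos.Status (simplified).
    enum Status { SUCCESS, ERROR, ERROR_ACCESS_TOKEN }

    // Stand-in for DataTransferProtos.BlockOpResponseProto (simplified).
    interface Response {
      Status getStatus();
      String getMessage();  // the field this patch surfaces to callers
    }

    // One shared check; each call site now passes only its context string.
    static void checkBlockOpStatus(Response response, String logInfo)
        throws IOException {
      if (response.getStatus() != Status.SUCCESS) {
        String prefix = response.getStatus() == Status.ERROR_ACCESS_TOKEN
            ? "Got access token error" : "Got error";
        // The real helper throws InvalidBlockTokenException for the
        // access-token case and IOException otherwise.
        throw new IOException(prefix
            + ", status message " + response.getMessage() + ", " + logInfo);
      }
    }
  }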
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a5f3156b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 12f9899..028521e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -393,6 +393,9 @@ Release 2.7.0 - UNRELEASED
     HDFS-5853. Add "hadoop.user.group.metrics.percentiles.intervals" to
     hdfs-default.xml. (aajisaka)
 
+    HDFS-7439. Add BlockOpResponseProto's message to the exception messages.
+    (Takanobu Asanuma via szetszwo)
+
   OPTIMIZATIONS
 
     HDFS-7454. Reduce memory footprint for AclEntries in NameNode.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a5f3156b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index cd747fc..6f96126 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -175,6 +175,7 @@ import org.apache.hadoop.hdfs.protocol.SnapshotAccessControlException;
 import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
 import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
 import org.apache.hadoop.hdfs.protocol.UnresolvedPathException;
+import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil;
 import org.apache.hadoop.hdfs.protocol.datatransfer.IOStreamPair;
 import org.apache.hadoop.hdfs.protocol.datatransfer.Op;
 import org.apache.hadoop.hdfs.protocol.datatransfer.ReplaceDatanodeOnFailure;
@@ -2259,15 +2260,9 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
           final BlockOpResponseProto reply =
             BlockOpResponseProto.parseFrom(PBHelper.vintPrefixed(in));
-          if (reply.getStatus() != Status.SUCCESS) {
-            if (reply.getStatus() == Status.ERROR_ACCESS_TOKEN) {
-              throw new InvalidBlockTokenException();
-            } else {
-              throw new IOException("Bad response " + reply + " for block "
-                  + block + " from datanode " + datanodes[j]);
-            }
-          }
-
+          String logInfo = "for block " + block + " from datanode " + datanodes[j];
+          DataTransferProtoUtil.checkBlockOpStatus(reply, logInfo);
+
           OpBlockChecksumResponseProto checksumData =
             reply.getChecksumResponse();
@@ -2424,16 +2419,9 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
           0, 1, true, CachingStrategy.newDefaultStrategy());
       final BlockOpResponseProto reply =
           BlockOpResponseProto.parseFrom(PBHelper.vintPrefixed(in));
-
-      if (reply.getStatus() != Status.SUCCESS) {
-        if (reply.getStatus() == Status.ERROR_ACCESS_TOKEN) {
-          throw new InvalidBlockTokenException();
-        } else {
-          throw new IOException("Bad response " + reply + " trying to read "
-              + lb.getBlock() + " from datanode " + dn);
-        }
-      }
-
+      String logInfo = "trying to read " + lb.getBlock() + " from datanode " + dn;
+      DataTransferProtoUtil.checkBlockOpStatus(reply, logInfo);
+
       return PBHelper.convert(reply.getReadOpChecksumInfo().getChecksum().getType());
     } finally {
       IOUtils.cleanup(null, pair.in, pair.out);
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a5f3156b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
index 14d39c6..60a9b37 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
@@ -69,6 +69,7 @@ import org.apache.hadoop.hdfs.protocol.SnapshotAccessControlException;
 import org.apache.hadoop.hdfs.protocol.UnresolvedPathException;
 import org.apache.hadoop.hdfs.protocol.datatransfer.BlockConstructionStage;
 import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtocol;
+import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil;
 import org.apache.hadoop.hdfs.protocol.datatransfer.IOStreamPair;
 import org.apache.hadoop.hdfs.protocol.datatransfer.InvalidEncryptionKeyException;
 import org.apache.hadoop.hdfs.protocol.datatransfer.PacketHeader;
@@ -1469,16 +1470,10 @@ public class DFSOutputStream extends FSOutputSummer
             checkRestart = true;
             throw new IOException("A datanode is restarting.");
           }
-          if (pipelineStatus != SUCCESS) {
-            if (pipelineStatus == Status.ERROR_ACCESS_TOKEN) {
-              throw new InvalidBlockTokenException(
-                  "Got access token error for connect ack with firstBadLink as "
-                  + firstBadLink);
-            } else {
-              throw new IOException("Bad connect ack with firstBadLink as "
-                  + firstBadLink);
-            }
-          }
+
+          String logInfo = "ack with firstBadLink as " + firstBadLink;
+          DataTransferProtoUtil.checkBlockOpStatus(resp, logInfo);
+
           assert null == blockStream : "Previous blockStream unclosed";
           blockStream = out;
           result =  true; // success

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a5f3156b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader2.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader2.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader2.java
index 3f133b6..9245a84 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader2.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader2.java
@@ -45,7 +44,6 @@ import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumIn
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;
 import org.apache.hadoop.hdfs.protocolPB.PBHelper;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
-import org.apache.hadoop.hdfs.security.token.block.InvalidBlockTokenException;
 import org.apache.hadoop.hdfs.server.datanode.CachingStrategy;
 import org.apache.hadoop.hdfs.shortcircuit.ClientMmap;
 import org.apache.hadoop.net.NetUtils;
@@ -448,22 +447,13 @@
       BlockOpResponseProto status, Peer peer,
       ExtendedBlock block, String file)
       throws IOException {
-    if (status.getStatus() != Status.SUCCESS) {
-      if (status.getStatus() == Status.ERROR_ACCESS_TOKEN) {
-        throw new InvalidBlockTokenException(
-            "Got access token error for OP_READ_BLOCK, self="
-                + peer.getLocalAddressString() + ", remote="
-                + peer.getRemoteAddressString() + ", for file " + file
-                + ", for pool " + block.getBlockPoolId() + " block "
-                + block.getBlockId() + "_" + block.getGenerationStamp());
-      } else {
-        throw new IOException("Got error for OP_READ_BLOCK, self="
-            + peer.getLocalAddressString() + ", remote="
-            + peer.getRemoteAddressString() + ", for file " + file
-            + ", for pool " + block.getBlockPoolId() + " block "
-            + block.getBlockId() + "_" + block.getGenerationStamp());
-      }
-    }
+    String logInfo = "for OP_READ_BLOCK"
+      + ", self=" + peer.getLocalAddressString()
+      + ", remote=" + peer.getRemoteAddressString()
+      + ", for file " + file
+      + ", for pool " + block.getBlockPoolId()
+      + " block " + block.getBlockId() + "_" + block.getGenerationStamp();
+    DataTransferProtoUtil.checkBlockOpStatus(status, logInfo);
   }
 
   @Override
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a5f3156b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/DataTransferProtoUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/DataTransferProtoUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/DataTransferProtoUtil.java
index 2ef3c3f..284281a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/DataTransferProtoUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/DataTransferProtoUtil.java
@@ -17,11 +17,16 @@
  */
 package org.apache.hadoop.hdfs.protocol.datatransfer;
 
+import java.io.IOException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.hdfs.net.Peer;
+import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto;
+import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto;
+import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto;
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto;
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto;
@@ -29,6 +34,7 @@ import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTrac
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ChecksumTypeProto;
 import org.apache.hadoop.hdfs.protocolPB.PBHelper;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
+import org.apache.hadoop.hdfs.security.token.block.InvalidBlockTokenException;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.util.DataChecksum;
 import org.apache.htrace.Span;
@@ -119,4 +125,24 @@ public abstract class DataTransferProtoUtil {
     }
     return scope;
   }
+
+  public static void checkBlockOpStatus(
+          BlockOpResponseProto response,
+          String logInfo) throws IOException {
+    if (response.getStatus() != Status.SUCCESS) {
+      if (response.getStatus() == Status.ERROR_ACCESS_TOKEN) {
+        throw new InvalidBlockTokenException(
+          "Got access token error"
+          + ", status message " + response.getMessage()
+          + ", " + logInfo
+        );
+      } else {
+        throw new IOException(
+          "Got error"
+          + ", status message " + response.getMessage()
+          + ", " + logInfo
+        );
+      }
+    }
+  }
 }
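A caller-side sketch of what the new helper produces, assuming the
standard builder API that protoc generates for BlockOpResponseProto
(setStatus/setMessage); the status message, block name, and datanode
address below are made-up illustration data:

  import java.io.IOException;

  import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil;
  import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto;
  import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;

  public class CheckBlockOpStatusExample {
    public static void main(String[] args) {
      // Fabricate an error reply the way a datanode might populate one.
      BlockOpResponseProto reply = BlockOpResponseProto.newBuilder()
          .setStatus(Status.ERROR)
          .setMessage("No space left on volume /data/3")
          .build();
      try {
        DataTransferProtoUtil.checkBlockOpStatus(reply,
            "for block blk_1073741825_1001 from datanode 127.0.0.1:50010");
      } catch (IOException e) {
        // Before this patch most call sites dropped the datanode's message;
        // now it is part of the exception text, e.g.:
        //   Got error, status message No space left on volume /data/3,
        //   for block blk_1073741825_1001 from datanode 127.0.0.1:50010
        System.out.println(e.getMessage());
      }
    }
  }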
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a5f3156b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Dispatcher.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Dispatcher.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Dispatcher.java
index f6cc311..47368fb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Dispatcher.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Dispatcher.java
@@ -55,6 +55,7 @@ import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil;
 import org.apache.hadoop.hdfs.protocol.datatransfer.IOStreamPair;
 import org.apache.hadoop.hdfs.protocol.datatransfer.Sender;
 import org.apache.hadoop.hdfs.protocol.datatransfer.TrustedChannelResolver;
@@ -359,12 +360,8 @@ public class Dispatcher {
         // read intermediate responses
         response = BlockOpResponseProto.parseFrom(vintPrefixed(in));
       }
-      if (response.getStatus() != Status.SUCCESS) {
-        if (response.getStatus() == Status.ERROR_ACCESS_TOKEN) {
-          throw new IOException("block move failed due to access token error");
-        }
-        throw new IOException("block move is failed: " + response.getMessage());
-      }
+      String logInfo = "block move is failed";
+      DataTransferProtoUtil.checkBlockOpStatus(response, logInfo);
     }
 
     /** reset the object */

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a5f3156b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
index 6a2250f..e9547a8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
@@ -1116,16 +1116,10 @@ class DataXceiver extends Receiver implements Runnable {
       BlockOpResponseProto copyResponse = BlockOpResponseProto.parseFrom(
           PBHelper.vintPrefixed(proxyReply));
 
-      if (copyResponse.getStatus() != SUCCESS) {
-        if (copyResponse.getStatus() == ERROR_ACCESS_TOKEN) {
-          throw new IOException("Copy block " + block + " from "
-              + proxySock.getRemoteSocketAddress()
-              + " failed due to access token error");
-        }
-        throw new IOException("Copy block " + block + " from "
-            + proxySock.getRemoteSocketAddress() + " failed");
-      }
-
+      String logInfo = "copy block " + block + " from "
+          + proxySock.getRemoteSocketAddress();
+      DataTransferProtoUtil.checkBlockOpStatus(copyResponse, logInfo);
+
       // get checksum info about the block we're copying
       ReadOpChecksumInfoProto checksumInfo = copyResponse.getReadOpChecksumInfo();
       DataChecksum remoteChecksum = DataTransferProtoUtil.fromProto(