hadoop-common-commits mailing list archives

From msi...@apache.org
Subject hadoop git commit: HDDS-721. NullPointerException thrown while trying to read a file when datanode restarted. Contributed by Shashikant Banerjee.
Date Tue, 30 Oct 2018 14:16:16 GMT
Repository: hadoop
Updated Branches:
  refs/heads/ozone-0.3 da32f6537 -> 5e48a7047


HDDS-721. NullPointerException thrown while trying to read a file when datanode restarted.
Contributed by Shashikant Banerjee.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5e48a704
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5e48a704
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5e48a704

Branch: refs/heads/ozone-0.3
Commit: 5e48a70474fe9e427a15e1ed55375b6a5a64b553
Parents: da32f65
Author: Mukul Kumar Singh <msingh@apache.org>
Authored: Tue Oct 30 19:45:32 2018 +0530
Committer: Mukul Kumar Singh <msingh@apache.org>
Committed: Tue Oct 30 19:45:32 2018 +0530

----------------------------------------------------------------------
 .../java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java   | 4 ++--
 .../apache/hadoop/ozone/client/io/ChunkGroupInputStream.java | 4 ++++
 .../hadoop/ozone/client/io/ChunkGroupOutputStream.java       | 8 +++++++-
 3 files changed, 13 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5e48a704/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java
index 9526be3..c390f33 100644
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java
@@ -84,7 +84,7 @@ public class XceiverClientGrpc extends XceiverClientSpi {
   public void connect() throws Exception {
 
     // leader by default is the 1st datanode in the datanode list of pipeline
-    DatanodeDetails leader = this.pipeline.getLeader();
+    DatanodeDetails leader = this.pipeline.getMachines().get(0);
     // just make a connection to the 1st datanode at the beginning
     connectToDatanode(leader);
   }
@@ -201,7 +201,7 @@ public class XceiverClientGrpc extends XceiverClientSpi {
   public CompletableFuture<ContainerCommandResponseProto> sendCommandAsync(
       ContainerCommandRequestProto request)
       throws IOException, ExecutionException, InterruptedException {
-    return sendCommandAsync(request, pipeline.getLeader());
+    return sendCommandAsync(request, pipeline.getMachines().get(0));
   }
 
   private CompletableFuture<ContainerCommandResponseProto> sendCommandAsync(

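For context: before this change, connect() and the single-argument sendCommandAsync() overload resolved their target datanode via pipeline.getLeader(), which can apparently come back null after a datanode restart and surface as the NullPointerException reported in HDDS-721; the fix instead takes the first entry of pipeline.getMachines(). The sketch below contrasts the two patterns using simplified stand-in types (SimplePipeline and this connectToDatanode are illustrative only, not the actual HDDS classes).

----------------------------------------------------------------------
import java.io.IOException;
import java.util.Arrays;
import java.util.List;

// Illustrative stand-ins only; not the real HDDS Pipeline/DatanodeDetails.
public class LeaderVsFirstDatanodeSketch {

  static class SimplePipeline {
    final String id;
    final String leader;           // may be null after a datanode restart
    final List<String> machines;
    SimplePipeline(String id, String leader, List<String> machines) {
      this.id = id;
      this.leader = leader;
      this.machines = machines;
    }
  }

  // Stand-in for a connect call that dereferences its argument, so a
  // null "leader" blows up with a NullPointerException.
  static void connectToDatanode(String datanode) {
    System.out.println("connecting to " + datanode.trim());
  }

  // Old pattern: trust the leader field.
  static void connectViaLeader(SimplePipeline p) {
    connectToDatanode(p.leader);
  }

  // New pattern: pick the first datanode in the machine list.
  static void connectViaFirstDatanode(SimplePipeline p) throws IOException {
    if (p.machines.isEmpty()) {
      throw new IOException("No datanodes found in the pipeline " + p.id);
    }
    connectToDatanode(p.machines.get(0));
  }

  public static void main(String[] args) throws IOException {
    SimplePipeline restarted =
        new SimplePipeline("pipeline-7", null, Arrays.asList("dn-3", "dn-4"));
    try {
      connectViaLeader(restarted);
    } catch (NullPointerException e) {
      System.out.println("old path fails with NPE");
    }
    connectViaFirstDatanode(restarted);   // succeeds using dn-3
  }
}
----------------------------------------------------------------------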
http://git-wip-us.apache.org/repos/asf/hadoop/blob/5e48a704/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupInputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupInputStream.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupInputStream.java
index 3772c59..201d925 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupInputStream.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupInputStream.java
@@ -279,6 +279,10 @@ public class ChunkGroupInputStream extends InputStream implements Seekable {
       ContainerWithPipeline containerWithPipeline =
           storageContainerLocationClient.getContainerWithPipeline(containerID);
       Pipeline pipeline = containerWithPipeline.getPipeline();
+      if (pipeline.getMachines().isEmpty()) {
+        throw new IOException(
+            "No datanodes found in the pipeline " + pipeline.getId());
+      }
 
       // irrespective of the container state, we will always read via Standalone
       // protocol.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5e48a704/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupOutputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupOutputStream.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupOutputStream.java
index 5966718..d9a9910 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupOutputStream.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupOutputStream.java
@@ -24,6 +24,7 @@ import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result;
 import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
 import org.apache.hadoop.hdds.client.BlockID;
 import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
+import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
 import org.apache.hadoop.io.retry.RetryPolicy;
 import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
@@ -191,9 +192,14 @@ public class ChunkGroupOutputStream extends OutputStream {
     ContainerWithPipeline containerWithPipeline = scmClient
         .getContainerWithPipeline(subKeyInfo.getContainerID());
     ContainerInfo container = containerWithPipeline.getContainerInfo();
+    Pipeline pipeline = containerWithPipeline.getPipeline();
+    if (pipeline.getMachines().isEmpty()) {
+      throw new IOException(
+          "No datanodes found in the pipeline " + pipeline.getId());
+    }
 
     XceiverClientSpi xceiverClient =
-        xceiverClientManager.acquireClient(containerWithPipeline.getPipeline());
+        xceiverClientManager.acquireClient(pipeline);
     // create container if needed
     if (subKeyInfo.getShouldCreateContainer()) {
       try {

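One more note on why the extra isEmpty() checks in ChunkGroupInputStream and ChunkGroupOutputStream matter: with only the XceiverClientGrpc change, a pipeline whose machine list is empty would now fail inside getMachines().get(0) with an IndexOutOfBoundsException rather than an NPE. Checking the list before the client is acquired turns both cases into an IOException whose message names the pipeline. Below is a minimal, self-contained sketch of that guard; firstDatanode is a hypothetical helper for illustration, not HDDS API.

----------------------------------------------------------------------
import java.io.IOException;
import java.util.Collections;
import java.util.List;

// Illustrative guard only; mirrors the check added in both stream classes.
public class PipelineGuardSketch {

  static String firstDatanode(String pipelineId, List<String> machines)
      throws IOException {
    if (machines.isEmpty()) {
      throw new IOException("No datanodes found in the pipeline " + pipelineId);
    }
    return machines.get(0);
  }

  public static void main(String[] args) {
    try {
      // Empty machine list, e.g. right after the only datanode restarted.
      firstDatanode("pipeline-9", Collections.<String>emptyList());
    } catch (IOException e) {
      // Clear, actionable failure instead of an unchecked exception
      // thrown deep inside the client.
      System.out.println("rejected: " + e.getMessage());
    }
  }
}
----------------------------------------------------------------------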
