This is an automated email from the ASF dual-hosted git repository.
weichiu pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git
commit d207aba0265e786904ee2ac4e612c5537401c90d
Author: Wei-Chiu Chuang <weichiu@apache.org>
AuthorDate: Mon Sep 2 09:43:44 2019 -0700
Revert "HDFS-14706. Checksums are not checked if block meta file is less than 7 bytes.
Contributed by Stephen O'Donnell."
This reverts commit 7bebad61d9c3dbff81fdcf243585fd3e9ae59dde.
---
.../org/apache/hadoop/hdfs/DFSOutputStream.java | 72 +---------------------
.../hadoop/hdfs/client/HdfsClientConfigKeys.java | 3 -
.../apache/hadoop/hdfs/TestDFSOutputStream.java | 37 -----------
3 files changed, 2 insertions(+), 110 deletions(-)
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
index b7f2ff9..4a0d75e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
@@ -23,7 +23,6 @@ import java.io.InterruptedIOException;
import java.nio.ByteBuffer;
import java.nio.channels.ClosedChannelException;
import java.util.EnumSet;
-import java.util.Random;
import java.util.concurrent.atomic.AtomicReference;
import org.apache.hadoop.HadoopIllegalArgumentException;
@@ -127,8 +126,6 @@ public class DFSOutputStream extends FSOutputSummer
protected final AtomicReference<CachingStrategy> cachingStrategy;
private FileEncryptionInfo fileEncryptionInfo;
private int writePacketSize;
- private boolean leaseRecovered = false;
- private boolean exceptionInClose = false; //for unit test
/** Use {@link ByteArrayManager} to create buffer for non-heartbeat packets.*/
protected DFSPacket createPacket(int packetSize, int chunksPerPkt,
@@ -839,39 +836,6 @@ public class DFSOutputStream extends FSOutputSummer
}
}
- @VisibleForTesting
- public void setExceptionInClose(boolean enable) {
- exceptionInClose = enable;
- }
-
- private class EmulateExceptionInClose {
- private Random rand = null;
- private int kickedNum;
-
- EmulateExceptionInClose(int callNum) {
- if (exceptionInClose) {
- rand = new Random();
- }
- kickedNum = callNum;
- }
-
- void kickRandomException() throws IOException {
- if (exceptionInClose) {
- if (kickedNum > 0) {
- if (rand.nextInt(kickedNum) == 1) {
- throw new IOException("Emulated random IOException in close");
- }
- }
- }
- }
-
- void kickException() throws IOException {
- if (exceptionInClose) {
- throw new IOException("Emulated IOException in close");
- }
- }
- }
-
/**
* Closes this output stream and releases any system
* resources associated with this stream.
@@ -894,20 +858,7 @@ public class DFSOutputStream extends FSOutputSummer
}
protected synchronized void closeImpl() throws IOException {
- boolean recoverOnCloseException = dfsClient.getConfiguration().getBoolean(
- HdfsClientConfigKeys.Write.RECOVER_ON_CLOSE_EXCEPTION_KEY,
- HdfsClientConfigKeys.Write.RECOVER_ON_CLOSE_EXCEPTION_DEFAULT);
if (isClosed()) {
- if (recoverOnCloseException && !leaseRecovered) {
- try {
- dfsClient.endFileLease(fileId);
- dfsClient.recoverLease(src);
- leaseRecovered = true;
- } catch (Exception e) {
- LOG.warn("Fail to recover lease for {}", src, e);
- }
- }
-
LOG.debug("Closing an already closed stream. [Stream:{}, streamer:{}]",
closed, getStreamer().streamerClosed());
try {
@@ -924,11 +875,8 @@ public class DFSOutputStream extends FSOutputSummer
return;
}
- EmulateExceptionInClose eei = new EmulateExceptionInClose(5);
try {
- flushBuffer(); // flush from all upper layers
- // for test
- eei.kickRandomException();
+ flushBuffer(); // flush from all upper layers
if (currentPacket != null) {
enqueueCurrentPacket();
@@ -939,28 +887,12 @@ public class DFSOutputStream extends FSOutputSummer
}
try {
- flushInternal(); // flush all data to Datanodes
+ flushInternal(); // flush all data to Datanodes
} catch (IOException ioe) {
cleanupAndRethrowIOException(ioe);
}
- // for test
- eei.kickRandomException();
completeFile();
- // for test
- eei.kickException();
} catch (ClosedChannelException ignored) {
- } catch (IOException ioe) {
- if (recoverOnCloseException) {
- try {
- dfsClient.endFileLease(fileId);
- dfsClient.recoverLease(src);
- leaseRecovered = true;
- } catch (Exception e) {
- // Ignore exception rendered by recoverLease. Throw original
- // exception
- }
- }
- throw ioe;
} finally {
// Failures may happen when flushing data.
// Streamers may keep waiting for the new block information.
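Note for readers of the hunks above: with this revert, DFSOutputStream#closeImpl no longer attempts lease recovery when close() fails. An application that wants similar protection can drive recovery itself through the public DistributedFileSystem API. A minimal sketch, assuming the caller holds the DistributedFileSystem and the file's Path (the helper name and the choice to swallow the secondary exception are illustrative, mirroring the reverted code, and not part of HDFS):

    import java.io.IOException;

    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public class CloseWithRecovery {
      /**
       * Close the stream; if close() throws, ask the NameNode to start
       * lease recovery so another client can eventually reopen the file,
       * then rethrow the original failure (as the reverted code did).
       */
      static void closeOrRecover(DistributedFileSystem fs, Path path,
          FSDataOutputStream out) throws IOException {
        try {
          out.close();
        } catch (IOException closeFailure) {
          try {
            // Public API: begins lease recovery; returns true if the
            // file is already closed.
            fs.recoverLease(path);
          } catch (IOException ignored) {
            // Mirror the reverted behavior: surface the close failure,
            // not the recovery failure.
          }
          throw closeFailure;
        }
      }
    }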
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
index 2e2e4a6..97a8472 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
@@ -307,9 +307,6 @@ public interface HdfsClientConfigKeys {
String EXCLUDE_NODES_CACHE_EXPIRY_INTERVAL_KEY =
PREFIX + "exclude.nodes.cache.expiry.interval.millis";
long EXCLUDE_NODES_CACHE_EXPIRY_INTERVAL_DEFAULT = 10*MINUTE;
- String RECOVER_ON_CLOSE_EXCEPTION_KEY =
- PREFIX + "recover.on.close.exception";
- boolean RECOVER_ON_CLOSE_EXCEPTION_DEFAULT = false;
interface ByteArrayManager {
String PREFIX = Write.PREFIX + "byte-array-manager.";
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSOutputStream.java
index 57510a6..1891956 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSOutputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSOutputStream.java
@@ -31,7 +31,6 @@ import java.util.LinkedList;
import java.util.Map;
import java.util.Random;
-import org.apache.commons.lang3.reflect.FieldUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.fs.FSDataOutputStream;
@@ -41,7 +40,6 @@ import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.StreamCapabilities.StreamCapability;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DataStreamer.LastExceptionInStreamer;
-import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
import org.apache.hadoop.hdfs.client.impl.DfsClientConf;
import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
@@ -373,41 +371,6 @@ public class TestDFSOutputStream {
os.close();
}
- /**
- * If dfs.client.recover-on-close-exception.enable is set and exception
- * happens in close, the local lease should be closed and lease in namenode
- * should be recovered.
- */
- @Test
- public void testExceptionInClose() throws Exception {
- String testStr = "Test exception in close";
- DistributedFileSystem fs = cluster.getFileSystem();
- Path testFile = new Path("/closeexception");
- fs.getConf().setBoolean(
- HdfsClientConfigKeys.Write.RECOVER_ON_CLOSE_EXCEPTION_KEY, true);
- FSDataOutputStream os = fs.create(testFile);
- DFSOutputStream dos =
- (DFSOutputStream) FieldUtils.readField(os, "wrappedStream", true);
- dos.setExceptionInClose(true);
- os.write(testStr.getBytes());
- try {
- dos.close();
- // There should be exception
- Assert.assertTrue(false);
- } catch (IOException ioe) {
- GenericTestUtils.waitFor(() -> {
- boolean closed;
- try {
- closed = fs.isFileClosed(testFile);
- } catch (IOException e) {
- return false;
- }
- return closed;
- }, 1000, 5000);
- Assert.assertTrue(fs.isFileClosed(testFile));
- }
- }
-
@AfterClass
public static void tearDown() {
if (cluster != null) {
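The removed test waited for the NameNode to report the file closed before asserting. Outside of GenericTestUtils, the same check is available through the public DistributedFileSystem#isFileClosed call. A minimal sketch of that polling loop, with illustrative interval and timeout values (the helper is not part of HDFS):

    import java.io.IOException;

    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public class WaitForClose {
      /** Poll until the NameNode reports the file closed, or time out. */
      static boolean waitUntilClosed(DistributedFileSystem fs, Path path,
          long intervalMs, long timeoutMs) throws InterruptedException {
        long deadline = System.currentTimeMillis() + timeoutMs;
        while (System.currentTimeMillis() < deadline) {
          try {
            if (fs.isFileClosed(path)) {
              return true;
            }
          } catch (IOException e) {
            // Transient lookup failure: keep polling, as the removed test did.
          }
          Thread.sleep(intervalMs);
        }
        return false;
      }
    }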
---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org