From: cmccabe@apache.org
To: common-commits@hadoop.apache.org
Reply-To: common-dev@hadoop.apache.org
Subject: hadoop git commit: HDFS-8860. Remove unused Replica copyOnWrite code (Lei (Eddy) Xu via Colin P. McCabe)
Date: Wed, 9 Sep 2015 01:37:31 +0000 (UTC)

Repository: hadoop
Updated Branches:
  refs/heads/trunk d9c1fab2e -> a153b9601


HDFS-8860. Remove unused Replica copyOnWrite code (Lei (Eddy) Xu via Colin P. McCabe)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a153b960
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a153b960
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a153b960

Branch: refs/heads/trunk
Commit: a153b9601ad8628fdd608d8696310ca8c1f58ff0
Parents: d9c1fab
Author: Colin Patrick Mccabe
Authored: Tue Sep 8 18:12:47 2015 -0700
Committer: Colin Patrick Mccabe
Committed: Tue Sep 8 18:37:22 2015 -0700

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |  2 +
 .../hdfs/server/datanode/FinalizedReplica.java  | 15 +---
 .../hdfs/server/datanode/ReplicaInfo.java       | 82 --------------------
 .../server/datanode/ReplicaUnderRecovery.java   | 10 ---
 .../datanode/ReplicaWaitingToBeRecovered.java   | 15 +---
 .../datanode/fsdataset/impl/FsDatasetImpl.java  |  3 -
 .../org/apache/hadoop/hdfs/TestFileAppend.java  | 72 -----------------
 .../hdfs/server/datanode/DataNodeTestUtils.java |  5 --
 .../fsdataset/impl/FsDatasetTestUtil.java       |  6 --
 .../fsdataset/impl/TestDatanodeRestart.java     | 72 -----------------
 10 files changed, 4 insertions(+), 278 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a153b960/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 8b50065..cfedb0a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -903,6 +903,8 @@ Release 2.8.0 - UNRELEASED
     HDFS-9019. Adding informative message to sticky bit permission denied
     exception. (xyao)
 
+    HDFS-8860. Remove unused Replica copyOnWrite code (Lei (Eddy) Xu via Colin P. McCabe)
+
   OPTIMIZATIONS
 
     HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a153b960/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FinalizedReplica.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FinalizedReplica.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FinalizedReplica.java
index cc32874..8daeb51 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FinalizedReplica.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FinalizedReplica.java
@@ -27,7 +27,6 @@ import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
  * This class describes a replica that has been finalized.
  */
 public class FinalizedReplica extends ReplicaInfo {
-  private boolean unlinked;      // copy-on-write done for block
 
   /**
    * Constructor
@@ -58,7 +57,6 @@ public class FinalizedReplica extends ReplicaInfo {
    */
   public FinalizedReplica(FinalizedReplica from) {
     super(from);
-    this.unlinked = from.isUnlinked();
   }
 
   @Override  // ReplicaInfo
@@ -66,16 +64,6 @@ public class FinalizedReplica extends ReplicaInfo {
     return ReplicaState.FINALIZED;
   }
 
-  @Override  // ReplicaInfo
-  public boolean isUnlinked() {
-    return unlinked;
-  }
-
-  @Override  // ReplicaInfo
-  public void setUnlinked() {
-    unlinked = true;
-  }
-
   @Override
   public long getVisibleLength() {
     return getNumBytes();       // all bytes are visible
@@ -98,7 +86,6 @@ public class FinalizedReplica extends ReplicaInfo {
 
   @Override
   public String toString() {
-    return super.toString()
-        + "\n  unlinked          =" + unlinked;
+    return super.toString();
   }
 }
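
For context on the deletions above (and the matching ones in the replica classes below): they all unwind one pattern. The ReplicaInfo base class reported every replica as already unlinked, and only replica states whose block files could still share hard links carried a real flag. A condensed, hypothetical sketch of that pattern, with abbreviated names rather than the actual HDFS classes:

    abstract class ReplicaSketch {
      // Base default: nothing to detach, so report "already unlinked".
      public boolean isUnlinked() { return true; }
      public void setUnlinked() { /* no-op by default */ }
    }

    class FinalizedReplicaSketch extends ReplicaSketch {
      private boolean unlinked;  // becomes true once copy-on-write has been done

      @Override
      public boolean isUnlinked() { return unlinked; }

      @Override
      public void setUnlinked() { unlinked = true; }
    }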

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a153b960/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInfo.java
index 136d8a9..31b14fa 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInfo.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInfo.java
@@ -197,22 +197,6 @@ abstract public class ReplicaInfo extends Block implements Replica {
   }
 
   /**
-   * check if this replica has already been unlinked.
-   * @return true if the replica has already been unlinked
-   *         or no need to be detached; false otherwise
-   */
-  public boolean isUnlinked() {
-    return true;                // no need to be unlinked
-  }
-
-  /**
-   * set that this replica is unlinked
-   */
-  public void setUnlinked() {
-    // no need to be unlinked
-  }
-
-  /**
    * Number of bytes reserved for this replica on disk.
    */
   public long getBytesReserved() {
@@ -229,72 +213,6 @@ abstract public class ReplicaInfo extends Block implements Replica {
     return 0;
   }
 
-  /**
-   * Copy specified file into a temporary file. Then rename the
-   * temporary file to the original name. This will cause any
-   * hardlinks to the original file to be removed. The temporary
-   * files are created in the same directory. The temporary files will
-   * be recovered (especially on Windows) on datanode restart.
-   */
-  private void unlinkFile(File file, Block b) throws IOException {
-    File tmpFile = DatanodeUtil.createTmpFile(b, DatanodeUtil.getUnlinkTmpFile(file));
-    try {
-      FileInputStream in = new FileInputStream(file);
-      try {
-        FileOutputStream out = new FileOutputStream(tmpFile);
-        try {
-          IOUtils.copyBytes(in, out, 16*1024);
-        } finally {
-          out.close();
-        }
-      } finally {
-        in.close();
-      }
-      if (file.length() != tmpFile.length()) {
-        throw new IOException("Copy of file " + file + " size " + file.length()+
-            " into file " + tmpFile +
-            " resulted in a size of " + tmpFile.length());
-      }
-      FileUtil.replaceFile(tmpFile, file);
-    } catch (IOException e) {
-      boolean done = tmpFile.delete();
-      if (!done) {
-        DataNode.LOG.info("detachFile failed to delete temporary file " +
-            tmpFile);
-      }
-      throw e;
-    }
-  }
-
-  /**
-   * Remove a hard link by copying the block to a temporary place and
-   * then moving it back
-   * @param numLinks number of hard links
-   * @return true if copy is successful;
-   *         false if it is already detached or no need to be detached
-   * @throws IOException if there is any copy error
-   */
-  public boolean unlinkBlock(int numLinks) throws IOException {
-    if (isUnlinked()) {
-      return false;
-    }
-    File file = getBlockFile();
-    if (file == null || getVolume() == null) {
-      throw new IOException("detachBlock:Block not found. " + this);
-    }
-    File meta = getMetaFile();
-
-    if (HardLink.getLinkCount(file) > numLinks) {
-      DataNode.LOG.info("CopyOnWrite for block " + this);
-      unlinkFile(file, this);
-    }
-    if (HardLink.getLinkCount(meta) > numLinks) {
-      unlinkFile(meta, this);
-    }
-    setUnlinked();
-    return true;
-  }
-
   @Override  //Object
   public String toString() {
     return getClass().getSimpleName()

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a153b960/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaUnderRecovery.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaUnderRecovery.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaUnderRecovery.java
index 2cd8a01..558ee21 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaUnderRecovery.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaUnderRecovery.java
@@ -85,16 +85,6 @@ public class ReplicaUnderRecovery extends ReplicaInfo {
   public ReplicaInfo getOriginalReplica() {
     return original;
   }
-
-  @Override //ReplicaInfo
-  public boolean isUnlinked() {
-    return original.isUnlinked();
-  }
-
-  @Override //ReplicaInfo
-  public void setUnlinked() {
-    original.setUnlinked();
-  }
 
   @Override //ReplicaInfo
   public ReplicaState getState() {
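
For readers unfamiliar with the machinery being deleted here: unlinkBlock checked the hard-link count of the block and meta files and, when other links existed, rewrote each file through a temporary copy so the replica ended up as the sole link (the javadoc above describes the copy-then-rename trick). A minimal JDK-only sketch of the same technique, assuming a POSIX file system for the unix:nlink attribute and a hypothetical ".unlinked" temp-file suffix in place of DatanodeUtil.getUnlinkTmpFile:

    import java.io.IOException;
    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.nio.file.StandardCopyOption;

    public final class UnlinkSketch {
      /** Returns true if the file shared its inode and was rewritten as a sole link. */
      public static boolean unlinkIfShared(Path file) throws IOException {
        int nlink = (Integer) Files.getAttribute(file, "unix:nlink");  // POSIX-only
        if (nlink <= 1) {
          return false;  // already the only link; nothing to do
        }
        // Copy into a temp file in the same directory, then atomically rename it
        // over the original. The rename detaches this directory entry from the
        // shared inode; any other hard links keep the old bytes.
        Path tmp = file.resolveSibling(file.getFileName() + ".unlinked");
        Files.copy(file, tmp, StandardCopyOption.REPLACE_EXISTING);
        if (Files.size(tmp) != Files.size(file)) {
          throw new IOException("Copy of " + file + " into " + tmp + " was truncated");
        }
        Files.move(tmp, file, StandardCopyOption.REPLACE_EXISTING,
            StandardCopyOption.ATOMIC_MOVE);
        return true;
      }
    }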

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a153b960/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaWaitingToBeRecovered.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaWaitingToBeRecovered.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaWaitingToBeRecovered.java
index 26ab3db..220649d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaWaitingToBeRecovered.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaWaitingToBeRecovered.java
@@ -33,7 +33,6 @@ import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
  * lease recovery.
  */
 public class ReplicaWaitingToBeRecovered extends ReplicaInfo {
-  private boolean unlinked;      // copy-on-write done for block
 
   /**
    * Constructor
@@ -64,7 +63,6 @@ public class ReplicaWaitingToBeRecovered extends ReplicaInfo {
    */
   public ReplicaWaitingToBeRecovered(ReplicaWaitingToBeRecovered from) {
     super(from);
-    this.unlinked = from.isUnlinked();
   }
 
   @Override //ReplicaInfo
@@ -73,16 +71,6 @@ public class ReplicaWaitingToBeRecovered extends ReplicaInfo {
   }
 
   @Override //ReplicaInfo
-  public boolean isUnlinked() {
-    return unlinked;
-  }
-
-  @Override //ReplicaInfo
-  public void setUnlinked() {
-    unlinked = true;
-  }
-
-  @Override //ReplicaInfo
   public long getVisibleLength() {
     return -1;  //no bytes are visible
   }
@@ -104,7 +92,6 @@ public class ReplicaWaitingToBeRecovered extends ReplicaInfo {
 
   @Override
   public String toString() {
-    return super.toString()
-        + "\n  unlinked=" + unlinked;
+    return super.toString();
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a153b960/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
index e981ccb..8722d35 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
@@ -1109,8 +1109,6 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
       throws IOException {
     // If the block is cached, start uncaching it.
     cacheManager.uncacheBlock(bpid, replicaInfo.getBlockId());
-    // unlink the finalized replica
-    replicaInfo.unlinkBlock(1);
 
     // construct a RBW replica with the new GS
     File blkfile = replicaInfo.getBlockFile();
@@ -2480,7 +2478,6 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
             + ", rur=" + rur);
       }
       if (rur.getNumBytes() > newlength) {
-        rur.unlinkBlock(1);
         truncateBlock(blockFile, metaFile, rur.getNumBytes(), newlength);
         if(!copyOnTruncate) {
           // update RUR with the new length

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a153b960/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend.java
index 85d92c9..7b7f415 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend.java
@@ -110,78 +110,6 @@ public class TestFileAppend{
   }
 
   /**
-   * Test that copy on write for blocks works correctly
-   * @throws IOException an exception might be thrown
-   */
-  @Test
-  public void testCopyOnWrite() throws IOException {
-    Configuration conf = new HdfsConfiguration();
-    if (simulatedStorage) {
-      SimulatedFSDataset.setFactory(conf);
-    }
-    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
-    FileSystem fs = cluster.getFileSystem();
-    InetSocketAddress addr = new InetSocketAddress("localhost",
-                                                   cluster.getNameNodePort());
-    DFSClient client = new DFSClient(addr, conf);
-    try {
-
-      // create a new file, write to it and close it.
-      //
-      Path file1 = new Path("/filestatus.dat");
-      FSDataOutputStream stm = AppendTestUtil.createFile(fs, file1, 1);
-      writeFile(stm);
-      stm.close();
-
-      // Get a handle to the datanode
-      DataNode[] dn = cluster.listDataNodes();
-      assertTrue("There should be only one datanode but found " + dn.length,
-                  dn.length == 1);
-
-      LocatedBlocks locations = client.getNamenode().getBlockLocations(
-                                  file1.toString(), 0, Long.MAX_VALUE);
-      List<LocatedBlock> blocks = locations.getLocatedBlocks();
-
-      //
-      // Create hard links for a few of the blocks
-      //
-      for (int i = 0; i < blocks.size(); i = i + 2) {
-        ExtendedBlock b = blocks.get(i).getBlock();
-        final File f = DataNodeTestUtils.getFile(dn[0],
-            b.getBlockPoolId(), b.getLocalBlock().getBlockId());
-        File link = new File(f.toString() + ".link");
-        System.out.println("Creating hardlink for File " + f + " to " + link);
-        HardLink.createHardLink(f, link);
-      }
-
-      //
-      // Detach all blocks. This should remove hardlinks (if any)
-      //
-      for (int i = 0; i < blocks.size(); i++) {
-        ExtendedBlock b = blocks.get(i).getBlock();
-        System.out.println("testCopyOnWrite detaching block " + b);
-        assertTrue("Detaching block " + b + " should have returned true",
-            DataNodeTestUtils.unlinkBlock(dn[0], b, 1));
-      }
-
-      // Since the blocks were already detached earlier, these calls should
-      // return false
-      //
-      for (int i = 0; i < blocks.size(); i++) {
-        ExtendedBlock b = blocks.get(i).getBlock();
-        System.out.println("testCopyOnWrite detaching block " + b);
-        assertTrue("Detaching block " + b + " should have returned false",
-            !DataNodeTestUtils.unlinkBlock(dn[0], b, 1));
-      }
-
-    } finally {
-      client.close();
-      fs.close();
-      cluster.shutdown();
-    }
-  }
-
-  /**
    * Test a simple flush on a simple HDFS file.
   * @throws IOException an exception might be thrown
   */
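
The deleted testCopyOnWrite exercised exactly this contract end to end against a MiniDFSCluster: detaching a hard-linked block returns true once and false on the second call. Stripped of the cluster scaffolding, the same contract can be checked with plain JDK calls; a self-contained sketch using the hypothetical UnlinkSketch above (run with -ea to enable the assertions):

    import java.io.IOException;
    import java.nio.file.Files;
    import java.nio.file.Path;

    public class UnlinkSketchDemo {
      public static void main(String[] args) throws IOException {
        Path dir = Files.createTempDirectory("unlink-demo");
        Path block = dir.resolve("blk_0001");
        Files.write(block, new byte[]{1, 2, 3});

        // Simulate an upgrade-style hard link sharing the block's inode.
        Path link = dir.resolve("blk_0001.link");
        Files.createLink(link, block);
        assert (Integer) Files.getAttribute(block, "unix:nlink") == 2;

        // The first detach rewrites the file away from the shared inode...
        assert UnlinkSketch.unlinkIfShared(block);
        assert (Integer) Files.getAttribute(block, "unix:nlink") == 1;

        // ...and a second call is a no-op, mirroring the removed test's
        // "should have returned false" assertions.
        assert !UnlinkSketch.unlinkIfShared(block);
      }
    }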

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a153b960/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java
index 2f9a3e5..b4071de 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java
@@ -151,11 +151,6 @@ public class DataNodeTestUtils {
       throws IOException {
     return FsDatasetTestUtil.getMetaFile(dn.getFSDataset(), bpid, b);
   }
-
-  public static boolean unlinkBlock(DataNode dn, ExtendedBlock bk, int numLinks
-      ) throws IOException {
-    return FsDatasetTestUtil.unlinkBlock(dn.getFSDataset(), bk, numLinks);
-  }
 
   public static long getPendingAsyncDeletions(DataNode dn) {
     return FsDatasetTestUtil.getPendingAsyncDeletions(dn.getFSDataset());

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a153b960/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetTestUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetTestUtil.java
index 7ac9b65..164385e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetTestUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetTestUtil.java
@@ -54,12 +54,6 @@ public class FsDatasetTestUtil {
     return FsDatasetUtil.getMetaFile(getBlockFile(fsd, bpid, b), b
         .getGenerationStamp());
   }
-
-  public static boolean unlinkBlock(FsDatasetSpi<?> fsd,
-      ExtendedBlock block, int numLinks) throws IOException {
-    final ReplicaInfo info = ((FsDatasetImpl)fsd).getReplicaInfo(block);
-    return info.unlinkBlock(numLinks);
-  }
 
   public static ReplicaInfo fetchReplicaInfo (final FsDatasetSpi<?> fsd,
       final String bpid, final long blockId) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a153b960/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestDatanodeRestart.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestDatanodeRestart.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestDatanodeRestart.java
index 4516696..8bbac9f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestDatanodeRestart.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestDatanodeRestart.java
@@ -143,79 +143,7 @@ public class TestDatanodeRestart {
     }
   }
 
-  // test recovering unlinked tmp replicas
-  @Test public void testRecoverReplicas() throws Exception {
-    Configuration conf = new HdfsConfiguration();
-    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 1024L);
-    conf.setInt(HdfsClientConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY, 512);
-    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
-    cluster.waitActive();
-    try {
-      FileSystem fs = cluster.getFileSystem();
-      for (int i=0; i<4; i++) {
-        Path fileName = new Path("/test"+i);
-        DFSTestUtil.createFile(fs, fileName, 1, (short)1, 0L);
-        DFSTestUtil.waitReplication(fs, fileName, (short)1);
-      }
-      String bpid = cluster.getNamesystem().getBlockPoolId();
-      DataNode dn = cluster.getDataNodes().get(0);
-      Iterator<ReplicaInfo> replicasItor =
-          dataset(dn).volumeMap.replicas(bpid).iterator();
-      ReplicaInfo replica = replicasItor.next();
-      createUnlinkTmpFile(replica, true, true);   // rename block file
-      createUnlinkTmpFile(replica, false, true);  // rename meta file
-      replica = replicasItor.next();
-      createUnlinkTmpFile(replica, true, false);  // copy block file
-      createUnlinkTmpFile(replica, false, false); // copy meta file
-      replica = replicasItor.next();
-      createUnlinkTmpFile(replica, true, true);   // rename block file
-      createUnlinkTmpFile(replica, false, false); // copy meta file
-
-      cluster.restartDataNodes();
-      cluster.waitActive();
-      dn = cluster.getDataNodes().get(0);
-
-      // check volumeMap: 4 finalized replica
-      Collection<ReplicaInfo> replicas = dataset(dn).volumeMap.replicas(bpid);
-      Assert.assertEquals(4, replicas.size());
-      replicasItor = replicas.iterator();
-      while (replicasItor.hasNext()) {
-        Assert.assertEquals(ReplicaState.FINALIZED,
-            replicasItor.next().getState());
-      }
-    } finally {
-      cluster.shutdown();
-    }
-  }
-
   private static FsDatasetImpl dataset(DataNode dn) {
     return (FsDatasetImpl)DataNodeTestUtils.getFSDataset(dn);
   }
-
-  private static void createUnlinkTmpFile(ReplicaInfo replicaInfo,
-      boolean changeBlockFile,
-      boolean isRename) throws IOException {
-    File src;
-    if (changeBlockFile) {
-      src = replicaInfo.getBlockFile();
-    } else {
-      src = replicaInfo.getMetaFile();
-    }
-    File dst = DatanodeUtil.getUnlinkTmpFile(src);
-    if (isRename) {
-      src.renameTo(dst);
-    } else {
-      FileInputStream in = new FileInputStream(src);
-      try {
-        FileOutputStream out = new FileOutputStream(dst);
-        try {
-          IOUtils.copyBytes(in, out, 1);
-        } finally {
-          out.close();
-        }
-      } finally {
-        in.close();
-      }
-    }
-  }
 }
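
Lastly, the deleted testRecoverReplicas covered the crash-recovery half of the feature: createUnlinkTmpFile staged the two ways an unlink could be interrupted, either a temp copy left beside the intact original (copy phase) or the original already renamed away to the temp name (rename phase, the Windows case the javadoc mentioned). The recovery rule itself is small; a hedged sketch of it, again using a hypothetical ".unlinked" suffix rather than DatanodeUtil.getUnlinkTmpFile, and simplified relative to the real datanode startup logic:

    import java.io.IOException;
    import java.nio.file.Files;
    import java.nio.file.Path;

    public final class UnlinkRecoverySketch {
      /** On restart, resolve a leftover "<name>.unlinked" file from an interrupted unlink. */
      public static void recover(Path file) throws IOException {
        Path tmp = file.resolveSibling(file.getFileName() + ".unlinked");
        if (!Files.exists(tmp)) {
          return;  // no unlink was in flight
        }
        if (Files.exists(file)) {
          Files.delete(tmp);       // interrupted mid-copy: the original is intact
        } else {
          Files.move(tmp, file);   // interrupted mid-rename: the copy is complete
        }
      }
    }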