From: szetszwo@apache.org
To: hdfs-commits@hadoop.apache.org
Date: Sun, 19 Aug 2012 05:46:59 -0000
Subject: svn commit: r1374696 - in /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src: main/java/org/apache/hadoop/fs/ main/java/org/apache/hadoop/hdfs/ main/java/org/apache/hadoop/hdfs/protocolPB/ main/java/org/apache/hadoop/hdfs/server/datanode/web/reso...

Author: szetszwo
Date: Sun Aug 19 05:46:57 2012
New Revision: 1374696

URL: http://svn.apache.org/viewvc?rev=1374696&view=rev
Log:
HADOOP-8240. Add a new API to allow users to specify a checksum type on
FileSystem.create(..). Contributed by Kihwal Lee

Modified:
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/Hdfs.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/resources/DatanodeWebHdfsMethods.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/Hdfs.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/Hdfs.java?rev=1374696&r1=1374695&r2=1374696&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/Hdfs.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/Hdfs.java Sun Aug 19 05:46:57 2012
@@ -31,6 +31,7 @@ import org.apache.hadoop.classification.
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.fs.Options.ChecksumOpt;
 import org.apache.hadoop.hdfs.CorruptFileBlockIterator;
 import org.apache.hadoop.hdfs.DFSClient;
 import org.apache.hadoop.hdfs.DFSUtil;
@@ -93,10 +94,10 @@ public class Hdfs extends AbstractFileSy
   public HdfsDataOutputStream createInternal(Path f,
       EnumSet<CreateFlag> createFlag, FsPermission absolutePermission,
       int bufferSize, short replication, long blockSize, Progressable progress,
-      int bytesPerChecksum, boolean createParent) throws IOException {
+      ChecksumOpt checksumOpt, boolean createParent) throws IOException {
     return new HdfsDataOutputStream(dfs.primitiveCreate(getUriPath(f),
         absolutePermission, createFlag, createParent, replication, blockSize,
-        progress, bufferSize, bytesPerChecksum), getStatistics());
+        progress, bufferSize, checksumOpt), getStatistics());
   }

   @Override
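For readers tracking the API change: with this commit in, a client can pick a
checksum type per file by handing a ChecksumOpt to FileSystem.create. A minimal
sketch, not part of the commit itself; the path, replication and block size
below are illustrative, and the create overload used is the one exercised by
the new test at the end of this diff:

    import java.util.EnumSet;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.CreateFlag;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Options.ChecksumOpt;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.permission.FsPermission;
    import org.apache.hadoop.util.DataChecksum;

    public class CreateWithChecksumType {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);

        // Request CRC32C with 512 bytes per checksum for this file only;
        // passing null keeps the configured client/server defaults.
        ChecksumOpt opt = new ChecksumOpt(DataChecksum.Type.CRC32C, 512);
        FSDataOutputStream out = fs.create(new Path("/tmp/example"),
            FsPermission.getDefault(), EnumSet.of(CreateFlag.CREATE),
            4096, (short) 3, 64 * 1024 * 1024, null, opt);
        out.writeBytes("hello");
        out.close();
      }
    }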
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java?rev=1374696&r1=1374695&r2=1374696&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java Sun Aug 19 05:46:57 2012
@@ -91,6 +91,7 @@ import org.apache.hadoop.fs.HdfsBlockLoc
 import org.apache.hadoop.fs.InvalidPathException;
 import org.apache.hadoop.fs.MD5MD5CRC32FileChecksum;
 import org.apache.hadoop.fs.Options;
+import org.apache.hadoop.fs.Options.ChecksumOpt;
 import org.apache.hadoop.fs.ParentNotDirectoryException;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.UnresolvedLinkException;
@@ -203,8 +204,7 @@ public class DFSClient implements java.i
     final int maxBlockAcquireFailures;
     final int confTime;
     final int ioBufferSize;
-    final DataChecksum.Type checksumType;
-    final int bytesPerChecksum;
+    final ChecksumOpt defaultChecksumOpt;
     final int writePacketSize;
     final int socketTimeout;
     final int socketCacheCapacity;
@@ -243,9 +243,7 @@ public class DFSClient implements java.i
       ioBufferSize = conf.getInt(
           CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY,
           CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_DEFAULT);
-      checksumType = getChecksumType(conf);
-      bytesPerChecksum = conf.getInt(DFS_BYTES_PER_CHECKSUM_KEY,
-          DFS_BYTES_PER_CHECKSUM_DEFAULT);
+      defaultChecksumOpt = getChecksumOptFromConf(conf);
       socketTimeout = conf.getInt(DFS_CLIENT_SOCKET_TIMEOUT_KEY,
           HdfsServerConstants.READ_TIMEOUT);
       /** dfs.write.packet.size is an internal config variable */
@@ -300,9 +298,32 @@ public class DFSClient implements java.i
       }
     }

-    private DataChecksum createChecksum() {
-      return DataChecksum.newDataChecksum(
-          checksumType, bytesPerChecksum);
+    // Construct a checksum option from conf
+    private ChecksumOpt getChecksumOptFromConf(Configuration conf) {
+      DataChecksum.Type type = getChecksumType(conf);
+      int bytesPerChecksum = conf.getInt(DFS_BYTES_PER_CHECKSUM_KEY,
+          DFS_BYTES_PER_CHECKSUM_DEFAULT);
+      return new ChecksumOpt(type, bytesPerChecksum);
+    }
+
+    // create a DataChecksum with the default option.
+    private DataChecksum createChecksum() throws IOException {
+      return createChecksum(null);
+    }
+
+    private DataChecksum createChecksum(ChecksumOpt userOpt)
+        throws IOException {
+      // Fill in any missing field with the default.
+      ChecksumOpt myOpt = ChecksumOpt.processChecksumOpt(
+          defaultChecksumOpt, userOpt);
+      DataChecksum dataChecksum = DataChecksum.newDataChecksum(
+          myOpt.getChecksumType(),
+          myOpt.getBytesPerChecksum());
+      if (dataChecksum == null) {
+        throw new IOException("Invalid checksum type specified: "
+            + myOpt.getChecksumType().name());
+      }
+      return dataChecksum;
     }
   }

@@ -1143,12 +1164,13 @@ public class DFSClient implements java.i
     return create(src, FsPermission.getDefault(),
         overwrite ? EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE)
         : EnumSet.of(CreateFlag.CREATE), replication, blockSize, progress,
-        buffersize);
+        buffersize, null);
   }

   /**
    * Call {@link #create(String, FsPermission, EnumSet, boolean, short,
-   * long, Progressable, int)} with createParent set to true.
+   * long, Progressable, int, ChecksumOpt)} with createParent
+   * set to true.
    */
   public DFSOutputStream create(String src,
                              FsPermission permission,
@@ -1156,10 +1178,11 @@ public class DFSClient implements java.i
                              short replication,
                              long blockSize,
                              Progressable progress,
-                             int buffersize)
+                             int buffersize,
+                             ChecksumOpt checksumOpt)
       throws IOException {
     return create(src, permission, flag, true,
-        replication, blockSize, progress, buffersize);
+        replication, blockSize, progress, buffersize, checksumOpt);
   }

   /**
@@ -1177,6 +1200,7 @@ public class DFSClient implements java.i
    * @param blockSize maximum block size
    * @param progress interface for reporting client progress
    * @param buffersize underlying buffer size
+   * @param checksumOpt checksum options
    *
    * @return output stream
    *
@@ -1190,8 +1214,8 @@ public class DFSClient implements java.i
                              short replication,
                              long blockSize,
                              Progressable progress,
-                             int buffersize)
-      throws IOException {
+                             int buffersize,
+                             ChecksumOpt checksumOpt) throws IOException {
     checkOpen();
     if (permission == null) {
       permission = FsPermission.getDefault();
@@ -1202,7 +1226,7 @@ public class DFSClient implements java.i
     }
     final DFSOutputStream result = DFSOutputStream.newStreamForCreate(this,
         src, masked, flag, createParent, replication, blockSize, progress,
-        buffersize, dfsClientConf.createChecksum());
+        buffersize, dfsClientConf.createChecksum(checksumOpt));
     beginFileLease(src, result);
     return result;
   }
@@ -1240,15 +1264,13 @@ public class DFSClient implements java.i
                              long blockSize,
                              Progressable progress,
                              int buffersize,
-                             int bytesPerChecksum)
+                             ChecksumOpt checksumOpt)
       throws IOException, UnresolvedLinkException {
     checkOpen();
     CreateFlag.validate(flag);
     DFSOutputStream result = primitiveAppend(src, flag, buffersize, progress);
     if (result == null) {
-      DataChecksum checksum = DataChecksum.newDataChecksum(
-          dfsClientConf.checksumType,
-          bytesPerChecksum);
+      DataChecksum checksum = dfsClientConf.createChecksum(checksumOpt);
       result = DFSOutputStream.newStreamForCreate(this, src, absPermission,
           flag, createParent, replication, blockSize, progress, buffersize,
           checksum);
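Worth noting for reviewers: the new createChecksum(ChecksumOpt) merges the
caller's option with the conf-derived default via
ChecksumOpt.processChecksumOpt, so a user can override just the type, just the
bytes-per-checksum, or neither. A rough, self-contained sketch of that merge
idea; merge() is a hypothetical stand-in written only to illustrate the
"fill in any missing field" semantics, and the negative-means-unspecified
sentinel is an assumption, not confirmed by this diff:

    import org.apache.hadoop.fs.Options.ChecksumOpt;
    import org.apache.hadoop.util.DataChecksum;

    public class ChecksumOptMergeSketch {
      // Hypothetical stand-in for ChecksumOpt.processChecksumOpt.
      static ChecksumOpt merge(ChecksumOpt defaults, ChecksumOpt user) {
        if (user == null) {
          return defaults;  // no per-file preference given
        }
        // Treat a negative bytes-per-checksum as "unspecified" (assumption).
        int bpc = user.getBytesPerChecksum() >= 0
            ? user.getBytesPerChecksum()
            : defaults.getBytesPerChecksum();
        return new ChecksumOpt(user.getChecksumType(), bpc);
      }

      public static void main(String[] args) {
        ChecksumOpt defaults = new ChecksumOpt(DataChecksum.Type.CRC32C, 512);
        ChecksumOpt perFile = new ChecksumOpt(DataChecksum.Type.CRC32, -1);
        ChecksumOpt merged = merge(defaults, perFile);
        // Prints CRC32 / 512: type from the user, chunk size from defaults.
        System.out.println(merged.getChecksumType() + " / "
            + merged.getBytesPerChecksum());
      }
    }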
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java?rev=1374696&r1=1374695&r2=1374696&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java Sun Aug 19 05:46:57 2012
@@ -42,6 +42,7 @@ import org.apache.hadoop.fs.FsStatus;
 import org.apache.hadoop.fs.LocatedFileStatus;
 import org.apache.hadoop.fs.MD5MD5CRC32FileChecksum;
 import org.apache.hadoop.fs.Options;
+import org.apache.hadoop.fs.Options.ChecksumOpt;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.PathFilter;
 import org.apache.hadoop.fs.RemoteIterator;
@@ -258,19 +259,19 @@ public class DistributedFileSystem exten
   public HdfsDataOutputStream create(Path f, FsPermission permission,
       boolean overwrite, int bufferSize, short replication, long blockSize,
       Progressable progress) throws IOException {
-    return create(f, permission,
+    return this.create(f, permission,
         overwrite ? EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE)
             : EnumSet.of(CreateFlag.CREATE), bufferSize, replication,
-        blockSize, progress);
+        blockSize, progress, null);
   }

   @Override
   public HdfsDataOutputStream create(Path f, FsPermission permission,
       EnumSet<CreateFlag> cflags, int bufferSize, short replication, long blockSize,
-      Progressable progress) throws IOException {
+      Progressable progress, ChecksumOpt checksumOpt) throws IOException {
     statistics.incrementWriteOps(1);
     final DFSOutputStream out = dfs.create(getPathName(f), permission, cflags,
-        replication, blockSize, progress, bufferSize);
+        replication, blockSize, progress, bufferSize, checksumOpt);
     return new HdfsDataOutputStream(out, statistics);
   }

@@ -279,11 +280,11 @@ public class DistributedFileSystem exten
   protected HdfsDataOutputStream primitiveCreate(Path f,
       FsPermission absolutePermission, EnumSet<CreateFlag> flag, int bufferSize,
       short replication, long blockSize, Progressable progress,
-      int bytesPerChecksum) throws IOException {
+      ChecksumOpt checksumOpt) throws IOException {
     statistics.incrementWriteOps(1);
     return new HdfsDataOutputStream(dfs.primitiveCreate(getPathName(f),
         absolutePermission, flag, true, replication, blockSize,
-        progress, bufferSize, bytesPerChecksum),statistics);
+        progress, bufferSize, checksumOpt),statistics);
   }

   /**
@@ -298,7 +299,8 @@ public class DistributedFileSystem exten
       flag.add(CreateFlag.CREATE);
     }
     return new HdfsDataOutputStream(dfs.create(getPathName(f), permission, flag,
-        false, replication, blockSize, progress, bufferSize), statistics);
+        false, replication, blockSize, progress,
+        bufferSize, null), statistics);
   }

   @Override
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java?rev=1374696&r1=1374695&r2=1374696&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java Sun Aug 19 05:46:57 2012
@@ -70,6 +70,7 @@ import org.apache.hadoop.hdfs.protocol.p
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlocksWithLocationsProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointCommandProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ChecksumTypeProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto;
@@ -134,6 +135,7 @@ import org.apache.hadoop.hdfs.server.pro
 import org.apache.hadoop.hdfs.server.protocol.UpgradeCommand;
 import org.apache.hadoop.io.EnumSetWritable;
 import org.apache.hadoop.io.Text;
+import org.apache.hadoop.util.DataChecksum;
 import org.apache.hadoop.security.token.Token;

 import com.google.protobuf.ByteString;
@@ -1003,7 +1005,8 @@ public class PBHelper {
         fs.getWritePacketSize(), (short) fs.getReplication(),
         fs.getFileBufferSize(),
         fs.getEncryptDataTransfer(),
-        fs.getTrashInterval());
+        fs.getTrashInterval(),
+        DataChecksum.Type.valueOf(fs.getChecksumType().name()));
   }

   public static FsServerDefaultsProto convert(FsServerDefaults fs) {
@@ -1015,7 +1018,9 @@ public class PBHelper {
         .setReplication(fs.getReplication())
         .setFileBufferSize(fs.getFileBufferSize())
         .setEncryptDataTransfer(fs.getEncryptDataTransfer())
-        .setTrashInterval(fs.getTrashInterval()).build();
+        .setTrashInterval(fs.getTrashInterval())
+        .setChecksumType(ChecksumTypeProto.valueOf(fs.getChecksumType().name()))
+        .build();
   }

   public static FsPermissionProto convert(FsPermission p) {

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/resources/DatanodeWebHdfsMethods.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/resources/DatanodeWebHdfsMethods.java?rev=1374696&r1=1374695&r2=1374696&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/resources/DatanodeWebHdfsMethods.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/resources/DatanodeWebHdfsMethods.java Sun Aug 19 05:46:57 2012
@@ -215,7 +215,7 @@ public class DatanodeWebHdfsMethods {
         fullpath, permission.getFsPermission(),
         overwrite.getValue() ? EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE)
             : EnumSet.of(CreateFlag.CREATE),
-        replication.getValue(conf), blockSize.getValue(conf), null, b), null);
+        replication.getValue(conf), blockSize.getValue(conf), null, b, null), null);
     IOUtils.copyBytes(in, out, b);
     out.close();
     out = null;
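The PBHelper changes translate between the wire enum and the Java enum purely
by constant name: ChecksumTypeProto and DataChecksum.Type both declare NULL,
CRC32 and CRC32C, so valueOf(name()) round-trips cleanly. A small sketch of
that mapping, illustrative rather than part of the commit:

    import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ChecksumTypeProto;
    import org.apache.hadoop.util.DataChecksum;

    public class ChecksumTypeMappingSketch {
      public static void main(String[] args) {
        // Java-side enum -> wire enum, then back, purely by constant name.
        DataChecksum.Type javaType = DataChecksum.Type.CRC32C;
        ChecksumTypeProto wireType = ChecksumTypeProto.valueOf(javaType.name());
        DataChecksum.Type roundTrip = DataChecksum.Type.valueOf(wireType.name());
        System.out.println(javaType == roundTrip);  // true
      }
    }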
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java?rev=1374696&r1=1374695&r2=1374696&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java Sun Aug 19 05:46:57 2012
@@ -25,6 +25,8 @@ import static org.apache.hadoop.hdfs.DFS
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_SIZE_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CHECKSUM_TYPE_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CHECKSUM_TYPE_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ENCRYPT_DATA_TRANSFER_KEY;
@@ -195,6 +197,7 @@ import org.apache.hadoop.security.token.
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.delegation.DelegationKey;
 import org.apache.hadoop.util.Daemon;
+import org.apache.hadoop.util.DataChecksum;
 import org.apache.hadoop.util.Time;
 import org.apache.hadoop.util.VersionInfo;
 import org.mortbay.util.ajax.JSON;
@@ -476,6 +479,16 @@ public class FSNamesystem implements Nam
           "must not be specified if HA is not enabled.");
     }

+    // Get the checksum type from config
+    String checksumTypeStr = conf.get(DFS_CHECKSUM_TYPE_KEY, DFS_CHECKSUM_TYPE_DEFAULT);
+    DataChecksum.Type checksumType;
+    try {
+      checksumType = DataChecksum.Type.valueOf(checksumTypeStr);
+    } catch (IllegalArgumentException iae) {
+      throw new IOException("Invalid checksum type in "
+          + DFS_CHECKSUM_TYPE_KEY + ": " + checksumTypeStr);
+    }
+
     this.serverDefaults = new FsServerDefaults(
         conf.getLongBytes(DFS_BLOCK_SIZE_KEY, DFS_BLOCK_SIZE_DEFAULT),
         conf.getInt(DFS_BYTES_PER_CHECKSUM_KEY, DFS_BYTES_PER_CHECKSUM_DEFAULT),
@@ -483,7 +496,8 @@ public class FSNamesystem implements Nam
         (short) conf.getInt(DFS_REPLICATION_KEY, DFS_REPLICATION_DEFAULT),
         conf.getInt(IO_FILE_BUFFER_SIZE_KEY, IO_FILE_BUFFER_SIZE_DEFAULT),
         conf.getBoolean(DFS_ENCRYPT_DATA_TRANSFER_KEY, DFS_ENCRYPT_DATA_TRANSFER_DEFAULT),
-        conf.getLong(FS_TRASH_INTERVAL_KEY, FS_TRASH_INTERVAL_DEFAULT));
+        conf.getLong(FS_TRASH_INTERVAL_KEY, FS_TRASH_INTERVAL_DEFAULT),
+        checksumType);

     this.maxFsObjects = conf.getLong(DFS_NAMENODE_MAX_OBJECTS_KEY,
                                      DFS_NAMENODE_MAX_OBJECTS_DEFAULT);

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto?rev=1374696&r1=1374695&r2=1374696&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto Sun Aug 19 05:46:57 2012
@@ -179,6 +179,15 @@ message HdfsFileStatusProto {
 }

 /**
+ * Checksum algorithms/types used in HDFS
+ */
+enum ChecksumTypeProto {
+  NULL = 0;
+  CRC32 = 1;
+  CRC32C = 2;
+}
+
+/**
  * HDFS Server Defaults
  */
 message FsServerDefaultsProto {
@@ -189,6 +198,7 @@ message FsServerDefaultsProto {
   required uint32 fileBufferSize = 5;
   optional bool encryptDataTransfer = 6 [default = false];
   optional uint64 trashInterval = 7 [default = 0];
+  optional ChecksumTypeProto checksumType = 8 [default = CRC32];
 }
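With the FSNamesystem change above, the NameNode now advertises a cluster-wide
default checksum type through FsServerDefaults. A sketch of the same
parse-and-validate step it performs at startup; the literal "dfs.checksum.type"
key and "CRC32C" default are assumptions standing in for the
DFS_CHECKSUM_TYPE_KEY and DFS_CHECKSUM_TYPE_DEFAULT constants imported in the
hunk above:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.util.DataChecksum;

    public class ServerChecksumDefaultSketch {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        conf.set("dfs.checksum.type", "CRC32C");

        // An unknown name makes valueOf throw IllegalArgumentException,
        // which FSNamesystem rewraps as an IOException.
        DataChecksum.Type type =
            DataChecksum.Type.valueOf(conf.get("dfs.checksum.type", "CRC32C"));
        System.out.println("Server default checksum type: " + type);
      }
    }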
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java?rev=1374696&r1=1374695&r2=1374696&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java Sun Aug 19 05:46:57 2012
@@ -28,6 +28,7 @@ import java.io.IOException;
 import java.net.URI;
 import java.security.PrivilegedExceptionAction;
 import java.util.Arrays;
+import java.util.EnumSet;
 import java.util.Random;

 import org.apache.commons.lang.ArrayUtils;
@@ -36,16 +37,19 @@ import org.apache.hadoop.conf.Configurat
 import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.fs.BlockStorageLocation;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
+import org.apache.hadoop.fs.CreateFlag;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileChecksum;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Options.ChecksumOpt;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.VolumeId;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
 import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.util.DataChecksum;
 import org.apache.hadoop.util.Time;
 import org.apache.log4j.Level;
 import org.junit.Test;
@@ -664,4 +668,54 @@ public class TestDistributedFileSystem {
           (l.getVolumeIds()[0].isValid()) ^ (l.getVolumeIds()[1].isValid()));
     }
   }
+
+  @Test
+  public void testCreateWithCustomChecksum() throws Exception {
+    Configuration conf = getTestConfiguration();
+    MiniDFSCluster cluster = null;
+    Path testBasePath = new Path("/test/csum");
+    // create args
+    Path path1 = new Path(testBasePath, "file_with_crc1");
+    Path path2 = new Path(testBasePath, "file_with_crc2");
+    ChecksumOpt opt1 = new ChecksumOpt(DataChecksum.Type.CRC32C, 512);
+    ChecksumOpt opt2 = new ChecksumOpt(DataChecksum.Type.CRC32, 512);
+
+    // common args
+    FsPermission perm = FsPermission.getDefault().applyUMask(
+        FsPermission.getUMask(conf));
+    EnumSet<CreateFlag> flags = EnumSet.of(CreateFlag.OVERWRITE,
+        CreateFlag.CREATE);
+    short repl = 1;
+
+    try {
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
+      FileSystem dfs = cluster.getFileSystem();
+
+      dfs.mkdirs(testBasePath);
+
+      // create two files with different checksum types
+      FSDataOutputStream out1 = dfs.create(path1, perm, flags, 4096, repl,
+          131072L, null, opt1);
+      FSDataOutputStream out2 = dfs.create(path2, perm, flags, 4096, repl,
+          131072L, null, opt2);
+
+      for (int i = 0; i < 1024; i++) {
+        out1.write(i);
+        out2.write(i);
+      }
+      out1.close();
+      out2.close();
+
+      // the two checksums must be different.
+      FileChecksum sum1 = dfs.getFileChecksum(path1);
+      FileChecksum sum2 = dfs.getFileChecksum(path2);
+      assertFalse(sum1.equals(sum2));
+    } finally {
+      if (cluster != null) {
+        cluster.getFileSystem().delete(testBasePath, true);
+        cluster.shutdown();
+      }
+    }
+  }
 }
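A follow-on observation about the test: getFileChecksum returns an
MD5-of-MD5-of-CRC digest computed over the per-chunk CRCs, so two files with
identical contents but different checksum types yield different file
checksums, which is exactly what the assertFalse checks. A sketch of the same
comparison against a running cluster; the paths come from the test above and
the helper itself is illustrative:

    import org.apache.hadoop.fs.FileChecksum;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ChecksumCompareSketch {
      // Returns true iff the two test files report the same file checksum;
      // with different checksum types this is expected to be false.
      static boolean sameChecksum(FileSystem dfs) throws Exception {
        FileChecksum sum1 =
            dfs.getFileChecksum(new Path("/test/csum/file_with_crc1"));
        FileChecksum sum2 =
            dfs.getFileChecksum(new Path("/test/csum/file_with_crc2"));
        return sum1.equals(sum2);
      }
    }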