Subject: svn commit: r1384603 - in /hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs: ./ src/main/java/org/apache/hadoop/fs/ src/main/java/org/apache/hadoop/hdfs/ src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/
Date: Thu, 13 Sep 2012 23:49:07 -0000
To: hdfs-commits@hadoop.apache.org
From: atm@apache.org
Message-Id: <20120913234907.6936E23889E1@eris.apache.org>

Author: atm
Date: Thu Sep 13 23:49:06 2012
New Revision: 1384603

URL: http://svn.apache.org/viewvc?rev=1384603&view=rev
Log:
HDFS-3924. Multi-byte id in HdfsVolumeId. Contributed by Andrew Wang.

Modified:
    hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
    hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/HdfsVolumeId.java
    hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockStorageLocationUtil.java
    hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java

Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1384603&r1=1384602&r2=1384603&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Thu Sep 13 23:49:06 2012
@@ -25,6 +25,8 @@ Release 2.0.3-alpha - Unreleased
     HDFS-3919. MiniDFSCluster:waitClusterUp can hang forever. (Andy Isaacson
     via eli)
 
+    HDFS-3924. Multi-byte id in HdfsVolumeId. (Andrew Wang via atm)
+
 Release 2.0.2-alpha - 2012-09-07
 
   INCOMPATIBLE CHANGES

Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/HdfsVolumeId.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/HdfsVolumeId.java?rev=1384603&r1=1384602&r2=1384603&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/HdfsVolumeId.java (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/HdfsVolumeId.java Thu Sep 13 23:49:06 2012
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.fs;
 
+import org.apache.commons.codec.binary.Base64;
 import org.apache.commons.lang.builder.EqualsBuilder;
 import org.apache.commons.lang.builder.HashCodeBuilder;
 import org.apache.hadoop.classification.InterfaceAudience;
@@ -31,10 +32,10 @@ import org.apache.hadoop.classification.
 @InterfaceAudience.Public
 public class HdfsVolumeId implements VolumeId {
 
-  private final byte id;
+  private final byte[] id;
   private final boolean isValid;
 
-  public HdfsVolumeId(byte id, boolean isValid) {
+  public HdfsVolumeId(byte[] id, boolean isValid) {
     this.id = id;
     this.isValid = isValid;
   }
@@ -69,6 +70,6 @@ public class HdfsVolumeId implements Vol
 
   @Override
   public String toString() {
-    return Byte.toString(id);
+    return Base64.encodeBase64String(id);
   }
 }

Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockStorageLocationUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockStorageLocationUtil.java?rev=1384603&r1=1384602&r2=1384603&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockStorageLocationUtil.java (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockStorageLocationUtil.java Thu Sep 13 23:49:06 2012
@@ -19,6 +19,7 @@
 package org.apache.hadoop.hdfs;
 
 import java.io.IOException;
+import java.nio.ByteBuffer;
 import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.Iterator;
@@ -201,7 +202,7 @@ class BlockStorageLocationUtil {
       ArrayList<VolumeId> l = new ArrayList<VolumeId>(b.getLocations().length);
       // Start off all IDs as invalid, fill it in later with results from RPCs
       for (int i = 0; i < b.getLocations().length; i++) {
-        l.add(new HdfsVolumeId((byte)-1, false));
+        l.add(new HdfsVolumeId(null, false));
       }
       blockVolumeIds.put(b, l);
     }
@@ -234,8 +235,8 @@ class BlockStorageLocationUtil {
         }
         // Get the VolumeId by indexing into the list of VolumeIds
         // provided by the datanode
-        HdfsVolumeId id = new HdfsVolumeId(metaVolumeIds.get(volumeIndex)[0],
-            true);
+        byte[] volumeId = metaVolumeIds.get(volumeIndex);
+        HdfsVolumeId id = new HdfsVolumeId(volumeId, true);
         // Find out which index we are in the LocatedBlock's replicas
         LocatedBlock locBlock = extBlockToLocBlock.get(extBlock);
         DatanodeInfo[] dnInfos = locBlock.getLocations();
@@ -255,8 +256,8 @@ class BlockStorageLocationUtil {
         }
         // Place VolumeId at the same index as the DN's index in the list of
         // replicas
-        List<VolumeId> VolumeIds = blockVolumeIds.get(locBlock);
-        VolumeIds.set(index, id);
+        List<VolumeId> volumeIds = blockVolumeIds.get(locBlock);
+        volumeIds.set(index, id);
       }
     }
     return blockVolumeIds;

Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java?rev=1384603&r1=1384602&r2=1384603&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java Thu Sep 13 23:49:06 2012
@@ -24,6 +24,7 @@ import java.io.FileOutputStream;
 import java.io.IOException;
 import java.io.InputStream;
 import java.io.RandomAccessFile;
+import java.nio.ByteBuffer;
 import java.nio.channels.FileChannel;
 import java.util.ArrayList;
 import java.util.Collection;
@@ -1676,10 +1677,10 @@ class FsDatasetImpl implements FsDataset
     List<byte[]> blocksVolumeIds = new ArrayList<byte[]>(volumes.volumes.size());
     // List of indexes into the list of VolumeIds, pointing at the VolumeId of
     // the volume that the block is on
-    List<Integer> blocksVolumendexes = new ArrayList<Integer>(blocks.size());
+    List<Integer> blocksVolumeIndexes = new ArrayList<Integer>(blocks.size());
     // Initialize the list of VolumeIds simply by enumerating the volumes
     for (int i = 0; i < volumes.volumes.size(); i++) {
-      blocksVolumeIds.add(new byte[] { (byte) i });
+      blocksVolumeIds.add(ByteBuffer.allocate(4).putInt(i).array());
     }
     // Determine the index of the VolumeId of each block's volume, by comparing
     // the block's volume against the enumerated volumes
@@ -1700,10 +1701,10 @@ class FsDatasetImpl implements FsDataset
       if (!isValid) {
         volumeIndex = Integer.MAX_VALUE;
       }
-      blocksVolumendexes.add(volumeIndex);
+      blocksVolumeIndexes.add(volumeIndex);
     }
     return new HdfsBlocksMetadata(blocks.toArray(new ExtendedBlock[] {}),
-        blocksVolumeIds, blocksVolumendexes);
+        blocksVolumeIds, blocksVolumeIndexes);
   }
 
   @Override
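
For context on the change above: HDFS-3924 widens the volume id from a single byte to a byte array, so a datanode can hand out distinct ids even when the volume index no longer fits in a byte, and HdfsVolumeId.toString() switches from Byte.toString() to a Base64 rendering. The standalone sketch below is not part of the commit (the class name and main() are purely illustrative); it only mirrors the two pieces of the patch, the ByteBuffer encoding used in FsDatasetImpl and the Base64 rendering used in HdfsVolumeId:

    import java.nio.ByteBuffer;
    import org.apache.commons.codec.binary.Base64;

    // Illustrative sketch of the behaviour introduced by HDFS-3924: volume ids
    // are byte arrays rather than a single byte, so any int-sized volume index
    // can be represented and printed.
    public class VolumeIdSketch {
      public static void main(String[] args) {
        for (int volumeIndex : new int[] { 0, 5, 300 }) {
          // Same encoding as the patched FsDatasetImpl: a 4-byte big-endian int.
          byte[] id = ByteBuffer.allocate(4).putInt(volumeIndex).array();
          // Same rendering as the patched HdfsVolumeId.toString().
          System.out.println(volumeIndex + " -> " + Base64.encodeBase64String(id));
        }
      }
    }

Note that an index like 300 could not be represented by the old single-byte id built with "(byte) i"; the 4-byte ByteBuffer encoding covers any volume index an int can hold.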