From: jitendra@apache.org
To: common-commits@hadoop.apache.org
Subject: hadoop git commit: HDFS-8210. Ozone: Implement storage container manager.
Date: Thu, 21 May 2015 16:05:13 +0000 (UTC)

Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7240 2b6bcfdaf -> 770ed9262


HDFS-8210. Ozone: Implement storage container manager.

Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/770ed926
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/770ed926
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/770ed926

Branch: refs/heads/HDFS-7240
Commit: 770ed92623869a2ecd7630d5786902fb58fa0232
Parents: 2b6bcfd
Author: Jitendra Pandey
Authored: Thu May 21 09:04:37 2015 -0700
Committer: Jitendra Pandey
Committed: Thu May 21 09:04:37 2015 -0700

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |   3 +
 .../blockmanagement/BlockInfoContiguous.java    |   6 +-
 .../server/blockmanagement/BlockManager.java    |  72 ++---
 .../hdfs/server/blockmanagement/BlocksMap.java  |  16 +-
 .../blockmanagement/DatanodeDescriptor.java     |  18 +-
 .../blockmanagement/DatanodeStorageInfo.java    |  14 +-
 .../BitWiseTrieContainerMap.java                | 128 ++++++++
 .../StorageContainerConfiguration.java          |  32 ++
 .../StorageContainerManager.java                | 318 +++++++++++++++++++
 .../storagecontainer/StorageContainerMap.java   | 123 +++++++
 .../StorageContainerNameService.java            | 155 +++++++++
 .../protocol/ContainerLocationProtocol.java     |  27 ++
 .../protocol/StorageContainer.java              |  34 ++
 .../TestStorageContainerMap.java                |  92 ++++++
 14 files changed, 957 insertions(+), 81 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/770ed926/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 9cfad7d..71790eb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -33,6 +33,9 @@ Trunk (Unreleased)
 
     HDFS-3125. Add JournalService to enable Journal Daemon. (suresh)
 
+    HDFS-7240. Ozone
+      HDFS-8210. Ozone: Implement storage container manager. (jitendra)
+
   IMPROVEMENTS
 
     HDFS-4665. Move TestNetworkTopologyWithNodeGroup to common.


http://git-wip-us.apache.org/repos/asf/hadoop/blob/770ed926/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguous.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguous.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguous.java
index 769046b..d286784 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguous.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguous.java
@@ -17,13 +17,13 @@
  */
 package org.apache.hadoop.hdfs.server.blockmanagement;
 
-import java.util.LinkedList;
-
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
 import org.apache.hadoop.util.LightWeightGSet;
 
+import java.util.LinkedList;
+
 /**
  * BlockInfo class maintains for a given block
  * the {@link BlockCollection} it is part of and datanodes where the replicas of
@@ -96,7 +96,7 @@ public class BlockInfoContiguous extends Block
     return storage == null ? null : storage.getDatanodeDescriptor();
   }
 
-  DatanodeStorageInfo getStorageInfo(int index) {
+  public DatanodeStorageInfo getStorageInfo(int index) {
     assert this.triplets != null : "BlockInfo is not initialized";
     assert index >= 0 && index*3 < triplets.length : "Index is out of bound";
     return (DatanodeStorageInfo)triplets[index*3];


http://git-wip-us.apache.org/repos/asf/hadoop/blob/770ed926/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 54981fb..19a20bd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -17,48 +17,22 @@
  */
 package org.apache.hadoop.hdfs.server.blockmanagement;
 
-import static org.apache.hadoop.util.ExitUtil.terminate;
-
-import java.io.IOException;
-import java.io.PrintWriter;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.EnumSet;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Map;
-import java.util.Queue;
-import java.util.Set;
-import java.util.TreeMap;
-import java.util.TreeSet;
-import java.util.concurrent.ThreadLocalRandom;
-import java.util.concurrent.atomic.AtomicLong;
-
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Preconditions;
+import com.google.common.collect.Sets;
 import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileEncryptionInfo;
 import org.apache.hadoop.fs.StorageType;
-import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.HAUtil;
-import org.apache.hadoop.hdfs.protocol.Block;
-import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
+import org.apache.hadoop.hdfs.protocol.*;
 import org.apache.hadoop.hdfs.protocol.BlockListAsLongs.BlockReportReplica;
-import org.apache.hadoop.hdfs.protocol.DatanodeID;
-import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
-import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
-import org.apache.hadoop.fs.FileEncryptionInfo;
-import org.apache.hadoop.hdfs.protocol.LocatedBlock;
-import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
-import org.apache.hadoop.hdfs.protocol.UnregisteredNodeException;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
-import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier.AccessMode;
+import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager;
 import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey;
 import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys;
 import org.apache.hadoop.hdfs.server.blockmanagement.CorruptReplicasMap.Reason;
@@ -70,29 +44,26 @@ import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.NameNode.OperationCategory;
 import org.apache.hadoop.hdfs.server.namenode.Namesystem;
 import org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics;
-import org.apache.hadoop.hdfs.server.protocol.BlockCommand;
-import org.apache.hadoop.hdfs.server.protocol.BlockReportContext;
-import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations;
+import org.apache.hadoop.hdfs.server.protocol.*;
 import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations;
-import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
-import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage.State;
-import org.apache.hadoop.hdfs.server.protocol.KeyUpdateCommand;
-import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo;
-import org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks;
 import org.apache.hadoop.hdfs.util.LightWeightLinkedSet;
 import org.apache.hadoop.net.Node;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.Daemon;
 import org.apache.hadoop.util.LightWeightGSet;
 import org.apache.hadoop.util.Time;
-
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Preconditions;
-import com.google.common.collect.Sets;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import java.io.IOException;
+import java.io.PrintWriter;
+import java.util.*;
+import java.util.concurrent.ThreadLocalRandom;
+import java.util.concurrent.atomic.AtomicLong;
+
+import static org.apache.hadoop.util.ExitUtil.terminate;
+
 /**
  * Keeps information related to the blocks stored in the Hadoop cluster.
  */
@@ -268,7 +239,14 @@ public class BlockManager {
   private boolean checkNSRunning = true;
 
   public BlockManager(final Namesystem namesystem, final Configuration conf)
-    throws IOException {
+      throws IOException {
+    // Compute the map capacity by allocating 2% of total memory
+    this(namesystem, conf, new BlocksMap(
+        LightWeightGSet.computeCapacity(2.0, "BlocksMap")));
+  }
+
+  public BlockManager(final Namesystem namesystem, final Configuration conf,
+      BlocksMap blocksMap) throws IOException {
     this.namesystem = namesystem;
     datanodeManager = new DatanodeManager(this, namesystem, conf);
     heartbeatManager = datanodeManager.getHeartbeatManager();
@@ -279,9 +257,7 @@ public class BlockManager {
     invalidateBlocks = new InvalidateBlocks(
         datanodeManager.blockInvalidateLimit, startupDelayBlockDeletionInMs);
 
-    // Compute the map capacity by allocating 2% of total memory
-    blocksMap = new BlocksMap(
-        LightWeightGSet.computeCapacity(2.0, "BlocksMap"));
+    this.blocksMap = blocksMap;
     blockplacement = BlockPlacementPolicy.getInstance(
       conf, datanodeManager.getFSClusterStats(),
       datanodeManager.getNetworkTopology(),
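
The change above is the heart of the patch on the HDFS side: BlockManager gains a
second constructor that accepts an externally built BlocksMap, instead of always
sizing one off 2% of the heap. A minimal wiring sketch of what this enables
(hypothetical, for illustration only; it assumes the StorageContainerMap and
StorageContainerNameService classes introduced later in this patch):

    // Hypothetical wiring, not part of this diff: back a BlockManager with
    // the trie-based StorageContainerMap instead of the default GSet.
    Namesystem ns = new StorageContainerNameService();
    Configuration conf = new Configuration();
    // BlocksMap keeps the capacity hint, but the injected GSet manages
    // its own internal structure.
    BlocksMap containerBlocksMap = new BlocksMap(
        LightWeightGSet.computeCapacity(2.0, "StorageContainerMap"),
        new StorageContainerMap());
    BlockManager blockManager = new BlockManager(ns, conf, containerBlocksMap);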

http://git-wip-us.apache.org/repos/asf/hadoop/blob/770ed926/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlocksMap.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlocksMap.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlocksMap.java
index 5e7d34f..465f095 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlocksMap.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlocksMap.java
@@ -17,24 +17,23 @@
  */
 package org.apache.hadoop.hdfs.server.blockmanagement;
 
-import java.util.Iterator;
-
+import com.google.common.base.Preconditions;
+import com.google.common.base.Predicate;
+import com.google.common.collect.Iterables;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo.AddBlockResult;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.util.GSet;
 import org.apache.hadoop.util.LightWeightGSet;
 
-import com.google.common.base.Preconditions;
-import com.google.common.base.Predicate;
-import com.google.common.collect.Iterables;
+import java.util.Iterator;
 
 /**
  * This class maintains the map from a block to its metadata.
  * block's metadata currently includes blockCollection it belongs to and
  * the datanodes that store the block.
  */
-class BlocksMap {
+public class BlocksMap {
   private static class StorageIterator implements Iterator<DatanodeStorageInfo> {
     private final BlockInfoContiguous blockInfo;
     private int nextIdx = 0;
@@ -65,6 +64,11 @@ class BlocksMap {
 
   private GSet<Block, BlockInfoContiguous> blocks;
 
+  public BlocksMap(int capacity, GSet<Block, BlockInfoContiguous> b) {
+    this.capacity = capacity;
+    this.blocks = b;
+  }
+
   BlocksMap(int capacity) {
     // Use 2% of total memory to size the GSet capacity
     this.capacity = capacity;


http://git-wip-us.apache.org/repos/asf/hadoop/blob/770ed926/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
index 4731ad4..1f27e74 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
@@ -17,21 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.blockmanagement;
 
-import java.util.ArrayList;
-import java.util.BitSet;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Map;
-import java.util.Queue;
-import java.util.Set;
-
 import com.google.common.annotations.VisibleForTesting;
-
 import com.google.common.collect.ImmutableList;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -51,6 +37,8 @@ import org.apache.hadoop.hdfs.util.LightWeightHashSet;
 import org.apache.hadoop.util.IntrusiveCollection;
 import org.apache.hadoop.util.Time;
 
+import java.util.*;
+
 /**
  * This class extends the DatanodeInfo class with ephemeral information (eg
  * health, capacity, what blocks are associated with the Datanode) that is
@@ -335,7 +323,7 @@ public class DatanodeDescriptor extends DatanodeInfo {
    * Remove block from the list of blocks belonging to the data-node. Remove
    * data-node from the block.
    */
-  boolean removeBlock(BlockInfoContiguous b) {
+  public boolean removeBlock(BlockInfoContiguous b) {
     final DatanodeStorageInfo s = b.findStorageInfo(this);
     // if block exists on this datanode
     if (s != null) {


http://git-wip-us.apache.org/repos/asf/hadoop/blob/770ed926/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java
index c6c9001..12f0762 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java
@@ -17,21 +17,17 @@
  */
 package org.apache.hadoop.hdfs.server.blockmanagement;
 
-import java.util.Arrays;
-import java.util.Iterator;
-import java.util.List;
-
 import com.google.common.annotations.VisibleForTesting;
-
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
-import org.apache.hadoop.hdfs.protocol.DatanodeInfoWithStorage;
-import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
-import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage.State;
 import org.apache.hadoop.hdfs.server.protocol.StorageReport;
 
+import java.util.Arrays;
+import java.util.Iterator;
+import java.util.List;
+
 /**
  * A Datanode has one or more storages. A storage in the Datanode is represented
  * by this class.
@@ -193,7 +189,7 @@ public class DatanodeStorageInfo {
     this.lastBlockReportId = lastBlockReportId;
   }
 
-  State getState() {
+  public State getState() {
     return this.state;
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/770ed926/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/storagecontainer/BitWiseTrieContainerMap.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/storagecontainer/BitWiseTrieContainerMap.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/storagecontainer/BitWiseTrieContainerMap.java
new file mode 100644
index 0000000..7178d12
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/storagecontainer/BitWiseTrieContainerMap.java
@@ -0,0 +1,128 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.storagecontainer;
+
+import com.google.common.base.Preconditions;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
+import org.apache.hadoop.storagecontainer.protocol.StorageContainer;
+
+/**
+ * A simple trie implementation.
+ * A Storage Container Identifier can be broken into a prefix followed by
+ * a container id. The id is represented in the trie for efficient prefix
+ * matching. At any trie node, the left child represents the 0 bit and the
+ * right child represents the 1 bit. The storage containers exist only at
+ * the leaves.
+ */
+public class BitWiseTrieContainerMap {
+
+  private final int prefixLength;
+  private final long constantPrefix;
+
+  public BitWiseTrieContainerMap(long constantPrefix, int prefixLength) {
+    if (prefixLength > 64 || prefixLength <= 0) {
+      throw new IllegalArgumentException("Must have 0 < prefixLength <= 64");

[...]

+      blockIds) throws IOException {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public void blockReceivedAndDeleted(DatanodeRegistration registration,
+      String poolId, StorageReceivedDeletedBlocks[] rcvdAndDeletedBlocks)
+      throws IOException {
+    for(StorageReceivedDeletedBlocks r : rcvdAndDeletedBlocks) {
+      ns.writeLock();
+      try {
+        blockManager.processIncrementalBlockReport(registration, r);
+      } finally {
+        ns.writeUnlock();
+      }
+    }
+  }
+
+  @Override
+  public void errorReport(DatanodeRegistration registration,
+      int errorCode, String msg) throws IOException {
+    String dnName =
+        (registration == null) ? "Unknown DataNode" : registration.toString();
+
+    if (errorCode == DatanodeProtocol.NOTIFY) {
+      LOG.info("Error report from " + dnName + ": " + msg);
+      return;
+    }
+
+    if (errorCode == DatanodeProtocol.DISK_ERROR) {
+      LOG.warn("Disk error on " + dnName + ": " + msg);
+    } else if (errorCode == DatanodeProtocol.FATAL_DISK_ERROR) {
+      LOG.warn("Fatal disk error on " + dnName + ": " + msg);
+      blockManager.getDatanodeManager().removeDatanode(registration);
+    } else {
+      LOG.info("Error report from " + dnName + ": " + msg);
+    }
+  }
+
+  @Override
+  public NamespaceInfo versionRequest() throws IOException {
+    ns.readLock();
+    try {
+      return unprotectedGetNamespaceInfo();
+    } finally {
+      ns.readUnlock();
+    }
+  }
+
+  private NamespaceInfo unprotectedGetNamespaceInfo() {
+    return new NamespaceInfo(1, "random", "random", 2);
+  }
+
+  @Override
+  public void reportBadBlocks(LocatedBlock[] blocks) throws IOException {
+    // It doesn't make sense to have LocatedBlock in this API.
+    ns.writeLock();
+    try {
+      for (int i = 0; i < blocks.length; i++) {
+        ExtendedBlock blk = blocks[i].getBlock();
+        DatanodeInfo[] nodes = blocks[i].getLocations();
+        String[] storageIDs = blocks[i].getStorageIDs();
+        for (int j = 0; j < nodes.length; j++) {
+          blockManager.findAndMarkBlockAsCorrupt(blk, nodes[j],
+              storageIDs == null ? null: storageIDs[j],
+              "client machine reported it");
+        }
+      }
+    } finally {
+      ns.writeUnlock();
+    }
+  }
+
+  /**
+   * Start client and service RPC servers.
+   */
+  void start() {
+    clientRpcServer.start();
+    if (serviceRpcServer != null) {
+      serviceRpcServer.start();
+    }
+  }
+
+  /**
+   * Wait until the RPC servers have shutdown.
+   */
+  void join() throws InterruptedException {
+    clientRpcServer.join();
+    if (serviceRpcServer != null) {
+      serviceRpcServer.join();
+    }
+  }
+
+  @Override
+  public void commitBlockSynchronization(ExtendedBlock block,
+      long newgenerationstamp, long newlength, boolean closeFile,
+      boolean deleteblock, DatanodeID[] newtargets, String[] newtargetstorages)
+      throws IOException {
+    // Not needed for the purpose of object store
+    throw new UnsupportedOperationException();
+  }
+
+  public static void main(String[] argv) throws IOException {
+    StorageContainerConfiguration conf = new StorageContainerConfiguration();
+    StorageContainerManager scm = new StorageContainerManager(conf);
+    scm.start();
+    try {
+      scm.join();
+    } catch (InterruptedException e) {
+      e.printStackTrace();
+    }
+  }
+}
\ No newline at end of file
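
Judging by TestStorageContainerMap at the end of this patch, addBit(key) refines
the leaf matching key by one low-order bit of the key per call, so a container id
converges toward the full 64-bit key one bit at a time. A short illustration
(values taken from testTrieMap; the semantics are inferred from the unit tests,
not stated by the class itself):

    BitWiseTrieContainerMap trie = new BitWiseTrieContainerMap(0x2345fab, 28);
    long key = 0x2345fab123456679L;
    trie.get(key).getBlockId();  // 0x2345fab000000000L: only the 28-bit prefix
    trie.addBit(key);            // leaf now carries the lowest key bit (0x9 -> 1)
    trie.get(key).getBlockId();  // 0x2345fab000000001L
    // After 36 addBit calls in total, the id equals the full key.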

http://git-wip-us.apache.org/repos/asf/hadoop/blob/770ed926/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/storagecontainer/StorageContainerMap.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/storagecontainer/StorageContainerMap.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/storagecontainer/StorageContainerMap.java
new file mode 100644
index 0000000..117ee3b
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/storagecontainer/StorageContainerMap.java
@@ -0,0 +1,123 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.storagecontainer;
+
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Preconditions;
+import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
+import org.apache.hadoop.util.GSet;
+
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.Map;
+
+/**
+ * Maps a storage container to its location on datanodes. Similar to
+ * {@link org.apache.hadoop.hdfs.server.blockmanagement.BlocksMap}
+ */
+public class StorageContainerMap implements GSet<Block, BlockInfoContiguous> {
+
+  private Map<Long, BitWiseTrieContainerMap> containerPrefixMap
+      = new HashMap<Long, BitWiseTrieContainerMap>();
+  private int size;
+  public static final int PREFIX_LENGTH = 28;
+
+  @Override
+  public int size() {
+    // TODO: update size when new containers created
+    return size;
+  }
+
+  @Override
+  public boolean contains(Block key) {
+    return getBlockInfoContiguous(key.getBlockId()) != null;
+  }
+
+  @Override
+  public BlockInfoContiguous get(Block key) {
+    return getBlockInfoContiguous(key.getBlockId());
+  }
+
+  @Override
+  public BlockInfoContiguous put(BlockInfoContiguous element) {
+    BlockInfoContiguous info = getBlockInfoContiguous(element.getBlockId());
+    if (info == null) {
+      throw new IllegalStateException(
+          "The containers are created by splitting");
+    }
+    // TODO: replace
+    return info;
+  }
+
+  @Override
+  public BlockInfoContiguous remove(Block key) {
+    // It doesn't remove
+    return getBlockInfoContiguous(key.getBlockId());
+  }
+
+  @Override
+  public void clear() {
+    containerPrefixMap.clear();
+  }
+
+  @Override
+  public Iterator<BlockInfoContiguous> iterator() {
+    // TODO : Support iteration
+    throw new UnsupportedOperationException("");
+  }
+
+  /**
+   * Initialize a new trie for a new bucket.
+   */
+  public synchronized void initPrefix(long prefix) {
+    Preconditions.checkArgument((prefix >>> PREFIX_LENGTH) == 0,
+        "Prefix shouldn't be longer than "+PREFIX_LENGTH+" bits");
+    if (getTrieMap(prefix << (64 - PREFIX_LENGTH)) != null) {
+      // Already initialized
+      return;
+    }
+    BitWiseTrieContainerMap newTrie = new BitWiseTrieContainerMap(prefix,
+        PREFIX_LENGTH);
+    containerPrefixMap.put(prefix, newTrie);
+  }
+
+  @VisibleForTesting
+  synchronized BitWiseTrieContainerMap getTrieMap(long containerId) {
+    long prefix = containerId >>> (64 - PREFIX_LENGTH);
+    return containerPrefixMap.get(prefix);
+  }
+
+  @VisibleForTesting
+  BlockInfoContiguous getBlockInfoContiguous(long containerId) {
+    BitWiseTrieContainerMap map = getTrieMap(containerId);
+    if (map == null) {
+      return null;
+    }
+    return map.get(containerId);
+  }
+
+  public void splitContainer(long key) {
+    BitWiseTrieContainerMap map = getTrieMap(key);
+    if (map == null) {
+      throw new IllegalArgumentException("No container exists");
+    }
+    map.addBit(key);
+  }
+}
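
For reference, the shift arithmetic that routes an id to its trie: with
PREFIX_LENGTH = 28, a 64-bit container id is a 28-bit bucket prefix in the high
bits followed by 36 refinable low bits, and getTrieMap recovers the bucket with
an unsigned right shift. A small worked example using values from the tests
below:

    StorageContainerMap containers = new StorageContainerMap();
    containers.initPrefix(0xabcdef1);  // a 28-bit bucket prefix
    // 0xabcdef1111122223L >>> 36 == 0xabcdef1, so this id resolves to that trie
    containers.splitContainer(0xabcdef1111122223L);  // delegates to addBit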

http://git-wip-us.apache.org/repos/asf/hadoop/blob/770ed926/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/storagecontainer/StorageContainerNameService.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/storagecontainer/StorageContainerNameService.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/storagecontainer/StorageContainerNameService.java
new file mode 100644
index 0000000..4c42f11
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/storagecontainer/StorageContainerNameService.java
@@ -0,0 +1,155 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.storagecontainer;
+
+import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguousUnderConstruction;
+import org.apache.hadoop.hdfs.server.namenode.NameNode;
+import org.apache.hadoop.hdfs.server.namenode.Namesystem;
+import org.apache.hadoop.ipc.StandbyException;
+import org.apache.hadoop.security.AccessControlException;
+
+import java.util.concurrent.locks.ReentrantReadWriteLock;
+
+/**
+ * Namesystem implementation to be used by StorageContainerManager.
+ */
+public class StorageContainerNameService implements Namesystem {
+
+  private ReentrantReadWriteLock coarseLock = new ReentrantReadWriteLock();
+  private String blockPoolId;
+  private volatile boolean serviceRunning = true;
+
+  public void shutdown() {
+    serviceRunning = false;
+  }
+
+  @Override
+  public boolean isRunning() {
+    return serviceRunning;
+  }
+
+  @Override
+  public void checkSuperuserPrivilege() throws AccessControlException {
+    // TBD
+  }
+
+  @Override
+  public String getBlockPoolId() {
+    return blockPoolId;
+  }
+
+  public void setBlockPoolId(String id) {
+    this.blockPoolId = id;
+  }
+
+  @Override
+  public boolean isInStandbyState() {
+    // HA mode is not supported
+    return false;
+  }
+
+  @Override
+  public boolean isGenStampInFuture(Block block) {
+    // HA mode is not supported
+    return false;
+  }
+
+  @Override
+  public void adjustSafeModeBlockTotals(int deltaSafe, int deltaTotal) {
+    // TBD
+  }
+
+  @Override
+  public void checkOperation(NameNode.OperationCategory read)
+      throws StandbyException {
+    // HA mode is not supported
+  }
+
+  @Override
+  public boolean isInSnapshot(BlockInfoContiguousUnderConstruction blockUC) {
+    // Snapshots not supported
+    return false;
+  }
+
+  @Override
+  public void readLock() {
+    coarseLock.readLock().lock();
+  }
+
+  @Override
+  public void readUnlock() {
+    coarseLock.readLock().unlock();
+  }
+
+  @Override
+  public boolean hasReadLock() {
+    return coarseLock.getReadHoldCount() > 0 || hasWriteLock();
+  }
+
+  @Override
+  public void writeLock() {
+    coarseLock.writeLock().lock();
+  }
+
+  @Override
+  public void writeLockInterruptibly() throws InterruptedException {
+    coarseLock.writeLock().lockInterruptibly();
+  }
+
+  @Override
+  public void writeUnlock() {
+    coarseLock.writeLock().unlock();
+  }
+
+  @Override
+  public boolean hasWriteLock() {
+    return coarseLock.isWriteLockedByCurrentThread();
+  }
+
+  @Override
+  public void checkSafeMode() {
+    // TBD
+  }
+
+  @Override
+  public boolean isInSafeMode() {
+    return false;
+  }
+
+  @Override
+  public boolean isInStartupSafeMode() {
+    return false;
+  }
+
+  @Override
+  public boolean isPopulatingReplQueues() {
+    return false;
+  }
+
+  @Override
+  public void incrementSafeBlockCount(int replication) {
+    // Do nothing
+  }
+
+  @Override
+  public void decrementSafeBlockCount(Block b) {
+    // Do nothing
+  }
+}


http://git-wip-us.apache.org/repos/asf/hadoop/blob/770ed926/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/storagecontainer/protocol/ContainerLocationProtocol.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/storagecontainer/protocol/ContainerLocationProtocol.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/storagecontainer/protocol/ContainerLocationProtocol.java
new file mode 100644
index 0000000..73552d5
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/storagecontainer/protocol/ContainerLocationProtocol.java
@@ -0,0 +1,27 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.storagecontainer.protocol;
+
+/**
+ * This protocol is used by clients to request container
+ * location.
+ */
+public interface ContainerLocationProtocol {
+
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/770ed926/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/storagecontainer/protocol/StorageContainer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/storagecontainer/protocol/StorageContainer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/storagecontainer/protocol/StorageContainer.java
new file mode 100644
index 0000000..962f767
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/storagecontainer/protocol/StorageContainer.java
@@ -0,0 +1,34 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.storagecontainer.protocol;
+
+import org.apache.hadoop.hdfs.protocol.Block;
+
+/**
+ * Storage Container extends {@link org.apache.hadoop.hdfs.protocol.Block}.
+ */
+public class StorageContainer extends Block {
+  public StorageContainer(long constantPrefix) {
+    super(constantPrefix);
+  }
+
+  public long getContainerId() {
+    return super.getBlockId();
+  }
+}


http://git-wip-us.apache.org/repos/asf/hadoop/blob/770ed926/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/storagecontainer/TestStorageContainerMap.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/storagecontainer/TestStorageContainerMap.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/storagecontainer/TestStorageContainerMap.java
new file mode 100644
index 0000000..731599d
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/storagecontainer/TestStorageContainerMap.java
@@ -0,0 +1,92 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.storagecontainer;
+
+import org.apache.hadoop.storagecontainer.protocol.StorageContainer;
+import org.junit.Assert;
+import org.junit.Test;
+
+public class TestStorageContainerMap {
+
+  @Test
+  public void testTrieMap() {
+    BitWiseTrieContainerMap trie = new BitWiseTrieContainerMap(0x2345fab, 28);
+    long key = 0x2345fab123456679L;
+    Assert.assertEquals(0x2345fab000000000L, trie.get(key).getBlockId());
+    trie.addBit(key);
+    Assert.assertEquals(0x2345fab000000001L, trie.get(key).getBlockId());
+    trie.addBit(key);
+    Assert.assertEquals(0x2345fab000000001L, trie.get(key).getBlockId());
+    trie.addBit(key);
+    Assert.assertEquals(0x2345fab000000001L, trie.get(key).getBlockId());
+    trie.addBit(key);
+    Assert.assertEquals(0x2345fab000000009L, trie.get(key).getBlockId());
+    for (int i = 0; i < 32; i++) {
+      trie.addBit(key);
+    }
+    Assert.assertEquals(0x2345fab123456679L, trie.get(key).getBlockId());
+  }
+
+  @Test
+  public void testTrieMapNegativeKey() {
+    BitWiseTrieContainerMap trie = new BitWiseTrieContainerMap(0xf345fab, 28);
+    long key = 0xf345fab123456679L;
+    for (int i = 0; i < 13; i++) {
+      trie.addBit(key);
+    }
+    Assert.assertEquals(0xf345fab000000679L, trie.get(key).getBlockId());
+  }
+
+  @Test
+  public void testStorageContainer() {
+    StorageContainerMap containerMap = new StorageContainerMap();
+    try {
+      containerMap.initPrefix(0xabcdef12);
+      Assert.fail("Prefix is longer than expected");
+    } catch (IllegalArgumentException ex) {
+      // Expected
+    }
+    containerMap.initPrefix(0xabcdef1);
+    containerMap.initPrefix(0xfffffff);
+    Assert.assertTrue(null == containerMap.getTrieMap(0xabcdef2));
+    containerMap.splitContainer(0xabcdef1111122223L);
+    containerMap.splitContainer(0xabcdef1111122223L);
+    StorageContainer sc = new StorageContainer(0xabcdef1000000003L);
+    Assert.assertEquals(0xabcdef1000000003L,
+        containerMap.get(sc).getBlockId());
+    Assert.assertEquals(0xabcdef1000000003L,
+        containerMap.getTrieMap(0xabcdef1111122223L)
+            .get(0xabcdef1111122223L).getBlockId());
+  }
+
+  @Test
+  public void testStorageContainerMaxSplit() {
+    StorageContainerMap containerMap = new StorageContainerMap();
+    containerMap.initPrefix(0xabcdef1);
+    for (int i = 0; i < 64 - StorageContainerMap.PREFIX_LENGTH; i++) {
+      containerMap.splitContainer(0xabcdef1111122223L);
+    }
+    Assert.assertEquals(0xabcdef1111122223L,
+        containerMap.getTrieMap(0xabcdef1111122223L)
+            .get(0xabcdef1111122223L).getBlockId());
+    try {
+      containerMap.splitContainer(0xabcdef1111122223L);
+      Assert.fail("Exceeding max splits");
+    } catch (IllegalArgumentException expected) {}
+  }
+}
\ No newline at end of file
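
A closing note on testStorageContainerMaxSplit: the split bound follows directly
from the id layout. Assuming each splitContainer call consumes one bit, as the
tests above suggest:

    int refinableBits = 64 - StorageContainerMap.PREFIX_LENGTH;  // 64 - 28 = 36
    // The 36th split on the same key pins down the full 64-bit id; a 37th has
    // no bit left to consume, hence the expected IllegalArgumentException.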