hadoop-common-commits mailing list archives

From: jiten...@apache.org
Subject: hadoop git commit: HDFS-8210. Ozone: Implement storage container manager.
Date: Thu, 21 May 2015 16:05:13 GMT
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7240 2b6bcfdaf -> 770ed9262


HDFS-8210. Ozone: Implement storage container manager.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/770ed926
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/770ed926
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/770ed926

Branch: refs/heads/HDFS-7240
Commit: 770ed92623869a2ecd7630d5786902fb58fa0232
Parents: 2b6bcfd
Author: Jitendra Pandey <jitendra@apache.org>
Authored: Thu May 21 09:04:37 2015 -0700
Committer: Jitendra Pandey <jitendra@apache.org>
Committed: Thu May 21 09:04:37 2015 -0700

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |   3 +
 .../blockmanagement/BlockInfoContiguous.java    |   6 +-
 .../server/blockmanagement/BlockManager.java    |  72 ++---
 .../hdfs/server/blockmanagement/BlocksMap.java  |  16 +-
 .../blockmanagement/DatanodeDescriptor.java     |  18 +-
 .../blockmanagement/DatanodeStorageInfo.java    |  14 +-
 .../BitWiseTrieContainerMap.java                | 128 ++++++++
 .../StorageContainerConfiguration.java          |  32 ++
 .../StorageContainerManager.java                | 318 +++++++++++++++++++
 .../storagecontainer/StorageContainerMap.java   | 123 +++++++
 .../StorageContainerNameService.java            | 155 +++++++++
 .../protocol/ContainerLocationProtocol.java     |  27 ++
 .../protocol/StorageContainer.java              |  34 ++
 .../TestStorageContainerMap.java                |  92 ++++++
 14 files changed, 957 insertions(+), 81 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/770ed926/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 9cfad7d..71790eb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -33,6 +33,9 @@ Trunk (Unreleased)
 
     HDFS-3125. Add JournalService to enable Journal Daemon. (suresh)
 
+    HDFS-7240. Ozone
+      HDFS-8210. Ozone: Implement storage container manager. (jitendra)
+
   IMPROVEMENTS
 
     HDFS-4665. Move TestNetworkTopologyWithNodeGroup to common.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/770ed926/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguous.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguous.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguous.java
index 769046b..d286784 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguous.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguous.java
@@ -17,13 +17,13 @@
  */
 package org.apache.hadoop.hdfs.server.blockmanagement;
 
-import java.util.LinkedList;
-
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
 import org.apache.hadoop.util.LightWeightGSet;
 
+import java.util.LinkedList;
+
 /**
  * BlockInfo class maintains for a given block
  * the {@link BlockCollection} it is part of and datanodes where the replicas of 
@@ -96,7 +96,7 @@ public class BlockInfoContiguous extends Block
     return storage == null ? null : storage.getDatanodeDescriptor();
   }
 
-  DatanodeStorageInfo getStorageInfo(int index) {
+  public DatanodeStorageInfo getStorageInfo(int index) {
     assert this.triplets != null : "BlockInfo is not initialized";
     assert index >= 0 && index*3 < triplets.length : "Index is out of bound";
     return (DatanodeStorageInfo)triplets[index*3];

http://git-wip-us.apache.org/repos/asf/hadoop/blob/770ed926/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 54981fb..19a20bd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -17,48 +17,22 @@
  */
 package org.apache.hadoop.hdfs.server.blockmanagement;
 
-import static org.apache.hadoop.util.ExitUtil.terminate;
-
-import java.io.IOException;
-import java.io.PrintWriter;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.EnumSet;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Map;
-import java.util.Queue;
-import java.util.Set;
-import java.util.TreeMap;
-import java.util.TreeSet;
-import java.util.concurrent.ThreadLocalRandom;
-import java.util.concurrent.atomic.AtomicLong;
-
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Preconditions;
+import com.google.common.collect.Sets;
 import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileEncryptionInfo;
 import org.apache.hadoop.fs.StorageType;
-import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.HAUtil;
-import org.apache.hadoop.hdfs.protocol.Block;
-import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
+import org.apache.hadoop.hdfs.protocol.*;
 import org.apache.hadoop.hdfs.protocol.BlockListAsLongs.BlockReportReplica;
-import org.apache.hadoop.hdfs.protocol.DatanodeID;
-import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
-import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
-import org.apache.hadoop.fs.FileEncryptionInfo;
-import org.apache.hadoop.hdfs.protocol.LocatedBlock;
-import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
-import org.apache.hadoop.hdfs.protocol.UnregisteredNodeException;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
-import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier.AccessMode;
+import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager;
 import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey;
 import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys;
 import org.apache.hadoop.hdfs.server.blockmanagement.CorruptReplicasMap.Reason;
@@ -70,29 +44,26 @@ import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.NameNode.OperationCategory;
 import org.apache.hadoop.hdfs.server.namenode.Namesystem;
 import org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics;
-import org.apache.hadoop.hdfs.server.protocol.BlockCommand;
-import org.apache.hadoop.hdfs.server.protocol.BlockReportContext;
-import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations;
+import org.apache.hadoop.hdfs.server.protocol.*;
 import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations;
-import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
-import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage.State;
-import org.apache.hadoop.hdfs.server.protocol.KeyUpdateCommand;
-import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo;
-import org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks;
 import org.apache.hadoop.hdfs.util.LightWeightLinkedSet;
 import org.apache.hadoop.net.Node;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.Daemon;
 import org.apache.hadoop.util.LightWeightGSet;
 import org.apache.hadoop.util.Time;
-
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Preconditions;
-import com.google.common.collect.Sets;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import java.io.IOException;
+import java.io.PrintWriter;
+import java.util.*;
+import java.util.concurrent.ThreadLocalRandom;
+import java.util.concurrent.atomic.AtomicLong;
+
+import static org.apache.hadoop.util.ExitUtil.terminate;
+
 /**
  * Keeps information related to the blocks stored in the Hadoop cluster.
  */
@@ -268,7 +239,14 @@ public class BlockManager {
   private boolean checkNSRunning = true;
 
   public BlockManager(final Namesystem namesystem, final Configuration conf)
-    throws IOException {
+      throws IOException {
+    // Compute the map capacity by allocating 2% of total memory
+    this(namesystem, conf, new BlocksMap(
+        LightWeightGSet.computeCapacity(2.0, "BlocksMap")));
+  }
+
+  public BlockManager(final Namesystem namesystem, final Configuration conf,
+      BlocksMap blocksMap) throws IOException {
     this.namesystem = namesystem;
     datanodeManager = new DatanodeManager(this, namesystem, conf);
     heartbeatManager = datanodeManager.getHeartbeatManager();
@@ -279,9 +257,7 @@ public class BlockManager {
     invalidateBlocks = new InvalidateBlocks(
         datanodeManager.blockInvalidateLimit, startupDelayBlockDeletionInMs);
 
-    // Compute the map capacity by allocating 2% of total memory
-    blocksMap = new BlocksMap(
-        LightWeightGSet.computeCapacity(2.0, "BlocksMap"));
+    this.blocksMap = blocksMap;
     blockplacement = BlockPlacementPolicy.getInstance(
       conf, datanodeManager.getFSClusterStats(),
       datanodeManager.getNetworkTopology(),
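
The point of splitting the constructor above is injection: the existing two-argument form still builds the default BlocksMap sized at 2% of heap, while the new three-argument form accepts a caller-supplied map. A minimal wiring sketch, not part of this commit (the real call site is the StorageContainerManager constructor later in this patch; the class name BlockManagerWiringSketch is made up for the example):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
import org.apache.hadoop.hdfs.server.blockmanagement.BlocksMap;
import org.apache.hadoop.hdfs.server.namenode.Namesystem;
import org.apache.hadoop.storagecontainer.StorageContainerMap;
import org.apache.hadoop.storagecontainer.StorageContainerNameService;
import org.apache.hadoop.util.LightWeightGSet;

import java.io.IOException;

public class BlockManagerWiringSketch {
  public static void main(String[] args) throws IOException {
    Namesystem ns = new StorageContainerNameService();
    Configuration conf = new Configuration();

    // Back the BlocksMap with the trie-based StorageContainerMap added in this
    // patch instead of the default LightWeightGSet, then hand it to the new
    // three-argument BlockManager constructor.
    BlocksMap containerMap = new BlocksMap(
        LightWeightGSet.computeCapacity(2.0, "BlocksMap"),
        new StorageContainerMap());
    BlockManager bm = new BlockManager(ns, conf, containerMap);
  }
}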

http://git-wip-us.apache.org/repos/asf/hadoop/blob/770ed926/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlocksMap.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlocksMap.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlocksMap.java
index 5e7d34f..465f095 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlocksMap.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlocksMap.java
@@ -17,24 +17,23 @@
  */
 package org.apache.hadoop.hdfs.server.blockmanagement;
 
-import java.util.Iterator;
-
+import com.google.common.base.Preconditions;
+import com.google.common.base.Predicate;
+import com.google.common.collect.Iterables;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo.AddBlockResult;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.util.GSet;
 import org.apache.hadoop.util.LightWeightGSet;
 
-import com.google.common.base.Preconditions;
-import com.google.common.base.Predicate;
-import com.google.common.collect.Iterables;
+import java.util.Iterator;
 
 /**
  * This class maintains the map from a block to its metadata.
  * block's metadata currently includes blockCollection it belongs to and
  * the datanodes that store the block.
  */
-class BlocksMap {
+public class BlocksMap {
   private static class StorageIterator implements Iterator<DatanodeStorageInfo> {
     private final BlockInfoContiguous blockInfo;
     private int nextIdx = 0;
@@ -65,6 +64,11 @@ class BlocksMap {
   
   private GSet<Block, BlockInfoContiguous> blocks;
 
+  public BlocksMap(int capacity, GSet<Block, BlockInfoContiguous> b) {
+    this.capacity = capacity;
+    this.blocks = b;
+  }
+
   BlocksMap(int capacity) {
     // Use 2% of total memory to size the GSet capacity
     this.capacity = capacity;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/770ed926/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
index 4731ad4..1f27e74 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
@@ -17,21 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.blockmanagement;
 
-import java.util.ArrayList;
-import java.util.BitSet;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Map;
-import java.util.Queue;
-import java.util.Set;
-
 import com.google.common.annotations.VisibleForTesting;
-
 import com.google.common.collect.ImmutableList;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -51,6 +37,8 @@ import org.apache.hadoop.hdfs.util.LightWeightHashSet;
 import org.apache.hadoop.util.IntrusiveCollection;
 import org.apache.hadoop.util.Time;
 
+import java.util.*;
+
 /**
  * This class extends the DatanodeInfo class with ephemeral information (eg
  * health, capacity, what blocks are associated with the Datanode) that is
@@ -335,7 +323,7 @@ public class DatanodeDescriptor extends DatanodeInfo {
    * Remove block from the list of blocks belonging to the data-node. Remove
    * data-node from the block.
    */
-  boolean removeBlock(BlockInfoContiguous b) {
+  public boolean removeBlock(BlockInfoContiguous b) {
     final DatanodeStorageInfo s = b.findStorageInfo(this);
     // if block exists on this datanode
     if (s != null) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/770ed926/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java
index c6c9001..12f0762 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java
@@ -17,21 +17,17 @@
  */
 package org.apache.hadoop.hdfs.server.blockmanagement;
 
-import java.util.Arrays;
-import java.util.Iterator;
-import java.util.List;
-
 import com.google.common.annotations.VisibleForTesting;
-
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
-import org.apache.hadoop.hdfs.protocol.DatanodeInfoWithStorage;
-import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
-import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage.State;
 import org.apache.hadoop.hdfs.server.protocol.StorageReport;
 
+import java.util.Arrays;
+import java.util.Iterator;
+import java.util.List;
+
 /**
  * A Datanode has one or more storages. A storage in the Datanode is represented
  * by this class.
@@ -193,7 +189,7 @@ public class DatanodeStorageInfo {
     this.lastBlockReportId = lastBlockReportId;
   }
 
-  State getState() {
+  public State getState() {
     return this.state;
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/770ed926/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/storagecontainer/BitWiseTrieContainerMap.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/storagecontainer/BitWiseTrieContainerMap.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/storagecontainer/BitWiseTrieContainerMap.java
new file mode 100644
index 0000000..7178d12
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/storagecontainer/BitWiseTrieContainerMap.java
@@ -0,0 +1,128 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.storagecontainer;
+
+import com.google.common.base.Preconditions;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
+import org.apache.hadoop.storagecontainer.protocol.StorageContainer;
+
+/**
+ * A simple trie implementation.
+ * A Storage Container Identifier can be broken into a prefix followed by
+ * a container id. The id is represented in the trie for efficient prefix
+ * matching. At any trie node, the left child represents 0 bit and the right
+ * child represents the 1 bit. The storage containers exist only at the leaves.
+ */
+public class BitWiseTrieContainerMap {
+
+  private final int prefixLength;
+  private final long constantPrefix;
+
+  public BitWiseTrieContainerMap(long constantPrefix, int prefixLength) {
+    if (prefixLength > 64 || prefixLength <= 0) {
+      throw new IllegalArgumentException("Must have 0<keyLength<=64");
+    }
+    this.prefixLength = prefixLength;
+    this.constantPrefix = constantPrefix;
+    root = new TrieNode();
+    root.bitLength = 0;
+    long rootContainerId = constantPrefix << (64 - prefixLength);
+
+    // TODO: Fix hard coded replication
+    root.container = new BlockInfoContiguous(
+        new StorageContainer(rootContainerId), (short)3);
+  }
+
+  public synchronized BlockInfoContiguous get(long key) {
+    if (root.left == null || root.right == null) {
+      return root.container;
+    }
+    return getInternal(key, root, 1).container;
+  }
+
+  /**
+   * Splits the trie node corresponding to the key into left and right
+   * children.
+   * TODO: This should be idempotent
+   */
+  public synchronized void addBit(long key) {
+    TrieNode tn = root;
+    if (root.left != null || root.right != null) {
+      tn = getInternal(key, root, 1);
+    }
+
+    Preconditions.checkArgument(tn.bitLength < (64 - prefixLength),
+        "Maximum bitLength achieved for key "+key);
+
+    Preconditions.checkArgument(tn.right == null && tn.left == null,
+        "The extension of trie is allowed only at the leaf nodes");
+
+    // Container Id of the left child is same as that of parent, but
+    // the bitLength increases by one.
+    tn.left = new TrieNode(tn.container, tn.bitLength+1);
+
+    long rightContainerId = tn.container.getBlockId() | (0x1L << tn.bitLength);
+
+    // TODO: Fix hard coded replication
+    BlockInfoContiguous newContainer = new BlockInfoContiguous(
+        new StorageContainer(rightContainerId), (short) 3);
+
+    tn.right = new TrieNode(newContainer, tn.bitLength+1);
+    tn.container = null;
+  }
+
+  public long getConstantPrefix() {
+    return constantPrefix;
+  }
+
+  /**
+   * Performs bitwise matching recursively down the trie.
+   * The bitIndex is counted from left. The maximum depth of recursion
+   * is 64 - {@link
+   * org.apache.hadoop.storagecontainer.StorageContainerMap#PREFIX_LENGTH}
+   */
+  private TrieNode getInternal(long key, TrieNode head, int bitLength) {
+    long mask = 0x1L << (bitLength-1);
+
+    TrieNode nextNode;
+    nextNode = (key & mask) == 0 ? head.left : head.right;
+    if (nextNode == null) {
+      return head;
+    } else {
+      return getInternal(key, nextNode, bitLength+1);
+    }
+  }
+
+  private static class TrieNode {
+    private TrieNode left = null;
+    private TrieNode right = null;
+    private BlockInfoContiguous container;
+
+    private int bitLength;
+
+    TrieNode() {}
+
+    TrieNode(BlockInfoContiguous c, int l) {
+      this.container = c;
+      this.bitLength = l;
+    }
+  }
+
+  private TrieNode root;
+
+}
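
As a quick illustration of the bit-matching described in the class comment above, here is a standalone sketch, not part of the commit; it simply drives the new class the way TestStorageContainerMap at the end of this patch does, and the class name TrieSketch is made up for the example:

import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
import org.apache.hadoop.storagecontainer.BitWiseTrieContainerMap;

public class TrieSketch {
  public static void main(String[] args) {
    // The 28-bit constant prefix 0x2345fab ends up in the high bits of every id.
    BitWiseTrieContainerMap trie = new BitWiseTrieContainerMap(0x2345fab, 28);
    long key = 0x2345fab123456679L;

    // Before any split, the single root leaf owns the whole key space.
    BlockInfoContiguous c = trie.get(key);
    System.out.printf("%x%n", c.getBlockId());              // 2345fab000000000

    // addBit() splits the leaf that currently owns the key: the left child keeps
    // the parent's id, the right child's id has the next low-order bit set.
    trie.addBit(key);
    // The key's lowest bit is 1, so the lookup now walks to the right child.
    System.out.printf("%x%n", trie.get(key).getBlockId());  // 2345fab000000001
  }
}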

http://git-wip-us.apache.org/repos/asf/hadoop/blob/770ed926/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/storagecontainer/StorageContainerConfiguration.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/storagecontainer/StorageContainerConfiguration.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/storagecontainer/StorageContainerConfiguration.java
new file mode 100644
index 0000000..dbe28d2
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/storagecontainer/StorageContainerConfiguration.java
@@ -0,0 +1,32 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.storagecontainer;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
+
+/**
+ * Loads storage container specific configurations.
+ */
+public class StorageContainerConfiguration extends HdfsConfiguration {
+  static {
+    // adds the default resources
+    Configuration.addDefaultResource("storage-container-site.xml");
+  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/770ed926/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/storagecontainer/StorageContainerManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/storagecontainer/StorageContainerManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/storagecontainer/StorageContainerManager.java
new file mode 100644
index 0000000..cce9507
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/storagecontainer/StorageContainerManager.java
@@ -0,0 +1,318 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.storagecontainer;
+
+import com.google.protobuf.BlockingService;
+import org.apache.hadoop.ha.HAServiceProtocol;
+import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.hdfs.protocol.*;
+import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos;
+import org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolPB;
+import org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolServerSideTranslatorPB;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlocksMap;
+import org.apache.hadoop.hdfs.server.namenode.NameNode;
+import org.apache.hadoop.hdfs.server.namenode.Namesystem;
+import org.apache.hadoop.hdfs.server.protocol.*;
+import org.apache.hadoop.ipc.ProtobufRpcEngine;
+import org.apache.hadoop.ipc.RPC;
+import org.apache.hadoop.ipc.WritableRpcEngine;
+import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.storagecontainer.protocol.ContainerLocationProtocol;
+import org.apache.hadoop.util.LightWeightGSet;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.util.List;
+
+import static org.apache.hadoop.hdfs.DFSConfigKeys.*;
+
+/**
+ * Service that allocates storage containers and tracks their
+ * location.
+ */
+public class StorageContainerManager
+    implements DatanodeProtocol, ContainerLocationProtocol {
+
+  public static final Logger LOG =
+      LoggerFactory.getLogger(StorageContainerManager.class);
+
+  private final Namesystem ns = new StorageContainerNameService();
+  private final BlockManager blockManager;
+
+  private long txnId = 234;
+
+  /** The RPC server that listens to requests from DataNodes. */
+  private final RPC.Server serviceRpcServer;
+  private final InetSocketAddress serviceRPCAddress;
+
+  /** The RPC server that listens to requests from clients. */
+  private final RPC.Server clientRpcServer;
+  private final InetSocketAddress clientRpcAddress;
+
+  public StorageContainerManager(StorageContainerConfiguration conf)
+      throws IOException {
+    BlocksMap containerMap = new BlocksMap(
+        LightWeightGSet.computeCapacity(2.0, "BlocksMap"),
+        new StorageContainerMap());
+    this.blockManager = new BlockManager(ns, conf, containerMap);
+
+    int handlerCount =
+        conf.getInt(DFS_NAMENODE_HANDLER_COUNT_KEY,
+            DFS_NAMENODE_HANDLER_COUNT_DEFAULT);
+
+    RPC.setProtocolEngine(conf, DatanodeProtocolPB.class,
+        ProtobufRpcEngine.class);
+
+    DatanodeProtocolServerSideTranslatorPB dnProtoPbTranslator =
+        new DatanodeProtocolServerSideTranslatorPB(this);
+    BlockingService dnProtoPbService =
+        DatanodeProtocolProtos.DatanodeProtocolService
+            .newReflectiveBlockingService(dnProtoPbTranslator);
+
+    WritableRpcEngine.ensureInitialized();
+
+    InetSocketAddress serviceRpcAddr = NameNode.getServiceAddress(conf, false);
+    if (serviceRpcAddr != null) {
+      String bindHost =
+          conf.getTrimmed(DFS_NAMENODE_SERVICE_RPC_BIND_HOST_KEY);
+      if (bindHost == null || bindHost.isEmpty()) {
+        bindHost = serviceRpcAddr.getHostName();
+      }
+      LOG.info("Service RPC server is binding to " + bindHost + ":" +
+          serviceRpcAddr.getPort());
+
+      int serviceHandlerCount =
+          conf.getInt(DFS_NAMENODE_SERVICE_HANDLER_COUNT_KEY,
+              DFS_NAMENODE_SERVICE_HANDLER_COUNT_DEFAULT);
+      serviceRpcServer = new RPC.Builder(conf)
+          .setProtocol(
+              org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolPB.class)
+          .setInstance(dnProtoPbService)
+          .setBindAddress(bindHost)
+          .setPort(serviceRpcAddr.getPort())
+          .setNumHandlers(serviceHandlerCount)
+          .setVerbose(false)
+          .setSecretManager(null)
+          .build();
+
+      DFSUtil.addPBProtocol(conf, DatanodeProtocolPB.class, dnProtoPbService,
+          serviceRpcServer);
+
+      InetSocketAddress listenAddr = serviceRpcServer.getListenerAddress();
+      serviceRPCAddress = new InetSocketAddress(
+          serviceRpcAddr.getHostName(), listenAddr.getPort());
+      conf.set(DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,
+          NetUtils.getHostPortString(serviceRPCAddress));
+    } else {
+      serviceRpcServer = null;
+      serviceRPCAddress = null;
+    }
+
+    InetSocketAddress rpcAddr = NameNode.getAddress(conf);
+    String bindHost = conf.getTrimmed(DFS_NAMENODE_RPC_BIND_HOST_KEY);
+    if (bindHost == null || bindHost.isEmpty()) {
+      bindHost = rpcAddr.getHostName();
+    }
+    LOG.info("RPC server is binding to " + bindHost + ":" + rpcAddr.getPort());
+
+    clientRpcServer = new RPC.Builder(conf)
+        .setProtocol(
+            org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolPB.class)
+        .setInstance(dnProtoPbService)
+        .setBindAddress(bindHost)
+        .setPort(rpcAddr.getPort())
+        .setNumHandlers(handlerCount)
+        .setVerbose(false)
+        .setSecretManager(null)
+        .build();
+
+    DFSUtil.addPBProtocol(conf, DatanodeProtocolPB.class, dnProtoPbService,
+        clientRpcServer);
+
+    // The rpc-server port can be ephemeral... ensure we have the correct info
+    InetSocketAddress listenAddr = clientRpcServer.getListenerAddress();
+    clientRpcAddress = new InetSocketAddress(
+        rpcAddr.getHostName(), listenAddr.getPort());
+    conf.set(FS_DEFAULT_NAME_KEY,
+        NameNode.getUri(clientRpcAddress).toString());
+  }
+
+  @Override
+  public DatanodeRegistration registerDatanode(
+      DatanodeRegistration registration) throws IOException {
+    ns.writeLock();
+    try {
+      blockManager.getDatanodeManager().registerDatanode(registration);
+    } finally {
+      ns.writeUnlock();
+    }
+    return registration;
+  }
+
+  @Override
+  public HeartbeatResponse sendHeartbeat(DatanodeRegistration registration,
+      StorageReport[] reports, long dnCacheCapacity, long dnCacheUsed,
+      int xmitsInProgress, int xceiverCount, int failedVolumes,
+      VolumeFailureSummary volumeFailureSummary) throws IOException {
+    ns.readLock();
+    try {
+      final int maxTransfer = blockManager.getMaxReplicationStreams()
+          - xmitsInProgress;
+      DatanodeCommand[] cmds = blockManager.getDatanodeManager()
+          .handleHeartbeat(registration, reports, ns.getBlockPoolId(), 0, 0,
+              xceiverCount, maxTransfer, failedVolumes, volumeFailureSummary);
+
+      return new HeartbeatResponse(cmds,
+          new NNHAStatusHeartbeat(HAServiceProtocol.HAServiceState.ACTIVE,
+              txnId), null);
+    } finally {
+      ns.readUnlock();
+    }
+  }
+
+  @Override
+  public DatanodeCommand blockReport(DatanodeRegistration registration,
+      String poolId, StorageBlockReport[] reports,
+      BlockReportContext context) throws IOException {
+    for (int r = 0; r < reports.length; r++) {
+      final BlockListAsLongs storageContainerList = reports[r].getBlocks();
+      blockManager.processReport(registration, reports[r].getStorage(),
+          storageContainerList, context, (r == reports.length - 1));
+    }
+    return null;
+  }
+
+  @Override
+  public DatanodeCommand cacheReport(DatanodeRegistration registration,
+      String poolId, List<Long> blockIds) throws IOException {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public void blockReceivedAndDeleted(DatanodeRegistration registration,
+      String poolId, StorageReceivedDeletedBlocks[] rcvdAndDeletedBlocks)
+      throws IOException {
+    for(StorageReceivedDeletedBlocks r : rcvdAndDeletedBlocks) {
+      ns.writeLock();
+      try {
+        blockManager.processIncrementalBlockReport(registration, r);
+      } finally {
+        ns.writeUnlock();
+      }
+    }
+  }
+
+  @Override
+  public void errorReport(DatanodeRegistration registration,
+      int errorCode, String msg) throws IOException {
+    String dnName =
+        (registration == null) ? "Unknown DataNode" : registration.toString();
+
+    if (errorCode == DatanodeProtocol.NOTIFY) {
+      LOG.info("Error report from " + dnName + ": " + msg);
+      return;
+    }
+
+    if (errorCode == DatanodeProtocol.DISK_ERROR) {
+      LOG.warn("Disk error on " + dnName + ": " + msg);
+    } else if (errorCode == DatanodeProtocol.FATAL_DISK_ERROR) {
+      LOG.warn("Fatal disk error on " + dnName + ": " + msg);
+      blockManager.getDatanodeManager().removeDatanode(registration);
+    } else {
+      LOG.info("Error report from " + dnName + ": " + msg);
+    }
+  }
+
+  @Override
+  public NamespaceInfo versionRequest() throws IOException {
+    ns.readLock();
+    try {
+      return unprotectedGetNamespaceInfo();
+    } finally {
+      ns.readUnlock();
+    }
+  }
+
+  private NamespaceInfo unprotectedGetNamespaceInfo() {
+    return new NamespaceInfo(1, "random", "random", 2);
+  }
+
+  @Override
+  public void reportBadBlocks(LocatedBlock[] blocks) throws IOException {
+    // It doesn't make sense to have LocatedBlock in this API.
+    ns.writeLock();
+    try {
+      for (int i = 0; i < blocks.length; i++) {
+        ExtendedBlock blk = blocks[i].getBlock();
+        DatanodeInfo[] nodes = blocks[i].getLocations();
+        String[] storageIDs = blocks[i].getStorageIDs();
+        for (int j = 0; j < nodes.length; j++) {
+          blockManager.findAndMarkBlockAsCorrupt(blk, nodes[j],
+              storageIDs == null ? null: storageIDs[j],
+              "client machine reported it");
+        }
+      }
+    } finally {
+      ns.writeUnlock();
+    }
+  }
+
+  /**
+   * Start client and service RPC servers.
+   */
+  void start() {
+    clientRpcServer.start();
+    if (serviceRpcServer != null) {
+      serviceRpcServer.start();
+    }
+  }
+
+  /**
+   * Wait until the RPC servers have shutdown.
+   */
+  void join() throws InterruptedException {
+    clientRpcServer.join();
+    if (serviceRpcServer != null) {
+      serviceRpcServer.join();
+    }
+  }
+
+  @Override
+  public void commitBlockSynchronization(ExtendedBlock block,
+      long newgenerationstamp, long newlength, boolean closeFile,
+      boolean deleteblock, DatanodeID[] newtargets, String[] newtargetstorages)
+      throws IOException {
+    // Not needed for the purpose of object store
+    throw new UnsupportedOperationException();
+  }
+
+  public static void main(String[] argv) throws IOException {
+    StorageContainerConfiguration conf = new StorageContainerConfiguration();
+    StorageContainerManager scm = new StorageContainerManager(conf);
+    scm.start();
+    try {
+      scm.join();
+    } catch (InterruptedException e) {
+      e.printStackTrace();
+    }
+  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/770ed926/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/storagecontainer/StorageContainerMap.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/storagecontainer/StorageContainerMap.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/storagecontainer/StorageContainerMap.java
new file mode 100644
index 0000000..117ee3b
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/storagecontainer/StorageContainerMap.java
@@ -0,0 +1,123 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.storagecontainer;
+
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Preconditions;
+import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
+import org.apache.hadoop.util.GSet;
+
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.Map;
+
+/**
+ * Maps a storage container to its location on datanodes. Similar to
+ * {@link org.apache.hadoop.hdfs.server.blockmanagement.BlocksMap}
+ */
+public class StorageContainerMap implements GSet<Block, BlockInfoContiguous> {
+
+  private Map<Long, BitWiseTrieContainerMap> containerPrefixMap
+      = new HashMap<Long, BitWiseTrieContainerMap>();
+  private int size;
+  public static final int PREFIX_LENGTH = 28;
+
+  @Override
+  public int size() {
+    // TODO: update size when new containers created
+    return size;
+  }
+
+  @Override
+  public boolean contains(Block key) {
+    return getBlockInfoContiguous(key.getBlockId()) != null;
+  }
+
+  @Override
+  public BlockInfoContiguous get(Block key) {
+    return getBlockInfoContiguous(key.getBlockId());
+  }
+
+  @Override
+  public BlockInfoContiguous put(BlockInfoContiguous element) {
+    BlockInfoContiguous info = getBlockInfoContiguous(element.getBlockId());
+    if (info == null) {
+      throw new IllegalStateException(
+          "The containers are created by splitting");
+    }
+    // TODO: replace
+    return info;
+  }
+
+  @Override
+  public BlockInfoContiguous remove(Block key) {
+    // It doesn't remove
+    return getBlockInfoContiguous(key.getBlockId());
+  }
+
+  @Override
+  public void clear() {
+    containerPrefixMap.clear();
+  }
+
+  @Override
+  public Iterator<BlockInfoContiguous> iterator() {
+    // TODO : Support iteration
+    throw new UnsupportedOperationException("");
+  }
+
+  /**
+   * Initialize a new trie for a new bucket.
+   */
+  public synchronized void initPrefix(long prefix) {
+    Preconditions.checkArgument((prefix >>> PREFIX_LENGTH) == 0,
+        "Prefix shouldn't be longer than "+PREFIX_LENGTH+" bits");
+    if (getTrieMap(prefix << (64 - PREFIX_LENGTH)) != null) {
+      // Already initialized
+      return;
+    }
+    BitWiseTrieContainerMap newTrie = new BitWiseTrieContainerMap(prefix,
+        PREFIX_LENGTH);
+    containerPrefixMap.put(prefix, newTrie);
+  }
+
+  @VisibleForTesting
+  synchronized BitWiseTrieContainerMap getTrieMap(long containerId) {
+    long prefix = containerId >>> (64 - PREFIX_LENGTH);
+    return containerPrefixMap.get(prefix);
+  }
+
+  @VisibleForTesting
+  BlockInfoContiguous getBlockInfoContiguous(long containerId) {
+    BitWiseTrieContainerMap map = getTrieMap(containerId);
+    if (map == null) {
+      return null;
+    }
+    return map.get(containerId);
+  }
+
+  public void splitContainer(long key) {
+    BitWiseTrieContainerMap map = getTrieMap(key);
+    if (map == null) {
+      throw new IllegalArgumentException("No container exists");
+    }
+    map.addBit(key);
+  }
+}
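
The id layout this map relies on is easier to see with the shifts written out. A small sketch of the prefix arithmetic, not part of the commit (PrefixSketch is an illustrative name; the constant mirrors StorageContainerMap.PREFIX_LENGTH above):

public class PrefixSketch {
  // Mirrors StorageContainerMap.PREFIX_LENGTH from this patch.
  static final int PREFIX_LENGTH = 28;

  public static void main(String[] args) {
    long containerId = 0xabcdef1111122223L;

    // getTrieMap(): the top 28 bits pick the per-bucket trie; the unsigned
    // shift matters because ids with the top bit set are negative longs.
    long prefix = containerId >>> (64 - PREFIX_LENGTH);     // 0xabcdef1

    // initPrefix() seeds that trie with a root container whose id is the
    // prefix shifted back into the high bits, with the low 36 bits zero.
    long rootId = prefix << (64 - PREFIX_LENGTH);           // 0xabcdef1000000000

    System.out.printf("prefix=%x rootId=%x%n", prefix, rootId);
    // The remaining 36 bits of containerId are then resolved one bit at a time
    // inside that trie, as shown in the BitWiseTrieContainerMap sketch above.
  }
}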

http://git-wip-us.apache.org/repos/asf/hadoop/blob/770ed926/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/storagecontainer/StorageContainerNameService.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/storagecontainer/StorageContainerNameService.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/storagecontainer/StorageContainerNameService.java
new file mode 100644
index 0000000..4c42f11
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/storagecontainer/StorageContainerNameService.java
@@ -0,0 +1,155 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.storagecontainer;
+
+import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguousUnderConstruction;
+import org.apache.hadoop.hdfs.server.namenode.NameNode;
+import org.apache.hadoop.hdfs.server.namenode.Namesystem;
+import org.apache.hadoop.ipc.StandbyException;
+import org.apache.hadoop.security.AccessControlException;
+
+import java.util.concurrent.locks.ReentrantReadWriteLock;
+
+/**
+ * Namesystem implementation to be used by StorageContainerManager.
+ */
+public class StorageContainerNameService implements Namesystem {
+
+  private ReentrantReadWriteLock coarseLock = new ReentrantReadWriteLock();
+  private String blockPoolId;
+  private volatile boolean serviceRunning = true;
+
+  public void shutdown() {
+    serviceRunning = false;
+  }
+
+  @Override
+  public boolean isRunning() {
+    return serviceRunning;
+  }
+
+  @Override
+  public void checkSuperuserPrivilege() throws AccessControlException {
+    // TBD
+  }
+
+  @Override
+  public String getBlockPoolId() {
+    return blockPoolId;
+  }
+
+  public void setBlockPoolId(String id) {
+    this.blockPoolId = id;
+  }
+
+  @Override
+  public boolean isInStandbyState() {
+    // HA mode is not supported
+    return false;
+  }
+
+  @Override
+  public boolean isGenStampInFuture(Block block) {
+    // HA mode is not supported
+    return false;
+  }
+
+  @Override
+  public void adjustSafeModeBlockTotals(int deltaSafe, int deltaTotal) {
+    // TBD
+  }
+
+  @Override
+  public void checkOperation(NameNode.OperationCategory read)
+    throws StandbyException {
+    // HA mode is not supported
+  }
+
+  @Override
+  public boolean isInSnapshot(BlockInfoContiguousUnderConstruction blockUC) {
+    // Snapshots not supported
+    return false;
+  }
+
+  @Override
+  public void readLock() {
+    coarseLock.readLock().lock();
+  }
+
+  @Override
+  public void readUnlock() {
+    coarseLock.readLock().unlock();
+  }
+
+  @Override
+  public boolean hasReadLock() {
+    return coarseLock.getReadHoldCount() > 0 || hasWriteLock();
+  }
+
+  @Override
+  public void writeLock() {
+    coarseLock.writeLock().lock();
+  }
+
+  @Override
+  public void writeLockInterruptibly() throws InterruptedException {
+    coarseLock.writeLock().lockInterruptibly();
+  }
+
+  @Override
+  public void writeUnlock() {
+    coarseLock.writeLock().unlock();
+  }
+
+  @Override
+  public boolean hasWriteLock() {
+    return coarseLock.isWriteLockedByCurrentThread();
+  }
+
+  @Override
+  public void checkSafeMode() {
+    // TBD
+  }
+
+  @Override
+  public boolean isInSafeMode() {
+    return false;
+  }
+
+  @Override
+  public boolean isInStartupSafeMode() {
+    return false;
+  }
+
+  @Override
+  public boolean isPopulatingReplQueues() {
+    return false;
+  }
+
+  @Override
+  public void incrementSafeBlockCount(int replication) {
+    // Do nothing
+  }
+
+  @Override
+  public void decrementSafeBlockCount(Block b) {
+    // Do nothing
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/770ed926/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/storagecontainer/protocol/ContainerLocationProtocol.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/storagecontainer/protocol/ContainerLocationProtocol.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/storagecontainer/protocol/ContainerLocationProtocol.java
new file mode 100644
index 0000000..73552d5
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/storagecontainer/protocol/ContainerLocationProtocol.java
@@ -0,0 +1,27 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.storagecontainer.protocol;
+
+/**
+ * This protocol is used by clients to request container
+ * location.
+ */
+public interface ContainerLocationProtocol {
+
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/770ed926/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/storagecontainer/protocol/StorageContainer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/storagecontainer/protocol/StorageContainer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/storagecontainer/protocol/StorageContainer.java
new file mode 100644
index 0000000..962f767
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/storagecontainer/protocol/StorageContainer.java
@@ -0,0 +1,34 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.storagecontainer.protocol;
+
+import org.apache.hadoop.hdfs.protocol.Block;
+
+/**
+ * Storage Container extends {@link org.apache.hadoop.hdfs.protocol.Block}.
+ */
+public class StorageContainer extends Block {
+  public StorageContainer(long constantPrefix) {
+    super(constantPrefix);
+  }
+
+  public long getContainerId() {
+    return super.getBlockId();
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/770ed926/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/storagecontainer/TestStorageContainerMap.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/storagecontainer/TestStorageContainerMap.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/storagecontainer/TestStorageContainerMap.java
new file mode 100644
index 0000000..731599d
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/storagecontainer/TestStorageContainerMap.java
@@ -0,0 +1,92 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.storagecontainer;
+
+import org.apache.hadoop.storagecontainer.protocol.StorageContainer;
+import org.junit.Assert;
+import org.junit.Test;
+
+public class TestStorageContainerMap {
+
+  @Test
+  public void testTrieMap() {
+    BitWiseTrieContainerMap trie = new BitWiseTrieContainerMap(0x2345fab, 28);
+    long key = 0x2345fab123456679L;
+    Assert.assertEquals(0x2345fab000000000L, trie.get(key).getBlockId());
+    trie.addBit(key);
+    Assert.assertEquals(0x2345fab000000001L, trie.get(key).getBlockId());
+    trie.addBit(key);
+    Assert.assertEquals(0x2345fab000000001L, trie.get(key).getBlockId());
+    trie.addBit(key);
+    Assert.assertEquals(0x2345fab000000001L, trie.get(key).getBlockId());
+    trie.addBit(key);
+    Assert.assertEquals(0x2345fab000000009L, trie.get(key).getBlockId());
+    for (int i = 0; i < 32; i++) {
+      trie.addBit(key);
+    }
+    Assert.assertEquals(0x2345fab123456679L, trie.get(key).getBlockId());
+  }
+
+  @Test
+  public void testTrieMapNegativeKey() {
+    BitWiseTrieContainerMap trie = new BitWiseTrieContainerMap(0xf345fab, 28);
+    long key = 0xf345fab123456679L;
+    for (int i = 0; i < 13; i++) {
+      trie.addBit(key);
+    }
+    Assert.assertEquals(0xf345fab000000679L, trie.get(key).getBlockId());
+  }
+
+  @Test
+  public void testStorageContainer() {
+    StorageContainerMap containerMap = new StorageContainerMap();
+    try {
+      containerMap.initPrefix(0xabcdef12);
+      Assert.fail("Prefix is longer than expected");
+    } catch (IllegalArgumentException ex) {
+      // Expected
+    }
+    containerMap.initPrefix(0xabcdef1);
+    containerMap.initPrefix(0xfffffff);
+    Assert.assertTrue(null == containerMap.getTrieMap(0xabcdef2));
+    containerMap.splitContainer(0xabcdef1111122223L);
+    containerMap.splitContainer(0xabcdef1111122223L);
+    StorageContainer sc = new StorageContainer(0xabcdef1000000003L);
+    Assert.assertEquals(0xabcdef1000000003L, containerMap.get(sc).getBlockId());
+    Assert.assertEquals(0xabcdef1000000003L,
+        containerMap.getTrieMap(0xabcdef1111122223L)
+            .get(0xabcdef1111122223L).getBlockId());
+  }
+
+  @Test
+  public void testStorageContainerMaxSplit() {
+    StorageContainerMap containerMap = new StorageContainerMap();
+    containerMap.initPrefix(0xabcdef1);
+    for (int i = 0; i < 64 - StorageContainerMap.PREFIX_LENGTH; i++) {
+      containerMap.splitContainer(0xabcdef1111122223L);
+    }
+    Assert.assertEquals(0xabcdef1111122223L,
+        containerMap.getTrieMap(0xabcdef1111122223L)
+            .get(0xabcdef1111122223L).getBlockId());
+    try {
+      containerMap.splitContainer(0xabcdef1111122223L);
+      Assert.fail("Exceeding max splits");
+    } catch (IllegalArgumentException expected) {}
+  }
+}
\ No newline at end of file

