hadoop-common-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From cnaur...@apache.org
Subject [1/2] hadoop git commit: HDFS-10195. Ozone: Add container persistence. Contributed by Anu Engineer.
Date Sat, 26 Mar 2016 18:40:16 GMT
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7240 2dc48b7f1 -> 643c5e5bd


http://git-wip-us.apache.org/repos/asf/hadoop/blob/643c5e5b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java
new file mode 100644
index 0000000..3b498e2
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java
@@ -0,0 +1,256 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.container.common.impl;
+
+import org.apache.commons.io.FileUtils;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
+import org.apache.hadoop.ozone.OzoneConfigKeys;
+import org.apache.hadoop.ozone.OzoneConfiguration;
+import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.ozone.container.common.helpers.ContainerData;
+import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils;
+import org.apache.hadoop.ozone.container.common.utils.LevelDBStore;
+import org.apache.hadoop.ozone.web.utils.OzoneUtils;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import java.io.File;
+import java.io.IOException;
+import java.net.URL;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.util.HashMap;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+
+import static org.apache.hadoop.ozone.container.ContainerTestHelper
+    .createSingleNodePipeline;
+import static org.junit.Assert.fail;
+
+/**
+ * Simple tests to verify that container persistence works as expected:
+ * create, duplicate-create rejection, delete (with name re-use), and
+ * paged listing of containers.
+ */
+public class TestContainerPersistence {
+
+  static String path;
+  static ContainerManagerImpl containerManager;
+  static OzoneConfiguration conf;
+  static FsDatasetSpi fsDataSet;
+  static MiniDFSCluster cluster;
+  static List<Path> pathLists = new LinkedList<>();
+
+  @BeforeClass
+  public static void init() throws IOException {
+    conf = new OzoneConfiguration();
+    URL p = conf.getClass().getResource("");
+    path = p.getPath().concat(
+        TestContainerPersistence.class.getSimpleName());
+    path += conf.getTrimmed(OzoneConfigKeys.DFS_STORAGE_LOCAL_ROOT,
+        OzoneConfigKeys.DFS_STORAGE_LOCAL_ROOT_DEFAULT);
+    conf.set(OzoneConfigKeys.DFS_STORAGE_LOCAL_ROOT, path);
+    conf.setBoolean(OzoneConfigKeys.DFS_OBJECTSTORE_ENABLED_KEY, true);
+    conf.set(OzoneConfigKeys.DFS_STORAGE_HANDLER_TYPE_KEY, "local");
+
+    // Start from a clean storage root so earlier runs cannot leak state.
+    File containerDir = new File(path);
+    if (containerDir.exists()) {
+      FileUtils.deleteDirectory(containerDir);
+    }
+    Assert.assertTrue(containerDir.mkdirs());
+
+    cluster = new MiniDFSCluster.Builder(conf).build();
+    cluster.waitActive();
+    fsDataSet = cluster.getDataNodes().get(0).getFSDataset();
+    containerManager = new ContainerManagerImpl();
+  }
+
+  @AfterClass
+  public static void shutdown() throws IOException {
+    // Delete the storage root even if cluster shutdown throws.
+    try {
+      cluster.shutdown();
+    } finally {
+      FileUtils.deleteDirectory(new File(path));
+    }
+  }
+
+  @Before
+  public void setupPaths() throws IOException {
+    if (!new File(path).exists()) {
+      new File(path).mkdirs();
+    }
+    pathLists.clear();
+    containerManager.getContainerMap().clear();
+    pathLists.add(Paths.get(path));
+    containerManager.init(conf, pathLists, fsDataSet);
+  }
+
+  @After
+  public void cleanupDir() throws IOException {
+    FileUtils.deleteDirectory(new File(path));
+  }
+
+  /**
+   * Creates a container with the given name and the standard test
+   * metadata, and registers it with the container manager.
+   *
+   * @param containerName name of the container to create.
+   * @return the ContainerData that was created.
+   * @throws Exception if container creation fails.
+   */
+  private static ContainerData createTestContainer(String containerName)
+      throws Exception {
+    ContainerData data = new ContainerData(containerName);
+    data.addMetadata("VOLUME", "shire");
+    data.addMetadata("owner", "bilbo");
+    containerManager.createContainer(createSingleNodePipeline(), data);
+    return data;
+  }
+
+  @Test
+  public void testCreateContainer() throws Exception {
+    String containerName = OzoneUtils.getRequestID();
+    createTestContainer(containerName);
+    Assert.assertTrue(containerManager.getContainerMap()
+        .containsKey(containerName));
+    ContainerManagerImpl.ContainerStatus status = containerManager
+        .getContainerMap().get(containerName);
+
+    Assert.assertTrue(status.isActive());
+    Assert.assertNotNull(status.getContainer().getContainerPath());
+    Assert.assertNotNull(status.getContainer().getDBPath());
+    Assert.assertTrue(new File(status.getContainer().getContainerPath())
+        .exists());
+
+    // The container metadata file must exist alongside the container file.
+    String containerPathString = ContainerUtils.getContainerNameFromFile(new
+        File(status.getContainer().getContainerPath()));
+    Path meta = Paths.get(containerPathString);
+    String metadataFile = meta.toString() + OzoneConsts.CONTAINER_META;
+    Assert.assertTrue(new File(metadataFile).exists());
+
+    // The container DB must be openable as a LevelDB store.
+    String dbPath = status.getContainer().getDBPath();
+    LevelDBStore store = null;
+    try {
+      store = new LevelDBStore(new File(dbPath), false);
+      Assert.assertNotNull(store.getDB());
+    } finally {
+      if (store != null) {
+        store.close();
+      }
+    }
+  }
+
+  @Test
+  public void testCreateDuplicateContainer() throws Exception {
+    String containerName = OzoneUtils.getRequestID();
+
+    ContainerData data = createTestContainer(containerName);
+    try {
+      // Creating the same container a second time must be rejected.
+      containerManager.createContainer(createSingleNodePipeline(), data);
+      fail("Expected Exception not thrown.");
+    } catch (IOException ex) {
+      Assert.assertNotNull(ex);
+    }
+  }
+
+  @Test
+  public void testDeleteContainer() throws Exception {
+    String containerName1 = OzoneUtils.getRequestID();
+    String containerName2 = OzoneUtils.getRequestID();
+
+    createTestContainer(containerName1);
+    createTestContainer(containerName2);
+
+    Assert.assertTrue(containerManager.getContainerMap()
+        .containsKey(containerName1));
+    Assert.assertTrue(containerManager.getContainerMap()
+        .containsKey(containerName2));
+
+    containerManager.deleteContainer(createSingleNodePipeline(),
+        containerName1);
+    Assert.assertFalse(containerManager.getContainerMap()
+        .containsKey(containerName1));
+
+    // Let us make sure that we are able to re-use a container name after
+    // delete.
+    createTestContainer(containerName1);
+
+    // Assert we still have both containers.
+    Assert.assertTrue(containerManager.getContainerMap()
+        .containsKey(containerName1));
+    Assert.assertTrue(containerManager.getContainerMap()
+        .containsKey(containerName2));
+  }
+
+  /**
+   * This test creates 1000 containers and reads them back 5 containers at a
+   * time and verifies that we did get back all containers.
+   *
+   * @throws Exception if container creation or listing fails.
+   */
+  @Test
+  public void testListContainer() throws Exception {
+    final int count = 1000;
+    final int step = 5;
+
+    Map<String, ContainerData> testMap = new HashMap<>();
+    for (int x = 0; x < count; x++) {
+      String containerName = OzoneUtils.getRequestID();
+      ContainerData data = createTestContainer(containerName);
+      testMap.put(containerName, data);
+    }
+
+    int counter = 0;
+    String prevKey = "";
+    List<ContainerData> results = new LinkedList<>();
+    while (counter < count) {
+      containerManager.listContainer(prevKey, step, results);
+      // Guard against an empty page before reading the last element below.
+      Assert.assertFalse(results.isEmpty());
+      for (int y = 0; y < results.size(); y++) {
+        testMap.remove(results.get(y).getContainerName());
+      }
+      counter += step;
+      String nextKey = results.get(results.size() - 1).getContainerName();
+
+      //Assert that container is returning results in a sorted fashion.
+      Assert.assertTrue(prevKey.compareTo(nextKey) < 0);
+      prevKey = nextKey;
+      results.clear();
+    }
+    // Assert that we listed all the keys that we had put into
+    // container.
+    Assert.assertTrue(testMap.isEmpty());
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/643c5e5b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java
new file mode 100644
index 0000000..5beb8b9
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java
@@ -0,0 +1,118 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.container.ozoneimpl;
+
+import org.apache.commons.io.FileUtils;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.ozone.protocol.proto.ContainerProtos;
+import org.apache.hadoop.ozone.OzoneConfigKeys;
+import org.apache.hadoop.ozone.OzoneConfiguration;
+import org.apache.hadoop.ozone.container.ContainerTestHelper;
+import org.apache.hadoop.ozone.container.common.helpers.Pipeline;
+import org.apache.hadoop.ozone.container.common.impl.ContainerManagerImpl;
+import org.apache.hadoop.ozone.container.common.transport.client.XceiverClient;
+import org.junit.AfterClass;
+import org.junit.Assert;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import java.io.File;
+import java.io.IOException;
+import java.net.URL;
+
+
+/**
+ * Tests starting an Ozone container standalone and via the datanode, and
+ * verifies that a create-container command round-trips through the client.
+ */
+public class TestOzoneContainer {
+
+  /**
+   * Builds an Ozone configuration whose local storage root is scoped to
+   * this test class and that uses the "local" storage handler.
+   *
+   * @return the prepared configuration.
+   */
+  private static Configuration newTestConfiguration() {
+    Configuration conf = new OzoneConfiguration();
+    URL p = conf.getClass().getResource("");
+    String path = p.getPath().concat(
+        TestOzoneContainer.class.getSimpleName());
+    path += conf.getTrimmed(OzoneConfigKeys.DFS_STORAGE_LOCAL_ROOT,
+        OzoneConfigKeys.DFS_STORAGE_LOCAL_ROOT_DEFAULT);
+    conf.set(OzoneConfigKeys.DFS_STORAGE_LOCAL_ROOT, path);
+    conf.set(OzoneConfigKeys.DFS_STORAGE_HANDLER_TYPE_KEY, "local");
+    return conf;
+  }
+
+  @Test
+  public void testCreateOzoneContainer() throws Exception {
+    Configuration conf = newTestConfiguration();
+
+    // We don't start Ozone Container via data node, we will do it
+    // independently in our test path.
+    conf.setBoolean(OzoneConfigKeys.DFS_OBJECTSTORE_ENABLED_KEY, false);
+
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
+    OzoneContainer container = null;
+    try {
+      cluster.waitActive();
+
+      Pipeline pipeline = ContainerTestHelper.createSingleNodePipeline();
+      conf.setInt(OzoneConfigKeys.DFS_OZONE_CONTAINER_IPC_PORT,
+          pipeline.getLeader().getContainerPort());
+      container = new OzoneContainer(conf,
+          cluster.getDataNodes().get(0).getFSDataset());
+      container.start();
+
+      XceiverClient client = new XceiverClient(pipeline, conf);
+      client.connect();
+      ContainerProtos.ContainerCommandRequestProto request =
+          ContainerTestHelper.getCreateContainerRequest();
+      ContainerProtos.ContainerCommandResponseProto response =
+          client.sendCommand(request);
+      Assert.assertNotNull(response);
+      Assert.assertEquals(request.getTraceID(), response.getTraceID());
+    } finally {
+      // Release the container and cluster even if an assertion fails,
+      // otherwise later tests inherit a running MiniDFSCluster.
+      if (container != null) {
+        container.stop();
+      }
+      cluster.shutdown();
+    }
+  }
+
+  @Test
+  public void testOzoneContainerViaDataNode() throws Exception {
+    Configuration conf = newTestConfiguration();
+
+    // Start ozone container Via Datanode create, so the IPC port must be
+    // configured before the cluster is built.
+    conf.setBoolean(OzoneConfigKeys.DFS_OBJECTSTORE_ENABLED_KEY, true);
+
+    Pipeline pipeline = ContainerTestHelper.createSingleNodePipeline();
+    conf.setInt(OzoneConfigKeys.DFS_OZONE_CONTAINER_IPC_PORT,
+        pipeline.getLeader().getContainerPort());
+
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
+    try {
+      cluster.waitActive();
+
+      // This client talks to ozone container via datanode.
+      XceiverClient client = new XceiverClient(pipeline, conf);
+      client.connect();
+      ContainerProtos.ContainerCommandRequestProto request =
+          ContainerTestHelper.getCreateContainerRequest();
+      ContainerProtos.ContainerCommandResponseProto response =
+          client.sendCommand(request);
+      Assert.assertNotNull(response);
+      Assert.assertEquals(request.getTraceID(), response.getTraceID());
+    } finally {
+      // Always tear down the cluster, even on assertion failure.
+      cluster.shutdown();
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/643c5e5b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/web/TestBucketInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/web/TestBucketInfo.java
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/web/TestBucketInfo.java
index acf1d93..7599846 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/web/TestBucketInfo.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/web/TestBucketInfo.java
@@ -22,7 +22,7 @@ package org.apache.hadoop.ozone.web;
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.ozone.web.request.OzoneAcl;
 import org.apache.hadoop.ozone.web.response.BucketInfo;
-import org.apache.hadoop.ozone.web.utils.OzoneConsts;
+import org.apache.hadoop.ozone.OzoneConsts;
 import org.junit.Test;
 
 import java.io.IOException;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/643c5e5b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/web/TestOzoneVolumes.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/web/TestOzoneVolumes.java
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/web/TestOzoneVolumes.java
index 877a33d..fd0ed36 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/web/TestOzoneVolumes.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/web/TestOzoneVolumes.java
@@ -23,7 +23,7 @@ import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.ozone.OzoneConfiguration;
 import org.apache.hadoop.ozone.web.exceptions.ErrorTable;
 import org.apache.hadoop.ozone.web.headers.Header;
-import org.apache.hadoop.ozone.web.utils.OzoneConsts;
+import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.web.utils.OzoneUtils;
 import org.apache.hadoop.util.Time;
 import org.apache.http.HttpResponse;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/643c5e5b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/web/TestOzoneWebAccess.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/web/TestOzoneWebAccess.java
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/web/TestOzoneWebAccess.java
index 857ef34..1c2ae75 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/web/TestOzoneWebAccess.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/web/TestOzoneWebAccess.java
@@ -29,13 +29,12 @@ import java.util.Date;
 import java.util.Locale;
 import javax.ws.rs.core.HttpHeaders;
 
-import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.ozone.OzoneConfiguration;
 import org.apache.hadoop.ozone.web.headers.Header;
-import org.apache.hadoop.ozone.web.utils.OzoneConsts;
+import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.util.Time;
 
 import org.apache.http.HttpResponse;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/643c5e5b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/web/client/TestVolume.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/web/client/TestVolume.java
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/web/client/TestVolume.java
index 45b8795..3f50e91 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/web/client/TestVolume.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/web/client/TestVolume.java
@@ -24,12 +24,11 @@ import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.ozone.OzoneConfiguration;
 import org.apache.hadoop.ozone.web.exceptions.OzoneException;
 import org.apache.hadoop.ozone.web.request.OzoneQuota;
-import org.apache.hadoop.ozone.web.utils.OzoneConsts;
+import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.web.utils.OzoneUtils;
 import org.apache.log4j.Level;
 import org.apache.log4j.Logger;
 import org.junit.AfterClass;
-import org.junit.Assert;
 import org.junit.BeforeClass;
 import org.junit.Test;
 


Mime
View raw message