From: virajith@apache.org
To: common-commits@hadoop.apache.org
Date: Mon, 09 Jul 2018 18:26:02 -0000
Subject: [10/50] [abbrv] hadoop git commit: HDDS-175. Refactor ContainerInfo to remove Pipeline object from it. Contributed by Ajay Kumar.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7ca4f0ce/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestXceiverClientMetrics.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestXceiverClientMetrics.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestXceiverClientMetrics.java
index d6f5d32..a9781b1 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestXceiverClientMetrics.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestXceiverClientMetrics.java
@@ -32,7 +32,7 @@ import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
     .ContainerCommandRequestProto;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
     .ContainerCommandResponseProto;
-import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
+import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
 import org.apache.hadoop.metrics2.MetricsRecordBuilder;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
@@ -79,14 +79,16 @@ public class TestXceiverClientMetrics {
     OzoneConfiguration conf = new OzoneConfiguration();
     XceiverClientManager clientManager = new XceiverClientManager(conf);
 
-    ContainerInfo container = storageContainerLocationClient
+    ContainerWithPipeline container = storageContainerLocationClient
         .allocateContainer(clientManager.getType(), clientManager.getFactor(),
             containerOwner);
-    XceiverClientSpi client = clientManager.acquireClient(
-        container.getPipeline(), container.getContainerID());
+    XceiverClientSpi client = clientManager
+        .acquireClient(container.getPipeline(),
+            container.getContainerInfo().getContainerID());
     ContainerCommandRequestProto request = ContainerTestHelper
-        .getCreateContainerRequest(container.getContainerID(),
+        .getCreateContainerRequest(
+            container.getContainerInfo().getContainerID(),
             container.getPipeline());
     client.sendCommand(request);
@@ -112,7 +114,7 @@ public class TestXceiverClientMetrics {
       // use async interface for testing pending metrics
       for (int i = 0; i < numRequest; i++) {
         BlockID blockID = ContainerTestHelper.
-            getTestBlockID(container.getContainerID());
+            getTestBlockID(container.getContainerInfo().getContainerID());
         ContainerProtos.ContainerCommandRequestProto smallFileRequest;
 
         smallFileRequest = ContainerTestHelper.getWriteSmallFileRequest(
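For context on the hunks above: allocateContainer now returns a ContainerWithPipeline, which pairs the container metadata with the pipeline that serves it, so the container ID is read through getContainerInfo(). A minimal sketch of the resulting call pattern, reusing the test's own setup names; only the accessors visible in the hunks are taken from the patch, while the Pipeline local and its type are illustrative:

    // Allocate a container; the returned holder carries both halves of
    // what used to be a single ContainerInfo-with-Pipeline object.
    ContainerWithPipeline container = storageContainerLocationClient
        .allocateContainer(clientManager.getType(), clientManager.getFactor(),
            containerOwner);
    // The pipeline is still read directly off the holder ...
    Pipeline pipeline = container.getPipeline();
    // ... while container-level fields now sit one level down.
    long containerID = container.getContainerInfo().getContainerID();
    XceiverClientSpi client = clientManager.acquireClient(pipeline, containerID);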
http://git-wip-us.apache.org/repos/asf/hadoop/blob/7ca4f0ce/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkContainerStateMap.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkContainerStateMap.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkContainerStateMap.java
index 375450c..c344bbe 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkContainerStateMap.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkContainerStateMap.java
@@ -60,7 +60,9 @@ public class BenchMarkContainerStateMap {
     try {
       ContainerInfo containerInfo = new ContainerInfo.Builder()
           .setState(CLOSED)
-          .setPipeline(pipeline)
+          .setPipelineName(pipeline.getPipelineName())
+          .setReplicationType(pipeline.getType())
+          .setReplicationFactor(pipeline.getFactor())
           // This is bytes allocated for blocks inside container, not the
           // container size
           .setAllocatedBytes(0)
@@ -81,7 +83,9 @@ public class BenchMarkContainerStateMap {
     try {
       ContainerInfo containerInfo = new ContainerInfo.Builder()
           .setState(OPEN)
-          .setPipeline(pipeline)
+          .setPipelineName(pipeline.getPipelineName())
+          .setReplicationType(pipeline.getType())
+          .setReplicationFactor(pipeline.getFactor())
           // This is bytes allocated for blocks inside container, not the
           // container size
           .setAllocatedBytes(0)
@@ -101,7 +105,9 @@ public class BenchMarkContainerStateMap {
     try {
       ContainerInfo containerInfo = new ContainerInfo.Builder()
           .setState(OPEN)
-          .setPipeline(pipeline)
+          .setPipelineName(pipeline.getPipelineName())
+          .setReplicationType(pipeline.getType())
+          .setReplicationFactor(pipeline.getFactor())
           // This is bytes allocated for blocks inside container, not the
           // container size
           .setAllocatedBytes(0)
@@ -166,7 +172,9 @@ public class BenchMarkContainerStateMap {
     int cid = state.containerID.incrementAndGet();
     ContainerInfo containerInfo = new ContainerInfo.Builder()
         .setState(CLOSED)
-        .setPipeline(pipeline)
+        .setPipelineName(pipeline.getPipelineName())
+        .setReplicationType(pipeline.getType())
+        .setReplicationFactor(pipeline.getFactor())
        // This is bytes allocated for blocks inside container, not the
        // container size
        .setAllocatedBytes(0)
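All four hunks above make the same substitution: ContainerInfo no longer embeds the Pipeline object, it records the pipeline name and replication settings as plain fields, which presumably keeps the SCM-side container metadata self-contained without dragging the datanode list along. A minimal sketch of the new builder usage; the setters are the ones visible in the hunks, while build() is assumed from the usual Builder convention:

    ContainerInfo containerInfo = new ContainerInfo.Builder()
        .setState(CLOSED)
        // Pipeline identity and replication settings are copied out of
        // the pipeline; the Pipeline object itself is no longer stored.
        .setPipelineName(pipeline.getPipelineName())
        .setReplicationType(pipeline.getType())
        .setReplicationFactor(pipeline.getFactor())
        // Bytes allocated for blocks inside the container, not the
        // container size.
        .setAllocatedBytes(0)
        .build();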
http://git-wip-us.apache.org/repos/asf/hadoop/blob/7ca4f0ce/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/scm/cli/SQLCLI.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/scm/cli/SQLCLI.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/scm/cli/SQLCLI.java
index edc0d7b..26776c5 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/scm/cli/SQLCLI.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/scm/cli/SQLCLI.java
@@ -19,6 +19,7 @@ package org.apache.hadoop.ozone.scm.cli;
 
 import com.google.common.base.Preconditions;
 import com.google.common.primitives.Longs;
+import com.google.protobuf.ByteString;
 import org.apache.commons.cli.BasicParser;
 import org.apache.commons.cli.CommandLine;
 import org.apache.commons.cli.Option;
@@ -37,7 +38,6 @@ import org.apache.hadoop.ozone.protocol.proto.KeySpaceManagerProtocolProtos.KeyI
 import org.apache.hadoop.ozone.protocol.proto.KeySpaceManagerProtocolProtos.VolumeInfo;
 import org.apache.hadoop.ozone.protocol.proto.KeySpaceManagerProtocolProtos.VolumeList;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.Pipeline;
 import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
@@ -86,12 +86,12 @@ public class SQLCLI extends Configured implements Tool {
   private static final String CREATE_CONTAINER_INFO =
       "CREATE TABLE containerInfo (" +
           "containerID LONG PRIMARY KEY NOT NULL, " +
-          "leaderUUID TEXT NOT NULL)";
-  private static final String CREATE_CONTAINER_MEMBERS =
-      "CREATE TABLE containerMembers (" +
-          "containerName TEXT NOT NULL, " +
-          "datanodeUUID TEXT NOT NULL," +
-          "PRIMARY KEY(containerName, datanodeUUID));";
+          "replicationType TEXT NOT NULL," +
+          "replicationFactor TEXT NOT NULL," +
+          "usedBytes LONG NOT NULL," +
+          "allocatedBytes LONG NOT NULL," +
+          "owner TEXT," +
+          "numberOfKeys LONG)";
   private static final String CREATE_DATANODE_INFO =
       "CREATE TABLE datanodeInfo (" +
           "hostName TEXT NOT NULL, " +
@@ -99,8 +99,10 @@ public class SQLCLI extends Configured implements Tool {
           "ipAddress TEXT, " +
           "containerPort INTEGER NOT NULL);";
   private static final String INSERT_CONTAINER_INFO =
-      "INSERT INTO containerInfo (containerID, leaderUUID) " +
-          "VALUES (\"%d\", \"%s\")";
+      "INSERT INTO containerInfo (containerID, replicationType, " +
+          "replicationFactor, usedBytes, allocatedBytes, owner, " +
+          "numberOfKeys) VALUES (\"%d\", \"%s\", \"%s\", \"%d\", \"%d\", " +
+          "\"%s\", \"%d\")";
   private static final String INSERT_DATANODE_INFO =
       "INSERT INTO datanodeInfo (hostname, datanodeUUid, ipAddress, " +
           "containerPort) " +
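To make the schema change concrete: once insertContainerDB (below) runs the new format string through String.format, the emitted statement has the following shape, with illustrative values. Note that the format string quotes the numeric fields too; SQLite accepts this and coerces the values under its column affinity rules:

    INSERT INTO containerInfo (containerID, replicationType,
        replicationFactor, usedBytes, allocatedBytes, owner,
        numberOfKeys) VALUES ("1", "RATIS", "THREE", "0", "0",
        "hadoop", "0")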
@@ -469,10 +471,7 @@ public class SQLCLI extends Configured implements Tool {
             .setConf(conf).setDbFile(dbFile).build();
         Connection conn = connectDB(outPath.toString())) {
       executeSQL(conn, CREATE_CONTAINER_INFO);
-      executeSQL(conn, CREATE_CONTAINER_MEMBERS);
-      executeSQL(conn, CREATE_DATANODE_INFO);
-      HashSet<String> uuidChecked = new HashSet<>();
 
       dbStore.iterate(null, (key, value) -> {
         long containerID = Longs.fromByteArray(key);
         ContainerInfo containerInfo = null;
@@ -481,8 +480,7 @@
         Preconditions.checkNotNull(containerInfo);
         try {
           //TODO: include container state to sqllite schema
-          insertContainerDB(conn, containerID,
-              containerInfo.getPipeline().getProtobufMessage(), uuidChecked);
+          insertContainerDB(conn, containerInfo, containerID);
           return true;
         } catch (SQLException e) {
           throw new IOException(e);
@@ -494,38 +492,23 @@
   /**
    * Insert into the sqlite DB of container.db.
    * @param conn the connection to the sqlite DB.
-   * @param containerID the id of the container.
-   * @param pipeline the actual container pipeline object.
-   * @param uuidChecked the uuid that has been already inserted.
+   * @param containerInfo
+   * @param containerID
    * @throws SQLException throws exception.
    */
-  private void insertContainerDB(Connection conn, long containerID,
-      Pipeline pipeline, Set<String> uuidChecked) throws SQLException {
+  private void insertContainerDB(Connection conn, ContainerInfo containerInfo,
+      long containerID) throws SQLException {
     LOG.info("Insert to sql container db, for container {}", containerID);
     String insertContainerInfo = String.format(
         INSERT_CONTAINER_INFO, containerID,
-        pipeline.getLeaderID());
-    executeSQL(conn, insertContainerInfo);
+        containerInfo.getReplicationType(),
+        containerInfo.getReplicationFactor(),
+        containerInfo.getUsedBytes(),
+        containerInfo.getAllocatedBytes(),
+        containerInfo.getOwner(),
+        containerInfo.getNumberOfKeys());
 
-    for (HddsProtos.DatanodeDetailsProto dd :
-        pipeline.getMembersList()) {
-      String uuid = dd.getUuid();
-      if (!uuidChecked.contains(uuid)) {
-        // we may also not use this checked set, but catch exception instead
-        // but this seems a bit cleaner.
-        String ipAddr = dd.getIpAddress();
-        String hostName = dd.getHostName();
-        int containerPort = DatanodeDetails.getFromProtoBuf(dd)
-            .getPort(DatanodeDetails.Port.Name.STANDALONE).getValue();
-        String insertMachineInfo = String.format(
-            INSERT_DATANODE_INFO, hostName, uuid, ipAddr, containerPort);
-        executeSQL(conn, insertMachineInfo);
-        uuidChecked.add(uuid);
-      }
-      String insertContainerMembers = String.format(
-          INSERT_CONTAINER_MEMBERS, containerID, uuid);
-      executeSQL(conn, insertContainerMembers);
-    }
+    executeSQL(conn, insertContainerInfo);
     LOG.info("Insertion completed.");
   }
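Once a container.db has been converted, the rewritten table can be inspected with plain JDBC. A self-contained sketch, assuming an SQLite JDBC driver on the classpath and an illustrative database path; only the table and column names come from the schema in this patch:

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.ResultSet;
    import java.sql.Statement;

    public class DumpContainerInfo {
      public static void main(String[] args) throws Exception {
        // Path is illustrative; point it at the SQLite file SQLCLI wrote.
        try (Connection conn =
                 DriverManager.getConnection("jdbc:sqlite:/tmp/container.db");
             Statement stmt = conn.createStatement();
             ResultSet rs = stmt.executeQuery(
                 "SELECT containerID, replicationType, replicationFactor, "
                     + "usedBytes, allocatedBytes, owner, numberOfKeys "
                     + "FROM containerInfo")) {
          while (rs.next()) {
            // Numeric fields were inserted as quoted strings; SQLite's
            // NUMERIC affinity hands them back as numbers here.
            System.out.printf("container %d: type=%s factor=%s owner=%s%n",
                rs.getLong("containerID"),
                rs.getString("replicationType"),
                rs.getString("replicationFactor"),
                rs.getString("owner"));
          }
        }
      }
    }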