From: msingh@apache.org
To: common-commits@hadoop.apache.org
Subject: hadoop git commit: HDFS-13022. Block Storage: Kubernetes dynamic persistent volume provisioner. Contributed by Elek, Marton.
Date: Sun, 11 Feb 2018 16:00:27 +0000 (UTC)

Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7240 ea5c79285 -> eb5e66a1c


HDFS-13022. Block Storage: Kubernetes dynamic persistent volume provisioner. Contributed by Elek, Marton.
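As background for reviewers, a minimal sketch (not part of the patch) of how the new provisioner is switched on and driven, mirroring the CBlockManager changes further down; the wrapper class and method names are illustrative only, the StorageManager instance is assumed to come from the running CBlock service, and the kubeconfig path is a placeholder. The full patch metadata and diff continue below.

import java.io.IOException;

import org.apache.hadoop.cblock.kubernetes.DynamicProvisioner;
import org.apache.hadoop.cblock.storage.StorageManager;
import org.apache.hadoop.conf.OzoneConfiguration;

import static org.apache.hadoop.cblock.CBlockConfigKeys
    .DFS_CBLOCK_KUBERNETES_CONFIG_FILE_KEY;
import static org.apache.hadoop.cblock.CBlockConfigKeys
    .DFS_CBLOCK_KUBERNETES_DYNAMIC_PROVISIONER_ENABLED;

public class ProvisionerLifecycleSketch {

  // Illustrative only: CBlockManager performs the same steps itself when the
  // enabled flag below is true; they are spelled out here for standalone reading.
  static DynamicProvisioner startProvisioner(OzoneConfiguration conf,
      StorageManager storageManager) throws IOException {

    // Flag read by CBlockManager to decide whether to create the provisioner.
    conf.setBoolean(DFS_CBLOCK_KUBERNETES_DYNAMIC_PROVISIONER_ENABLED, true);

    // Optional kubeconfig location (placeholder path); when unset, init()
    // falls back to the in-cluster service account configuration.
    conf.set(DFS_CBLOCK_KUBERNETES_CONFIG_FILE_KEY, "/path/to/kubeconfig");

    DynamicProvisioner provisioner =
        new DynamicProvisioner(conf, storageManager);
    provisioner.init();   // builds the kubernetes client and the watcher thread
    provisioner.start();  // begins watching PersistentVolumeClaims
    return provisioner;   // call stop() on shutdown, as CBlockManager.stop() does
  }
}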
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/eb5e66a1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/eb5e66a1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/eb5e66a1

Branch: refs/heads/HDFS-7240
Commit: eb5e66a1c46f3b542b22ee1e2046d1c728abc479
Parents: ea5c792
Author: Mukul Kumar Singh
Authored: Sun Feb 11 21:29:29 2018 +0530
Committer: Mukul Kumar Singh
Committed: Sun Feb 11 21:29:29 2018 +0530

----------------------------------------------------------------------
 .../apache/hadoop/conf/OzonePropertyTag.java    |   3 +-
 .../apache/hadoop/cblock/CBlockConfigKeys.java  |  20 ++
 hadoop-hdfs-project/hadoop-hdfs/pom.xml         |  15 +
 .../org/apache/hadoop/cblock/CBlockManager.java |  26 +-
 .../org/apache/hadoop/cblock/cli/CBlockCli.java |  11 +-
 .../cblock/kubernetes/DynamicProvisioner.java   | 330 +++++++++++++++++++
 .../hadoop/cblock/kubernetes/package-info.java  |  23 ++
 .../hadoop/cblock/storage/StorageManager.java   |   2 +
 .../src/main/resources/ozone-default.xml        |  45 +++
 .../kubernetes/TestDynamicProvisioner.java      |  73 ++++
 .../dynamicprovisioner/expected1-pv.json        |  37 +++
 .../dynamicprovisioner/input1-pvc.json          |  38 +++
 hadoop-minicluster/pom.xml                      |   6 +
 13 files changed, 624 insertions(+), 5 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/eb5e66a1/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/OzonePropertyTag.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/OzonePropertyTag.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/OzonePropertyTag.java
index c1e6cbd..e725ac8 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/OzonePropertyTag.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/OzonePropertyTag.java
@@ -44,5 +44,6 @@ public enum OzonePropertyTag implements PropertyTag {
   REST,
   STORAGE,
   PIPELINE,
-  STANDALONE
+  STANDALONE,
+  KUBERNETES
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/eb5e66a1/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/cblock/CBlockConfigKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/cblock/CBlockConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/cblock/CBlockConfigKeys.java
index fb8f17e..fde728c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/cblock/CBlockConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/cblock/CBlockConfigKeys.java
@@ -195,6 +195,26 @@ public final class CBlockConfigKeys {
   public static final int
       DFS_CBLOCK_ISCSI_ADVERTISED_PORT_DEFAULT = 3260;
+
+
+  public static final String
+      DFS_CBLOCK_KUBERNETES_DYNAMIC_PROVISIONER_ENABLED
+      = "dfs.cblock.kubernetes.dynamic-provisioner.enabled";
+
+  public static final boolean
+      DFS_CBLOCK_KUBERNETES_DYNAMIC_PROVISIONER_ENABLED_DEFAULT = false;
+
+  public static final String
+      DFS_CBLOCK_KUBERNETES_CBLOCK_USER =
+      "dfs.cblock.kubernetes.cblock-user";
+
+  public static final String
+      DFS_CBLOCK_KUBERNETES_CBLOCK_USER_DEFAULT =
+      "iqn.2001-04.org.apache.hadoop";
+
+  public static final String
+      DFS_CBLOCK_KUBERNETES_CONFIG_FILE_KEY =
"dfs.cblock.kubernetes.configfile"; + private CBlockConfigKeys() { } http://git-wip-us.apache.org/repos/asf/hadoop/blob/eb5e66a1/hadoop-hdfs-project/hadoop-hdfs/pom.xml ---------------------------------------------------------------------- diff --git a/hadoop-hdfs-project/hadoop-hdfs/pom.xml b/hadoop-hdfs-project/hadoop-hdfs/pom.xml index bdaa789..afede4d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/pom.xml +++ b/hadoop-hdfs-project/hadoop-hdfs/pom.xml @@ -231,6 +231,21 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd"> 3.8.7 + io.kubernetes + client-java + 1.0.0-beta1 + + + io.swagger + swagger-annotations + + + com.github.stefanbirkner + system-rules + + + + org.apache.curator curator-test test http://git-wip-us.apache.org/repos/asf/hadoop/blob/eb5e66a1/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/cblock/CBlockManager.java ---------------------------------------------------------------------- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/cblock/CBlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/cblock/CBlockManager.java index 2bfbd89..62e89f0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/cblock/CBlockManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/cblock/CBlockManager.java @@ -19,6 +19,7 @@ package org.apache.hadoop.cblock; import com.google.common.annotations.VisibleForTesting; import com.google.protobuf.BlockingService; +import org.apache.hadoop.cblock.kubernetes.DynamicProvisioner; import org.apache.hadoop.cblock.meta.VolumeDescriptor; import org.apache.hadoop.cblock.meta.VolumeInfo; import org.apache.hadoop.cblock.proto.CBlockClientProtocol; @@ -62,7 +63,6 @@ import java.nio.charset.Charset; import java.util.ArrayList; import java.util.List; import java.util.Map; -import java.util.UUID; import static org.apache.hadoop.cblock.CBlockConfigKeys .DFS_CBLOCK_CONTAINER_SIZE_GB_DEFAULT; @@ -92,6 +92,11 @@ import static org.apache.hadoop.cblock.CBlockConfigKeys .DFS_CBLOCK_SERVICE_LEVELDB_PATH_DEFAULT; import static org.apache.hadoop.cblock.CBlockConfigKeys .DFS_CBLOCK_SERVICE_LEVELDB_PATH_KEY; +import static org.apache.hadoop.cblock.CBlockConfigKeys + .DFS_CBLOCK_KUBERNETES_DYNAMIC_PROVISIONER_ENABLED; +import static org.apache.hadoop.cblock.CBlockConfigKeys + .DFS_CBLOCK_KUBERNETES_DYNAMIC_PROVISIONER_ENABLED_DEFAULT; + /** * The main entry point of CBlock operations, ALL the CBlock operations @@ -119,6 +124,8 @@ public class CBlockManager implements CBlockServiceProtocol, private final LevelDBStore levelDBStore; private final String dbPath; + private final DynamicProvisioner kubernetesDynamicProvisioner; + private Charset encoding = Charset.forName("UTF-8"); public CBlockManager(OzoneConfiguration conf, @@ -179,17 +186,34 @@ public class CBlockManager implements CBlockServiceProtocol, DFS_CBLOCK_JSCSIRPC_ADDRESS_KEY, serverRpcAddr, cblockServer); LOG.info("CBlock server listening for client commands on: {}", cblockServerRpcAddress); + + if (conf.getBoolean(DFS_CBLOCK_KUBERNETES_DYNAMIC_PROVISIONER_ENABLED, + DFS_CBLOCK_KUBERNETES_DYNAMIC_PROVISIONER_ENABLED_DEFAULT)) { + + kubernetesDynamicProvisioner = + new DynamicProvisioner(conf, storageManager); + kubernetesDynamicProvisioner.init(); + + } else { + kubernetesDynamicProvisioner = null; + } } public void start() { cblockService.start(); cblockServer.start(); + if (kubernetesDynamicProvisioner != null) { + kubernetesDynamicProvisioner.start(); + } LOG.info("CBlock manager started!"); } 
public void stop() { cblockService.stop(); cblockServer.stop(); + if (kubernetesDynamicProvisioner != null) { + kubernetesDynamicProvisioner.stop(); + } } public void join() { http://git-wip-us.apache.org/repos/asf/hadoop/blob/eb5e66a1/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/cblock/cli/CBlockCli.java ---------------------------------------------------------------------- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/cblock/cli/CBlockCli.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/cblock/cli/CBlockCli.java index 03f80cd..af0c1db 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/cblock/cli/CBlockCli.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/cblock/cli/CBlockCli.java @@ -208,7 +208,7 @@ public class CBlockCli extends Configured implements Tool { System.exit(res); } - private long parseSize(String volumeSizeArgs) throws IOException { + public static long parseSize(String volumeSizeArgs) throws IOException { long multiplier = 1; Pattern p = Pattern.compile("([0-9]+)([a-zA-Z]+)"); @@ -221,9 +221,14 @@ public class CBlockCli extends Configured implements Tool { int size = Integer.parseInt(m.group(1)); String s = m.group(2); - if (s.equalsIgnoreCase("GB")) { + if (s.equalsIgnoreCase("MB") || + s.equalsIgnoreCase("Mi")) { + multiplier = 1024L * 1024; + } else if (s.equalsIgnoreCase("GB") || + s.equalsIgnoreCase("Gi")) { multiplier = 1024L * 1024 * 1024; - } else if (s.equalsIgnoreCase("TB")) { + } else if (s.equalsIgnoreCase("TB") || + s.equalsIgnoreCase("Ti")) { multiplier = 1024L * 1024 * 1024 * 1024; } else { throw new IOException("Invalid volume size args " + volumeSizeArgs); http://git-wip-us.apache.org/repos/asf/hadoop/blob/eb5e66a1/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/cblock/kubernetes/DynamicProvisioner.java ---------------------------------------------------------------------- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/cblock/kubernetes/DynamicProvisioner.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/cblock/kubernetes/DynamicProvisioner.java new file mode 100644 index 0000000..93ed005 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/cblock/kubernetes/DynamicProvisioner.java @@ -0,0 +1,330 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.cblock.kubernetes; + +import com.google.gson.reflect.TypeToken; +import com.squareup.okhttp.RequestBody; +import io.kubernetes.client.ApiClient; +import io.kubernetes.client.ApiException; +import io.kubernetes.client.Configuration; +import io.kubernetes.client.apis.CoreV1Api; +import io.kubernetes.client.models.V1ISCSIVolumeSource; +import io.kubernetes.client.models.V1ObjectMeta; +import io.kubernetes.client.models.V1ObjectReference; +import io.kubernetes.client.models.V1PersistentVolume; +import io.kubernetes.client.models.V1PersistentVolumeClaim; +import io.kubernetes.client.models.V1PersistentVolumeSpec; +import io.kubernetes.client.util.Config; +import io.kubernetes.client.util.Watch; +import okio.Buffer; +import org.apache.hadoop.cblock.cli.CBlockCli; +import org.apache.hadoop.cblock.exception.CBlockException; +import org.apache.hadoop.cblock.proto.MountVolumeResponse; +import org.apache.hadoop.cblock.storage.StorageManager; +import org.apache.hadoop.conf.OzoneConfiguration; +import org.apache.ratis.shaded.com.google.common.annotations.VisibleForTesting; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.net.SocketTimeoutException; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.Map; +import java.util.concurrent.TimeUnit; +import static org.apache.hadoop.cblock.CBlockConfigKeys + .DFS_CBLOCK_ISCSI_ADVERTISED_IP; +import static org.apache.hadoop.cblock.CBlockConfigKeys + .DFS_CBLOCK_ISCSI_ADVERTISED_PORT; +import static org.apache.hadoop.cblock.CBlockConfigKeys + .DFS_CBLOCK_ISCSI_ADVERTISED_PORT_DEFAULT; +import static org.apache.hadoop.cblock.CBlockConfigKeys + .DFS_CBLOCK_JSCSI_SERVER_ADDRESS_DEFAULT; +import static org.apache.hadoop.cblock.CBlockConfigKeys + .DFS_CBLOCK_JSCSI_SERVER_ADDRESS_KEY; +import static org.apache.hadoop.cblock.CBlockConfigKeys + .DFS_CBLOCK_KUBERNETES_CBLOCK_USER; +import static org.apache.hadoop.cblock.CBlockConfigKeys + .DFS_CBLOCK_KUBERNETES_CBLOCK_USER_DEFAULT; +import static org.apache.hadoop.cblock.CBlockConfigKeys + .DFS_CBLOCK_KUBERNETES_CONFIG_FILE_KEY; + +/** + * Kubernetes Dynamic Persistent Volume provisioner. + * + * Listens on the kubernetes feed and creates the appropriate cblock AND + * kubernetes PersistentVolume according to the created PersistentVolumeClaims. 
+ */ +public class DynamicProvisioner implements Runnable{ + + protected static final Logger LOGGER = + LoggerFactory.getLogger(DynamicProvisioner.class); + + private static final String STORAGE_CLASS = "cblock"; + + private static final String PROVISIONER_ID = "hadoop.apache.org/cblock"; + private static final String KUBERNETES_PROVISIONER_KEY = + "volume.beta.kubernetes.io/storage-provisioner"; + private static final String KUBERNETES_BIND_COMPLETED_KEY = + "pv.kubernetes.io/bind-completed"; + + private boolean running = true; + + private final StorageManager storageManager; + + private String kubernetesConfigFile; + + private String externalIp; + + private int externalPort; + + private String cblockUser; + + private CoreV1Api api; + + private ApiClient client; + + private Thread watcherThread; + + public DynamicProvisioner(OzoneConfiguration ozoneConf, + StorageManager storageManager) throws IOException { + this.storageManager = storageManager; + + kubernetesConfigFile = ozoneConf + .getTrimmed(DFS_CBLOCK_KUBERNETES_CONFIG_FILE_KEY); + + String jscsiServerAddress = ozoneConf + .get(DFS_CBLOCK_JSCSI_SERVER_ADDRESS_KEY, + DFS_CBLOCK_JSCSI_SERVER_ADDRESS_DEFAULT); + + externalIp = ozoneConf. + getTrimmed(DFS_CBLOCK_ISCSI_ADVERTISED_IP, jscsiServerAddress); + + externalPort = ozoneConf. + getInt(DFS_CBLOCK_ISCSI_ADVERTISED_PORT, + DFS_CBLOCK_ISCSI_ADVERTISED_PORT_DEFAULT); + + cblockUser = ozoneConf.getTrimmed(DFS_CBLOCK_KUBERNETES_CBLOCK_USER, + DFS_CBLOCK_KUBERNETES_CBLOCK_USER_DEFAULT); + + + } + + public void init() throws IOException { + if (kubernetesConfigFile != null) { + client = Config.fromConfig(kubernetesConfigFile); + } else { + client = Config.fromCluster(); + } + client.getHttpClient().setReadTimeout(60, TimeUnit.SECONDS); + Configuration.setDefaultApiClient(client); + api = new CoreV1Api(); + + watcherThread = new Thread(this); + watcherThread.setName("DynamicProvisioner"); + watcherThread.setDaemon(true); + } + + @Override + public void run() { + LOGGER.info("Starting kubernetes dynamic provisioner."); + while (running) { + String resourceVersion = null; + try { + + Watch watch = Watch.createWatch(client, + api.listPersistentVolumeClaimForAllNamespacesCall(null, + null, + false, + null, + null, + null, + resourceVersion, + null, + true, + null, + null), + new TypeToken>() { + }.getType()); + + + //check the new pvc resources, and create cblock + pv if needed + for (Watch.Response item : watch) { + V1PersistentVolumeClaim claim = item.object; + + if (isPvMissingForPvc(claim)) { + + LOGGER.info("Provisioning volumes for PVC {}/{}", + claim.getMetadata().getNamespace(), + claim.getMetadata().getName()); + + if (LOGGER.isDebugEnabled()) { + RequestBody request = + api.getApiClient().serialize(claim, "application/json"); + + final Buffer buffer = new Buffer(); + request.writeTo(buffer); + LOGGER.debug("New PVC is detected: " + buffer.readUtf8()); + } + + String volumeName = createVolumeName(claim); + + long size = CBlockCli.parseSize( + claim.getSpec().getResources().getRequests().get("storage")); + + createCBlock(volumeName, size); + createPersistentVolumeFromPVC(item.object, volumeName); + } + } + } catch (Exception ex) { + if (ex.getCause() != null && ex + .getCause() instanceof SocketTimeoutException) { + //This is normal. We are connection to the kubernetes server and the + //connection should be reopened time to time... 
+ LOGGER.debug("Time exception occured", ex); + } else { + LOGGER.error("Error on provisioning persistent volumes.", ex); + try { + //we can try again in the main loop + Thread.sleep(1000); + } catch (InterruptedException e) { + LOGGER.error("Error on sleeping after an error.", e); + } + } + } + } + } + + private boolean isPvMissingForPvc(V1PersistentVolumeClaim claim) { + + Map annotations = claim.getMetadata().getAnnotations(); + + return claim.getStatus().getPhase().equals("Pending") && STORAGE_CLASS + .equals(claim.getSpec().getStorageClassName()) && PROVISIONER_ID + .equals(annotations.get(KUBERNETES_PROVISIONER_KEY)) && !"yes" + .equals(annotations.get(KUBERNETES_BIND_COMPLETED_KEY)); + } + + @VisibleForTesting + protected String createVolumeName(V1PersistentVolumeClaim claim) { + return claim.getMetadata().getName() + "-" + claim.getMetadata() + .getUid(); + } + + public void stop() { + running = false; + try { + watcherThread.join(60000); + } catch (InterruptedException e) { + LOGGER.error("Kubernetes watcher thread can't stopped gracefully.", e); + } + } + + private void createCBlock(String volumeName, long size) + throws CBlockException { + + MountVolumeResponse mountVolumeResponse = + storageManager.isVolumeValid(cblockUser, volumeName); + if (!mountVolumeResponse.getIsValid()) { + storageManager + .createVolume(cblockUser, volumeName, size, 4 * 1024); + } + } + + private void createPersistentVolumeFromPVC(V1PersistentVolumeClaim claim, + String volumeName) throws ApiException, IOException { + + V1PersistentVolume v1PersistentVolume = + persitenceVolumeBuilder(claim, volumeName); + + if (LOGGER.isDebugEnabled()) { + RequestBody request = + api.getApiClient().serialize(v1PersistentVolume, "application/json"); + + final Buffer buffer = new Buffer(); + request.writeTo(buffer); + LOGGER.debug("Creating new PV: " + buffer.readUtf8()); + } + api.createPersistentVolume(v1PersistentVolume, null); + } + + protected V1PersistentVolume persitenceVolumeBuilder( + V1PersistentVolumeClaim claim, + String volumeName) { + + V1PersistentVolume v1PersistentVolume = new V1PersistentVolume(); + v1PersistentVolume.setKind("PersistentVolume"); + v1PersistentVolume.setApiVersion("v1"); + + V1ObjectMeta metadata = new V1ObjectMeta(); + metadata.setName(volumeName); + metadata.setNamespace(claim.getMetadata().getNamespace()); + metadata.setAnnotations(new HashMap<>()); + + metadata.getAnnotations() + .put("pv.kubernetes.io/provisioned-by", PROVISIONER_ID); + + metadata.getAnnotations() + .put("volume.beta.kubernetes.io/storage-class", STORAGE_CLASS); + + v1PersistentVolume.setMetadata(metadata); + + V1PersistentVolumeSpec spec = new V1PersistentVolumeSpec(); + + spec.setCapacity(new HashMap<>()); + spec.getCapacity().put("storage", + claim.getSpec().getResources().getRequests().get("storage")); + + spec.setAccessModes(new ArrayList<>()); + spec.getAccessModes().add("ReadWriteOnce"); + + V1ObjectReference claimRef = new V1ObjectReference(); + claimRef.setName(claim.getMetadata().getName()); + claimRef.setNamespace(claim.getMetadata().getNamespace()); + claimRef.setKind(claim.getKind()); + claimRef.setApiVersion(claim.getApiVersion()); + claimRef.setUid(claim.getMetadata().getUid()); + spec.setClaimRef(claimRef); + + spec.persistentVolumeReclaimPolicy("Delete"); + + V1ISCSIVolumeSource iscsi = new V1ISCSIVolumeSource(); + iscsi.setIqn(cblockUser + ":" + volumeName); + iscsi.setLun(0); + iscsi.setFsType("ext4"); + String portal = externalIp + ":" + externalPort; + iscsi.setTargetPortal(portal); + 
iscsi.setPortals(new ArrayList<>()); + iscsi.getPortals().add(portal); + + spec.iscsi(iscsi); + v1PersistentVolume.setSpec(spec); + return v1PersistentVolume; + } + + + @VisibleForTesting + protected CoreV1Api getApi() { + return api; + } + + public void start() { + watcherThread.start(); + } +} http://git-wip-us.apache.org/repos/asf/hadoop/blob/eb5e66a1/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/cblock/kubernetes/package-info.java ---------------------------------------------------------------------- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/cblock/kubernetes/package-info.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/cblock/kubernetes/package-info.java new file mode 100644 index 0000000..3ec5aab --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/cblock/kubernetes/package-info.java @@ -0,0 +1,23 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * This package contains helper classes to run hadoop cluster in kubernetes + * environment. + */ +package org.apache.hadoop.cblock.kubernetes; http://git-wip-us.apache.org/repos/asf/hadoop/blob/eb5e66a1/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/cblock/storage/StorageManager.java ---------------------------------------------------------------------- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/cblock/storage/StorageManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/cblock/storage/StorageManager.java index 65b9b49..865f3b2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/cblock/storage/StorageManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/cblock/storage/StorageManager.java @@ -204,6 +204,8 @@ public class StorageManager { LOGGER.error("Error creating container Container:{}:" + " index:{} error:{}", container.getContainerID(), containerIdx, e); + } else { + LOGGER.error("Error creating container.", e); } } } http://git-wip-us.apache.org/repos/asf/hadoop/blob/eb5e66a1/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/ozone-default.xml ---------------------------------------------------------------------- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/ozone-default.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/ozone-default.xml index db21c12..4c7c723 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/ozone-default.xml +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/ozone-default.xml @@ -294,6 +294,51 @@ TCP port returned during the iscsi discovery. 
+ + + dfs.cblock.kubernetes.dynamic-provisioner.enabled + false + CBLOCK, KUBERNETES + Flag to enable automatic creation of cblocks and + kubernetes PersitentVolumes in kubernetes environment. + + + + dfs.cblock.kubernetes.cblock-user + iqn.2001-04.org.apache.hadoop + CBLOCK, KUBERNETES + CBlock user to use for the dynamic provisioner. + This user will own all of the auto-created cblocks. + + + + dfs.cblock.kubernetes.configfile + + CBLOCK, KUBERNETES + Location of the kubernetes configuration file + to access the kubernetes cluster. Not required inside a pod + as the default service account will be if this value is + empty. + + + + dfs.cblock.iscsi.advertised.ip + + CBLOCK, KUBERNETES + IP where the cblock target server is available + from the kubernetes nodes. Usually it's a cluster ip address + which is defined by a deployed Service. + + + + dfs.cblock.iscsi.advertised.port + 3260 + CBLOCK, KUBERNETES + Port where the cblock target server is available + from the kubernetes nodes. Could be different from the + listening port if jscsi is behind a Service. + + ozone.container.cache.size http://git-wip-us.apache.org/repos/asf/hadoop/blob/eb5e66a1/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cblock/kubernetes/TestDynamicProvisioner.java ---------------------------------------------------------------------- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cblock/kubernetes/TestDynamicProvisioner.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cblock/kubernetes/TestDynamicProvisioner.java new file mode 100644 index 0000000..02c23ff --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cblock/kubernetes/TestDynamicProvisioner.java @@ -0,0 +1,73 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.cblock.kubernetes; + +import io.kubernetes.client.JSON; +import io.kubernetes.client.models.V1PersistentVolume; +import io.kubernetes.client.models.V1PersistentVolumeClaim; +import static org.apache.hadoop.cblock.CBlockConfigKeys + .DFS_CBLOCK_ISCSI_ADVERTISED_IP; +import org.apache.hadoop.conf.OzoneConfiguration; +import org.junit.Assert; +import org.junit.Test; + +import java.nio.file.Files; +import java.nio.file.Paths; + +/** + * Test the resource generation of Dynamic Provisioner. 
+ */ +public class TestDynamicProvisioner { + + @Test + public void persitenceVolumeBuilder() throws Exception { + + OzoneConfiguration conf = new OzoneConfiguration(); + conf.setStrings(DFS_CBLOCK_ISCSI_ADVERTISED_IP, "1.2.3.4"); + + DynamicProvisioner provisioner = + new DynamicProvisioner(conf, null); + + String pvc = new String(Files.readAllBytes( + Paths.get(getClass().getResource( + "/dynamicprovisioner/input1-pvc.json").toURI()))); + + String pv = new String(Files.readAllBytes( + Paths.get(getClass().getResource( + "/dynamicprovisioner/expected1-pv.json").toURI()))); + + JSON json = new io.kubernetes.client.JSON(); + + V1PersistentVolumeClaim claim = + json.getGson().fromJson(pvc, V1PersistentVolumeClaim.class); + + String volumeName = provisioner.createVolumeName(claim); + + V1PersistentVolume volume = + provisioner.persitenceVolumeBuilder(claim, volumeName); + + //remove the data which should not been compared + V1PersistentVolume expectedVolume = + json.getGson().fromJson(pv, V1PersistentVolume.class); + + + Assert.assertEquals(expectedVolume, volume); + } + +} \ No newline at end of file http://git-wip-us.apache.org/repos/asf/hadoop/blob/eb5e66a1/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/dynamicprovisioner/expected1-pv.json ---------------------------------------------------------------------- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/dynamicprovisioner/expected1-pv.json b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/dynamicprovisioner/expected1-pv.json new file mode 100644 index 0000000..2f8a193 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/dynamicprovisioner/expected1-pv.json @@ -0,0 +1,37 @@ +{ + "apiVersion": "v1", + "kind": "PersistentVolume", + "metadata": { + "annotations": { + "volume.beta.kubernetes.io/storage-class": "cblock", + "pv.kubernetes.io/provisioned-by": "hadoop.apache.org/cblock" + }, + "name": "volume1-b65d053d-f92e-11e7-be3b-84b261c34638", + "namespace": "ns" + }, + "spec": { + "accessModes": [ + "ReadWriteOnce" + ], + "capacity": { + "storage": "1Gi" + }, + "claimRef": { + "apiVersion": "v1", + "kind": "PersistentVolumeClaim", + "name": "volume1", + "namespace": "ns", + "uid": "b65d053d-f92e-11e7-be3b-84b261c34638" + }, + "iscsi": { + "fsType": "ext4", + "iqn": "iqn.2001-04.org.apache.hadoop:volume1-b65d053d-f92e-11e7-be3b-84b261c34638", + "lun": 0, + "portals": [ + "1.2.3.4:3260" + ], + "targetPortal": "1.2.3.4:3260" + }, + "persistentVolumeReclaimPolicy": "Delete" + } +} http://git-wip-us.apache.org/repos/asf/hadoop/blob/eb5e66a1/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/dynamicprovisioner/input1-pvc.json ---------------------------------------------------------------------- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/dynamicprovisioner/input1-pvc.json b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/dynamicprovisioner/input1-pvc.json new file mode 100644 index 0000000..e40ba71 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/dynamicprovisioner/input1-pvc.json @@ -0,0 +1,38 @@ +{ + "apiVersion": "v1", + "kind": "PersistentVolumeClaim", + "metadata": { + "annotations": { + "pv.kubernetes.io/bind-completed": "yes", + "pv.kubernetes.io/bound-by-controller": "yes", + "volume.beta.kubernetes.io/storage-provisioner": "hadoop.apache.org/cblock" + }, + "creationTimestamp": "2018-01-14T13:27:48Z", + "name": "volume1", + "namespace": "ns", + "resourceVersion": "5532691", + "selfLink": "/api/v1/namespaces/demo1/persistentvolumeclaims/persistent", + 
"uid": "b65d053d-f92e-11e7-be3b-84b261c34638" + }, + "spec": { + "accessModes": [ + "ReadWriteOnce" + ], + "resources": { + "requests": { + "storage": "1Gi" + } + }, + "storageClassName": "cblock", + "volumeName": "persistent-b65d053d-f92e-11e7-be3b-84b261c34638" + }, + "status": { + "accessModes": [ + "ReadWriteOnce" + ], + "capacity": { + "storage": "1Gi" + }, + "phase": "Bound" + } +} http://git-wip-us.apache.org/repos/asf/hadoop/blob/eb5e66a1/hadoop-minicluster/pom.xml ---------------------------------------------------------------------- diff --git a/hadoop-minicluster/pom.xml b/hadoop-minicluster/pom.xml index 4558e99..5fe2c49 100644 --- a/hadoop-minicluster/pom.xml +++ b/hadoop-minicluster/pom.xml @@ -67,6 +67,12 @@ org.apache.hadoop hadoop-hdfs compile + + + io.kubernetes + client-java + + --------------------------------------------------------------------- To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org For additional commands, e-mail: common-commits-help@hadoop.apache.org