From: rvs@apache.org
To: commits@bigtop.apache.org
Date: Wed, 22 Mar 2017 06:10:01 -0000
Subject: [10/52] bigtop git commit: ODPI-5. Integrate Ambari packaging into Bigtop

http://git-wip-us.apache.org/repos/asf/bigtop/blob/b1d707c2/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-START/scripts/params.py ---------------------------------------------------------------------- diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-START/scripts/params.py b/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-START/scripts/params.py new file mode 100755 index 0000000..fc2c61f --- /dev/null +++ b/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-START/scripts/params.py @@ -0,0 +1,318 @@ +""" +Licensed to the Apache Software Foundation (ASF) under one +or more contributor license agreements. See the NOTICE file +distributed with this work for additional information +regarding copyright ownership. The ASF licenses this file +to you under the Apache License, Version 2.0 (the +"License"); you may not use this file except in compliance +with the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+ +""" + +import os + +from resource_management.libraries.functions import conf_select +from resource_management.libraries.functions import stack_select +from resource_management.libraries.functions import default +from resource_management.libraries.functions import format_jvm_option +from resource_management.libraries.functions import format +from resource_management.libraries.functions.version import format_stack_version, compare_versions +from ambari_commons.os_check import OSCheck +from resource_management.libraries.script.script import Script +from resource_management.libraries.functions import get_kinit_path +from resource_management.libraries.functions.get_not_managed_resources import get_not_managed_resources +from resource_management.libraries.resources.hdfs_resource import HdfsResource + +config = Script.get_config() + +host_sys_prepped = default("/hostLevelParams/host_sys_prepped", False) + +stack_version_unformatted = config['hostLevelParams']['stack_version'] +stack_version_formatted = format_stack_version(stack_version_unformatted) + +dfs_type = default("/commandParams/dfs_type", "") +hadoop_conf_dir = "/etc/hadoop/conf" + +component_list = default("/localComponents", []) + +hdfs_tmp_dir = config['configurations']['hadoop-env']['hdfs_tmp_dir'] + +# hadoop default params +mapreduce_libs_path = "/usr/lib/hadoop-mapreduce/*" + +hadoop_libexec_dir = stack_select.get_hadoop_dir("libexec") +hadoop_lib_home = stack_select.get_hadoop_dir("lib") +hadoop_bin = stack_select.get_hadoop_dir("sbin") +hadoop_home = '/usr' +create_lib_snappy_symlinks = True + +# HDP 2.2+ params +if Script.is_stack_greater_or_equal("2.2"): + mapreduce_libs_path = "/usr/hdp/current/hadoop-mapreduce-client/*" + hadoop_home = stack_select.get_hadoop_dir("home") + create_lib_snappy_symlinks = False + +current_service = config['serviceName'] + +#security params +security_enabled = config['configurations']['cluster-env']['security_enabled'] + +#users and groups +has_hadoop_env = 'hadoop-env' in config['configurations'] +mapred_user = config['configurations']['mapred-env']['mapred_user'] +hdfs_user = config['configurations']['hadoop-env']['hdfs_user'] +yarn_user = config['configurations']['yarn-env']['yarn_user'] + +user_group = config['configurations']['cluster-env']['user_group'] + +#hosts +hostname = config["hostname"] +ambari_server_hostname = config['clusterHostInfo']['ambari_server_host'][0] +rm_host = default("/clusterHostInfo/rm_host", []) +slave_hosts = default("/clusterHostInfo/slave_hosts", []) +oozie_servers = default("/clusterHostInfo/oozie_server", []) +hcat_server_hosts = default("/clusterHostInfo/webhcat_server_host", []) +hive_server_host = default("/clusterHostInfo/hive_server_host", []) +hbase_master_hosts = default("/clusterHostInfo/hbase_master_hosts", []) +hs_host = default("/clusterHostInfo/hs_host", []) +jtnode_host = default("/clusterHostInfo/jtnode_host", []) +namenode_host = default("/clusterHostInfo/namenode_host", []) +zk_hosts = default("/clusterHostInfo/zookeeper_hosts", []) +ganglia_server_hosts = default("/clusterHostInfo/ganglia_server_host", []) +ams_collector_hosts = default("/clusterHostInfo/metrics_collector_hosts", []) + +has_namenode = not len(namenode_host) == 0 +has_resourcemanager = not len(rm_host) == 0 +has_slaves = not len(slave_hosts) == 0 +has_oozie_server = not len(oozie_servers) == 0 +has_hcat_server_host = not len(hcat_server_hosts) == 0 +has_hive_server_host = not len(hive_server_host) == 0 +has_hbase_masters = not len(hbase_master_hosts) == 0 +has_zk_host = not 
len(zk_hosts) == 0 +has_ganglia_server = not len(ganglia_server_hosts) == 0 +has_metric_collector = not len(ams_collector_hosts) == 0 + +is_namenode_master = hostname in namenode_host +is_jtnode_master = hostname in jtnode_host +is_rmnode_master = hostname in rm_host +is_hsnode_master = hostname in hs_host +is_hbase_master = hostname in hbase_master_hosts +is_slave = hostname in slave_hosts +if has_ganglia_server: + ganglia_server_host = ganglia_server_hosts[0] +if has_metric_collector: + if 'cluster-env' in config['configurations'] and \ + 'metrics_collector_vip_host' in config['configurations']['cluster-env']: + metric_collector_host = config['configurations']['cluster-env']['metrics_collector_vip_host'] + else: + metric_collector_host = ams_collector_hosts[0] + if 'cluster-env' in config['configurations'] and \ + 'metrics_collector_vip_port' in config['configurations']['cluster-env']: + metric_collector_port = config['configurations']['cluster-env']['metrics_collector_vip_port'] + else: + metric_collector_web_address = default("/configurations/ams-site/timeline.metrics.service.webapp.address", "localhost:6188") + if metric_collector_web_address.find(':') != -1: + metric_collector_port = metric_collector_web_address.split(':')[1] + else: + metric_collector_port = '6188' + if default("/configurations/ams-site/timeline.metrics.service.http.policy", "HTTP_ONLY") == "HTTPS_ONLY": + metric_collector_protocol = 'https' + else: + metric_collector_protocol = 'http' + metric_truststore_path= default("/configurations/ams-ssl-client/ssl.client.truststore.location", "") + metric_truststore_type= default("/configurations/ams-ssl-client/ssl.client.truststore.type", "") + metric_truststore_password= default("/configurations/ams-ssl-client/ssl.client.truststore.password", "") + + pass +metrics_report_interval = default("/configurations/ams-site/timeline.metrics.sink.report.interval", 60) +metrics_collection_period = default("/configurations/ams-site/timeline.metrics.sink.collection.period", 10) + +#hadoop params + +if has_namenode or dfs_type == 'HCFS': + hadoop_tmp_dir = format("/tmp/hadoop-{hdfs_user}") + hadoop_conf_dir = conf_select.get_hadoop_conf_dir(force_latest_on_upgrade=True) + task_log4j_properties_location = os.path.join(hadoop_conf_dir, "task-log4j.properties") + +hadoop_pid_dir_prefix = config['configurations']['hadoop-env']['hadoop_pid_dir_prefix'] +hdfs_log_dir_prefix = config['configurations']['hadoop-env']['hdfs_log_dir_prefix'] +hbase_tmp_dir = "/tmp/hbase-hbase" +#db params +server_db_name = config['hostLevelParams']['db_name'] +db_driver_filename = config['hostLevelParams']['db_driver_filename'] +oracle_driver_url = config['hostLevelParams']['oracle_jdbc_url'] +mysql_driver_url = config['hostLevelParams']['mysql_jdbc_url'] +ambari_server_resources = config['hostLevelParams']['jdk_location'] +oracle_driver_symlink_url = format("{ambari_server_resources}oracle-jdbc-driver.jar") +mysql_driver_symlink_url = format("{ambari_server_resources}mysql-jdbc-driver.jar") + +ambari_db_rca_url = config['hostLevelParams']['ambari_db_rca_url'][0] +ambari_db_rca_driver = config['hostLevelParams']['ambari_db_rca_driver'][0] +ambari_db_rca_username = config['hostLevelParams']['ambari_db_rca_username'][0] +ambari_db_rca_password = config['hostLevelParams']['ambari_db_rca_password'][0] + +if has_namenode and 'rca_enabled' in config['configurations']['hadoop-env']: + rca_enabled = config['configurations']['hadoop-env']['rca_enabled'] +else: + rca_enabled = False +rca_disabled_prefix = "###" +if 
rca_enabled == True: + rca_prefix = "" +else: + rca_prefix = rca_disabled_prefix + +#hadoop-env.sh +java_home = config['hostLevelParams']['java_home'] + +jsvc_path = "/usr/lib/bigtop-utils" + +hadoop_heapsize = config['configurations']['hadoop-env']['hadoop_heapsize'] +namenode_heapsize = config['configurations']['hadoop-env']['namenode_heapsize'] +namenode_opt_newsize = config['configurations']['hadoop-env']['namenode_opt_newsize'] +namenode_opt_maxnewsize = config['configurations']['hadoop-env']['namenode_opt_maxnewsize'] +namenode_opt_permsize = format_jvm_option("/configurations/hadoop-env/namenode_opt_permsize","128m") +namenode_opt_maxpermsize = format_jvm_option("/configurations/hadoop-env/namenode_opt_maxpermsize","256m") + +jtnode_opt_newsize = "200m" +jtnode_opt_maxnewsize = "200m" +jtnode_heapsize = "1024m" +ttnode_heapsize = "1024m" + +dtnode_heapsize = config['configurations']['hadoop-env']['dtnode_heapsize'] +mapred_pid_dir_prefix = default("/configurations/mapred-env/mapred_pid_dir_prefix","/var/run/hadoop-mapreduce") +mapred_log_dir_prefix = default("/configurations/mapred-env/mapred_log_dir_prefix","/var/log/hadoop-mapreduce") + +#log4j.properties + +yarn_log_dir_prefix = default("/configurations/yarn-env/yarn_log_dir_prefix","/var/log/hadoop-yarn") + +dfs_hosts = default('/configurations/hdfs-site/dfs.hosts', None) + +#log4j.properties +if (('hdfs-log4j' in config['configurations']) and ('content' in config['configurations']['hdfs-log4j'])): + log4j_props = config['configurations']['hdfs-log4j']['content'] + if (('yarn-log4j' in config['configurations']) and ('content' in config['configurations']['yarn-log4j'])): + log4j_props += config['configurations']['yarn-log4j']['content'] +else: + log4j_props = None + +refresh_topology = False +command_params = config["commandParams"] if "commandParams" in config else None +if command_params is not None: + refresh_topology = bool(command_params["refresh_topology"]) if "refresh_topology" in command_params else False + +ambari_libs_dir = "/var/lib/ambari-agent/lib" +is_webhdfs_enabled = config['configurations']['hdfs-site']['dfs.webhdfs.enabled'] +default_fs = config['configurations']['core-site']['fs.defaultFS'] + +#host info +all_hosts = default("/clusterHostInfo/all_hosts", []) +all_racks = default("/clusterHostInfo/all_racks", []) +all_ipv4_ips = default("/clusterHostInfo/all_ipv4_ips", []) +slave_hosts = default("/clusterHostInfo/slave_hosts", []) + +#topology files +net_topology_script_file_path = "/etc/hadoop/conf/topology_script.py" +net_topology_script_dir = os.path.dirname(net_topology_script_file_path) +net_topology_mapping_data_file_name = 'topology_mappings.data' +net_topology_mapping_data_file_path = os.path.join(net_topology_script_dir, net_topology_mapping_data_file_name) + +#Added logic to create /tmp and /user directory for HCFS stack. 
+has_core_site = 'core-site' in config['configurations'] +hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab'] +kinit_path_local = get_kinit_path() +stack_version_unformatted = config['hostLevelParams']['stack_version'] +stack_version_formatted = format_stack_version(stack_version_unformatted) +hadoop_bin_dir = stack_select.get_hadoop_dir("bin") +hdfs_principal_name = default('/configurations/hadoop-env/hdfs_principal_name', None) +hdfs_site = config['configurations']['hdfs-site'] +default_fs = config['configurations']['core-site']['fs.defaultFS'] +smoke_user = config['configurations']['cluster-env']['smokeuser'] +smoke_hdfs_user_dir = format("/user/{smoke_user}") +smoke_hdfs_user_mode = 0770 + + +##### Namenode RPC ports - metrics config section start ##### + +# Figure out the rpc ports for current namenode +nn_rpc_client_port = None +nn_rpc_dn_port = None +nn_rpc_healthcheck_port = None + +namenode_id = None +namenode_rpc = None + +dfs_ha_enabled = False +dfs_ha_nameservices = default('/configurations/hdfs-site/dfs.internal.nameservices', None) +if dfs_ha_nameservices is None: + dfs_ha_nameservices = default('/configurations/hdfs-site/dfs.nameservices', None) +dfs_ha_namenode_ids = default(format("/configurations/hdfs-site/dfs.ha.namenodes.{dfs_ha_nameservices}"), None) + +dfs_ha_namemodes_ids_list = [] +other_namenode_id = None + +if dfs_ha_namenode_ids: + dfs_ha_namemodes_ids_list = dfs_ha_namenode_ids.split(",") + dfs_ha_namenode_ids_array_len = len(dfs_ha_namemodes_ids_list) + if dfs_ha_namenode_ids_array_len > 1: + dfs_ha_enabled = True + +if dfs_ha_enabled: + for nn_id in dfs_ha_namemodes_ids_list: + nn_host = config['configurations']['hdfs-site'][format('dfs.namenode.rpc-address.{dfs_ha_nameservices}.{nn_id}')] + if hostname in nn_host: + namenode_id = nn_id + namenode_rpc = nn_host + pass + pass +else: + namenode_rpc = default('/configurations/hdfs-site/dfs.namenode.rpc-address', None) + +if namenode_rpc: + nn_rpc_client_port = namenode_rpc.split(':')[1].strip() + +if dfs_ha_enabled: + dfs_service_rpc_address = default(format('/configurations/hdfs-site/dfs.namenode.servicerpc-address.{dfs_ha_nameservices}.{namenode_id}'), None) + dfs_lifeline_rpc_address = default(format('/configurations/hdfs-site/dfs.namenode.lifeline.rpc-address.{dfs_ha_nameservices}.{namenode_id}'), None) +else: + dfs_service_rpc_address = default('/configurations/hdfs-site/dfs.namenode.servicerpc-address', None) + dfs_lifeline_rpc_address = default(format('/configurations/hdfs-site/dfs.namenode.lifeline.rpc-address'), None) + +if dfs_service_rpc_address: + nn_rpc_dn_port = dfs_service_rpc_address.split(':')[1].strip() + +if dfs_lifeline_rpc_address: + nn_rpc_healthcheck_port = dfs_lifeline_rpc_address.split(':')[1].strip() + +is_nn_client_port_configured = False if nn_rpc_client_port is None else True +is_nn_dn_port_configured = False if nn_rpc_dn_port is None else True +is_nn_healthcheck_port_configured = False if nn_rpc_healthcheck_port is None else True + +##### end ##### + +import functools +#create partial functions with common arguments for every HdfsResource call +#to create/delete/copyfromlocal hdfs directories/files we need to call params.HdfsResource in code +HdfsResource = functools.partial( + HdfsResource, + user=hdfs_user, + hdfs_resource_ignore_file = "/var/lib/ambari-agent/data/.hdfs_resource_ignore", + security_enabled = security_enabled, + keytab = hdfs_user_keytab, + kinit_path_local = kinit_path_local, + hadoop_bin_dir = hadoop_bin_dir, + hadoop_conf_dir = 
hadoop_conf_dir, + principal_name = hdfs_principal_name, + hdfs_site = hdfs_site, + default_fs = default_fs, + immutable_paths = get_not_managed_resources(), + dfs_type = dfs_type +) http://git-wip-us.apache.org/repos/asf/bigtop/blob/b1d707c2/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-START/scripts/rack_awareness.py ---------------------------------------------------------------------- diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-START/scripts/rack_awareness.py b/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-START/scripts/rack_awareness.py new file mode 100755 index 0000000..548f051 --- /dev/null +++ b/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-START/scripts/rack_awareness.py @@ -0,0 +1,47 @@ +#!/usr/bin/env python + +""" +Licensed to the Apache Software Foundation (ASF) under one +or more contributor license agreements. See the NOTICE file +distributed with this work for additional information +regarding copyright ownership. The ASF licenses this file +to you under the Apache License, Version 2.0 (the +"License"); you may not use this file except in compliance +with the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +""" + +from resource_management.core.resources import File +from resource_management.core.source import StaticFile, Template +from resource_management.libraries.functions import format + + +def create_topology_mapping(): + import params + + File(params.net_topology_mapping_data_file_path, + content=Template("topology_mappings.data.j2"), + owner=params.hdfs_user, + group=params.user_group, + only_if=format("test -d {net_topology_script_dir}")) + +def create_topology_script(): + import params + + File(params.net_topology_script_file_path, + content=StaticFile('topology_script.py'), + mode=0755, + only_if=format("test -d {net_topology_script_dir}")) + +def create_topology_script_and_mapping(): + import params + if params.has_hadoop_env: + create_topology_mapping() + create_topology_script()
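For context: the functions above are not run directly; a before-START hook conventionally exposes them through a hook.py entry point. The sketch below shows that usual wiring, assuming the stock resource_management Hook base class — the class name and call order are illustrative, not part of this patch:

    from resource_management.libraries.script.hook import Hook
    from shared_initialization import setup_hadoop, setup_configs, create_javahome_symlink
    from rack_awareness import create_topology_script_and_mapping

    class BeforeStartHook(Hook):
      def hook(self, env):
        import params
        env.set_params(params)                 # expose params to File/Directory resources
        setup_hadoop()                         # dirs, fast-hdfs-resource.jar, metrics props
        setup_configs()                        # task-log4j.properties, configuration.xsl, masters
        create_javahome_symlink()
        create_topology_script_and_mapping()   # from rack_awareness.py, gated on has_hadoop_env

    if __name__ == "__main__":
      BeforeStartHook().execute()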
http://git-wip-us.apache.org/repos/asf/bigtop/blob/b1d707c2/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-START/scripts/shared_initialization.py ---------------------------------------------------------------------- diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-START/scripts/shared_initialization.py b/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-START/scripts/shared_initialization.py new file mode 100755 index 0000000..ba9c8fb --- /dev/null +++ b/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-START/scripts/shared_initialization.py @@ -0,0 +1,175 @@ +""" +Licensed to the Apache Software Foundation (ASF) under one +or more contributor license agreements. See the NOTICE file +distributed with this work for additional information +regarding copyright ownership. The ASF licenses this file +to you under the Apache License, Version 2.0 (the +"License"); you may not use this file except in compliance +with the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +""" + +import os +from resource_management.libraries.providers.hdfs_resource import WebHDFSUtil + +from resource_management import * + +def setup_hadoop(): + """ + Setup hadoop files and directories + """ + import params + + Execute(("setenforce","0"), + only_if="test -f /selinux/enforce", + not_if="(! which getenforce ) || (which getenforce && getenforce | grep -q Disabled)", + sudo=True, + ) + + #directories + if params.has_namenode or params.dfs_type == 'HCFS': + Directory(params.hdfs_log_dir_prefix, + create_parents = True, + owner='root', + group=params.user_group, + mode=0775, + cd_access='a', + ) + if params.has_namenode: + Directory(params.hadoop_pid_dir_prefix, + create_parents = True, + owner='root', + group='root', + cd_access='a', + ) + Directory(params.hadoop_tmp_dir, + create_parents = True, + owner=params.hdfs_user, + cd_access='a', + ) + #files + if params.security_enabled: + tc_owner = "root" + else: + tc_owner = params.hdfs_user + + # if WebHDFS is not enabled we need this jar to create hadoop folders. + if params.host_sys_prepped: + print "Skipping copying of fast-hdfs-resource.jar as host is sys prepped" + elif params.dfs_type == 'HCFS' or not WebHDFSUtil.is_webhdfs_available(params.is_webhdfs_enabled, params.default_fs): + # for source-code of jar goto contrib/fast-hdfs-resource + File(format("{ambari_libs_dir}/fast-hdfs-resource.jar"), + mode=0644, + content=StaticFile("fast-hdfs-resource.jar") + ) + + if os.path.exists(params.hadoop_conf_dir): + File(os.path.join(params.hadoop_conf_dir, 'commons-logging.properties'), + owner=tc_owner, + content=Template('commons-logging.properties.j2') + ) + + health_check_template_name = "health_check" + File(os.path.join(params.hadoop_conf_dir, health_check_template_name), + owner=tc_owner, + content=Template(health_check_template_name + ".j2") + ) + + log4j_filename = os.path.join(params.hadoop_conf_dir, "log4j.properties") + if (params.log4j_props != None): + File(log4j_filename, + mode=0644, + group=params.user_group, + owner=params.hdfs_user, + content=params.log4j_props + ) + elif (os.path.exists(format("{params.hadoop_conf_dir}/log4j.properties"))): + File(log4j_filename, + mode=0644, + group=params.user_group, + owner=params.hdfs_user, + ) + + File(os.path.join(params.hadoop_conf_dir, "hadoop-metrics2.properties"), + owner=params.hdfs_user, + group=params.user_group, + content=Template("hadoop-metrics2.properties.j2") + ) + + if params.dfs_type == 'HCFS' and params.has_core_site and 'ECS_CLIENT' in params.component_list: + create_dirs() + + +def setup_configs(): + """ + Creates configs for services HDFS mapred + """ + import params + + if params.has_namenode or params.dfs_type == 'HCFS': + if os.path.exists(params.hadoop_conf_dir): + File(params.task_log4j_properties_location, + content=StaticFile("task-log4j.properties"), + mode=0755 + ) + + if os.path.exists(os.path.join(params.hadoop_conf_dir, 'configuration.xsl')): + File(os.path.join(params.hadoop_conf_dir, 'configuration.xsl'), + owner=params.hdfs_user, + group=params.user_group + ) + if os.path.exists(os.path.join(params.hadoop_conf_dir, 'masters')): + 
File(os.path.join(params.hadoop_conf_dir, 'masters'), + owner=params.hdfs_user, + group=params.user_group + ) + + generate_include_file() + + +def generate_include_file(): + import params + + if params.has_namenode and params.dfs_hosts and params.has_slaves: + include_hosts_list = params.slave_hosts + File(params.dfs_hosts, + content=Template("include_hosts_list.j2"), + owner=params.hdfs_user, + group=params.user_group + ) + +def create_javahome_symlink(): + if os.path.exists("/usr/jdk/jdk1.6.0_31") and not os.path.exists("/usr/jdk64/jdk1.6.0_31"): + Directory("/usr/jdk64/", + create_parents = True, + ) + Link("/usr/jdk/jdk1.6.0_31", + to="/usr/jdk64/jdk1.6.0_31", + ) + +def create_dirs(): + import params + params.HdfsResource(params.hdfs_tmp_dir, + type="directory", + action="create_on_execute", + owner=params.hdfs_user, + mode=0777 + ) + params.HdfsResource(params.smoke_hdfs_user_dir, + type="directory", + action="create_on_execute", + owner=params.smoke_user, + mode=params.smoke_hdfs_user_mode + ) + params.HdfsResource(None, + action="execute" + ) + http://git-wip-us.apache.org/repos/asf/bigtop/blob/b1d707c2/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-START/templates/commons-logging.properties.j2 ---------------------------------------------------------------------- diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-START/templates/commons-logging.properties.j2 b/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-START/templates/commons-logging.properties.j2 new file mode 100755 index 0000000..2197ba5 --- /dev/null +++ b/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-START/templates/commons-logging.properties.j2 @@ -0,0 +1,43 @@ +{# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +#} + +#/* +# * Licensed to the Apache Software Foundation (ASF) under one +# * or more contributor license agreements. See the NOTICE file +# * distributed with this work for additional information +# * regarding copyright ownership. The ASF licenses this file +# * to you under the Apache License, Version 2.0 (the +# * "License"); you may not use this file except in compliance +# * with the License. You may obtain a copy of the License at +# * +# * http://www.apache.org/licenses/LICENSE-2.0 +# * +# * Unless required by applicable law or agreed to in writing, software +# * distributed under the License is distributed on an "AS IS" BASIS, +# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# * See the License for the specific language governing permissions and +# * limitations under the License. 
+# */ + +#Logging Implementation + +#Log4J +org.apache.commons.logging.Log=org.apache.commons.logging.impl.Log4JLogger + +#JDK Logger +#org.apache.commons.logging.Log=org.apache.commons.logging.impl.Jdk14Logger http://git-wip-us.apache.org/repos/asf/bigtop/blob/b1d707c2/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-START/templates/exclude_hosts_list.j2 ---------------------------------------------------------------------- diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-START/templates/exclude_hosts_list.j2 b/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-START/templates/exclude_hosts_list.j2 new file mode 100755 index 0000000..1adba80 --- /dev/null +++ b/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-START/templates/exclude_hosts_list.j2 @@ -0,0 +1,21 @@ +{# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +#} + +{% for host in hdfs_exclude_file %} +{{host}} +{% endfor %} http://git-wip-us.apache.org/repos/asf/bigtop/blob/b1d707c2/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-START/templates/hadoop-metrics2.properties.j2 ---------------------------------------------------------------------- diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-START/templates/hadoop-metrics2.properties.j2 b/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-START/templates/hadoop-metrics2.properties.j2 new file mode 100755 index 0000000..fcd9b23 --- /dev/null +++ b/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-START/templates/hadoop-metrics2.properties.j2 @@ -0,0 +1,104 @@ +{# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +#} + +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# syntax: [prefix].[source|sink|jmx].[instance].[options] +# See package.html for org.apache.hadoop.metrics2 for details + +{% if has_ganglia_server %} +*.period=60 + +*.sink.ganglia.class=org.apache.hadoop.metrics2.sink.ganglia.GangliaSink31 +*.sink.ganglia.period=10 + +# default for supportsparse is false +*.sink.ganglia.supportsparse=true + +.sink.ganglia.slope=jvm.metrics.gcCount=zero,jvm.metrics.memHeapUsedM=both +.sink.ganglia.dmax=jvm.metrics.threadsBlocked=70,jvm.metrics.memHeapUsedM=40 + +# Hook up to the server +namenode.sink.ganglia.servers={{ganglia_server_host}}:8661 +datanode.sink.ganglia.servers={{ganglia_server_host}}:8659 +jobtracker.sink.ganglia.servers={{ganglia_server_host}}:8662 +tasktracker.sink.ganglia.servers={{ganglia_server_host}}:8658 +maptask.sink.ganglia.servers={{ganglia_server_host}}:8660 +reducetask.sink.ganglia.servers={{ganglia_server_host}}:8660 +resourcemanager.sink.ganglia.servers={{ganglia_server_host}}:8664 +nodemanager.sink.ganglia.servers={{ganglia_server_host}}:8657 +historyserver.sink.ganglia.servers={{ganglia_server_host}}:8666 +journalnode.sink.ganglia.servers={{ganglia_server_host}}:8654 +nimbus.sink.ganglia.servers={{ganglia_server_host}}:8649 +supervisor.sink.ganglia.servers={{ganglia_server_host}}:8650 + +resourcemanager.sink.ganglia.tagsForPrefix.yarn=Queue + +{% endif %} + +{% if has_metric_collector %} + +*.period={{metrics_collection_period}} +*.sink.timeline.plugin.urls=file:///usr/lib/ambari-metrics-hadoop-sink/ambari-metrics-hadoop-sink.jar +*.sink.timeline.class=org.apache.hadoop.metrics2.sink.timeline.HadoopTimelineMetricsSink +*.sink.timeline.period={{metrics_collection_period}} +*.sink.timeline.sendInterval={{metrics_report_interval}}000 +*.sink.timeline.slave.host.name = {{hostname}} + +# HTTPS properties +*.sink.timeline.truststore.path = {{metric_truststore_path}} +*.sink.timeline.truststore.type = {{metric_truststore_type}} +*.sink.timeline.truststore.password = {{metric_truststore_password}} + +datanode.sink.timeline.collector={{metric_collector_protocol}}://{{metric_collector_host}}:{{metric_collector_port}} +namenode.sink.timeline.collector={{metric_collector_protocol}}://{{metric_collector_host}}:{{metric_collector_port}} +resourcemanager.sink.timeline.collector={{metric_collector_protocol}}://{{metric_collector_host}}:{{metric_collector_port}} +nodemanager.sink.timeline.collector={{metric_collector_protocol}}://{{metric_collector_host}}:{{metric_collector_port}} +jobhistoryserver.sink.timeline.collector={{metric_collector_protocol}}://{{metric_collector_host}}:{{metric_collector_port}} +journalnode.sink.timeline.collector={{metric_collector_protocol}}://{{metric_collector_host}}:{{metric_collector_port}} +maptask.sink.timeline.collector={{metric_collector_protocol}}://{{metric_collector_host}}:{{metric_collector_port}} +reducetask.sink.timeline.collector={{metric_collector_protocol}}://{{metric_collector_host}}:{{metric_collector_port}} +applicationhistoryserver.sink.timeline.collector={{metric_collector_protocol}}://{{metric_collector_host}}:{{metric_collector_port}} + 
+resourcemanager.sink.timeline.tagsForPrefix.yarn=Queue + +{% if is_nn_client_port_configured %} +# Namenode rpc ports customization +namenode.sink.timeline.metric.rpc.client.port={{nn_rpc_client_port}} +{% endif %} +{% if is_nn_dn_port_configured %} +namenode.sink.timeline.metric.rpc.datanode.port={{nn_rpc_dn_port}} +{% endif %} +{% if is_nn_healthcheck_port_configured %} +namenode.sink.timeline.metric.rpc.healthcheck.port={{nn_rpc_healthcheck_port}} +{% endif %} + +{% endif %} http://git-wip-us.apache.org/repos/asf/bigtop/blob/b1d707c2/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-START/templates/health_check.j2 ---------------------------------------------------------------------- diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-START/templates/health_check.j2 b/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-START/templates/health_check.j2 new file mode 100755 index 0000000..0a03d17 --- /dev/null +++ b/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-START/templates/health_check.j2 @@ -0,0 +1,81 @@ +{# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +#} + +#!/bin/bash +# +#/* +# * Licensed to the Apache Software Foundation (ASF) under one +# * or more contributor license agreements. See the NOTICE file +# * distributed with this work for additional information +# * regarding copyright ownership. The ASF licenses this file +# * to you under the Apache License, Version 2.0 (the +# * "License"); you may not use this file except in compliance +# * with the License. You may obtain a copy of the License at +# * +# * http://www.apache.org/licenses/LICENSE-2.0 +# * +# * Unless required by applicable law or agreed to in writing, software +# * distributed under the License is distributed on an "AS IS" BASIS, +# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# * See the License for the specific language governing permissions and +# * limitations under the License. +# */ + +err=0; + +function check_disks { + + for m in `awk '$3~/ext3/ {printf" %s ",$2}' /etc/fstab` ; do + fsdev="" + fsdev=`awk -v m=$m '$2==m {print $1}' /proc/mounts`; + if [ -z "$fsdev" -a "$m" != "/mnt" ] ; then + msg_="$msg_ $m(u)" + else + msg_="$msg_`awk -v m=$m '$2==m { if ( $4 ~ /^ro,/ ) {printf"%s(ro)",$2 } ; }' /proc/mounts`" + fi + done + + if [ -z "$msg_" ] ; then + echo "disks ok" ; exit 0 + else + echo "$msg_" ; exit 2 + fi + +} + +# Run all checks +for check in disks ; do + msg=`check_${check}` ; + if [ $? -eq 0 ] ; then + ok_msg="$ok_msg$msg," + else + err_msg="$err_msg$msg," + fi +done + +if [ ! -z "$err_msg" ] ; then + echo -n "ERROR $err_msg " +fi +if [ ! -z "$ok_msg" ] ; then + echo -n "OK: $ok_msg" +fi + +echo + +# Success! 
+exit 0 http://git-wip-us.apache.org/repos/asf/bigtop/blob/b1d707c2/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-START/templates/include_hosts_list.j2 ---------------------------------------------------------------------- diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-START/templates/include_hosts_list.j2 b/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-START/templates/include_hosts_list.j2 new file mode 100755 index 0000000..4a9e713 --- /dev/null +++ b/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-START/templates/include_hosts_list.j2 @@ -0,0 +1,21 @@ +{# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +#} + +{% for host in slave_hosts %} +{{host}} +{% endfor %} http://git-wip-us.apache.org/repos/asf/bigtop/blob/b1d707c2/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-START/templates/topology_mappings.data.j2 ---------------------------------------------------------------------- diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-START/templates/topology_mappings.data.j2 b/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-START/templates/topology_mappings.data.j2 new file mode 100755 index 0000000..15034d6 --- /dev/null +++ b/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-START/templates/topology_mappings.data.j2 @@ -0,0 +1,24 @@ +{# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 + # +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+#} +[network_topology] +{% for host in all_hosts %} +{% if host in slave_hosts %} +{{host}}={{all_racks[loop.index-1]}} +{{all_ipv4_ips[loop.index-1]}}={{all_racks[loop.index-1]}} +{% endif %} +{% endfor %} http://git-wip-us.apache.org/repos/asf/bigtop/blob/b1d707c2/bigtop-packages/src/common/ambari/ODPi/1.0/metainfo.xml ---------------------------------------------------------------------- diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/metainfo.xml b/bigtop-packages/src/common/ambari/ODPi/1.0/metainfo.xml new file mode 100755 index 0000000..ca45822 --- /dev/null +++ b/bigtop-packages/src/common/ambari/ODPi/1.0/metainfo.xml @@ -0,0 +1,22 @@ +<metainfo> + <versions> + <active>true</active> + </versions> +</metainfo> http://git-wip-us.apache.org/repos/asf/bigtop/blob/b1d707c2/bigtop-packages/src/common/ambari/ODPi/1.0/properties/stack_features.json ---------------------------------------------------------------------- diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/properties/stack_features.json b/bigtop-packages/src/common/ambari/ODPi/1.0/properties/stack_features.json new file mode 100755 index 0000000..8c838db --- /dev/null +++ b/bigtop-packages/src/common/ambari/ODPi/1.0/properties/stack_features.json @@ -0,0 +1,308 @@ +{ + "stack_features": [ + { + "name": "snappy", + "description": "Snappy compressor/decompressor support", + "min_version": "2.0.0.0", + "max_version": "2.2.0.0" + }, + { + "name": "lzo", + "description": "LZO libraries support", + "min_version": "2.2.1.0" + }, + { + "name": "express_upgrade", + "description": "Express upgrade support", + "min_version": "2.1.0.0" + }, + { + "name": "rolling_upgrade", + "description": "Rolling upgrade support", + "min_version": "2.2.0.0" + }, + { + "name": "config_versioning", + "description": "Configurable versions support", + "min_version": "2.3.0.0" + }, + { + "name": "datanode_non_root", + "description": "DataNode running as non-root support (AMBARI-7615)", + "min_version": "2.2.0.0" + }, + { + "name": "remove_ranger_hdfs_plugin_env", + "description": "HDFS removes Ranger env files (AMBARI-14299)", + "min_version": "2.3.0.0" + }, + { + "name": "ranger", + "description": "Ranger Service support", + "min_version": "2.2.0.0" + }, + { + "name": "ranger_tagsync_component", + "description": "Ranger Tagsync component support (AMBARI-14383)", + "min_version": "2.5.0.0" + }, + { + "name": "phoenix", + "description": "Phoenix Service support", + "min_version": "2.3.0.0" + }, + { + "name": "nfs", + "description": "NFS support", + "min_version": "2.3.0.0" + }, + { + "name": "tez_for_spark", + "description": "Tez dependency for Spark", + "min_version": "2.2.0.0", + "max_version": "2.3.0.0" + }, + { + "name": "timeline_state_store", + "description": "Yarn application timeline-service supports state store property (AMBARI-11442)", + "min_version": "2.2.0.0" + }, + { + "name": "copy_tarball_to_hdfs", + "description": "Copy tarball to HDFS support (AMBARI-12113)", + "min_version": "2.2.0.0" + }, + { + "name": "spark_16plus", + "description": "Spark 1.6+", + "min_version": "2.4.0.0" + }, + { + "name": "spark_thriftserver", + "description": "Spark Thrift Server", + "min_version": "2.3.2.0" + }, + { + "name": "storm_kerberos", + "description": "Storm Kerberos support (AMBARI-7570)", + "min_version": "2.2.0.0" + }, + { + "name": "storm_ams", + "description": "Storm AMS integration (AMBARI-10710)", + "min_version": "2.2.0.0" + }, + { + "name": "create_kafka_broker_id", + "description": "Ambari should create Kafka Broker Id (AMBARI-12678)", + "min_version": "2.2.0.0", + "max_version": "2.3.0.0" + 
}, + { + "name": "kafka_listeners", + "description": "Kafka listeners (AMBARI-10984)", + "min_version": "2.3.0.0" + }, + { + "name": "kafka_kerberos", + "description": "Kafka Kerberos support (AMBARI-10984)", + "min_version": "2.3.0.0" + }, + { + "name": "pig_on_tez", + "description": "Pig on Tez support (AMBARI-7863)", + "min_version": "2.2.0.0" + }, + { + "name": "ranger_usersync_non_root", + "description": "Ranger Usersync as non-root user (AMBARI-10416)", + "min_version": "2.3.0.0" + }, + { + "name": "ranger_audit_db_support", + "description": "Ranger Audit to DB support", + "min_version": "2.2.0.0", + "max_version": "2.5.0.0" + }, + { + "name": "accumulo_kerberos_user_auth", + "description": "Accumulo Kerberos User Auth (AMBARI-10163)", + "min_version": "2.3.0.0" + }, + { + "name": "knox_versioned_data_dir", + "description": "Use versioned data dir for Knox (AMBARI-13164)", + "min_version": "2.3.2.0" + }, + { + "name": "knox_sso_topology", + "description": "Knox SSO Topology support (AMBARI-13975)", + "min_version": "2.3.8.0" + }, + { + "name": "atlas_rolling_upgrade", + "description": "Rolling upgrade support for Atlas", + "min_version": "2.3.0.0" + }, + { + "name": "oozie_admin_user", + "description": "Oozie install user as an Oozie admin user (AMBARI-7976)", + "min_version": "2.2.0.0" + }, + { + "name": "oozie_create_hive_tez_configs", + "description": "Oozie create configs for Ambari Hive and Tez deployments (AMBARI-8074)", + "min_version": "2.2.0.0" + }, + { + "name": "oozie_setup_shared_lib", + "description": "Oozie setup tools used to shared Oozie lib to HDFS (AMBARI-7240)", + "min_version": "2.2.0.0" + }, + { + "name": "oozie_host_kerberos", + "description": "Oozie in secured clusters uses _HOST in Kerberos principal (AMBARI-9775)", + "min_version": "2.0.0.0", + "max_version": "2.2.0.0" + }, + { + "name": "falcon_extensions", + "description": "Falcon Extension", + "min_version": "2.5.0.0" + }, + { + "name": "hive_metastore_upgrade_schema", + "description": "Hive metastore upgrade schema support (AMBARI-11176)", + "min_version": "2.3.0.0" + }, + { + "name": "hive_server_interactive", + "description": "Hive server interactive support (AMBARI-15573)", + "min_version": "2.5.0.0" + }, + { + "name": "hive_webhcat_specific_configs", + "description": "Hive webhcat specific configurations support (AMBARI-12364)", + "min_version": "2.3.0.0" + }, + { + "name": "hive_purge_table", + "description": "Hive purge table support (AMBARI-12260)", + "min_version": "2.3.0.0" + }, + { + "name": "hive_server2_kerberized_env", + "description": "Hive server2 working on kerberized environment (AMBARI-13749)", + "min_version": "2.2.3.0", + "max_version": "2.2.5.0" + }, + { + "name": "hive_env_heapsize", + "description": "Hive heapsize property defined in hive-env (AMBARI-12801)", + "min_version": "2.2.0.0" + }, + { + "name": "ranger_kms_hsm_support", + "description": "Ranger KMS HSM support (AMBARI-15752)", + "min_version": "2.5.0.0" + }, + { + "name": "ranger_log4j_support", + "description": "Ranger supporting log-4j properties (AMBARI-15681)", + "min_version": "2.5.0.0" + }, + { + "name": "ranger_kerberos_support", + "description": "Ranger Kerberos support", + "min_version": "2.5.0.0" + }, + { + "name": "hive_metastore_site_support", + "description": "Hive Metastore site support", + "min_version": "2.5.0.0" + }, + { + "name": "ranger_usersync_password_jceks", + "description": "Saving Ranger Usersync credentials in jceks", + "min_version": "2.5.0.0" + }, + { + "name": "ranger_install_infra_client", + 
"description": "Ambari Infra Service support", + "min_version": "2.5.0.0" + }, + { + "name": "falcon_atlas_support_2_3", + "description": "Falcon Atlas integration support for 2.3 stack", + "min_version": "2.3.99.0", + "max_version": "2.4.0.0" + }, + { + "name": "falcon_atlas_support", + "description": "Falcon Atlas integration", + "min_version": "2.5.0.0" + }, + { + "name": "hbase_home_directory", + "description": "Hbase home directory in HDFS needed for HBASE backup", + "min_version": "2.5.0.0" + }, + { + "name": "spark_livy", + "description": "Livy as slave component of spark", + "min_version": "2.5.0.0" + }, + { + "name": "atlas_ranger_plugin_support", + "description": "Atlas Ranger plugin support", + "min_version": "2.5.0.0" + }, + { + "name": "atlas_conf_dir_in_path", + "description": "Prepend the Atlas conf dir (/etc/atlas/conf) to the classpath of Storm and Falcon", + "min_version": "2.3.0.0", + "max_version": "2.4.99.99" + }, + { + "name": "atlas_upgrade_support", + "description": "Atlas supports express and rolling upgrades", + "min_version": "2.5.0.0" + }, + { + "name": "atlas_hook_support", + "description": "Atlas support for hooks in Hive, Storm, Falcon, and Sqoop", + "min_version": "2.5.0.0" + }, + { + "name": "ranger_pid_support", + "description": "Ranger Service support pid generation AMBARI-16756", + "min_version": "2.5.0.0" + }, + { + "name": "ranger_kms_pid_support", + "description": "Ranger KMS Service support pid generation", + "min_version": "2.5.0.0" + }, + { + "name": "ranger_admin_password_change", + "description": "Allow ranger admin credentials to be specified during cluster creation (AMBARI-17000)", + "min_version": "2.5.0.0" + }, + { + "name": "storm_metrics_apache_classes", + "description": "Metrics sink for Storm that uses Apache class names", + "min_version": "2.5.0.0" + }, + { + "name": "spark_java_opts_support", + "description": "Allow Spark to generate java-opts file", + "min_version": "2.2.0.0", + "max_version": "2.4.0.0" + }, + { + "name": "atlas_hbase_setup", + "description": "Use script to create Atlas tables in Hbase and set permissions for Atlas user.", + "min_version": "2.5.0.0" + } + ] +} http://git-wip-us.apache.org/repos/asf/bigtop/blob/b1d707c2/bigtop-packages/src/common/ambari/ODPi/1.0/properties/stack_tools.json ---------------------------------------------------------------------- diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/properties/stack_tools.json b/bigtop-packages/src/common/ambari/ODPi/1.0/properties/stack_tools.json new file mode 100755 index 0000000..d1aab4b --- /dev/null +++ b/bigtop-packages/src/common/ambari/ODPi/1.0/properties/stack_tools.json @@ -0,0 +1,4 @@ +{ + "stack_selector": ["hdp-select", "/usr/bin/hdp-select", "hdp-select"], + "conf_selector": ["conf-select", "/usr/bin/conf-select", "conf-select"] +} \ No newline at end of file http://git-wip-us.apache.org/repos/asf/bigtop/blob/b1d707c2/bigtop-packages/src/common/ambari/ODPi/1.0/repos/repoinfo.xml ---------------------------------------------------------------------- diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/repos/repoinfo.xml b/bigtop-packages/src/common/ambari/ODPi/1.0/repos/repoinfo.xml new file mode 100755 index 0000000..ab4f25f --- /dev/null +++ b/bigtop-packages/src/common/ambari/ODPi/1.0/repos/repoinfo.xml @@ -0,0 +1,33 @@ + + + + + + http://repo.odpi.org/ODPi/1.0/centos-6/ + ODPi-1.0 + ODPi + + + + + http://repo.odpi.org/ODPi/1.0/ubuntu-14.04/apt + ODPi-1.0 + odpi + + + 
http://git-wip-us.apache.org/repos/asf/bigtop/blob/b1d707c2/bigtop-packages/src/common/ambari/ODPi/1.0/role_command_order.json ---------------------------------------------------------------------- diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/role_command_order.json b/bigtop-packages/src/common/ambari/ODPi/1.0/role_command_order.json new file mode 100755 index 0000000..ab56c7f --- /dev/null +++ b/bigtop-packages/src/common/ambari/ODPi/1.0/role_command_order.json @@ -0,0 +1,41 @@ +{ + "_comment" : "Record format:", + "_comment" : "blockedRole-blockedCommand: [blockerRole1-blockerCommand1, blockerRole2-blockerCommand2, ...]", + "general_deps" : { + "_comment" : "dependencies for all cases", + "MAPREDUCE_SERVICE_CHECK-SERVICE_CHECK": ["NODEMANAGER-START", "RESOURCEMANAGER-START"], + "ZOOKEEPER_SERVICE_CHECK-SERVICE_CHECK": ["ZOOKEEPER_SERVER-START"], + "ZOOKEEPER_QUORUM_SERVICE_CHECK-SERVICE_CHECK": ["ZOOKEEPER_SERVER-START"] + }, + "_comment" : "GLUSTERFS-specific dependencies", + "optional_glusterfs": { + "GLUSTERFS_SERVICE_CHECK-SERVICE_CHECK": ["PEERSTATUS-START"] + }, + "_comment" : "Dependencies that are used when GLUSTERFS is not present in cluster", + "optional_no_glusterfs": { + "SECONDARY_NAMENODE-START": ["NAMENODE-START"], + "RESOURCEMANAGER-START": ["NAMENODE-START", "DATANODE-START"], + "NODEMANAGER-START": ["NAMENODE-START", "DATANODE-START", "RESOURCEMANAGER-START"], + "HISTORYSERVER-START": ["NAMENODE-START", "DATANODE-START"], + "HDFS_SERVICE_CHECK-SERVICE_CHECK": ["NAMENODE-START", "DATANODE-START", + "SECONDARY_NAMENODE-START"], + "MAPREDUCE2_SERVICE_CHECK-SERVICE_CHECK": ["NODEMANAGER-START", + "RESOURCEMANAGER-START", "HISTORYSERVER-START", "YARN_SERVICE_CHECK-SERVICE_CHECK"], + "YARN_SERVICE_CHECK-SERVICE_CHECK": ["NODEMANAGER-START", "RESOURCEMANAGER-START"], + "RESOURCEMANAGER_SERVICE_CHECK-SERVICE_CHECK": ["RESOURCEMANAGER-START"], + "NAMENODE-STOP": ["RESOURCEMANAGER-STOP", "NODEMANAGER-STOP", + "HISTORYSERVER-STOP"], + "DATANODE-STOP": ["RESOURCEMANAGER-STOP", "NODEMANAGER-STOP", + "HISTORYSERVER-STOP"] + }, + "_comment" : "Dependencies that are used in HA NameNode cluster", + "namenode_optional_ha": { + "NAMENODE-START": ["ZKFC-START", "JOURNALNODE-START", "ZOOKEEPER_SERVER-START"], + "ZKFC-START": ["ZOOKEEPER_SERVER-START"] + }, + "_comment" : "Dependencies that are used in ResourceManager HA cluster", + "resourcemanager_optional_ha" : { + "RESOURCEMANAGER-START": ["ZOOKEEPER_SERVER-START"] + } +} + http://git-wip-us.apache.org/repos/asf/bigtop/blob/b1d707c2/bigtop-packages/src/common/ambari/ODPi/1.0/services/HDFS/metainfo.xml ---------------------------------------------------------------------- diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HDFS/metainfo.xml b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HDFS/metainfo.xml new file mode 100755 index 0000000..d6e30b7 --- /dev/null +++ b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HDFS/metainfo.xml @@ -0,0 +1,27 @@ +<metainfo> + <schemaVersion>2.0</schemaVersion> + <services> + <service> + <name>HDFS</name> + <version>2.7.1+odpi</version> + <extends>common-services/HDFS/2.1.0.2.0</extends> + </service> + </services> +</metainfo> http://git-wip-us.apache.org/repos/asf/bigtop/blob/b1d707c2/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/metainfo.xml ---------------------------------------------------------------------- diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/metainfo.xml b/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/metainfo.xml new file mode 100755 index 0000000..6458e29 --- /dev/null +++ b/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/metainfo.xml 
@@ -0,0 +1,33 @@ +<metainfo> + <schemaVersion>2.0</schemaVersion> + <services> + <service> + <name>YARN</name> + <version>2.7.1+odpi</version> + <extends>common-services/YARN/2.1.0.2.0</extends> + </service> + <service> + <name>MAPREDUCE2</name> + <version>2.7.1+odpi</version> + <extends>common-services/MAPREDUCE2/2.1.0.2.0.6.0</extends> + </service> + </services> +</metainfo> http://git-wip-us.apache.org/repos/asf/bigtop/blob/b1d707c2/bigtop-packages/src/common/ambari/ODPi/1.0/services/ZOOKEEPER/metainfo.xml ---------------------------------------------------------------------- diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/ZOOKEEPER/metainfo.xml b/bigtop-packages/src/common/ambari/ODPi/1.0/services/ZOOKEEPER/metainfo.xml new file mode 100755 index 0000000..0a89dc2 --- /dev/null +++ b/bigtop-packages/src/common/ambari/ODPi/1.0/services/ZOOKEEPER/metainfo.xml @@ -0,0 +1,27 @@ +<metainfo> + <schemaVersion>2.0</schemaVersion> + <services> + <service> + <name>ZOOKEEPER</name> + <version>3.4.6+odpi</version> + <extends>common-services/ZOOKEEPER/3.4.5</extends> + </service> + </services> +</metainfo>
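For context: the functools.partial binding at the end of params.py above is what lets service code call HdfsResource with only per-request arguments — cluster-wide settings (user, keytab, kinit path, hadoop bin/conf dirs, hdfs-site, default FS) are pre-bound — exactly as create_dirs() in shared_initialization.py does. A minimal sketch of that calling pattern; the "/apps/example" path is hypothetical:

    import params

    # Only per-call fields are supplied; everything else comes from the partial.
    params.HdfsResource("/apps/example",
                        type="directory",
                        action="create_on_execute",
                        owner=params.hdfs_user,
                        mode=0755)
    params.HdfsResource(None, action="execute")  # flush the queued operations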