ambari-commits mailing list archives

From jonathanhur...@apache.org
Subject [4/4] ambari git commit: AMBARI-9408 - JournalNode failed to restart (jonathanhurley)
Date Thu, 29 Jan 2015 22:20:22 GMT
AMBARI-9408 - JournalNode failed to restart (jonathanhurley)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/72dcffc3
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/72dcffc3
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/72dcffc3

Branch: refs/heads/trunk
Commit: 72dcffc313b022be819ec5df9b2bfa7c5fe94fe0
Parents: a4f16b9
Author: Jonathan Hurley <jhurley@hortonworks.com>
Authored: Thu Jan 29 17:17:02 2015 -0500
Committer: Jonathan Hurley <jhurley@hortonworks.com>
Committed: Thu Jan 29 17:17:02 2015 -0500

----------------------------------------------------------------------
 .../package/scripts/journalnode_upgrade.py      |  20 +-
 .../HDFS/2.1.0.2.0/package/scripts/params.py    |   2 -
 .../state/cluster/ClusterDeadlockTest.java      |   4 +-
 .../stacks/2.0.6/HDFS/test_journalnode.py       |  75 +-
 .../journalnode-upgrade-hdfs-secure.json        | 277 ++++++
 .../2.2/configs/journalnode-upgrade-jmx.json    | 889 +++++++++++++++++
 .../journalnode-upgrade-namenode-jmx.json       | 997 +++++++++++++++++++
 .../stacks/2.2/configs/journalnode-upgrade.json | 277 ++++++
 .../src/test/python/stacks/utils/RMFTestCase.py |   6 +-
 9 files changed, 2527 insertions(+), 20 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/72dcffc3/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/journalnode_upgrade.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/journalnode_upgrade.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/journalnode_upgrade.py
index 4e85130..2881c3f 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/journalnode_upgrade.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/journalnode_upgrade.py
@@ -18,14 +18,12 @@ limitations under the License.
 """
 
 import time
-import json
 
 from resource_management.core.logger import Logger
 from resource_management.core.resources.system import Execute
-from resource_management.libraries.functions.format import format
 from resource_management.libraries.functions.default import default
 from resource_management.core.exceptions import Fail
-from utils import get_jmx_data, get_port
+from utils import get_jmx_data
 
 
 
@@ -97,25 +95,19 @@ def ensure_jns_have_new_txn(nodes, last_txn_id):
   num_of_jns = len(nodes)
   actual_txn_ids = {}
   jns_updated = 0
-  protocol = 'http'
 
-  journal_node_address = default("/configurations/hdfs-site/dfs.journalnode.https-address", None)
-  if journal_node_address:
-    protocol = "https"
-  else:
-    journal_node_address = default("/configurations/hdfs-site/dfs.journalnode.http-address", None)
-
-  if not journal_node_address:
+  if params.journalnode_address is None:
     raise Fail("Could not retrieve Journal node address")
 
-  jn_port = get_port(journal_node_address)    # default is 8480, encrypted is 8481
-  if not jn_port:
+  if params.journalnode_port is None:
     raise Fail("Could not retrieve Journalnode port")
 
   time_out_secs = 3 * 60
   step_time_secs = 10
   iterations = int(time_out_secs/step_time_secs)
 
+  protocol = "https" if params.https_only else "http"
+
   Logger.info("Checking if all Journalnodes are updated.")
   for i in range(iterations):
     Logger.info('Try %d out of %d' % (i+1, iterations))
@@ -129,7 +121,7 @@ def ensure_jns_have_new_txn(nodes, last_txn_id):
       if node in actual_txn_ids and actual_txn_ids[node] and actual_txn_ids[node] >= last_txn_id:
         continue
 
-      url = '%s://%s:%s' % (protocol, node, jn_port)
+      url = '%s://%s:%s' % (protocol, node, params.journalnode_port)
       data = get_jmx_data(url, 'Journal-', 'LastWrittenTxId')
       if data:
         actual_txn_ids[node] = int(data)
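
For context, the change above moves endpoint discovery out of ensure_jns_have_new_txn and into params.py, then polls each JournalNode over the selected protocol. A minimal, self-contained sketch of that polling pattern follows; it is illustrative only — fetch_last_written_txn_id is a hypothetical stand-in for utils.get_jmx_data (whose implementation is not shown in this diff), and the 3-minute/10-second timings simply mirror the constants in the hunk:

    import json
    import time
    import urllib2

    def fetch_last_written_txn_id(url):
        # hypothetical helper: read the JournalNode /jmx payload and pull
        # LastWrittenTxId out of the Journal-* bean
        jmx = json.loads(urllib2.urlopen(url + "/jmx", timeout=10).read())
        for bean in jmx.get("beans", []):
            if "Journal-" in bean.get("name", "") and "LastWrittenTxId" in bean:
                return int(bean["LastWrittenTxId"])
        return None

    def wait_for_journalnodes(nodes, last_txn_id, protocol, port,
                              timeout_secs=180, step_secs=10):
        # poll every JournalNode until each reports a transaction id at least
        # as new as the one captured from the NameNode, or give up
        caught_up = set()
        for _ in range(timeout_secs / step_secs):
            for node in nodes:
                if node in caught_up:
                    continue
                txn_id = fetch_last_written_txn_id("%s://%s:%s" % (protocol, node, port))
                if txn_id is not None and txn_id >= last_txn_id:
                    caught_up.add(node)
            if len(caught_up) == len(nodes):
                return True
            time.sleep(step_secs)
        return False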

http://git-wip-us.apache.org/repos/asf/ambari/blob/72dcffc3/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params.py
index b185058..472d684 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params.py
@@ -209,7 +209,6 @@ if dfs_ha_enabled:
       namenode_id = nn_id
       namenode_rpc = nn_host
 
-
 if dfs_http_policy == "HTTPS_ONLY":
   https_only = True
   journalnode_address = default('/configurations/hdfs-site/dfs.journalnode.https-address', None)
@@ -312,7 +311,6 @@ has_ranger_admin = not len(ranger_admin_hosts) == 0
 if hdp_stack_version != "" and compare_versions(hdp_stack_version, '2.2') >= 0:
   # setting flag value for ranger hdfs plugin
   enable_ranger_hdfs = False
-  user_input = config['configurations']['ranger-hdfs-plugin-properties']['ranger-hdfs-plugin-enabled']
   if  user_input.lower() == 'yes':
     enable_ranger_hdfs = True
   elif user_input.lower() == 'no':
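
The params.py side of the change (only partly visible in this hunk) keys the JournalNode endpoint off dfs.http.policy. Conceptually the selection works like the hedged sketch below; select_journalnode_endpoint and its port-splitting are illustrative assumptions, not the actual params.py code, and the example values come from the hdfs-site block of the test configs added further down:

    def select_journalnode_endpoint(hdfs_site):
        # HTTPS_ONLY -> use dfs.journalnode.https-address (port 8481 by default),
        # otherwise fall back to dfs.journalnode.http-address (port 8480)
        https_only = hdfs_site.get("dfs.http.policy") == "HTTPS_ONLY"
        if https_only:
            address = hdfs_site.get("dfs.journalnode.https-address")
        else:
            address = hdfs_site.get("dfs.journalnode.http-address")
        port = address.split(":")[-1] if address else None
        return https_only, address, port

    # -> (True, "0.0.0.0:8481", "8481") for the secure test config
    print select_journalnode_endpoint({
        "dfs.http.policy": "HTTPS_ONLY",
        "dfs.journalnode.http-address": "0.0.0.0:8480",
        "dfs.journalnode.https-address": "0.0.0.0:8481",
    })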

http://git-wip-us.apache.org/repos/asf/ambari/blob/72dcffc3/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterDeadlockTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterDeadlockTest.java b/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterDeadlockTest.java
index 7f9248b..85a3c64 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterDeadlockTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterDeadlockTest.java
@@ -120,7 +120,7 @@ public class ClusterDeadlockTest {
    *
    * @throws Exception
    */
-  @Test(timeout = 15000)
+  @Test(timeout = 30000)
   public void testDeadlockBetweenImplementations() throws Exception {
     Cluster cluster = clusters.getCluster("c1");
     Service service = cluster.getService("HDFS");
@@ -157,7 +157,7 @@ public class ClusterDeadlockTest {
    *
    * @throws Exception
    */
-  @Test(timeout = 15000)
+  @Test(timeout = 30000)
   public void testAddingHostComponentsWhileReading() throws Exception {
     Cluster cluster = clusters.getCluster("c1");
     Service service = cluster.getService("HDFS");

http://git-wip-us.apache.org/repos/asf/ambari/blob/72dcffc3/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_journalnode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_journalnode.py b/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_journalnode.py
index 79f52ea..2414214 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_journalnode.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_journalnode.py
@@ -17,13 +17,15 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
 '''
+import os
 from stacks.utils.RMFTestCase import *
-from ambari_commons import OSCheck
 from mock.mock import MagicMock, patch
 
+
 class TestJournalnode(RMFTestCase):
   COMMON_SERVICES_PACKAGE_DIR = "HDFS/2.1.0.2.0/package"
   STACK_VERSION = "2.0.6"
+  UPGRADE_STACK_VERSION = "2.2"
 
   def test_configure_default(self):
     self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/journalnode.py",
@@ -244,3 +246,74 @@ class TestJournalnode(RMFTestCase):
                               content = Template('slaves.j2'),
                               owner = 'root',
                               )
+
+
+  @patch('time.sleep')
+  @patch("urllib2.urlopen")
+  def test_post_rolling_restart(self, urlopen_mock, time_mock):
+    # load the NN and JN JMX files so that the urllib2.urlopen mock has data
+    # to return
+    journalnode_jmx_file = os.path.join(RMFTestCase._getStackTestsFolder(),
+      self.UPGRADE_STACK_VERSION, "configs", "journalnode-upgrade-jmx.json" )
+
+    namenode_jmx_file = os.path.join(RMFTestCase._getStackTestsFolder(),
+      self.UPGRADE_STACK_VERSION, "configs", "journalnode-upgrade-namenode-jmx.json" )
+
+    journalnode_jmx = open(journalnode_jmx_file, 'r').read()
+    namenode_jmx = open(namenode_jmx_file, 'r').read()
+
+    url_stream_mock = MagicMock()
+    url_stream_mock.read.side_effect = [namenode_jmx, journalnode_jmx,
+      journalnode_jmx, journalnode_jmx]
+
+    urlopen_mock.return_value = url_stream_mock
+
+    # run the post_rolling_restart using the data from above
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/journalnode.py",
+      classname = "JournalNode", command = "post_rolling_restart",
+      config_file = "journalnode-upgrade.json",
+      hdp_stack_version = self.UPGRADE_STACK_VERSION,
+      target = RMFTestCase.TARGET_COMMON_SERVICES )
+
+    # ensure that the mock was called with the http-style version of the URL
+    urlopen_mock.assert_called
+    urlopen_mock.assert_called_with( "http://c6403.ambari.apache.org:8480/jmx" )
+
+    url_stream_mock.reset_mock()
+    url_stream_mock.read.side_effect = [namenode_jmx, journalnode_jmx,
+      journalnode_jmx, journalnode_jmx]
+
+    urlopen_mock.return_value = url_stream_mock
+
+    # now try with HDFS on SSL
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/journalnode.py",
+      classname = "JournalNode", command = "post_rolling_restart",
+      config_file = "journalnode-upgrade-hdfs-secure.json",
+      hdp_stack_version = self.UPGRADE_STACK_VERSION,
+      target = RMFTestCase.TARGET_COMMON_SERVICES )
+
+    # ensure that the mock was called with the http-style version of the URL
+    urlopen_mock.assert_called
+    urlopen_mock.assert_called_with( "https://c6403.ambari.apache.org:8481/jmx" )
+
+
+
+  @patch('time.sleep')
+  @patch("urllib2.urlopen")
+  def test_post_rolling_restart_bad_jmx(self, urlopen_mock, time_mock):
+    urlopen_mock_response = '{ "bad_data" : "gonna_mess_you_up" }'
+
+    url_stream_mock = MagicMock()
+    url_stream_mock.read.side_effect = [urlopen_mock_response]
+    urlopen_mock.return_value = url_stream_mock
+
+    try:
+      self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/journalnode.py",
+        classname = "JournalNode", command = "post_rolling_restart",
+        config_file = "journalnode-upgrade.json",
+        hdp_stack_version = self.UPGRADE_STACK_VERSION,
+        target = RMFTestCase.TARGET_COMMON_SERVICES )
+
+      self.fail("Expected a failure since the JMX JSON for JournalTransactionInfo was missing")
+    except:
+      pass
\ No newline at end of file
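
As an aside, the MagicMock/side_effect technique the new tests rely on can be exercised outside the RMFTestCase harness. A minimal, self-contained sketch follows; the function under test and the canned payloads are made up for illustration and are much smaller than the journalnode-upgrade-*.json fixtures above:

    import json
    import unittest
    import urllib2

    from mock.mock import MagicMock, patch

    def read_last_written_txn_id(url):
        # toy function under test: parse a JournalNode-style /jmx payload
        jmx = json.loads(urllib2.urlopen(url).read())
        return int(jmx["beans"][0]["LastWrittenTxId"])

    class TestJmxMocking(unittest.TestCase):

        @patch("urllib2.urlopen")
        def test_read_last_written_txn_id(self, urlopen_mock):
            # each read() pops the next canned payload, mimicking successive
            # JMX responses during the upgrade check
            stream_mock = MagicMock()
            stream_mock.read.side_effect = [
                '{ "beans": [ { "LastWrittenTxId": "100" } ] }',
                '{ "beans": [ { "LastWrittenTxId": "101" } ] }',
            ]
            urlopen_mock.return_value = stream_mock

            self.assertEqual(100, read_last_written_txn_id("http://c6403.ambari.apache.org:8480/jmx"))
            self.assertEqual(101, read_last_written_txn_id("http://c6403.ambari.apache.org:8480/jmx"))
            urlopen_mock.assert_called_with("http://c6403.ambari.apache.org:8480/jmx")

    if __name__ == "__main__":
        unittest.main()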

http://git-wip-us.apache.org/repos/asf/ambari/blob/72dcffc3/ambari-server/src/test/python/stacks/2.2/configs/journalnode-upgrade-hdfs-secure.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.2/configs/journalnode-upgrade-hdfs-secure.json b/ambari-server/src/test/python/stacks/2.2/configs/journalnode-upgrade-hdfs-secure.json
new file mode 100644
index 0000000..0686c57
--- /dev/null
+++ b/ambari-server/src/test/python/stacks/2.2/configs/journalnode-upgrade-hdfs-secure.json
@@ -0,0 +1,277 @@
+{
+    "configuration_attributes": {
+        "webhcat-env": {},
+        "hcat-env": {},
+        "hdfs-site": {
+            "final": {
+                "dfs.support.append": "true",
+                "dfs.namenode.http-address": "true"
+            }
+        },
+        "hive-log4j": {},
+        "hive-site": {},
+        "hive-exec-log4j": {},
+        "core-site": {
+            "final": {
+                "fs.defaultFS": "true"
+            }
+        },
+        "hive-env": {},
+        "webhcat-site": {},
+        "cluster-env": {}
+    },
+    "commandParams": {
+        "service_package_folder": "common-services/HDFS/2.1.0.2.0/package",
+        "script": "scripts/journalnode.py",
+        "hooks_folder": "HDP/2.0.6/hooks",
+        "restart_type": "rolling_upgrade",
+        "version": "2.2.1.0-2065",
+        "command_timeout": "900",
+        "script_type": "PYTHON"
+    },
+    "roleCommand": "CUSTOM_COMMAND",
+    "kerberosCommandParams": [],
+    "clusterName": "c1",
+    "hostname": "c6402.ambari.apache.org",
+    "hostLevelParams": {
+        "jdk_location": "http://hw10897.ix:8080/resources/",
+        "ambari_db_rca_password": "mapred",
+        "java_home": "/usr/jdk64/jdk1.7.0_45",
+        "ambari_db_rca_url": "jdbc:postgresql://hw10897.ix/ambarirca",
+        "stack_name": "HDP",
+        "custom_command": "RESTART",
+        "oracle_jdbc_url": "http://hw10897.ix:8080/resources//ojdbc6.jar",
+        "repo_info": "[{\"baseUrl\":\"http://repo.ambari.apache.org/hdp/centos6/HDP-2.2.0.0/\",\"osType\":\"redhat6\",\"repoId\":\"HDP-2.2\",\"repoName\":\"HDP\",\"defaultBaseUrl\":\"http://public-repo-1.hortonworks.com/HDP/centos6/2.x/GA/2.2.0.0\",\"latestBaseUrl\":\"http://public-repo-1.hortonworks.com/HDP/centos6/2.x/GA/2.2.0.0\"},{\"baseUrl\":\"http://repo.ambari.apache.org/hdp/centos6/HDP-UTILS-1.1.0.20/\",\"osType\":\"redhat6\",\"repoId\":\"HDP-UTILS-1.1.0.20\",\"repoName\":\"HDP-UTILS\",\"defaultBaseUrl\":\"http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.20/repos/centos6\",\"latestBaseUrl\":\"http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.20/repos/centos6\"}]",
+        "group_list": "[\"hadoop\",\"nobody\",\"users\"]",
+        "agentCacheDir": "/var/lib/ambari-agent/cache",
+        "stack_version": "2.2",
+        "db_name": "ambari",
+        "ambari_db_rca_driver": "org.postgresql.Driver",
+        "jdk_name": "jdk-7u45-linux-x64.tar.gz",
+        "ambari_db_rca_username": "mapred",
+        "db_driver_filename": "mysql-connector-java.jar",
+        "user_list": "[\"nobody\",\"hive\",\"mapred\",\"ambari-qa\",\"zookeeper\",\"tez\",\"hdfs\",\"yarn\",\"hcat\"]",
+        "mysql_jdbc_url": "http://hw10897.ix:8080/resources//mysql-connector-java.jar"
+    },
+    "commandType": "EXECUTION_COMMAND",
+    "roleParams": {
+        "component_category": "MASTER"
+    },
+    "serviceName": "HDFS",
+    "role": "JOURNALNODE",
+    "forceRefreshConfigTags": [],
+    "taskId": 77,
+    "public_hostname": "c6402.ambari.apache.org",
+    "configurations": {
+        "hdfs-site": {
+            "dfs.namenode.checkpoint.period": "21600",
+            "dfs.namenode.avoid.write.stale.datanode": "true",
+            "dfs.namenode.startup.delay.block.deletion.sec": "3600",
+            "dfs.namenode.checkpoint.txns": "1000000",
+            "dfs.block.access.token.enable": "true",
+            "dfs.support.append": "true",
+            "dfs.datanode.address": "0.0.0.0:50010",
+            "dfs.cluster.administrators": " hdfs",
+            "dfs.replication": "3",
+            "dfs.datanode.balance.bandwidthPerSec": "6250000",
+            "dfs.namenode.safemode.threshold-pct": "1.0f",
+            "dfs.namenode.checkpoint.edits.dir": "${dfs.namenode.checkpoint.dir}",
+            "dfs.permissions.enabled": "true",
+            "dfs.client.read.shortcircuit": "true",
+            "dfs.namenode.https-address": "c6401.ambari.apache.org:50470",
+            "dfs.journalnode.edits.dir": "/hadoop/hdfs/journalnode",
+            "dfs.blocksize": "134217728",
+            "dfs.datanode.max.transfer.threads": "4096",
+            "dfs.heartbeat.interval": "3",
+            "dfs.webhdfs.enabled": "true",
+            "dfs.namenode.handler.count": "40",
+            "dfs.namenode.checkpoint.dir": "/hadoop/hdfs/namesecondary",
+            "fs.permissions.umask-mode": "022",
+            "dfs.datanode.http.address": "0.0.0.0:50075",
+            "dfs.datanode.ipc.address": "0.0.0.0:8010",
+            "dfs.datanode.data.dir": "/hadoop/hdfs/data",
+            "dfs.namenode.http-address": "c6401.ambari.apache.org:50070",
+            "dfs.blockreport.initialDelay": "120",
+            "dfs.datanode.failed.volumes.tolerated": "0",
+            "dfs.namenode.accesstime.precision": "0",
+            "dfs.datanode.https.address": "0.0.0.0:50475",
+            "dfs.namenode.avoid.read.stale.datanode": "true",
+            "dfs.namenode.secondary.http-address": "c6402.ambari.apache.org:50090",
+            "dfs.namenode.stale.datanode.interval": "30000",
+            "dfs.datanode.du.reserved": "1073741824",
+            "dfs.client.read.shortcircuit.streams.cache.size": "4096",
+            "dfs.http.policy": "HTTPS_ONLY",
+            "dfs.permissions.superusergroup": "hdfs",
+            "dfs.https.port": "50470",
+            "dfs.journalnode.http-address": "0.0.0.0:8480",
+            "dfs.journalnode.https-address": "0.0.0.0:8481",
+            "dfs.domain.socket.path": "/var/lib/hadoop-hdfs/dn_socket",
+            "dfs.namenode.write.stale.datanode.ratio": "1.0f",
+            "dfs.hosts.exclude": "/etc/hadoop/conf/dfs.exclude",
+            "dfs.datanode.data.dir.perm": "750",
+            "dfs.namenode.name.dir.restore": "true",
+            "dfs.replication.max": "50",
+            "dfs.namenode.name.dir": "/hadoop/hdfs/namenode"
+        },
+        "hadoop-policy": {
+            "security.job.client.protocol.acl": "*",
+            "security.job.task.protocol.acl": "*",
+            "security.datanode.protocol.acl": "*",
+            "security.namenode.protocol.acl": "*",
+            "security.client.datanode.protocol.acl": "*",
+            "security.inter.tracker.protocol.acl": "*",
+            "security.refresh.usertogroups.mappings.protocol.acl": "hadoop",
+            "security.client.protocol.acl": "*",
+            "security.refresh.policy.protocol.acl": "hadoop",
+            "security.admin.operations.protocol.acl": "hadoop",
+            "security.inter.datanode.protocol.acl": "*"
+        },
+        "core-site": {
+            "io.serializations": "org.apache.hadoop.io.serializer.WritableSerialization",
+            "hadoop.proxyuser.hcat.groups": "users",
+            "proxyuser_group": "users",
+            "fs.trash.interval": "360",
+            "hadoop.proxyuser.hcat.hosts": "c6402.ambari.apache.org",
+            "hadoop.proxyuser.hive.groups": "users",
+            "hadoop.security.authentication": "simple",
+            "io.compression.codecs": "org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec,org.apache.hadoop.io.compress.SnappyCodec",
+            "mapreduce.jobtracker.webinterface.trusted": "false",
+            "hadoop.security.authorization": "false",
+            "fs.defaultFS": "hdfs://c6401.ambari.apache.org:8020",
+            "hadoop.http.authentication.simple.anonymous.allowed": "true",
+            "ipc.client.connect.max.retries": "50",
+            "ipc.client.idlethreshold": "8000",
+            "io.file.buffer.size": "131072",
+            "ipc.server.tcpnodelay": "true",
+            "hadoop.proxyuser.hive.hosts": "c6402.ambari.apache.org",
+            "hadoop.security.auth_to_local": "\n        RULE:[2:$1@$0]([rn]m@.*)s/.*/yarn/\n
       RULE:[2:$1@$0](jhs@.*)s/.*/mapred/\n        RULE:[2:$1@$0]([nd]n@.*)s/.*/hdfs/\n  
     RULE:[2:$1@$0](hm@.*)s/.*/hbase/\n        RULE:[2:$1@$0](rs@.*)s/.*/hbase/\n        DEFAULT",
+            "ipc.client.connection.maxidletime": "30000"
+        },
+        "hadoop-env": {
+            "dtnode_heapsize": "1024m",
+            "namenode_opt_maxnewsize": "200m",
+            "hdfs_log_dir_prefix": "/var/log/hadoop",
+            "namenode_heapsize": "1024m",
+            "proxyuser_group": "users",
+            "hadoop_pid_dir_prefix": "/var/run/hadoop",
+            "content": "\n# Set Hadoop-specific environment variables here.\n\n# The only
required environment variable is JAVA_HOME.  All others are\n# optional.  When running a distributed
configuration it is best to\n# set JAVA_HOME in this file, so that it is correctly defined
on\n# remote nodes.\n\n# The java implementation to use.  Required.\nexport JAVA_HOME={{java_home}}\nexport
HADOOP_HOME_WARN_SUPPRESS=1\n\n# Hadoop home directory\nexport HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n\n#
Hadoop Configuration Directory\n\n{# this is different for HDP1 #}\n# Path to jsvc required
by secure HDP 2.0 datanode\nexport JSVC_HOME={{jsvc_path}}\n\n\n# The maximum amount of heap
to use, in MB. Default is 1000.\nexport HADOOP_HEAPSIZE=\"{{hadoop_heapsize}}\"\n\nexport
HADOOP_NAMENODE_INIT_HEAPSIZE=\"-Xms{{namenode_heapsize}}\"\n\n# Extra Java runtime options.
 Empty by default.\nexport HADOOP_OPTS=\"-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}\"\n\n#
Command specific options appende
 d to HADOOP_OPTS when specified\nexport HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8
-XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}}
-XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSize={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}}
-Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails
-XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}}
-Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_NAMENODE_OPTS}\"\nHADOOP_JOBTRACKER_OPTS=\"-server
-XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log
-XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date
+'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStam
 ps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT
-Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}\"\n\nHADOOP_TASKTRACKER_OPTS=\"-server
-Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console
${HADOOP_TASKTRACKER_OPTS}\"\nexport HADOOP_DATANODE_OPTS=\"-server -XX:ParallelGCThreads=4
-XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m
-XX:MaxNewSize=200m -XX:PermSize=128m -XX:MaxPermSize=256m -Xloggc:/var/log/hadoop/$USER/gc.log-`date
+'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps
-Xms{{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT
${HADOOP_DATANODE_OPTS}\"\nHADOOP_BALANCER_OPTS=\"-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}\"\n\nexport
HADOOP_SECONDARYNAMENODE_OPTS=$HADOOP_NAMENODE_OPTS\n\n# The following
  applies to multiple commands (fs, dfs, fsck, distcp etc)\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m
-XX:MaxPermSize=512m $HADOOP_CLIENT_OPTS\"\n\n# On secure datanodes, user to run the datanode
as after dropping privileges\nexport HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER:-{{hadoop_secure_dn_user}}}\n\n#
Extra ssh options.  Empty by default.\nexport HADOOP_SSH_OPTS=\"-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR\"\n\n#
Where log files are stored.  $HADOOP_HOME/logs by default.\nexport HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER\n\n#
History server logs\nexport HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER\n\n# Where
log files are stored in the secure data environment.\nexport HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n#
File naming remote slave hosts.  $HADOOP_HOME/conf/slaves by default.\n# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves\n\n#
host:path where hadoop code should be rsync'd from.  Unset by default.\n# export HAD
 OOP_MASTER=master:/home/$USER/src/hadoop\n\n# Seconds to sleep between slave commands.  Unset
by default.  This\n# can be useful in large clusters, where, e.g., slave rsyncs can\n# otherwise
arrive faster than the master can service them.\n# export HADOOP_SLAVE_SLEEP=0.1\n\n# The
directory where pid files are stored. /tmp by default.\nexport HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER\nexport
HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# History server
pid\nexport HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER\n\nYARN_RESOURCEMANAGER_OPTS=\"-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY\"\n\n#
A string representing this instance of hadoop. $USER by default.\nexport HADOOP_IDENT_STRING=$USER\n\n#
The scheduling priority for daemon processes.  See 'man nice'.\n\n# export HADOOP_NICENESS=10\n\n#
Use libraries from standard classpath\nJAVA_JDBC_LIBS=\"\"\n#Add libraries required by mysql
connector\nfor jarFile in `ls /usr/share/
 java/*mysql* 2>/dev/null`\ndo\n  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\ndone\n# Add
libraries required by oracle connector\nfor jarFile in `ls /usr/share/java/*ojdbc* 2>/dev/null`\ndo\n
 JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\ndone\n# Add libraries required by nodemanager\nMAPREDUCE_LIBS={{mapreduce_libs_path}}\nexport
HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}:${MAPREDUCE_LIBS}\n\n# added to the
HADOOP_CLASSPATH\nif [ -d \"/usr/hdp/current/tez-client\" ]; then\n  if [ -d \"/etc/tez/conf/\"
]; then\n    # When using versioned RPMs, the tez-client will be a symlink to the current
folder of tez in HDP.\n    export HADOOP_CLASSPATH=${HADOOP_CLASSPATH}:/usr/hdp/current/tez-client/*:/usr/hdp/current/tez-client/lib/*:/etc/tez/conf/\n
 fi\nfi\n\n\n# Setting path to hdfs command line\nexport HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}\n\n#
Mostly required for hadoop 2.0\nexport JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}\n\nexport HADOOP_OPTS=\"-Dhdp.version=$HDP_VERSION
$HADOOP_
 OPTS\"",
+            "hdfs_user": "hdfs",
+            "namenode_opt_newsize": "200m",
+            "dfs.datanode.data.dir.mount.file": "/etc/hadoop/conf/dfs_data_dir_mount.hist",
+            "hadoop_root_logger": "INFO,RFA",
+            "hadoop_heapsize": "1024",
+            "namenode_opt_maxpermsize": "256m",
+            "namenode_opt_permsize": "128m"
+        },
+        "cluster-env": {
+            "security_enabled": "false",
+            "pig_tar_source": "/usr/hdp/current/pig-client/pig.tar.gz",
+            "hadoop-streaming_tar_source": "/usr/hdp/current/hadoop-mapreduce-client/hadoop-streaming.jar",
+            "sqoop_tar_destination_folder": "hdfs:///hdp/apps/{{ hdp_stack_version }}/sqoop/",
+            "hadoop-streaming_tar_destination_folder": "hdfs:///hdp/apps/{{ hdp_stack_version
}}/mapreduce/",
+            "ignore_groupsusers_create": "false",
+            "kerberos_domain": "EXAMPLE.COM",
+            "hive_tar_destination_folder": "hdfs:///hdp/apps/{{ hdp_stack_version }}/hive/",
+            "tez_tar_source": "/usr/hdp/current/tez-client/lib/tez.tar.gz",
+            "mapreduce_tar_source": "/usr/hdp/current/hadoop-client/mapreduce.tar.gz",
+            "hive_tar_source": "/usr/hdp/current/hive-client/hive.tar.gz",
+            "tez_tar_destination_folder": "hdfs:///hdp/apps/{{ hdp_stack_version }}/tez/",
+            "user_group": "hadoop",
+            "sqoop_tar_source": "/usr/hdp/current/sqoop-client/sqoop.tar.gz",
+            "smokeuser": "ambari-qa",
+            "mapreduce_tar_destination_folder": "hdfs:///hdp/apps/{{ hdp_stack_version }}/mapreduce/",
+            "pig_tar_destination_folder": "hdfs:///hdp/apps/{{ hdp_stack_version }}/pig/"
+        },
+        "ranger-hive-plugin-properties" : {
+            "ranger-hive-plugin-enabled":"yes"
+        }
+    },
+    "configurationTags": {
+        "webhcat-env": {
+            "tag": "version1"
+        },
+        "hcat-env": {
+            "tag": "version1"
+        },
+        "hdfs-site": {
+            "tag": "version1"
+        },
+        "hadoop-policy": {
+            "tag": "version1"
+        },
+        "hive-exec-log4j": {
+            "tag": "version1"
+        },
+        "core-site": {
+            "tag": "version1"
+        },
+        "hive-env": {
+            "tag": "version1"
+        },
+        "hadoop-env": {
+            "tag": "version1"
+        },
+        "hive-site": {
+            "tag": "version1"
+        },
+        "webhcat-site": {
+            "tag": "version1"
+        },
+        "hive-log4j": {
+            "tag": "version1"
+        },
+        "hiveserver2-site": {
+            "tag": "version1"
+        },
+        "cluster-env": {
+            "tag": "version1"
+        }
+    },
+    "commandId": "6-5",
+    "clusterHostInfo": {
+        "journalnode_hosts": [
+          "c6401.ambari.apache.org",
+          "c6402.ambari.apache.org",
+          "c6403.ambari.apache.org"
+        ],
+        "snamenode_host": [
+            "c6402.ambari.apache.org"
+        ],
+        "nm_hosts": [
+            "c6403.ambari.apache.org"
+        ],
+        "app_timeline_server_hosts": [
+            "c6402.ambari.apache.org"
+        ],
+        "all_ping_ports": [
+            "8670",
+            "8670",
+            "8670"
+        ],
+        "all_hosts": [
+            "c6403.ambari.apache.org",
+            "c6401.ambari.apache.org",
+            "c6402.ambari.apache.org"
+        ],
+        "slave_hosts": [
+            "c6403.ambari.apache.org"
+        ],
+        "namenode_host": [
+            "c6401.ambari.apache.org"
+        ],
+        "ambari_server_host": [
+            "hw10897.ix"
+        ],
+        "zookeeper_hosts": [
+            "c6403.ambari.apache.org",
+            "c6401.ambari.apache.org",
+            "c6402.ambari.apache.org"
+        ],
+        "hs_host": [
+            "c6402.ambari.apache.org"
+        ],
+        "hive_server_host": [
+            "c6402.ambari.apache.org"
+        ]
+    }
+}

