ambari-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From jlun...@apache.org
Subject ambari git commit: AMBARI-13645: HAWQ service requires additional HDFS config properties to be defined (jluniya)
Date Thu, 29 Oct 2015 22:41:07 GMT
Repository: ambari
Updated Branches:
  refs/heads/branch-2.1.2 2f87dcbf0 -> 75e2a2653


AMBARI-13645: HAWQ service requires additional HDFS config properties to be defined (jluniya)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/75e2a265
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/75e2a265
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/75e2a265

Branch: refs/heads/branch-2.1.2
Commit: 75e2a2653570697f2fc8c1d98ae9382751380a44
Parents: 2f87dcb
Author: Jayush Luniya <jluniya@hortonworks.com>
Authored: Thu Oct 29 15:22:17 2015 -0700
Committer: Jayush Luniya <jluniya@hortonworks.com>
Committed: Thu Oct 29 15:40:52 2015 -0700

----------------------------------------------------------------------
 .../GenerateStackDefinition.py                  |  10 +-
 .../services/HDFS/configuration/core-site.xml   |  73 ++++++++++++
 .../services/HDFS/configuration/hdfs-site.xml   | 116 +++++++++++++++++++
 3 files changed, 197 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/75e2a265/ambari-common/src/main/python/pluggable_stack_definition/GenerateStackDefinition.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/pluggable_stack_definition/GenerateStackDefinition.py b/ambari-common/src/main/python/pluggable_stack_definition/GenerateStackDefinition.py
index 3038f90..fece46b 100644
--- a/ambari-common/src/main/python/pluggable_stack_definition/GenerateStackDefinition.py
+++ b/ambari-common/src/main/python/pluggable_stack_definition/GenerateStackDefinition.py
@@ -474,11 +474,13 @@ class GeneratorHelper(object):
 
   def copy_stacks(self):
     original_folder = os.path.join(self.resources_folder, 'stacks', self.config_data.baseStackName)
+    partial_target_folder = os.path.join(self.resources_folder, 'stacks', self.config_data.stackName)
     target_folder = os.path.join(self.output_folder, 'stacks', self.config_data.stackName)
 
     for stack in self.config_data.versions:
       original_stack = os.path.join(original_folder, stack.baseVersion)
       target_stack = os.path.join(target_folder, stack.version)
+      partial_target_stack = os.path.join(partial_target_folder, stack.version)
 
       desired_services = [service.name for service in stack.services]
       desired_services.append('stack_advisor.py')  # stack_advisor.py placed in stacks folder
@@ -530,8 +532,12 @@ class GeneratorHelper(object):
           process_other_files(target, self.config_data, self.stack_version_changes)
 
       copy_tree(original_stack, target_stack, ignored_files, post_copy=post_copy)
-      # copy default stack advisor
-      shutil.copy(os.path.join(self.resources_folder, 'stacks', 'stack_advisor.py'), os.path.join(target_folder, '../stack_advisor.py'))
+      # After generating target stack from base stack, overlay target stack partial definition defined under
+      # <resourceDir>/stacks/<targetStackName>/<targetStackVersion>
+      copy_tree(partial_target_stack, target_stack, ignored_files, post_copy=None)
+
+    # copy default stack advisor
+    shutil.copy(os.path.join(self.resources_folder, 'stacks', 'stack_advisor.py'), os.path.join(target_folder, '../stack_advisor.py'))
 
   def copy_common_services(self, common_services = []):
     ignored_files = ['.pyc']

http://git-wip-us.apache.org/repos/asf/ambari/blob/75e2a265/ambari-server/src/main/resources/stacks/PHD/3.0/services/HDFS/configuration/core-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0/services/HDFS/configuration/core-site.xml b/ambari-server/src/main/resources/stacks/PHD/3.0/services/HDFS/configuration/core-site.xml
new file mode 100644
index 0000000..179f578
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/PHD/3.0/services/HDFS/configuration/core-site.xml
@@ -0,0 +1,73 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+
+ <!--
+    Licensed to the Apache Software Foundation (ASF) under one or more
+    contributor license agreements.  See the NOTICE file distributed with
+    this work for additional information regarding copyright ownership.
+    The ASF licenses this file to You under the Apache License, Version 2.0
+    (the "License"); you may not use this file except in compliance with
+    the License.  You may obtain a copy of the License at
+ 
+        http://www.apache.org/licenses/LICENSE-2.0
+ 
+    Unless required by applicable law or agreed to in writing, software
+    distributed under the License is distributed on an "AS IS" BASIS,
+    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    See the License for the specific language governing permissions and
+    limitations under the License.
+ -->
+ 
+<!-- Put site-specific property overrides in this file. -->
+
+<configuration supports_final="true" xmlns:xi="http://www.w3.org/2001/XInclude">
+
+  <property>
+    <name>hadoop.http.authentication.simple.anonymous.allowed</name>
+    <value>true</value>
+    <description></description>
+  </property>
+
+  <property>
+    <name>hadoop.security.key.provider.path</name>
+    <value></value>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>hadoop-env</type>
+        <name>keyserver_host</name>
+      </property>
+      <property>
+        <type>hadoop-env</type>
+        <name>keyserver_port</name>
+      </property>
+      <property>
+        <type>kms-env</type>
+        <name>kms_port</name>
+      </property>
+      <property>
+        <type>ranger-kms-site</type>
+        <name>ranger.service.https.attrib.ssl.enabled</name>
+      </property>
+    </depends-on>
+  </property>
+
+  <!-- HDFS properties required for HAWQ -->
+  <property>
+    <name>ipc.client.connection.maxidletime</name>
+    <value>3600000</value>
+  </property>
+
+  <property>
+    <name>ipc.client.connect.timeout</name>
+    <value>300000</value>
+  </property>
+
+  <property>
+    <name>ipc.server.listen.queue.size</name>
+    <value>3300</value>
+  </property>
+
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/75e2a265/ambari-server/src/main/resources/stacks/PHD/3.0/services/HDFS/configuration/hdfs-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0/services/HDFS/configuration/hdfs-site.xml b/ambari-server/src/main/resources/stacks/PHD/3.0/services/HDFS/configuration/hdfs-site.xml
new file mode 100644
index 0000000..3a81220
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/PHD/3.0/services/HDFS/configuration/hdfs-site.xml
@@ -0,0 +1,116 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+<!-- Put site-specific property overrides in this file. -->
+
+<configuration supports_final="true">
+
+  <property>
+    <name>dfs.namenode.startup.delay.block.deletion.sec</name>
+    <value>3600</value>
+    <description></description>
+  </property>
+
+  <property>
+    <name>dfs.journalnode.edits.dir</name>
+    <value>/hadoop/hdfs/journalnode</value>
+    <description>The path where the JournalNode daemon will store its local state.</description>
+  </property>
+  
+  <property>
+    <name>dfs.client.retry.policy.enabled</name>
+    <value>false</value>
+    <description>Enables HDFS client retry in the event of a NameNode failure.</description>
+  </property>
+
+  <property>
+    <name>dfs.content-summary.limit</name>
+    <value>5000</value>
+    <description>Dfs content summary limit.</description>
+  </property>
+
+  <property>
+    <name>dfs.encryption.key.provider.uri</name>
+    <value></value>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>hadoop-env</type>
+        <name>keyserver_host</name>
+      </property>
+      <property>
+        <type>hadoop-env</type>
+        <name>keyserver_port</name>
+      </property>
+      <property>
+        <type>kms-env</type>
+        <name>kms_port</name>
+      </property>
+      <property>
+        <type>ranger-kms-site</type>
+        <name>ranger.service.https.attrib.ssl.enabled</name>
+      </property>
+    </depends-on>
+  </property>
+
+  <!-- HDFS properties required for HAWQ -->
+  <property>
+    <name>dfs.allow.truncate</name>
+    <value>true</value>
+  </property>
+
+  <property>
+    <name>dfs.support.append</name>
+    <value>true</value>
+  </property>
+
+  <property>
+    <name>dfs.client.read.shortcircuit</name>
+    <value>true</value>
+  </property>
+
+  <property>
+    <name>dfs.block.local-path-access.user</name>
+    <value>gpadmin</value>
+  </property>
+
+  <property>
+    <name>dfs.datanode.data.dir.perm</name>
+    <value>750</value>
+  </property>
+
+  <property>
+    <name>dfs.datanode.max.transfer.threads</name>
+    <value>40960</value>
+  </property>
+
+  <property>
+    <name>dfs.datanode.handler.count</name>
+    <value>60</value>
+  </property>
+
+  <property>
+    <name>dfs.namenode.accesstime.precision</name>
+    <value>-1</value>
+  </property>
+
+</configuration>


Mime
View raw message