incubator-ambari-commits mailing list archives

From: maha...@apache.org
Subject: svn commit: r1387826 [2/3] - in /incubator/ambari/branches/AMBARI-666: ./ ambari-agent/src/main/puppet/ ambari-agent/src/main/puppet/manifestloader/ ambari-agent/src/main/puppet/modules/ ambari-agent/src/main/puppet/modules/configgenerator/ ambari-agen...
Date: Thu, 20 Sep 2012 00:27:05 GMT
Added: incubator/ambari/branches/AMBARI-666/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/snamenode.pp
URL: http://svn.apache.org/viewvc/incubator/ambari/branches/AMBARI-666/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/snamenode.pp?rev=1387826&view=auto
==============================================================================
--- incubator/ambari/branches/AMBARI-666/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/snamenode.pp (added)
+++ incubator/ambari/branches/AMBARI-666/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/snamenode.pp Thu Sep 20 00:27:02 2012
@@ -0,0 +1,98 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+class hdp-hadoop::snamenode(
+  $service_state = $hdp::params::cluster_service_state,
+  $opts = {}
+) inherits hdp-hadoop::params  
+{
+  $hdp::params::service_exists['hdp-hadoop::snamenode'] = true
+
+  Hdp-hadoop::Common<||>{service_states +> $service_state}
+  Hdp-hadoop::Package<||>{include_64_bit => true}
+  Hdp-hadoop::Configfile<||>{sizes +> 64}
+
+  if ($service_state == 'no_op') {
+  } elsif ($service_state in ['running','stopped','installed_and_configured','uninstalled']) {
+    $fs_checkpoint_dir = $hdp-hadoop::params::fs_checkpoint_dir
+  
+    #adds package, users and directories, and common hadoop configs
+    include hdp-hadoop::initialize
+
+    if ( ($service_state == 'installed_and_configured') and
+         ($security_enabled == true) and ($kerberos_install_type == "AMBARI_SET_KERBEROS") ) {
+      if ($hdp::params::service_exists['hdp-hadoop::namenode'] != true) {
+        $masterHost = $kerberos_adminclient_host[0]
+        hdp::download_keytab { 'snamenode_service_keytab' :
+          masterhost => $masterHost,
+          keytabdst => "${$keytab_path}/nn.service.keytab",
+          keytabfile => 'nn.service.keytab',
+          owner => $hdp-hadoop::params::hdfs_user
+        }
+        hdp::download_keytab { 'snamenode_spnego_keytab' :   
+          masterhost => $masterHost,
+          keytabdst => "${$keytab_path}/spnego.service.keytab",
+          keytabfile => 'spnego.service.keytab', 
+          owner => $hdp-hadoop::params::hdfs_user,
+          mode => '0440',
+          group => 'hadoop'
+        }
+      }
+    }
+ 
+    Hdp-Hadoop::Configfile<||>{snamenode_host => $hdp::params::host_address}
+  
+    hdp-hadoop::snamenode::create_name_dirs { $fs_checkpoint_dir: 
+      service_state => $service_state
+    }
+    
+    if ($hdp::params::service_exists['hdp-hadoop::namenode'] == true) {
+      $create_pid_dir = false
+      $create_log_dir = false
+    } else {
+      $create_pid_dir = true
+      $create_log_dir = true
+    }
+    
+    hdp-hadoop::service{ 'secondarynamenode':
+      ensure         => $service_state,
+      user           => $hdp-hadoop::params::hdfs_user,
+      create_pid_dir => $create_pid_dir,
+      create_log_dir => $create_log_dir
+    }
+  
+    #top level does not need anchors
+    Class['hdp-hadoop'] -> Hdp-hadoop::Service['secondarynamenode']
+    Hdp-hadoop::Snamenode::Create_name_dirs<||> -> Hdp-hadoop::Service['secondarynamenode']
+  } else {
+    hdp_fail("TODO not implemented yet: service_state = ${service_state}")
+  }
+}
+
+define hdp-hadoop::snamenode::create_name_dirs($service_state)
+{
+   $dirs = hdp_array_from_comma_list($name)
+   hdp::directory_recursive_create { $dirs :
+     owner => $hdp-hadoop::params::hdfs_user,
+     mode => '0755',
+     service_state => $service_state,
+     force => true
+  }
+}
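
The create_name_dirs define above takes the comma-separated fs.checkpoint.dir value as its resource title, splits it, and creates each directory through the module's hdp helpers. A minimal standalone sketch of the same pattern, using only Puppet built-ins and purely illustrative names and paths (not part of this commit), looks like:

    # Split a comma-separated title into one directory resource per entry.
    # Unlike hdp::directory_recursive_create, plain file resources do not
    # create missing parent directories.
    define checkpoint_dirs($owner = 'hdfs', $mode = '0755') {
      $dirs = split($name, ',')
      file { $dirs:
        ensure => directory,
        owner  => $owner,
        mode   => $mode,
      }
    }

    # Hypothetical usage with two checkpoint directories:
    checkpoint_dirs { '/hadoop/hdfs/namesecondary,/grid1/hadoop/hdfs/namesecondary': }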

Added: incubator/ambari/branches/AMBARI-666/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/tasktracker.pp
URL: http://svn.apache.org/viewvc/incubator/ambari/branches/AMBARI-666/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/tasktracker.pp?rev=1387826&view=auto
==============================================================================
--- incubator/ambari/branches/AMBARI-666/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/tasktracker.pp (added)
+++ incubator/ambari/branches/AMBARI-666/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/tasktracker.pp Thu Sep 20 00:27:02 2012
@@ -0,0 +1,94 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+class hdp-hadoop::tasktracker(
+  $service_state = $hdp::params::cluster_service_state,
+  $opts = {}
+) inherits hdp-hadoop::params
+{
+  $hdp::params::service_exists['hdp-hadoop::tasktracker'] = true
+
+  Hdp-hadoop::Common<||>{service_states +> $service_state}
+
+  if ($hdp::params::use_32_bits_on_slaves == true) {
+    Hdp-hadoop::Package<||>{include_32_bit => true}
+    Hdp-hadoop::Configfile<||>{sizes +> 32}
+  } else {
+    Hdp-hadoop::Package<||>{include_64_bit => true}
+    Hdp-hadoop::Configfile<||>{sizes +> 64}
+  }
+
+  if ($service_state == 'no_op') {
+  } elsif ($service_state in ['running','stopped','installed_and_configured','uninstalled']) { 
+    $mapred_local_dir = $hdp-hadoop::params::mapred_local_dir
+  
+    #adds package, users and directories, and common hadoop configs
+    include hdp-hadoop::initialize
+
+    if ( ($service_state == 'installed_and_configured') and
+         ($security_enabled == true) and ($kerberos_install_type == "AMBARI_SET_KERBEROS") ) {
+      $masterHost = $kerberos_adminclient_host[0]
+      hdp::download_keytab { 'tasktracker_service_keytab' :
+        masterhost => $masterHost,
+        keytabdst => "${$keytab_path}/tt.service.keytab",
+        keytabfile => 'tt.service.keytab',
+        owner => $hdp-hadoop::params::mapred_user
+      }
+    }
+  
+    hdp-hadoop::tasktracker::create_local_dirs { $mapred_local_dir: 
+      service_state => $service_state
+    }
+    
+    if ($hdp::params::service_exists['hdp-hadoop::jobtracker'] == true) {
+      $create_pid_dir = false
+      $create_log_dir = false
+    } else {
+      $create_pid_dir = true
+      $create_log_dir = true
+    }
+
+    hdp-hadoop::service{ 'tasktracker':
+      ensure => $service_state,
+      user   => $hdp-hadoop::params::mapred_user,
+      create_pid_dir => $create_pid_dir,
+      create_log_dir => $create_log_dir
+    }
+  
+    #top level does not need anchors
+    Class['hdp-hadoop'] -> Hdp-hadoop::Service['tasktracker']
+    Hdp-hadoop::Tasktracker::Create_local_dirs<||> -> Hdp-hadoop::Service['tasktracker']
+  } else {
+    hdp_fail("TODO not implemented yet: service_state = ${service_state}")
+  }
+}
+
+define hdp-hadoop::tasktracker::create_local_dirs($service_state)
+{
+  if ($hdp::params::service_exists['hdp-hadoop::jobtracker'] != true) {
+    $dirs = hdp_array_from_comma_list($name)
+    hdp::directory_recursive_create { $dirs :
+      owner => $hdp-hadoop::params::mapred_user,
+      mode => '0755',
+      service_state => $service_state,
+      force => true
+    }
+  }
+}
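
Both manifests lean on Puppet resource collectors (the <||> syntax) to adjust attributes of resources declared elsewhere, e.g. Hdp-hadoop::Package<||>{include_64_bit => true} and the appending form Hdp-hadoop::Common<||>{service_states +> $service_state}. A small self-contained sketch of that idiom with a hypothetical defined type (not the real hdp-hadoop interfaces):

    # A stand-in defined type; the real hdp-hadoop::package takes different parameters.
    define demo_package($include_32_bit = false, $include_64_bit = false) {
      notify { "${name}: 32-bit=${include_32_bit} 64-bit=${include_64_bit}": }
    }

    demo_package { 'hadoop': }

    # The empty collector <| |> matches every demo_package already declared and
    # overrides the attribute; with '+>' the value is appended to any existing
    # value instead of replacing it, which is how service_states accumulates.
    Demo_package <| |> { include_64_bit => true }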

Added: incubator/ambari/branches/AMBARI-666/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/capacity-scheduler.xml.erb
URL: http://svn.apache.org/viewvc/incubator/ambari/branches/AMBARI-666/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/capacity-scheduler.xml.erb?rev=1387826&view=auto
==============================================================================
--- incubator/ambari/branches/AMBARI-666/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/capacity-scheduler.xml.erb (added)
+++ incubator/ambari/branches/AMBARI-666/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/capacity-scheduler.xml.erb Thu Sep 20 00:27:02 2012
@@ -0,0 +1,68 @@
+<?xml version="1.0"?>
+
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+<!-- This is the configuration file for the resource manager in Hadoop. -->
+<!-- You can configure various scheduling parameters related to queues. -->
+<!-- The properties for a queue follow a naming convention, such as -->
+<!-- mapred.capacity-scheduler.queue.<queue-name>.property-name. -->
+
+<configuration xmlns:xi="http://www.w3.org/2001/XInclude">
+
+
+  <property>
+    <name>mapred.capacity-scheduler.queue.default.capacity</name>
+    <value>100</value>
+    <description>Percentage of the number of slots in the cluster that are
+      guaranteed to be available for jobs in this queue.
+    </description>    
+  </property>
+  
+  <property>
+    <name>mapred.capacity-scheduler.queue.default.supports-priority</name>
+    <value>false</value>
+    <description>If true, priorities of jobs will be taken into 
+      account in scheduling decisions.
+    </description>
+  </property>
+
+  <property>
+    <name>mapred.capacity-scheduler.queue.default.minimum-user-limit-percent</name>
+    <value>100</value>
+    <description> Each queue enforces a limit on the percentage of resources 
+    allocated to a user at any given time, if there is competition for them. 
+    This user limit can vary between a minimum and maximum value. The former
+    depends on the number of users who have submitted jobs, and the latter is
+    set to this property value. For example, suppose the value of this 
+    property is 25. If two users have submitted jobs to a queue, no single 
+    user can use more than 50% of the queue resources. If a third user submits
+    a job, no single user can use more than 33% of the queue resources. With 4 
+    or more users, no user can use more than 25% of the queue's resources. A 
+    value of 100 implies no user limits are imposed. 
+    </description>
+  </property>
+
+  <property>
+    <name>mapred.capacity-scheduler.queue.default.maximum-initialized-jobs-per-user</name>
+    <value>25</value>
+    <description>The maximum number of jobs to be pre-initialized for a user
+    of the job queue.
+    </description>
+  </property>
+
+</configuration>

Added: incubator/ambari/branches/AMBARI-666/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/commons-logging.properties.erb
URL: http://svn.apache.org/viewvc/incubator/ambari/branches/AMBARI-666/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/commons-logging.properties.erb?rev=1387826&view=auto
==============================================================================
--- incubator/ambari/branches/AMBARI-666/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/commons-logging.properties.erb (added)
+++ incubator/ambari/branches/AMBARI-666/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/commons-logging.properties.erb Thu Sep 20 00:27:02 2012
@@ -0,0 +1,25 @@
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+#Logging Implementation
+
+#Log4J
+org.apache.commons.logging.Log=org.apache.commons.logging.impl.Log4JLogger
+
+#JDK Logger
+#org.apache.commons.logging.Log=org.apache.commons.logging.impl.Jdk14Logger

Added: incubator/ambari/branches/AMBARI-666/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/core-site.xml.erb
URL: http://svn.apache.org/viewvc/incubator/ambari/branches/AMBARI-666/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/core-site.xml.erb?rev=1387826&view=auto
==============================================================================
--- incubator/ambari/branches/AMBARI-666/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/core-site.xml.erb (added)
+++ incubator/ambari/branches/AMBARI-666/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/core-site.xml.erb Thu Sep 20 00:27:02 2012
@@ -0,0 +1,254 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+
+ <!--
+    Licensed to the Apache Software Foundation (ASF) under one or more
+    contributor license agreements.  See the NOTICE file distributed with
+    this work for additional information regarding copyright ownership.
+    The ASF licenses this file to You under the Apache License, Version 2.0
+    (the "License"); you may not use this file except in compliance with
+    the License.  You may obtain a copy of the License at
+ 
+        http://www.apache.org/licenses/LICENSE-2.0
+ 
+    Unless required by applicable law or agreed to in writing, software
+    distributed under the License is distributed on an "AS IS" BASIS,
+    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    See the License for the specific language governing permissions and
+    limitations under the License.
+ -->
+ 
+<!-- Put site-specific property overrides in this file. -->
+
+<configuration xmlns:xi="http://www.w3.org/2001/XInclude">
+
+<!-- i/o properties -->
+
+  <property>
+    <name>io.file.buffer.size</name>
+    <value>131072</value>
+    <description>The size of buffer for use in sequence files.
+  The size of this buffer should probably be a multiple of hardware
+  page size (4096 on Intel x86), and it determines how much data is
+  buffered during read and write operations.</description>
+  </property>
+
+  <property>
+    <name>io.serializations</name>
+    <value>org.apache.hadoop.io.serializer.WritableSerialization</value>
+  </property>
+
+  <property>
+    <name>io.compression.codecs</name>
+    <value><%=scope.function_hdp_template_var("compression_codecs")%></value>
+    <description>A list of the compression codec classes that can be used
+                 for compression/decompression.</description>
+  </property>
+
+  <property>
+    <name>io.compression.codec.lzo.class</name>
+    <value>com.hadoop.compression.lzo.LzoCodec</value>
+    <description>The implementation for lzo codec.</description>
+  </property>
+
+<!-- file system properties -->
+
+  <property>
+    <name>fs.default.name</name>
+    <!-- cluster variant -->
+    <value>hdfs://<%=scope.function_hdp_host("namenode_host")%>:8020</value>
+    <description>The name of the default file system.  Either the
+  literal string "local" or a host:port for NDFS.</description>
+    <final>true</final>
+  </property>
+
+  <property>
+    <name>fs.trash.interval</name>
+    <value>360</value>
+    <description>Number of minutes between trash checkpoints.
+  If zero, the trash feature is disabled.
+  </description>
+  </property>
+
+  <property>
+    <name>fs.checkpoint.dir</name>
+    <value><%=scope.function_hdp_template_var("fs_checkpoint_dir")%></value>
+    <description>Determines where on the local filesystem the DFS secondary
+        name node should store the temporary images to merge.
+        If this is a comma-delimited list of directories then the image is
+        replicated in all of the directories for redundancy.
+    </description>
+  </property>
+
+  <property>
+    <name>fs.checkpoint.edits.dir</name>
+    <value>${fs.checkpoint.dir}</value>
+    <description>Determines where on the local filesystem the DFS secondary
+        name node should store the temporary edits to merge.
+        If this is a comma-delimited list of directories then the edits are
+        replicated in all of the directories for redundancy.
+        Default value is same as fs.checkpoint.dir
+    </description>
+  </property>
+
+  <property>
+    <name>fs.checkpoint.period</name>
+    <value>21600</value>
+    <description>The number of seconds between two periodic checkpoints.
+  </description>
+  </property>
+
+  <property>
+    <name>fs.checkpoint.size</name>
+    <value>536870912</value>
+    <description>The size of the current edit log (in bytes) that triggers
+       a periodic checkpoint even if the fs.checkpoint.period hasn't expired.
+  </description>
+  </property>
+
+  <!-- ipc properties: copied from kryptonite configuration -->
+  <property>
+    <name>ipc.client.idlethreshold</name>
+    <value>8000</value>
+    <description>Defines the threshold number of connections after which
+               connections will be inspected for idleness.
+  </description>
+  </property>
+
+  <property>
+    <name>ipc.client.connection.maxidletime</name>
+    <value>30000</value>
+    <description>The maximum time after which a client will bring down the
+               connection to the server.
+  </description>
+  </property>
+
+  <property>
+    <name>ipc.client.connect.max.retries</name>
+    <value>50</value>
+    <description>Defines the maximum number of retries for IPC connections.</description>
+  </property>
+
+  <!-- Web Interface Configuration -->
+  <property>
+    <name>webinterface.private.actions</name>
+    <value>false</value>
+    <description> If set to true, the web interfaces of JT and NN may contain
+                actions, such as kill job, delete file, etc., that should
+                not be exposed to the public. Enable this option if the interfaces
+                are only reachable by those who have the right authorization.
+  </description>
+  </property>
+
+ <property>
+   <name>hadoop.security.authentication</name>
+   <value><%=scope.function_hdp_template_var("security_type")%></value>
+   <description>
+   Set the authentication for the cluster. Valid values are: simple or
+   kerberos.
+   </description>
+ </property>
+<property>
+  <name>hadoop.security.authorization</name>
+  <value><%=scope.function_hdp_template_var("enable_security_authorization")%></value>
+  <description>
+     Enable authorization for different protocols.
+  </description>
+</property>
+
+  <property>
+    <name>hadoop.security.auth_to_local</name>
+    <value>
+        RULE:[2:$1@$0]([jt]t@.*<%=scope.function_hdp_template_var("kerberos_domain")%>)s/.*/<%=scope.function_hdp_template_var("mapred_user")%>/
+        RULE:[2:$1@$0]([nd]n@.*<%=scope.function_hdp_template_var("kerberos_domain")%>)s/.*/<%=scope.function_hdp_template_var("hdfs_user")%>/
+        RULE:[2:$1@$0](hm@.*<%=scope.function_hdp_template_var("kerberos_domain")%>)s/.*/<%=scope.function_hdp_template_var("hbase_user")%>/
+        RULE:[2:$1@$0](rs@.*<%=scope.function_hdp_template_var("kerberos_domain")%>)s/.*/<%=scope.function_hdp_template_var("hbase_user")%>/
+        DEFAULT</value>
+<description>The mapping from kerberos principal names to local OS user names.
+  So the default rule is just "DEFAULT" which takes all principals in your default domain to their first component.
+  "omalley@APACHE.ORG" and "omalley/admin@APACHE.ORG" to "omalley", if your default domain is APACHE.ORG.
+The translations rules have 3 sections:
+      base     filter    substitution
+The base consists of a number that represents the number of components in the principal name excluding the realm and the pattern for building the name from the sections of the principal name. The base uses $0 to mean the realm, $1 to mean the first component and $2 to mean the second component.
+
+[1:$1@$0] translates "omalley@APACHE.ORG" to "omalley@APACHE.ORG"
+[2:$1] translates "omalley/admin@APACHE.ORG" to "omalley"
+[2:$1%$2] translates "omalley/admin@APACHE.ORG" to "omalley%admin"
+
+The filter is a regex in parens that must match the generated string for the rule to apply.
+
+"(.*%admin)" will take any string that ends in "%admin"
+"(.*@ACME.COM)" will take any string that ends in "@ACME.COM"
+
+Finally, the substitution is a sed rule to translate a regex into a fixed string.
+
+"s/@ACME\.COM//" removes the first instance of "@ACME.COM".
+"s/@[A-Z]*\.COM//" removes the first instance of "@" followed by a name followed by ".COM".
+"s/X/Y/g" replaces all of the "X" in the name with "Y"
+
+So, if your default realm was APACHE.ORG, but you also wanted to take all principals from ACME.COM that had a single component "joe@ACME.COM", you'd do:
+
+RULE:[1:$1@$0](.*@ACME.COM)s/@.*//
+DEFAULT
+
+To also translate the names with a second component, you'd make the rules:
+
+RULE:[1:$1@$0](.*@ACME.COM)s/@.*//
+RULE:[2:$1@$0](.*@ACME.COM)s/@.*//
+DEFAULT
+
+If you want to treat all principals from APACHE.ORG with /admin as "admin", your rules would look like:
+
+RULE:[2:$1%$2@$0](.*%admin@APACHE.ORG)s/.*/admin/
+DEFAULT
+    </description>
+  </property>
+
+<property>
+  <name>hadoop.proxyuser.<%=scope.function_hdp_user("hive_user")%>.groups</name>
+  <value><%=scope.function_hdp_template_var("proxyuser_group")%></value>
+  <description>
+     Proxy group for Hadoop.
+  </description>
+</property>
+
+<property>
+  <name>hadoop.proxyuser.<%=scope.function_hdp_user("hive_user")%>.hosts</name>
+  <value><%=scope.function_hdp_host("hive_server_host")%></value>
+  <description>
+     Proxy host for Hadoop.
+  </description>
+</property>
+
+<property>
+  <name>hadoop.proxyuser.<%=scope.function_hdp_user("oozie_user")%>.groups</name>
+  <value><%=scope.function_hdp_template_var("proxyuser_group")%></value>
+  <description>
+     Proxy group for Hadoop.
+  </description>
+</property>
+
+<property>
+  <name>hadoop.proxyuser.<%=scope.function_hdp_user("oozie_user")%>.hosts</name>
+  <value><%=scope.function_hdp_host("oozie_server")%></value>
+  <description>
+     Proxy host for Hadoop.
+  </description>
+</property>
+
+<property>
+  <name>hadoop.proxyuser.<%=scope.function_hdp_user("templeton_user")%>.groups</name>
+  <value><%=scope.function_hdp_template_var("proxyuser_group")%></value>
+  <description>
+    Proxy group for templeton.
+  </description>
+</property>
+
+<property>
+  <name>hadoop.proxyuser.<%=scope.function_hdp_user("templeton_user")%>.hosts</name>
+  <value><%=scope.function_hdp_host("templeton_server_host")%></value>
+  <description>
+    Proxy host for templeton.
+  </description>
+</property>
+</configuration>
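
The scope.function_hdp_template_var(...) and scope.function_hdp_host(...) calls in this template invoke the module's custom hdp_* parser functions when the template is rendered. Getting the rendered file onto disk is presumably handled by the hdp-hadoop::configfile resources referenced from the manifests; a minimal sketch of equivalent wiring with only Puppet built-ins, where the path, ownership and mode are assumptions rather than values from this commit, would be:

    # Render core-site.xml.erb and write it into the Hadoop conf directory.
    file { '/etc/hadoop/conf/core-site.xml':
      ensure  => file,
      owner   => 'hdfs',
      group   => 'hadoop',
      mode    => '0644',
      content => template('hdp-hadoop/core-site.xml.erb'),
    }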

Added: incubator/ambari/branches/AMBARI-666/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/hadoop-env.sh.erb
URL: http://svn.apache.org/viewvc/incubator/ambari/branches/AMBARI-666/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/hadoop-env.sh.erb?rev=1387826&view=auto
==============================================================================
--- incubator/ambari/branches/AMBARI-666/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/hadoop-env.sh.erb (added)
+++ incubator/ambari/branches/AMBARI-666/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/hadoop-env.sh.erb Thu Sep 20 00:27:02 2012
@@ -0,0 +1,89 @@
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+# Set Hadoop-specific environment variables here.
+
+# The only required environment variable is JAVA_HOME.  All others are
+# optional.  When running a distributed configuration it is best to
+# set JAVA_HOME in this file, so that it is correctly defined on
+# remote nodes.
+
+# The java implementation to use.  Required.
+export JAVA_HOME=<%=scope.function_hdp_java_home()%>
+export HADOOP_HOME_WARN_SUPPRESS=1
+
+# Hadoop Configuration Directory
+#TODO: if env var set that can cause problems
+export HADOOP_CONF_DIR=${HADOOP_CONF_DIR:-<%=scope.function_hdp_template_var("conf_dir")%>}
+
+# The maximum amount of heap to use, in MB. Default is 1000.
+#export HADOOP_HEAPSIZE=
+
+export HADOOP_NAMENODE_INIT_HEAPSIZE="-Xms<%=scope.function_hdp_template_var("namenode_heapsize")%>"
+
+# Extra Java runtime options.  Empty by default.
+export HADOOP_OPTS="-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}"
+
+# Command specific options appended to HADOOP_OPTS when specified
+export HADOOP_NAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile=<%=scope.function_hdp_template_var("hadoop_logdirprefix")%>/$USER/hs_err_pid%p.log -XX:NewSize=<%=scope.function_hdp_template_var("namenode_opt_newsize")%> -XX:MaxNewSize=<%=scope.function_hdp_template_var("namenode_opt_maxnewsize")%> -Xloggc:<%=scope.function_hdp_template_var("hadoop_logdirprefix")%>/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms<%=scope.function_hdp_template_var("namenode_heapsize")%> -Xmx<%=scope.function_hdp_template_var("namenode_heapsize")%> -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_NAMENODE_OPTS}"
+HADOOP_JOBTRACKER_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile=<%=scope.function_hdp_template_var("hadoop_logdirprefix")%>/$USER/hs_err_pid%p.log -XX:NewSize=<%=scope.function_hdp_template_var("jtnode_opt_newsize")%> -XX:MaxNewSize=<%=scope.function_hdp_template_var("jtnode_opt_maxnewsize")%> -Xloggc:<%=scope.function_hdp_template_var("hadoop_logdirprefix")%>/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xmx<%=scope.function_hdp_template_var("jtnode_heapsize")%> -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}"
+
+HADOOP_TASKTRACKER_OPTS="-server -Xmx<%=scope.function_hdp_template_var("ttnode_heapsize")%> -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}"
+HADOOP_DATANODE_OPTS="-Xmx<%=scope.function_hdp_template_var("dtnode_heapsize")%> -Dhadoop.security.logger=ERROR,DRFAS ${HADOOP_DATANODE_OPTS}"
+HADOOP_BALANCER_OPTS="-server -Xmx<%=scope.function_hdp_template_var("hadoop_heapsize")%> ${HADOOP_BALANCER_OPTS}"
+
+export HADOOP_SECONDARYNAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile=<%=scope.function_hdp_template_var("hadoop_logdirprefix")%>/$USER/hs_err_pid%p.log -XX:NewSize=<%=scope.function_hdp_template_var("namenode_opt_newsize")%> -XX:MaxNewSize=<%=scope.function_hdp_template_var("namenode_opt_maxnewsize")%> -Xloggc:<%=scope.function_hdp_template_var("hadoop_logdirprefix")%>/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps ${HADOOP_NAMENODE_INIT_HEAPSIZE} -Xmx<%=scope.function_hdp_template_var("namenode_heapsize")%> -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_SECONDARYNAMENODE_OPTS}"
+
+# The following applies to multiple commands (fs, dfs, fsck, distcp etc)
+export HADOOP_CLIENT_OPTS="-Xmx128m ${HADOOP_CLIENT_OPTS}"
+#HADOOP_JAVA_PLATFORM_OPTS="-XX:-UsePerfData ${HADOOP_JAVA_PLATFORM_OPTS}"
+
+# On secure datanodes, user to run the datanode as after dropping privileges
+export HADOOP_SECURE_DN_USER=<%=scope.function_hdp_template_var("hdfs_user")%>
+
+# Extra ssh options.  Empty by default.
+export HADOOP_SSH_OPTS="-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR"
+
+# Where log files are stored.  $HADOOP_HOME/logs by default.
+export HADOOP_LOG_DIR=<%=scope.function_hdp_template_var("hadoop_logdirprefix")%>/$USER
+
+
+# Where log files are stored in the secure data environment.
+export HADOOP_SECURE_DN_LOG_DIR=<%=scope.function_hdp_template_var("hadoop_logdirprefix")%>/$HADOOP_SECURE_DN_USER
+
+# File naming remote slave hosts.  $HADOOP_HOME/conf/slaves by default.
+# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves
+
+# host:path where hadoop code should be rsync'd from.  Unset by default.
+# export HADOOP_MASTER=master:/home/$USER/src/hadoop
+
+# Seconds to sleep between slave commands.  Unset by default.  This
+# can be useful in large clusters, where, e.g., slave rsyncs can
+# otherwise arrive faster than the master can service them.
+# export HADOOP_SLAVE_SLEEP=0.1
+
+# The directory where pid files are stored. /tmp by default.
+export HADOOP_PID_DIR=<%=scope.function_hdp_template_var("hadoop_piddirprefix")%>/$USER
+export HADOOP_SECURE_DN_PID_DIR=<%=scope.function_hdp_template_var("hadoop_piddirprefix")%>/$HADOOP_SECURE_DN_USER
+
+# A string representing this instance of hadoop. $USER by default.
+export HADOOP_IDENT_STRING=$USER
+
+# The scheduling priority for daemon processes.  See 'man nice'.
+
+# export HADOOP_NICENESS=10

Added: incubator/ambari/branches/AMBARI-666/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/hadoop-metrics2.properties-GANGLIA.erb
URL: http://svn.apache.org/viewvc/incubator/ambari/branches/AMBARI-666/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/hadoop-metrics2.properties-GANGLIA.erb?rev=1387826&view=auto
==============================================================================
--- incubator/ambari/branches/AMBARI-666/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/hadoop-metrics2.properties-GANGLIA.erb (added)
+++ incubator/ambari/branches/AMBARI-666/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/hadoop-metrics2.properties-GANGLIA.erb Thu Sep 20 00:27:02 2012
@@ -0,0 +1,37 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# syntax: [prefix].[source|sink|jmx].[instance].[options]
+# See package.html for org.apache.hadoop.metrics2 for details
+
+*.period=60
+
+*.sink.ganglia.class=org.apache.hadoop.metrics2.sink.ganglia.GangliaSink31
+*.sink.ganglia.period=10
+
+# default for supportsparse is false
+*.sink.ganglia.supportsparse=true
+
+*.sink.ganglia.slope=jvm.metrics.gcCount=zero,jvm.metrics.memHeapUsedM=both
+*.sink.ganglia.dmax=jvm.metrics.threadsBlocked=70,jvm.metrics.memHeapUsedM=40
+
+# Hook up to the server
+namenode.sink.ganglia.servers=<%=scope.function_hdp_host("ganglia_server_host")%>:8661
+datanode.sink.ganglia.servers=<%=scope.function_hdp_host("ganglia_server_host")%>:8660
+jobtracker.sink.ganglia.servers=<%=scope.function_hdp_host("ganglia_server_host")%>:8662
+tasktracker.sink.ganglia.servers=<%=scope.function_hdp_host("ganglia_server_host")%>:8660
+maptask.sink.ganglia.servers=<%=scope.function_hdp_host("ganglia_server_host")%>:8660
+reducetask.sink.ganglia.servers=<%=scope.function_hdp_host("ganglia_server_host")%>:8660
+

Added: incubator/ambari/branches/AMBARI-666/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/hadoop-metrics2.properties.erb
URL: http://svn.apache.org/viewvc/incubator/ambari/branches/AMBARI-666/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/hadoop-metrics2.properties.erb?rev=1387826&view=auto
==============================================================================
--- incubator/ambari/branches/AMBARI-666/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/hadoop-metrics2.properties.erb (added)
+++ incubator/ambari/branches/AMBARI-666/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/hadoop-metrics2.properties.erb Thu Sep 20 00:27:02 2012
@@ -0,0 +1,37 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# syntax: [prefix].[source|sink|jmx].[instance].[options]
+# See package.html for org.apache.hadoop.metrics2 for details
+
+*.period=60
+
+*.sink.ganglia.class=org.apache.hadoop.metrics2.sink.ganglia.GangliaSink31
+*.sink.ganglia.period=10
+
+# default for supportsparse is false
+*.sink.ganglia.supportsparse=true
+
+*.sink.ganglia.slope=jvm.metrics.gcCount=zero,jvm.metrics.memHeapUsedM=both
+*.sink.ganglia.dmax=jvm.metrics.threadsBlocked=70,jvm.metrics.memHeapUsedM=40
+
+# Hook up to the server
+namenode.sink.ganglia.servers=<%=scope.function_hdp_host("ganglia_server_host")%>:8661
+datanode.sink.ganglia.servers=<%=scope.function_hdp_host("ganglia_server_host")%>:8660
+jobtracker.sink.ganglia.servers=<%=scope.function_hdp_host("ganglia_server_host")%>:8662
+tasktracker.sink.ganglia.servers=<%=scope.function_hdp_host("ganglia_server_host")%>:8660
+maptask.sink.ganglia.servers=<%=scope.function_hdp_host("ganglia_server_host")%>:8660
+reducetask.sink.ganglia.servers=<%=scope.function_hdp_host("ganglia_server_host")%>:8660
+

Added: incubator/ambari/branches/AMBARI-666/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/hadoop-policy.xml.erb
URL: http://svn.apache.org/viewvc/incubator/ambari/branches/AMBARI-666/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/hadoop-policy.xml.erb?rev=1387826&view=auto
==============================================================================
--- incubator/ambari/branches/AMBARI-666/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/hadoop-policy.xml.erb (added)
+++ incubator/ambari/branches/AMBARI-666/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/hadoop-policy.xml.erb Thu Sep 20 00:27:02 2012
@@ -0,0 +1,134 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+<!-- Put site-specific property overrides in this file. -->
+
+<configuration>
+  <property>
+    <name>security.client.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for ClientProtocol, which is used by user code
+    via the DistributedFileSystem.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.client.datanode.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for ClientDatanodeProtocol, the client-to-datanode protocol
+    for block recovery.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.datanode.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for DatanodeProtocol, which is used by datanodes to
+    communicate with the namenode.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.inter.datanode.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for InterDatanodeProtocol, the inter-datanode protocol
+    for updating generation timestamp.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.namenode.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for NamenodeProtocol, the protocol used by the secondary
+    namenode to communicate with the namenode.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.inter.tracker.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for InterTrackerProtocol, used by the tasktrackers to
+    communicate with the jobtracker.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.job.submission.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for JobSubmissionProtocol, used by job clients to
+    communicate with the jobtracker for job submission, querying job status etc.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.task.umbilical.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for TaskUmbilicalProtocol, used by the map and reduce
+    tasks to communicate with the parent tasktracker.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+ <property>
+    <name>security.admin.operations.protocol.acl</name>
+    <value> <%=scope.function_hdp_template_var("hdfs_user")%></value>
+    <description>ACL for AdminOperationsProtocol. Used for admin commands.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.refresh.usertogroups.mappings.protocol.acl</name>
+    <value> <%=scope.function_hdp_template_var("hdfs_user")%></value>
+    <description>ACL for RefreshUserMappingsProtocol. Used to refresh
+    users mappings. The ACL is a comma-separated list of user and
+    group names. The user and group list is separated by a blank. For
+    e.g. "alice,bob users,wheel".  A special value of "*" means all
+    users are allowed.</description>
+  </property>
+
+<property>
+    <name>security.refresh.policy.protocol.acl</name>
+    <value> <%=scope.function_hdp_template_var("hdfs_user")%></value>
+    <description>ACL for RefreshAuthorizationPolicyProtocol, used by the
+    dfsadmin and mradmin commands to refresh the security policy in-effect.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+
+</configuration>

Added: incubator/ambari/branches/AMBARI-666/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/health_check.erb
URL: http://svn.apache.org/viewvc/incubator/ambari/branches/AMBARI-666/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/health_check.erb?rev=1387826&view=auto
==============================================================================
--- incubator/ambari/branches/AMBARI-666/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/health_check.erb (added)
+++ incubator/ambari/branches/AMBARI-666/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/health_check.erb Thu Sep 20 00:27:02 2012
@@ -0,0 +1,118 @@
+#!/bin/bash
+#
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+err=0;
+
+function check_disks {
+
+  for m in `awk '$3~/ext3/ {printf" %s ",$2}' /etc/fstab` ; do
+    fsdev=""
+    fsdev=`awk -v m=$m '$2==m {print $1}' /proc/mounts`;
+    if [ -z "$fsdev" ] ; then
+      msg_="$msg_ $m(u)"
+    else
+      msg_="$msg_`awk -v m=$m '$2==m { if ( $4 ~ /^ro,/ ) {printf"%s(ro)",$2 } ; }' /proc/mounts`"
+    fi
+  done
+
+  if [ -z "$msg_" ] ; then
+    echo "disks ok" ; exit 0
+  else
+    echo "$msg_" ; exit 2
+  fi
+
+}
+
+function check_taskcontroller {
+  if [ "<%=scope.function_hdp_template_var("security_enabled")%>" == "true" ]; then
+    perm=`stat -c %a:%U:%G <%=scope.function_hdp_template_var("task_bin_exe")%> 2>/dev/null`
+    if [ $? -eq 0 ] && [ "$perm" == "6050:root:hadoop" ] ; then
+      echo "taskcontroller ok"
+    else
+      echo 'check taskcontroller' ; exit 1
+    fi
+  fi
+}
+
+function check_jetty {
+  hname=`hostname`
+  jmx=`curl -s -S -m 5 "http://$hname:50060/jmx?qry=Hadoop:service=TaskTracker,name=ShuffleServerMetrics" 2>/dev/null` ;
+  if [ $? -eq 0 ] ; then
+    e=`echo $jmx | awk '/shuffle_exceptions_caught/ {printf"%d",$2}'` ;
+    e=${e:-0} # no jmx servlet ?
+    if [ $e -gt 10 ] ; then
+      echo "check jetty: shuffle_exceptions=$e" ; exit 1
+    else
+      echo "jetty ok"
+    fi
+  else
+    echo "check jetty: ping failed" ; exit 1
+  fi
+}
+
+function check_link {
+  snmp=/usr/bin/snmpwalk
+  if [ -e $snmp ] ; then
+    $snmp -t 5 -Oe  -Oq  -Os -v 1 -c public localhost if | \
+    awk ' {
+      split($1,a,".") ;
+      if ( a[1] == "ifIndex" ) { ifIndex[a[2]] = $2 }
+      if ( a[1] == "ifDescr" ) { ifDescr[a[2]] = $2 }
+      if ( a[1] == "ifType" ) { ifType[a[2]] = $2 }
+      if ( a[1] == "ifSpeed" ) { ifSpeed[a[2]] = $2 }
+      if ( a[1] == "ifAdminStatus" ) { ifAdminStatus[a[2]] = $2 }
+      if ( a[1] == "ifOperStatus" ) { ifOperStatus[a[2]] = $2 }
+    }
+    END {
+      up=0;
+      for (i in ifIndex ) {
+      if ( ifType[i] == 6 && ifAdminStatus[i] == 1 && ifOperStatus[i] == 1 && ifSpeed[i] == 1000000000 ) {
+      up=i;
+      }
+      }
+      if ( up == 0 ) { print "check link" ; exit 2 }
+      else { print ifDescr[up],"ok" }
+    }'
+    exit $? ;
+  fi
+}
+
+# Run all checks
+# Disabled 'check_link' for now... 
+for check in disks taskcontroller jetty; do
+  msg=`check_${check}` ;
+  if [ $? -eq 0 ] ; then
+    ok_msg="$ok_msg$msg,"
+  else
+    err_msg="$err_msg$msg,"
+  fi
+done
+
+if [ ! -z "$err_msg" ] ; then
+  echo -n "ERROR $err_msg "
+fi
+if [ ! -z "$ok_msg" ] ; then
+  echo -n "OK: $ok_msg"
+fi
+
+echo
+
+# Success!
+exit 0

Added: incubator/ambari/branches/AMBARI-666/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/log4j.properties.erb
URL: http://svn.apache.org/viewvc/incubator/ambari/branches/AMBARI-666/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/log4j.properties.erb?rev=1387826&view=auto
==============================================================================
--- incubator/ambari/branches/AMBARI-666/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/log4j.properties.erb (added)
+++ incubator/ambari/branches/AMBARI-666/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/log4j.properties.erb Thu Sep 20 00:27:02 2012
@@ -0,0 +1,171 @@
+# Copyright 2011 The Apache Software Foundation
+# 
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Define some default values that can be overridden by system properties
+hadoop.root.logger=INFO,console
+hadoop.log.dir=.
+hadoop.log.file=hadoop.log
+
+
+# Define the root logger to the system property "hadoop.root.logger".
+log4j.rootLogger=${hadoop.root.logger}, EventCounter
+
+# Logging Threshold
+log4j.threshhold=ALL
+
+#
+# Daily Rolling File Appender
+#
+
+log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}
+
+# Rollover at midnight
+log4j.appender.DRFA.DatePattern=.yyyy-MM-dd
+
+# 30-day backup
+#log4j.appender.DRFA.MaxBackupIndex=30
+log4j.appender.DRFA.layout=org.apache.log4j.PatternLayout
+
+# Pattern format: Date LogLevel LoggerName LogMessage
+log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+# Debugging Pattern format
+#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
+
+
+#
+# console
+# Add "console" to rootlogger above if you want to use this 
+#
+
+log4j.appender.console=org.apache.log4j.ConsoleAppender
+log4j.appender.console.target=System.err
+log4j.appender.console.layout=org.apache.log4j.PatternLayout
+log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n
+
+#
+# TaskLog Appender
+#
+
+#Default values
+hadoop.tasklog.taskid=null
+hadoop.tasklog.iscleanup=false
+hadoop.tasklog.noKeepSplits=4
+hadoop.tasklog.totalLogFileSize=100
+hadoop.tasklog.purgeLogSplits=true
+hadoop.tasklog.logsRetainHours=12
+
+log4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender
+log4j.appender.TLA.taskId=${hadoop.tasklog.taskid}
+log4j.appender.TLA.isCleanup=${hadoop.tasklog.iscleanup}
+log4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}
+
+log4j.appender.TLA.layout=org.apache.log4j.PatternLayout
+log4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+
+#
+#Security audit appender
+#
+hadoop.security.logger=INFO,console
+log4j.category.SecurityLogger=${hadoop.security.logger}
+hadoop.security.log.file=SecurityAuth.audit
+log4j.appender.DRFAS=org.apache.log4j.DailyRollingFileAppender 
+log4j.appender.DRFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}
+log4j.appender.DRFAS.layout=org.apache.log4j.PatternLayout
+log4j.appender.DRFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+log4j.appender.DRFAS.DatePattern=.yyyy-MM-dd
+
+#
+# hdfs audit logging
+#
+hdfs.audit.logger=INFO,console
+log4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=${hdfs.audit.logger}
+log4j.additivity.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=false
+log4j.appender.DRFAAUDIT=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.DRFAAUDIT.File=${hadoop.log.dir}/hdfs-audit.log
+log4j.appender.DRFAAUDIT.layout=org.apache.log4j.PatternLayout
+log4j.appender.DRFAAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
+log4j.appender.DRFAAUDIT.DatePattern=.yyyy-MM-dd
+
+#
+# mapred audit logging
+#
+mapred.audit.logger=INFO,console
+log4j.logger.org.apache.hadoop.mapred.AuditLogger=${mapred.audit.logger}
+log4j.additivity.org.apache.hadoop.mapred.AuditLogger=false
+log4j.appender.MRAUDIT=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.MRAUDIT.File=${hadoop.log.dir}/mapred-audit.log
+log4j.appender.MRAUDIT.layout=org.apache.log4j.PatternLayout
+log4j.appender.MRAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
+log4j.appender.MRAUDIT.DatePattern=.yyyy-MM-dd
+
+#
+# Rolling File Appender
+#
+
+#log4j.appender.RFA=org.apache.log4j.RollingFileAppender
+#log4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}
+
+# Logfile size and 30-day backups
+#log4j.appender.RFA.MaxFileSize=1MB
+#log4j.appender.RFA.MaxBackupIndex=30
+
+#log4j.appender.RFA.layout=org.apache.log4j.PatternLayout
+#log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} - %m%n
+#log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
+
+
+# Custom Logging levels
+
+hadoop.metrics.log.level=INFO
+#log4j.logger.org.apache.hadoop.mapred.JobTracker=DEBUG
+#log4j.logger.org.apache.hadoop.mapred.TaskTracker=DEBUG
+#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG
+log4j.logger.org.apache.hadoop.metrics2=${hadoop.metrics.log.level}
+
+# Jets3t library
+log4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service=ERROR
+
+#
+# Null Appender
+# Trap security logger on the hadoop client side
+#
+log4j.appender.NullAppender=org.apache.log4j.varia.NullAppender
+
+#
+# Event Counter Appender
+# Sends counts of logging messages at different severity levels to Hadoop Metrics.
+#
+log4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter
+
+#
+# Job Summary Appender 
+#
+# Use following logger to send summary to separate file defined by 
+# hadoop.mapreduce.jobsummary.log.file rolled daily:
+# hadoop.mapreduce.jobsummary.logger=INFO,JSA
+# 
+hadoop.mapreduce.jobsummary.logger=${hadoop.root.logger}
+hadoop.mapreduce.jobsummary.log.file=hadoop-mapreduce.jobsummary.log
+log4j.appender.JSA=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.JSA.File=${hadoop.log.dir}/${hadoop.mapreduce.jobsummary.log.file}
+log4j.appender.JSA.layout=org.apache.log4j.PatternLayout
+log4j.appender.JSA.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n
+log4j.appender.JSA.DatePattern=.yyyy-MM-dd
+log4j.logger.org.apache.hadoop.mapred.JobInProgress$JobSummary=${hadoop.mapreduce.jobsummary.logger}
+log4j.additivity.org.apache.hadoop.mapred.JobInProgress$JobSummary=false

Added: incubator/ambari/branches/AMBARI-666/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/mapred-queue-acls.xml.erb
URL: http://svn.apache.org/viewvc/incubator/ambari/branches/AMBARI-666/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/mapred-queue-acls.xml.erb?rev=1387826&view=auto
==============================================================================
--- incubator/ambari/branches/AMBARI-666/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/mapred-queue-acls.xml.erb (added)
+++ incubator/ambari/branches/AMBARI-666/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/mapred-queue-acls.xml.erb Thu Sep 20 00:27:02 2012
@@ -0,0 +1,39 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+<!-- mapred-queue-acls.xml -->
+<configuration>
+
+
+<!-- queue default -->
+
+  <property>
+    <name>mapred.queue.default.acl-submit-job</name>
+    <value>*</value>
+  </property>
+
+  <property>
+    <name>mapred.queue.default.acl-administer-jobs</name>
+    <value>*</value>
+  </property>
+
+  <!-- END ACLs -->
+
+</configuration>

Added: incubator/ambari/branches/AMBARI-666/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/mapred-site.xml.erb
URL: http://svn.apache.org/viewvc/incubator/ambari/branches/AMBARI-666/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/mapred-site.xml.erb?rev=1387826&view=auto
==============================================================================
--- incubator/ambari/branches/AMBARI-666/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/mapred-site.xml.erb (added)
+++ incubator/ambari/branches/AMBARI-666/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/mapred-site.xml.erb Thu Sep 20 00:27:02 2012
@@ -0,0 +1,531 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+<!-- Put site-specific property overrides in this file. -->
+
+<configuration xmlns:xi="http://www.w3.org/2001/XInclude">
+
+<!-- i/o properties -->
+
+  <property>
+    <name>io.sort.mb</name>
+    <value><%=scope.function_hdp_template_var("io_sort_mb")%></value>
+    <description>No description</description>
+  </property>
+
+  <property>
+    <name>io.sort.record.percent</name>
+    <value>.2</value>
+    <description>No description</description>
+  </property>
+
+  <property>
+    <name>io.sort.spill.percent</name>
+    <value><%=scope.function_hdp_template_var("io_sort_spill_percent")%></value>
+    <description>No description</description>
+  </property>
+
+  <property>
+    <name>io.sort.factor</name>
+    <value>100</value>
+    <description>No description</description>
+  </property>
+
+<!-- map/reduce properties -->
+
+<property>
+  <name>mapred.tasktracker.tasks.sleeptime-before-sigkill</name>
+  <value>250</value>
+  <description>Normally this is the amount of time before killing
+  processes; the recommended default is 5 seconds (a value of 5000 here).
+  In this case it is used solely to signal tasks before killing them, and
+  to kill them very quickly (1/4 second) to guarantee that we do not leave
+  JVMs around for later jobs.
+  </description>
+</property>
+
+  <property>
+    <name>mapred.job.tracker.handler.count</name>
+    <value>50</value>
+    <description>
+    The number of server threads for the JobTracker. This should be roughly
+    4% of the number of tasktracker nodes.
+    </description>
+  </property>
+
+  <property>
+    <name>mapred.system.dir</name>
+    <value><%=scope.function_hdp_template_var("mapred_system_dir")%></value>
+    <description>No description</description>
+    <final>true</final>
+  </property>
+
+  <property>
+    <name>mapred.job.tracker</name>
+    <!-- cluster variant -->
+    <value><%=scope.function_hdp_host("jtnode_host")%>:50300</value>
+    <description>No description</description>
+    <final>true</final>
+  </property>
+
+  <property>
+    <name>mapred.job.tracker.http.address</name>
+    <!-- cluster variant -->
+    <value><%=scope.function_hdp_host("jtnode_host")%>:50030</value>
+    <description>No description</description>
+    <final>true</final>
+  </property>
+
+  <property>
+    <!-- cluster specific -->
+    <name>mapred.local.dir</name>
+    <value><%=scope.function_hdp_template_var("mapred_local_dir")%></value>
+    <description>No description</description>
+    <final>true</final>
+  </property>
+
+  <property>
+  <name>mapreduce.cluster.administrators</name>
+  <value> hadoop</value>
+  </property>
+
+  <property>
+    <name>mapred.reduce.parallel.copies</name>
+    <value>30</value>
+    <description>No description</description>
+  </property>
+
+  <property>
+    <name>mapred.tasktracker.map.tasks.maximum</name>
+    <value><%=scope.function_hdp_template_var("mapred_map_tasks_max")%></value>
+    <description>No description</description>
+  </property>
+
+  <property>
+    <name>mapred.tasktracker.reduce.tasks.maximum</name>
+    <value><%=scope.function_hdp_template_var("mapred_red_tasks_max")%></value>
+    <description>No description</description>
+  </property>
+
+  <property>
+    <name>tasktracker.http.threads</name>
+    <value>50</value>
+  </property>
+
+  <property>
+    <name>mapred.map.tasks.speculative.execution</name>
+    <value>false</value>
+    <description>If true, then multiple instances of some map tasks
+               may be executed in parallel.</description>
+  </property>
+
+  <property>
+    <name>mapred.reduce.tasks.speculative.execution</name>
+    <value>false</value>
+    <description>If true, then multiple instances of some reduce tasks
+               may be executed in parallel.</description>
+  </property>
+
+  <property>
+    <name>mapred.reduce.slowstart.completed.maps</name>
+    <value>0.05</value>
+  </property>
+
+  <property>
+    <name>mapred.inmem.merge.threshold</name>
+    <value>1000</value>
+    <description>The threshold, in terms of the number of files,
+  for the in-memory merge process. When we accumulate this number of files
+  we initiate the in-memory merge and spill to disk. A value of 0 or less
+  indicates that there is no threshold, and the merge is triggered only by
+  the ramfs's memory consumption.
+  </description>
+  </property>
+
+  <property>
+    <name>mapred.job.shuffle.merge.percent</name>
+    <value>0.66</value>
+    <description>The usage threshold at which an in-memory merge will be
+  initiated, expressed as a percentage of the total memory allocated to
+  storing in-memory map outputs, as defined by
+  mapred.job.shuffle.input.buffer.percent.
+  </description>
+  </property>
+
+  <property>
+    <name>mapred.job.shuffle.input.buffer.percent</name>
+    <value>0.7</value>
+    <description>The percentage of memory to be allocated from the maximum heap
+  size to storing map outputs during the shuffle.
+  </description>
+  </property>
+
+  <property>
+    <name>mapred.map.output.compression.codec</name>
+    <value><%=scope.function_hdp_template_var("mapred_map_output_compression_codec")%></value>
+    <description>If the map outputs are compressed, how should they be
+      compressed
+    </description>
+  </property>
+
+<property>
+  <name>mapred.output.compression.type</name>
+  <value>BLOCK</value>
+  <description>If the job outputs are to be compressed as SequenceFiles, how should
+               they be compressed? Should be one of NONE, RECORD or BLOCK.
+  </description>
+</property>
+
+
+  <property>
+    <name>mapred.jobtracker.completeuserjobs.maximum</name>
+    <value>0</value>
+  </property>
+
+  <property>
+    <name>mapred.jobtracker.taskScheduler</name>
+    <value><%=scope.function_hdp_template_var("scheduler_name")%></value>
+  </property>
+
+  <property>
+    <name>mapred.jobtracker.restart.recover</name>
+    <value>false</value>
+    <description>"true" to enable (job) recovery upon restart,
+               "false" to start afresh
+    </description>
+  </property>
+
+  <property>
+    <name>mapred.job.reduce.input.buffer.percent</name>
+    <value>0.0</value>
+    <description>The percentage of memory- relative to the maximum heap size- to
+  retain map outputs during the reduce. When the shuffle is concluded, any
+  remaining map outputs in memory must consume less than this threshold before
+  the reduce can begin.
+  </description>
+  </property>
+
+ <property>
+  <name>mapreduce.reduce.input.limit</name>
+  <value>10737418240</value>
+  <description>The limit on the input size of the reduce. (This value
+  is 10 GB.)  If the estimated input size of the reduce is greater than
+  this value, the job is failed. A value of -1 means that there is no limit
+  set. </description>
+</property>
+
+
+  <!-- copied from kryptonite configuration -->
+  <property>
+    <name>mapred.compress.map.output</name>
+    <value><%=scope.function_hdp_template_var("mapred_compress_map_output")%></value>
+  </property>
+
+
+  <property>
+    <name>mapred.task.timeout</name>
+    <value>600000</value>
+    <description>The number of milliseconds before a task will be
+  terminated if it neither reads an input, writes an output, nor
+  updates its status string.
+  </description>
+  </property>
+
+  <property>
+    <name>jetty.connector</name>
+    <value>org.mortbay.jetty.nio.SelectChannelConnector</value>
+    <description>No description</description>
+  </property>
+
+  <property>
+    <name>mapred.task.tracker.task-controller</name>
+    <value><%=scope.function_hdp_template_var("task_controller")%></value>
+   <description>
+     TaskController which is used to launch and manage task execution.
+  </description>
+  </property>
+
+  <property>
+    <name>mapred.child.root.logger</name>
+    <value>INFO,TLA</value>
+  </property>
+
+  <property>
+    <name>mapred.child.java.opts</name>
+    <value>-server <%=scope.function_hdp_template_var("mapred_child_java_opts_sz")%> -Djava.net.preferIPv4Stack=true</value>
+
+    <description>No description</description>
+  </property>
+
+  <property>
+    <name>mapred.cluster.map.memory.mb</name>
+    <value><%=scope.function_hdp_template_var("mapred_cluster_map_mem_mb")%></value>
+  </property>
+
+  <property>
+    <name>mapred.cluster.reduce.memory.mb</name>
+    <value><%=scope.function_hdp_template_var("mapred_cluster_red_mem_mb")%></value>
+  </property>
+
+  <property>
+    <name>mapred.job.map.memory.mb</name>
+    <value><%=scope.function_hdp_template_var("mapred_job_map_mem_mb")%></value>
+  </property>
+
+  <property>
+    <name>mapred.job.reduce.memory.mb</name>
+    <value><%=scope.function_hdp_template_var("mapred_job_red_mem_mb")%></value>
+  </property>
+
+  <property>
+    <name>mapred.cluster.max.map.memory.mb</name>
+    <value><%=scope.function_hdp_template_var("mapred_cluster_max_map_mem_mb")%></value>
+  </property>
+
+  <property>
+    <name>mapred.cluster.max.reduce.memory.mb</name>
+    <value><%=scope.function_hdp_template_var("mapred_cluster_max_red_mem_mb")%></value>
+  </property>
+
+<property>
+  <name>mapred.hosts</name>
+  <value><%=scope.function_hdp_template_var("conf_dir")%>/<%=scope.function_hdp_template_var("mapred_hosts_include")%></value>
+</property>
+
+<property>
+  <name>mapred.hosts.exclude</name>
+  <value><%=scope.function_hdp_template_var("conf_dir")%>/<%=scope.function_hdp_template_var("mapred_hosts_exclude")%></value>
+</property>
+
+<property>
+  <name>mapred.max.tracker.blacklists</name>
+  <value>16</value>
+  <description>
+    If a node is reported as blacklisted by 16 successful jobs within the timeout window, it will be graylisted.
+  </description>
+</property>
+
+<property>
+  <name>mapred.healthChecker.script.path</name>
+  <value><%=scope.function_hdp_template_var("conf_dir")%>/health_check</value>
+</property>
+
+<property>
+  <name>mapred.healthChecker.interval</name>
+  <value>135000</value>
+</property>
+
+<property>
+  <name>mapred.healthChecker.script.timeout</name>
+  <value>60000</value>
+</property>
+
+<property>
+  <name>mapred.job.tracker.persist.jobstatus.active</name>
+  <value>false</value>
+  <description>Indicates whether persistence of job status information is
+  active or not.
+  </description>
+</property>
+
+<property>
+  <name>mapred.job.tracker.persist.jobstatus.hours</name>
+  <value>1</value>
+  <description>The number of hours job status information is persisted in DFS.
+    The job status information will be available after it drops off the memory
+    queue and between jobtracker restarts. With a zero value the job status
+    information is not persisted at all in DFS.
+  </description>
+</property>
+
+<property>
+  <name>mapred.job.tracker.persist.jobstatus.dir</name>
+  <value><%=scope.function_hdp_template_var("mapred_jobstatus_dir")%></value>
+  <description>The directory where the job status information is persisted
+   in a file system to be available after it drops off the memory queue and
+   between jobtracker restarts.
+  </description>
+</property>
+
+<property>
+  <name>mapred.jobtracker.retirejob.check</name>
+  <value>10000</value>
+</property>
+
+<property>
+  <name>mapred.jobtracker.retirejob.interval</name>
+  <value>0</value>
+</property>
+
+<property>
+  <name>mapred.job.tracker.history.completed.location</name>
+  <value>/mapred/history/done</value>
+  <description>No description</description>
+</property>
+
+<property>
+  <name>mapred.task.maxvmem</name>
+  <value></value>
+  <final>true</final>
+   <description>No description</description>
+</property>
+
+<property>
+  <name>mapred.jobtracker.maxtasks.per.job</name>
+  <value><%=scope.function_hdp_template_var("maxtasks_per_job")%></value>
+  <final>true</final>
+  <description>The maximum number of tasks for a single job.
+  A value of -1 indicates that there is no maximum.  </description>
+</property>
+
+<property>
+  <name>mapreduce.fileoutputcommitter.marksuccessfuljobs</name>
+  <value>false</value>
+</property>
+
+<property>
+  <name>mapred.userlog.retain.hours</name>
+  <value><%=scope.function_hdp_template_var("mapreduce_userlog_retainhours")%></value>
+</property>
+
+<property>
+  <name>mapred.job.reuse.jvm.num.tasks</name>
+  <value>1</value>
+  <description>
+    How many tasks to run per JVM. If set to -1, there is no limit.
+  </description>
+  <final>true</final>
+</property>
+
+<property>
+  <name>mapreduce.jobtracker.kerberos.principal</name>
+  <value>jt/_HOST@<%=scope.function_hdp_template_var("kerberos_domain")%></value>
+  <description>
+      JT user name key.
+ </description>
+</property>
+
+<property>
+  <name>mapreduce.tasktracker.kerberos.principal</name>
+   <value>tt/_HOST@<%=scope.function_hdp_template_var("kerberos_domain")%></value>
+  <description>
+       tt user name key. "_HOST" is replaced by the host name of the task tracker.
+   </description>
+</property>
+
+
+  <property>
+    <name>hadoop.job.history.user.location</name>
+    <value>none</value>
+    <final>true</final>
+  </property>
+
+
+ <property>
+   <name>mapreduce.jobtracker.keytab.file</name>
+   <value><%=scope.function_hdp_template_var("keytab_path")%>/jt.service.keytab</value>
+   <description>
+       The keytab for the jobtracker principal.
+   </description>
+
+</property>
+
+ <property>
+   <name>mapreduce.tasktracker.keytab.file</name>
+   <value><%=scope.function_hdp_template_var("keytab_path")%>/tt.service.keytab</value>
+    <description>The filename of the keytab for the task tracker</description>
+ </property>
+
+ <property>
+   <name>mapreduce.jobtracker.staging.root.dir</name>
+   <value>/user</value>
+ <description>The path prefix under which the staging directories should be placed. The next level is always the user's
+   name. It is a path in the default file system.</description>
+ </property>
+
+ <property>
+      <name>mapreduce.tasktracker.group</name>
+      <value>hadoop</value>
+      <description>The group that the task controller uses for accessing the task controller. The mapred user must be a member and users should *not* be members.</description>
+
+ </property>
+
+  <property>
+    <name>mapreduce.jobtracker.split.metainfo.maxsize</name>
+    <value>50000000</value>
+    <final>true</final>
+     <description>If the size of the split metainfo file is larger than this, the JobTracker will fail the job during
+    initialization.
+   </description>
+  </property>
+  <property>
+    <name>mapreduce.history.server.embedded</name>
+    <value>false</value>
+    <description>Whether the job history server should be embedded within the
+JobTracker process.</description>
+    <final>true</final>
+  </property>
+
+  <property>
+    <name>mapreduce.history.server.http.address</name>
+    <!-- cluster variant -->
+    <value><%=scope.function_hdp_host("jtnode_host")%>:51111</value>
+    <description>HTTP address of the history server.</description>
+    <final>true</final>
+  </property>
+
+  <property>
+    <name>mapreduce.jobhistory.kerberos.principal</name>
+    <!-- cluster variant -->
+  <value>jt/_HOST@<%=scope.function_hdp_template_var("kerberos_domain")%></value>
+    <description>Job history user name key (must map to the same user as the JT
+user).</description>
+  </property>
+
+ <property>
+   <name>mapreduce.jobhistory.keytab.file</name>
+    <!-- cluster variant -->
+   <value><%=scope.function_hdp_template_var("keytab_path")%>/jt.service.keytab</value>
+   <description>The keytab for the job history server principal.</description>
+ </property>
+
+<property>
+  <name>mapred.jobtracker.blacklist.fault-timeout-window</name>
+  <value>180</value>
+  <description>
+    3-hour sliding window (value is in minutes)
+  </description>
+</property>
+
+<property>
+  <name>mapred.jobtracker.blacklist.fault-bucket-width</name>
+  <value>15</value>
+  <description>
+    15-minute bucket size (value is in minutes)
+  </description>
+</property>
+
+<property>
+  <name>mapred.queue.names</name>
+  <value>default</value>
+  <description> Comma separated list of queues configured for this jobtracker.</description>
+</property>
+
+</configuration>

Added: incubator/ambari/branches/AMBARI-666/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/slaves.erb
URL: http://svn.apache.org/viewvc/incubator/ambari/branches/AMBARI-666/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/slaves.erb?rev=1387826&view=auto
==============================================================================
--- incubator/ambari/branches/AMBARI-666/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/slaves.erb (added)
+++ incubator/ambari/branches/AMBARI-666/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/slaves.erb Thu Sep 20 00:27:02 2012
@@ -0,0 +1,3 @@
+<%h=scope.function_hdp_host("slave_hosts"); (h.kind_of?(Array) ? h : []).each do |host|-%>
+<%= host %>
+<%end-%>
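
The template above simply writes one slave hostname per line, and writes nothing
when slave_hosts is not a list. A rough Python sketch of the same logic, using a
hypothetical host list in place of the value Puppet would supply:

    # Hypothetical stand-in for scope.function_hdp_host("slave_hosts") in the ERB above.
    slave_hosts = ["slave1.example.com", "slave2.example.com"]

    # Mirror the h.kind_of?(Array) guard: emit one host per line, or nothing at all.
    hosts = slave_hosts if isinstance(slave_hosts, list) else []
    print("".join(host + "\n" for host in hosts))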

Added: incubator/ambari/branches/AMBARI-666/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/taskcontroller.cfg.erb
URL: http://svn.apache.org/viewvc/incubator/ambari/branches/AMBARI-666/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/taskcontroller.cfg.erb?rev=1387826&view=auto
==============================================================================
--- incubator/ambari/branches/AMBARI-666/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/taskcontroller.cfg.erb (added)
+++ incubator/ambari/branches/AMBARI-666/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/taskcontroller.cfg.erb Thu Sep 20 00:27:02 2012
@@ -0,0 +1,20 @@
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+mapred.local.dir=<%=scope.function_hdp_template_var("mapred_local_dir")%>
+mapreduce.tasktracker.group=hadoop
+hadoop.log.dir=<%=scope.function_hdp_template_var("hadoop_logdirprefix")%>/<%=scope.function_hdp_template_var("mapred_user")%>

Added: incubator/ambari/branches/AMBARI-666/ambari-agent/src/main/python/ambari_agent/security.py
URL: http://svn.apache.org/viewvc/incubator/ambari/branches/AMBARI-666/ambari-agent/src/main/python/ambari_agent/security.py?rev=1387826&view=auto
==============================================================================
--- incubator/ambari/branches/AMBARI-666/ambari-agent/src/main/python/ambari_agent/security.py (added)
+++ incubator/ambari/branches/AMBARI-666/ambari-agent/src/main/python/ambari_agent/security.py Thu Sep 20 00:27:02 2012
@@ -0,0 +1,65 @@
+import httplib
+import urllib2
+from urllib2 import Request
+import socket
+import ssl
+import os
+import logging
+from subprocess import Popen, PIPE
+import AmbariConfig
+
+logger = logging.getLogger()
+
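+# Command template used to generate the agent's private key and certificate
+# signing request with openssl; %(keysdir)s and %(hostname)s are filled in
+# from the agent configuration and the local hostname.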
+GEN_AGENT_KEY="openssl req -new -newkey rsa:1024 -nodes -keyout %(keysdir)s/%(hostname)s.key\
+	-subj /OU=%(hostname)s/\
+        -out %(keysdir)s/%(hostname)s.csr"
+
+class CertificateManager():
+    def __init__(self, config):
+        self.config = config
+        self.keysdir = self.config.get('security', 'keysdir')
+        self.server_crt=self.config.get('security', 'server_crt')
+    def getAgentKeyName(self):
+        return self.keysdir + os.sep + socket.gethostname() + ".key"
+    def getAgentCrtName(self):
+        return self.keysdir + os.sep + socket.gethostname() + ".crt"
+    def getSrvrCrtName(self):
+        return self.keysdir + os.sep + "ca.crt"
+        
+    def checkCertExists(self):
+        # Download the server (CA) certificate if it is missing, then make sure
+        # the agent certificate exists, generating a signing request if not.
+        server_crt_exists = os.path.exists(self.getSrvrCrtName())
+
+        if not server_crt_exists:
+            logger.info("Server certificate does not exist, downloading")
+            self.loadSrvrCrt()
+        else:
+            logger.info("Server certificate exists, ok")
+
+        agent_crt_exists = os.path.exists(self.getAgentCrtName())
+
+        logger.info(self.getAgentCrtName())
+
+        if not agent_crt_exists:
+            logger.info("Agent certificate does not exist, generating request")
+            self.genAgentCrtReq()
+        else:
+            logger.info("Agent certificate exists, ok")
+
+    def loadSrvrCrt(self):
+        # Fetch the CA certificate from the server and write it to disk.
+        get_ca_url = self.config.get('server', 'url') + '/cert/ca/'
+        stream = urllib2.urlopen(get_ca_url)
+        response = stream.read()
+        stream.close()
+        srvr_crt_f = open(self.getSrvrCrtName(), 'w+')
+        srvr_crt_f.write(response)
+        srvr_crt_f.close()
+
+    def genAgentCrtReq(self):
+        # Generate the agent's private key and certificate signing request
+        # by shelling out to openssl.
+        generate_script = GEN_AGENT_KEY % {'hostname': socket.gethostname(),
+                                           'keysdir' : self.config.get('security', 'keysdir')}
+        logger.info(generate_script)
+        pp = Popen([generate_script], shell=True, stdout=PIPE)
+        pp.communicate()  # wait for openssl to finish before continuing
+
+    def initSecurity(self):
+        self.checkCertExists()
\ No newline at end of file
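
A minimal usage sketch for the class above, assuming a plain ConfigParser object
stands in for the agent's AmbariConfig and that the key directory and server URL
shown are hypothetical:

    import ConfigParser  # Python 2, matching the httplib/urllib2 imports above

    config = ConfigParser.ConfigParser()
    config.add_section('security')
    config.set('security', 'keysdir', '/var/lib/ambari-agent/keys')        # hypothetical path
    config.set('security', 'server_crt', 'ca.crt')
    config.add_section('server')
    config.set('server', 'url', 'https://ambari-server.example.com:8440')  # hypothetical URL

    certman = CertificateManager(config)
    certman.initSecurity()  # downloads ca.crt if missing, then generates the agent key/CSR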

Modified: incubator/ambari/branches/AMBARI-666/ambari-project/pom.xml
URL: http://svn.apache.org/viewvc/incubator/ambari/branches/AMBARI-666/ambari-project/pom.xml?rev=1387826&r1=1387825&r2=1387826&view=diff
==============================================================================
--- incubator/ambari/branches/AMBARI-666/ambari-project/pom.xml (original)
+++ incubator/ambari/branches/AMBARI-666/ambari-project/pom.xml Thu Sep 20 00:27:02 2012
@@ -110,6 +110,46 @@
         <version>10.9.1.0</version>
       </dependency>
       <dependency>
+        <groupId>org.springframework.security</groupId>
+        <artifactId>spring-security-core</artifactId>
+        <version>3.1.2.RELEASE</version>
+      </dependency>
+      <dependency>
+        <groupId>org.springframework.security</groupId>
+        <artifactId>spring-security-config</artifactId>
+        <version>3.1.2.RELEASE</version>
+      </dependency>
+      <dependency>
+        <groupId>org.springframework.security</groupId>
+        <artifactId>spring-security-web</artifactId>
+        <version>3.1.2.RELEASE</version>
+      </dependency>
+      <dependency>
+        <groupId>org.springframework.security</groupId>
+        <artifactId>spring-security-ldap</artifactId>
+        <version>3.1.2.RELEASE</version>
+      </dependency>
+      <dependency>
+        <groupId>org.springframework.ldap</groupId>
+        <artifactId>spring-ldap-core</artifactId>
+        <version>1.3.1.RELEASE</version>
+      </dependency>
+      <dependency>
+        <groupId>org.apache.directory.server</groupId>
+        <artifactId>apacheds-all</artifactId>
+        <version>1.5.5</version>
+      </dependency>
+      <dependency>
+        <groupId>org.slf4j</groupId>
+        <artifactId>slf4j-api</artifactId>
+        <version>1.6.6</version>
+      </dependency>
+      <dependency>
+        <groupId>org.slf4j</groupId>
+        <artifactId>slf4j-log4j12</artifactId>
+        <version>1.0.1</version>
+      </dependency>
+      <dependency>
         <groupId>org.eclipse.persistence</groupId>
         <artifactId>eclipselink</artifactId>
         <version>2.4.0</version>

Modified: incubator/ambari/branches/AMBARI-666/ambari-server/pom.xml
URL: http://svn.apache.org/viewvc/incubator/ambari/branches/AMBARI-666/ambari-server/pom.xml?rev=1387826&r1=1387825&r2=1387826&view=diff
==============================================================================
--- incubator/ambari/branches/AMBARI-666/ambari-server/pom.xml (original)
+++ incubator/ambari/branches/AMBARI-666/ambari-server/pom.xml Thu Sep 20 00:27:02 2012
@@ -77,6 +77,46 @@
       <artifactId>derby</artifactId>
     </dependency>
     <dependency>
+      <groupId>org.springframework.security</groupId>
+      <artifactId>spring-security-core</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.springframework.security</groupId>
+      <artifactId>spring-security-config</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.springframework.security</groupId>
+      <artifactId>spring-security-web</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.springframework.security</groupId>
+      <artifactId>spring-security-ldap</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.springframework.ldap</groupId>
+      <artifactId>spring-ldap-core</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.directory.server</groupId>
+      <artifactId>apacheds-all</artifactId>
+    </dependency>
+    <!--<dependency>-->
+      <!--<groupId>org.apache.directory.shared</groupId>-->
+      <!--<artifactId>shared-ldap</artifactId>-->
+    <!--</dependency>-->
+    <dependency>
+      <groupId>org.slf4j</groupId>
+      <artifactId>slf4j-api</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.slf4j</groupId>
+      <artifactId>slf4j-log4j12</artifactId>
+    </dependency>
+    <dependency>
+    <groupId>log4j</groupId>
+    <artifactId>log4j</artifactId>
+    </dependency>
+    <dependency>
       <groupId>org.eclipse.persistence</groupId>
       <artifactId>eclipselink</artifactId>
     </dependency>

Modified: incubator/ambari/branches/AMBARI-666/ambari-server/src/main/assemblies/server.xml
URL: http://svn.apache.org/viewvc/incubator/ambari/branches/AMBARI-666/ambari-server/src/main/assemblies/server.xml?rev=1387826&r1=1387825&r2=1387826&view=diff
==============================================================================
--- incubator/ambari/branches/AMBARI-666/ambari-server/src/main/assemblies/server.xml (original)
+++ incubator/ambari/branches/AMBARI-666/ambari-server/src/main/assemblies/server.xml Thu Sep 20 00:27:02 2012
@@ -28,6 +28,12 @@
       <source>${project.build.directory}/${artifact.artifactId}-${artifact.version}.jar</source>
       <outputDirectory>ambari-server-${project.version}/lib</outputDirectory>
     </file>
+
+    <file>
+      <source>${basedir}/src/main/resources/pass.txt</source>
+      <outputDirectory>/ambari-server-${project.version}</outputDirectory>
+    </file>
+
   </files>
   <fileSets>
     <!-- Distro files, readme, licenses, etc -->

Modified: incubator/ambari/branches/AMBARI-666/ambari-server/src/main/java/org/apache/ambari/server/configuration/Configuration.java
URL: http://svn.apache.org/viewvc/incubator/ambari/branches/AMBARI-666/ambari-server/src/main/java/org/apache/ambari/server/configuration/Configuration.java?rev=1387826&r1=1387825&r2=1387826&view=diff
==============================================================================
--- incubator/ambari/branches/AMBARI-666/ambari-server/src/main/java/org/apache/ambari/server/configuration/Configuration.java (original)
+++ incubator/ambari/branches/AMBARI-666/ambari-server/src/main/java/org/apache/ambari/server/configuration/Configuration.java Thu Sep 20 00:27:02 2012
@@ -21,26 +21,57 @@ import java.io.File;
 import java.io.FileInputStream;
 import java.io.FileNotFoundException;
 import java.io.IOException;
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.util.HashMap;
+import java.util.Map;
 import java.util.Properties;
 
+import org.apache.commons.io.FileUtils;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 
+import com.google.inject.Singleton;
+
 
 /**
  * Ambari configuration.
  * Reads properties from ambari.properties
  */
+@Singleton
 public class Configuration {
+
   private static final String AMBARI_CONF_VAR = "AMBARI_CONF_DIR";
   private static final String CONFIG_FILE = "ambari.properties";
   public static final String BOOTSTRAP_DIR = "bootstrap.dir";
   public static final String BOOTSTRAP_SCRIPT = "bootstrap.script";
+  public static final String SRVR_KSTR_DIR_KEY = "security.server.keys_dir";
+  public static final String SRVR_CRT_NAME_KEY = "security.server.cert_name";
+  public static final String SRVR_KEY_NAME_KEY = "security.server.key_name";
+  public static final String KSTR_NAME_KEY = "security.server.keystore_name";
+  public static final String SRVR_CRT_PASS_FILE_KEY = "security.server.crt_pass_file";
+  public static final String SRVR_CRT_PASS_KEY = "security.server.crt_pass";
+  public static final String CLIENT_SECURITY_KEY = "client.security";
+  private static final String SRVR_KSTR_DIR_DEFAULT = ".";
+  private static final String SRVR_CRT_NAME_DEFAULT = "ca.crt";
+  private static final String SRVR_KEY_NAME_DEFAULT = "ca.key";
+  private static final String KSTR_NAME_DEFAULT = "keystore.p12";
+  private static final String SRVR_CRT_PASS_FILE_DEFAULT ="pass.txt";
+  private static final String CLIENT_SECURITY_DEFAULT = "local";
+
 
+
+  
   private static final Log LOG = LogFactory.getLog(Configuration.class);
 
+  private static Configuration instance;
+
   private Properties properties;
-  
+
+
+  private Map<String, String> configsMap;
+
+
   Configuration() {
     this(readConfigFile());
   }
@@ -52,6 +83,29 @@ public class Configuration {
    */
   public Configuration(Properties properties) {
     this.properties = properties;
+
+    configsMap = new HashMap<String, String>();
+    configsMap.put(SRVR_KSTR_DIR_KEY, properties.getProperty(SRVR_KSTR_DIR_KEY, SRVR_KSTR_DIR_DEFAULT));
+    configsMap.put(SRVR_CRT_NAME_KEY, properties.getProperty(SRVR_CRT_NAME_KEY, SRVR_CRT_NAME_DEFAULT));
+    configsMap.put(SRVR_KEY_NAME_KEY, properties.getProperty(SRVR_KEY_NAME_KEY, SRVR_KEY_NAME_DEFAULT));
+    configsMap.put(KSTR_NAME_KEY, properties.getProperty(KSTR_NAME_KEY, KSTR_NAME_DEFAULT));
+    configsMap.put(SRVR_CRT_PASS_FILE_KEY, properties.getProperty(SRVR_CRT_PASS_FILE_KEY, SRVR_CRT_PASS_FILE_DEFAULT));
+    configsMap.put(CLIENT_SECURITY_KEY, properties.getProperty(CLIENT_SECURITY_KEY, CLIENT_SECURITY_DEFAULT));
+
+    // Read the certificate password from the pass file, if one exists.
+    try {
+      File passFile = new File(configsMap.get(SRVR_KSTR_DIR_KEY) + File.separator
+          + configsMap.get(SRVR_CRT_PASS_FILE_KEY));
+      if (passFile.exists()) {
+        String srvrCrtPass = FileUtils.readFileToString(passFile);
+        configsMap.put(SRVR_CRT_PASS_KEY, srvrCrtPass.trim());
+      } else {
+        LOG.info("Password file not found at " + passFile);
+      }
+    } catch (IOException e) {
+      throw new RuntimeException("Error reading certificate password from file", e);
+    }
   }
 
   /**
@@ -75,11 +129,11 @@ public class Configuration {
       LOG.info("No configuration file " + filename + " found.", fnf);
     } catch (IOException ie) {
       throw new IllegalArgumentException("Can't read configuration file " +
-                                         filename, ie);
+          filename, ie);
     }
     return properties;
   }
-  
+
   public File getBootStrapDir() {
     String fileName = properties.getProperty(BOOTSTRAP_DIR);
     if (fileName == null) {
@@ -87,7 +141,7 @@ public class Configuration {
     }
     return new File(fileName);
   }
-  
+
   public String getBootStrapScript() {
     String bootscript = properties.getProperty(BOOTSTRAP_SCRIPT);
     if (bootscript == null) {
@@ -95,4 +149,14 @@ public class Configuration {
     }
     return bootscript;
   }
+  
+  /**
+   * Get the map with server config parameters.
+   * Keys - public constants of this class
+   * @return the map with server config parameters
+   */
+  public Map<String, String> getConfigsMap() {
+    return configsMap;
+  }
+
 }
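
The constructor change above copies the security-related properties into configsMap
(falling back to the defaults) and, if a pass.txt file exists under the keys
directory, reads the certificate password from it. A rough sketch of that flow in
Python, with hypothetical property values (the authoritative logic is the Java
constructor above):

    import os

    # Hypothetical properties read from ambari.properties; the fallbacks mirror
    # SRVR_KSTR_DIR_DEFAULT and SRVR_CRT_PASS_FILE_DEFAULT above.
    properties = {"security.server.keys_dir": "/var/lib/ambari-server/keys"}

    configs = {
        "security.server.keys_dir": properties.get("security.server.keys_dir", "."),
        "security.server.crt_pass_file": properties.get("security.server.crt_pass_file", "pass.txt"),
    }

    pass_file = os.path.join(configs["security.server.keys_dir"],
                             configs["security.server.crt_pass_file"])
    if os.path.exists(pass_file):
        with open(pass_file) as f:
            configs["security.server.crt_pass"] = f.read().strip()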


