incubator-ambari-commits mailing list archives

From maha...@apache.org
Subject svn commit: r1400790 [1/2] - in /incubator/ambari/branches/AMBARI-666: ./ ambari-agent/src/main/puppet/modules/configgenerator/manifests/ ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/ ambari-agent/src/main/puppet/modules/hdp-hcat-old/manif...
Date Mon, 22 Oct 2012 07:44:07 GMT
Author: mahadev
Date: Mon Oct 22 07:44:06 2012
New Revision: 1400790

URL: http://svn.apache.org/viewvc?rev=1400790&view=rev
Log:
AMBARI-877. Refactor resource provider implementation for changes to management interface. (Tom Beerbower via mahadev)

Added:
    incubator/ambari/branches/AMBARI-666/ambari-agent/src/main/python/ambari_agent/StatusCheck.py
    incubator/ambari/branches/AMBARI-666/ambari-agent/src/main/python/ambari_agent/servicesToPidNames.dict
    incubator/ambari/branches/AMBARI-666/ambari-server/src/main/java/org/apache/ambari/server/api/services/AmbariMetaInfo.java
    incubator/ambari/branches/AMBARI-666/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/ExecutionCommandDAO.java
    incubator/ambari/branches/AMBARI-666/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/HostRoleCommandDAO.java
    incubator/ambari/branches/AMBARI-666/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/RoleSuccessCriteriaDAO.java
    incubator/ambari/branches/AMBARI-666/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/StageDAO.java
    incubator/ambari/branches/AMBARI-666/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ExecutionCommandEntity.java
    incubator/ambari/branches/AMBARI-666/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/HostRoleCommandEntity.java
    incubator/ambari/branches/AMBARI-666/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/RoleSuccessCriteriaEntity.java
    incubator/ambari/branches/AMBARI-666/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/RoleSuccessCriteriaEntityPK.java
    incubator/ambari/branches/AMBARI-666/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/StageEntity.java
    incubator/ambari/branches/AMBARI-666/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/StageEntityPK.java
    incubator/ambari/branches/AMBARI-666/ambari-server/src/main/java/org/apache/ambari/server/state/PropertyInfo.java
    incubator/ambari/branches/AMBARI-666/ambari-server/src/main/java/org/apache/ambari/server/state/RepositoryInfo.java
    incubator/ambari/branches/AMBARI-666/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceInfo.java
    incubator/ambari/branches/AMBARI-666/ambari-server/src/main/java/org/apache/ambari/server/state/StackInfo.java
    incubator/ambari/branches/AMBARI-666/ambari-server/src/main/resources/Ambari-DDL.sql
Modified:
    incubator/ambari/branches/AMBARI-666/AMBARI-666-CHANGES.txt
    incubator/ambari/branches/AMBARI-666/ambari-agent/src/main/puppet/modules/configgenerator/manifests/configfile.pp
    incubator/ambari/branches/AMBARI-666/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/init.pp
    incubator/ambari/branches/AMBARI-666/ambari-agent/src/main/puppet/modules/hdp-hcat-old/manifests/init.pp
    incubator/ambari/branches/AMBARI-666/ambari-agent/src/main/puppet/modules/hdp-hive/manifests/init.pp
    incubator/ambari/branches/AMBARI-666/ambari-agent/src/main/puppet/modules/hdp-oozie/manifests/init.pp
    incubator/ambari/branches/AMBARI-666/ambari-agent/src/main/puppet/modules/hdp-templeton/manifests/init.pp
    incubator/ambari/branches/AMBARI-666/ambari-agent/src/main/python/ambari_agent/manifestGenerator.py
    incubator/ambari/branches/AMBARI-666/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ResourceProviderImpl.java
    incubator/ambari/branches/AMBARI-666/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ClusterEntity.java
    incubator/ambari/branches/AMBARI-666/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/HostEntity.java
    incubator/ambari/branches/AMBARI-666/ambari-server/src/main/resources/META-INF/persistence.xml
    incubator/ambari/branches/AMBARI-666/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ResourceProviderImplTest.java

Modified: incubator/ambari/branches/AMBARI-666/AMBARI-666-CHANGES.txt
URL: http://svn.apache.org/viewvc/incubator/ambari/branches/AMBARI-666/AMBARI-666-CHANGES.txt?rev=1400790&r1=1400789&r2=1400790&view=diff
==============================================================================
--- incubator/ambari/branches/AMBARI-666/AMBARI-666-CHANGES.txt (original)
+++ incubator/ambari/branches/AMBARI-666/AMBARI-666-CHANGES.txt Mon Oct 22 07:44:06 2012
@@ -12,6 +12,9 @@ AMBARI-666 branch (unreleased changes)
 
   NEW FEATURES
 
+  AMBARI-877. Refactor resource provider implementation for changes to
+  management interface. (Tom Beerbower via mahadev)
+
   AMBARI-876. Put metrics under metrics category. (Tom Beerbower via 
   mahadev)
 

Modified: incubator/ambari/branches/AMBARI-666/ambari-agent/src/main/puppet/modules/configgenerator/manifests/configfile.pp
URL: http://svn.apache.org/viewvc/incubator/ambari/branches/AMBARI-666/ambari-agent/src/main/puppet/modules/configgenerator/manifests/configfile.pp?rev=1400790&r1=1400789&r2=1400790&view=diff
==============================================================================
--- incubator/ambari/branches/AMBARI-666/ambari-agent/src/main/puppet/modules/configgenerator/manifests/configfile.pp (original)
+++ incubator/ambari/branches/AMBARI-666/ambari-agent/src/main/puppet/modules/configgenerator/manifests/configfile.pp Mon Oct 22 07:44:06 2012
@@ -1,61 +1,61 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-#
-# Generates xml configs from the given key-value hash maps
-#
-# Config file format:
-#
-# <configuration>
-#   <property>
-#     <name>name1</name><value>value1</value>
-#   </property>
-#     ..
-#   <property>
-#     <name>nameN</name><value>valueN</value>
-#   </property>
-# </configuration>
-#
-# Params:
-# - configname - name of the config file (class title by default)
-# - modulespath - modules path ('/etc/puppet/modules' by default)
-# - module - module name
-# - properties - set of the key-value pairs (puppet hash) which corresponds to property name - property value pairs of config file
-#
-# Note: Set correct $modulespath in the configgenerator (or pass it as parameter)
-#
-
-define configgenerator::configfile ($modulespath='/etc/puppet/modules', $filename, $module, $configuration) {
-  $configcontent = inline_template('<configuration>
-  <% configuration.props.each do |key,value| -%>
-  <property>
-    <name><%=key %></name>
-    <value><%=value %></value>
-  </property>
-  <% end -%>
-</configuration>')
- 
-file {'config':
-  ensure  => present,
-  content => $configcontent,
-  path => "${modulespath}/${module}/templates/${filename}",
-}
-} 
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+
+#
+# Generates xml configs from the given key-value hash maps
+#
+# Config file format:
+#
+# <configuration>
+#   <property>
+#     <name>name1</name><value>value1</value>
+#   </property>
+#     ..
+#   <property>
+#     <name>nameN</name><value>valueN</value>
+#   </property>
+# </configuration>
+#
+# Params:
+# - filename - name of the config file
+# - modulespath - modules path ('/etc/puppet/modules' by default)
+# - module - module name
+# - configuration - puppet hash of key-value pairs corresponding to the property name/value pairs of the config file
+#
+# Note: Set the correct $modulespath in the configgenerator (or pass it as a parameter)
+#
+
+define configgenerator::configfile ($modulespath='/etc/puppet/modules', $filename, $module, $configuration) {
+  $configcontent = inline_template('<configuration>
+  <% configuration.each do |key,value| -%>
+  <property>
+    <name><%=key %></name>
+    <value><%=value %></value>
+  </property>
+  <% end -%>
+</configuration>')
+ 
+file {'config':
+  ensure  => present,
+  content => $configcontent,
+  path => "${modulespath}/${module}/templates/${filename}",
+}
+} 
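
Aside: the functional change in this file is in the inline_template ERB loop, which now iterates the $configuration hash directly (configuration.each) rather than a props member (configuration.props.each). A minimal Python sketch, not part of this commit, of the XML such a template renders from a key-value hash:

def render_configuration(properties):
    # One <property> element per key-value pair, as in the ERB template above.
    lines = ['<configuration>']
    for key, value in properties.items():
        lines.append('  <property>')
        lines.append('    <name>%s</name>' % key)
        lines.append('    <value>%s</value>' % value)
        lines.append('  </property>')
    lines.append('</configuration>')
    return '\n'.join(lines)

# Example: render_configuration({'fs.default.name': 'hdfs://namenode:8020'})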

Modified: incubator/ambari/branches/AMBARI-666/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/init.pp
URL: http://svn.apache.org/viewvc/incubator/ambari/branches/AMBARI-666/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/init.pp?rev=1400790&r1=1400789&r2=1400790&view=diff
==============================================================================
--- incubator/ambari/branches/AMBARI-666/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/init.pp (original)
+++ incubator/ambari/branches/AMBARI-666/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/init.pp Mon Oct 22 07:44:06 2012
@@ -1,277 +1,282 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-#singleton for use with <||> form so that namenode, datanode, etc can pass state to hdp-hadoop and still use include
-define hdp-hadoop::common(
-  $service_states = []
-)
-{
-  class { 'hdp-hadoop':
-    service_states => $service_states    
-  }
-  anchor{'hdp-hadoop::common::begin':} -> Class['hdp-hadoop'] -> anchor{'hdp-hadoop::common::end':} 
-}
-
-class hdp-hadoop::initialize()
-{
-  if ($hdp::params::component_exists['hdp-hadoop'] == true) {
-  } else {
-    $hdp::params::component_exists['hdp-hadoop'] = true
-  }
-  hdp-hadoop::common { 'common':}
-  anchor{'hdp-hadoop::initialize::begin':} -> Hdp-hadoop::Common['common'] -> anchor{'hdp-hadoop::initialize::end':}
-
-# Configs generation  
-# site.pp must have following configurations: 
-# hdp_hadoop__mapred_queue_acls
-# hdp_hadoop__hadoop_policy
-# hdp_hadoop__core_site
-# hdp_hadoop__mapred_site
-# hdp_hadoop__capacity_scheduler
-# hdp_hadoop__hdfs_site
-  
-  configgenerator::configfile{'mapred_queue_acls_xml': 
-    filename => 'mapred-queue-acls.xml',
-    module => 'hdp-hadoop',
-    configuration => $configuration['hdp_hadoop__mapred_queue_acls']
-  }
-  
-  configgenerator::configfile{'hadoop_policy_xml': 
-    filename => 'hadoop-policy.xml',
-    module => 'hdp-hadoop',
-    configuration => $configuration['hdp_hadoop__hadoop_policy']
-  }
-  
-  configgenerator::configfile{'core_site_xml': 
-    filename => 'core-site.xml',
-    module => 'hdp-hadoop',
-    configuration => $configuration['hdp_hadoop__core_site']
-  }
-
-  configgenerator::configfile{'mapred_site_xml': 
-    filename => 'mapred-site.xml',
-    module => 'hdp-hadoop',
-    configuration => $configuration['hdp_hadoop__mapred_site']
-  }
-  
-  configgenerator::configfile{'capacity_scheduler_xml': 
-    filename => 'capacity-scheduler.xml',
-    module => 'hdp-hadoop',
-    configuration => $configuration['hdp_hadoop__capacity_scheduler']
-  }
-
-  configgenerator::configfile{'hdfs_site_xml': 
-    filename => 'hdfs-site.xml',
-    module => 'hdp-hadoop',
-    configuration => $configuration['hdp_hadoop__hdfs_site']
-  }
-}
-
-class hdp-hadoop(
-  $service_states  = []
-)
-{
-  include hdp-hadoop::params
-  $hadoop_config_dir = $hdp-hadoop::params::conf_dir
-  $mapred_user = $hdp-hadoop::params::mapred_user  
-  $hdfs_user = $hdp-hadoop::params::hdfs_user  
-
-  anchor{'hdp-hadoop::begin':} 
-  anchor{'hdp-hadoop::end':} 
-
-  if ('uninstalled' in $service_states) {
-    hdp-hadoop::package { 'hadoop':
-      ensure => 'uninstalled'
-    }
-
-    hdp::directory_recursive_create { $hadoop_config_dir:
-      service_state => $service_state,
-      force => true
-    }
-
-    Anchor['hdp-hadoop::begin'] -> Hdp-hadoop::Package<||> -> Hdp::Directory_recursive_create[$hadoop_config_dir] -> Anchor['hdp-hadoop::end']
-  } else {
-    
-    hdp-hadoop::package { 'hadoop':}
-
-
-    hdp::directory_recursive_create { $hadoop_config_dir:
-      service_state => $service_state,
-      force => true
-    }
- 
-    hdp::user{ $hdfs_user:}
-    hdp::user { $mapred_user:}
-
-    $logdirprefix = $hdp-hadoop::params::hadoop_logdirprefix
-    hdp::directory_recursive_create { $logdirprefix: 
-        owner => 'root'
-    }
-    $piddirprefix = $hdp-hadoop::params::hadoop_piddirprefix
-    hdp::directory_recursive_create { $piddirprefix: 
-        owner => 'root'
-    }
- 
-    #taskcontroller.cfg properties conditional on security
-    if ($hdp::params::security_enabled == true) {
-      file { "${hdp::params::hadoop_bin}/task-controller":
-        owner   => 'root',
-        group   => $hdp::params::hadoop_user_group,
-        mode    => '6050',
-        require => Hdp-hadoop::Package['hadoop'],
-        before  => Anchor['hdp-hadoop::end']
-      }
-      $tc_owner = 'root'
-      $tc_mode = '0400'
-    } else {
-      $tc_owner = $hdfs_user
-      $tc_mode = undef
-    }
-    hdp-hadoop::configfile { 'taskcontroller.cfg' :
-      tag   => 'common',
-      owner => $tc_owner,
-      mode  => $tc_mode
-    }
-
-    $template_files = ['hadoop-env.sh','core-site.xml','hadoop-policy.xml','health_check','capacity-scheduler.xml','commons-logging.properties','log4j.properties','mapred-queue-acls.xml','slaves']
-    hdp-hadoop::configfile { $template_files:
-      tag   => 'common', 
-      owner => $hdfs_user
-    }
-    
-    hdp-hadoop::configfile { 'hadoop-metrics2.properties' : 
-      tag   => 'common', 
-      owner => $hdfs_user,
-    }
-
-    hdp-hadoop::configfile { 'mapred-site.xml': 
-      tag => 'common', 
-      owner => $mapred_user
-    }
-
-    Anchor['hdp-hadoop::begin'] -> Hdp-hadoop::Package<||> ->  Hdp::Directory_recursive_create[$hadoop_config_dir] ->  Hdp::User<|title == $hdfs_user or title == $mapred_user|> 
-    -> Hdp-hadoop::Configfile<|tag == 'common'|> -> Anchor['hdp-hadoop::end']
-    Anchor['hdp-hadoop::begin'] -> Hdp::Directory_recursive_create[$logdirprefix] -> Anchor['hdp-hadoop::end']
-    Anchor['hdp-hadoop::begin'] -> Hdp::Directory_recursive_create[$piddirprefix] -> Anchor['hdp-hadoop::end']
-  }
-}
-
-class hdp-hadoop::enable-ganglia()
-{
-  Hdp-hadoop::Configfile<|title  == 'hadoop-metrics2.properties'|>{template_tag => 'GANGLIA'}
-}
-
-###config file helper
-define hdp-hadoop::configfile(
-  $owner = undef,
-  $hadoop_conf_dir = $hdp-hadoop::params::conf_dir,
-  $mode = undef,
-  $namenode_host = undef,
-  $jtnode_host = undef,
-  $snamenode_host = undef,
-  $template_tag = undef,
-  $size = undef, #TODO: deprecate
-  $sizes = []
-) 
-{
-  #TODO: may need to be fixed 
-  if ($jtnode_host == undef) {
-    $calc_jtnode_host = $namenode_host
-  } else {
-    $calc_jtnode_host = $jtnode_host 
-  }
- 
-  #only set 32 if theer is a 32 bit component and no 64 bit components
-  if (64 in $sizes) {
-    $common_size = 64
-  } elsif (32 in $sizes) {
-    $common_size = 32
-  } else {
-    $common_size = 6
-  }
-  
-  hdp::configfile { "${hadoop_conf_dir}/${name}":
-    component      => 'hadoop',
-    owner          => $owner,
-    mode           => $mode,
-    namenode_host  => $namenode_host,
-    snamenode_host => $snamenode_host,
-    jtnode_host    => $calc_jtnode_host,
-    template_tag   => $template_tag,
-    size           => $common_size
-  }
-}
-
-#####
-define hdp-hadoop::exec-hadoop(
-  $command,
-  $unless = undef,
-  $refreshonly = undef,
-  $echo_yes = false,
-  $kinit_override = false,
-  $tries = 1,
-  $timeout = 900,
-  $try_sleep = undef,
-  $user = undef,
-  $logoutput = undef
-)
-{
-  include hdp-hadoop::params
-  $security_enabled = $hdp::params::security_enabled
-  $conf_dir = $hdp-hadoop::params::conf_dir
-  $hdfs_user = $hdp-hadoop::params::hdfs_user
-
-  if ($user == undef) {
-    $run_user = $hdfs_user
-  } else {
-    $run_user = $user
-  }
-
-  if (($security_enabled == true) and ($kinit_override == false)) {
-    #TODO: may figure out so dont need to call kinit if auth in caceh already
-    if ($run_user in [$hdfs_user,'root']) {
-      $keytab = "${hdp-hadoop::params::keytab_path}/${hdfs_user}.headless.keytab"
-      $principal = $hdfs_user
-    } else {
-      $keytab = "${hdp-hadoop::params::keytab_path}/${user}.headless.keytab" 
-      $principal = $user
-    }
-    $kinit_if_needed = "/usr/kerberos/bin/kinit  -kt ${keytab} ${principal}; "
-  } else {
-    $kinit_if_needed = ""
-  }
- 
-  if ($echo_yes == true) {
-    $cmd = "${kinit_if_needed}yes Y | hadoop --config ${conf_dir} ${command}"
-  } else {
-    $cmd = "${kinit_if_needed}hadoop --config ${conf_dir} ${command}"
-  }
-
-  hdp::exec { $cmd:
-    command     => $cmd,
-    user        => $run_user,
-    unless      => $unless,
-    refreshonly => $refreshonly,
-    tries       => $tries,
-    timeout     => $timeout,
-    try_sleep   => $try_sleep,
-    logoutput   => $logoutput
-  }
-}
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+#singleton for use with <||> form so that namenode, datanode, etc. can pass state to hdp-hadoop and still use include
+define hdp-hadoop::common(
+  $service_states = []
+)
+{
+  class { 'hdp-hadoop':
+    service_states => $service_states    
+  }
+  anchor{'hdp-hadoop::common::begin':} -> Class['hdp-hadoop'] -> anchor{'hdp-hadoop::common::end':} 
+}
+
+class hdp-hadoop::initialize()
+{
+  if ($hdp::params::component_exists['hdp-hadoop'] == true) {
+  } else {
+    $hdp::params::component_exists['hdp-hadoop'] = true
+  }
+  hdp-hadoop::common { 'common':}
+  anchor{'hdp-hadoop::initialize::begin':} -> Hdp-hadoop::Common['common'] -> anchor{'hdp-hadoop::initialize::end':}
+
+# Configs generation  
+
+  if has_key($configuration, 'hdp_hadoop__mapred_queue_acls') {
+    configgenerator::configfile{'mapred_queue_acls_xml': 
+      filename => 'mapred-queue-acls.xml',
+      module => 'hdp-hadoop',
+      configuration => $configuration['hdp_hadoop__mapred_queue_acls']
+    }
+  }
+  
+  if has_key($configuration, 'hdp_hadoop__hadoop_policy') {
+    configgenerator::configfile{'hadoop_policy_xml': 
+      filename => 'hadoop-policy.xml',
+      module => 'hdp-hadoop',
+      configuration => $configuration['hdp_hadoop__hadoop_policy']
+    }
+  }
+  
+  if has_key($configuration, 'hdp_hadoop__core_site') {
+    configgenerator::configfile{'core_site_xml': 
+      filename => 'core-site.xml',
+      module => 'hdp-hadoop',
+      configuration => $configuration['hdp_hadoop__core_site']
+    }
+  }
+
+  if has_key($configuration, 'hdp_hadoop__mapred_site') {
+    configgenerator::configfile{'mapred_site_xml': 
+      filename => 'mapred-site.xml',
+      module => 'hdp-hadoop',
+      configuration => $configuration['hdp_hadoop__mapred_site']
+    }
+  }
+  
+  if has_key($configuration, 'hdp_hadoop__capacity_scheduler') {
+    configgenerator::configfile{'capacity_scheduler_xml': 
+      filename => 'capacity-scheduler.xml',
+      module => 'hdp-hadoop',
+      configuration => $configuration['hdp_hadoop__capacity_scheduler']
+    }
+  }
+
+  if has_key($configuration, 'hdp_hadoop__hdfs_site') {
+    configgenerator::configfile{'hdfs_site_xml': 
+      filename => 'hdfs-site.xml',
+      module => 'hdp-hadoop',
+      configuration => $configuration['hdp_hadoop__hdfs_site']
+    }
+  }
+}
+
+class hdp-hadoop(
+  $service_states  = []
+)
+{
+  include hdp-hadoop::params
+  $hadoop_config_dir = $hdp-hadoop::params::conf_dir
+  $mapred_user = $hdp-hadoop::params::mapred_user  
+  $hdfs_user = $hdp-hadoop::params::hdfs_user  
+
+  anchor{'hdp-hadoop::begin':} 
+  anchor{'hdp-hadoop::end':} 
+
+  if ('uninstalled' in $service_states) {
+    hdp-hadoop::package { 'hadoop':
+      ensure => 'uninstalled'
+    }
+
+    hdp::directory_recursive_create { $hadoop_config_dir:
+      service_state => $service_state,
+      force => true
+    }
+
+    Anchor['hdp-hadoop::begin'] -> Hdp-hadoop::Package<||> -> Hdp::Directory_recursive_create[$hadoop_config_dir] -> Anchor['hdp-hadoop::end']
+  } else {
+    
+    hdp-hadoop::package { 'hadoop':}
+
+
+    hdp::directory_recursive_create { $hadoop_config_dir:
+      service_state => $service_state,
+      force => true
+    }
+ 
+    hdp::user{ $hdfs_user:}
+    hdp::user { $mapred_user:}
+
+    $logdirprefix = $hdp-hadoop::params::hadoop_logdirprefix
+    hdp::directory_recursive_create { $logdirprefix: 
+        owner => 'root'
+    }
+    $piddirprefix = $hdp-hadoop::params::hadoop_piddirprefix
+    hdp::directory_recursive_create { $piddirprefix: 
+        owner => 'root'
+    }
+ 
+    #taskcontroller.cfg properties conditional on security
+    if ($hdp::params::security_enabled == true) {
+      file { "${hdp::params::hadoop_bin}/task-controller":
+        owner   => 'root',
+        group   => $hdp::params::hadoop_user_group,
+        mode    => '6050',
+        require => Hdp-hadoop::Package['hadoop'],
+        before  => Anchor['hdp-hadoop::end']
+      }
+      $tc_owner = 'root'
+      $tc_mode = '0400'
+    } else {
+      $tc_owner = $hdfs_user
+      $tc_mode = undef
+    }
+    hdp-hadoop::configfile { 'taskcontroller.cfg' :
+      tag   => 'common',
+      owner => $tc_owner,
+      mode  => $tc_mode
+    }
+
+    $template_files = ['hadoop-env.sh','core-site.xml','hadoop-policy.xml','health_check','capacity-scheduler.xml','commons-logging.properties','log4j.properties','mapred-queue-acls.xml','slaves']
+    hdp-hadoop::configfile { $template_files:
+      tag   => 'common', 
+      owner => $hdfs_user
+    }
+    
+    hdp-hadoop::configfile { 'hadoop-metrics2.properties' : 
+      tag   => 'common', 
+      owner => $hdfs_user,
+    }
+
+    hdp-hadoop::configfile { 'mapred-site.xml': 
+      tag => 'common', 
+      owner => $mapred_user
+    }
+
+    Anchor['hdp-hadoop::begin'] -> Hdp-hadoop::Package<||> ->  Hdp::Directory_recursive_create[$hadoop_config_dir] ->  Hdp::User<|title == $hdfs_user or title == $mapred_user|> 
+    -> Hdp-hadoop::Configfile<|tag == 'common'|> -> Anchor['hdp-hadoop::end']
+    Anchor['hdp-hadoop::begin'] -> Hdp::Directory_recursive_create[$logdirprefix] -> Anchor['hdp-hadoop::end']
+    Anchor['hdp-hadoop::begin'] -> Hdp::Directory_recursive_create[$piddirprefix] -> Anchor['hdp-hadoop::end']
+  }
+}
+
+class hdp-hadoop::enable-ganglia()
+{
+  Hdp-hadoop::Configfile<|title  == 'hadoop-metrics2.properties'|>{template_tag => 'GANGLIA'}
+}
+
+###config file helper
+define hdp-hadoop::configfile(
+  $owner = undef,
+  $hadoop_conf_dir = $hdp-hadoop::params::conf_dir,
+  $mode = undef,
+  $namenode_host = undef,
+  $jtnode_host = undef,
+  $snamenode_host = undef,
+  $template_tag = undef,
+  $size = undef, #TODO: deprecate
+  $sizes = []
+) 
+{
+  #TODO: may need to be fixed 
+  if ($jtnode_host == undef) {
+    $calc_jtnode_host = $namenode_host
+  } else {
+    $calc_jtnode_host = $jtnode_host 
+  }
+ 
+  #only set 32 if there is a 32-bit component and no 64-bit components
+  if (64 in $sizes) {
+    $common_size = 64
+  } elsif (32 in $sizes) {
+    $common_size = 32
+  } else {
+    $common_size = 6
+  }
+  
+  hdp::configfile { "${hadoop_conf_dir}/${name}":
+    component      => 'hadoop',
+    owner          => $owner,
+    mode           => $mode,
+    namenode_host  => $namenode_host,
+    snamenode_host => $snamenode_host,
+    jtnode_host    => $calc_jtnode_host,
+    template_tag   => $template_tag,
+    size           => $common_size
+  }
+}
+
+#####
+define hdp-hadoop::exec-hadoop(
+  $command,
+  $unless = undef,
+  $refreshonly = undef,
+  $echo_yes = false,
+  $kinit_override = false,
+  $tries = 1,
+  $timeout = 900,
+  $try_sleep = undef,
+  $user = undef,
+  $logoutput = undef
+)
+{
+  include hdp-hadoop::params
+  $security_enabled = $hdp::params::security_enabled
+  $conf_dir = $hdp-hadoop::params::conf_dir
+  $hdfs_user = $hdp-hadoop::params::hdfs_user
+
+  if ($user == undef) {
+    $run_user = $hdfs_user
+  } else {
+    $run_user = $user
+  }
+
+  if (($security_enabled == true) and ($kinit_override == false)) {
+    #TODO: may figure out so we don't need to call kinit if auth is already in cache
+    if ($run_user in [$hdfs_user,'root']) {
+      $keytab = "${hdp-hadoop::params::keytab_path}/${hdfs_user}.headless.keytab"
+      $principal = $hdfs_user
+    } else {
+      $keytab = "${hdp-hadoop::params::keytab_path}/${user}.headless.keytab" 
+      $principal = $user
+    }
+    $kinit_if_needed = "/usr/kerberos/bin/kinit  -kt ${keytab} ${principal}; "
+  } else {
+    $kinit_if_needed = ""
+  }
+ 
+  if ($echo_yes == true) {
+    $cmd = "${kinit_if_needed}yes Y | hadoop --config ${conf_dir} ${command}"
+  } else {
+    $cmd = "${kinit_if_needed}hadoop --config ${conf_dir} ${command}"
+  }
+
+  hdp::exec { $cmd:
+    command     => $cmd,
+    user        => $run_user,
+    unless      => $unless,
+    refreshonly => $refreshonly,
+    tries       => $tries,
+    timeout     => $timeout,
+    try_sleep   => $try_sleep,
+    logoutput   => $logoutput
+  }
+}
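
The recurring change in this commit: each configgenerator::configfile above (and in the hcat, hive, oozie, and templeton manifests below) is now wrapped in a has_key($configuration, ...) guard, so site.pp no longer has to define every configuration hash. A Python sketch of the same guard, not part of this commit (it reuses render_configuration from the sketch above; the helper name is hypothetical):

def generate_if_present(configuration, key, filename):
    # Mirror of the has_key() guards: emit a config file only when site.pp
    # actually defines the corresponding hash.
    if key in configuration:  # Puppet: has_key($configuration, key)
        with open(filename, 'w') as out:
            out.write(render_configuration(configuration[key]))

# Example: generate_if_present(configuration, 'hdp_hadoop__hdfs_site', 'hdfs-site.xml')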

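hdp-hadoop::exec-hadoop composes its shell command in stages: an optional kinit prefix when security is enabled, an optional "yes Y |" pipe for commands that prompt, then the hadoop invocation itself. An equivalent composition in Python, illustrative only:

def build_hadoop_cmd(command, conf_dir, security_enabled=False,
                     kinit_override=False, echo_yes=False,
                     keytab=None, principal=None):
    # Optional Kerberos login, mirroring $kinit_if_needed in the manifest.
    prefix = ''
    if security_enabled and not kinit_override:
        prefix = '/usr/kerberos/bin/kinit -kt %s %s; ' % (keytab, principal)
    hadoop = 'hadoop --config %s %s' % (conf_dir, command)
    # echo_yes feeds confirmations to interactive commands (e.g. namenode -format).
    return prefix + ('yes Y | ' + hadoop if echo_yes else hadoop)

# build_hadoop_cmd('fs -ls /', '/etc/hadoop/conf') -> 'hadoop --config /etc/hadoop/conf fs -ls /'
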
Modified: incubator/ambari/branches/AMBARI-666/ambari-agent/src/main/puppet/modules/hdp-hcat-old/manifests/init.pp
URL: http://svn.apache.org/viewvc/incubator/ambari/branches/AMBARI-666/ambari-agent/src/main/puppet/modules/hdp-hcat-old/manifests/init.pp?rev=1400790&r1=1400789&r2=1400790&view=diff
==============================================================================
--- incubator/ambari/branches/AMBARI-666/ambari-agent/src/main/puppet/modules/hdp-hcat-old/manifests/init.pp (original)
+++ incubator/ambari/branches/AMBARI-666/ambari-agent/src/main/puppet/modules/hdp-hcat-old/manifests/init.pp Mon Oct 22 07:44:06 2012
@@ -1,72 +1,72 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-hcat(
-  $server = false
-) 
-{
-  include hdp-hcat::params
-
-# Configs generation  
-# site.pp must have following configurations: 
-# hdp_hcat_old__hive_site
-
-  configgenerator::configfile{'hive_site_xml': 
-    filename => 'hive-site.xml',
-    module => 'hdp-hcat-old',
-    configuration => $configuration['hdp_hcat_old__hive_site']
-  }
-
-  $hcat_user = $hdp::params::hcat_user
-  $hcat_config_dir = $hdp-hcat::params::hcat_conf_dir
- 
-  hdp::package { 'hcat-base' : }
-  if ($server == true ) {
-    hdp::package { 'hcat-server':} 
-    class { 'hdp-hcat::mysql-connector': }
-  }
-  
-  hdp::user{ $hcat_user:}
-  
-  hdp::directory { $hcat_config_dir: }
-
-  hdp-hcat::configfile { ['hcat-env.sh','hive-env.sh','hive-site.xml']: }
-  
-  anchor { 'hdp-hcat::begin': } -> Hdp::Package['hcat-base'] -> Hdp::User[$hcat_user] -> 
-   Hdp::Directory[$hcat_config_dir] -> Hdp-hcat::Configfile<||> ->  anchor { 'hdp-hcat::end': }
-
-   if ($server == true ) {
-     Hdp::Package['hcat-base'] -> Hdp::Package['hcat-server'] ->  Hdp::User[$hcat_user] -> Class['hdp-hcat::mysql-connector'] -> Anchor['hdp-hcat::end']
-  }
-}
-
-### config files
-define hdp-hcat::configfile(
-  $mode = undef,
-  $hcat_server_host = undef
-) 
-{
-  hdp::configfile { "${hdp-hcat::params::hcat_conf_dir}/${name}":
-    component        => 'hcat',
-    owner            => $hdp::params::hcat_user,
-    mode             => $mode,
-    hcat_server_host => $hcat_server_host 
-  }
-}
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+class hdp-hcat(
+  $server = false
+) 
+{
+  include hdp-hcat::params
+
+# Configs generation  
+
+  if has_key($configuration, 'hdp_hcat_old__hive_site') {
+    configgenerator::configfile{'hive_site_xml': 
+      filename => 'hive-site.xml',
+      module => 'hdp-hcat-old',
+      configuration => $configuration['hdp_hcat_old__hive_site']
+    }
+  }
+
+  $hcat_user = $hdp::params::hcat_user
+  $hcat_config_dir = $hdp-hcat::params::hcat_conf_dir
+ 
+  hdp::package { 'hcat-base' : }
+  if ($server == true ) {
+    hdp::package { 'hcat-server':} 
+    class { 'hdp-hcat::mysql-connector': }
+  }
+  
+  hdp::user{ $hcat_user:}
+  
+  hdp::directory { $hcat_config_dir: }
+
+  hdp-hcat::configfile { ['hcat-env.sh','hive-env.sh','hive-site.xml']: }
+  
+  anchor { 'hdp-hcat::begin': } -> Hdp::Package['hcat-base'] -> Hdp::User[$hcat_user] -> 
+   Hdp::Directory[$hcat_config_dir] -> Hdp-hcat::Configfile<||> ->  anchor { 'hdp-hcat::end': }
+
+   if ($server == true ) {
+     Hdp::Package['hcat-base'] -> Hdp::Package['hcat-server'] ->  Hdp::User[$hcat_user] -> Class['hdp-hcat::mysql-connector'] -> Anchor['hdp-hcat::end']
+  }
+}
+
+### config files
+define hdp-hcat::configfile(
+  $mode = undef,
+  $hcat_server_host = undef
+) 
+{
+  hdp::configfile { "${hdp-hcat::params::hcat_conf_dir}/${name}":
+    component        => 'hcat',
+    owner            => $hdp::params::hcat_user,
+    mode             => $mode,
+    hcat_server_host => $hcat_server_host 
+  }
+}

Modified: incubator/ambari/branches/AMBARI-666/ambari-agent/src/main/puppet/modules/hdp-hive/manifests/init.pp
URL: http://svn.apache.org/viewvc/incubator/ambari/branches/AMBARI-666/ambari-agent/src/main/puppet/modules/hdp-hive/manifests/init.pp?rev=1400790&r1=1400789&r2=1400790&view=diff
==============================================================================
--- incubator/ambari/branches/AMBARI-666/ambari-agent/src/main/puppet/modules/hdp-hive/manifests/init.pp (original)
+++ incubator/ambari/branches/AMBARI-666/ambari-agent/src/main/puppet/modules/hdp-hive/manifests/init.pp Mon Oct 22 07:44:06 2012
@@ -1,92 +1,92 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-hive(
-  $service_state,
-  $server = false
-) 
-{
-  include hdp-hive::params
-  
-  $hive_user = $hdp-hive::params::hive_user
-  $hive_config_dir = $hdp-hive::params::hive_conf_dir
-
-# Configs generation  
-# site.pp must have following configurations: 
-# hdp_hive__hive_site
-
-  configgenerator::configfile{'hive_site_xml': 
-    filename => 'hive-site.xml',
-    module => 'hdp-hive',
-    configuration => $configuration['hdp_hive__hive_site']
-  }
-
-  anchor { 'hdp-hive::begin': }
-  anchor { 'hdp-hive::end': } 
-
-  if ($service_state == 'uninstalled') {
-    hdp::package { 'hive' : 
-      ensure => 'uninstalled'
-    }
-
-    hdp::directory { $hive_config_dir:
-      service_state => $service_state,
-      force => true
-    }
-
-    Anchor['hdp-hive::begin'] -> Hdp::Package['hive'] -> Hdp::Directory[$hive_config_dir] ->  Anchor['hdp-hive::end']
-
-  } else {
-    hdp::package { 'hive' : }
-    if ($server == true ) {
-      class { 'hdp-hive::mysql-connector': }
-    }
-  
-    hdp::user{ $hive_user:}
-  
-    hdp::directory { $hive_config_dir: 
-      service_state => $service_state,
-      force => true
-    }
-
-    hdp-hive::configfile { ['hive-env.sh','hive-site.xml']: }
-  
-    Anchor['hdp-hive::begin'] -> Hdp::Package['hive'] -> Hdp::User[$hive_user] ->  
-     Hdp::Directory[$hive_config_dir] -> Hdp-hive::Configfile<||> ->  Anchor['hdp-hive::end']
-
-     if ($server == true ) {
-       Hdp::Package['hive'] -> Hdp::User[$hive_user] -> Class['hdp-hive::mysql-connector'] -> Anchor['hdp-hive::end']
-    }
-  }
-}
-
-### config files
-define hdp-hive::configfile(
-  $mode = undef,
-  $hive_server_host = undef
-) 
-{
-  hdp::configfile { "${hdp-hive::params::hive_conf_dir}/${name}":
-    component        => 'hive',
-    owner            => $hdp-hive::params::hive_user,
-    mode             => $mode,
-    hive_server_host => $hive_server_host 
-  }
-}
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+class hdp-hive(
+  $service_state,
+  $server = false
+) 
+{
+  include hdp-hive::params
+  
+  $hive_user = $hdp-hive::params::hive_user
+  $hive_config_dir = $hdp-hive::params::hive_conf_dir
+
+# Configs generation  
+
+  if has_key($configuration, 'hdp_hive__hive_site') {
+    configgenerator::configfile{'hive_site_xml': 
+      filename => 'hive-site.xml',
+      module => 'hdp-hive',
+      configuration => $configuration['hdp_hive__hive_site']
+    }
+  }
+
+  anchor { 'hdp-hive::begin': }
+  anchor { 'hdp-hive::end': } 
+
+  if ($service_state == 'uninstalled') {
+    hdp::package { 'hive' : 
+      ensure => 'uninstalled'
+    }
+
+    hdp::directory { $hive_config_dir:
+      service_state => $service_state,
+      force => true
+    }
+
+    Anchor['hdp-hive::begin'] -> Hdp::Package['hive'] -> Hdp::Directory[$hive_config_dir] ->  Anchor['hdp-hive::end']
+
+  } else {
+    hdp::package { 'hive' : }
+    if ($server == true ) {
+      class { 'hdp-hive::mysql-connector': }
+    }
+  
+    hdp::user{ $hive_user:}
+  
+    hdp::directory { $hive_config_dir: 
+      service_state => $service_state,
+      force => true
+    }
+
+    hdp-hive::configfile { ['hive-env.sh','hive-site.xml']: }
+  
+    Anchor['hdp-hive::begin'] -> Hdp::Package['hive'] -> Hdp::User[$hive_user] ->  
+     Hdp::Directory[$hive_config_dir] -> Hdp-hive::Configfile<||> ->  Anchor['hdp-hive::end']
+
+     if ($server == true ) {
+       Hdp::Package['hive'] -> Hdp::User[$hive_user] -> Class['hdp-hive::mysql-connector'] -> Anchor['hdp-hive::end']
+    }
+  }
+}
+
+### config files
+define hdp-hive::configfile(
+  $mode = undef,
+  $hive_server_host = undef
+) 
+{
+  hdp::configfile { "${hdp-hive::params::hive_conf_dir}/${name}":
+    component        => 'hive',
+    owner            => $hdp-hive::params::hive_user,
+    mode             => $mode,
+    hive_server_host => $hive_server_host 
+  }
+}

Modified: incubator/ambari/branches/AMBARI-666/ambari-agent/src/main/puppet/modules/hdp-oozie/manifests/init.pp
URL: http://svn.apache.org/viewvc/incubator/ambari/branches/AMBARI-666/ambari-agent/src/main/puppet/modules/hdp-oozie/manifests/init.pp?rev=1400790&r1=1400789&r2=1400790&view=diff
==============================================================================
--- incubator/ambari/branches/AMBARI-666/ambari-agent/src/main/puppet/modules/hdp-oozie/manifests/init.pp (original)
+++ incubator/ambari/branches/AMBARI-666/ambari-agent/src/main/puppet/modules/hdp-oozie/manifests/init.pp Mon Oct 22 07:44:06 2012
@@ -1,97 +1,97 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-oozie(
-  $service_state = undef,
-  $server = false,
-  $setup = false
-)
-{
-  include hdp-oozie::params 
-
-# Configs generation  
-# site.pp must have following configurations: 
-# hdp_oozie__oozie_site
-
-  configgenerator::configfile{'oozie_site_xml': 
-    filename => 'oozie-site.xml',
-    module => 'hdp-oozie',
-    configuration => $configuration['hdp_oozie__oozie_site']
-  }
-
-  $oozie_user = $hdp-oozie::params::oozie_user
-  $oozie_config_dir = $hdp-oozie::params::conf_dir
-  
-  if ($service_state == 'uninstalled') {
-    hdp::package { 'oozie-client' : 
-      ensure => 'uninstalled'
-    }
-    if ($server == true ) {
-      hdp::package { 'oozie-server' :
-        ensure => 'uninstalled'
-      }
-    }
-    hdp::directory { $oozie_config_dir:
-      service_state => $service_state,
-      force => true
-    }
-
-    anchor { 'hdp-oozie::begin': } -> Hdp::Package['oozie-client'] -> Hdp::Directory[$oozie_config_dir] ->  anchor { 'hdp-oozie::end': }
-
-    if ($server == true ) {
-       Hdp::Package['oozie-server'] -> Hdp::Package['oozie-client'] ->  Anchor['hdp-oozie::end']
-     }
-  } else {
-    hdp::package { 'oozie-client' : }
-    if ($server == true ) {
-      hdp::package { 'oozie-server':}
-      class { 'hdp-oozie::download-ext-zip': }
-    }
-
-     hdp::user{ $oozie_user:}
-
-     hdp::directory { $oozie_config_dir: 
-      service_state => $service_state,
-      force => true
-    }
-
-     hdp-oozie::configfile { ['oozie-site.xml','oozie-env.sh','oozie-log4j.properties']: }
-
-    anchor { 'hdp-oozie::begin': } -> Hdp::Package['oozie-client'] -> Hdp::User[$oozie_user] -> Hdp::Directory[$oozie_config_dir] -> Hdp-oozie::Configfile<||> -> anchor { 'hdp-oozie::end': }
-
-     if ($server == true ) { 
-       Hdp::Package['oozie-server'] -> Hdp::Package['oozie-client'] -> Hdp::User[$oozie_user] ->   Class['hdp-oozie::download-ext-zip'] ->  Anchor['hdp-oozie::end']
-     }
- }
-}
-
-### config files
-define hdp-oozie::configfile(
-  $mode = undef,
-  $oozie_server = undef
-) 
-{
-  hdp::configfile { "${hdp-oozie::params::conf_dir}/${name}":
-    component       => 'oozie',
-    owner           => $hdp-oozie::params::oozie_user,
-    mode            => $mode,
-    oozie_server    => $oozie_server
-  }
-}
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+class hdp-oozie(
+  $service_state = undef,
+  $server = false,
+  $setup = false
+)
+{
+  include hdp-oozie::params 
+
+# Configs generation  
+
+  if has_key($configuration, 'hdp_oozie__oozie_site') {
+    configgenerator::configfile{'oozie_site_xml': 
+      filename => 'oozie-site.xml',
+      module => 'hdp-oozie',
+      configuration => $configuration['hdp_oozie__oozie_site']
+    }
+  }
+
+  $oozie_user = $hdp-oozie::params::oozie_user
+  $oozie_config_dir = $hdp-oozie::params::conf_dir
+  
+  if ($service_state == 'uninstalled') {
+    hdp::package { 'oozie-client' : 
+      ensure => 'uninstalled'
+    }
+    if ($server == true ) {
+      hdp::package { 'oozie-server' :
+        ensure => 'uninstalled'
+      }
+    }
+    hdp::directory { $oozie_config_dir:
+      service_state => $service_state,
+      force => true
+    }
+
+    anchor { 'hdp-oozie::begin': } -> Hdp::Package['oozie-client'] -> Hdp::Directory[$oozie_config_dir] ->  anchor { 'hdp-oozie::end': }
+
+    if ($server == true ) {
+       Hdp::Package['oozie-server'] -> Hdp::Package['oozie-client'] ->  Anchor['hdp-oozie::end']
+     }
+  } else {
+    hdp::package { 'oozie-client' : }
+    if ($server == true ) {
+      hdp::package { 'oozie-server':}
+      class { 'hdp-oozie::download-ext-zip': }
+    }
+
+     hdp::user{ $oozie_user:}
+
+     hdp::directory { $oozie_config_dir: 
+      service_state => $service_state,
+      force => true
+    }
+
+     hdp-oozie::configfile { ['oozie-site.xml','oozie-env.sh','oozie-log4j.properties']: }
+
+    anchor { 'hdp-oozie::begin': } -> Hdp::Package['oozie-client'] -> Hdp::User[$oozie_user] -> Hdp::Directory[$oozie_config_dir] -> Hdp-oozie::Configfile<||> -> anchor { 'hdp-oozie::end': }
+
+     if ($server == true ) { 
+       Hdp::Package['oozie-server'] -> Hdp::Package['oozie-client'] -> Hdp::User[$oozie_user] ->   Class['hdp-oozie::download-ext-zip'] ->  Anchor['hdp-oozie::end']
+     }
+ }
+}
+
+### config files
+define hdp-oozie::configfile(
+  $mode = undef,
+  $oozie_server = undef
+) 
+{
+  hdp::configfile { "${hdp-oozie::params::conf_dir}/${name}":
+    component       => 'oozie',
+    owner           => $hdp-oozie::params::oozie_user,
+    mode            => $mode,
+    oozie_server    => $oozie_server
+  }
+}

Modified: incubator/ambari/branches/AMBARI-666/ambari-agent/src/main/puppet/modules/hdp-templeton/manifests/init.pp
URL: http://svn.apache.org/viewvc/incubator/ambari/branches/AMBARI-666/ambari-agent/src/main/puppet/modules/hdp-templeton/manifests/init.pp?rev=1400790&r1=1400789&r2=1400790&view=diff
==============================================================================
--- incubator/ambari/branches/AMBARI-666/ambari-agent/src/main/puppet/modules/hdp-templeton/manifests/init.pp (original)
+++ incubator/ambari/branches/AMBARI-666/ambari-agent/src/main/puppet/modules/hdp-templeton/manifests/init.pp Mon Oct 22 07:44:06 2012
@@ -1,94 +1,94 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-templeton(
-  $service_state = undef,
-  $server = false
-)
-{
-# Configs generation  
-# site.pp must have following configurations: 
-# hdp_templeton__templeton_site
-
-  configgenerator::configfile{'templeton_site_xml': 
-    filename => 'templeton-site.xml',
-    module => 'hdp-templeton',
-    configuration => $configuration['hdp_templeton__templeton_site']
-  }
-
- include hdp-templeton::params 
- 
-  if ($hdp::params::use_32_bits_on_slaves == false) {
-    $size = 64
-  } else {
-    $size = 32
-  }
-
-  $templeton_user = $hdp-templeton::params::templeton_user
-  $templeton_config_dir = $hdp-templeton::params::conf_dir
-
-  if ($service_state == 'uninstalled') {
-      hdp::package { 'templeton' :
-      size => $size,
-      ensure => 'uninstalled'
-    }
-      hdp::directory { $templeton_config_dir:
-        service_state => $service_state,
-        force => true
-      }
-
-     anchor { 'hdp-templeton::begin': } -> Hdp::Package['templeton'] -> Hdp::Directory[$templeton_config_dir] ->  anchor { 'hdp-templeton::end': }
-
-  } else {
-    hdp::package { 'templeton' :
-      size => $size
-    }
-    class { hdp-templeton::download-hive-tar: }
-    class { hdp-templeton::download-pig-tar: }
-
-    hdp::user{ $templeton_user:}
-
-    hdp::directory { $templeton_config_dir: 
-      service_state => $service_state,
-      force => true
-    }
-
-    hdp-templeton::configfile { ['templeton-site.xml','templeton-env.sh']: }
-
-    anchor { 'hdp-templeton::begin': } -> Hdp::Package['templeton'] -> Hdp::User[$templeton_user] -> Hdp::Directory[$templeton_config_dir] -> Hdp-templeton::Configfile<||> ->  anchor { 'hdp-templeton::end': }
-
-     if ($server == true ) { 
-      Hdp::Package['templeton'] -> Hdp::User[$templeton_user] ->   Class['hdp-templeton::download-hive-tar'] -> Class['hdp-templeton::download-pig-tar'] -> Anchor['hdp-templeton::end']
-     }
-  }
-}
-
-### config files
-define hdp-templeton::configfile(
-  $mode = undef
-) 
-{
-  hdp::configfile { "${hdp-templeton::params::conf_dir}/${name}":
-    component       => 'templeton',
-    owner           => $hdp-templeton::params::templeton_user,
-    mode            => $mode
-  }
-}
-
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+class hdp-templeton(
+  $service_state = undef,
+  $server = false
+)
+{
+# Configs generation  
+
+  if has_key($configuration, 'hdp_templeton__templeton_site') {
+    configgenerator::configfile{'templeton_site_xml': 
+      filename => 'templeton-site.xml',
+      module => 'hdp-templeton',
+      configuration => $configuration['hdp_templeton__templeton_site']
+    }
+  }
+
+ include hdp-templeton::params 
+ 
+  if ($hdp::params::use_32_bits_on_slaves == false) {
+    $size = 64
+  } else {
+    $size = 32
+  }
+
+  $templeton_user = $hdp-templeton::params::templeton_user
+  $templeton_config_dir = $hdp-templeton::params::conf_dir
+
+  if ($service_state == 'uninstalled') {
+      hdp::package { 'templeton' :
+      size => $size,
+      ensure => 'uninstalled'
+    }
+      hdp::directory { $templeton_config_dir:
+        service_state => $service_state,
+        force => true
+      }
+
+     anchor { 'hdp-templeton::begin': } -> Hdp::Package['templeton'] -> Hdp::Directory[$templeton_config_dir] ->  anchor { 'hdp-templeton::end': }
+
+  } else {
+    hdp::package { 'templeton' :
+      size => $size
+    }
+    class { hdp-templeton::download-hive-tar: }
+    class { hdp-templeton::download-pig-tar: }
+
+    hdp::user{ $templeton_user:}
+
+    hdp::directory { $templeton_config_dir: 
+      service_state => $service_state,
+      force => true
+    }
+
+    hdp-templeton::configfile { ['templeton-site.xml','templeton-env.sh']: }
+
+    anchor { 'hdp-templeton::begin': } -> Hdp::Package['templeton'] -> Hdp::User[$templeton_user] -> Hdp::Directory[$templeton_config_dir] -> Hdp-templeton::Configfile<||> ->  anchor { 'hdp-templeton::end': }
+
+     if ($server == true ) { 
+      Hdp::Package['templeton'] -> Hdp::User[$templeton_user] ->   Class['hdp-templeton::download-hive-tar'] -> Class['hdp-templeton::download-pig-tar'] -> Anchor['hdp-templeton::end']
+     }
+  }
+}
+
+### config files
+define hdp-templeton::configfile(
+  $mode = undef
+) 
+{
+  hdp::configfile { "${hdp-templeton::params::conf_dir}/${name}":
+    component       => 'templeton',
+    owner           => $hdp-templeton::params::templeton_user,
+    mode            => $mode
+  }
+}
+
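
hdp-templeton picks the package size from $hdp::params::use_32_bits_on_slaves, while hdp-hadoop::configfile (earlier in this commit) reduces a list of component sizes to one: 64 wins if any 64-bit component is present, else 32. A sketch of that reduction, not part of the commit:

def common_size(sizes):
    # Prefer 64-bit when any 64-bit component exists; otherwise 32-bit.
    if 64 in sizes:
        return 64
    elif 32 in sizes:
        return 32
    # The manifest returns 6 on this branch, which looks like a typo for 64.
    return 64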

Added: incubator/ambari/branches/AMBARI-666/ambari-agent/src/main/python/ambari_agent/StatusCheck.py
URL: http://svn.apache.org/viewvc/incubator/ambari/branches/AMBARI-666/ambari-agent/src/main/python/ambari_agent/StatusCheck.py?rev=1400790&view=auto
==============================================================================
--- incubator/ambari/branches/AMBARI-666/ambari-agent/src/main/python/ambari_agent/StatusCheck.py (added)
+++ incubator/ambari/branches/AMBARI-666/ambari-agent/src/main/python/ambari_agent/StatusCheck.py Mon Oct 22 07:44:06 2012
@@ -0,0 +1,104 @@
+#!/usr/bin/env python2.6
+
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+
+from shell import shellRunner
+import logging
+import logging.handlers
+import sys
+import os
+
+logger = logging.getLogger()
+
+
+
+def get_pair(line):
+  key, sep, value = line.strip().partition("=")
+  return key, value
+
+class StatusCheck:
+
+  def listFiles(self, dir):
+    basedir = dir
+    logger.info("Files in ", os.path.abspath(dir), ": ")
+    subdirlist = []
+    try:
+      if os.path.isdir(dir):
+        for item in os.listdir(dir):
+            if os.path.isfile(item) and item.endswith('.pid'):
+              self.pidFilesDict[item.split(os.sep).pop()] = item
+            else:
+                subdirlist.append(os.path.join(basedir, item))
+        for subdir in subdirlist:
+            self.listFiles(subdir)
+      else:
+        if dir.endswith('.pid'):
+          self.pidFilesDict[dir.split(os.sep).pop()] = dir
+    except OSError as e:
+      logger.info(e.strerror + ' to ' + e.filename)
+
+  def __init__(self, path):
+    self.path = path
+    self.sh = shellRunner()
+    self.pidFilesDict = {}
+    self.listFiles(self.path)
+
+
+    with open("servicesToPidNames.dict") as fd:    
+      self.serToPidDict = dict(get_pair(line) for line in fd)
+
+  def getIsLive(self, pidPath):
+    isLive = False
+    pidFile = open(pidPath, 'r')
+    pid = int(pidFile.readline())
+    res = self.sh.run(['ps -p', str(pid), '-f'])
+    lines = res['output'].split('\n')
+    try:
+      procInfo = lines[1]
+      isLive = not procInfo == None
+    except IndexError:
+      logger.info('Process is dead')
+
+    return isLive
+
+  def getStatus(self, serviceCode):
+    try:
+      pidName = self.serToPidDict[serviceCode]
+      logger.info( 'pidName: ' + pidName)
+    except KeyError as e:
+      logger.warn('There is no mapping for ' + serviceCode)
+      return None
+    try:
+      pidPath = self.pidFilesDict[pidName]
+      logger.info('pidPath: ' + pidPath)
+      result = self.getIsLive(self.pidFilesDict[pidName])
+      return result
+    except KeyError:
+      logger.info('Pid file was not found')
+      return False
+
+#Temporary, for testing from console
+def main(argv=None):
+  statusCheck = StatusCheck('/var/')
+  isLive = statusCheck.getStatus(argv[1])
+  print isLive
+
+if __name__ == '__main__':
+  main(sys.argv)
+
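
StatusCheck maps service codes to pid-file names via the newly added servicesToPidNames.dict, parsed one key=value pair per line by get_pair. The file's contents are not shown in this message; an assumed example of the format, plus an equivalent standalone parser:

# Assumed servicesToPidNames.dict format (inferred from get_pair):
#   NAMENODE=hadoop-hdfs-namenode.pid
#   DATANODE=hadoop-hdfs-datanode.pid

def parse_services_dict(path):
    # Same parsing as StatusCheck.__init__: one SERVICE=pidfile pair per line.
    pairs = {}
    with open(path) as fd:
        for line in fd:
            key, _, value = line.strip().partition('=')
            pairs[key] = value
    return pairs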

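getIsLive shells out to ps and treats a second output line (the process row under the header) as proof of life. A hypothetical alternative that probes the pid directly with signal 0 instead of parsing ps output:

import errno
import os

def pid_is_live(pid_path):
    with open(pid_path) as f:
        pid = int(f.readline())
    try:
        os.kill(pid, 0)  # signal 0: existence check, no signal delivered
        return True
    except OSError as e:
        # EPERM means the process exists but belongs to another user.
        return e.errno == errno.EPERM
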
Modified: incubator/ambari/branches/AMBARI-666/ambari-agent/src/main/python/ambari_agent/manifestGenerator.py
URL: http://svn.apache.org/viewvc/incubator/ambari/branches/AMBARI-666/ambari-agent/src/main/python/ambari_agent/manifestGenerator.py?rev=1400790&r1=1400789&r2=1400790&view=diff
==============================================================================
--- incubator/ambari/branches/AMBARI-666/ambari-agent/src/main/python/ambari_agent/manifestGenerator.py (original)
+++ incubator/ambari/branches/AMBARI-666/ambari-agent/src/main/python/ambari_agent/manifestGenerator.py Mon Oct 22 07:44:06 2012
@@ -1,184 +1,184 @@
-#!/usr/bin/env python2.6
-
-'''
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-'''
-
-import json
-import os.path
-import logging
-
-logger = logging.getLogger()
-
-  #read static imports from file and write them to manifest
-def writeImports(outputFile, inputFileName='imports.txt'):
-  inputFile = open(inputFileName, 'r')
-  modulesdir = os.path.abspath(os.getcwd() + "../../../puppet/modules/")
-  logger.info("Modules dir is " + modulesdir)
-  for line in inputFile:
-    modulename = line.rstrip('\n')
-    line = "import '" + modulesdir + "/" + modulename + "'\n"
-    outputFile.write(line)
-    
-  inputFile.close()
-
-def generateManifest(inputJsonStr):
-#reading json
-  parsedJson = json.loads(inputJsonStr)
-  hostname = parsedJson['hostname']
-  clusterHostInfo = parsedJson['clusterHostInfo']
-  params = parsedJson['params']
-  configurations = parsedJson['configurations']
-  #hostAttributes = parsedJson['hostAttributes']
-  roles = parsedJson['roleCommands']
-  
-#writing manifest
-  manifest = open('site.pp', 'w')
-
-  #writing imports from external static file
-  writeImports(manifest)
-  
-  #writing nodes
-  writeNodes(manifest, clusterHostInfo)
-  
-  #writing params from map
-  writeParams(manifest, params)
-  
-  #writing config maps
-  writeConfigurations(manifest, configurations)
-
-  #writing host attributes
-  #writeHostAttributes(manifest, hostAttributes)
-
-  #writing task definitions 
-  writeTasks(manifest, roles)
-     
-  manifest.close()
-    
-  
-  #read dictionary
-def readDict(file, separator='='):
-  result = dict()
-  
-  for line in file :
-    dictTuple = line.partition(separator)
-    result[dictTuple[0].strip()] = dictTuple[2].strip()
-  
-  return result
-  
-
-  #write nodes
-def writeNodes(outputFile, clusterHostInfo):
-  for node in clusterHostInfo.iterkeys():
-    outputFile.write('$' + node + '= [')
-    coma = ''
-    
-    for value in clusterHostInfo[node]:
-      outputFile.write(coma + '\'' + value + '\'')
-      coma = ', '
-
-    outputFile.write(']\n')
-
-#write params
-def writeParams(outputFile, params):
-  for param in params.iterkeys():
-    outputFile.write('$' +  param + '="' + params[param] + '"\n')
-
-#write host attributes
-def writeHostAttributes(outputFile, hostAttributes):
-  outputFile.write('$hostAttributes={\n')
-
-  coma = ''
-  for attribute in hostAttributes.iterkeys():
-    outputFile.write(coma + '"' +  attribute + '" => "{' + hostAttributes[attribute] + '"}')
-    coma = ',\n'
-
-  outputFile.write('}\n')
-
-#write configurations
-def writeConfigurations(outputFile, configs):
-  outputFile.write('$configuration =  {\n')
-
-  for configName in configs.iterkeys():
-    outputFile.write(configName + '=> {\n')
-    config = configs[configName]
-    
-    coma = ''
-    for configParam in config.iterkeys():
-      outputFile.write(coma + '"' + configParam + '" => "' + config[configParam] + '"')
-      coma = ',\n'
-
-    outputFile.write('\n},\n')
-    
-  outputFile.write('\n}\n')
-
-#write node tasks
-def writeTasks(outputFile, roles):
-  #reading dictionaries
-  rolesToClassFile = open('rolesToClass.dict', 'r')
-  rolesToClass = readDict(rolesToClassFile)
-  rolesToClassFile.close()
-
-  serviceStatesFile =  open('serviceStates.dict', 'r')
-  serviceStates = readDict(serviceStatesFile)
-  serviceStatesFile.close()
-
-  outputFile.write('node /default/ {\n ')
-  writeStages(outputFile, len(roles))
-  stageNum = 1
-
-  for role in roles :
-    rolename = role['role']
-    command = role['cmd']
-    taskParams = role['roleParams']
-    taskParamsNormalized = normalizeTaskParams(taskParams)
-    taskParamsPostfix = ''
-    
-    if len(taskParamsNormalized) > 0 :
-      taskParamsPostfix = ', ' + taskParamsNormalized
-    
-    className = rolesToClass[rolename]
-    serviceState = serviceStates[command]
-    
-    outputFile.write('class {\'' + className + '\':' + ' stage => ' + str(stageNum) + 
-                     ', service_state => ' + serviceState + taskParamsPostfix + '}\n')
-    stageNum = stageNum + 1
-  outputFile.write('}\n')
-def normalizeTaskParams(taskParams):
-  result = ''
-  coma = ''
-  
-  for paramName in taskParams.iterkeys():
-    result = coma + result + paramName + ' => ' + taskParams[paramName]
-    coma = ','
-    
-  return result
-  
-def writeStages(outputFile, numStages):
-  arrow = ''
-  
-  for i in range(numStages):
-    outputFile.write(arrow + 'stage{' + str(i) + ' :}')
-    arrow = ' -> '
-  
-  outputFile.write('\n')
-    
-logging.basicConfig(level=logging.DEBUG)    
-#test code
-jsonFile = open('test.json', 'r')
-jsonStr = jsonFile.read() 
-generateManifest(jsonStr)
+#!/usr/bin/env python2.6
+
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+
+import json
+import os.path
+import logging
+
+logger = logging.getLogger()
+
+#read static imports from file and write them to manifest
+def writeImports(outputFile, inputFileName='imports.txt'):
+  inputFile = open(inputFileName, 'r')
+  modulesdir = os.path.abspath(os.path.join(os.getcwd(), "../../../puppet/modules"))
+  logger.info("Modules dir is " + modulesdir)
+  for line in inputFile:
+    modulename = line.rstrip('\n')
+    line = "import '" + modulesdir + "/" + modulename + "'\n"
+    outputFile.write(line)
+    
+  inputFile.close()
+
+def generateManifest(inputJsonStr):
+  #reading json
+  parsedJson = json.loads(inputJsonStr)
+  hostname = parsedJson['hostname']
+  clusterHostInfo = parsedJson['clusterHostInfo']
+  params = parsedJson['params']
+  configurations = parsedJson['configurations']
+  #hostAttributes = parsedJson['hostAttributes']
+  roles = parsedJson['roleCommands']
+  
+  #writing manifest
+  manifest = open('site.pp', 'w')
+
+  #writing imports from external static file
+  writeImports(manifest)
+  
+  #writing nodes
+  writeNodes(manifest, clusterHostInfo)
+  
+  #writing params from map
+  writeParams(manifest, params)
+  
+  #writing config maps
+  writeConfigurations(manifest, configurations)
+
+  #writing host attributes
+  #writeHostAttributes(manifest, hostAttributes)
+
+  #writing task definitions 
+  writeTasks(manifest, roles)
+     
+  manifest.close()
+    
+  
+#read dictionary
+def readDict(file, separator='='):
+  result = dict()
+  
+  for line in file :
+    dictTuple = line.partition(separator)
+    result[dictTuple[0].strip()] = dictTuple[2].strip()
+  
+  return result
+  
+
+#write nodes
+def writeNodes(outputFile, clusterHostInfo):
+  for node in clusterHostInfo.iterkeys():
+    outputFile.write('$' + node + '= [')
+    coma = ''
+    
+    for value in clusterHostInfo[node]:
+      outputFile.write(coma + '\'' + value + '\'')
+      coma = ', '
+
+    outputFile.write(']\n')
+
+#write params
+def writeParams(outputFile, params):
+  for param in params.iterkeys():
+    outputFile.write('$' +  param + '="' + params[param] + '"\n')
+
+#write host attributes
+def writeHostAttributes(outputFile, hostAttributes):
+  outputFile.write('$hostAttributes={\n')
+
+  coma = ''
+  for attribute in hostAttributes.iterkeys():
+    outputFile.write(coma + '"' +  attribute + '" => "{' + hostAttributes[attribute] + '"}')
+    coma = ',\n'
+
+  outputFile.write('}\n')
+
+#write configurations
+def writeConfigurations(outputFile, configs):
+  outputFile.write('$configuration =  {\n')
+
+  for configName in configs.iterkeys():
+    outputFile.write(configName + '=> {\n')
+    config = configs[configName]
+    
+    coma = ''
+    for configParam in config.iterkeys():
+      outputFile.write(coma + '"' + configParam + '" => "' + config[configParam] + '"')
+      coma = ',\n'
+
+    outputFile.write('\n},\n')
+    
+  outputFile.write('\n}\n')
+
+#write node tasks
+def writeTasks(outputFile, roles):
+  #reading dictionaries
+  rolesToClassFile = open('rolesToClass.dict', 'r')
+  rolesToClass = readDict(rolesToClassFile)
+  rolesToClassFile.close()
+
+  serviceStatesFile =  open('serviceStates.dict', 'r')
+  serviceStates = readDict(serviceStatesFile)
+  serviceStatesFile.close()
+
+  outputFile.write('node /default/ {\n ')
+  writeStages(outputFile, len(roles))
+  stageNum = 1
+
+  for role in roles :
+    rolename = role['role']
+    command = role['cmd']
+    taskParams = role['roleParams']
+    taskParamsNormalized = normalizeTaskParams(taskParams)
+    taskParamsPostfix = ''
+    
+    if len(taskParamsNormalized) > 0 :
+      taskParamsPostfix = ', ' + taskParamsNormalized
+    
+    className = rolesToClass[rolename]
+    serviceState = serviceStates[command]
+    
+    outputFile.write('class {\'' + className + '\':' + ' stage => ' + str(stageNum) + 
+                     ', service_state => ' + serviceState + taskParamsPostfix + '}\n')
+    stageNum = stageNum + 1
+  outputFile.write('}\n')
+
+def normalizeTaskParams(taskParams):
+  result = ''
+  coma = ''
+
+  for paramName in taskParams.iterkeys():
+    # Append after the accumulated result; prepending the comma to the
+    # front of the string (as before) mangled the parameter list.
+    result = result + coma + paramName + ' => ' + taskParams[paramName]
+    coma = ','
+
+  return result
+  
+def writeStages(outputFile, numStages):
+  arrow = ''
+  
+  for i in range(numStages):
+    outputFile.write(arrow + 'stage{' + str(i + 1) + ' :}')
+    arrow = ' -> '
+  
+  outputFile.write('\n')
+    
+#test code; guarded so that importing this module does not regenerate site.pp
+if __name__ == '__main__':
+  logging.basicConfig(level=logging.DEBUG)
+  jsonFile = open('test.json', 'r')
+  jsonStr = jsonFile.read()
+  jsonFile.close()
+  generateManifest(jsonStr)
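
To see what the generator emits end to end, a hedged sketch with a one-role payload; the key names mirror those read in generateManifest, but the role, class, and state values are illustrative and must resolve through the agent's rolesToClass.dict and serviceStates.dict (run from the ambari_agent directory so imports.txt and the .dict files are found):

    import json
    import manifestGenerator

    # Hypothetical payload; only the keys are dictated by generateManifest.
    payload = {
      'hostname': 'host1.example.com',
      'clusterHostInfo': {'namenode_host': ['host1.example.com']},
      'params': {'hdfs_user': 'hdfs'},
      'configurations': {'core-site': {'fs.default.name': 'hdfs://host1:8020'}},
      'roleCommands': [{'role': 'NAMENODE', 'cmd': 'START', 'roleParams': {}}]
    }
    manifestGenerator.generateManifest(json.dumps(payload))
    # site.pp then ends with something like (assuming the .dict files map
    # NAMENODE to hdp-hadoop::namenode and START to running):
    #   node /default/ {
    #    stage{1 :}
    #   class {'hdp-hadoop::namenode': stage => 1, service_state => running}
    #   }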

Added: incubator/ambari/branches/AMBARI-666/ambari-agent/src/main/python/ambari_agent/servicesToPidNames.dict
URL: http://svn.apache.org/viewvc/incubator/ambari/branches/AMBARI-666/ambari-agent/src/main/python/ambari_agent/servicesToPidNames.dict?rev=1400790&view=auto
==============================================================================
--- incubator/ambari/branches/AMBARI-666/ambari-agent/src/main/python/ambari_agent/servicesToPidNames.dict (added)
+++ incubator/ambari/branches/AMBARI-666/ambari-agent/src/main/python/ambari_agent/servicesToPidNames.dict Mon Oct 22 07:44:06 2012
@@ -0,0 +1,9 @@
+NAMENODE=hadoop-hdfs-namenode.pid
+SECONDARYNAMENODE=hadoop-hdfs-secondarynamenode.pid
+DATANODE=hadoop-hdfs-datanode.pid
+JOBTRACKER=hadoop-mapred-jobtracker.pid
+TASKTRACKER=hadoop-mapred-tasktracker.pid
+OOZIE_SERVER=oozie.pid
+ZOOKEEPER=zookeeper_server.pid
+TEMPLETON=templeton.pid
+NAGIOS=nagios.pid
\ No newline at end of file
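
Each entry feeds StatusCheck.get_pair, which splits on the first '=' only, so values may themselves contain '='. A quick sketch of the resulting mapping, reusing the helper from StatusCheck.py:

    from StatusCheck import get_pair

    with open('servicesToPidNames.dict') as fd:
      mapping = dict(get_pair(line) for line in fd)
    print mapping['NAMENODE']  # hadoop-hdfs-namenode.pid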

Added: incubator/ambari/branches/AMBARI-666/ambari-server/src/main/java/org/apache/ambari/server/api/services/AmbariMetaInfo.java
URL: http://svn.apache.org/viewvc/incubator/ambari/branches/AMBARI-666/ambari-server/src/main/java/org/apache/ambari/server/api/services/AmbariMetaInfo.java?rev=1400790&view=auto
==============================================================================
--- incubator/ambari/branches/AMBARI-666/ambari-server/src/main/java/org/apache/ambari/server/api/services/AmbariMetaInfo.java (added)
+++ incubator/ambari/branches/AMBARI-666/ambari-server/src/main/java/org/apache/ambari/server/api/services/AmbariMetaInfo.java Mon Oct 22 07:44:06 2012
@@ -0,0 +1,217 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ambari.server.api.services;
+
+import org.apache.ambari.server.state.PropertyInfo;
+import org.apache.ambari.server.state.RepositoryInfo;
+import org.apache.ambari.server.state.ServiceInfo;
+import org.apache.ambari.server.state.StackInfo;
+
+import javax.ws.rs.Path;
+import java.io.File;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+
+import javax.xml.parsers.DocumentBuilderFactory;
+import javax.xml.parsers.DocumentBuilder;
+
+import org.w3c.dom.Document;
+import org.w3c.dom.NodeList;
+import org.w3c.dom.Node;
+import org.w3c.dom.Element;
+
+/**
+ * AmbariMetaInfo is responsible for reading stack, service, repository and
+ * configuration metadata from the stack definition directory.
+ */
+@Path("/metainfo/")
+public class AmbariMetaInfo {
+
+  private String CONFIG_FILE_PATH = "C:\\workspace\\stacks"; // TODO: read from Configuration.CONFIG_FILE
+
+  List<ServiceInfo> getSupportedServices(String stackName, String version) {
+    return null;
+  }
+
+  List<ServiceInfo> getDependentServices(String stackName, String version, String serviceName) {
+    return null;
+  }
+
+  Map<String, Map<String, String>> getSupportedConfigs(String stackName, String version, String serviceName) {
+    return null;
+  }
+
+  List<StackInfo> getSupportedStack() {
+    return null;
+  }
+
+  private List<ServiceInfo> getConfigurationInformation() throws Exception {
+    List<StackInfo> stacksResult = new ArrayList<StackInfo>();
+
+    File stackRoot = new File(CONFIG_FILE_PATH);
+    if (!stackRoot.isDirectory())
+      throw new IOException("" + CONFIG_FILE_PATH + " should be a directory with stack.");
+    File[] stacks = stackRoot.listFiles();
+    for (File stackFolder : stacks) {
+      if (stackFolder.isFile()) continue;
+      File[] concreteStacks = stackFolder.listFiles();
+      for (File stack : concreteStacks) {
+        if (stack.isFile()) continue;
+        StackInfo stackInfo = new StackInfo();
+        stackInfo.setName(stackFolder.getName());
+        stackInfo.setVersion(stack.getName());
+        stacksResult.add(stackInfo);
+        //get repository data for current stack of techs
+        File repositoryFolder = new File(stack.getAbsolutePath() + File.separator + "repos" + File.separator + "repoinfo.xml");
+
+        if (repositoryFolder.exists()) {
+          List<RepositoryInfo> repositoryInfoList = getRepository(repositoryFolder);
+          stackInfo.getRepositories().addAll(repositoryInfoList);
+        }
+
+
+        //get services for this stack
+        File servicesRootFolder = new File(stack.getAbsolutePath() + File.separator + "services");
+        File[] servicesFolders = servicesRootFolder.listFiles();
+
+        if (servicesFolders != null)
+          for (File serviceFolder : servicesFolders) {
+            //Get information about service
+            ServiceInfo serviceInfo = new ServiceInfo();
+            serviceInfo.setName(serviceFolder.getName());
+            stackInfo.getServices().add(serviceInfo);
+
+
+            //Get all properties from all "configs/*-site.xml" files
+            File serviceConfigFolder = new File(serviceFolder.getAbsolutePath() + File.separator + "configs");
+            File[] configFiles = serviceConfigFolder.listFiles();
+            // Guard against a missing configs folder, as done for services.
+            if (configFiles != null) {
+              for (File config : configFiles) {
+                if (config.getName().endsWith("-site.xml")) {
+                  serviceInfo.getProperties().addAll(getProperties(config));
+                }
+              }
+            }
+          }
+
+      }
+    }//stack root
+
+    for (StackInfo elem : stacksResult) {
+      System.out.println("###elem = \n" + elem);
+      System.out.println("contain services= " + elem.getServices().size());
+    }
+    System.out.println(" \n\n\n ");
+
+
+    return stacksResult;
+  }
+
+
+  public static void main(String[] args) throws Exception {
+    AmbariMetaInfo metadata = new AmbariMetaInfo();
+    metadata.getConfigurationInformation();
+//    metadata.getRepository(new File("C:\\workspace\\stacks\\HDP\\0.1\\repos\\repoinfo.xml"));
+
+  }
+
+
+  public List<RepositoryInfo> getRepository(File repositoryFile) {
+
+    List<RepositoryInfo> repositorysInfo = new ArrayList<RepositoryInfo>();
+    try {
+
+      DocumentBuilderFactory dbFactory = DocumentBuilderFactory.newInstance();
+      DocumentBuilder dBuilder = dbFactory.newDocumentBuilder();
+      Document doc = dBuilder.parse(repositoryFile);
+      doc.getDocumentElement().normalize();
+
+      NodeList propertyNodes = doc.getElementsByTagName("repo");
+
+      for (int index = 0; index < propertyNodes.getLength(); index++) {
+
+        Node node = propertyNodes.item(index);
+        if (node.getNodeType() == Node.ELEMENT_NODE) {
+
+          Element property = (Element) node;
+          RepositoryInfo repositoryInfo = new RepositoryInfo();
+          repositoryInfo.setUrl(getTagValue("url", property));
+          repositoryInfo.setOs(getTagValue("os", property));
+          repositoryInfo.setDescription(getTagValue("description", property));
+          repositoriesInfo.add(repositoryInfo);
+        }
+      }
+
+    } catch (Exception e) {
+      e.printStackTrace();
+
+    }
+    return repositoriesInfo;
+  }
+
+  public List<PropertyInfo> getProperties(File propertyFile) {
+
+    List<PropertyInfo> resultPropertyList = new ArrayList<PropertyInfo>();
+    try {
+      DocumentBuilderFactory dbFactory = DocumentBuilderFactory.newInstance();
+      DocumentBuilder dBuilder = dbFactory.newDocumentBuilder();
+      Document doc = dBuilder.parse(propertyFile);
+      doc.getDocumentElement().normalize();
+
+
+      NodeList propertyNodes = doc.getElementsByTagName("property");
+
+      for (int index = 0; index < propertyNodes.getLength(); index++) {
+
+        Node node = propertyNodes.item(index);
+        if (node.getNodeType() == Node.ELEMENT_NODE) {
+
+          Element property = (Element) node;
+          PropertyInfo propertyInfo = new PropertyInfo();
+          propertyInfo.setName(getTagValue("name", property));
+          propertyInfo.setValue(getTagValue("value", property));
+          propertyInfo.setDescription(getTagValue("description", property));
+
+          if (propertyInfo.getName() == null || propertyInfo.getValue() == null)
+            continue;
+
+          resultPropertyList.add(propertyInfo);
+        }
+      }
+    } catch (Exception e) {
+      e.printStackTrace();
+      return null;
+    }
+    return resultPropertyList;
+  }
+
+  private static String getTagValue(String sTag, Element rawElement) {
+    // Null-check the node chain explicitly; the previous try/finally with a
+    // return inside finally silently swallowed every exception.
+    NodeList tagElements = rawElement.getElementsByTagName(sTag);
+    if (tagElements.getLength() == 0) {
+      return null;
+    }
+    Node value = tagElements.item(0).getChildNodes().item(0);
+    return value == null ? null : value.getNodeValue();
+  }
+}
+
+
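
getConfigurationInformation walks a fixed on-disk layout: <stackRoot>/<stackName>/<version>/repos/repoinfo.xml plus <version>/services/<service>/configs/*-site.xml. A hedged Python sketch that lays out one hypothetical HDP/0.1 stack the way that traversal expects (the root element names and values are illustrative; only the <repo> and <property> tags and their children are actually queried):

    import os

    # Hypothetical stack root mirroring the layout read by AmbariMetaInfo.
    root = '/tmp/stacks'
    os.makedirs(os.path.join(root, 'HDP', '0.1', 'repos'))
    os.makedirs(os.path.join(root, 'HDP', '0.1', 'services', 'HDFS', 'configs'))

    with open(os.path.join(root, 'HDP', '0.1', 'repos', 'repoinfo.xml'), 'w') as f:
      f.write('<repos><repo><os>centos5</os>'
              '<url>http://example.com/repo</url>'
              '<description>sample repo</description></repo></repos>')

    with open(os.path.join(root, 'HDP', '0.1', 'services', 'HDFS',
                           'configs', 'hdfs-site.xml'), 'w') as f:
      f.write('<configuration><property><name>dfs.name.dir</name>'
              '<value>/hadoop/hdfs</value>'
              '<description>sample property</description></property></configuration>')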

Modified: incubator/ambari/branches/AMBARI-666/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ResourceProviderImpl.java
URL: http://svn.apache.org/viewvc/incubator/ambari/branches/AMBARI-666/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ResourceProviderImpl.java?rev=1400790&r1=1400789&r2=1400790&view=diff
==============================================================================
--- incubator/ambari/branches/AMBARI-666/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ResourceProviderImpl.java (original)
+++ incubator/ambari/branches/AMBARI-666/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ResourceProviderImpl.java Mon Oct 22 07:44:06 2012
@@ -396,9 +396,11 @@ public abstract class ResourceProviderIm
 
     @Override
     public void createResources(Request request) throws AmbariException {
-      for (Map<PropertyId, Object> properties : request.getProperties()) {
-        getManagementController().createService(getRequest(properties));
+      Set<ServiceRequest> requests = new HashSet<ServiceRequest>();
+      for (Map<PropertyId, Object> propertyMap : request.getProperties()) {
+        requests.add(getRequest(propertyMap));
       }
+      getManagementController().createServices(requests);
     }
 
     @Override
@@ -422,18 +424,20 @@ public abstract class ResourceProviderIm
 
     @Override
     public void updateResources(Request request, Predicate predicate) throws AmbariException {
+      Set<ServiceRequest> requests = new HashSet<ServiceRequest>();
       for (Map<PropertyId, Object> propertyMap : getPropertyMaps(request.getProperties().iterator().next(), predicate)) {
-        ServiceRequest serviceRequest = getRequest(propertyMap);
-        getManagementController().updateService(serviceRequest);
+        requests.add(getRequest(propertyMap));
       }
+      getManagementController().updateServices(requests);
     }
 
     @Override
     public void deleteResources(Predicate predicate) throws AmbariException {
+      Set<ServiceRequest> requests = new HashSet<ServiceRequest>();
       for (Map<PropertyId, Object> propertyMap : getPropertyMaps(null, predicate)) {
-        ServiceRequest serviceRequest = getRequest(propertyMap);
-        getManagementController().deleteService(serviceRequest);
+        requests.add(getRequest(propertyMap));
       }
+      getManagementController().deleteServices(requests);
     }
 
     // ----- utility methods -------------------------------------------------
@@ -488,9 +492,11 @@ public abstract class ResourceProviderIm
 
     @Override
     public void createResources(Request request) throws AmbariException {
-      for (Map<PropertyId, Object> properties : request.getProperties()) {
-        getManagementController().createComponent(getRequest(properties));
+      Set<ServiceComponentRequest> requests = new HashSet<ServiceComponentRequest>();
+      for (Map<PropertyId, Object> propertyMap : request.getProperties()) {
+        requests.add(getRequest(propertyMap));
       }
+      getManagementController().createComponents(requests);
     }
 
     @Override
@@ -515,18 +521,20 @@ public abstract class ResourceProviderIm
 
     @Override
     public void updateResources(Request request, Predicate predicate) throws AmbariException {
+      Set<ServiceComponentRequest> requests = new HashSet<ServiceComponentRequest>();
       for (Map<PropertyId, Object> propertyMap : getPropertyMaps(request.getProperties().iterator().next(), predicate)) {
-        ServiceComponentRequest serviceComponentRequest = getRequest(propertyMap);
-        getManagementController().updateComponent(serviceComponentRequest);
+        requests.add(getRequest(propertyMap));
       }
+      getManagementController().updateComponents(requests);
     }
 
     @Override
     public void deleteResources(Predicate predicate) throws AmbariException {
+      Set<ServiceComponentRequest> requests = new HashSet<ServiceComponentRequest>();
       for (Map<PropertyId, Object> propertyMap : getPropertyMaps(null, predicate)) {
-        ServiceComponentRequest serviceComponentRequest = getRequest(propertyMap);
-        getManagementController().deleteComponent(serviceComponentRequest);
+        requests.add(getRequest(propertyMap));
       }
+      getManagementController().deleteComponents(requests);
     }
 
     // ----- utility methods -------------------------------------------------
@@ -580,9 +588,11 @@ public abstract class ResourceProviderIm
 
     @Override
     public void createResources(Request request) throws AmbariException {
-      for (Map<PropertyId, Object> properties : request.getProperties()) {
-        getManagementController().createHost(getRequest(properties));
+      Set<HostRequest> requests = new HashSet<HostRequest>();
+      for (Map<PropertyId, Object> propertyMap : request.getProperties()) {
+        requests.add(getRequest(propertyMap));
       }
+      getManagementController().createHosts(requests);
     }
 
     @Override
@@ -611,18 +621,20 @@ public abstract class ResourceProviderIm
 
     @Override
     public void updateResources(Request request, Predicate predicate) throws AmbariException {
+      Set<HostRequest> requests = new HashSet<HostRequest>();
       for (Map<PropertyId, Object> propertyMap : getPropertyMaps(request.getProperties().iterator().next(), predicate)) {
-        HostRequest hostRequest = getRequest(propertyMap);
-        getManagementController().updateHost(hostRequest);
+        requests.add(getRequest(propertyMap));
       }
+      getManagementController().updateHosts(requests);
     }
 
     @Override
     public void deleteResources(Predicate predicate) throws AmbariException {
+      Set<HostRequest> requests = new HashSet<HostRequest>();
       for (Map<PropertyId, Object> propertyMap : getPropertyMaps(null, predicate)) {
-        HostRequest hostRequest = getRequest(propertyMap);
-        getManagementController().deleteHost(hostRequest);
+        requests.add(getRequest(propertyMap));
       }
+      getManagementController().deleteHosts(requests);
     }
 
     // ----- utility methods -------------------------------------------------
@@ -678,9 +690,11 @@ public abstract class ResourceProviderIm
 
     @Override
     public void createResources(Request request) throws AmbariException {
-      for (Map<PropertyId, Object> properties : request.getProperties()) {
-        getManagementController().createHostComponent(getRequest(properties));
+      Set<ServiceComponentHostRequest> requests = new HashSet<ServiceComponentHostRequest>();
+      for (Map<PropertyId, Object> propertyMap : request.getProperties()) {
+        requests.add(getRequest(propertyMap));
       }
+      getManagementController().createHostComponents(requests);
     }
 
     @Override
@@ -705,18 +719,20 @@ public abstract class ResourceProviderIm
 
     @Override
     public void updateResources(Request request, Predicate predicate) throws AmbariException {
+      Set<ServiceComponentHostRequest> requests = new HashSet<ServiceComponentHostRequest>();
       for (Map<PropertyId, Object> propertyMap : getPropertyMaps(request.getProperties().iterator().next(), predicate)) {
-        ServiceComponentHostRequest hostComponentRequest = getRequest(propertyMap);
-        getManagementController().updateHostComponent(hostComponentRequest);
+        requests.add(getRequest(propertyMap));
       }
+      getManagementController().updateHostComponents(requests);
     }
 
     @Override
     public void deleteResources(Predicate predicate) throws AmbariException {
+      Set<ServiceComponentHostRequest> requests = new HashSet<ServiceComponentHostRequest>();
       for (Map<PropertyId, Object> propertyMap : getPropertyMaps(null, predicate)) {
-        ServiceComponentHostRequest serviceComponentHostRequest = getRequest(propertyMap);
-        getManagementController().deleteHostComponent(serviceComponentHostRequest);
+        requests.add(getRequest(propertyMap));
       }
+      getManagementController().deleteHostComponents(requests);
     }
 
     // ----- utility methods -------------------------------------------------
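
The same refactoring shape repeats across all four providers above: instead of one management-controller call per property map, each operation now collects the full request set first and issues a single bulk call (createServices, updateComponents, deleteHosts, and so on). A minimal Python sketch of the pattern, with hypothetical names standing in for the controller API:

    def create_resources(controller, property_maps, to_request):
      # Build every request up front so the controller sees the whole batch
      # at once instead of a stream of single-item calls.
      requests = set(to_request(m) for m in property_maps)
      controller.create_services(requests)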

Added: incubator/ambari/branches/AMBARI-666/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/ExecutionCommandDAO.java
URL: http://svn.apache.org/viewvc/incubator/ambari/branches/AMBARI-666/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/ExecutionCommandDAO.java?rev=1400790&view=auto
==============================================================================
--- incubator/ambari/branches/AMBARI-666/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/ExecutionCommandDAO.java (added)
+++ incubator/ambari/branches/AMBARI-666/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/ExecutionCommandDAO.java Mon Oct 22 07:44:06 2012
@@ -0,0 +1,56 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ambari.server.orm.dao;
+
+import com.google.inject.Inject;
+import com.google.inject.Provider;
+import com.google.inject.persist.Transactional;
+import org.apache.ambari.server.orm.entities.ExecutionCommandEntity;
+
+import javax.persistence.EntityManager;
+
+public class ExecutionCommandDAO {
+
+  @Inject
+  Provider<EntityManager> entityManagerProvider;
+  
+  public ExecutionCommandEntity findByPK(int taskId) {
+    return entityManagerProvider.get().find(ExecutionCommandEntity.class, taskId);
+  }
+
+  @Transactional
+  public void create(ExecutionCommandEntity executionCommandEntity) {
+    entityManagerProvider.get().persist(executionCommandEntity);
+  }
+
+  @Transactional
+  public ExecutionCommandEntity merge(ExecutionCommandEntity executionCommandEntity) {
+    return entityManagerProvider.get().merge(executionCommandEntity);
+  }
+
+  @Transactional
+  public void remove(ExecutionCommandEntity executionCommandEntity) {
+    entityManagerProvider.get().remove(executionCommandEntity);
+  }
+
+  @Transactional
+  public void removeByPK(int taskId) {
+    remove(findByPK(taskId));
+  }
+}


