ambari-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From yus...@apache.org
Subject [1/2] ambari git commit: AMBARI-8949. Add support for Ranger. (Gautam Borad via yusaku)
Date Wed, 21 Jan 2015 05:33:23 GMT
Repository: ambari
Updated Branches:
  refs/heads/trunk 0e1d8bbb5 -> 1052efd85


http://git-wip-us.apache.org/repos/asf/ambari/blob/1052efd8/ambari-server/src/main/resources/common-services/RANGER/0.4.0/configuration/usersync-properties.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/RANGER/0.4.0/configuration/usersync-properties.xml b/ambari-server/src/main/resources/common-services/RANGER/0.4.0/configuration/usersync-properties.xml
new file mode 100644
index 0000000..15838a2
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/RANGER/0.4.0/configuration/usersync-properties.xml
@@ -0,0 +1,103 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration supports_final="false">
+	<property>
+		<name>SYNC_SOURCE</name>
+		<value>unix</value>
+		<description></description>
+	</property>
+	<property>
+		<name>MIN_UNIX_USER_ID_TO_SYNC</name>
+		<value>1000</value>
+		<description></description>
+	</property>
+	<property>
+		<name>SYNC_INTERVAL</name>
+		<value>1</value>
+		<description></description>
+	</property>
+	<property>
+		<name>SYNC_LDAP_URL</name>
+		<value>ldap://localhost:389</value>
+		<description>a sample value would be:  ldap://ldap.example.com:389</description>
+	</property>
+	<property>
+		<name>SYNC_LDAP_BIND_DN</name>
+		<value>cn=admin,dc=xasecure,dc=net</value>
+		<description>a sample value would be cn=admin,ou=users,dc=hadoop,dc=apache,dc=org</description>
+	</property>
+	<property>
+		<name>SYNC_LDAP_BIND_PASSWORD</name>
+		<value>admin321</value>
+		<description></description>
+	</property>
+	<property>
+		<name>CRED_KEYSTORE_FILENAME</name>
+		<value>/usr/lib/xausersync/.jceks/xausersync.jceks</value>
+		<description></description>
+	</property>
+	<property>
+		<name>SYNC_LDAP_USER_SEARCH_BASE</name>
+		<value>ou=users,dc=xasecure,dc=net</value>
+		<description>sample value would be ou=users,dc=hadoop,dc=apache,dc=org</description>
+	</property>
+	<property>
+		<name>SYNC_LDAP_USER_SEARCH_SCOPE</name>
+		<value>sub</value>
+		<description>default value: sub</description>
+	</property>
+	<property>
+		<name>SYNC_LDAP_USER_OBJECT_CLASS</name>
+		<value>person</value>
+		<description>default value: person</description>
+	</property>
+	<property>
+		<name>SYNC_LDAP_USER_SEARCH_FILTER</name>
+		<value>-</value>
+		<description>default value is empty</description>
+	</property>
+	<property>
+		<name>SYNC_LDAP_USER_NAME_ATTRIBUTE</name>
+		<value>cn</value>
+		<description>default value: cn</description>
+	</property>
+	<property>
+		<name>SYNC_LDAP_USER_GROUP_NAME_ATTRIBUTE</name>
+		<value>memberof,ismemberof</value>
+		<description></description>
+	</property>
+	<property>
+		<name>SYNC_LDAP_USERNAME_CASE_CONVERSION</name>
+		<value>lower</value>
+		<description>possible values:  none, lower, upper</description>
+	</property>
+	<property>
+		<name>SYNC_LDAP_GROUPNAME_CASE_CONVERSION</name>
+		<value>lower</value>
+		<description>possible values:  none, lower, upper</description>
+	</property>
+	<property>
+		<name>logdir</name>
+		<value>logs</value>
+		<description>user sync log path</description>
+	</property>
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/1052efd8/ambari-server/src/main/resources/common-services/RANGER/0.4.0/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/RANGER/0.4.0/metainfo.xml b/ambari-server/src/main/resources/common-services/RANGER/0.4.0/metainfo.xml
new file mode 100644
index 0000000..db6544b
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/RANGER/0.4.0/metainfo.xml
@@ -0,0 +1,84 @@
+<?xml version="1.0"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<metainfo>
+    <schemaVersion>2.0</schemaVersion>
+    <services>
+        <service>
+            <name>RANGER</name>
+            <displayName>Ranger</displayName>
+            <comment>Comprehensive security for Hadoop</comment>
+            <version>0.4.0</version>
+            <components>
+                
+                <component>
+                    <name>RANGER_ADMIN</name>
+                    <displayName>Ranger Admin</displayName>
+                    <category>MASTER</category>
+                    <cardinality>1</cardinality>
+                    <commandScript>
+                        <script>scripts/ranger_admin.py</script>
+                        <scriptType>PYTHON</scriptType>
+                        <timeout>600</timeout>
+                    </commandScript>
+                </component>
+
+                <component>
+                    <name>RANGER_USERSYNC</name>
+                    <displayName>Ranger Usersync</displayName>
+                    <category>MASTER</category>
+                    <cardinality>1</cardinality>
+                    <commandScript>
+                        <script>scripts/ranger_usersync.py</script>
+                        <scriptType>PYTHON</scriptType>
+                        <timeout>600</timeout>
+                    </commandScript>           
+                </component>
+
+            </components>              
+
+            <osSpecifics>
+                <osSpecific>
+                    <osFamily>redhat5,redhat6,suse11,ubuntu12</osFamily>
+                    <packages>
+                        <package>
+                            <name>ranger-admin</name>                                
+                        </package>
+                        <package>
+                            <name>ranger-usersync</name>
+                        </package>                           
+                    </packages>                        
+                </osSpecific>
+            </osSpecifics>
+
+            <configuration-dependencies>
+                <config-type>admin-properties</config-type>
+                <config-type>usersync-properties</config-type>
+            </configuration-dependencies>
+
+            <commandScript>
+                <script>scripts/service_check.py</script>
+                <scriptType>PYTHON</scriptType>
+                <timeout>300</timeout>				
+            </commandScript>
+
+        </service>
+    </services>
+</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/1052efd8/ambari-server/src/main/resources/common-services/RANGER/0.4.0/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/RANGER/0.4.0/package/scripts/params.py b/ambari-server/src/main/resources/common-services/RANGER/0.4.0/package/scripts/params.py
new file mode 100644
index 0000000..2deac53
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/RANGER/0.4.0/package/scripts/params.py
@@ -0,0 +1,43 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management.libraries.functions.version import format_hdp_stack_version, compare_versions
+from resource_management import *
+
+config  = Script.get_config()
+tmp_dir = Script.get_tmp_dir()
+
+hdp_stack_version         = str(config['hostLevelParams']['stack_version'])
+hdp_stack_version         = format_hdp_stack_version(hdp_stack_version)
+stack_is_hdp22_or_further = hdp_stack_version != "" and compare_versions(hdp_stack_version, '2.2') >= 0
+
+if stack_is_hdp22_or_further:
+	ranger_home    = '/usr/hdp/current/ranger-admin'
+	ranger_stop    = '/usr/bin/ranger-admin-stop'
+	ranger_start   = '/usr/bin/ranger-admin-start'
+	usersync_home  = '/usr/hdp/current/ranger-usersync'
+	usersync_start = '/usr/bin/ranger-usersync-start'
+	usersync_stop  = '/usr/bin/ranger-usersync-stop'
+else:
+	pass
+
+java_home = config['hostLevelParams']['java_home']
+unix_user  = default("/configurations/ranger-env/ranger_user", "ranger")
+unix_group = default("/configurations/ranger-env/ranger_group", "ranger")

http://git-wip-us.apache.org/repos/asf/ambari/blob/1052efd8/ambari-server/src/main/resources/common-services/RANGER/0.4.0/package/scripts/ranger_admin.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/RANGER/0.4.0/package/scripts/ranger_admin.py b/ambari-server/src/main/resources/common-services/RANGER/0.4.0/package/scripts/ranger_admin.py
new file mode 100644
index 0000000..36a5759
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/RANGER/0.4.0/package/scripts/ranger_admin.py
@@ -0,0 +1,58 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+from resource_management import *
+from resource_management.core.exceptions import ComponentIsNotRunning
+from resource_management.core.logger import Logger
+from resource_management.core import shell
+from setup_ranger import setup_ranger
+
+class RangerAdmin(Script):
+    def install(self, env):
+        self.install_packages(env)
+        setup_ranger(env)
+
+    def stop(self, env):
+        import params
+        env.set_params(params)
+        Execute(format('{params.ranger_stop}'))
+
+    def start(self, env):
+        import params
+        setup_ranger(env)
+        Execute(format('{params.ranger_start}'))
+     
+    def status(self, env):
+        cmd = 'ps -ef | grep proc_rangeradmin | grep -v grep'
+        code, output = shell.call(cmd, timeout=20)
+
+        if code != 0:
+            Logger.debug('Ranger admin process not running')
+            raise ComponentIsNotRunning()
+        pass 
+
+    def configure(self, env):
+        import params
+        env.set_params(params)
+
+
+if __name__ == "__main__":
+  RangerAdmin().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/1052efd8/ambari-server/src/main/resources/common-services/RANGER/0.4.0/package/scripts/ranger_usersync.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/RANGER/0.4.0/package/scripts/ranger_usersync.py b/ambari-server/src/main/resources/common-services/RANGER/0.4.0/package/scripts/ranger_usersync.py
new file mode 100644
index 0000000..e474092
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/RANGER/0.4.0/package/scripts/ranger_usersync.py
@@ -0,0 +1,56 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+import sys
+from resource_management import *
+from resource_management.core.exceptions import ComponentIsNotRunning
+from resource_management.core.logger import Logger
+from resource_management.core import shell
+from setup_ranger import setup_usersync
+
+class RangerUsersync(Script):
+    def install(self, env):
+        self.install_packages(env)
+        setup_usersync(env)        
+
+    def stop(self, env):
+        import params
+        Execute(format('{params.usersync_stop}'))
+
+    def start(self, env):
+        import params
+        setup_usersync(env)
+        Execute(format('{params.usersync_start}'))
+     
+    def status(self, env):
+        cmd = 'ps -ef | grep proc_rangerusersync | grep -v grep'
+        code, output = shell.call(cmd, timeout=20)        
+
+        if code != 0:
+            Logger.debug('Ranger usersync process not running')
+            raise ComponentIsNotRunning()
+        pass
+
+    def configure(self, env):
+        import params
+        env.set_params(params)
+
+
+if __name__ == "__main__":
+  RangerUsersync().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/1052efd8/ambari-server/src/main/resources/common-services/RANGER/0.4.0/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/RANGER/0.4.0/package/scripts/service_check.py b/ambari-server/src/main/resources/common-services/RANGER/0.4.0/package/scripts/service_check.py
new file mode 100644
index 0000000..51bbf52
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/RANGER/0.4.0/package/scripts/service_check.py
@@ -0,0 +1,53 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+
+
+class RangerServiceCheck(Script):
+  def service_check(self, env):
+    import params
+    env.set_params(params)
+    self.check_ranger_admin_service()
+    self.check_ranger_usersync_service()
+    
+  def check_ranger_admin_service(self):
+    cmd = 'ps -ef | grep proc_rangeradmin | grep -v grep'
+    code, output = shell.call(cmd, timeout=20)
+    if code == 0:
+      Logger.info('Ranger admin process up and running')
+    else:
+      Logger.debug('Ranger admin process not running')
+      raise ComponentIsNotRunning()
+  pass
+
+
+  def check_ranger_usersync_service(self):
+    cmd = 'ps -ef | grep proc_rangerusersync | grep -v grep'
+    code, output = shell.call(cmd, timeout=20)
+    if code == 0:
+      Logger.info('Ranger usersync process up and running')
+    else:
+      Logger.debug('Ranger usersync process not running')
+      raise ComponentIsNotRunning()
+  pass
+
+
+if __name__ == "__main__":
+  RangerServiceCheck().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/1052efd8/ambari-server/src/main/resources/common-services/RANGER/0.4.0/package/scripts/setup_ranger.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/RANGER/0.4.0/package/scripts/setup_ranger.py b/ambari-server/src/main/resources/common-services/RANGER/0.4.0/package/scripts/setup_ranger.py
new file mode 100644
index 0000000..81e3f2e
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/RANGER/0.4.0/package/scripts/setup_ranger.py
@@ -0,0 +1,153 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+import fileinput
+import shutil
+import os
+from resource_management import *
+from resource_management.core.logger import Logger
+
+def setup_ranger(env):
+    import params
+    env.set_params(params)
+
+    if check_db_connnection(env):
+        file_path = params.ranger_home + '/install.properties'
+
+        if os.path.isfile(file_path):
+            shutil.copyfile(file_path, params.ranger_home + '/install-bk.properties')
+        else:
+            raise Fail('Ranger admin install.properties file does not exist')
+
+        write_properties_to_file(file_path, params.config['configurations']['admin-properties'])
+    
+        cmd = format('cd {ranger_home} && {ranger_home}/setup.sh')
+
+        try:
+           opt = Execute(cmd, environment={'JAVA_HOME': params.java_home}, logoutput=True)
+        except Exception, e:
+            if os.path.isfile(params.ranger_home + '/install-bk.properties'):
+                os.remove(file_path)
+                os.rename(params.ranger_home + '/install-bk.properties', file_path)
+            raise Fail('Ranger installation Failed, {0}'.format(str(e)))
+
+        if os.path.isfile(params.ranger_home + '/install-bk.properties'):
+            os.remove(file_path)
+            os.rename(params.ranger_home + '/install-bk.properties', file_path)
+        else:
+            raise Fail('Ranger admin install.properties backup file does not exist')
+        
+def setup_usersync(env):
+    import params
+    env.set_params(params)
+
+    file_path = params.usersync_home + '/install.properties'
+    write_properties_to_file(file_path, usersync_properties(params))
+    
+    cmd = format('cd {usersync_home} && {usersync_home}/setup.sh')
+    Execute(cmd, environment={'JAVA_HOME': params.java_home}, logoutput=True)
+
+def write_properties_to_file(file_path, value):
+    for key in value:
+      modify_config(file_path, key, value[key])        
+
+def modify_config(filepath, variable, setting):
+    var_found = False
+    already_set = False
+    V=str(variable)
+    S=str(setting)
+
+    if ' ' in S:
+        S = '%s' % S
+
+    for line in fileinput.input(filepath, inplace = 1):
+        if not line.lstrip(' ').startswith('#') and '=' in line:
+            _infile_var = str(line.split('=')[0].rstrip(' '))
+            _infile_set = str(line.split('=')[1].lstrip(' ').rstrip())
+            if var_found == False and _infile_var.rstrip(' ') == V:
+                var_found = True
+                if _infile_set.lstrip(' ') == S:
+                    already_set = True
+                else:
+                    line = "%s=%s\n" % (V, S)
+
+        sys.stdout.write(line)
+
+    if not var_found:
+        with open(filepath, "a") as f:
+            f.write("%s=%s\n" % (V, S))
+    elif already_set == True:
+        pass
+    else:
+        pass
+
+    return
+
+def usersync_properties(params):
+    d = dict()
+
+    d['POLICY_MGR_URL'] = params.config['configurations']['admin-properties']['policymgr_external_url']
+    
+    d['SYNC_SOURCE'] = params.config['configurations']['usersync-properties']['SYNC_SOURCE']
+    d['MIN_UNIX_USER_ID_TO_SYNC'] = params.config['configurations']['usersync-properties']['MIN_UNIX_USER_ID_TO_SYNC']
+    d['SYNC_INTERVAL'] = params.config['configurations']['usersync-properties']['SYNC_INTERVAL']
+    d['SYNC_LDAP_URL'] = params.config['configurations']['usersync-properties']['SYNC_LDAP_URL']
+    d['SYNC_LDAP_BIND_DN'] = params.config['configurations']['usersync-properties']['SYNC_LDAP_BIND_DN']
+    d['SYNC_LDAP_BIND_PASSWORD'] = params.config['configurations']['usersync-properties']['SYNC_LDAP_BIND_PASSWORD']
+    d['CRED_KEYSTORE_FILENAME'] = params.config['configurations']['usersync-properties']['CRED_KEYSTORE_FILENAME']
+    d['SYNC_LDAP_USER_SEARCH_BASE'] = params.config['configurations']['usersync-properties']['SYNC_LDAP_USER_SEARCH_BASE']
+    d['SYNC_LDAP_USER_SEARCH_SCOPE'] = params.config['configurations']['usersync-properties']['SYNC_LDAP_USER_SEARCH_SCOPE']
+    d['SYNC_LDAP_USER_OBJECT_CLASS'] = params.config['configurations']['usersync-properties']['SYNC_LDAP_USER_OBJECT_CLASS']
+    d['SYNC_LDAP_USER_SEARCH_FILTER'] = params.config['configurations']['usersync-properties']['SYNC_LDAP_USER_SEARCH_FILTER']
+    d['SYNC_LDAP_USER_NAME_ATTRIBUTE'] = params.config['configurations']['usersync-properties']['SYNC_LDAP_USER_NAME_ATTRIBUTE']
+    d['SYNC_LDAP_USER_GROUP_NAME_ATTRIBUTE'] = params.config['configurations']['usersync-properties']['SYNC_LDAP_USER_GROUP_NAME_ATTRIBUTE']
+    d['SYNC_LDAP_USERNAME_CASE_CONVERSION'] = params.config['configurations']['usersync-properties']['SYNC_LDAP_USERNAME_CASE_CONVERSION']
+    d['SYNC_LDAP_GROUPNAME_CASE_CONVERSION'] = params.config['configurations']['usersync-properties']['SYNC_LDAP_GROUPNAME_CASE_CONVERSION']
+    d['logdir'] = params.config['configurations']['usersync-properties']['logdir']
+
+    return d
+
+def check_db_connnection(env):
+    import params
+    env.set_params(params)
+    
+    db_root_password = params.config['configurations']['admin-properties']["db_root_password"]
+    db_root_user = params.config['configurations']['admin-properties']["db_root_user"]
+    db_host = params.config['configurations']['admin-properties']['db_host']
+    sql_command_invoker = params.config['configurations']['admin-properties']['SQL_COMMAND_INVOKER']
+
+    Logger.info('Checking MYSQL root password')
+
+    cmd_str = "\""+sql_command_invoker+"\""+" -u "+db_root_user+" --password="+db_root_password+" -h "+db_host+" -s -e \"select version();\""
+    status, output = get_status_output(cmd_str)
+    
+    if status == 0:
+        Logger.info('Checking MYSQL root password DONE')
+        return True 
+    else:
+        Logger.info('Ranger Admin installation Failed! Ranger requires DB client installed on Ranger Host and DB server running on DB Host')
+        sys.exit(1)
+
+def get_status_output(cmd):
+    import subprocess
+
+    ret = subprocess.call(cmd, shell=True)
+    return ret, ret

http://git-wip-us.apache.org/repos/asf/ambari/blob/1052efd8/ambari-server/src/main/resources/stacks/HDP/2.2/role_command_order.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.2/role_command_order.json b/ambari-server/src/main/resources/stacks/HDP/2.2/role_command_order.json
index e62cda2..b8840d8 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.2/role_command_order.json
+++ b/ambari-server/src/main/resources/stacks/HDP/2.2/role_command_order.json
@@ -3,6 +3,7 @@
  "_comment" : "blockedRole-blockedCommand: [blockerRole1-blockerCommand1, blockerRole2-blockerCommand2, ...]",
  "general_deps" : {
    "_comment" : "dependencies for all cases",
+    "RANGER_SERVICE_CHECK-SERVICE_CHECK" : ["RANGER_ADMIN-START", "RANGER_USERSYNC-START"],
     "FALCON_SERVER-START": ["NAMENODE-START", "DATANODE-START", "OOZIE_SERVER-START"],
     "METRIC_COLLECTOR-START": ["NAMENODE-START", "DATANODE-START"],
     "AMS_SERVICE_CHECK-SERVICE_CHECK": ["METRIC_COLLECTOR-START", "HDFS_SERVICE_CHECK-SERVICE_CHECK"],

http://git-wip-us.apache.org/repos/asf/ambari/blob/1052efd8/ambari-server/src/main/resources/stacks/HDP/2.2/services/HBASE/configuration/ranger-hbase-plugin-properties.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.2/services/HBASE/configuration/ranger-hbase-plugin-properties.xml b/ambari-server/src/main/resources/stacks/HDP/2.2/services/HBASE/configuration/ranger-hbase-plugin-properties.xml
new file mode 100644
index 0000000..fdc2c7c
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.2/services/HBASE/configuration/ranger-hbase-plugin-properties.xml
@@ -0,0 +1,150 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration supports_final="true">
+
+        <property>
+                <name>ranger-hbase-plugin-enabled</name>
+                <value>No</value>
+                <description>Enable ranger hbase plugin ?</description>
+        </property>
+
+	<property>
+		<name>REPOSITORY_CONFIG_USERNAME</name>
+		<value>hbase</value>
+		<description>Used for repository creation on ranger admin</description>
+	</property>	
+
+	<property>
+		<name>REPOSITORY_CONFIG_PASSWORD</name>
+		<value>hbase</value>
+		<property-type>PASSWORD</property-type>
+		<description>Used for repository creation on ranger admin</description>
+	</property>	
+
+	<property>
+		<name>XAAUDIT.DB.IS_ENABLED</name>
+		<value>true</value>
+		<description></description>
+	</property>	
+
+	<property>
+		<name>XAAUDIT.HDFS.IS_ENABLED</name>
+		<value>false</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>XAAUDIT.HDFS.DESTINATION_DIRECTORY</name>
+		<value>hdfs://__REPLACE__NAME_NODE_HOST:8020/ranger/audit/%app-type%/%time:yyyyMMdd%</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>XAAUDIT.HDFS.LOCAL_BUFFER_DIRECTORY</name>
+		<value>__REPLACE__LOG_DIR/hadoop/%app-type%/audit</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>XAAUDIT.HDFS.LOCAL_ARCHIVE_DIRECTORY</name>
+		<value>__REPLACE__LOG_DIR/hadoop/%app-type%/audit/archive</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>XAAUDIT.HDFS.DESTINTATION_FILE</name>
+		<value>%hostname%-audit.log</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>XAAUDIT.HDFS.DESTINTATION_FLUSH_INTERVAL_SECONDS</name>
+		<value>900</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>XAAUDIT.HDFS.DESTINTATION_ROLLOVER_INTERVAL_SECONDS</name>
+		<value>86400</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>XAAUDIT.HDFS.DESTINTATION_OPEN_RETRY_INTERVAL_SECONDS</name>
+		<value>60</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>XAAUDIT.HDFS.LOCAL_BUFFER_FILE</name>
+		<value>%time:yyyyMMdd-HHmm.ss%.log</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>XAAUDIT.HDFS.LOCAL_BUFFER_FLUSH_INTERVAL_SECONDS</name>
+		<value>60</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>XAAUDIT.HDFS.LOCAL_BUFFER_ROLLOVER_INTERVAL_SECONDS</name>
+		<value>600</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>XAAUDIT.HDFS.LOCAL_ARCHIVE_MAX_FILE_COUNT</name>
+		<value>10</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>SSL_KEYSTORE_FILE_PATH</name>
+		<value>/etc/hadoop/conf/ranger-plugin-keystore.jks</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>SSL_KEYSTORE_PASSWORD</name>
+		<value>myKeyFilePassword</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>SSL_TRUSTSTORE_FILE_PATH</name>
+		<value>/etc/hadoop/conf/ranger-plugin-truststore.jks</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>SSL_TRUSTSTORE_PASSWORD</name>
+		<value>changeit</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>UPDATE_XAPOLICIES_ON_GRANT_REVOKE</name>
+		<value>true</value>
+		<description></description>
+	</property>
+
+</configuration>	

http://git-wip-us.apache.org/repos/asf/ambari/blob/1052efd8/ambari-server/src/main/resources/stacks/HDP/2.2/services/HDFS/configuration/ranger-hdfs-plugin-properties.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.2/services/HDFS/configuration/ranger-hdfs-plugin-properties.xml b/ambari-server/src/main/resources/stacks/HDP/2.2/services/HDFS/configuration/ranger-hdfs-plugin-properties.xml
new file mode 100644
index 0000000..7bb6a8a
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.2/services/HDFS/configuration/ranger-hdfs-plugin-properties.xml
@@ -0,0 +1,156 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration supports_final="true">
+
+        <property>
+               <name>hadoop.rpc.protection</name>
+               <value>-</value>
+               <description>Used for repository creation on ranger admin</description>
+        </property>
+
+	<property>
+		<name>common.name.for.certificate</name>
+		<value>-</value>
+		<description>Used for repository creation on ranger admin</description>
+	</property>
+
+        <property>
+               <name>ranger-hdfs-plugin-enabled</name>
+               <value>No</value>
+               <description>Enable the Ranger HDFS plugin?</description>
+        </property>
+
+	<property>
+		<name>REPOSITORY_CONFIG_USERNAME</name>
+		<value>hadoop</value>
+		<description>Used for repository creation on ranger admin</description>
+	</property>
+
+	<property>
+		<name>REPOSITORY_CONFIG_PASSWORD</name>
+		<value>hadoop</value>
+		<property-type>PASSWORD</property-type>
+		<description>Used for repository creation on ranger admin</description>
+	</property>	
+
+	<property>
+		<name>XAAUDIT.DB.IS_ENABLED</name>
+		<value>true</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>XAAUDIT.HDFS.IS_ENABLED</name>
+		<value>false</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>XAAUDIT.HDFS.DESTINATION_DIRECTORY</name>
+		<value>hdfs://__REPLACE__NAME_NODE_HOST:8020/ranger/audit/%app-type%/%time:yyyyMMdd%</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>XAAUDIT.HDFS.LOCAL_BUFFER_DIRECTORY</name>
+		<value>__REPLACE__LOG_DIR/hadoop/%app-type%/audit</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>XAAUDIT.HDFS.LOCAL_ARCHIVE_DIRECTORY</name>
+		<value>__REPLACE__LOG_DIR/hadoop/%app-type%/audit/archive</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>XAAUDIT.HDFS.DESTINTATION_FILE</name>
+		<value>%hostname%-audit.log</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>XAAUDIT.HDFS.DESTINTATION_FLUSH_INTERVAL_SECONDS</name>
+		<value>900</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>XAAUDIT.HDFS.DESTINTATION_ROLLOVER_INTERVAL_SECONDS</name>
+		<value>86400</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>XAAUDIT.HDFS.DESTINTATION_OPEN_RETRY_INTERVAL_SECONDS</name>
+		<value>60</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>XAAUDIT.HDFS.LOCAL_BUFFER_FILE</name>
+		<value>%time:yyyyMMdd-HHmm.ss%.log</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>XAAUDIT.HDFS.LOCAL_BUFFER_FLUSH_INTERVAL_SECONDS</name>
+		<value>60</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>XAAUDIT.HDFS.LOCAL_BUFFER_ROLLOVER_INTERVAL_SECONDS</name>
+		<value>600</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>XAAUDIT.HDFS.LOCAL_ARCHIVE_MAX_FILE_COUNT</name>
+		<value>10</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>SSL_KEYSTORE_FILE_PATH</name>
+		<value>/etc/hadoop/conf/ranger-plugin-keystore.jks</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>SSL_KEYSTORE_PASSWORD</name>
+		<value>myKeyFilePassword</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>SSL_TRUSTSTORE_FILE_PATH</name>
+		<value>/etc/hadoop/conf/ranger-plugin-truststore.jks</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>SSL_TRUSTSTORE_PASSWORD</name>
+		<value>changeit</value>
+		<description></description>
+	</property>
+
+</configuration>	

http://git-wip-us.apache.org/repos/asf/ambari/blob/1052efd8/ambari-server/src/main/resources/stacks/HDP/2.2/services/HIVE/configuration/ranger-hive-plugin-properties.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.2/services/HIVE/configuration/ranger-hive-plugin-properties.xml b/ambari-server/src/main/resources/stacks/HDP/2.2/services/HIVE/configuration/ranger-hive-plugin-properties.xml
new file mode 100644
index 0000000..3ee693e
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.2/services/HIVE/configuration/ranger-hive-plugin-properties.xml
@@ -0,0 +1,163 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration supports_final="true">
+
+        <property>
+                <name>jdbc.driverClassName</name>
+                <value>org.apache.hive.jdbc.HiveDriver</value>
+                <description>Used for repository creation on ranger admin</description>
+        </property>
+
+	<property>
+	        <name>common.name.for.certificate</name>
+        	<value>-</value>
+	        <description>Used for repository creation on ranger admin</description>
+	</property>
+
+
+        <property>
+                <name>ranger-hive-plugin-enabled</name>
+                <value>No</value>
+                <description>Enable the Ranger Hive plugin?</description>
+        </property>
+
+	<property>
+	        <name>REPOSITORY_CONFIG_USERNAME</name>
+        	<value>hive</value>
+	        <description>Used for repository creation on ranger admin</description>
+	</property>
+
+	<property>
+        	<name>REPOSITORY_CONFIG_PASSWORD</name>
+	        <value>hive</value>
+	        <property-type>PASSWORD</property-type>
+	        <description>Used for repository creation on ranger admin</description>
+	</property>
+
+	<property>
+		<name>XAAUDIT.DB.IS_ENABLED</name>
+		<value>true</value>
+		<description></description>
+	</property>	
+
+	<property>
+		<name>XAAUDIT.HDFS.IS_ENABLED</name>
+		<value>false</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>XAAUDIT.HDFS.DESTINATION_DIRECTORY</name>
+		<value>hdfs://__REPLACE__NAME_NODE_HOST:8020/ranger/audit/%app-type%/%time:yyyyMMdd%</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>XAAUDIT.HDFS.LOCAL_BUFFER_DIRECTORY</name>
+		<value>__REPLACE__LOG_DIR/hadoop/%app-type%/audit</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>XAAUDIT.HDFS.LOCAL_ARCHIVE_DIRECTORY</name>
+		<value>__REPLACE__LOG_DIR/hadoop/%app-type%/audit/archive</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>XAAUDIT.HDFS.DESTINTATION_FILE</name>
+		<value>%hostname%-audit.log</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>XAAUDIT.HDFS.DESTINTATION_FLUSH_INTERVAL_SECONDS</name>
+		<value>900</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>XAAUDIT.HDFS.DESTINTATION_ROLLOVER_INTERVAL_SECONDS</name>
+		<value>86400</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>XAAUDIT.HDFS.DESTINTATION_OPEN_RETRY_INTERVAL_SECONDS</name>
+		<value>60</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>XAAUDIT.HDFS.LOCAL_BUFFER_FILE</name>
+		<value>%time:yyyyMMdd-HHmm.ss%.log</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>XAAUDIT.HDFS.LOCAL_BUFFER_FLUSH_INTERVAL_SECONDS</name>
+		<value>60</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>XAAUDIT.HDFS.LOCAL_BUFFER_ROLLOVER_INTERVAL_SECONDS</name>
+		<value>600</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>XAAUDIT.HDFS.LOCAL_ARCHIVE_MAX_FILE_COUNT</name>
+		<value>10</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>SSL_KEYSTORE_FILE_PATH</name>
+		<value>/etc/hadoop/conf/ranger-plugin-keystore.jks</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>SSL_KEYSTORE_PASSWORD</name>
+		<value>myKeyFilePassword</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>SSL_TRUSTSTORE_FILE_PATH</name>
+		<value>/etc/hadoop/conf/ranger-plugin-truststore.jks</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>SSL_TRUSTSTORE_PASSWORD</name>
+		<value>changeit</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>UPDATE_XAPOLICIES_ON_GRANT_REVOKE</name>
+		<value>true</value>
+		<description></description>
+	</property>	
+
+</configuration>	

http://git-wip-us.apache.org/repos/asf/ambari/blob/1052efd8/ambari-server/src/main/resources/stacks/HDP/2.2/services/RANGER/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.2/services/RANGER/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.2/services/RANGER/metainfo.xml
new file mode 100644
index 0000000..5f91087
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.2/services/RANGER/metainfo.xml
@@ -0,0 +1,29 @@
+<?xml version="1.0"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<metainfo>
+    <schemaVersion>2.0</schemaVersion>
+    <services>
+        <service>
+            <name>RANGER</name>
+            <extends>common-services/RANGER/0.4.0</extends>		
+        </service>
+    </services>
+</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/1052efd8/ambari-server/src/main/resources/stacks/HDP/2.2/services/stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.2/services/stack_advisor.py b/ambari-server/src/main/resources/stacks/HDP/2.2/services/stack_advisor.py
index 05ad18b..1608e04 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.2/services/stack_advisor.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.2/services/stack_advisor.py
@@ -23,6 +23,7 @@ class HDP22StackAdvisor(HDP21StackAdvisor):
     parentRecommendConfDict = super(HDP22StackAdvisor, self).getServiceConfigurationRecommenderDict()
     childRecommendConfDict = {
       "HDFS": self.recommendHDFSConfigurations,
+      "HIVE": self.recommendHIVEConfigurations,
       "HBASE": self.recommendHBASEConfigurations,
       "MAPREDUCE2": self.recommendMapReduce2Configurations,
       "TEZ": self.recommendTezConfigurations,
@@ -46,11 +47,38 @@ class HDP22StackAdvisor(HDP21StackAdvisor):
     putHDFSProperty('namenode_opt_newsize', max(int(clusterData['totalAvailableRam'] / 8), 128))
     putHDFSProperty = self.putProperty(configurations, "hadoop-env")
     putHDFSProperty('namenode_opt_maxnewsize', max(int(clusterData['totalAvailableRam'] / 8), 256))
+    servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
+    if 'ranger-hdfs-plugin-properties' in services['configurations']:
+      rangerPluginEnabled = services['configurations']['ranger-hdfs-plugin-properties']['properties']['ranger-hdfs-plugin-enabled']
+      if ("RANGER" in servicesList) and (rangerPluginEnabled.lower() == 'Yes'.lower()):
+        putHDFSProperty("dfs.permissions.enabled",'true')
+
+  def recommendHIVEConfigurations(self, configurations, clusterData, services, hosts):
+    servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
+    if 'ranger-hive-plugin-properties' in services['configurations']:
+      rangerPluginEnabled = services['configurations']['ranger-hive-plugin-properties']['properties']['ranger-hive-plugin-enabled']
+      if ("RANGER" in servicesList) :
+        if (rangerPluginEnabled.lower() == "Yes".lower()):
+          putHiveProperty = self.putProperty(configurations, "hiveserver2-site")
+          putHiveProperty("hive.security.authorization.manager", 'com.xasecure.authorization.hive.authorizer.XaSecureHiveAuthorizerFactory')
+          putHiveProperty("hive.security.authenticator.manager", 'org.apache.hadoop.hive.ql.security.SessionStateUserAuthenticator')
+        elif (rangerPluginEnabled.lower() == "No".lower()):
+          putHiveProperty = self.putProperty(configurations, "hiveserver2-site")
+          putHiveProperty("hive.security.authorization.manager", 'org.apache.hadoop.hive.ql.security.authorization.DefaultHiveAuthorizationProvider')
+          putHiveProperty("hive.security.authenticator.manager", 'org.apache.hadoop.hive.ql.security.HadoopDefaultAuthenticator')
 
   def recommendHBASEConfigurations(self, configurations, clusterData, services, hosts):
     putHbaseSiteProperty = self.putProperty(configurations, "hbase-site")
     putHbaseSiteProperty("hbase.regionserver.global.memstore.upperLimit", '0.4')
 
+    servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
+    if 'ranger-hbase-plugin-properties' in services['configurations']:
+      rangerPluginEnabled = services['configurations']['ranger-hbase-plugin-properties']['properties']['ranger-hbase-plugin-enabled']
+      if ("RANGER" in servicesList) and (rangerPluginEnabled.lower() == "Yes".lower()):
+          putHbaseSiteProperty("hbase.security.authorization", 'true')
+          putHbaseSiteProperty("hbase.coprocessor.master.classes", 'com.xasecure.authorization.hbase.XaSecureAuthorizationCoprocessor')
+          putHbaseSiteProperty("hbase.coprocessor.region.classes", 'com.xasecure.authorization.hbase.XaSecureAuthorizationCoprocessor')
+
   def recommendTezConfigurations(self, configurations, clusterData, services, hosts):
     putTezProperty = self.putProperty(configurations, "tez-site")
     putTezProperty("tez.am.resource.memory.mb", int(clusterData['amMemory']) * 2 if int(clusterData['amMemory']) < 3072 else int(clusterData['amMemory']))
@@ -104,6 +132,7 @@ class HDP22StackAdvisor(HDP21StackAdvisor):
     childValidators = {
       "HDFS": {"hdfs-site": self.validateHDFSConfigurations,
                "hadoop-env": self.validateHDFSConfigurationsEnv},
+      "HIVE": {"hiveserver2-site": self.validateHIVEConfigurations},
       "HBASE": {"hbase-site": self.validateHBASEConfigurations},
       "MAPREDUCE2": {"mapred-site": self.validateMapReduce2Configurations},
       "AMS": {"ams-hbase-site": self.validateAmsHbaseSiteConfigurations,
@@ -232,7 +261,7 @@ class HDP22StackAdvisor(HDP21StackAdvisor):
                         {"config-name": 'namenode_opt_newsize', "item": self.validatorLessThenDefaultValue(properties, recommendedDefaults, 'namenode_opt_newsize')},
                         {"config-name": 'namenode_opt_maxnewsize', "item": self.validatorLessThenDefaultValue(properties, recommendedDefaults, 'namenode_opt_maxnewsize')}]
     return self.toConfigurationValidationProblems(validationItems, "hadoop-env")
-
+  
   def validateHDFSConfigurations(self, properties, recommendedDefaults, configurations, services, hosts):
     # We can not access property hadoop.security.authentication from the
     # other config (core-site). That's why we are using another heuristics here
@@ -253,6 +282,16 @@ class HDP22StackAdvisor(HDP21StackAdvisor):
     VALID_TRANSFER_PROTECTION_VALUES = ['authentication', 'integrity', 'privacy']
 
     validationItems = []
+    #Adding Ranger Plugin logic here 
+    ranger_plugin_properties = getSiteProperties(configurations, "ranger-hdfs-plugin-properties")
+    ranger_plugin_enabled = ranger_plugin_properties['ranger-hdfs-plugin-enabled']
+    servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
+    if ("RANGER" in servicesList) and (ranger_plugin_enabled.lower() == 'Yes'.lower()):
+      if hdfs_site['dfs.permissions.enabled'] != 'true':
+        validationItems.append({"config-name": 'dfs.permissions.enabled',
+                                    "item": self.getWarnItem(
+                                      "dfs.permissions.enabled needs to be set to true if Ranger HDFS Plugin is enabled.")})
+
     if (not wire_encryption_enabled and   # If wire encryption is enabled at Hadoop, it disables all our checks
           core_site['hadoop.security.authentication'] == 'kerberos' and
           core_site['hadoop.security.authorization'] == 'true'):
@@ -339,6 +378,48 @@ class HDP22StackAdvisor(HDP21StackAdvisor):
                                       data_transfer_protection_value, VALID_TRANSFER_PROTECTION_VALUES))})
     return self.toConfigurationValidationProblems(validationItems, "hdfs-site")
 
+  def validateHIVEConfigurations(self, properties, recommendedDefaults, configurations, services, hosts):
+    hive_server2 = properties
+    validationItems = [] 
+    #Adding Ranger Plugin logic here 
+    ranger_plugin_properties = getSiteProperties(configurations, "ranger-hive-plugin-properties")
+    ranger_plugin_enabled = ranger_plugin_properties['ranger-hive-plugin-enabled']
+    servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
+    ##Add stack validations only if Ranger is enabled.
+    if ("RANGER" in servicesList):
+      ## Add stack validations for when the Ranger plugin is enabled.
+      if (ranger_plugin_enabled.lower() == 'Yes'.lower()):
+        prop_name = 'hive.security.authorization.manager'
+        prop_val = "com.xasecure.authorization.hive.authorizer.XaSecureHiveAuthorizerFactory"
+        if hive_server2[prop_name] != prop_val:
+          validationItems.append({"config-name": prop_name,
+                                  "item": self.getWarnItem(
+                                  "If Ranger HIVE Plugin is enabled."\
+                                  " {0} needs to be set to {1}".format(prop_name,prop_val))})
+        prop_name = 'hive.security.authenticator.manager'
+        prop_val = "org.apache.hadoop.hive.ql.security.SessionStateUserAuthenticator"
+        if hive_server2[prop_name] != prop_val:
+          validationItems.append({"config-name": prop_name,
+                                  "item": self.getWarnItem(
+                                  "If Ranger HIVE Plugin is enabled."\
+                                  " {0} needs to be set to {1}".format(prop_name,prop_val))})
+      ## Add stack validations for when the Ranger plugin is disabled.
+      elif (ranger_plugin_enabled.lower() == 'No'.lower()):
+        prop_name = 'hive.security.authorization.manager'
+        prop_val = "org.apache.hadoop.hive.ql.security.authorization.DefaultHiveAuthorizationProvider"
+        if hive_server2[prop_name] != prop_val:
+          validationItems.append({"config-name": prop_name,
+                                  "item": self.getWarnItem(
+                                  "If Ranger HIVE Plugin is disabled."\
+                                  " {0} needs to be set to {1}".format(prop_name,prop_val))})
+        prop_name = 'hive.security.authenticator.manager'
+        prop_val = "org.apache.hadoop.hive.ql.security.HadoopDefaultAuthenticator"
+        if hive_server2[prop_name] != prop_val:
+          validationItems.append({"config-name": prop_name,
+                                  "item": self.getWarnItem(
+                                  "If Ranger HIVE Plugin is disabled."\
+                                  " {0} needs to be set to {1}".format(prop_name,prop_val))})
+    return self.toConfigurationValidationProblems(validationItems, "hiveserver2-site")
 
   def validateHBASEConfigurations(self, properties, recommendedDefaults, configurations, services, hosts):
     hbase_site = properties
@@ -360,6 +441,33 @@ class HDP22StackAdvisor(HDP21StackAdvisor):
       validationItems.append({"config-name": prop_name1,
                               "item": self.getWarnItem(
                               "{0} and {1} sum should not exceed {2}".format(prop_name1, prop_name2, props_max_sum))})
+
+    #Adding Ranger Plugin logic here 
+    ranger_plugin_properties = getSiteProperties(configurations, "ranger-hbase-plugin-properties")
+    ranger_plugin_enabled = ranger_plugin_properties['ranger-hbase-plugin-enabled']
+    prop_name = 'hbase.security.authorization'
+    prop_val = "true"
+    servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
+    if ("RANGER" in servicesList) and (ranger_plugin_enabled.lower() == 'Yes'.lower()):
+      if hbase_site[prop_name] != prop_val:
+        validationItems.append({"config-name": prop_name,
+                                "item": self.getWarnItem(
+                                "If Ranger HBASE Plugin is enabled."\
+                                "{0} needs to be set to {1}".format(prop_name,prop_val))})
+      prop_name = "hbase.coprocessor.master.classes"
+      prop_val = "com.xasecure.authorization.hbase.XaSecureAuthorizationCoprocessor"
+      if hbase_site[prop_name] != prop_val:
+        validationItems.append({"config-name": prop_name,
+                                "item": self.getWarnItem(
+                                "If Ranger HBASE Plugin is enabled."\
+                                " {0} needs to be set to {1}".format(prop_name,prop_val))})
+      prop_name = "hbase.coprocessor.region.classes"
+      prop_val = "com.xasecure.authorization.hbase.XaSecureAuthorizationCoprocessor"
+      if hbase_site[prop_name] != prop_val:
+        validationItems.append({"config-name": prop_name,
+                                "item": self.getWarnItem(
+                                "If Ranger HBASE Plugin is enabled."\
+                                " {0} needs to be set to {1}".format(prop_name,prop_val))})
     return self.toConfigurationValidationProblems(validationItems, "hbase-site")
 
   def getMastersWithMultipleInstances(self):

http://git-wip-us.apache.org/repos/asf/ambari/blob/1052efd8/ambari-server/src/test/python/stacks/2.0.6/configs/client-upgrade.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/client-upgrade.json b/ambari-server/src/test/python/stacks/2.0.6/configs/client-upgrade.json
index 05aba97..6b6aff5 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/client-upgrade.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/client-upgrade.json
@@ -327,6 +327,12 @@
             "ipc.client.connection.maxidletime": "30000", 
             "ipc.client.connect.max.retries": "50"
         }, 
+        "ranger-hdfs-plugin-properties" : {
+            "ranger-hdfs-plugin-enabled":"yes"
+        },
+        "ranger-hbase-plugin-properties" : {
+            "ranger-hbase-plugin-enabled":"yes"
+        },
         "yarn-env": {
             "yarn_pid_dir_prefix": "/var/run/hadoop-yarn", 
             "apptimelineserver_heapsize": "1024", 

http://git-wip-us.apache.org/repos/asf/ambari/blob/1052efd8/ambari-server/src/test/python/stacks/2.0.6/configs/default.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/default.json b/ambari-server/src/test/python/stacks/2.0.6/configs/default.json
index 2be60b6..0bb7aa9 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/default.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/default.json
@@ -373,6 +373,12 @@
             "hive.server2.transport.mode": "binary",
             "hive.optimize.mapjoin.mapreduce": "true"
         }, 
+        "ranger-hive-plugin-properties": {
+            "ranger-hive-plugin-enabled":"yes"
+        },
+        "ranger-knox-plugin-properties": {
+            "ranger-knox-plugin-enabled":"yes"
+        },
         "yarn-site": {
             "yarn.nodemanager.disk-health-checker.min-healthy-disks": "0.25", 
             "yarn.nodemanager.container-executor.class": "org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor", 

http://git-wip-us.apache.org/repos/asf/ambari/blob/1052efd8/ambari-server/src/test/python/stacks/2.0.6/configs/default_client.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/default_client.json b/ambari-server/src/test/python/stacks/2.0.6/configs/default_client.json
index 8e643de..5d813f6 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/default_client.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/default_client.json
@@ -331,6 +331,9 @@
             "ipc.client.connection.maxidletime": "30000", 
             "ipc.client.connect.max.retries": "50"
         }, 
+        "ranger-hive-plugin-properties" : {
+            "ranger-hive-plugin-enabled":"yes"
+        },
         "hive-site": {
             "hive.enforce.sorting": "true", 
             "javax.jdo.option.ConnectionPassword": "!`\"' 1", 

http://git-wip-us.apache.org/repos/asf/ambari/blob/1052efd8/ambari-server/src/test/python/stacks/2.0.6/configs/ha_default.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/ha_default.json b/ambari-server/src/test/python/stacks/2.0.6/configs/ha_default.json
index 253747a..652ca7b 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/ha_default.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/ha_default.json
@@ -228,6 +228,9 @@
             "ipc.client.connection.maxidletime": "30000", 
             "hadoop.security.auth_to_local": "\n        RULE:[2:$1@$0]([rn]m@.*)s/.*/yarn/\n        RULE:[2:$1@$0](jhs@.*)s/.*/mapred/\n        RULE:[2:$1@$0]([nd]n@.*)s/.*/hdfs/\n        RULE:[2:$1@$0](hm@.*)s/.*/hbase/\n        RULE:[2:$1@$0](rs@.*)s/.*/hbase/\n        DEFAULT"
         }, 
+        "ranger-hdfs-plugin-properties" : {
+            "ranger-hdfs-plugin-enabled":"yes"
+        },
         "hdfs-log4j": {
             "log4j.appender.DRFA.layout": "org.apache.log4j.PatternLayout", 
             "log4j.appender.DRFA.layout.ConversionPattern": "%d{ISO8601} %p %c: %m%n", 

http://git-wip-us.apache.org/repos/asf/ambari/blob/1052efd8/ambari-server/src/test/python/stacks/2.0.6/configs/hbase-2.2.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/hbase-2.2.json b/ambari-server/src/test/python/stacks/2.0.6/configs/hbase-2.2.json
index 8e6b3d4..09ee63a 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/hbase-2.2.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/hbase-2.2.json
@@ -533,6 +533,9 @@
             "hbase_regionserver_heapsize": "1024m", 
             "hbase_log_dir": "/var/log/hbase"
         }, 
+        "ranger-hbase-plugin-properties": {
+            "ranger-hbase-plugin-enabled":"yes"
+        },        
         "ganglia-env": {
             "gmond_user": "nobody", 
             "ganglia_runtime_dir": "/var/run/ganglia/hdp", 

http://git-wip-us.apache.org/repos/asf/ambari/blob/1052efd8/ambari-server/src/test/python/stacks/2.0.6/configs/hbase-check-2.2.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/hbase-check-2.2.json b/ambari-server/src/test/python/stacks/2.0.6/configs/hbase-check-2.2.json
index 410e70e..824caf1 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/hbase-check-2.2.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/hbase-check-2.2.json
@@ -528,6 +528,9 @@
             "hbase_regionserver_heapsize": "1024m", 
             "hbase_log_dir": "/var/log/hbase"
         }, 
+        "ranger-hbase-plugin-properties": {
+            "ranger-hbase-plugin-enabled":"yes"
+        },        
         "ganglia-env": {
             "gmond_user": "nobody", 
             "ganglia_runtime_dir": "/var/run/ganglia/hdp", 

http://git-wip-us.apache.org/repos/asf/ambari/blob/1052efd8/ambari-server/src/test/python/stacks/2.0.6/configs/hbase-preupgrade.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/hbase-preupgrade.json b/ambari-server/src/test/python/stacks/2.0.6/configs/hbase-preupgrade.json
index ee46527..545bd13 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/hbase-preupgrade.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/hbase-preupgrade.json
@@ -82,6 +82,9 @@
             "hbase_regionserver_heapsize": "1024m", 
             "hbase_log_dir": "/var/log/hbase"
         }, 
+        "ranger-hbase-plugin-properties": {
+            "ranger-hbase-plugin-enabled":"yes"
+        },        
         "cluster-env": {
             "security_enabled": "false", 
             "hive_tar_source": "/usr/hdp/current/hive-client/hive.tar.gz", 

http://git-wip-us.apache.org/repos/asf/ambari/blob/1052efd8/ambari-server/src/test/python/stacks/2.0.6/configs/hbase-rs-2.2.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/hbase-rs-2.2.json b/ambari-server/src/test/python/stacks/2.0.6/configs/hbase-rs-2.2.json
index 89face6..eb93275 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/hbase-rs-2.2.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/hbase-rs-2.2.json
@@ -533,6 +533,9 @@
             "hbase_regionserver_heapsize": "1024m", 
             "hbase_log_dir": "/var/log/hbase"
         }, 
+        "ranger-hbase-plugin-properties": {
+            "ranger-hbase-plugin-enabled":"yes"
+        },
         "ganglia-env": {
             "gmond_user": "nobody", 
             "ganglia_runtime_dir": "/var/run/ganglia/hdp", 

http://git-wip-us.apache.org/repos/asf/ambari/blob/1052efd8/ambari-server/src/test/python/stacks/2.0.6/configs/secured.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/secured.json b/ambari-server/src/test/python/stacks/2.0.6/configs/secured.json
index 119ab88..ed46b33 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/secured.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/secured.json
@@ -368,6 +368,12 @@
             "hadoop.proxyuser.oozie.hosts": "c6402.ambari.apache.org", 
             "ipc.client.connection.maxidletime": "30000"
         }, 
+        "ranger-hdfs-plugin-properties" : {
+            "ranger-hdfs-plugin-enabled":"yes"
+        },
+        "ranger-hive-plugin-properties" : {
+            "ranger-hive-plugin-enabled":"yes"
+        },
         "hive-site": {
             "hive.enforce.sorting": "true", 
             "javax.jdo.option.ConnectionPassword": "!`\"' 1", 

http://git-wip-us.apache.org/repos/asf/ambari/blob/1052efd8/ambari-server/src/test/python/stacks/2.0.6/configs/secured_client.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/secured_client.json b/ambari-server/src/test/python/stacks/2.0.6/configs/secured_client.json
index 4faf0f8..7d70958 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/secured_client.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/secured_client.json
@@ -366,6 +366,9 @@
             "hadoop.proxyuser.oozie.hosts": "c6402.ambari.apache.org", 
             "ipc.client.connection.maxidletime": "30000"
         }, 
+        "ranger-hive-plugin-properties" : {
+            "ranger-hive-plugin-enabled":"yes"
+        },
         "hive-site": {
             "hive.enforce.sorting": "true", 
             "javax.jdo.option.ConnectionPassword": "!`\"' 1", 

http://git-wip-us.apache.org/repos/asf/ambari/blob/1052efd8/ambari-server/src/test/python/stacks/2.2/common/test_stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.2/common/test_stack_advisor.py b/ambari-server/src/test/python/stacks/2.2/common/test_stack_advisor.py
index 2b8ce44..f387bd1 100644
--- a/ambari-server/src/test/python/stacks/2.2/common/test_stack_advisor.py
+++ b/ambari-server/src/test/python/stacks/2.2/common/test_stack_advisor.py
@@ -159,10 +159,20 @@ class TestHDP22StackAdvisor(TestCase):
       },
       'core-site': {
         'properties': unsecure_cluster_core_site
+      },
+      'ranger-hdfs-plugin-properties':{
+          'properties': {'ranger-hdfs-plugin-enabled':'Yes'}
       }
     }
+    services = {"services":
+                    [{"StackServices":
+                          {"service_name" : "HDFS",
+                           "service_version" : "2.6.0.2.2",
+                           }
+                     }]
+                }
     expected = []  # No warnings
-    validation_problems = self.stackAdvisor.validateHDFSConfigurations(properties, recommendedDefaults, configurations, None, None)
+    validation_problems = self.stackAdvisor.validateHDFSConfigurations(properties, recommendedDefaults, configurations, services, None)
     self.assertEquals(validation_problems, expected)
 
     # TEST CASE: Unsecured cluster, unsecure ports
@@ -176,10 +186,22 @@ class TestHDP22StackAdvisor(TestCase):
         },
       'core-site': {
         'properties': unsecure_cluster_core_site
+      },
+      'ranger-hdfs-plugin-properties': {
+          'properties':{
+              'ranger-hdfs-plugin-enabled':'Yes'
+          }
       }
     }
     expected = []  # No warnings
-    validation_problems = self.stackAdvisor.validateHDFSConfigurations(properties, recommendedDefaults, configurations, None, None)
+    services = {"services":
+                [{"StackServices":
+                      {"service_name" : "HDFS",
+                       "service_version" : "2.6.0.2.2",
+                       }
+                 }]
+            }
+    validation_problems = self.stackAdvisor.validateHDFSConfigurations(properties, recommendedDefaults, configurations, services, None)
     self.assertEquals(validation_problems, expected)
 
     # TEST CASE: Secure cluster, invalid dfs.http.policy value
@@ -194,6 +216,11 @@ class TestHDP22StackAdvisor(TestCase):
       },
       'core-site': {
         'properties': secure_cluster_core_site
+      },
+      'ranger-hdfs-plugin-properties': {
+          'properties':{
+              'ranger-hdfs-plugin-enabled':'Yes'
+          }
       }
     }
     expected = [{'config-name': 'dfs.http.policy',
@@ -201,7 +228,14 @@ class TestHDP22StackAdvisor(TestCase):
                  'level': 'WARN',
                  'message': "Invalid property value: WRONG_VALUE. Valid values are ['HTTP_ONLY', 'HTTPS_ONLY', 'HTTP_AND_HTTPS']",
                  'type': 'configuration'}]
-    validation_problems = self.stackAdvisor.validateHDFSConfigurations(properties, recommendedDefaults, configurations, None, None)
+    services = {"services":
+            [{"StackServices":
+                  {"service_name" : "HDFS",
+                   "service_version" : "2.6.0.2.2",
+                   }
+             }]
+        }
+    validation_problems = self.stackAdvisor.validateHDFSConfigurations(properties, recommendedDefaults, configurations, services, None)
     self.assertEquals(validation_problems, expected)
 
     # TEST CASE: Secure cluster, dfs.http.policy=HTTPS_ONLY, https address not defined
@@ -215,10 +249,22 @@ class TestHDP22StackAdvisor(TestCase):
         },
       'core-site': {
         'properties': secure_cluster_core_site
+      },
+      'ranger-hdfs-plugin-properties': {
+          'properties':{
+              'ranger-hdfs-plugin-enabled':'Yes'
+          }
       }
     }
     expected = [ ]
-    validation_problems = self.stackAdvisor.validateHDFSConfigurations(properties, recommendedDefaults, configurations, None, None)
+    services = {"services":
+            [{"StackServices":
+                  {"service_name" : "HDFS",
+                   "service_version" : "2.6.0.2.2",
+                   }
+             }]
+        }
+    validation_problems = self.stackAdvisor.validateHDFSConfigurations(properties, recommendedDefaults, configurations, services, None)
     self.assertEquals(validation_problems, expected)
 
     # TEST CASE: Secure cluster, dfs.http.policy=HTTPS_ONLY, https address defined and secure
@@ -233,10 +279,22 @@ class TestHDP22StackAdvisor(TestCase):
         },
       'core-site': {
         'properties': secure_cluster_core_site
+      },
+      'ranger-hdfs-plugin-properties': {
+          'properties':{
+              'ranger-hdfs-plugin-enabled':'Yes'
+          }
       }
     }
     expected = []
-    validation_problems = self.stackAdvisor.validateHDFSConfigurations(properties, recommendedDefaults, configurations, None, None)
+    services = {"services":
+            [{"StackServices":
+                  {"service_name" : "HDFS",
+                   "service_version" : "2.6.0.2.2",
+                   }
+             }]
+        }
+    validation_problems = self.stackAdvisor.validateHDFSConfigurations(properties, recommendedDefaults, configurations, services, None)
     self.assertEquals(validation_problems, expected)
 
     # TEST CASE: Secure cluster, dfs.http.policy=HTTPS_ONLY, https address defined and non secure
@@ -251,10 +309,22 @@ class TestHDP22StackAdvisor(TestCase):
       },
       'core-site': {
         'properties': secure_cluster_core_site
+      },
+      'ranger-hdfs-plugin-properties': {
+          'properties':{
+              'ranger-hdfs-plugin-enabled':'Yes'
+          }
       }
     }
     expected = []
-    validation_problems = self.stackAdvisor.validateHDFSConfigurations(properties, recommendedDefaults, configurations, None, None)
+    services = {"services":
+            [{"StackServices":
+                  {"service_name" : "HDFS",
+                   "service_version" : "2.6.0.2.2",
+                   }
+             }]
+        }
+    validation_problems = self.stackAdvisor.validateHDFSConfigurations(properties, recommendedDefaults, configurations, services, None)
     self.assertEquals(validation_problems, expected)
 
     # TEST CASE: Secure cluster, dfs.http.policy=HTTPS_ONLY, non secure dfs port, https property not defined
@@ -268,7 +338,13 @@ class TestHDP22StackAdvisor(TestCase):
       },
       'core-site': {
         'properties': secure_cluster_core_site
+      },
+      'ranger-hdfs-plugin-properties': {
+          'properties':{
+              'ranger-hdfs-plugin-enabled':'Yes'
+          }
       }
+
     }
     expected = [{'config-name': 'dfs.datanode.address',
                  'config-type': 'hdfs-site',
@@ -298,7 +374,7 @@ class TestHDP22StackAdvisor(TestCase):
                             "order to be able to use HTTPS.",
                  'type': 'configuration'}
     ]
-    validation_problems = self.stackAdvisor.validateHDFSConfigurations(properties, recommendedDefaults, configurations, None, None)
+    validation_problems = self.stackAdvisor.validateHDFSConfigurations(properties, recommendedDefaults, configurations, services, None)
     self.assertEquals(validation_problems, expected)
 
 
@@ -314,6 +390,11 @@ class TestHDP22StackAdvisor(TestCase):
         },
       'core-site': {
         'properties': secure_cluster_core_site
+      },
+      'ranger-hdfs-plugin-properties': {
+          'properties':{
+              'ranger-hdfs-plugin-enabled':'Yes'
+          }
       }
     }
     expected = [{'config-name': 'dfs.datanode.address',
@@ -343,7 +424,7 @@ class TestHDP22StackAdvisor(TestCase):
                             "able to use HTTPS.",
                  'type': 'configuration'}
     ]
-    validation_problems = self.stackAdvisor.validateHDFSConfigurations(properties, recommendedDefaults, configurations, None, None)
+    validation_problems = self.stackAdvisor.validateHDFSConfigurations(properties, recommendedDefaults, configurations, services, None)
     self.assertEquals(validation_problems, expected)
 
     # TEST CASE: Secure cluster, dfs.http.policy=HTTPS_ONLY, valid non-root configuration
@@ -359,10 +440,15 @@ class TestHDP22StackAdvisor(TestCase):
       },
       'core-site': {
         'properties': secure_cluster_core_site
+      },
+      'ranger-hdfs-plugin-properties': {
+          'properties':{
+              'ranger-hdfs-plugin-enabled':'Yes'
+          }
       }
     }
     expected = []
-    validation_problems = self.stackAdvisor.validateHDFSConfigurations(properties, recommendedDefaults, configurations, None, None)
+    validation_problems = self.stackAdvisor.validateHDFSConfigurations(properties, recommendedDefaults, configurations, services, None)
     self.assertEquals(validation_problems, expected)
 
     # TEST CASE: Secure cluster, dfs.http.policy=HTTP_ONLY, insecure port
@@ -377,6 +463,11 @@ class TestHDP22StackAdvisor(TestCase):
       },
       'core-site': {
         'properties': secure_cluster_core_site
+      },
+      'ranger-hdfs-plugin-properties': {
+          'properties':{
+              'ranger-hdfs-plugin-enabled':'Yes'
+          }
       }
     }
     expected = [{'config-name': 'dfs.datanode.address',
@@ -398,7 +489,7 @@ class TestHDP22StackAdvisor(TestCase):
                             "['dfs.datanode.address', 'dfs.datanode.http.address'] use secure ports.",
                  'type': 'configuration'}
                 ]
-    validation_problems = self.stackAdvisor.validateHDFSConfigurations(properties, recommendedDefaults, configurations, None, None)
+    validation_problems = self.stackAdvisor.validateHDFSConfigurations(properties, recommendedDefaults, configurations, services, None)
     self.assertEquals(validation_problems, expected)
 
     # TEST CASE: Secure cluster, dfs.http.policy=HTTP_ONLY, valid configuration
@@ -413,10 +504,15 @@ class TestHDP22StackAdvisor(TestCase):
         },
       'core-site': {
         'properties': secure_cluster_core_site
+      },
+      'ranger-hdfs-plugin-properties': {
+          'properties':{
+              'ranger-hdfs-plugin-enabled':'Yes'
+          }
       }
     }
     expected = []
-    validation_problems = self.stackAdvisor.validateHDFSConfigurations(properties, recommendedDefaults, configurations, None, None)
+    validation_problems = self.stackAdvisor.validateHDFSConfigurations(properties, recommendedDefaults, configurations, services, None)
     self.assertEquals(validation_problems, expected)
 
     # TEST CASE: Secure cluster, absent dfs.http.policy (typical situation)
@@ -430,10 +526,15 @@ class TestHDP22StackAdvisor(TestCase):
         },
       'core-site': {
         'properties': secure_cluster_core_site
+      },
+      'ranger-hdfs-plugin-properties': {
+          'properties':{
+              'ranger-hdfs-plugin-enabled':'Yes'
+          }
       }
     }
     expected = []
-    validation_problems = self.stackAdvisor.validateHDFSConfigurations(properties, recommendedDefaults, configurations, None, None)
+    validation_problems = self.stackAdvisor.validateHDFSConfigurations(properties, recommendedDefaults, configurations, services, None)
     self.assertEquals(validation_problems, expected)
 
     # TEST CASE: Secure cluster, dfs.http.policy=HTTP_ONLY, misusage of dfs.data.transfer.protection warning
@@ -449,6 +550,11 @@ class TestHDP22StackAdvisor(TestCase):
         },
       'core-site': {
         'properties': secure_cluster_core_site
+      },
+      'ranger-hdfs-plugin-properties': {
+          'properties':{
+              'ranger-hdfs-plugin-enabled':'Yes'
+          }
       }
     }
     expected = [{'config-name': 'dfs.data.transfer.protection',
@@ -457,7 +563,7 @@ class TestHDP22StackAdvisor(TestCase):
                  'message': "dfs.data.transfer.protection property can not be used when dfs.http.policy is "
                             "set to any value other then HTTPS_ONLY. Tip: When dfs.http.policy property is not defined, it defaults to HTTP_ONLY",
                  'type': 'configuration'}]
-    validation_problems = self.stackAdvisor.validateHDFSConfigurations(properties, recommendedDefaults, configurations, None, None)
+    validation_problems = self.stackAdvisor.validateHDFSConfigurations(properties, recommendedDefaults, configurations, services, None)
     self.assertEquals(validation_problems, expected)
 
     # TEST CASE: Secure cluster, dfs.http.policy=HTTPS_ONLY, wrong dfs.data.transfer.protection value
@@ -473,6 +579,11 @@ class TestHDP22StackAdvisor(TestCase):
       },
       'core-site': {
         'properties': secure_cluster_core_site
+      },
+      'ranger-hdfs-plugin-properties': {
+          'properties':{
+              'ranger-hdfs-plugin-enabled':'Yes'
+          }
       }
     }
     expected = [{'config-name': 'dfs.data.transfer.protection',
@@ -480,7 +591,7 @@ class TestHDP22StackAdvisor(TestCase):
                  'level': 'WARN',
                  'message': "Invalid property value: WRONG_VALUE. Valid values are ['authentication', 'integrity', 'privacy'].",
                  'type': 'configuration'}]
-    validation_problems = self.stackAdvisor.validateHDFSConfigurations(properties, recommendedDefaults, configurations, None, None)
+    validation_problems = self.stackAdvisor.validateHDFSConfigurations(properties, recommendedDefaults, configurations, services, None)
     self.assertEquals(validation_problems, expected)
 
     # TEST CASE: Hadoop wire encryption enabled
@@ -496,10 +607,15 @@ class TestHDP22StackAdvisor(TestCase):
       },
       'core-site': {
         'properties': secure_cluster_core_site
+      },
+      'ranger-hdfs-plugin-properties': {
+          'properties':{
+              'ranger-hdfs-plugin-enabled':'Yes'
+          }
       }
     }
     expected = []  # No warnings
-    validation_problems = self.stackAdvisor.validateHDFSConfigurations(properties, recommendedDefaults, configurations, None, None)
+    validation_problems = self.stackAdvisor.validateHDFSConfigurations(properties, recommendedDefaults, configurations, services, None)
     self.assertEquals(validation_problems, expected)
 
   def test_recommendYARNConfigurations(self):
@@ -631,11 +747,22 @@ class TestHDP22StackAdvisor(TestCase):
       'hdfs-site': {
         'properties': {
           'dfs.datanode.max.transfer.threads': '16384'
-        }
+        },
       }
     }
-
-    self.stackAdvisor.recommendHDFSConfigurations(configurations, clusterData, '', '')
+    services = {"services":
+                    [{"StackServices":
+                          {"service_name" : "HDFS",
+                           "service_version" : "2.6.0.2.2",
+                           }
+                     }],
+                "configurations": {
+                    'ranger-hdfs-plugin-properties':{
+                        "properties": {"ranger-hdfs-plugin-enabled":"Yes"}
+                    }
+                }
+                }
+    self.stackAdvisor.recommendHDFSConfigurations(configurations, clusterData, services, '')
     self.assertEquals(configurations, expected)
 
   def test_validateHDFSConfigurationsEnv(self):

http://git-wip-us.apache.org/repos/asf/ambari/blob/1052efd8/ambari-server/src/test/python/stacks/2.2/configs/default.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.2/configs/default.json b/ambari-server/src/test/python/stacks/2.2/configs/default.json
index 15ffb66..23abc72 100644
--- a/ambari-server/src/test/python/stacks/2.2/configs/default.json
+++ b/ambari-server/src/test/python/stacks/2.2/configs/default.json
@@ -150,9 +150,13 @@
         "kafka.ganglia.metrics.port": "8649",
         "log.index.interval.bytes": "4096",
         "log.retention.hours": "168"
-      }
-
-
+      },
+      "ranger-hbase-plugin-properties": {
+            "ranger-hbase-plugin-enabled":"yes"
+      },
+      "ranger-hive-plugin-properties": {
+            "ranger-hive-plugin-enabled":"yes"
+       }
     },
     "configuration_attributes": {
         "yarn-site": {

http://git-wip-us.apache.org/repos/asf/ambari/blob/1052efd8/ambari-server/src/test/python/stacks/2.2/configs/hive-upgrade.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.2/configs/hive-upgrade.json b/ambari-server/src/test/python/stacks/2.2/configs/hive-upgrade.json
index 35aedc0..8ef4bd9 100644
--- a/ambari-server/src/test/python/stacks/2.2/configs/hive-upgrade.json
+++ b/ambari-server/src/test/python/stacks/2.2/configs/hive-upgrade.json
@@ -400,6 +400,9 @@
             "pig_tar_destination_folder": "hdfs:///hdp/apps/{{ hdp_stack_version }}/pig/",
             "sink.dbpassword": "",
             "sink_database": "Existing MSSQL Server database with sql auth"
+        },
+        "ranger-hive-plugin-properties" : {
+            "ranger-hive-plugin-enabled":"yes"
         }
     },
     "configurationTags": {

http://git-wip-us.apache.org/repos/asf/ambari/blob/1052efd8/ambari-web/app/config.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/config.js b/ambari-web/app/config.js
index 8beba79..f05adc7 100644
--- a/ambari-web/app/config.js
+++ b/ambari-web/app/config.js
@@ -64,6 +64,7 @@ App.supports = {
   alwaysEnableManagedMySQLForHive: true,
   preKerberizeCheck: false,
   automatedKerberos: true,
+  ranger: false,
   customizeAgentUserAccount: false,
   installGanglia: false,
   opsDuringRollingUpgrade: false


Mime
View raw message