ambari-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From stoa...@apache.org
Subject ambari git commit: AMBARI-15978. Blueprint processor does not replace localhost in xasecure.audit.destination.hdfs.dir property. (stoader)
Date Wed, 20 Apr 2016 09:37:58 GMT
Repository: ambari
Updated Branches:
  refs/heads/trunk 172eb1111 -> a26fc8670


AMBARI-15978. Blueprint processor does not replace localhost in xasecure.audit.destination.hdfs.dir
property. (stoader)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/a26fc867
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/a26fc867
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/a26fc867

Branch: refs/heads/trunk
Commit: a26fc867024f91705f8e3e32c6bbc47e6edf89bd
Parents: 172eb11
Author: Toader, Sebastian <stoader@hortonworks.com>
Authored: Wed Apr 20 11:37:47 2016 +0200
Committer: Toader, Sebastian <stoader@hortonworks.com>
Committed: Wed Apr 20 11:37:47 2016 +0200

----------------------------------------------------------------------
 .../BlueprintConfigurationProcessor.java        |  39 ++-
 .../BlueprintConfigurationProcessorTest.java    | 334 +++++++++++++++++++
 2 files changed, 370 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/a26fc867/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessor.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessor.java
b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessor.java
index 5e8241b..29f937a 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessor.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessor.java
@@ -48,6 +48,7 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import com.google.common.base.Predicates;
+import com.google.common.collect.ImmutableList;
 import com.google.common.collect.Maps;
 import com.google.common.collect.Sets;
 
@@ -148,7 +149,7 @@ public class BlueprintConfigurationProcessor {
    *   expected hostname information is not found.
    */
   private static Set<String> configPropertiesWithHASupport =
-    new HashSet<String>(Arrays.asList("fs.defaultFS", "hbase.rootdir", "instance.volumes", "policymgr_external_url"));
+    new HashSet<String>(Arrays.asList("fs.defaultFS", "hbase.rootdir", "instance.volumes", "policymgr_external_url", "xasecure.audit.destination.hdfs.dir"));
 
   /**
    * Statically-defined list of filters to apply on property exports.
@@ -2250,6 +2251,14 @@ public class BlueprintConfigurationProcessor {
     Map<String, PropertyUpdater> multiRangerKmsSiteMap = new HashMap<String, PropertyUpdater>();
     Map<String, PropertyUpdater> dbHiveSiteMap = new HashMap<String, PropertyUpdater>();
     Map<String, PropertyUpdater> rangerAdminPropsMap = new HashMap<String, PropertyUpdater>();
+    Map<String, PropertyUpdater> rangerEnvPropsMap = new HashMap<String, PropertyUpdater>();
+    Map<String, PropertyUpdater> rangerYarnAuditPropsMap = new HashMap<String, PropertyUpdater>();
+    Map<String, PropertyUpdater> rangerHdfsAuditPropsMap = new HashMap<String, PropertyUpdater>();
+    Map<String, PropertyUpdater> rangerHbaseAuditPropsMap = new HashMap<String, PropertyUpdater>();
+    Map<String, PropertyUpdater> rangerHiveAuditPropsMap = new HashMap<String, PropertyUpdater>();
+    Map<String, PropertyUpdater> rangerKnoxAuditPropsMap = new HashMap<String, PropertyUpdater>();
+    Map<String, PropertyUpdater> rangerKafkaAuditPropsMap = new HashMap<String, PropertyUpdater>();
+    Map<String, PropertyUpdater> rangerStormAuditPropsMap = new HashMap<String, PropertyUpdater>();
     Map<String, PropertyUpdater> hawqSiteMap = new HashMap<String, PropertyUpdater>();
     Map<String, PropertyUpdater> zookeeperEnvMap = new HashMap<String, PropertyUpdater>();
 
@@ -2271,6 +2280,15 @@ public class BlueprintConfigurationProcessor {
     singleHostTopologyUpdaters.put("kafka-broker", kafkaBrokerMap);
     singleHostTopologyUpdaters.put("application-properties", atlasPropsMap);
     singleHostTopologyUpdaters.put("admin-properties", rangerAdminPropsMap);
+    singleHostTopologyUpdaters.put("ranger-env", rangerEnvPropsMap);
+    singleHostTopologyUpdaters.put("ranger-yarn-audit", rangerYarnAuditPropsMap);
+    singleHostTopologyUpdaters.put("ranger-hdfs-audit", rangerHdfsAuditPropsMap);
+    singleHostTopologyUpdaters.put("ranger-hbase-audit", rangerHbaseAuditPropsMap);
+    singleHostTopologyUpdaters.put("ranger-hive-audit", rangerHiveAuditPropsMap);
+    singleHostTopologyUpdaters.put("ranger-knox-audit", rangerKnoxAuditPropsMap);
+    singleHostTopologyUpdaters.put("ranger-kafka-audit", rangerKafkaAuditPropsMap);
+    singleHostTopologyUpdaters.put("ranger-storm-audit", rangerStormAuditPropsMap);
+
     singleHostTopologyUpdaters.put("hawq-site", hawqSiteMap);
     singleHostTopologyUpdaters.put("zookeeper-env", zookeeperEnvMap);
 
@@ -2430,7 +2448,7 @@ public class BlueprintConfigurationProcessor {
                                            ClusterTopology topology) {
         if (topology.getBlueprint().getServices().contains("ATLAS")) {
           String host = topology.getHostAssignmentsForComponent("ATLAS_SERVER").iterator().next();
-          
+
           boolean tlsEnabled = Boolean.parseBoolean(properties.get("application-properties").get("atlas.enableTLS"));
           String scheme;
           String port;
@@ -2441,7 +2459,7 @@ public class BlueprintConfigurationProcessor {
             scheme = "http";
             port = properties.get("application-properties").get("atlas.server.http.port");
           }
-  
+
           return String.format("%s://%s:%s", scheme, host, port);
         }
         return origValue;
@@ -2543,6 +2561,21 @@ public class BlueprintConfigurationProcessor {
     // RANGER_ADMIN
     rangerAdminPropsMap.put("policymgr_external_url", new SingleHostTopologyUpdater("RANGER_ADMIN"));
 
+    // RANGER ENV
+    List<Map<String, PropertyUpdater>> configsWithRangerHdfsAuditDirProperty = ImmutableList.of(
+      rangerEnvPropsMap,
+      rangerYarnAuditPropsMap,
+      rangerHdfsAuditPropsMap,
+      rangerHbaseAuditPropsMap,
+      rangerHiveAuditPropsMap,
+      rangerKnoxAuditPropsMap,
+      rangerKafkaAuditPropsMap,
+      rangerStormAuditPropsMap
+    );
+    for (Map<String, PropertyUpdater> rangerAuditPropsMap: configsWithRangerHdfsAuditDirProperty) {
+      rangerAuditPropsMap.put("xasecure.audit.destination.hdfs.dir", new SingleHostTopologyUpdater("NAMENODE")); // the same prop updater must be used as for fs.defaultFS in core-site
+    }
+
     // RANGER KMS
     multiRangerKmsSiteMap.put("hadoop.kms.authentication.signer.secret.provider.zookeeper.connection.string",
       new MultipleHostTopologyUpdater("ZOOKEEPER_SERVER"));

http://git-wip-us.apache.org/repos/asf/ambari/blob/a26fc867/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessorTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessorTest.java
b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessorTest.java
index dd26c75..2759869 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessorTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessorTest.java
@@ -6058,6 +6058,340 @@ public class BlueprintConfigurationProcessorTest {
   }
 
   @Test
+  public void testRangerEnv_defaults() throws Exception {
+    // Given
+    List<String> configTypesWithRangerHdfsAuditDir = ImmutableList.of(
+      "ranger-env",
+      "ranger-yarn-audit",
+      "ranger-hdfs-audit",
+      "ranger-hbase-audit",
+      "ranger-hive-audit",
+      "ranger-knox-audit",
+      "ranger-kafka-audit",
+      "ranger-storm-audit"
+    );
+    Map<String, Map<String, String>> clusterConfigProperties = new HashMap<>();
+
+    for (String configType: configTypesWithRangerHdfsAuditDir) {
+      Map<String, String> configProperties = new HashMap<>();
+      configProperties.put("xasecure.audit.destination.hdfs.dir", "hdfs://localhost:100");
+
+      clusterConfigProperties.put(configType, configProperties);
+    }
+
+
+
+    Map<String, Map<String, String>> parentProperties = new HashMap<>();
+    Configuration parentClusterConfig = new Configuration(parentProperties, new HashMap<String, Map<String, Map<String, String>>>());
+    Configuration clusterConfig = new Configuration(clusterConfigProperties, new HashMap<String, Map<String, Map<String, String>>>(), parentClusterConfig);
+
+    Collection<String> rangerComponents = new HashSet<>();
+    rangerComponents.add("RANGER_ADMIN");
+    rangerComponents.add("RANGER_USERSYNC");
+
+    Collection<String> hdfsComponents = new HashSet<String>();
+    hdfsComponents.add("NAMENODE");
+    hdfsComponents.add("DATANODE");
+
+    TestHostGroup group1 = new TestHostGroup("group1", rangerComponents, Collections.singleton("host1"));
+    group1.components.add("DATANODE");
+
+
+    TestHostGroup group2 = new TestHostGroup("group2", hdfsComponents, Collections.singleton("nn_host"));
+
+
+    Collection<TestHostGroup> hostGroups = Lists.newArrayList(group1, group2);
+
+    ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups);
+    BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology);
+
+    // When
+    configProcessor.doUpdateForClusterCreate();
+
+    // Then
+    String expectedAuditHdfsDir = "hdfs://nn_host:100";
+
+    assertEquals(expectedAuditHdfsDir, clusterConfig.getPropertyValue("ranger-env", "xasecure.audit.destination.hdfs.dir"));
+    assertEquals(expectedAuditHdfsDir, clusterConfig.getPropertyValue("ranger-yarn-audit", "xasecure.audit.destination.hdfs.dir"));
+    assertEquals(expectedAuditHdfsDir, clusterConfig.getPropertyValue("ranger-hdfs-audit", "xasecure.audit.destination.hdfs.dir"));
+    assertEquals(expectedAuditHdfsDir, clusterConfig.getPropertyValue("ranger-hbase-audit", "xasecure.audit.destination.hdfs.dir"));
+    assertEquals(expectedAuditHdfsDir, clusterConfig.getPropertyValue("ranger-hive-audit", "xasecure.audit.destination.hdfs.dir"));
+    assertEquals(expectedAuditHdfsDir, clusterConfig.getPropertyValue("ranger-knox-audit", "xasecure.audit.destination.hdfs.dir"));
+    assertEquals(expectedAuditHdfsDir, clusterConfig.getPropertyValue("ranger-kafka-audit", "xasecure.audit.destination.hdfs.dir"));
+    assertEquals(expectedAuditHdfsDir, clusterConfig.getPropertyValue("ranger-storm-audit", "xasecure.audit.destination.hdfs.dir"));
+  }
+
+
+  @Test
+  public void testRangerEnv() throws Exception {
+    // Given
+    List<String> configTypesWithRangerHdfsAuditDir = ImmutableList.of(
+      "ranger-env",
+      "ranger-yarn-audit",
+      "ranger-hdfs-audit",
+      "ranger-hbase-audit",
+      "ranger-hive-audit",
+      "ranger-knox-audit",
+      "ranger-kafka-audit",
+      "ranger-storm-audit"
+    );
+    Map<String, Map<String, String>> clusterConfigProperties = new HashMap<>();
+
+    for (String configType: configTypesWithRangerHdfsAuditDir) {
+      Map<String, String> configProperties = new HashMap<>();
+      configProperties.put("xasecure.audit.destination.hdfs.dir", "hdfs://%HOSTGROUP::group2%:100");
+
+      clusterConfigProperties.put(configType, configProperties);
+    }
+
+
+    Map<String, Map<String, String>> parentProperties = new HashMap<>();
+    Configuration parentClusterConfig = new Configuration(parentProperties, new HashMap<String, Map<String, Map<String, String>>>());
+    Configuration clusterConfig = new Configuration(clusterConfigProperties, new HashMap<String, Map<String, Map<String, String>>>(), parentClusterConfig);
+
+    Collection<String> rangerComponents = new HashSet<>();
+    rangerComponents.add("RANGER_ADMIN");
+    rangerComponents.add("RANGER_USERSYNC");
+
+    Collection<String> hdfsComponents = new HashSet<String>();
+    hdfsComponents.add("NAMENODE");
+    hdfsComponents.add("DATANODE");
+
+    TestHostGroup group1 = new TestHostGroup("group1", rangerComponents, Collections.singleton("host1"));
+    group1.components.add("DATANODE");
+
+
+    TestHostGroup group2 = new TestHostGroup("group2", hdfsComponents, Collections.singleton("nn_host"));
+
+
+    Collection<TestHostGroup> hostGroups = Lists.newArrayList(group1, group2);
+
+    ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups);
+    BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology);
+
+    // When
+    configProcessor.doUpdateForClusterCreate();
+
+    // Then
+    String expectedAuditHdfsDir = "hdfs://nn_host:100";
+
+    assertEquals(expectedAuditHdfsDir, clusterConfig.getPropertyValue("ranger-env", "xasecure.audit.destination.hdfs.dir"));
+    assertEquals(expectedAuditHdfsDir, clusterConfig.getPropertyValue("ranger-yarn-audit", "xasecure.audit.destination.hdfs.dir"));
+    assertEquals(expectedAuditHdfsDir, clusterConfig.getPropertyValue("ranger-hdfs-audit", "xasecure.audit.destination.hdfs.dir"));
+    assertEquals(expectedAuditHdfsDir, clusterConfig.getPropertyValue("ranger-hbase-audit", "xasecure.audit.destination.hdfs.dir"));
+    assertEquals(expectedAuditHdfsDir, clusterConfig.getPropertyValue("ranger-hive-audit", "xasecure.audit.destination.hdfs.dir"));
+    assertEquals(expectedAuditHdfsDir, clusterConfig.getPropertyValue("ranger-knox-audit", "xasecure.audit.destination.hdfs.dir"));
+    assertEquals(expectedAuditHdfsDir, clusterConfig.getPropertyValue("ranger-kafka-audit", "xasecure.audit.destination.hdfs.dir"));
+    assertEquals(expectedAuditHdfsDir, clusterConfig.getPropertyValue("ranger-storm-audit", "xasecure.audit.destination.hdfs.dir"));
+  }
+
+
+  @Test
+  public void testRangerEnvWithHdfsHA() throws Exception {
+    // Given
+    List<String> configTypesWithRangerHdfsAuditDir = ImmutableList.of(
+      "ranger-env",
+      "ranger-yarn-audit",
+      "ranger-hdfs-audit",
+      "ranger-hbase-audit",
+      "ranger-hive-audit",
+      "ranger-knox-audit",
+      "ranger-kafka-audit",
+      "ranger-storm-audit"
+    );
+    Map<String, Map<String, String>> clusterConfigProperties = new HashMap<>();
+
+    for (String configType: configTypesWithRangerHdfsAuditDir) {
+      Map<String, String> configProperties = new HashMap<>();
+      configProperties.put("xasecure.audit.destination.hdfs.dir", "hdfs://my_name_service:100");
+
+      clusterConfigProperties.put(configType, configProperties);
+    }
+
+
+    // DFS name service
+    final String hdfsSiteConfigType = "hdfs-site";
+    Map<String, String> hdfsSiteProperties = new HashMap<>();
+    clusterConfigProperties.put(hdfsSiteConfigType, hdfsSiteProperties);
+    hdfsSiteProperties.put("dfs.nameservices", "my_name_service");
+    hdfsSiteProperties.put("dfs.ha.namenodes.my_name_service", "nn1,nn2");
+
+
+    Map<String, Map<String, String>> parentProperties = new HashMap<>();
+    Configuration parentClusterConfig = new Configuration(parentProperties, new HashMap<String, Map<String, Map<String, String>>>());
+    Configuration clusterConfig = new Configuration(clusterConfigProperties, new HashMap<String, Map<String, Map<String, String>>>(), parentClusterConfig);
+
+    Collection<String> rangerComponents = new HashSet<>();
+    rangerComponents.add("RANGER_ADMIN");
+    rangerComponents.add("RANGER_USERSYNC");
+
+    Collection<String> hdfsComponents = new HashSet<String>();
+    hdfsComponents.add("NAMENODE");
+    hdfsComponents.add("DATANODE");
+
+    TestHostGroup group1 = new TestHostGroup("group1", rangerComponents, Collections.singleton("host1"));
+    group1.components.addAll(hdfsComponents);
+
+
+    TestHostGroup group2 = new TestHostGroup("group2", hdfsComponents, Collections.singleton("host2"));
+
+
+    Collection<TestHostGroup> hostGroups = Lists.newArrayList(group1, group2);
+
+    ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups);
+    BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology);
+
+    // When
+    configProcessor.doUpdateForClusterCreate();
+
+    // Then
+    String expectedAuditHdfsDir = "hdfs://my_name_service:100";
+
+    assertEquals(expectedAuditHdfsDir, clusterConfig.getPropertyValue("ranger-env", "xasecure.audit.destination.hdfs.dir"));
+    assertEquals(expectedAuditHdfsDir, clusterConfig.getPropertyValue("ranger-yarn-audit", "xasecure.audit.destination.hdfs.dir"));
+    assertEquals(expectedAuditHdfsDir, clusterConfig.getPropertyValue("ranger-hdfs-audit", "xasecure.audit.destination.hdfs.dir"));
+    assertEquals(expectedAuditHdfsDir, clusterConfig.getPropertyValue("ranger-hbase-audit", "xasecure.audit.destination.hdfs.dir"));
+    assertEquals(expectedAuditHdfsDir, clusterConfig.getPropertyValue("ranger-hive-audit", "xasecure.audit.destination.hdfs.dir"));
+    assertEquals(expectedAuditHdfsDir, clusterConfig.getPropertyValue("ranger-knox-audit", "xasecure.audit.destination.hdfs.dir"));
+    assertEquals(expectedAuditHdfsDir, clusterConfig.getPropertyValue("ranger-kafka-audit", "xasecure.audit.destination.hdfs.dir"));
+    assertEquals(expectedAuditHdfsDir, clusterConfig.getPropertyValue("ranger-storm-audit", "xasecure.audit.destination.hdfs.dir"));
+  }
+
+
+  @Test
+  public void testRangerEnvBlueprintExport() throws Exception {
+    // Given
+    List<String> configTypesWithRangerHdfsAuditDir = ImmutableList.of(
+      "ranger-env",
+      "ranger-yarn-audit",
+      "ranger-hdfs-audit",
+      "ranger-hbase-audit",
+      "ranger-hive-audit",
+      "ranger-knox-audit",
+      "ranger-kafka-audit",
+      "ranger-storm-audit"
+    );
+    Map<String, Map<String, String>> clusterConfigProperties = new HashMap<>();
+
+    for (String configType: configTypesWithRangerHdfsAuditDir) {
+      Map<String, String> configProperties = new HashMap<>();
+      configProperties.put("xasecure.audit.destination.hdfs.dir", "hdfs://nn_host:100");
+
+      clusterConfigProperties.put(configType, configProperties);
+    }
+
+
+    Map<String, Map<String, String>> parentProperties = new HashMap<>();
+    Configuration parentClusterConfig = new Configuration(parentProperties, new HashMap<String, Map<String, Map<String, String>>>());
+    Configuration clusterConfig = new Configuration(clusterConfigProperties, new HashMap<String, Map<String, Map<String, String>>>(), parentClusterConfig);
+
+    Collection<String> rangerComponents = new HashSet<>();
+    rangerComponents.add("RANGER_ADMIN");
+    rangerComponents.add("RANGER_USERSYNC");
+
+    Collection<String> hdfsComponents = new HashSet<String>();
+    hdfsComponents.add("NAMENODE");
+    hdfsComponents.add("DATANODE");
+
+    TestHostGroup group1 = new TestHostGroup("group1", rangerComponents, Collections.singleton("host1"));
+    group1.components.add("DATANODE");
+
+
+    TestHostGroup group2 = new TestHostGroup("group2", hdfsComponents, Collections.singleton("nn_host"));
+
+
+    Collection<TestHostGroup> hostGroups = Lists.newArrayList(group1, group2);
+
+    ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups);
+    BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology);
+
+    // When
+    configProcessor.doUpdateForBlueprintExport();
+
+    // Then
+    String expectedAuditHdfsDir = "hdfs://%HOSTGROUP::group2%:100";
+
+    assertEquals(expectedAuditHdfsDir, clusterConfig.getPropertyValue("ranger-env", "xasecure.audit.destination.hdfs.dir"));
+    assertEquals(expectedAuditHdfsDir, clusterConfig.getPropertyValue("ranger-yarn-audit", "xasecure.audit.destination.hdfs.dir"));
+    assertEquals(expectedAuditHdfsDir, clusterConfig.getPropertyValue("ranger-hdfs-audit", "xasecure.audit.destination.hdfs.dir"));
+    assertEquals(expectedAuditHdfsDir, clusterConfig.getPropertyValue("ranger-hbase-audit", "xasecure.audit.destination.hdfs.dir"));
+    assertEquals(expectedAuditHdfsDir, clusterConfig.getPropertyValue("ranger-hive-audit", "xasecure.audit.destination.hdfs.dir"));
+    assertEquals(expectedAuditHdfsDir, clusterConfig.getPropertyValue("ranger-knox-audit", "xasecure.audit.destination.hdfs.dir"));
+    assertEquals(expectedAuditHdfsDir, clusterConfig.getPropertyValue("ranger-kafka-audit", "xasecure.audit.destination.hdfs.dir"));
+    assertEquals(expectedAuditHdfsDir, clusterConfig.getPropertyValue("ranger-storm-audit", "xasecure.audit.destination.hdfs.dir"));
+  }
+
+  @Test
+  public void testRangerEnvExportBlueprintWithHdfsHA() throws Exception {
+    // Given
+    List<String> configTypesWithRangerHdfsAuditDir = ImmutableList.of(
+      "ranger-env",
+      "ranger-yarn-audit",
+      "ranger-hdfs-audit",
+      "ranger-hbase-audit",
+      "ranger-hive-audit",
+      "ranger-knox-audit",
+      "ranger-kafka-audit",
+      "ranger-storm-audit"
+    );
+    Map<String, Map<String, String>> clusterConfigProperties = new HashMap<>();
+
+    for (String configType: configTypesWithRangerHdfsAuditDir) {
+      Map<String, String> configProperties = new HashMap<>();
+      configProperties.put("xasecure.audit.destination.hdfs.dir", "hdfs://my_name_service:100");
+
+      clusterConfigProperties.put(configType, configProperties);
+    }
+
+    // DFS name service
+    final String hdfsSiteConfigType = "hdfs-site";
+    Map<String, String> hdfsSiteProperties = new HashMap<>();
+    clusterConfigProperties.put(hdfsSiteConfigType, hdfsSiteProperties);
+    hdfsSiteProperties.put("dfs.nameservices", "my_name_service");
+    hdfsSiteProperties.put("dfs.ha.namenodes.my_name_service", "nn1,nn2");
+
+
+    Map<String, Map<String, String>> parentProperties = new HashMap<>();
+    Configuration parentClusterConfig = new Configuration(parentProperties, new HashMap<String, Map<String, Map<String, String>>>());
+    Configuration clusterConfig = new Configuration(clusterConfigProperties, new HashMap<String, Map<String, Map<String, String>>>(), parentClusterConfig);
+
+    Collection<String> rangerComponents = new HashSet<>();
+    rangerComponents.add("RANGER_ADMIN");
+    rangerComponents.add("RANGER_USERSYNC");
+
+    Collection<String> hdfsComponents = new HashSet<String>();
+    hdfsComponents.add("NAMENODE");
+    hdfsComponents.add("DATANODE");
+
+    TestHostGroup group1 = new TestHostGroup("group1", rangerComponents, Collections.singleton("host1"));
+    group1.components.addAll(hdfsComponents);
+
+
+    TestHostGroup group2 = new TestHostGroup("group2", hdfsComponents, Collections.singleton("host2"));
+
+
+    Collection<TestHostGroup> hostGroups = Lists.newArrayList(group1, group2);
+
+    ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups);
+    BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology);
+
+    // When
+    configProcessor.doUpdateForBlueprintExport();
+
+    // Then
+    String expectedAuditHdfsDir = "hdfs://my_name_service:100";
+
+    assertEquals(expectedAuditHdfsDir, clusterConfig.getPropertyValue("ranger-env", "xasecure.audit.destination.hdfs.dir"));
+    assertEquals(expectedAuditHdfsDir, clusterConfig.getPropertyValue("ranger-yarn-audit", "xasecure.audit.destination.hdfs.dir"));
+    assertEquals(expectedAuditHdfsDir, clusterConfig.getPropertyValue("ranger-hdfs-audit", "xasecure.audit.destination.hdfs.dir"));
+    assertEquals(expectedAuditHdfsDir, clusterConfig.getPropertyValue("ranger-hbase-audit", "xasecure.audit.destination.hdfs.dir"));
+    assertEquals(expectedAuditHdfsDir, clusterConfig.getPropertyValue("ranger-hive-audit", "xasecure.audit.destination.hdfs.dir"));
+    assertEquals(expectedAuditHdfsDir, clusterConfig.getPropertyValue("ranger-knox-audit", "xasecure.audit.destination.hdfs.dir"));
+    assertEquals(expectedAuditHdfsDir, clusterConfig.getPropertyValue("ranger-kafka-audit", "xasecure.audit.destination.hdfs.dir"));
+    assertEquals(expectedAuditHdfsDir, clusterConfig.getPropertyValue("ranger-storm-audit", "xasecure.audit.destination.hdfs.dir"));
+  }
+
+  @Test
   public void testRangerKmsServerProperties() throws Exception {
     // Given
 


Mime
View raw message