ambari-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From nc...@apache.org
Subject [31/32] ambari git commit: AMBARI-13129. Integrate Hive with Atlas in Blueprints
Date Fri, 02 Oct 2015 21:12:35 GMT
AMBARI-13129. Integrate Hive with Atlas in Blueprints


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/321d57b0
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/321d57b0
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/321d57b0

Branch: refs/heads/branch-dev-patch-upgrade
Commit: 321d57b0de3475977741497d206990dccd4b6f40
Parents: bfc35fe
Author: John Speidel <jspeidel@hortonworks.com>
Authored: Thu Oct 1 15:54:43 2015 -0400
Committer: John Speidel <jspeidel@hortonworks.com>
Committed: Fri Oct 2 15:17:17 2015 -0400

----------------------------------------------------------------------
 .../BlueprintConfigurationProcessor.java        | 93 +++++++++++++++++++-
 .../ATLAS/0.1.0.2.3/metainfo.xml                | 27 +++++-
 .../0.1.0.2.3/package/scripts/atlas_client.py   | 48 ++++++++++
 .../BlueprintConfigurationProcessorTest.java    | 93 ++++++++++++++++++++
 4 files changed, 256 insertions(+), 5 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/321d57b0/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessor.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessor.java
b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessor.java
index 892cf32..5fd5563 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessor.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessor.java
@@ -1077,9 +1077,9 @@ public class BlueprintConfigurationProcessor {
       } else {
         int matchingGroupCount = topology.getHostGroupsForComponent(component).size();
         if (matchingGroupCount == 1) {
-          Collection<String> componentHosts = topology.getHostAssignmentsForComponent(component);
           //todo: warn if > 1 hosts
-          return origValue.replace("localhost", componentHosts.iterator().next());
+          return replacePropertyValue(origValue,
+              topology.getHostAssignmentsForComponent(component).iterator().next(), properties);
         } else {
           //todo: extract all hard coded HA logic
           Cardinality cardinality = topology.getBlueprint().getStack().getCardinality(component);
@@ -1159,6 +1159,10 @@ public class BlueprintConfigurationProcessor {
       }
     }
 
+    public String replacePropertyValue(String origValue, String host, Map<String, Map<String,
String>> properties) {
+      return origValue.replace("localhost", host);
+    }
+
     @Override
     public Collection<String> getRequiredHostGroups(String propertyName,
                                                     String origValue,
@@ -1856,6 +1860,21 @@ public class BlueprintConfigurationProcessor {
   }
 
   /**
+   * A topology independent updater which provides a default implementation of getRequiredHostGroups
+   * since no topology related information is required by the updater.
+   */
+  private static abstract class NonTopologyUpdater implements PropertyUpdater {
+    @Override
+    public Collection<String> getRequiredHostGroups(String propertyName,
+                                                    String origValue,
+                                                    Map<String, Map<String, String>>
properties,
+                                                    ClusterTopology topology) {
+      return Collections.emptyList();
+    }
+  }
+
+
+  /**
    * Register updaters for configuration properties.
    */
   static {
@@ -1877,6 +1896,7 @@ public class BlueprintConfigurationProcessor {
     Map<String, PropertyUpdater> accumuloSiteMap = new HashMap<String, PropertyUpdater>();
     Map<String, PropertyUpdater> falconStartupPropertiesMap = new HashMap<String,
PropertyUpdater>();
     Map<String, PropertyUpdater> kafkaBrokerMap = new HashMap<String, PropertyUpdater>();
+    Map<String, PropertyUpdater> atlasPropsMap = new HashMap<String, PropertyUpdater>();
     Map<String, PropertyUpdater> mapredEnvMap = new HashMap<String, PropertyUpdater>();
     Map<String, PropertyUpdater> hadoopEnvMap = new HashMap<String, PropertyUpdater>();
     Map<String, PropertyUpdater> hbaseEnvMap = new HashMap<String, PropertyUpdater>();
@@ -1911,6 +1931,7 @@ public class BlueprintConfigurationProcessor {
     singleHostTopologyUpdaters.put("hive-env", hiveEnvMap);
     singleHostTopologyUpdaters.put("oozie-env", oozieEnvMap);
     singleHostTopologyUpdaters.put("kafka-broker", kafkaBrokerMap);
+    singleHostTopologyUpdaters.put("application-properties", atlasPropsMap);
 
     mPropertyUpdaters.put("hadoop-env", hadoopEnvMap);
     mPropertyUpdaters.put("hbase-env", hbaseEnvMap);
@@ -1980,7 +2001,6 @@ public class BlueprintConfigurationProcessor {
     yarnSiteMap.put("yarn.timeline-service.webapp.address", new SingleHostTopologyUpdater("APP_TIMELINE_SERVER"));
     yarnSiteMap.put("yarn.timeline-service.webapp.https.address", new SingleHostTopologyUpdater("APP_TIMELINE_SERVER"));
 
-
     // HIVE_SERVER
     multiHiveSiteMap.put("hive.metastore.uris", new MultipleHostTopologyUpdater("HIVE_METASTORE",
',', true));
     dbHiveSiteMap.put("javax.jdo.option.ConnectionURL",
@@ -1995,6 +2015,70 @@ public class BlueprintConfigurationProcessor {
     multiHiveSiteMap.put("hive.zookeeper.quorum", new MultipleHostTopologyUpdater("ZOOKEEPER_SERVER"));
     multiHiveSiteMap.put("hive.cluster.delegation.token.store.zookeeper.connectString", new
MultipleHostTopologyUpdater("ZOOKEEPER_SERVER"));
 
+    // HIVE Atlas integration
+    hiveSiteMap.put("hive.exec.post.hooks", new NonTopologyUpdater() {
+      @Override
+      public String updateForClusterCreate(String propertyName,
+                                           String origValue,
+                                           Map<String, Map<String, String>> properties,
+                                           ClusterTopology topology) {
+
+        if (topology.getBlueprint().getServices().contains("ATLAS")) {
+          String hiveHookClass = "org.apache.atlas.hive.hook.HiveHook";
+          if (origValue == null || origValue.isEmpty()) {
+            return hiveHookClass;
+          } else {
+            return String.format("%s,%s", origValue, hiveHookClass);
+          }
+        } else {
+          return origValue;
+        }
+      }
+    });
+
+    //todo: john - this property should be moved to atlas configuration
+    hiveSiteMap.put("atlas.cluster.name", new NonTopologyUpdater() {
+      @Override
+      public String updateForClusterCreate(String propertyName,
+                                           String origValue,
+                                           Map<String, Map<String, String>> properties,
+                                           ClusterTopology topology) {
+
+        if (topology.getBlueprint().getServices().contains("ATLAS")) {
+          // if original value is not set or is the default "primary" set the cluster id
+          if (origValue == null || origValue.trim().isEmpty() || origValue.equals("primary"))
{
+            //use cluster id because cluster name may change
+            return String.valueOf(topology.getClusterId());
+          } else {
+            // if explicitly set by user, don't override
+            return origValue;
+          }
+        } else {
+          return origValue;
+        }
+      }
+    });
+
+    //todo: john - this property should be removed
+    hiveSiteMap.put("atlas.rest.address", new SingleHostTopologyUpdater("ATLAS_SERVER") {
+      @Override
+      public String replacePropertyValue(String origValue, String host, Map<String, Map<String,
String>> properties) {
+        boolean tlsEnabled = Boolean.parseBoolean(properties.get("application-properties").get("atlas.enableTLS"));
+        String scheme;
+        String port;
+        if (tlsEnabled) {
+          scheme = "https";
+          port = properties.get("application-properties").get("atlas.server.https.port");
+        } else {
+          scheme = "http";
+          port = properties.get("application-properties").get("atlas.server.http.port");
+        }
+
+        return String.format("%s://%s:%s", scheme, host, port);
+      }
+    });
+
+
     // OOZIE_SERVER
     oozieSiteMap.put("oozie.base.url", new SingleHostTopologyUpdater("OOZIE_SERVER"));
     oozieSiteMap.put("oozie.authentication.kerberos.principal", new SingleHostTopologyUpdater("OOZIE_SERVER"));
@@ -2041,6 +2125,9 @@ public class BlueprintConfigurationProcessor {
     multiOozieSiteMap.put("hadoop.proxyuser.knox.hosts", new MultipleHostTopologyUpdater("KNOX_GATEWAY"));
     multiOozieSiteMap.put("oozie.service.ProxyUserService.proxyuser.knox.hosts", new MultipleHostTopologyUpdater("KNOX_GATEWAY"));
 
+    // ATLAS
+    atlasPropsMap.put("atlas.server.bind.address", new SingleHostTopologyUpdater("ATLAS_SERVER"));
+
 
     // Required due to AMBARI-4933.  These no longer seem to be required as the default values
in the stack
     // are now correct but are left here in case an existing blueprint still contains an
old value.

http://git-wip-us.apache.org/repos/asf/ambari/blob/321d57b0/ambari-server/src/main/resources/common-services/ATLAS/0.1.0.2.3/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/ATLAS/0.1.0.2.3/metainfo.xml
b/ambari-server/src/main/resources/common-services/ATLAS/0.1.0.2.3/metainfo.xml
index a77bad3..2600fc4 100644
--- a/ambari-server/src/main/resources/common-services/ATLAS/0.1.0.2.3/metainfo.xml
+++ b/ambari-server/src/main/resources/common-services/ATLAS/0.1.0.2.3/metainfo.xml
@@ -23,6 +23,7 @@
       <displayName>Atlas</displayName>
       <comment>Atlas Metadata and Governance platform</comment>
       <version>0.1.0.2.3</version>
+      
       <components>
         <component>
           <name>ATLAS_SERVER</name>
@@ -48,6 +49,30 @@
             <dictionaryName>atlas-env</dictionaryName>
           </configFile>
         </component>
+
+        <component>
+          <name>ATLAS_CLIENT</name>
+          <displayName>Atlas Metadata Client</displayName>
+          <category>CLIENT</category>
+          <cardinality>0+</cardinality>
+          <versionAdvertised>false</versionAdvertised>
+          <dependencies>
+          </dependencies>
+          <commandScript>
+            <script>scripts/atlas_client.py</script>
+            <scriptType>PYTHON</scriptType>
+          </commandScript>
+          <configFile>
+            <type>properties</type>
+            <fileName>application.properties</fileName>
+            <dictionaryName>application-properties</dictionaryName>
+          </configFile>
+          <configFile>
+            <type>env</type>
+            <fileName>atlas-env.sh</fileName>
+            <dictionaryName>atlas-env</dictionaryName>
+          </configFile>
+        </component>
       </components>
 
       <osSpecifics>
@@ -66,14 +91,12 @@
         <scriptType>PYTHON</scriptType>
         <timeout>300</timeout>
       </commandScript>
-      
 
       <configuration-dependencies>
         <config-type>application-properties</config-type>
         <config-type>atlas-env</config-type>
       </configuration-dependencies>
 
-
     </service>
   </services>
 </metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/321d57b0/ambari-server/src/main/resources/common-services/ATLAS/0.1.0.2.3/package/scripts/atlas_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/ATLAS/0.1.0.2.3/package/scripts/atlas_client.py
b/ambari-server/src/main/resources/common-services/ATLAS/0.1.0.2.3/package/scripts/atlas_client.py
new file mode 100644
index 0000000..e54cc84
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/ATLAS/0.1.0.2.3/package/scripts/atlas_client.py
@@ -0,0 +1,48 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+from resource_management import *
+from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import hdp_select
+
+from metadata import metadata
+
+# todo: support rolling upgrade
+class AtlasClient(Script):
+
+  def get_stack_to_component(self):
+    return {"HDP": "atlas-client"}
+
+  def install(self, env):
+    self.install_packages(env)
+    self.configure(env)
+
+
+  def configure(self, env):
+    import params
+    env.set_params(params)
+    metadata()
+
+  def status(self, env):
+    raise ClientComponentHasNoStatus()
+
+if __name__ == "__main__":
+  AtlasClient().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/321d57b0/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessorTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessorTest.java
b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessorTest.java
index a97ca74..ac86668 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessorTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessorTest.java
@@ -143,6 +143,11 @@ public class BlueprintConfigurationProcessorTest {
     hbaseComponents.add("HBASE_MASTER");
     serviceComponents.put("HBASE", hbaseComponents);
 
+    Collection<String> atlasComponents = new HashSet<String>();
+    atlasComponents.add("ATLAS_SERVER");
+    atlasComponents.add("ATLAS_CLIENT");
+    serviceComponents.put("ATLAS", atlasComponents);
+
     for (Map.Entry<String, Collection<String>> entry : serviceComponents.entrySet())
{
       String service = entry.getKey();
       for (String component : entry.getValue()) {
@@ -5096,6 +5101,94 @@ public class BlueprintConfigurationProcessorTest {
     assertEquals("users", leafConfigCoreSiteProps.get("hadoop.proxyuser.test-falcon-user.groups"));
   }
 
+  @Test
+  public void testAtlasHiveProperties() throws Exception {
+    Map<String, Map<String, String>> properties = new HashMap<String, Map<String,
String>>();
+    Map<String, String> atlasProperties = new HashMap<String, String>();
+    properties.put("application-properties", atlasProperties);
+    atlasProperties.put("atlas.enableTLS", "false");
+    atlasProperties.put("atlas.server.bind.address", "localhost");
+    atlasProperties.put("atlas.server.http.port", "21000");
+    Map<String, String> atlasEnv = new HashMap<String, String>();
+
+    properties.put("atlas-env", atlasEnv);
+    Map<String, String> hiveProperties = new HashMap<String, String>();
+    hiveProperties.put("hive.exec.post.hooks", "");
+    hiveProperties.put("atlas.cluster.name", "primary");
+    hiveProperties.put("atlas.rest.address", "http://localhost:21000");
+    properties.put("hive-site", hiveProperties);
+
+
+    Map<String, Map<String, String>> parentProperties = new HashMap<String,
Map<String, String>>();
+    Configuration parentClusterConfig = new Configuration(parentProperties,
+        Collections.<String, Map<String, Map<String, String>>>emptyMap());
+    Configuration clusterConfig = new Configuration(properties,
+        Collections.<String, Map<String, Map<String, String>>>emptyMap(),
parentClusterConfig);
+
+
+    Collection<String> hgComponents1 = new HashSet<String>();
+    hgComponents1.add("ATLAS_SERVER");
+    hgComponents1.add("HIVE_SERVER");
+    TestHostGroup group1 = new TestHostGroup("group1", hgComponents1, Collections.singleton("host1"));
+
+    Collection<TestHostGroup> hostGroups = Collections.singletonList(group1);
+
+    ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups);
+    BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology);
+
+    configProcessor.doUpdateForClusterCreate();
+
+    assertEquals("org.apache.atlas.hive.hook.HiveHook", clusterConfig.getPropertyValue("hive-site",
"hive.exec.post.hooks"));
+    assertEquals("1", clusterConfig.getPropertyValue("hive-site", "atlas.cluster.name"));
+    assertEquals("http://host1:21000", clusterConfig.getPropertyValue("hive-site", "atlas.rest.address"));
+    assertEquals("host1", clusterConfig.getPropertyValue("application-properties", "atlas.server.bind.address"));
+  }
+
+  @Test
+  public void testAtlasHiveProperties2() throws Exception {
+    Map<String, Map<String, String>> properties = new HashMap<String, Map<String,
String>>();
+    Map<String, String> atlasProperties = new HashMap<String, String>();
+    properties.put("application-properties", atlasProperties);
+    // use https
+    atlasProperties.put("atlas.enableTLS", "true");
+    atlasProperties.put("atlas.server.bind.address", "localhost");
+    atlasProperties.put("atlas.server.https.port", "99999");
+    Map<String, String> atlasEnv = new HashMap<String, String>();
+
+    properties.put("atlas-env", atlasEnv);
+    Map<String, String> hiveProperties = new HashMap<String, String>();
+    // default hook registered
+    hiveProperties.put("hive.exec.post.hooks", "foo");
+    // user specified cluster name
+    hiveProperties.put("atlas.cluster.name", "userSpecified");
+    hiveProperties.put("atlas.rest.address", "http://localhost:21000");
+    properties.put("hive-site", hiveProperties);
+
+
+    Map<String, Map<String, String>> parentProperties = new HashMap<String,
Map<String, String>>();
+    Configuration parentClusterConfig = new Configuration(parentProperties,
+        Collections.<String, Map<String, Map<String, String>>>emptyMap());
+    Configuration clusterConfig = new Configuration(properties,
+        Collections.<String, Map<String, Map<String, String>>>emptyMap(),
parentClusterConfig);
+
+
+    Collection<String> hgComponents1 = new HashSet<String>();
+    hgComponents1.add("ATLAS_SERVER");
+    hgComponents1.add("HIVE_SERVER");
+    TestHostGroup group1 = new TestHostGroup("group1", hgComponents1, Collections.singleton("host1"));
+
+    Collection<TestHostGroup> hostGroups = Collections.singletonList(group1);
+
+    ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups);
+    BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology);
+
+    configProcessor.doUpdateForClusterCreate();
+
+    assertEquals("foo,org.apache.atlas.hive.hook.HiveHook", clusterConfig.getPropertyValue("hive-site",
"hive.exec.post.hooks"));
+    assertEquals("userSpecified", clusterConfig.getPropertyValue("hive-site", "atlas.cluster.name"));
+    assertEquals("https://host1:99999", clusterConfig.getPropertyValue("hive-site", "atlas.rest.address"));
+  }
+
 
   private static String createExportedAddress(String expectedPortNum, String expectedHostGroupName)
{
     return createExportedHostName(expectedHostGroupName, expectedPortNum);


Mime
View raw message