ambari-commits mailing list archives

From srima...@apache.org
Subject ambari git commit: AMBARI-10332. BE: Extend stack-advisor to recommend property_value_attributes for HDFS (dsen via srimanth)
Date Fri, 10 Apr 2015 23:26:46 GMT
Repository: ambari
Updated Branches:
  refs/heads/trunk 25fe9bd51 -> 55aaed016


AMBARI-10332. BE: Extend stack-advisor to recommend property_value_attributes for HDFS (dsen via srimanth)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/55aaed01
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/55aaed01
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/55aaed01

Branch: refs/heads/trunk
Commit: 55aaed016eff89592843cb779a0ca8f308a3d9b3
Parents: 25fe9bd
Author: Srimanth Gunturi <sgunturi@hortonworks.com>
Authored: Fri Apr 10 14:29:17 2015 -0700
Committer: Srimanth Gunturi <sgunturi@hortonworks.com>
Committed: Fri Apr 10 16:26:40 2015 -0700

----------------------------------------------------------------------
 .../apache/ambari/server/agent/DiskInfo.java    |  12 +-
 .../commands/StackAdvisorCommand.java           |   2 +-
 .../HDFS/2.1.0.2.0/configuration/hdfs-site.xml  |   6 +
 .../stacks/HDP/2.0.6/services/stack_advisor.py  |  21 ++-
 .../stacks/HDP/2.2/services/stack_advisor.py    |  59 +++++-
 .../stacks/2.2/common/test_stack_advisor.py     | 181 +++++++++++++++++--
 6 files changed, 250 insertions(+), 31 deletions(-)
----------------------------------------------------------------------
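
For orientation: the heart of this change is that a recommendation may now carry a property_attributes map alongside each config type's properties, holding value-attribute hints such as 'max'. A minimal sketch of the payload shape, with values copied from the expected dict in test_recommendHDFSConfigurations further down:

    # Recommendation shape with property_value_attributes; values come from
    # the test expectations in this commit.
    expected = {
      'hadoop-env': {
        'properties':          {'namenode_heapsize': '1024'},
        'property_attributes': {'namenode_heapsize': {'max': '1024'}}  # capped by NameNode host RAM
      },
      'hdfs-site': {
        'properties':          {'dfs.datanode.failed.volumes.tolerated': '1'},
        'property_attributes': {'dfs.datanode.failed.volumes.tolerated': {'max': '4'}}  # one per data dir
      }
    }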


http://git-wip-us.apache.org/repos/asf/ambari/blob/55aaed01/ambari-server/src/main/java/org/apache/ambari/server/agent/DiskInfo.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/agent/DiskInfo.java b/ambari-server/src/main/java/org/apache/ambari/server/agent/DiskInfo.java
index e3fb88e..0f345b8 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/agent/DiskInfo.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/agent/DiskInfo.java
@@ -117,7 +117,17 @@ public class DiskInfo {
   public void setSize(String size) {
     this.size = size;
   }
-  
+
+  @JsonProperty("device")
+  public String getDevice() {
+    return device;
+  }
+
+  @JsonProperty("device")
+  public void setDevice(String device) {
+    this.device = device;
+  }
+
   @Override
   public String toString() {
     return "available=" + this.available + ",mountpoint=" + this.mountpoint

http://git-wip-us.apache.org/repos/asf/ambari/blob/55aaed01/ambari-server/src/main/java/org/apache/ambari/server/api/services/stackadvisor/commands/StackAdvisorCommand.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/api/services/stackadvisor/commands/StackAdvisorCommand.java b/ambari-server/src/main/java/org/apache/ambari/server/api/services/stackadvisor/commands/StackAdvisorCommand.java
index ce67002..15ab6fb 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/api/services/stackadvisor/commands/StackAdvisorCommand.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/api/services/stackadvisor/commands/StackAdvisorCommand.java
@@ -70,7 +70,7 @@ public abstract class StackAdvisorCommand<T extends StackAdvisorResponse> extend
   protected static Log LOG = LogFactory.getLog(StackAdvisorCommand.class);
 
   private static final String GET_HOSTS_INFO_URI = "/api/v1/hosts"
-      + "?fields=Hosts&Hosts/host_name.in(%s)";
+      + "?fields=Hosts/*&Hosts/host_name.in(%s)";
   private static final String GET_SERVICES_INFO_URI = "/api/v1/stacks/%s/versions/%s/"
       + "?fields=Versions/stack_name,Versions/stack_version,Versions/parent_stack_version"
       + ",services/StackServices/service_name,services/StackServices/service_version"

http://git-wip-us.apache.org/repos/asf/ambari/blob/55aaed01/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/configuration/hdfs-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/configuration/hdfs-site.xml b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/configuration/hdfs-site.xml
index 28191e2..c9d7172 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/configuration/hdfs-site.xml
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/configuration/hdfs-site.xml
@@ -61,6 +61,12 @@
       <maximum>2</maximum>
       <increment-step>1</increment-step>
     </value-attributes>
+    <depends-on>
+      <property>
+        <type>hdfs-site</type>
+        <name>dfs.datanode.data.dir</name>
+      </property>
+    </depends-on>
   </property>
 
   <property>
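
The new depends-on element marks dfs.datanode.failed.volumes.tolerated as dependent on dfs.datanode.data.dir, so a fresh recommendation is requested whenever the data dirs change; the advisor then recomputes the value from the dir count. A one-line restatement of that rule (the input string is illustrative):

    # Distilled from the HDP 2.2 stack_advisor.py change below.
    data_dirs = len("/path/1,/path/2,/path/3,/path/4".split(","))          # -> 4
    tolerated = 0 if data_dirs <= 2 else (1 if data_dirs <= 4 else 2)      # -> 1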

http://git-wip-us.apache.org/repos/asf/ambari/blob/55aaed01/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/stack_advisor.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/stack_advisor.py
index 08badf8..e692411 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/stack_advisor.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/stack_advisor.py
@@ -139,11 +139,11 @@ class HDP206StackAdvisor(DefaultStackAdvisor):
     putMapredProperty('mapreduce.task.io.sort.mb', min(int(round(0.4 * clusterData['mapMemory'])), 1024))
  
   def recommendHDFSConfigurations(self, configurations, clusterData, services, hosts):
-    putHDFSProperty = self.putProperty(configurations, "hadoop-env")
+    putHDFSProperty = self.putProperty(configurations, "hadoop-env", services)
     putHDFSProperty('namenode_heapsize', max(int(clusterData['totalAvailableRam'] / 2), 1024))
-    putHDFSProperty = self.putProperty(configurations, "hadoop-env")
+    putHDFSProperty = self.putProperty(configurations, "hadoop-env", services)
     putHDFSProperty('namenode_opt_newsize', max(int(clusterData['totalAvailableRam'] / 8), 128))
-    putHDFSProperty = self.putProperty(configurations, "hadoop-env")
+    putHDFSProperty = self.putProperty(configurations, "hadoop-env", services)
     putHDFSProperty('namenode_opt_maxnewsize', max(int(clusterData['totalAvailableRam'] / 8), 256))
 
   def recommendHbaseEnvConfigurations(self, configurations, clusterData, services, hosts):
@@ -153,7 +153,6 @@ class HDP206StackAdvisor(DefaultStackAdvisor):
 
   def recommendAmsConfigurations(self, configurations, clusterData, services, hosts):
     putAmsEnvProperty = self.putProperty(configurations, "ams-env")
-    putAmsEnvProperty = self.putProperty(configurations, "ams-env")
     putAmsHbaseSiteProperty = self.putProperty(configurations, "ams-hbase-site")
     putTimelineServiceProperty = self.putProperty(configurations, "ams-site")
     putHbaseEnvProperty = self.putProperty(configurations, "ams-hbase-env")
@@ -209,16 +208,22 @@ class HDP206StackAdvisor(DefaultStackAdvisor):
 
 
 
-  def getHostWithComponent(self, serviceName, componentName, services, hosts):
+  def getHostsWithComponent(self, serviceName, componentName, services, hosts):
    if services is not None and hosts is not None and serviceName in [service["StackServices"]["service_name"] for service in services["services"]]:
      service = [serviceEntry for serviceEntry in services["services"] if serviceEntry["StackServices"]["service_name"] == serviceName][0]
      components = [componentEntry for componentEntry in service["components"] if componentEntry["StackServiceComponents"]["component_name"] == componentName]
      if (len(components) > 0 and len(components[0]["StackServiceComponents"]["hostnames"]) > 0):
-        # NodeManager available - determine hosts and memory
+        # component available - determine hosts and memory
         componentHostname = components[0]["StackServiceComponents"]["hostnames"][0]
        componentHosts = [host for host in hosts["items"] if host["Hosts"]["host_name"] == componentHostname]
-        if (componentHosts is not None and len(componentHosts) > 0):
-          return componentHosts[0]
+        return componentHosts
+    return []
+
+  def getHostWithComponent(self, serviceName, componentName, services, hosts):
+    componentHosts = self.getHostsWithComponent(serviceName, componentName, services, hosts)
+    if (len(componentHosts) > 0):
+      return componentHosts[0]
+    return None
 
   def getConfigurationClusterSummary(self, servicesList, hosts, components, services):
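
The refactor turns getHostWithComponent into a thin wrapper over the new plural getHostsWithComponent, so callers that need every host entry for a component share one lookup. A minimal usage sketch with hand-built inputs in the shapes the advisor expects (advisor is assumed to be an HDP206StackAdvisor instance; all values are illustrative):

    # Hand-built fixtures mirroring the services/hosts dicts used in the tests below.
    services = {"services": [{
      "StackServices": {"service_name": "HDFS"},
      "components": [{"StackServiceComponents": {
        "component_name": "DATANODE", "hostnames": ["host1"]}}]
    }]}
    hosts = {"items": [{"Hosts": {"host_name": "host1", "total_mem": 2097152}}]}

    datanode_hosts = advisor.getHostsWithComponent("HDFS", "DATANODE", services, hosts)  # list of host dicts
    one_host = advisor.getHostWithComponent("HDFS", "DATANODE", services, hosts)         # first entry, or None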
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/55aaed01/ambari-server/src/main/resources/stacks/HDP/2.2/services/stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.2/services/stack_advisor.py b/ambari-server/src/main/resources/stacks/HDP/2.2/services/stack_advisor.py
index 3a42a3d..9a2d373 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.2/services/stack_advisor.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.2/services/stack_advisor.py
@@ -53,19 +53,60 @@ class HDP22StackAdvisor(HDP21StackAdvisor):
       putYarnPropertyAttribute('yarn.scheduler.maximum-allocation-mb', 'max', configurations["yarn-site"]["properties"]["yarn.nodemanager.resource.memory-mb"])
 
   def recommendHDFSConfigurations(self, configurations, clusterData, services, hosts):
-    putHdfsProperty = self.putProperty(configurations, "hdfs-site")
-    putHdfsProperty("dfs.datanode.max.transfer.threads", 16384 if clusterData["hBaseInstalled"]
else 4096)
-    putHDFSProperty = self.putProperty(configurations, "hadoop-env")
-    putHDFSProperty('namenode_heapsize', max(int(clusterData['totalAvailableRam'] / 2), 1024))
-    putHDFSProperty = self.putProperty(configurations, "hadoop-env")
-    putHDFSProperty('namenode_opt_newsize', max(int(clusterData['totalAvailableRam'] / 8),
128))
-    putHDFSProperty = self.putProperty(configurations, "hadoop-env")
-    putHDFSProperty('namenode_opt_maxnewsize', max(int(clusterData['totalAvailableRam'] /
8), 256))
+    putHdfsSiteProperty = self.putProperty(configurations, "hdfs-site", services)
+    putHdfsSiteProperty("dfs.datanode.max.transfer.threads", 16384 if clusterData["hBaseInstalled"]
else 4096)
+    dataDirsCount = 1
+    if "dfs.datanode.data.dir" in configurations["hdfs-site"]["properties"]:
+      dataDirsCount = len(str(configurations["hdfs-site"]["properties"]["dfs.datanode.data.dir"]).split(","))
+    if dataDirsCount <= 2:
+      failedVolumesTolerated = 0
+    elif dataDirsCount <= 4:
+      failedVolumesTolerated = 1
+    else:
+      failedVolumesTolerated = 2
+    putHdfsSiteProperty("dfs.datanode.failed.volumes.tolerated", failedVolumesTolerated)
+
+    namenodeHosts = self.getHostsWithComponent("HDFS", "NAMENODE", services, hosts)
+
+    # 25 * # of cores on NameNode
+    nameNodeCores = 4
+    if namenodeHosts is not None and len(namenodeHosts):
+      nameNodeCores = int(namenodeHosts[0]['Hosts']['cpu_count'])
+    putHdfsSiteProperty("dfs.namenode.handler.count", 25*nameNodeCores)
+
     servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
     if ('ranger-hdfs-plugin-properties' in services['configurations']) and ('ranger-hdfs-plugin-enabled' in services['configurations']['ranger-hdfs-plugin-properties']['properties']):
       rangerPluginEnabled = services['configurations']['ranger-hdfs-plugin-properties']['properties']['ranger-hdfs-plugin-enabled']
       if ("RANGER" in servicesList) and (rangerPluginEnabled.lower() == 'Yes'.lower()):
-        putHDFSProperty("dfs.permissions.enabled",'true')
+        putHdfsSiteProperty("dfs.permissions.enabled",'true')
+
+    putHdfsSiteProperty("dfs.namenode.safemode.threshold-pct", "0.99f" if len(namenodeHosts)
> 1 else "1.0f")
+
+    putHdfsEnvProperty = self.putProperty(configurations, "hadoop-env", services)
+    putHdfsEnvProperty('namenode_heapsize', max(int(clusterData['totalAvailableRam'] / 2), 1024))
+    putHdfsEnvProperty('namenode_opt_newsize', max(int(clusterData['totalAvailableRam'] / 8), 128))
+    putHdfsEnvProperty('namenode_opt_maxnewsize', max(int(clusterData['totalAvailableRam'] / 8), 256))
+
+    # Property Attributes
+    putHdfsEnvPropertyAttribute = self.putPropertyAttribute(configurations, "hadoop-env")
+    if (namenodeHosts is not None and len(namenodeHosts) > 0):
+      if len(namenodeHosts) > 1:
+        namenode_heapsize = min(int(namenodeHosts[0]["Hosts"]["total_mem"]), int(namenodeHosts[1]["Hosts"]["total_mem"])) / 1024
+      else:
+        namenode_heapsize = int(namenodeHosts[0]["Hosts"]["total_mem"] / 1024) # total_mem in kb
+
+      putHdfsEnvPropertyAttribute('namenode_heapsize', 'max', namenode_heapsize)
+
+    datanodeHosts = self.getHostsWithComponent("HDFS", "DATANODE", services, hosts)
+    if (datanodeHosts is not None and len(datanodeHosts)>0):
+      min_datanode_ram_kb = 1073741824 # 1 TB
+      for datanode in datanodeHosts:
+        ram_kb = datanode['Hosts']['total_mem']
+        min_datanode_ram_kb = min(min_datanode_ram_kb, ram_kb)
+      putHdfsEnvPropertyAttribute('dtnode_heapsize', 'max', int(min_datanode_ram_kb/1024))
+
+    putHdfsSitePropertyAttribute = self.putPropertyAttribute(configurations, "hdfs-site")
+    putHdfsSitePropertyAttribute('dfs.datanode.failed.volumes.tolerated', 'max', dataDirsCount)
 
   def recommendHIVEConfigurations(self, configurations, clusterData, services, hosts):
     super(HDP22StackAdvisor, self).recommendHiveConfigurations(configurations, clusterData, services, hosts)
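
Distilled, the new HDFS heuristics above are: tolerate more failed volumes as data dirs grow (capped at the dir count via a 'max' attribute), size the NameNode handler pool at 25 threads per NameNode core, relax the safemode threshold only when there is more than one NameNode, and cap the heap recommendations at the hosts' physical RAM. A standalone restatement of the arithmetic (inputs are illustrative and mirror the single-NameNode test fixture further down; total_mem is in KB, as the comment above notes):

    # Standalone sketch of the heuristics; not the advisor code itself.
    data_dirs = 4
    failed_volumes_tolerated = 0 if data_dirs <= 2 else (1 if data_dirs <= 4 else 2)  # -> 1

    namenode_cores = 1
    handler_count = 25 * namenode_cores                          # -> 25

    namenode_count = 1
    safemode_pct = "0.99f" if namenode_count > 1 else "1.0f"     # -> "1.0f"

    namenode_total_mem_kb = 1048576                              # smallest NameNode host, in KB
    namenode_heapsize_max_mb = namenode_total_mem_kb // 1024     # -> 1024 (the 'max' attribute)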

http://git-wip-us.apache.org/repos/asf/ambari/blob/55aaed01/ambari-server/src/test/python/stacks/2.2/common/test_stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.2/common/test_stack_advisor.py b/ambari-server/src/test/python/stacks/2.2/common/test_stack_advisor.py
index 0cfc3b6..5e9b48c 100644
--- a/ambari-server/src/test/python/stacks/2.2/common/test_stack_advisor.py
+++ b/ambari-server/src/test/python/stacks/2.2/common/test_stack_advisor.py
@@ -135,7 +135,6 @@ class TestHDP22StackAdvisor(TestCase):
 
 
   def test_validateHDFSConfigurations(self):
-    self.maxDiff = None
     recommendedDefaults = None
 
     unsecure_cluster_core_site = {
@@ -1152,7 +1151,14 @@ class TestHDP22StackAdvisor(TestCase):
     self.assertEquals(configurations, expected)
 
   def test_recommendHDFSConfigurations(self):
-    configurations = {}
+    configurations = {
+      'ranger-hdfs-plugin-properties':{
+        "properties": {"ranger-hdfs-plugin-enabled":"Yes"}
+      },
+      'hdfs-site': {
+        "properties": {"dfs.datanode.data.dir": "/path/1,/path/2,/path/3,/path/4"}
+      }
+    }
     clusterData = {
       "totalAvailableRam": 2048,
       "hBaseInstalled": 111
@@ -1163,27 +1169,178 @@ class TestHDP22StackAdvisor(TestCase):
           'namenode_heapsize': '1024',
           'namenode_opt_newsize' : '256',
           'namenode_opt_maxnewsize' : '256'
+        },
+        'property_attributes': {
+          'dtnode_heapsize': {'max': '2048'},
+          'namenode_heapsize': {'max': '1024'}
         }
       },
       'hdfs-site': {
         'properties': {
-          'dfs.datanode.max.transfer.threads': '16384'
+          'dfs.datanode.max.transfer.threads': '16384',
+          'dfs.namenode.safemode.threshold-pct': '1.0f',
+          'dfs.datanode.failed.volumes.tolerated': '1',
+          'dfs.namenode.handler.count': '25',
+          'dfs.datanode.data.dir': '/path/1,/path/2,/path/3,/path/4'
         },
+        'property_attributes': {
+          'dfs.datanode.failed.volumes.tolerated': {'max': '4'}
+        }
+      },
+      'ranger-hdfs-plugin-properties': {
+        'properties': {
+          'ranger-hdfs-plugin-enabled': 'Yes'
+        }
       }
     }
     services = {"services":
                     [{"StackServices":
                           {"service_name" : "HDFS",
-                           "service_version" : "2.6.0.2.2",
-                           }
-                     }],
-                "configurations": {
-                    'ranger-hdfs-plugin-properties':{
-                        "properties": {"ranger-hdfs-plugin-enabled":"Yes"}
-                    }
-                }
+                           "service_version" : "2.6.0.2.2"
+                           },
+                      "components":[
+                        {
+                          "href":"/api/v1/stacks/HDP/versions/2.2/services/HDFS/components/DATANODE",
+                          "StackServiceComponents":{
+                            "advertise_version":"true",
+                            "cardinality":"1+",
+                            "component_category":"SLAVE",
+                            "component_name":"DATANODE",
+                            "custom_commands":[
+
+                            ],
+                            "display_name":"DataNode",
+                            "is_client":"false",
+                            "is_master":"false",
+                            "service_name":"HDFS",
+                            "stack_name":"HDP",
+                            "stack_version":"2.2",
+                            "hostnames":[
+                              "host1"
+                            ]
+                          },
+                          "dependencies":[
+
+                          ]
+                        },
+                        {
+                          "href":"/api/v1/stacks/HDP/versions/2.2/services/HDFS/components/JOURNALNODE",
+                          "StackServiceComponents":{
+                            "advertise_version":"true",
+                            "cardinality":"0+",
+                            "component_category":"SLAVE",
+                            "component_name":"JOURNALNODE",
+                            "custom_commands":[
+
+                            ],
+                            "display_name":"JournalNode",
+                            "is_client":"false",
+                            "is_master":"false",
+                            "service_name":"HDFS",
+                            "stack_name":"HDP",
+                            "stack_version":"2.2",
+                            "hostnames":[
+                              "host1"
+                            ]
+                          },
+                          "dependencies":[
+                            {
+                              "href":"/api/v1/stacks/HDP/versions/2.2/services/HDFS/components/JOURNALNODE/dependencies/HDFS_CLIENT",
+                              "Dependencies":{
+                                "component_name":"HDFS_CLIENT",
+                                "dependent_component_name":"JOURNALNODE",
+                                "dependent_service_name":"HDFS",
+                                "stack_name":"HDP",
+                                "stack_version":"2.2"
+                              }
+                            }
+                          ]
+                        },
+                        {
+                          "href":"/api/v1/stacks/HDP/versions/2.2/services/HDFS/components/NAMENODE",
+                          "StackServiceComponents":{
+                            "advertise_version":"true",
+                            "cardinality":"1-2",
+                            "component_category":"MASTER",
+                            "component_name":"NAMENODE",
+                            "custom_commands":[
+                              "DECOMMISSION",
+                              "REBALANCEHDFS"
+                            ],
+                            "display_name":"NameNode",
+                            "is_client":"false",
+                            "is_master":"true",
+                            "service_name":"HDFS",
+                            "stack_name":"HDP",
+                            "stack_version":"2.2",
+                            "hostnames":[
+                              "host2"
+                            ]
+                          },
+                          "dependencies":[
+
+                          ]
+                        },
+                        {
+                          "href":"/api/v1/stacks/HDP/versions/2.2/services/HDFS/components/SECONDARY_NAMENODE",
+                          "StackServiceComponents":{
+                            "advertise_version":"true",
+                            "cardinality":"1",
+                            "component_category":"MASTER",
+                            "component_name":"SECONDARY_NAMENODE",
+                            "custom_commands":[
+
+                            ],
+                            "display_name":"SNameNode",
+                            "is_client":"false",
+                            "is_master":"true",
+                            "service_name":"HDFS",
+                            "stack_name":"HDP",
+                            "stack_version":"2.2",
+                            "hostnames":[
+                              "host1"
+                            ]
+                          },
+                          "dependencies":[
+
+                          ]
+                        },
+                      ],
+                    }],
+                "configurations": configurations
                 }
-    self.stackAdvisor.recommendHDFSConfigurations(configurations, clusterData, services, '')
+    hosts = {
+      "items" : [
+        {
+          "href" : "/api/v1/hosts/host1",
+          "Hosts" : {
+            "cpu_count" : 1,
+            "host_name" : "host1",
+            "os_arch" : "x86_64",
+            "os_type" : "centos6",
+            "ph_cpu_count" : 1,
+            "public_host_name" : "host1",
+            "rack_info" : "/default-rack",
+            "total_mem" : 2097152
+          }
+        },
+        {
+          "href" : "/api/v1/hosts/host2",
+          "Hosts" : {
+            "cpu_count" : 1,
+            "host_name" : "host2",
+            "os_arch" : "x86_64",
+            "os_type" : "centos6",
+            "ph_cpu_count" : 1,
+            "public_host_name" : "host2",
+            "rack_info" : "/default-rack",
+            "total_mem" : 1048576
+          }
+        },
+      ]
+    }
+
+    self.stackAdvisor.recommendHDFSConfigurations(configurations, clusterData, services, hosts)
     self.assertEquals(configurations, expected)
 
   def test_validateHDFSConfigurationsEnv(self):

