ambari-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From dmitriu...@apache.org
Subject [18/46] ambari git commit: AMBARI-18739. Perf: Create Rolling and Express Upgrade Packs (dlysnichenko)
Date Fri, 20 Jan 2017 10:19:20 GMT
http://git-wip-us.apache.org/repos/asf/ambari/blob/984d4605/ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHDFS/kerberos.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHDFS/kerberos.json
b/ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHDFS/kerberos.json
new file mode 100644
index 0000000..4129506
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHDFS/kerberos.json
@@ -0,0 +1,246 @@
+{
+  "services": [
+    {
+      "name": "FAKEHDFS",
+      "identities": [
+        {
+          "name": "/spnego",
+          "principal": {
+            "configuration": "hdfs-site/dfs.web.authentication.kerberos.principal"
+          },
+          "keytab": {
+            "configuration": "hdfs-site/dfs.web.authentication.kerberos.keytab"
+          }
+        },
+        {
+          "name": "/smokeuser"
+        }
+      ],
+      "auth_to_local_properties" : [
+        "core-site/hadoop.security.auth_to_local"
+      ],
+      "configurations": [
+        {
+          "core-site": {
+            "hadoop.security.authentication": "kerberos",
+            "hadoop.security.authorization": "true",
+            "hadoop.proxyuser.HTTP.groups": "${hadoop-env/proxyuser_group}"
+          }
+        },
+        {
+          "ranger-hdfs-audit": {
+            "xasecure.audit.jaas.Client.loginModuleName": "com.sun.security.auth.module.Krb5LoginModule",
+            "xasecure.audit.jaas.Client.loginModuleControlFlag": "required",
+            "xasecure.audit.jaas.Client.option.useKeyTab": "true",
+            "xasecure.audit.jaas.Client.option.storeKey": "false",
+            "xasecure.audit.jaas.Client.option.serviceName": "solr",
+            "xasecure.audit.destination.solr.force.use.inmemory.jaas.config": "true"
+          }
+        }
+      ],
+      "components": [
+        {
+          "name":  "FAKEHDFS_CLIENT",
+          "identities": [
+            {
+              "name": "/FAKEHDFS/FAKENAMENODE/hdfs"
+            }
+          ]
+        },
+        {
+          "name": "FAKENAMENODE",
+          "identities": [
+            {
+              "name": "hdfs",
+              "principal": {
+                "value": "${hadoop-env/hdfs_user}${principal_suffix}@${realm}",
+                "type" : "user" ,
+                "configuration": "hadoop-env/hdfs_principal_name",
+                "local_username" : "${hadoop-env/hdfs_user}"
+              },
+              "keytab": {
+                "file": "${keytab_dir}/hdfs.headless.keytab",
+                "owner": {
+                  "name": "${hadoop-env/hdfs_user}",
+                  "access": "r"
+                },
+                "group": {
+                  "name": "${cluster-env/user_group}",
+                  "access": "r"
+                },
+                "configuration": "hadoop-env/hdfs_user_keytab"
+              }
+            },
+            {
+              "name": "namenode_nn",
+              "principal": {
+                "value": "nn/_HOST@${realm}",
+                "type" : "service",
+                "configuration": "hdfs-site/dfs.namenode.kerberos.principal",
+                "local_username" : "${hadoop-env/hdfs_user}"
+              },
+              "keytab": {
+                "file": "${keytab_dir}/nn.service.keytab",
+                "owner": {
+                  "name": "${hadoop-env/hdfs_user}",
+                  "access": "r"
+                },
+                "group": {
+                  "name": "${cluster-env/user_group}",
+                  "access": ""
+                },
+                "configuration": "hdfs-site/dfs.namenode.keytab.file"
+              }
+            },
+            {
+              "name": "/spnego",
+              "principal": {
+                "configuration": "hdfs-site/dfs.namenode.kerberos.internal.spnego.principal"
+              }
+            },
+            {
+              "name": "/FAKEHDFS/FAKENAMENODE/namenode_nn",
+              "principal": {
+                "configuration": "ranger-hdfs-audit/xasecure.audit.jaas.Client.option.principal"
+              },
+              "keytab": {
+                "configuration": "ranger-hdfs-audit/xasecure.audit.jaas.Client.option.keyTab"
+              }
+            }
+          ],
+          "configurations": [
+            {
+              "hdfs-site": {
+                "dfs.block.access.token.enable": "true"
+              }
+            }
+          ]
+        },
+        {
+          "name": "FAKEDATANODE",
+          "identities": [
+            {
+              "name": "datanode_dn",
+              "principal": {
+                "value": "dn/_HOST@${realm}",
+                "type" : "service",
+                "configuration": "hdfs-site/dfs.datanode.kerberos.principal",
+                "local_username" : "${hadoop-env/hdfs_user}"
+              },
+              "keytab": {
+                "file": "${keytab_dir}/dn.service.keytab",
+                "owner": {
+                  "name": "${hadoop-env/hdfs_user}",
+                  "access": "r"
+                },
+                "group": {
+                  "name": "${cluster-env/user_group}",
+                  "access": ""
+                },
+                "configuration": "hdfs-site/dfs.datanode.keytab.file"
+              }
+            }
+          ],
+          "configurations" : [
+            {
+              "hdfs-site" : {
+                "dfs.datanode.address" : "0.0.0.0:1019",
+                "dfs.datanode.http.address": "0.0.0.0:1022"
+              }
+            }
+          ]
+        },
+        {
+          "name": "SECONDARY_FAKENAMENODE",
+          "identities": [
+            {
+              "name": "secondary_namenode_nn",
+              "principal": {
+                "value": "nn/_HOST@${realm}",
+                "type" : "service",
+                "configuration": "hdfs-site/dfs.secondary.namenode.kerberos.principal",
+                "local_username" : "${hadoop-env/hdfs_user}"
+              },
+              "keytab": {
+                "file": "${keytab_dir}/nn.service.keytab",
+                "owner": {
+                  "name": "${hadoop-env/hdfs_user}",
+                  "access": "r"
+                },
+                "group": {
+                  "name": "${cluster-env/user_group}",
+                  "access": ""
+                },
+                "configuration": "hdfs-site/dfs.secondary.namenode.keytab.file"
+              }
+            },
+            {
+              "name": "/spnego",
+              "principal": {
+                "configuration": "hdfs-site/dfs.secondary.namenode.kerberos.internal.spnego.principal"
+              }
+            }
+          ]
+        },
+        {
+          "name": "FAKENFS_GATEWAY",
+          "identities": [
+            {
+              "name": "nfsgateway",
+              "principal": {
+                "value": "nfs/_HOST@${realm}",
+                "type" : "service",
+                "configuration": "hdfs-site/nfs.kerberos.principal",
+                "local_username" : "${hadoop-env/hdfs_user}"
+              },
+              "keytab": {
+                "file": "${keytab_dir}/nfs.service.keytab",
+                "owner": {
+                  "name": "${hadoop-env/hdfs_user}",
+                  "access": "r"
+                },
+                "group": {
+                  "name": "${cluster-env/user_group}",
+                  "access": ""
+                },
+                "configuration": "hdfs-site/nfs.keytab.file"
+              }
+            }
+          ]
+        },
+        {
+          "name": "FAKEJOURNALNODE",
+          "identities": [
+            {
+              "name": "journalnode_jn",
+              "principal": {
+                "value": "jn/_HOST@${realm}",
+                "type" : "service",
+                "configuration": "hdfs-site/dfs.journalnode.kerberos.principal",
+                "local_username" : "${hadoop-env/hdfs_user}"
+              },
+              "keytab": {
+                "file": "${keytab_dir}/jn.service.keytab",
+                "owner": {
+                  "name": "${hadoop-env/hdfs_user}",
+                  "access": "r"
+                },
+                "group": {
+                  "name": "${cluster-env/user_group}",
+                  "access": ""
+                },
+                "configuration": "hdfs-site/dfs.journalnode.keytab.file"
+              }
+            },
+            {
+              "name": "/spnego",
+              "principal": {
+                "configuration": "hdfs-site/dfs.journalnode.kerberos.internal.spnego.principal"
+              }
+            }
+          ]
+        }
+      ]
+    }
+  ]
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/984d4605/ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHDFS/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHDFS/metainfo.xml
b/ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHDFS/metainfo.xml
new file mode 100644
index 0000000..14f15fc
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHDFS/metainfo.xml
@@ -0,0 +1,266 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>FAKEHDFS</name>
+      <displayName>FAKEHDFS</displayName>
+      <comment>Apache Hadoop Distributed File System</comment>
+      <version>2.7.1.2.3</version>
+
+      <components>
+        <component>
+          <name>FAKENAMENODE</name>
+          <displayName>FAKEHNameNode</displayName>
+          <category>MASTER</category>
+          <cardinality>1-2</cardinality>
+          <versionAdvertised>false</versionAdvertised>
+          <reassignAllowed>true</reassignAllowed>
+          <commandScript>
+            <script>scripts/namenode.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>1800</timeout>
+          </commandScript>
+          <logs>
+            <log>
+              <logId>hdfs_namenode</logId>
+              <primary>true</primary>
+            </log>
+            <log>
+              <logId>hdfs_audit</logId>
+            </log>
+          </logs>
+          <customCommands>
+            <customCommand>
+              <name>DECOMMISSION</name>
+              <commandScript>
+                <script>scripts/namenode.py</script>
+                <scriptType>PYTHON</scriptType>
+                <timeout>600</timeout>
+              </commandScript>
+            </customCommand>
+            <customCommand>
+              <name>REBALANCEFAKEHDFS</name>
+              <background>true</background>
+              <commandScript>
+                <script>scripts/namenode.py</script>
+                <scriptType>PYTHON</scriptType>
+              </commandScript>
+            </customCommand>
+          </customCommands>
+        </component>
+
+        <component>
+          <name>FAKEDATANODE</name>
+          <displayName>FAKEDataNode</displayName>
+          <category>SLAVE</category>
+          <cardinality>1+</cardinality>
+          <versionAdvertised>true</versionAdvertised>
+          <decommissionAllowed>true</decommissionAllowed>
+          <commandScript>
+            <script>scripts/datanode.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>1200</timeout>
+          </commandScript>
+          <bulkCommands>
+            <displayName>FAKEDataNodes</displayName>
+            <!-- Used by decommission and recommission -->
+            <masterComponent>FAKENAMENODE</masterComponent>
+          </bulkCommands>
+          <logs>
+            <log>
+              <logId>hdfs_datanode</logId>
+              <primary>true</primary>
+            </log>
+          </logs>
+        </component>
+
+        <component>
+          <name>SECONDARY_FAKENAMENODE</name>
+          <displayName>SFAKEHNameNode</displayName>
+          <cardinality>1</cardinality>
+          <versionAdvertised>false</versionAdvertised>
+          <reassignAllowed>true</reassignAllowed>
+          <category>MASTER</category>
+          <commandScript>
+            <script>scripts/snamenode.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>1200</timeout>
+          </commandScript>
+          <logs>
+            <log>
+              <logId>hdfs_secondarynamenode</logId>
+              <primary>true</primary>
+            </log>
+          </logs>
+        </component>
+
+        <component>
+          <name>FAKEHDFS_CLIENT</name>
+          <displayName>FAKEHDFS Client</displayName>
+          <category>CLIENT</category>
+          <cardinality>1+</cardinality>
+          <versionAdvertised>false</versionAdvertised>
+          <commandScript>
+            <script>scripts/hdfs_client.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>1200</timeout>
+          </commandScript>
+          <configFiles>
+            <configFile>
+              <type>xml</type>
+              <fileName>hdfs-site.xml</fileName>
+              <dictionaryName>hdfs-site</dictionaryName>
+            </configFile>
+            <configFile>
+              <type>xml</type>
+              <fileName>core-site.xml</fileName>
+              <dictionaryName>core-site</dictionaryName>
+            </configFile>
+            <configFile>
+              <type>env</type>
+              <fileName>log4j.properties</fileName>
+              <dictionaryName>hdfs-log4j,yarn-log4j</dictionaryName>
+            </configFile>
+            <configFile>
+              <type>env</type>
+              <fileName>hadoop-env.sh</fileName>
+              <dictionaryName>hadoop-env</dictionaryName>
+            </configFile>
+          </configFiles>
+        </component>
+
+        <component>
+          <name>FAKEJOURNALNODE</name>
+          <displayName>FAKEJournalNode</displayName>
+          <category>SLAVE</category>
+          <cardinality>0+</cardinality>
+          <versionAdvertised>false</versionAdvertised>
+          <commandScript>
+            <script>scripts/journalnode.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>1200</timeout>
+          </commandScript>
+          <logs>
+            <log>
+              <logId>hdfs_journalnode</logId>
+              <primary>true</primary>
+            </log>
+          </logs>
+          <dependencies>
+            <dependency>
+              <name>FAKEHDFS/FAKEHDFS_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+          </dependencies>
+        </component>
+
+        <component>
+          <name>FAKEZKFC</name>
+          <displayName>FAKEZKFailoverController</displayName>
+          <category>SLAVE</category>
+          <cardinality>0+</cardinality>
+          <versionAdvertised>false</versionAdvertised>
+          <commandScript>
+            <script>scripts/zkfc_slave.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>1200</timeout>
+          </commandScript>
+          <logs>
+            <log>
+              <logId>hdfs_zkfc</logId>
+              <primary>true</primary>
+            </log>
+          </logs>
+        </component>
+
+        <component>
+          <name>FAKENFS_GATEWAY</name>
+          <displayName>FAKENFSGateway</displayName>
+          <cardinality>0+</cardinality>
+          <versionAdvertised>false</versionAdvertised>
+          <category>SLAVE</category>
+          <commandScript>
+            <script>scripts/nfsgateway.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>1200</timeout>
+          </commandScript>
+          <dependencies>
+            <dependency>
+              <name>FAKEHDFS/FAKEHDFS_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+          </dependencies>
+        </component>
+      </components>
+
+      <commandScript>
+        <script>scripts/service_check.py</script>
+        <scriptType>PYTHON</scriptType>
+        <timeout>300</timeout>
+      </commandScript>
+
+      <requiredServices>
+        <service>FAKEZOOKEEPER</service>
+      </requiredServices>
+
+      <configuration-dependencies>
+        <config-type>hdfs-alert-config</config-type>
+        <config-type>core-site</config-type>
+        <config-type>hdfs-site</config-type>
+        <config-type>hadoop-env</config-type>
+        <config-type>hadoop-policy</config-type>
+        <config-type>hdfs-log4j</config-type>
+        <config-type>ranger-hdfs-plugin-properties</config-type>
+        <config-type>ssl-client</config-type>
+        <config-type>ssl-server</config-type>
+        <config-type>ranger-hdfs-audit</config-type>
+        <config-type>ranger-hdfs-policymgr-ssl</config-type>
+        <config-type>ranger-hdfs-security</config-type>
+        <config-type>ams-ssl-client</config-type>
+        <config-type>hadoop-metrics2.properties</config-type>
+      </configuration-dependencies>
+      <restartRequiredAfterRackChange>true</restartRequiredAfterRackChange>
+
+      <!-- No packages to install. -->
+      <osSpecifics></osSpecifics>
+
+      <quickLinksConfigurations>
+        <quickLinksConfiguration>
+          <fileName>quicklinks.json</fileName>
+          <default>true</default>
+        </quickLinksConfiguration>
+      </quickLinksConfigurations>
+
+      <themes>
+        <theme>
+          <fileName>theme.json</fileName>
+          <default>true</default>
+        </theme>
+      </themes>
+
+    </service>
+  </services>
+</metainfo>
\ No newline at end of file


Mime
View raw message