incubator-ambari-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From omal...@apache.org
Subject svn commit: r1187472 - in /incubator/ambari/trunk: ./ client/src/main/java/org/apache/ambari/common/rest/entities/ client/src/main/resources/org/ client/src/main/resources/org/apache/ client/src/main/resources/org/apache/ambari/ client/src/main/resourc...
Date Fri, 21 Oct 2011 17:22:26 GMT
Author: omalley
Date: Fri Oct 21 17:22:25 2011
New Revision: 1187472

URL: http://svn.apache.org/viewvc?rev=1187472&view=rev
Log:
AMBARI-91. Move the example blueprints into xml resources. (omalley)

Added:
    incubator/ambari/trunk/client/src/main/resources/org/
    incubator/ambari/trunk/client/src/main/resources/org/apache/
    incubator/ambari/trunk/client/src/main/resources/org/apache/ambari/
    incubator/ambari/trunk/client/src/main/resources/org/apache/ambari/common/
    incubator/ambari/trunk/client/src/main/resources/org/apache/ambari/common/rest/
    incubator/ambari/trunk/client/src/main/resources/org/apache/ambari/common/rest/entities/
    incubator/ambari/trunk/client/src/main/resources/org/apache/ambari/common/rest/entities/jaxb.index
    incubator/ambari/trunk/controller/src/main/resources/org/apache/ambari/stacks/
    incubator/ambari/trunk/controller/src/main/resources/org/apache/ambari/stacks/cluster123-0.xml
    incubator/ambari/trunk/controller/src/main/resources/org/apache/ambari/stacks/cluster124-0.xml
    incubator/ambari/trunk/controller/src/main/resources/org/apache/ambari/stacks/hadoop-security-0.xml
Modified:
    incubator/ambari/trunk/.gitignore
    incubator/ambari/trunk/CHANGES.txt
    incubator/ambari/trunk/client/src/main/java/org/apache/ambari/common/rest/entities/Blueprint.java
    incubator/ambari/trunk/client/src/main/java/org/apache/ambari/common/rest/entities/Component.java
    incubator/ambari/trunk/controller/src/main/java/org/apache/ambari/controller/Blueprints.java
    incubator/ambari/trunk/controller/src/main/java/org/apache/ambari/controller/Clusters.java

Modified: incubator/ambari/trunk/.gitignore
URL: http://svn.apache.org/viewvc/incubator/ambari/trunk/.gitignore?rev=1187472&r1=1187471&r2=1187472&view=diff
==============================================================================
--- incubator/ambari/trunk/.gitignore (original)
+++ incubator/ambari/trunk/.gitignore Fri Oct 21 17:22:25 2011
@@ -14,6 +14,7 @@
 agent/src/main/python/hms_agent.egg-info
 target
 *~
+*.pyc
 .classpath
 .project
 .settings

Modified: incubator/ambari/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/incubator/ambari/trunk/CHANGES.txt?rev=1187472&r1=1187471&r2=1187472&view=diff
==============================================================================
--- incubator/ambari/trunk/CHANGES.txt (original)
+++ incubator/ambari/trunk/CHANGES.txt Fri Oct 21 17:22:25 2011
@@ -2,15 +2,20 @@ Ambari Change log
 
 Release 0.1.0 - unreleased
 
+  AMBARI-91. Move the example blueprints into xml resources. (omalley)
+
   AMBARI-90. Implement nodes get/list CLI (vgogate)
 
   AMBARI-89. Implement blueprint history CLI (vgogate)
 
-  AMBARI-88. Update cluster nodes reservation is giving null pointer exception during cluster creation (vgogate)
+  AMBARI-88. Update cluster nodes reservation is giving null pointer
+  exception during cluster creation (vgogate)
 
-  AMBARI-87. Importing pre-existing blueprint to Ambari through CLI "blueprint add" gives wrong error message (vgogate)
+  AMBARI-87. Importing pre-existing blueprint to Ambari through CLI
+  "blueprint add" gives wrong error message (vgogate)
 
-  AMBARI-86. Validate blueprint referenced by cluster exist including it's parent blueprints (vgogate)
+  AMBARI-86. Validate blueprint referenced by cluster exist including
+  it's parent blueprints (vgogate)
 
   AMBARI-85. Adds handling of new states to do with preinstall actions (ddas)
 

Modified: incubator/ambari/trunk/client/src/main/java/org/apache/ambari/common/rest/entities/Blueprint.java
URL: http://svn.apache.org/viewvc/incubator/ambari/trunk/client/src/main/java/org/apache/ambari/common/rest/entities/Blueprint.java?rev=1187472&r1=1187471&r2=1187472&view=diff
==============================================================================
--- incubator/ambari/trunk/client/src/main/java/org/apache/ambari/common/rest/entities/Blueprint.java
(original)
+++ incubator/ambari/trunk/client/src/main/java/org/apache/ambari/common/rest/entities/Blueprint.java
Fri Oct 21 17:22:25 2011
@@ -17,6 +17,7 @@
  */
 package org.apache.ambari.common.rest.entities;
 
+import java.io.IOException;
 import java.util.Date;
 import java.util.GregorianCalendar;
 import java.util.List;
@@ -28,6 +29,7 @@ import javax.xml.bind.annotation.XmlElem
 import javax.xml.bind.annotation.XmlRootElement;
 import javax.xml.bind.annotation.XmlSchemaType;
 import javax.xml.bind.annotation.XmlType;
+import javax.xml.datatype.DatatypeConfigurationException;
 import javax.xml.datatype.DatatypeFactory;
 import javax.xml.datatype.XMLGregorianCalendar;
 
@@ -37,7 +39,7 @@ import javax.xml.datatype.XMLGregorianCa
  * 
  */
 @XmlAccessorType(XmlAccessType.FIELD)
-@XmlType(name = "Blueprint", propOrder = {
+@XmlType(name = "blueprint", propOrder = {
     "name",
     "revision",
     "parentName",
@@ -47,7 +49,7 @@ import javax.xml.datatype.XMLGregorianCa
     "configuration",
     "components"
 })
-@XmlRootElement
+@XmlRootElement(name="blueprint")
 public class Blueprint {
 
     @XmlAttribute
@@ -187,13 +189,18 @@ public class Blueprint {
     /**
      * @param creationTime the creationTime to set
      */
-    public void setCreationTime(Date creationTime) throws Exception {
+    public void setCreationTime(Date creationTime) throws IOException {
         if (creationTime == null) {
             this.creationTime = null;
         } else {
             GregorianCalendar cal = new GregorianCalendar();
             cal.setTime(creationTime);
-            this.creationTime = DatatypeFactory.newInstance().newXMLGregorianCalendar(cal);
+            try {
+              this.creationTime = 
+                  DatatypeFactory.newInstance().newXMLGregorianCalendar(cal);
+            } catch (DatatypeConfigurationException e) {
+              throw new IOException("can't create calendar", e);
+            }
         }
     }
 }

Modified: incubator/ambari/trunk/client/src/main/java/org/apache/ambari/common/rest/entities/Component.java
URL: http://svn.apache.org/viewvc/incubator/ambari/trunk/client/src/main/java/org/apache/ambari/common/rest/entities/Component.java?rev=1187472&r1=1187471&r2=1187472&view=diff
==============================================================================
--- incubator/ambari/trunk/client/src/main/java/org/apache/ambari/common/rest/entities/Component.java
(original)
+++ incubator/ambari/trunk/client/src/main/java/org/apache/ambari/common/rest/entities/Component.java
Fri Oct 21 17:22:25 2011
@@ -33,6 +33,7 @@ import javax.xml.bind.annotation.XmlType
 @XmlAccessorType(XmlAccessType.FIELD)
 @XmlType(name = "CategoryType", propOrder = {
     "definition",
+    "configuration",
     "roles"
 })
 @XmlRootElement
@@ -49,6 +50,8 @@ public class Component {
     @XmlElement
     protected ComponentDefinition definition;
     @XmlElement
+    protected Configuration configuration;
+    @XmlElement
     protected List<Role> roles;
 
     /**
@@ -154,4 +157,20 @@ public class Component {
     public void setProvider(String value) {
       provider = value;
     }
+    
+    /**
+     * Get the configuration for all of the active roles.
+     * @return the configuration
+     */
+    public Configuration getConfiguration() {
+      return configuration;
+    }
+    
+    /**
+     * Set the configuration for all of the active roles
+     * @param conf the configuration
+     */
+    public void setConfiguration(Configuration conf) {
+      configuration = conf;
+    }
 }

Added: incubator/ambari/trunk/client/src/main/resources/org/apache/ambari/common/rest/entities/jaxb.index
URL: http://svn.apache.org/viewvc/incubator/ambari/trunk/client/src/main/resources/org/apache/ambari/common/rest/entities/jaxb.index?rev=1187472&view=auto
==============================================================================
--- incubator/ambari/trunk/client/src/main/resources/org/apache/ambari/common/rest/entities/jaxb.index
(added)
+++ incubator/ambari/trunk/client/src/main/resources/org/apache/ambari/common/rest/entities/jaxb.index
Fri Oct 21 17:22:25 2011
@@ -0,0 +1,2 @@
+ClusterDefinition
+Blueprint
\ No newline at end of file

Modified: incubator/ambari/trunk/controller/src/main/java/org/apache/ambari/controller/Blueprints.java
URL: http://svn.apache.org/viewvc/incubator/ambari/trunk/controller/src/main/java/org/apache/ambari/controller/Blueprints.java?rev=1187472&r1=1187471&r2=1187472&view=diff
==============================================================================
--- incubator/ambari/trunk/controller/src/main/java/org/apache/ambari/controller/Blueprints.java
(original)
+++ incubator/ambari/trunk/controller/src/main/java/org/apache/ambari/controller/Blueprints.java
Fri Oct 21 17:22:25 2011
@@ -35,204 +35,51 @@ import java.util.concurrent.ConcurrentHa
 import javax.ws.rs.WebApplicationException;
 import javax.ws.rs.core.Response;
 import javax.xml.bind.JAXBContext;
+import javax.xml.bind.JAXBException;
 import javax.xml.bind.Unmarshaller;
 
 import org.apache.ambari.common.rest.entities.Blueprint;
 import org.apache.ambari.common.rest.entities.BlueprintInformation;
 import org.apache.ambari.common.rest.entities.Component;
-import org.apache.ambari.common.rest.entities.ComponentDefinition;
-import org.apache.ambari.common.rest.entities.Configuration;
-import org.apache.ambari.common.rest.entities.ConfigurationCategory;
-import org.apache.ambari.common.rest.entities.RepositoryKind;
 import org.apache.ambari.common.rest.entities.Property;
-import org.apache.ambari.common.rest.entities.Role;
-import org.apache.ambari.resource.statemachine.ClusterStateFSM;
-import org.codehaus.jackson.map.ObjectMapper;
-import org.codehaus.jettison.json.JSONArray;
 import org.codehaus.jettison.json.JSONException;
 import org.codehaus.jettison.json.JSONObject;
+import org.mortbay.log.Log;
 
 public class Blueprints {
 
     private static Blueprints BlueprintsRef=null;
         
     private Blueprints() {
-        this.createDummyBlueprint("MyDummyBlueprint", "0", "MyDummySiteBlueprint", "0");
+      try {
+        JAXBContext jaxbContext = 
+            JAXBContext.newInstance("org.apache.ambari.common.rest.entities");
+        loadDummyBlueprint(jaxbContext, "hadoop-security", 0);
+        loadDummyBlueprint(jaxbContext, "cluster123", 0);
+        loadDummyBlueprint(jaxbContext, "cluster124", 0);
+      } catch (JAXBException e) {
+        throw new RuntimeException("Can't create jaxb context", e);
+      }
     }
     
-    public void createDummyBlueprint (String name, String revision, String parentName, String parentRevision) {
-        Blueprint bp = new Blueprint();
-        
-        bp.setName(name);
-        bp.setParentName(parentName);
-        bp.setRevision(revision);
-        bp.setParentRevision(parentRevision);
-        try {
-            bp.setCreationTime(new Date());
-        } catch (Exception e) {
-            System.out.println ("Error setting blueprint creation time");
-            e.printStackTrace();
-        }
-        
-        /*
-         * Repository URLs
-         */
-        List<RepositoryKind> prList = new ArrayList<RepositoryKind>();
-        RepositoryKind hdfsRepo = new RepositoryKind();
-        hdfsRepo.setKind("TAR");
-        List<String> repoURLs = new ArrayList<String>();
-        repoURLs.add("http://www.apache.org/dist/hadoop/common/");   
-        hdfsRepo.setUrls(repoURLs);
-        prList.add(hdfsRepo);
-        bp.setPackageRepositories(prList);
-        
-        /*
-         * Global Configuration
-         */
-        Configuration bpDefaultCfg = new Configuration();
-        
-        ConfigurationCategory ambari = new ConfigurationCategory();
-        ambari.setName("ambari");
-        ambari.getProperty().add(getProperty ("AMBARI_INSTALL_DIR", "/var/ambari"));
-        ambari.getProperty().add(getProperty ("AMBARI_HADOOP_JAVA_HOME", "/home/hms/apps/jdk1.6.0_27"));
-        ambari.getProperty().add(getProperty ("AMBARI_HADOOP_NN_DIR","/grid/2/hadoop/var/hdfs/name"));

-        ambari.getProperty().add(getProperty ("AMBARI_DATA_DIRS", "/grid/*"));
-        ambari.getProperty().add(getProperty ("AMBARI_HADOOP_SECURITY", "false"));
-        
-        ambari.getProperty().add(getProperty ("AMBARI_HADOOP_DN_ADDR", "DEFAULT"));
-        ambari.getProperty().add(getProperty ("AMBARI_HADOOP_DN_HTTP_ADDR", "DEFAULT"));
-        ambari.getProperty().add(getProperty ("AMBARI_HADOOP_NN_HOST", "DEFAULT"));
-        
-        
-
-        
-        ConfigurationCategory core_site = new ConfigurationCategory();
-        core_site.setName("core-site");
-        core_site.getProperty().add(getProperty ("local.realm","${KERBEROS_REALM}"));
-        core_site.getProperty().add(getProperty ("fs.default.name","hdfs://${HADOOP_NN_HOST}:8020"));
-        core_site.getProperty().add(getProperty ("fs.trash.interval","360"));
-        core_site.getProperty().add(getProperty ("hadoop.security.auth_to_local","RULE:[2:$1@$0]([jt]t@.*${KERBEROS_REALM})s/.*/${HADOOP_MR_USER}/ RULE:[2:$1@$0](hm@.*${KERBEROS_REALM})s/.*/${HADOOP_HDFS_USER}/ RULE:[2:$1@$0](rs@.*${KERBEROS_REALM})s/.*/${HADOOP_HDFS_USER}/ RULE:[2:$1@$0]([nd]n@.*${KERBEROS_REALM})s/.*/${HADOOP_HDFS_USER}/ RULE:[2:$1@$0](mapred@.*${KERBEROS_REALM})s/.*/${HADOOP_MR_USER}/ RULE:[2:$1@$0](hdfs@.*${KERBEROS_REALM})s/.*/${HADOOP_HDFS_USER}/ RULE:[2:$1@$0](mapredqa@.*${KERBEROS_REALM})s/.*/${HADOOP_MR_USER}/ RULE:[2:$1@$0](hdfsqa@.*${KERBEROS_REALM})s/.*/${HADOOP_HDFS_USER}/ DEFAULT"));
-        core_site.getProperty().add(getProperty ("hadoop.security.authentication","${SECURITY_TYPE}"));
-        core_site.getProperty().add(getProperty ("hadoop.security.authorization","${SECURITY}"));
-        core_site.getProperty().add(getProperty ("hadoop.security.groups.cache.secs","14400"));
-        core_site.getProperty().add(getProperty ("hadoop.kerberos.kinit.command","${KINIT}"));
-        core_site.getProperty().add(getProperty ("hadoop.http.filter.initializers","org.apache.hadoop.http.lib.StaticUserWebFilter"));
-        
-        ConfigurationCategory hdfs_site = new ConfigurationCategory();
-        hdfs_site.setName("hdfs-site");
-        hdfs_site.getProperty().add(getProperty ("dfs.name.dir","${HADOOP_NN_DIR}"));
-        hdfs_site.getProperty().add(getProperty ("dfs.data.dir","${HADOOP_DN_DIR}"));
-        hdfs_site.getProperty().add(getProperty ("dfs.safemode.threshold.pct","1.0f"));
-        hdfs_site.getProperty().add(getProperty ("dfs.datanode.address","${HADOOP_DN_ADDR}"));
-        hdfs_site.getProperty().add(getProperty ("dfs.datanode.http.address","${HADOOP_DN_HTTP_ADDR}"));
-        hdfs_site.getProperty().add(getProperty ("dfs.http.address","${HADOOP_NN_HOST}:50070"));
-        hdfs_site.getProperty().add(getProperty ("dfs.umaskmode","077"));
-        hdfs_site.getProperty().add(getProperty ("dfs.block.access.token.enable","${SECURITY}"));
-        hdfs_site.getProperty().add(getProperty ("dfs.namenode.kerberos.principal","nn/_HOST@${local.realm}"));
-        hdfs_site.getProperty().add(getProperty ("dfs.secondary.namenode.kerberos.principal","nn/_HOST@${local.realm}"));
-        hdfs_site.getProperty().add(getProperty ("dfs.namenode.kerberos.https.principal","host/_HOST@${local.realm}"));
-        hdfs_site.getProperty().add(getProperty ("dfs.secondary.namenode.kerberos.https.principal","host/_HOST@${local.realm}"));
-        hdfs_site.getProperty().add(getProperty ("dfs.secondary.https.port","50490"));
-        hdfs_site.getProperty().add(getProperty ("dfs.datanode.kerberos.principal","dn/_HOST@${local.realm}"));
-        hdfs_site.getProperty().add(getProperty ("dfs.namenode.keytab.file","/etc/security/keytabs/nn.service.keytab"));
-        hdfs_site.getProperty().add(getProperty ("dfs.secondary.namenode.keytab.file","/etc/security/keytabs/nn.service.keytab"));
-        hdfs_site.getProperty().add(getProperty ("dfs.datanode.keytab.file","/etc/security/keytabs/dn.service.keytab"));
-        hdfs_site.getProperty().add(getProperty ("dfs.https.port","50470"));
-        hdfs_site.getProperty().add(getProperty ("dfs.https.address","${HADOOP_NN_HOST}:50470"));
-        hdfs_site.getProperty().add(getProperty ("dfs.datanode.data.dir.perm","700"));
-        hdfs_site.getProperty().add(getProperty ("dfs.cluster.administrators","${HADOOP_HDFS_USER}"));
-        hdfs_site.getProperty().add(getProperty ("dfs.permissions.superusergroup","${HADOOP_GROUP}"));
-        hdfs_site.getProperty().add(getProperty ("dfs.namenode.http-address","${HADOOP_NN_HOST}:50070"));
-        hdfs_site.getProperty().add(getProperty ("dfs.namenode.https-address","${HADOOP_NN_HOST}:50470"));
-        hdfs_site.getProperty().add(getProperty ("dfs.secondary.http.address","${HADOOP_SNN_HOST}:50090"));
-        hdfs_site.getProperty().add(getProperty ("dfs.hosts","${HADOOP_CONF_DIR}/dfs.include"));
-        hdfs_site.getProperty().add(getProperty ("dfs.hosts.exclude","${HADOOP_CONF_DIR}/dfs.exclude"));
-        
-        ConfigurationCategory hadoop_env = new ConfigurationCategory();
-        hadoop_env.setName("hadoop-env");
-        hadoop_env.getProperty().add(getProperty ("JAVA_HOME","${JAVA_HOME}"));
-        hadoop_env.getProperty().add(getProperty ("HADOOP_CONF_DIR","${HADOOP_CONF_DIR:-\"/etc/hadoop\"}"));
-        hadoop_env.getProperty().add(getProperty ("HADOOP_OPTS","\"-Djava.net.preferIPv4Stack=true
$HADOOP_OPTS\""));
-        hadoop_env.getProperty().add(getProperty ("HADOOP_NAMENODE_OPTS","\"-Dsecurity.audit.logger=INFO,DRFAS
-Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_NAMENODE_OPTS}\""));
-        hadoop_env.getProperty().add(getProperty ("HADOOP_JOBTRACKER_OPTS","\"-Dsecurity.audit.logger=INFO,DRFAS
-Dmapred.audit.logger=INFO,MRAUDIT -Dmapred.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}\""));
-        hadoop_env.getProperty().add(getProperty ("HADOOP_TASKTRACKER_OPTS","\"-Dsecurity.audit.logger=ERROR,console
-Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}\""));
-        hadoop_env.getProperty().add(getProperty ("HADOOP_DATANODE_OPTS","\"-Dsecurity.audit.logger=ERROR,DRFAS
${HADOOP_DATANODE_OPTS}\""));
-        hadoop_env.getProperty().add(getProperty ("HADOOP_SECONDARYNAMENODE_OPTS","\"-Dsecurity.audit.logger=INFO,DRFAS
-Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_SECONDARYNAMENODE_OPTS}\""));
-        hadoop_env.getProperty().add(getProperty ("HADOOP_CLIENT_OPTS","\"-Xmx128m ${HADOOP_CLIENT_OPTS}\""));
-        hadoop_env.getProperty().add(getProperty ("HADOOP_SECURE_DN_USER","${HADOOP_SECURE_DN_USER}"));
-        hadoop_env.getProperty().add(getProperty ("HADOOP_LOG_DIR","${HADOOP_LOG_DIR}/$USER"));
-        hadoop_env.getProperty().add(getProperty ("HADOOP_SECURE_DN_LOG_DIR","${HADOOP_LOG_DIR}/${HADOOP_HDFS_USER}"));
-        hadoop_env.getProperty().add(getProperty ("HADOOP_PID_DIR","${HADOOP_PID_DIR}"));
-        hadoop_env.getProperty().add(getProperty ("HADOOP_SECURE_DN_PID_DIR","${HADOOP_PID_DIR}"));
-        hadoop_env.getProperty().add(getProperty ("HADOOP_IDENT_STRING","$USER"));
-
-        ConfigurationCategory hadoop_metrics2 = new ConfigurationCategory();
-        hadoop_metrics2.setName("hadoop_metrics2.properties");
-        hadoop_metrics2.getProperty().add(getProperty ("*.period","60"));
-        
-        bpDefaultCfg.getCategory().add(core_site);
-        bpDefaultCfg.getCategory().add(hdfs_site);
-        bpDefaultCfg.getCategory().add(hadoop_env);
-        bpDefaultCfg.getCategory().add(hadoop_metrics2);
-        bpDefaultCfg.getCategory().add(ambari);
-        
-        bp.setConfiguration(bpDefaultCfg);
-        
-        /*
-         * Define and add common component
-         */
-        List<Component> compList = new ArrayList<Component>();
-        
-        Component commonC = new Component(); 
-        commonC.setName("common");
-        commonC.setArchitecture("x86_64");
-        commonC.setVersion("0.20.205.0");
-        commonC.setProvider("org.apache.hadoop");
-        ComponentDefinition commonCD = new ComponentDefinition(); 
-        commonCD.setGroup("org.apache.ambari");
-        commonCD.setDefinition("hadoop-common");
-        commonCD.setVersion("0.1.0");
-        commonC.setDefinition(commonCD);
-        
-        compList.add(commonC);
-        
-        /*
-         * Define and add hdfs component
-         */
-        Component hdfsC = new Component(); 
-        hdfsC.setName("hdfs");
-        hdfsC.setArchitecture("x86_64");
-        hdfsC.setVersion("0.20.205.0");
-        hdfsC.setProvider("org.apache.hadoop");
-        ComponentDefinition hdfsCD = new ComponentDefinition(); 
-        hdfsCD.setGroup("org.apache.ambari");
-        hdfsCD.setDefinition("hadoop-hdfs");
-        hdfsCD.setVersion("0.1.0");
-        hdfsC.setDefinition(hdfsCD);
-        /*
-         * Set the list of roles to hdfsC
-         */
-        List<Role> hdfsRoleList = new ArrayList<Role>();
-        Role hdfs_nn_role = new Role();
-        hdfs_nn_role.setName("namenode");
-        //hdfs_nn_role.setConfiguration(bpDefaultCfg);
-        
-        Role hdfs_dn_role = new Role();
-        hdfs_dn_role.setName("datanode");
-        //hdfs_dn_role.setConfiguration(bpDefaultCfg);
-        
-        hdfsRoleList.add(hdfs_nn_role);
-        hdfsRoleList.add(hdfs_dn_role);
-        hdfsC.setRoles(hdfsRoleList);
-       
-        compList.add(hdfsC);  
-        bp.setComponents(compList);
-     
-      
+    public void loadDummyBlueprint (JAXBContext jaxbContext,
+                                    String name, int revision) {
         try {
-            addBlueprint (bp);
-        }catch (Exception e) {
-            e.printStackTrace();
+            Unmarshaller um = jaxbContext.createUnmarshaller();
+            String resourceName =
+                "org/apache/ambari/stacks/" + name + "-" + revision + ".xml";
+            InputStream in = 
+                ClassLoader.getSystemResourceAsStream(resourceName);
+            Blueprint bp = (Blueprint) um.unmarshal(in);
+            bp.setName(name);
+            bp.setRevision(Integer.toString(revision));
+            addBlueprint(bp);
+        } catch (IOException e) {
+            Log.warn("Problem loading blueprint " + name + " rev " + revision,
+                     e);
+        } catch (JAXBException e) {
+          Log.warn("Problem loading blueprint " + name + " rev " + revision,
+              e);
         }
     }
     
@@ -284,7 +131,7 @@ public class Blueprints {
     /*
      * Add or update the blueprint
      */
-    public Blueprint addBlueprint(Blueprint bp) throws Exception {
+    public Blueprint addBlueprint(Blueprint bp) throws IOException {
         
         /*
          * Validate and set the defaults
@@ -310,7 +157,7 @@ public class Blueprints {
     /*
      * Import the default blueprint from the URL location
      */
-    public Blueprint importDefaultBlueprint (String locationURL) throws Exception {
+    public Blueprint importDefaultBlueprint (String locationURL) throws IOException {
         Blueprint blueprint;
         URL blueprintUrl;
         try {
@@ -335,7 +182,7 @@ public class Blueprints {
     /*
      * Validate the blueprint before importing into controller
      */
-    public void validateAndSetBlueprintDefaults(Blueprint blueprint) throws Exception {
+    public void validateAndSetBlueprintDefaults(Blueprint blueprint) throws IOException {
         
         if (blueprint.getName() == null || blueprint.getName().equals("")) {
             String msg = "Blueprint must be associated with non-empty name";
@@ -482,7 +329,6 @@ public class Blueprints {
     }
 
     public static JSONObject readJsonFromUrl(String url) throws IOException, JSONException {
-        ObjectMapper m = new ObjectMapper();
         InputStream is = new URL(url).openStream();
         try {
             BufferedReader rd = new BufferedReader(new InputStreamReader(is, Charset.forName("UTF-8")));

Modified: incubator/ambari/trunk/controller/src/main/java/org/apache/ambari/controller/Clusters.java
URL: http://svn.apache.org/viewvc/incubator/ambari/trunk/controller/src/main/java/org/apache/ambari/controller/Clusters.java?rev=1187472&r1=1187471&r2=1187472&view=diff
==============================================================================
--- incubator/ambari/trunk/controller/src/main/java/org/apache/ambari/controller/Clusters.java
(original)
+++ incubator/ambari/trunk/controller/src/main/java/org/apache/ambari/controller/Clusters.java
Fri Oct 21 17:22:25 2011
@@ -61,16 +61,6 @@ public class Clusters {
     private static Clusters ClustersTypeRef=null;
         
     private Clusters() {
-        /*
-         * TODO: Check if blueprint in cluster definition AND its parents already exists
-         */
-        Blueprints.getInstance().createDummyBlueprint("cluster123-site-site-site-blueprint", "0", null, null);
-        Blueprints.getInstance().createDummyBlueprint("cluster123-site-site-blueprint", "0", "cluster123-site-site-site-blueprint", "0");
-        Blueprints.getInstance().createDummyBlueprint("cluster123-site-blueprint", "0", "cluster123-site-site-blueprint", "0");
-        Blueprints.getInstance().createDummyBlueprint("cluster123-blueprint", "0", "cluster123-site-blueprint", "0");
-        Blueprints.getInstance().createDummyBlueprint("cluster123-blueprint", "1", "cluster123-site-blueprint", "0");
-        Blueprints.getInstance().createDummyBlueprint("cluster124-site-blueprint", "0", null, null);
-        Blueprints.getInstance().createDummyBlueprint("cluster124-blueprint", "0", "cluster124-site-blueprint", "0");
         
         /*
          * Cluster definition 
@@ -78,7 +68,7 @@ public class Clusters {
         ClusterDefinition cluster123 = new ClusterDefinition();
         
         cluster123.setName("blue.dev.Cluster123");
-        cluster123.setBlueprintName("cluster123-blueprint");
+        cluster123.setBlueprintName("cluster123");
         cluster123.setBlueprintRevision("0");
         cluster123.setDescription("cluster123 - development cluster");
         cluster123.setGoalState(ClusterState.CLUSTER_STATE_ATTIC);
@@ -116,7 +106,7 @@ public class Clusters {
          */
         ClusterDefinition cluster124 = new ClusterDefinition();
         cluster124.setName("blue.research.Cluster124");
-        cluster124.setBlueprintName("cluster124-blueprint");
+        cluster124.setBlueprintName("cluster124");
         cluster124.setBlueprintRevision("0");
         cluster124.setDescription("cluster124 - research cluster");
         cluster124.setGoalState(ClusterState.CLUSTER_STATE_INACTIVE);

Added: incubator/ambari/trunk/controller/src/main/resources/org/apache/ambari/stacks/cluster123-0.xml
URL: http://svn.apache.org/viewvc/incubator/ambari/trunk/controller/src/main/resources/org/apache/ambari/stacks/cluster123-0.xml?rev=1187472&view=auto
==============================================================================
--- incubator/ambari/trunk/controller/src/main/resources/org/apache/ambari/stacks/cluster123-0.xml
(added)
+++ incubator/ambari/trunk/controller/src/main/resources/org/apache/ambari/stacks/cluster123-0.xml
Fri Oct 21 17:22:25 2011
@@ -0,0 +1,24 @@
+<blueprint parentName="hadoop-security" parentRevision="0">
+    <configuration>
+        <category name="ambari">
+            <property name="data.dirs" value="/grid/*" />
+            <property name="hdfs.user" value="hrt_hdfs" />
+            <property name="mapreduce.user" value="hrt_mapred" />
+            <property name="hbase.user" value="hrt_hbase" />
+            <property name="hcat.user" value="hrt_hcat" />
+            <property name="user.realm" value="HORTON.YGRIDCORE.NET" />
+        </category>
+        <category name="hadoop-env">
+            <property name="HADOOP_CLIENT_OPTS" 
+                      value="-Xmx256m ${HADOOP_CLIENT_OPTS}" />
+        </category>
+    </configuration>
+    <components name="hdfs">
+      <configuration>
+        <category name="hadoop-env">
+          <property name="HADOOP_NAMENODE_OPTS" 
+                    value="-Xmx512m -Dsecurity.audit.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_NAMENODE_OPTS}" />
+        </category>
+      </configuration>
+    </components>
+</blueprint>

Added: incubator/ambari/trunk/controller/src/main/resources/org/apache/ambari/stacks/cluster124-0.xml
URL: http://svn.apache.org/viewvc/incubator/ambari/trunk/controller/src/main/resources/org/apache/ambari/stacks/cluster124-0.xml?rev=1187472&view=auto
==============================================================================
--- incubator/ambari/trunk/controller/src/main/resources/org/apache/ambari/stacks/cluster124-0.xml
(added)
+++ incubator/ambari/trunk/controller/src/main/resources/org/apache/ambari/stacks/cluster124-0.xml
Fri Oct 21 17:22:25 2011
@@ -0,0 +1,24 @@
+<blueprint parentName="hadoop-security" parentRevision="0">
+    <configuration>
+        <category name="ambari">
+            <property name="data.dirs" value="/grid/*" />
+            <property name="hdfs.user" value="hrt_hdfs" />
+            <property name="mapreduce.user" value="hrt_mapred" />
+            <property name="hbase.user" value="hrt_hbase" />
+            <property name="hcat.user" value="hrt_hcat" />
+            <property name="user.realm" value="HORTON.YGRIDCORE.NET" />
+        </category>
+        <category name="hadoop-env">
+            <property name="HADOOP_CLIENT_OPTS" 
+                      value="-Xmx256m ${HADOOP_CLIENT_OPTS}" />
+        </category>
+    </configuration>
+    <components name="hdfs">
+      <configuration>
+        <category name="hadoop-env">
+          <property name="HADOOP_NAMENODE_OPTS" 
+                    value="-Xmx512m -Dsecurity.audit.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_NAMENODE_OPTS}" />
+        </category>
+      </configuration>
+    </components>
+</blueprint>

Added: incubator/ambari/trunk/controller/src/main/resources/org/apache/ambari/stacks/hadoop-security-0.xml
URL: http://svn.apache.org/viewvc/incubator/ambari/trunk/controller/src/main/resources/org/apache/ambari/stacks/hadoop-security-0.xml?rev=1187472&view=auto
==============================================================================
--- incubator/ambari/trunk/controller/src/main/resources/org/apache/ambari/stacks/hadoop-security-0.xml
(added)
+++ incubator/ambari/trunk/controller/src/main/resources/org/apache/ambari/stacks/hadoop-security-0.xml
Fri Oct 21 17:22:25 2011
@@ -0,0 +1,159 @@
+<blueprint>
+    <repositories kind="TAR">
+        <urls>http://www.apache.org/dist/hadoop/common/</urls>
+    </repositories>
+    <configuration>
+        <category name="ambari">
+            <property name="data.prefix" value="ambari" />
+            <property name="hdfs.user" value="hdfs" />
+            <property name="namenode.principal" value="nn" />
+            <property name="datanode.principal" value="dn" />
+            <property name="mapreduce.user" value="mapred" />
+            <property name="jobtracker.principal" value="jt" />
+            <property name="tasktracker.principal" value="tt" />
+            <property name="hbase.user" value="hrt_hbase" />
+            <property name="hbasemaster.principal" value="hm" />
+            <property name="regionserver.principal" value="rs" />
+            <property name="hcat.user" value="hcat" />
+            <property name="hcat.principal" value="hcat" />
+            <property name="service.realm" value="${ambari.user.realm}" />
+            <property name="admin.group" value="hadoop" />
+            <property name="webauthfilter"
+                      value="org.apache.hadoop.http.lib.StaticUserWebFilter"/>
+        </category>
+        <category name="core-site">
+            <property name="fs.default.name" 
+                      value="hdfs://${ambari.namenode.host}:8020" />
+            <property name="fs.trash.interval" value="360" />
+            <property name="hadoop.security.authentication" value="kerberos" />
+            <property name="hadoop.security.authorization" value="true" />
+            <property name="hadoop.kerberos.kinit.command" 
+                      value="/usr/kerberos/bin/kinit" />
+            <property name="HADOOP_CONF_DIR" 
+                      value="${ambari.cluster.prefix}/stack/etc/hadoop" />
+        </category>
+        <category name="hdfs-site">
+            <property name="dfs.umaskmode" value="077" />
+            <property name="dfs.block.access.token.enable" value="true" />
+            <property name="dfs.namenode.kerberos.principal" 
+                      value="${ambari.namenode.principal}/_HOST@${ambari.service.realm}"
/>
+            <property name="dfs.namenode.kerberos.https.principal" 
+                      value="host/_HOST@${ambari.service.realm}" />
+            <property name="dfs.http.port" value="50070" />
+            <property name="dfs.https.port" value="50470" />
+            <property name="dfs.https.address" 
+                      value="${ambari.namenode.host}:${dfs.https.port}" />
+            <property name="dfs.namenode.http-address" 
+                      value="${ambari.namenode.host}:${dfs.http.port}" />
+            <property name="dfs.namenode.https-address" 
+                      value="${ambari.namenode.host}:${dfs.https.port}" />
+        </category>
+        <category name="hadoop-env">
+            <property name="JAVA_HOME" value="${ambari.cluster.prefix}/stack/share/java"
/>
+            <property name="HADOOP_OPTS" 
+                      value="-Djava.net.preferIPv4Stack=true $HADOOP_OPTS" />
+            <property name="HADOOP_JOBTRACKER_OPTS" 
+                      value="-Dsecurity.audit.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT
-Dmapred.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}" />
+            <property name="HADOOP_TASKTRACKER_OPTS" 
+                      value="-Dsecurity.audit.logger=ERROR,console -Dmapred.audit.logger=ERROR,console
${HADOOP_TASKTRACKER_OPTS}" />
+            <property name="HADOOP_CLIENT_OPTS" 
+                      value="-Xmx128m ${HADOOP_CLIENT_OPTS}" />
+            <property name="HADOOP_IDENT_STRING" 
+                      value="${ambari.cluster.name}" />
+        </category>
+        <category name="hadoop_metrics2">
+            <property name="*.period" value="60" />
+        </category>
+    </configuration>
+    <components architecture="x86_64" name="common" 
+                provider="org.apache.hadoop" version="0.20.205.0">
+        <definition definition="hadoop-common" group="org.apache.ambari" 
+                    version="0.1.0" />
+    </components>
+    <components architecture="x86_64" name="hdfs" 
+                provider="org.apache.hadoop" version="0.20.205.0">
+        <definition definition="hadoop-hdfs" group="org.apache.ambari" 
+                    version="0.1.0" />
+        <configuration>
+           <category name="hadoop-env">
+              <property name="HADOOP_LOG_DIR" 
+                        value="${ambari.cluster.prefix}/logs" />
+              <property name="HADOOP_SECURE_DN_LOG_DIR" 
+                        value="${ambari.cluster.prefix}/logs" />
+              <property name="HADOOP_PID_DIR" 
+                        value="${ambari.cluster.prefix}/pid" />
+              <property name="HADOOP_SECURE_DN_PID_DIR" 
+                        value="${ambari.cluster.prefix}/pid" />
+           </category>
+           <category name="core-site">
+              <property name="hadoop.security.groups.cache.secs" 
+                        value="14400" />
+              <property name="hadoop.http.filter.initializers" 
+                        value="${ambari.webauthfilter}"/>
+           </category>
+           <category name="hdfs-site">
+              <property name="dfs.secondary.namenode.kerberos.principal" 
+                        value="${dfs.namenode.kerberos.principal}" />
+            <property name="dfs.secondary.namenode.kerberos.https.principal" 
+                      value="${dfs.namenode.kerberos.https.principal}" />
+            <property name="dfs.secondary.https.port" value="50490" />
+            <property name="dfs.secondary.http.address" 
+                      value="${ambari.secondarynamenode.host}:${dfs.secondary.https.port}"
/>
+            <property name="dfs.datanode.kerberos.principal" 
+                      value="${ambari.datanode.principal}/_HOST@${ambari.service.realm}"
/>
+            <property name="dfs.namenode.keytab.file" 
+                      value="/etc/security/keytabs/nn.service.keytab" />
+            <property name="dfs.secondary.namenode.keytab.file" 
+                      value="/etc/security/keytabs/nn.service.keytab" />
+            <property name="dfs.datanode.keytab.file" 
+                      value="/etc/security/keytabs/dn.service.keytab" />
+           </category>
+        </configuration>
+        <roles name="namenode">
+           <configuration>
+              <category name="hadoop-env">
+                 <property name="HADOOP_NAMENODE_OPTS" 
+                           value="-Dsecurity.audit.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT
${HADOOP_NAMENODE_OPTS}" />
+                 <property name="HADOOP_SECONDARYNAMENODE_OPTS" 
+                           value="-Dsecurity.audit.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT
${HADOOP_SECONDARYNAMENODE_OPTS}"/>
+              </category>
+              <category name="core-site">
+                <property name="hadoop.security.auth_to_local" value="RULE:[1:$1@$0](.*@${ambari.user.realm})s/@.*//
+            RULE:[2:$1@$0](${ambari.jobtracker.principal}@${ambari.service.realm})s/.*/${ambari.mapreduce.user}/
+            RULE:[2:$1@$0](${ambari.tasktracker.principal}@${ambari.service.realm})s/.*/${ambari.mapreduce.user}/
+            RULE:[2:$1@$0](${ambari.namenode.principal}@${ambari.service.realm})s/.*/${ambari.hdfs.user}/
+            RULE:[2:$1@$0](${ambari.datanode.principal}@${ambari.service.realm})s/.*/${ambari.hdfs.user}/
+            RULE:[2:$1@$0](${ambari.hbasemaster.principal}@${ambari.service.realm})s/.*/${ambari.hbase.user}/
+            RULE:[2:$1@$0](${ambari.regionserver.principal}@${ambari.service.realm})s/.*/${ambari.hbase.user}/
+            RULE:[2:$1@$0](${ambari.hcat.principal}@${ambari.service.realm})s/.*/${ambari.hcat.user}/"
/>
+             </category>
+             <category name="hdfs-site">
+              <property name="dfs.name.dir" 
+                        value="${ambari.cluster.prefix}/data/namenode" />
+               <property name="dfs.safemode.threshold.pct" value="1.0f" />
+               <property name="dfs.hosts" 
+                         value="${HADOOP_CONF_DIR}/dfs.include" />
+               <property name="dfs.hosts.exclude" 
+                         value="${HADOOP_CONF_DIR}/dfs.exclude" />
+               <property name="dfs.cluster.administrators" 
+                         value="${ambari.hdfs.user}" />
+               <property name="dfs.permissions.superusergroup" 
+                         value="${ambari.admin.group}" />
+             </category>
+           </configuration>
+        </roles>
+        <roles name="datanode">
+           <configuration>
+              <category name="hadoop-env">
+                <property name="HADOOP_SECURE_DN_USER" 
+                          value="${ambari.hdfs.user}" />
+                <property name="HADOOP_DATANODE_OPTS" 
+                          value="-Dsecurity.audit.logger=ERROR,DRFAS ${HADOOP_DATANODE_OPTS}"
/>
+              </category>
+              <category name="hdfs-site">
+                <property name="dfs.datanode.data.dir.perm" value="700" />
+              </category>
+           </configuration>
+        </roles>
+    </components>
+</blueprint>



Mime
View raw message