incubator-ambari-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From yus...@apache.org
Subject svn commit: r1418917 [1/4] - in /incubator/ambari/branches/AMBARI-666: ./ ambari-web/app/ ambari-web/app/assets/data/wizard/stack/ ambari-web/app/assets/data/wizard/stack/hdp/ ambari-web/app/assets/data/wizard/stack/hdp/version01/ ambari-web/app/contro...
Date Sun, 09 Dec 2012 12:43:34 GMT
Author: yusaku
Date: Sun Dec  9 12:43:31 2012
New Revision: 1418917

URL: http://svn.apache.org/viewvc?rev=1418917&view=rev
Log:
AMBARI-1002. Integrate Installer with config APIs. (Jaimin Jetly via yusaku)

Added:
    incubator/ambari/branches/AMBARI-666/ambari-web/app/assets/data/wizard/stack/
    incubator/ambari/branches/AMBARI-666/ambari-web/app/assets/data/wizard/stack/hdp/
    incubator/ambari/branches/AMBARI-666/ambari-web/app/assets/data/wizard/stack/hdp/version0.1.json
    incubator/ambari/branches/AMBARI-666/ambari-web/app/assets/data/wizard/stack/hdp/version01/
    incubator/ambari/branches/AMBARI-666/ambari-web/app/assets/data/wizard/stack/hdp/version01/HDFS.json
    incubator/ambari/branches/AMBARI-666/ambari-web/app/assets/data/wizard/stack/stacks.json
    incubator/ambari/branches/AMBARI-666/ambari-web/app/data/configMapping.js
    incubator/ambari/branches/AMBARI-666/ambari-web/app/data/custom_configs.js
    incubator/ambari/branches/AMBARI-666/ambari-web/app/models/quick_links.js
Removed:
    incubator/ambari/branches/AMBARI-666/ambari-web/app/models/protoRelations.js
Modified:
    incubator/ambari/branches/AMBARI-666/AMBARI-666-CHANGES.txt
    incubator/ambari/branches/AMBARI-666/ambari-web/app/controllers/installer.js
    incubator/ambari/branches/AMBARI-666/ambari-web/app/controllers/wizard/step7_controller.js
    incubator/ambari/branches/AMBARI-666/ambari-web/app/controllers/wizard/step8_controller.js
    incubator/ambari/branches/AMBARI-666/ambari-web/app/data/config_properties.js
    incubator/ambari/branches/AMBARI-666/ambari-web/app/data/service_configs.js
    incubator/ambari/branches/AMBARI-666/ambari-web/app/models/service_config.js
    incubator/ambari/branches/AMBARI-666/ambari-web/app/router.js
    incubator/ambari/branches/AMBARI-666/ambari-web/app/routes/installer.js
    incubator/ambari/branches/AMBARI-666/ambari-web/app/utils/db.js
    incubator/ambari/branches/AMBARI-666/ambari-web/app/utils/helper.js
    incubator/ambari/branches/AMBARI-666/ambari-web/app/views/wizard/controls_view.js

Modified: incubator/ambari/branches/AMBARI-666/AMBARI-666-CHANGES.txt
URL: http://svn.apache.org/viewvc/incubator/ambari/branches/AMBARI-666/AMBARI-666-CHANGES.txt?rev=1418917&r1=1418916&r2=1418917&view=diff
==============================================================================
--- incubator/ambari/branches/AMBARI-666/AMBARI-666-CHANGES.txt (original)
+++ incubator/ambari/branches/AMBARI-666/AMBARI-666-CHANGES.txt Sun Dec  9 12:43:31 2012
@@ -12,6 +12,9 @@ AMBARI-666 branch (unreleased changes)
 
   NEW FEATURES
 
+  AMBARI-1002. Integrate Installer with config APIs. (Jaimin Jetly
+  via yusaku)
+
   AMBARI-989. Show task logs for each host in the Deploy step of the
   wizard. (yusaku)
 

Added: incubator/ambari/branches/AMBARI-666/ambari-web/app/assets/data/wizard/stack/hdp/version0.1.json
URL: http://svn.apache.org/viewvc/incubator/ambari/branches/AMBARI-666/ambari-web/app/assets/data/wizard/stack/hdp/version0.1.json?rev=1418917&view=auto
==============================================================================
--- incubator/ambari/branches/AMBARI-666/ambari-web/app/assets/data/wizard/stack/hdp/version0.1.json (added)
+++ incubator/ambari/branches/AMBARI-666/ambari-web/app/assets/data/wizard/stack/hdp/version0.1.json Sun Dec  9 12:43:31 2012
@@ -0,0 +1,311 @@
+{
+  "name" : "HDP",
+  "version" : "0.1",
+  "repositories" : [ {
+    "baseUrl" : "http://public-repo-1.hortonworks.com/HDP-1.1.1.16/repos/centos6",
+    "osType" : "centos6",
+    "repoId" : "HDP-1.1.1.16",
+    "repoName" : "HDP",
+    "mirrorsList" : null
+  }, {
+    "baseUrl" : "http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.15/repos/centos6",
+    "osType" : "centos6",
+    "repoId" : "HDP-UTILS-1.1.0.15",
+    "repoName" : "HDP-UTILS",
+    "mirrorsList" : null
+  }, {
+    "baseUrl" : null,
+    "osType" : "centos6",
+    "repoId" : "epel",
+    "repoName" : "epel",
+    "mirrorsList" : "https://mirrors.fedoraproject.org/metalink?repo=epel-6&arch=$basearch"
+  }, {
+    "baseUrl" : "http://public-repo-1.hortonworks.com/HDP-1.1.1.16/repos/centos5",
+    "osType" : "centos5",
+    "repoId" : "HDP-1.1.1.16",
+    "repoName" : "HDP",
+    "mirrorsList" : null
+  }, {
+    "baseUrl" : "http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.15/repos/centos5",
+    "osType" : "centos5",
+    "repoId" : "HDP-UTILS-1.1.0.15",
+    "repoName" : "HDP-UTILS",
+    "mirrorsList" : null
+  }, {
+    "baseUrl" : null,
+    "osType" : "centos5",
+    "repoId" : "epel",
+    "repoName" : "epel",
+    "mirrorsList" : "https://mirrors.fedoraproject.org/metalink?repo=epel-5&arch=$basearch"
+  } ],
+  "services" : [ {
+    "name" : "TEMPLETON",
+    "version" : "1.0",
+    "user" : "root",
+    "comment" : "This is comment for TEMPLETON service",
+    "components" : [ {
+      "name" : "TEMPLETON_SERVER",
+      "category" : "MASTER",
+      "client" : false,
+      "master" : true
+    }, {
+      "name" : "TEMPLETON_CLIENT",
+      "category" : "CLIENT",
+      "client" : true,
+      "master" : false
+    } ],
+    "clientComponent" : {
+      "name" : "TEMPLETON_CLIENT",
+      "category" : "CLIENT",
+      "client" : true,
+      "master" : false
+    }
+  }, {
+    "name" : "SQOOP",
+    "version" : "1.0",
+    "user" : "root",
+    "comment" : "This is comment for SQOOP service",
+    "components" : [ {
+      "name" : "SQOOP",
+      "category" : "CLIENT",
+      "client" : true,
+      "master" : false
+    } ],
+    "clientComponent" : {
+      "name" : "SQOOP",
+      "category" : "CLIENT",
+      "client" : true,
+      "master" : false
+    }
+  }, {
+    "name" : "NAGIOS",
+    "version" : "1.0",
+    "user" : "root",
+    "comment" : "This is comment for NAGIOS service",
+    "components" : [ {
+      "name" : "NAGIOS_SERVER",
+      "category" : "MASTER",
+      "client" : false,
+      "master" : true
+    } ],
+    "clientComponent" : {
+      "name" : "NAGIOS_SERVER",
+      "category" : "MASTER",
+      "client" : false,
+      "master" : true
+    }
+  }, {
+    "name" : "HDFS",
+    "version" : "1.0",
+    "user" : "root",
+    "comment" : "This is comment for HDFS service",
+    "components" : [ {
+      "name" : "NAMENODE",
+      "category" : "MASTER",
+      "client" : false,
+      "master" : true
+    }, {
+      "name" : "DATANODE",
+      "category" : "SLAVE",
+      "client" : false,
+      "master" : false
+    }, {
+      "name" : "SECONDARY_NAMENODE",
+      "category" : "MASTER",
+      "client" : false,
+      "master" : true
+    }, {
+      "name" : "HDFS_CLIENT",
+      "category" : "CLIENT",
+      "client" : true,
+      "master" : false
+    } ],
+    "clientComponent" : {
+      "name" : "HDFS_CLIENT",
+      "category" : "CLIENT",
+      "client" : true,
+      "master" : false
+    }
+  }, {
+    "name" : "PIG",
+    "version" : "1.0",
+    "user" : "root",
+    "comment" : "This is comment for PIG service",
+    "components" : [ {
+      "name" : "PIG",
+      "category" : "CLIENT",
+      "client" : true,
+      "master" : false
+    } ],
+    "clientComponent" : {
+      "name" : "PIG",
+      "category" : "CLIENT",
+      "client" : true,
+      "master" : false
+    }
+  }, {
+    "name" : "HBASE",
+    "version" : "1.0",
+    "user" : "mapred",
+    "comment" : "This is comment for HBASE service",
+    "components" : [ {
+      "name" : "HBASE_MASTER",
+      "category" : "MASTER",
+      "client" : false,
+      "master" : true
+    }, {
+      "name" : "HBASE_REGIONSERVER",
+      "category" : "SLAVE",
+      "client" : false,
+      "master" : false
+    }, {
+      "name" : "HBASE_CLIENT",
+      "category" : "CLIENT",
+      "client" : true,
+      "master" : false
+    } ],
+    "clientComponent" : {
+      "name" : "HBASE_CLIENT",
+      "category" : "CLIENT",
+      "client" : true,
+      "master" : false
+    }
+  }, {
+    "name" : "ZOOKEEPER",
+    "version" : "1.0",
+    "user" : "root",
+    "comment" : "This is comment for ZOOKEEPER service",
+    "components" : [ {
+      "name" : "ZOOKEEPER_SERVER",
+      "category" : "MASTER",
+      "client" : false,
+      "master" : true
+    }, {
+      "name" : "ZOOKEEPER_CLIENT",
+      "category" : "CLIENT",
+      "client" : true,
+      "master" : false
+    } ],
+    "clientComponent" : {
+      "name" : "ZOOKEEPER_CLIENT",
+      "category" : "CLIENT",
+      "client" : true,
+      "master" : false
+    }
+  }, {
+    "name" : "GANGLIA",
+    "version" : "1.0",
+    "user" : "root",
+    "comment" : "This is comment for GANGLIA service",
+    "components" : [ {
+      "name" : "GANGLIA_SERVER",
+      "category" : "MASTER",
+      "client" : false,
+      "master" : true
+    }, {
+      "name" : "GANGLIA_MONITOR",
+      "category" : "SLAVE",
+      "client" : false,
+      "master" : false
+    }, {
+      "name" : "MONITOR_WEBSERVER",
+      "category" : "MASTER",
+      "client" : false,
+      "master" : true
+    } ],
+    "clientComponent" : {
+      "name" : "GANGLIA_SERVER",
+      "category" : "MASTER",
+      "client" : false,
+      "master" : true
+    }
+  }, {
+    "name" : "HCATALOG",
+    "version" : "1.0",
+    "user" : "root",
+    "comment" : "This is comment for HCATALOG service",
+    "components" : [ {
+      "name" : "HCAT",
+      "category" : "CLIENT",
+      "client" : true,
+      "master" : false
+    } ],
+    "clientComponent" : {
+      "name" : "HCAT",
+      "category" : "CLIENT",
+      "client" : true,
+      "master" : false
+    }
+  }, {
+    "name" : "HIVE",
+    "version" : "1.0",
+    "user" : "root",
+    "comment" : "This is comment for HIVE service",
+    "components" : [ {
+      "name" : "HIVE_SERVER",
+      "category" : "MASTER",
+      "client" : false,
+      "master" : true
+    }, {
+      "name" : "HIVE_CLIENT",
+      "category" : "CLIENT",
+      "client" : true,
+      "master" : false
+    } ],
+    "clientComponent" : {
+      "name" : "HIVE_CLIENT",
+      "category" : "CLIENT",
+      "client" : true,
+      "master" : false
+    }
+  }, {
+    "name" : "OOZIE",
+    "version" : "1.0",
+    "user" : "root",
+    "comment" : "This is comment for OOZIE service",
+    "components" : [ {
+      "name" : "OOZIE_SERVER",
+      "category" : "MASTER",
+      "client" : false,
+      "master" : true
+    }, {
+      "name" : "OOZIE_CLIENT",
+      "category" : "CLIENT",
+      "client" : true,
+      "master" : false
+    } ],
+    "clientComponent" : {
+      "name" : "OOZIE_CLIENT",
+      "category" : "CLIENT",
+      "client" : true,
+      "master" : false
+    }
+  }, {
+    "name" : "MAPREDUCE",
+    "version" : "1.0",
+    "user" : "mapred",
+    "comment" : "This is comment for MAPREDUCE service",
+    "components" : [ {
+      "name" : "JOBTRACKER",
+      "category" : "MASTER",
+      "client" : false,
+      "master" : true
+    }, {
+      "name" : "TASKTRACKER",
+      "category" : "SLAVE",
+      "client" : false,
+      "master" : false
+    }, {
+      "name" : "MAPREDUCE_CLIENT",
+      "category" : "CLIENT",
+      "client" : true,
+      "master" : false
+    } ],
+    "clientComponent" : {
+      "name" : "MAPREDUCE_CLIENT",
+      "category" : "CLIENT",
+      "client" : true,
+      "master" : false
+    }
+  } ]
+}
\ No newline at end of file

Added: incubator/ambari/branches/AMBARI-666/ambari-web/app/assets/data/wizard/stack/hdp/version01/HDFS.json
URL: http://svn.apache.org/viewvc/incubator/ambari/branches/AMBARI-666/ambari-web/app/assets/data/wizard/stack/hdp/version01/HDFS.json?rev=1418917&view=auto
==============================================================================
--- incubator/ambari/branches/AMBARI-666/ambari-web/app/assets/data/wizard/stack/hdp/version01/HDFS.json (added)
+++ incubator/ambari/branches/AMBARI-666/ambari-web/app/assets/data/wizard/stack/hdp/version01/HDFS.json Sun Dec  9 12:43:31 2012
@@ -0,0 +1,584 @@
+{
+  "name" : "HDFS",
+  "version" : "1.0",
+  "user" : "root",
+  "comment" : "This is comment for HDFS service",
+  "properties" : [ {
+    "name" : "dfs.name.dir",
+    "value" : "/mnt/hmc/hadoop/hdfs/namenode",
+    "description" : "Determines where on the local filesystem the DFS name node\n      should store the name table.  If this is a comma-delimited list\n      of directories then the name table is replicated in all of the\n      directories, for redundancy. ",
+    "filename" : "hdfs-site.xml"
+  }, {
+    "name" : "dfs.support.append",
+    "value" : "true",
+    "description" : "to enable dfs append",
+    "filename" : "hdfs-site.xml"
+  }, {
+    "name" : "dfs.webhdfs.enabled",
+    "value" : "false",
+    "description" : "to enable webhdfs",
+    "filename" : "hdfs-site.xml"
+  }, {
+    "name" : "dfs.datanode.failed.volumes.tolerated",
+    "value" : "0",
+    "description" : "#of failed disks dn would tolerate",
+    "filename" : "hdfs-site.xml"
+  }, {
+    "name" : "dfs.block.local-path-access.user",
+    "value" : "hbase",
+    "description" : "the user who is allowed to perform short\n    circuit reads.\n    ",
+    "filename" : "hdfs-site.xml"
+  }, {
+    "name" : "dfs.data.dir",
+    "value" : "/mnt/hmc/hadoop/hdfs/data",
+    "description" : "Determines where on the local filesystem an DFS data node\n  should store its blocks.  If this is a comma-delimited\n  list of directories, then data will be stored in all named\n  directories, typically on different devices.\n  Directories that do not exist are ignored.\n  ",
+    "filename" : "hdfs-site.xml"
+  }, {
+    "name" : "dfs.hosts.exclude",
+    "value" : "/etc/hadoop/conf/dfs.exclude",
+    "description" : "Names a file that contains a list of hosts that are\n    not permitted to connect to the namenode.  The full pathname of the\n    file must be specified.  If the value is empty, no hosts are\n    excluded.",
+    "filename" : "hdfs-site.xml"
+  }, {
+    "name" : "dfs.hosts",
+    "value" : "/etc/hadoop/conf/dfs.include",
+    "description" : "Names a file that contains a list of hosts that are\n    permitted to connect to the namenode. The full pathname of the file\n    must be specified.  If the value is empty, all hosts are\n    permitted.",
+    "filename" : "hdfs-site.xml"
+  }, {
+    "name" : "dfs.replication.max",
+    "value" : "50",
+    "description" : "Maximal block replication.\n  ",
+    "filename" : "hdfs-site.xml"
+  }, {
+    "name" : "dfs.replication",
+    "value" : "3",
+    "description" : "Default block replication.\n  ",
+    "filename" : "hdfs-site.xml"
+  }, {
+    "name" : "dfs.heartbeat.interval",
+    "value" : "3",
+    "description" : "Determines datanode heartbeat interval in seconds.",
+    "filename" : "hdfs-site.xml"
+  }, {
+    "name" : "dfs.safemode.threshold.pct",
+    "value" : "1.0f",
+    "description" : "\n        Specifies the percentage of blocks that should satisfy\n        the minimal replication requirement defined by dfs.replication.min.\n        Values less than or equal to 0 mean not to start in safe mode.\n        Values greater than 1 will make safe mode permanent.\n        ",
+    "filename" : "hdfs-site.xml"
+  }, {
+    "name" : "dfs.balance.bandwidthPerSec",
+    "value" : "6250000",
+    "description" : "\n        Specifies the maximum amount of bandwidth that each datanode\n        can utilize for the balancing purpose in term of\n        the number of bytes per second.\n  ",
+    "filename" : "hdfs-site.xml"
+  }, {
+    "name" : "dfs.datanode.address",
+    "value" : "0.0.0.0:50010",
+    "description" : null,
+    "filename" : "hdfs-site.xml"
+  }, {
+    "name" : "dfs.datanode.http.address",
+    "value" : "0.0.0.0:50075",
+    "description" : null,
+    "filename" : "hdfs-site.xml"
+  }, {
+    "name" : "dfs.block.size",
+    "value" : "134217728",
+    "description" : "The default block size for new files.",
+    "filename" : "hdfs-site.xml"
+  }, {
+    "name" : "dfs.http.address",
+    "value" : "hdp1.cybervisiontech.com.ua:50070",
+    "description" : "The name of the default file system.  Either the\nliteral string \"local\" or a host:port for NDFS.",
+    "filename" : "hdfs-site.xml"
+  }, {
+    "name" : "dfs.datanode.du.reserved",
+    "value" : "1073741824",
+    "description" : "Reserved space in bytes per volume. Always leave this much space free for non dfs use.\n",
+    "filename" : "hdfs-site.xml"
+  }, {
+    "name" : "dfs.datanode.ipc.address",
+    "value" : "0.0.0.0:8010",
+    "description" : "\nThe datanode ipc server address and port.\nIf the port is 0 then the server will start on a free port.\n",
+    "filename" : "hdfs-site.xml"
+  }, {
+    "name" : "dfs.blockreport.initialDelay",
+    "value" : "120",
+    "description" : "Delay for first block report in seconds.",
+    "filename" : "hdfs-site.xml"
+  }, {
+    "name" : "dfs.datanode.du.pct",
+    "value" : "0.85f",
+    "description" : "When calculating remaining space, only use this percentage of the real available space\n",
+    "filename" : "hdfs-site.xml"
+  }, {
+    "name" : "dfs.namenode.handler.count",
+    "value" : "40",
+    "description" : "The number of server threads for the namenode.",
+    "filename" : "hdfs-site.xml"
+  }, {
+    "name" : "dfs.datanode.max.xcievers",
+    "value" : "1024",
+    "description" : "PRIVATE CONFIG VARIABLE",
+    "filename" : "hdfs-site.xml"
+  }, {
+    "name" : "dfs.umaskmode",
+    "value" : "077",
+    "description" : "\nThe octal umask used when creating files and directories.\n",
+    "filename" : "hdfs-site.xml"
+  }, {
+    "name" : "dfs.web.ugi",
+    "value" : "gopher,gopher",
+    "description" : "The user account used by the web interface.\nSyntax: USERNAME,GROUP1,GROUP2, ...\n",
+    "filename" : "hdfs-site.xml"
+  }, {
+    "name" : "dfs.permissions",
+    "value" : "true",
+    "description" : "\nIf \"true\", enable permission checking in HDFS.\nIf \"false\", permission checking is turned off,\nbut all other behavior is unchanged.\nSwitching from one parameter value to the other does not change the mode,\nowner or group of files or directories.\n",
+    "filename" : "hdfs-site.xml"
+  }, {
+    "name" : "dfs.permissions.supergroup",
+    "value" : "hdfs",
+    "description" : "The name of the group of super-users.",
+    "filename" : "hdfs-site.xml"
+  }, {
+    "name" : "dfs.namenode.handler.count",
+    "value" : "100",
+    "description" : "Added to grow Queue size so that more client connections are allowed",
+    "filename" : "hdfs-site.xml"
+  }, {
+    "name" : "ipc.server.max.response.size",
+    "value" : "5242880",
+    "description" : null,
+    "filename" : "hdfs-site.xml"
+  }, {
+    "name" : "dfs.block.access.token.enable",
+    "value" : "true",
+    "description" : "\nIf \"true\", access tokens are used as capabilities for accessing datanodes.\nIf \"false\", no access tokens are checked on accessing datanodes.\n",
+    "filename" : "hdfs-site.xml"
+  }, {
+    "name" : "dfs.namenode.kerberos.principal",
+    "value" : "nn/_HOST@",
+    "description" : "\nKerberos principal name for the NameNode\n",
+    "filename" : "hdfs-site.xml"
+  }, {
+    "name" : "dfs.secondary.namenode.kerberos.principal",
+    "value" : "nn/_HOST@",
+    "description" : "\n        Kerberos principal name for the secondary NameNode.\n    ",
+    "filename" : "hdfs-site.xml"
+  }, {
+    "name" : "dfs.namenode.kerberos.https.principal",
+    "value" : "host/_HOST@",
+    "description" : "The Kerberos principal for the host that the NameNode runs on.",
+    "filename" : "hdfs-site.xml"
+  }, {
+    "name" : "dfs.secondary.namenode.kerberos.https.principal",
+    "value" : "host/_HOST@",
+    "description" : "The Kerberos principal for the host that the secondary NameNode runs on.",
+    "filename" : "hdfs-site.xml"
+  }, {
+    "name" : "dfs.secondary.http.address",
+    "value" : "hdp2.cybervisiontech.com.ua:50090",
+    "description" : "Address of secondary namenode web server",
+    "filename" : "hdfs-site.xml"
+  }, {
+    "name" : "dfs.secondary.https.port",
+    "value" : "50490",
+    "description" : "The https port where secondary-namenode binds",
+    "filename" : "hdfs-site.xml"
+  }, {
+    "name" : "dfs.web.authentication.kerberos.principal",
+    "value" : "HTTP/_HOST@",
+    "description" : "\n      The HTTP Kerberos principal used by Hadoop-Auth in the HTTP endpoint.\n      The HTTP Kerberos principal MUST start with 'HTTP/' per Kerberos\n      HTTP SPENGO specification.\n    ",
+    "filename" : "hdfs-site.xml"
+  }, {
+    "name" : "dfs.web.authentication.kerberos.keytab",
+    "value" : "/nn.service.keytab",
+    "description" : "\n      The Kerberos keytab file with the credentials for the\n      HTTP Kerberos principal used by Hadoop-Auth in the HTTP endpoint.\n    ",
+    "filename" : "hdfs-site.xml"
+  }, {
+    "name" : "dfs.datanode.kerberos.principal",
+    "value" : "dn/_HOST@",
+    "description" : "\n        The Kerberos principal that the DataNode runs as. \"_HOST\" is replaced by the real host name.\n    ",
+    "filename" : "hdfs-site.xml"
+  }, {
+    "name" : "dfs.namenode.keytab.file",
+    "value" : "/nn.service.keytab",
+    "description" : "\n        Combined keytab file containing the namenode service and host principals.\n    ",
+    "filename" : "hdfs-site.xml"
+  }, {
+    "name" : "dfs.secondary.namenode.keytab.file",
+    "value" : "/nn.service.keytab",
+    "description" : "\n        Combined keytab file containing the namenode service and host principals.\n    ",
+    "filename" : "hdfs-site.xml"
+  }, {
+    "name" : "dfs.datanode.keytab.file",
+    "value" : "/dn.service.keytab",
+    "description" : "\n        The filename of the keytab file for the DataNode.\n    ",
+    "filename" : "hdfs-site.xml"
+  }, {
+    "name" : "dfs.https.port",
+    "value" : "50470",
+    "description" : "The https port where namenode binds",
+    "filename" : "hdfs-site.xml"
+  }, {
+    "name" : "dfs.https.address",
+    "value" : "hdp1.cybervisiontech.com.ua:50470",
+    "description" : "The https address where namenode binds",
+    "filename" : "hdfs-site.xml"
+  }, {
+    "name" : "dfs.datanode.data.dir.perm",
+    "value" : "750",
+    "description" : "The permissions that should be there on dfs.data.dir\ndirectories. The datanode will not come up if the permissions are\ndifferent on existing dfs.data.dir directories. If the directories\ndon't exist, they will be created with this permission.",
+    "filename" : "hdfs-site.xml"
+  }, {
+    "name" : "dfs.access.time.precision",
+    "value" : "0",
+    "description" : "The access time for an HDFS file is precise up to this value.\n               The default value is 1 hour. Setting a value of 0 disables\n               access times for HDFS.\n  ",
+    "filename" : "hdfs-site.xml"
+  }, {
+    "name" : "dfs.cluster.administrators",
+    "value" : " hdfs",
+    "description" : "ACL for who all can view the default servlets in the HDFS",
+    "filename" : "hdfs-site.xml"
+  }, {
+    "name" : "ipc.server.read.threadpool.size",
+    "value" : "5",
+    "description" : null,
+    "filename" : "hdfs-site.xml"
+  }, {
+    "name" : "hadoop.tmp.dir",
+    "value" : "/tmp/hadoop-${user.name}",
+    "description" : "A base for other temporary directories.",
+    "filename" : "core-site.xml"
+  }, {
+    "name" : "hadoop.native.lib",
+    "value" : "true",
+    "description" : "Should native hadoop libraries, if present, be used.",
+    "filename" : "core-site.xml"
+  }, {
+    "name" : "hadoop.security.group.mapping",
+    "value" : "org.apache.hadoop.security.ShellBasedUnixGroupsMapping",
+    "description" : "Class for user to group mapping (get groups for a given user)\n  ",
+    "filename" : "core-site.xml"
+  }, {
+    "name" : "hadoop.security.authorization",
+    "value" : "false",
+    "description" : "Is service-level authorization enabled?",
+    "filename" : "core-site.xml"
+  }, {
+    "name" : "hadoop.security.authentication",
+    "value" : "simple",
+    "description" : "Possible values are simple (no authentication), and kerberos\n  ",
+    "filename" : "core-site.xml"
+  }, {
+    "name" : "hadoop.security.token.service.use_ip",
+    "value" : "true",
+    "description" : "Controls whether tokens always use IP addresses.  DNS changes\n  will not be detected if this option is enabled.  Existing client connections\n  that break will always reconnect to the IP of the original host.  New clients\n  will connect to the host's new IP but fail to locate a token.  Disabling\n  this option will allow existing and new clients to detect an IP change and\n  continue to locate the new host's token.\n  ",
+    "filename" : "core-site.xml"
+  }, {
+    "name" : "hadoop.security.use-weak-http-crypto",
+    "value" : "false",
+    "description" : "If enabled, use KSSL to authenticate HTTP connections to the\n  NameNode. Due to a bug in JDK6, using KSSL requires one to configure\n  Kerberos tickets to use encryption types that are known to be\n  cryptographically weak. If disabled, SPNEGO will be used for HTTP\n  authentication, which supports stronger encryption types.\n  ",
+    "filename" : "core-site.xml"
+  }, {
+    "name" : "hadoop.logfile.size",
+    "value" : "10000000",
+    "description" : "The max size of each log file",
+    "filename" : "core-site.xml"
+  }, {
+    "name" : "hadoop.logfile.count",
+    "value" : "10",
+    "description" : "The max number of log files",
+    "filename" : "core-site.xml"
+  }, {
+    "name" : "io.file.buffer.size",
+    "value" : "4096",
+    "description" : "The size of buffer for use in sequence files.\n  The size of this buffer should probably be a multiple of hardware\n  page size (4096 on Intel x86), and it determines how much data is\n  buffered during read and write operations.",
+    "filename" : "core-site.xml"
+  }, {
+    "name" : "io.bytes.per.checksum",
+    "value" : "512",
+    "description" : "The number of bytes per checksum.  Must not be larger than\n  io.file.buffer.size.",
+    "filename" : "core-site.xml"
+  }, {
+    "name" : "io.skip.checksum.errors",
+    "value" : "false",
+    "description" : "If true, when a checksum error is encountered while\n  reading a sequence file, entries are skipped, instead of throwing an\n  exception.",
+    "filename" : "core-site.xml"
+  }, {
+    "name" : "io.compression.codecs",
+    "value" : "org.apache.hadoop.io.compress.DefaultCodec,org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.BZip2Codec,org.apache.hadoop.io.compress.SnappyCodec",
+    "description" : "A list of the compression codec classes that can be used\n               for compression/decompression.",
+    "filename" : "core-site.xml"
+  }, {
+    "name" : "io.serializations",
+    "value" : "org.apache.hadoop.io.serializer.WritableSerialization",
+    "description" : "A list of serialization classes that can be used for\n  obtaining serializers and deserializers.",
+    "filename" : "core-site.xml"
+  }, {
+    "name" : "fs.default.name",
+    "value" : "file:///",
+    "description" : "The name of the default file system.  A URI whose\n  scheme and authority determine the FileSystem implementation.  The\n  uri's scheme determines the config property (fs.SCHEME.impl) naming\n  the FileSystem implementation class.  The uri's authority is used to\n  determine the host, port, etc. for a filesystem.",
+    "filename" : "core-site.xml"
+  }, {
+    "name" : "fs.trash.interval",
+    "value" : "0",
+    "description" : "Number of minutes between trash checkpoints.\n  If zero, the trash feature is disabled.\n  ",
+    "filename" : "core-site.xml"
+  }, {
+    "name" : "fs.file.impl",
+    "value" : "org.apache.hadoop.fs.LocalFileSystem",
+    "description" : "The FileSystem for file: uris.",
+    "filename" : "core-site.xml"
+  }, {
+    "name" : "fs.hdfs.impl",
+    "value" : "org.apache.hadoop.hdfs.DistributedFileSystem",
+    "description" : "The FileSystem for hdfs: uris.",
+    "filename" : "core-site.xml"
+  }, {
+    "name" : "fs.s3.impl",
+    "value" : "org.apache.hadoop.fs.s3.S3FileSystem",
+    "description" : "The FileSystem for s3: uris.",
+    "filename" : "core-site.xml"
+  }, {
+    "name" : "fs.s3n.impl",
+    "value" : "org.apache.hadoop.fs.s3native.NativeS3FileSystem",
+    "description" : "The FileSystem for s3n: (Native S3) uris.",
+    "filename" : "core-site.xml"
+  }, {
+    "name" : "fs.kfs.impl",
+    "value" : "org.apache.hadoop.fs.kfs.KosmosFileSystem",
+    "description" : "The FileSystem for kfs: uris.",
+    "filename" : "core-site.xml"
+  }, {
+    "name" : "fs.hftp.impl",
+    "value" : "org.apache.hadoop.hdfs.HftpFileSystem",
+    "description" : null,
+    "filename" : "core-site.xml"
+  }, {
+    "name" : "fs.hsftp.impl",
+    "value" : "org.apache.hadoop.hdfs.HsftpFileSystem",
+    "description" : null,
+    "filename" : "core-site.xml"
+  }, {
+    "name" : "fs.webhdfs.impl",
+    "value" : "org.apache.hadoop.hdfs.web.WebHdfsFileSystem",
+    "description" : null,
+    "filename" : "core-site.xml"
+  }, {
+    "name" : "fs.ftp.impl",
+    "value" : "org.apache.hadoop.fs.ftp.FTPFileSystem",
+    "description" : "The FileSystem for ftp: uris.",
+    "filename" : "core-site.xml"
+  }, {
+    "name" : "fs.ramfs.impl",
+    "value" : "org.apache.hadoop.fs.InMemoryFileSystem",
+    "description" : "The FileSystem for ramfs: uris.",
+    "filename" : "core-site.xml"
+  }, {
+    "name" : "fs.har.impl",
+    "value" : "org.apache.hadoop.fs.HarFileSystem",
+    "description" : "The filesystem for Hadoop archives. ",
+    "filename" : "core-site.xml"
+  }, {
+    "name" : "fs.har.impl.disable.cache",
+    "value" : "true",
+    "description" : "Don't cache 'har' filesystem instances.",
+    "filename" : "core-site.xml"
+  }, {
+    "name" : "fs.checkpoint.dir",
+    "value" : "${hadoop.tmp.dir}/dfs/namesecondary",
+    "description" : "Determines where on the local filesystem the DFS secondary\n      name node should store the temporary images to merge.\n      If this is a comma-delimited list of directories then the image is\n      replicated in all of the directories for redundancy.\n  ",
+    "filename" : "core-site.xml"
+  }, {
+    "name" : "fs.checkpoint.edits.dir",
+    "value" : "${fs.checkpoint.dir}",
+    "description" : "Determines where on the local filesystem the DFS secondary\n      name node should store the temporary edits to merge.\n      If this is a comma-delimited list of directories then the edits are\n      replicated in all of the directories for redundancy.\n      Default value is same as fs.checkpoint.dir\n  ",
+    "filename" : "core-site.xml"
+  }, {
+    "name" : "fs.checkpoint.period",
+    "value" : "3600",
+    "description" : "The number of seconds between two periodic checkpoints.\n  ",
+    "filename" : "core-site.xml"
+  }, {
+    "name" : "fs.checkpoint.size",
+    "value" : "67108864",
+    "description" : "The size of the current edit log (in bytes) that triggers\n       a periodic checkpoint even if the fs.checkpoint.period hasn't expired.\n  ",
+    "filename" : "core-site.xml"
+  }, {
+    "name" : "fs.s3.block.size",
+    "value" : "67108864",
+    "description" : "Block size to use when writing files to S3.",
+    "filename" : "core-site.xml"
+  }, {
+    "name" : "fs.s3.buffer.dir",
+    "value" : "${hadoop.tmp.dir}/s3",
+    "description" : "Determines where on the local filesystem the S3 filesystem\n  should store files before sending them to S3\n  (or after retrieving them from S3).\n  ",
+    "filename" : "core-site.xml"
+  }, {
+    "name" : "fs.s3.maxRetries",
+    "value" : "4",
+    "description" : "The maximum number of retries for reading or writing files to S3,\n  before we signal failure to the application.\n  ",
+    "filename" : "core-site.xml"
+  }, {
+    "name" : "fs.s3.sleepTimeSeconds",
+    "value" : "10",
+    "description" : "The number of seconds to sleep between each S3 retry.\n  ",
+    "filename" : "core-site.xml"
+  }, {
+    "name" : "local.cache.size",
+    "value" : "10737418240",
+    "description" : "The limit on the size of cache you want to keep, set by default\n  to 10GB. This will act as a soft limit on the cache directory for out of band data.\n  ",
+    "filename" : "core-site.xml"
+  }, {
+    "name" : "io.seqfile.compress.blocksize",
+    "value" : "1000000",
+    "description" : "The minimum block size for compression in block compressed\n          SequenceFiles.\n  ",
+    "filename" : "core-site.xml"
+  }, {
+    "name" : "io.seqfile.lazydecompress",
+    "value" : "true",
+    "description" : "Should values of block-compressed SequenceFiles be decompressed\n          only when necessary.\n  ",
+    "filename" : "core-site.xml"
+  }, {
+    "name" : "io.seqfile.sorter.recordlimit",
+    "value" : "1000000",
+    "description" : "The limit on number of records to be kept in memory in a spill\n          in SequenceFiles.Sorter\n  ",
+    "filename" : "core-site.xml"
+  }, {
+    "name" : "io.mapfile.bloom.size",
+    "value" : "1048576",
+    "description" : "The size of BloomFilter-s used in BloomMapFile. Each time this many\n  keys is appended the next BloomFilter will be created (inside a DynamicBloomFilter).\n  Larger values minimize the number of filters, which slightly increases the performance,\n  but may waste too much space if the total number of keys is usually much smaller\n  than this number.\n  ",
+    "filename" : "core-site.xml"
+  }, {
+    "name" : "io.mapfile.bloom.error.rate",
+    "value" : "0.005",
+    "description" : "The rate of false positives in BloomFilter-s used in BloomMapFile.\n  As this value decreases, the size of BloomFilter-s increases exponentially. This\n  value is the probability of encountering false positives (default is 0.5%).\n  ",
+    "filename" : "core-site.xml"
+  }, {
+    "name" : "hadoop.util.hash.type",
+    "value" : "murmur",
+    "description" : "The default implementation of Hash. Currently this can take one of the\n  two values: 'murmur' to select MurmurHash and 'jenkins' to select JenkinsHash.\n  ",
+    "filename" : "core-site.xml"
+  }, {
+    "name" : "ipc.client.idlethreshold",
+    "value" : "4000",
+    "description" : "Defines the threshold number of connections after which\n               connections will be inspected for idleness.\n  ",
+    "filename" : "core-site.xml"
+  }, {
+    "name" : "ipc.client.kill.max",
+    "value" : "10",
+    "description" : "Defines the maximum number of clients to disconnect in one go.\n  ",
+    "filename" : "core-site.xml"
+  }, {
+    "name" : "ipc.client.connection.maxidletime",
+    "value" : "10000",
+    "description" : "The maximum time in msec after which a client will bring down the\n               connection to the server.\n  ",
+    "filename" : "core-site.xml"
+  }, {
+    "name" : "ipc.client.connect.max.retries",
+    "value" : "10",
+    "description" : "Indicates the number of retries a client will make to establish\n               a server connection.\n  ",
+    "filename" : "core-site.xml"
+  }, {
+    "name" : "ipc.server.listen.queue.size",
+    "value" : "128",
+    "description" : "Indicates the length of the listen queue for servers accepting\n               client connections.\n  ",
+    "filename" : "core-site.xml"
+  }, {
+    "name" : "ipc.server.tcpnodelay",
+    "value" : "false",
+    "description" : "Turn on/off Nagle's algorithm for the TCP socket connection on\n  the server. Setting to true disables the algorithm and may decrease latency\n  with a cost of more/smaller packets.\n  ",
+    "filename" : "core-site.xml"
+  }, {
+    "name" : "ipc.client.tcpnodelay",
+    "value" : "false",
+    "description" : "Turn on/off Nagle's algorithm for the TCP socket connection on\n  the client. Setting to true disables the algorithm and may decrease latency\n  with a cost of more/smaller packets.\n  ",
+    "filename" : "core-site.xml"
+  }, {
+    "name" : "webinterface.private.actions",
+    "value" : "false",
+    "description" : " If set to true, the web interfaces of JT and NN may contain\n                actions, such as kill job, delete file, etc., that should\n                not be exposed to public. Enable this option if the interfaces\n                are only reachable by those who have the right authorization.\n  ",
+    "filename" : "core-site.xml"
+  }, {
+    "name" : "hadoop.rpc.socket.factory.class.default",
+    "value" : "org.apache.hadoop.net.StandardSocketFactory",
+    "description" : " Default SocketFactory to use. This parameter is expected to be\n    formatted as \"package.FactoryClassName\".\n  ",
+    "filename" : "core-site.xml"
+  }, {
+    "name" : "topology.node.switch.mapping.impl",
+    "value" : "org.apache.hadoop.net.ScriptBasedMapping",
+    "description" : " The default implementation of the DNSToSwitchMapping. It\n    invokes a script specified in topology.script.file.name to resolve\n    node names. If the value for topology.script.file.name is not set, the\n    default value of DEFAULT_RACK is returned for all node names.\n  ",
+    "filename" : "core-site.xml"
+  }, {
+    "name" : "topology.script.number.args",
+    "value" : "100",
+    "description" : " The max number of args that the script configured with\n    topology.script.file.name should be run with. Each arg is an\n    IP address.\n  ",
+    "filename" : "core-site.xml"
+  }, {
+    "name" : "hadoop.security.uid.cache.secs",
+    "value" : "14400",
+    "description" : " NativeIO maintains a cache from UID to UserName. This is\n  the timeout for an entry in that cache. ",
+    "filename" : "core-site.xml"
+  }, {
+    "name" : "hadoop.http.authentication.type",
+    "value" : "simple",
+    "description" : "\n    Defines authentication used for Oozie HTTP endpoint.\n    Supported values are: simple | kerberos | #AUTHENTICATION_HANDLER_CLASSNAME#\n  ",
+    "filename" : "core-site.xml"
+  }, {
+    "name" : "hadoop.http.authentication.token.validity",
+    "value" : "36000",
+    "description" : "\n    Indicates how long (in seconds) an authentication token is valid before it has\n    to be renewed.\n  ",
+    "filename" : "core-site.xml"
+  }, {
+    "name" : "hadoop.http.authentication.signature.secret.file",
+    "value" : "${user.home}/hadoop-http-auth-signature-secret",
+    "description" : "\n    The signature secret for signing the authentication tokens.\n    If not set a random secret is generated at startup time.\n    The same secret should be used for JT/NN/DN/TT configurations.\n  ",
+    "filename" : "core-site.xml"
+  }, {
+    "name" : "hadoop.http.authentication.simple.anonymous.allowed",
+    "value" : "true",
+    "description" : "\n    Indicates if anonymous requests are allowed when using 'simple' authentication.\n  ",
+    "filename" : "core-site.xml"
+  }, {
+    "name" : "hadoop.http.authentication.kerberos.principal",
+    "value" : "HTTP/localhost@LOCALHOST",
+    "description" : "\n    Indicates the Kerberos principal to be used for HTTP endpoint.\n    The principal MUST start with 'HTTP/' as per Kerberos HTTP SPNEGO specification.\n  ",
+    "filename" : "core-site.xml"
+  }, {
+    "name" : "hadoop.http.authentication.kerberos.keytab",
+    "value" : "${user.home}/hadoop.keytab",
+    "description" : "\n    Location of the keytab file with the credentials for the principal.\n    Referring to the same keytab file Oozie uses for its Kerberos credentials for Hadoop.\n  ",
+    "filename" : "core-site.xml"
+  }, {
+    "name" : "hadoop.relaxed.worker.version.check",
+    "value" : "false",
+    "description" : "\n    By default datanodes refuse to connect to namenodes if their build\n    revision (svn revision) do not match, and tasktrackers refuse to\n    connect to jobtrackers if their build version (version, revision,\n    user, and source checksum) do not match. This option changes the\n    behavior of hadoop workers to only check for a version match (eg\n    \"1.0.2\") but ignore the other build fields (revision, user, and\n    source checksum).\n  ",
+    "filename" : "core-site.xml"
+  } ],
+  "components" : [ {
+    "name" : "NAMENODE",
+    "category" : "MASTER",
+    "client" : false,
+    "master" : true
+  }, {
+    "name" : "DATANODE",
+    "category" : "SLAVE",
+    "client" : false,
+    "master" : false
+  }, {
+    "name" : "SECONDARY_NAMENODE",
+    "category" : "MASTER",
+    "client" : false,
+    "master" : true
+  }, {
+    "name" : "HDFS_CLIENT",
+    "category" : "CLIENT",
+    "client" : true,
+    "master" : false
+  } ],
+  "clientComponent" : {
+    "name" : "HDFS_CLIENT",
+    "category" : "CLIENT",
+    "client" : true,
+    "master" : false
+  }
+}
\ No newline at end of file

Added: incubator/ambari/branches/AMBARI-666/ambari-web/app/assets/data/wizard/stack/stacks.json
URL: http://svn.apache.org/viewvc/incubator/ambari/branches/AMBARI-666/ambari-web/app/assets/data/wizard/stack/stacks.json?rev=1418917&view=auto
==============================================================================
--- incubator/ambari/branches/AMBARI-666/ambari-web/app/assets/data/wizard/stack/stacks.json (added)
+++ incubator/ambari/branches/AMBARI-666/ambari-web/app/assets/data/wizard/stack/stacks.json Sun Dec  9 12:43:31 2012
@@ -0,0 +1,311 @@
+[ {
+  "name" : "HDP",
+  "version" : "0.1",
+  "repositories" : [ {
+    "baseUrl" : "http://public-repo-1.hortonworks.com/HDP-1.1.1.16/repos/centos6",
+    "osType" : "centos6",
+    "repoId" : "HDP-1.1.1.16",
+    "repoName" : "HDP",
+    "mirrorsList" : null
+  }, {
+    "baseUrl" : "http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.15/repos/centos6",
+    "osType" : "centos6",
+    "repoId" : "HDP-UTILS-1.1.0.15",
+    "repoName" : "HDP-UTILS",
+    "mirrorsList" : null
+  }, {
+    "baseUrl" : null,
+    "osType" : "centos6",
+    "repoId" : "epel",
+    "repoName" : "epel",
+    "mirrorsList" : "https://mirrors.fedoraproject.org/metalink?repo=epel-6&arch=$basearch"
+  }, {
+    "baseUrl" : "http://public-repo-1.hortonworks.com/HDP-1.1.1.16/repos/centos5",
+    "osType" : "centos5",
+    "repoId" : "HDP-1.1.1.16",
+    "repoName" : "HDP",
+    "mirrorsList" : null
+  }, {
+    "baseUrl" : "http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.15/repos/centos5",
+    "osType" : "centos5",
+    "repoId" : "HDP-UTILS-1.1.0.15",
+    "repoName" : "HDP-UTILS",
+    "mirrorsList" : null
+  }, {
+    "baseUrl" : null,
+    "osType" : "centos5",
+    "repoId" : "epel",
+    "repoName" : "epel",
+    "mirrorsList" : "https://mirrors.fedoraproject.org/metalink?repo=epel-5&arch=$basearch"
+  } ],
+  "services" : [ {
+    "name" : "TEMPLETON",
+    "version" : "1.0",
+    "user" : "root",
+    "comment" : "This is comment for TEMPLETON service",
+    "components" : [ {
+      "name" : "TEMPLETON_SERVER",
+      "category" : "MASTER",
+      "client" : false,
+      "master" : true
+    }, {
+      "name" : "TEMPLETON_CLIENT",
+      "category" : "CLIENT",
+      "client" : true,
+      "master" : false
+    } ],
+    "clientComponent" : {
+      "name" : "TEMPLETON_CLIENT",
+      "category" : "CLIENT",
+      "client" : true,
+      "master" : false
+    }
+  }, {
+    "name" : "SQOOP",
+    "version" : "1.0",
+    "user" : "root",
+    "comment" : "This is comment for SQOOP service",
+    "components" : [ {
+      "name" : "SQOOP",
+      "category" : "CLIENT",
+      "client" : true,
+      "master" : false
+    } ],
+    "clientComponent" : {
+      "name" : "SQOOP",
+      "category" : "CLIENT",
+      "client" : true,
+      "master" : false
+    }
+  }, {
+    "name" : "NAGIOS",
+    "version" : "1.0",
+    "user" : "root",
+    "comment" : "This is comment for NAGIOS service",
+    "components" : [ {
+      "name" : "NAGIOS_SERVER",
+      "category" : "MASTER",
+      "client" : false,
+      "master" : true
+    } ],
+    "clientComponent" : {
+      "name" : "NAGIOS_SERVER",
+      "category" : "MASTER",
+      "client" : false,
+      "master" : true
+    }
+  }, {
+    "name" : "HDFS",
+    "version" : "1.0",
+    "user" : "root",
+    "comment" : "This is comment for HDFS service",
+    "components" : [ {
+      "name" : "NAMENODE",
+      "category" : "MASTER",
+      "client" : false,
+      "master" : true
+    }, {
+      "name" : "DATANODE",
+      "category" : "SLAVE",
+      "client" : false,
+      "master" : false
+    }, {
+      "name" : "SECONDARY_NAMENODE",
+      "category" : "MASTER",
+      "client" : false,
+      "master" : true
+    }, {
+      "name" : "HDFS_CLIENT",
+      "category" : "CLIENT",
+      "client" : true,
+      "master" : false
+    } ],
+    "clientComponent" : {
+      "name" : "HDFS_CLIENT",
+      "category" : "CLIENT",
+      "client" : true,
+      "master" : false
+    }
+  }, {
+    "name" : "PIG",
+    "version" : "1.0",
+    "user" : "root",
+    "comment" : "This is comment for PIG service",
+    "components" : [ {
+      "name" : "PIG",
+      "category" : "CLIENT",
+      "client" : true,
+      "master" : false
+    } ],
+    "clientComponent" : {
+      "name" : "PIG",
+      "category" : "CLIENT",
+      "client" : true,
+      "master" : false
+    }
+  }, {
+    "name" : "HBASE",
+    "version" : "1.0",
+    "user" : "mapred",
+    "comment" : "This is comment for HBASE service",
+    "components" : [ {
+      "name" : "HBASE_MASTER",
+      "category" : "MASTER",
+      "client" : false,
+      "master" : true
+    }, {
+      "name" : "HBASE_REGIONSERVER",
+      "category" : "SLAVE",
+      "client" : false,
+      "master" : false
+    }, {
+      "name" : "HBASE_CLIENT",
+      "category" : "CLIENT",
+      "client" : true,
+      "master" : false
+    } ],
+    "clientComponent" : {
+      "name" : "HBASE_CLIENT",
+      "category" : "CLIENT",
+      "client" : true,
+      "master" : false
+    }
+  }, {
+    "name" : "ZOOKEEPER",
+    "version" : "1.0",
+    "user" : "root",
+    "comment" : "This is comment for ZOOKEEPER service",
+    "components" : [ {
+      "name" : "ZOOKEEPER_SERVER",
+      "category" : "MASTER",
+      "client" : false,
+      "master" : true
+    }, {
+      "name" : "ZOOKEEPER_CLIENT",
+      "category" : "CLIENT",
+      "client" : true,
+      "master" : false
+    } ],
+    "clientComponent" : {
+      "name" : "ZOOKEEPER_CLIENT",
+      "category" : "CLIENT",
+      "client" : true,
+      "master" : false
+    }
+  }, {
+    "name" : "GANGLIA",
+    "version" : "1.0",
+    "user" : "root",
+    "comment" : "This is comment for GANGLIA service",
+    "components" : [ {
+      "name" : "GANGLIA_SERVER",
+      "category" : "MASTER",
+      "client" : false,
+      "master" : true
+    }, {
+      "name" : "GANGLIA_MONITOR",
+      "category" : "SLAVE",
+      "client" : false,
+      "master" : false
+    }, {
+      "name" : "MONITOR_WEBSERVER",
+      "category" : "MASTER",
+      "client" : false,
+      "master" : true
+    } ],
+    "clientComponent" : {
+      "name" : "GANGLIA_SERVER",
+      "category" : "MASTER",
+      "client" : false,
+      "master" : true
+    }
+  }, {
+    "name" : "HCATALOG",
+    "version" : "1.0",
+    "user" : "root",
+    "comment" : "This is comment for HCATALOG service",
+    "components" : [ {
+      "name" : "HCAT",
+      "category" : "CLIENT",
+      "client" : true,
+      "master" : false
+    } ],
+    "clientComponent" : {
+      "name" : "HCAT",
+      "category" : "CLIENT",
+      "client" : true,
+      "master" : false
+    }
+  }, {
+    "name" : "HIVE",
+    "version" : "1.0",
+    "user" : "root",
+    "comment" : "This is comment for HIVE service",
+    "components" : [ {
+      "name" : "HIVE_SERVER",
+      "category" : "MASTER",
+      "client" : false,
+      "master" : true
+    }, {
+      "name" : "HIVE_CLIENT",
+      "category" : "CLIENT",
+      "client" : true,
+      "master" : false
+    } ],
+    "clientComponent" : {
+      "name" : "HIVE_CLIENT",
+      "category" : "CLIENT",
+      "client" : true,
+      "master" : false
+    }
+  }, {
+    "name" : "OOZIE",
+    "version" : "1.0",
+    "user" : "root",
+    "comment" : "This is comment for OOZIE service",
+    "components" : [ {
+      "name" : "OOZIE_SERVER",
+      "category" : "MASTER",
+      "client" : false,
+      "master" : true
+    }, {
+      "name" : "OOZIE_CLIENT",
+      "category" : "CLIENT",
+      "client" : true,
+      "master" : false
+    } ],
+    "clientComponent" : {
+      "name" : "OOZIE_CLIENT",
+      "category" : "CLIENT",
+      "client" : true,
+      "master" : false
+    }
+  }, {
+    "name" : "MAPREDUCE",
+    "version" : "1.0",
+    "user" : "mapred",
+    "comment" : "This is comment for MAPREDUCE service",
+    "components" : [ {
+      "name" : "JOBTRACKER",
+      "category" : "MASTER",
+      "client" : false,
+      "master" : true
+    }, {
+      "name" : "TASKTRACKER",
+      "category" : "SLAVE",
+      "client" : false,
+      "master" : false
+    }, {
+      "name" : "MAPREDUCE_CLIENT",
+      "category" : "CLIENT",
+      "client" : true,
+      "master" : false
+    } ],
+    "clientComponent" : {
+      "name" : "MAPREDUCE_CLIENT",
+      "category" : "CLIENT",
+      "client" : true,
+      "master" : false
+    }
+  } ]
+} ]
\ No newline at end of file

Modified: incubator/ambari/branches/AMBARI-666/ambari-web/app/controllers/installer.js
URL: http://svn.apache.org/viewvc/incubator/ambari/branches/AMBARI-666/ambari-web/app/controllers/installer.js?rev=1418917&r1=1418916&r2=1418917&view=diff
==============================================================================
--- incubator/ambari/branches/AMBARI-666/ambari-web/app/controllers/installer.js (original)
+++ incubator/ambari/branches/AMBARI-666/ambari-web/app/controllers/installer.js Sun Dec  9 12:43:31 2012
@@ -227,7 +227,8 @@ App.InstallerController = Em.Controller.
     hostSlaveComponents: null,
     masterComponentHosts: null,
     hostToMasterComponent: null,
-    serviceConfigProperties: null
+    serviceConfigProperties: null,
+    advancedServiceConfig: null
   }),
 
   /**
@@ -613,10 +614,18 @@ App.InstallerController = Em.Controller.
     var serviceConfigProperties = [];
     stepController.get('stepConfigs').forEach(function (_content) {
       _content.get('configs').forEach(function (_configProperties) {
+        var displayType =  _configProperties.get('displayType');
+        if(displayType === 'directories' || displayType === 'advanced' || displayType === 'directory') {
+          var value = _configProperties.get('value').replace(/[\s,]+/g,',');
+          _configProperties.set('value',value);
+        }
         var configProperty = {
+          id: _configProperties.get('id'),
           name: _configProperties.get('name'),
           value: _configProperties.get('value'),
-          service: _configProperties.get('serviceName')
+          defaultValue: _configProperties.get('defaultValue'),
+          service: _configProperties.get('serviceName'),
+          filename: _configProperties.get('filename')
         };
         serviceConfigProperties.push(configProperty);
       }, this);
@@ -707,8 +716,95 @@ App.InstallerController = Em.Controller.
   },
 
   /**
+   * Generate serviceComponents as per the stack definition and save it to localdata
+   * called form stepController step4WizardController
+   */
+  loadComponents: function (stepController) {
+    var self = this;
+    var method = 'GET';
+    var url = (App.testMode) ? '/data/wizard/stack/hdp/version0.1.json' : '/api/stacks/HDP/version/0.1'; // TODO: get this url from the stack selected by the user in Install Options page
+    $.ajax({
+      type: method,
+      url: url,
+      async: false,
+      dataType: 'text',
+      timeout: 5000,
+      success: function (data) {
+        var jsonData = jQuery.parseJSON(data);
+        console.log("TRACE: STep5 -> In success function for the getServiceComponents call");
+        console.log("TRACE: STep5 -> value of the url is: " + url);
+        var serviceComponents = [];
+        jsonData.services.forEach(function (_service) {
+
+        }, this);
+        stepController.set('components', jsonData.services);
+        console.log('TRACE: service components: ' + JSON.stringify(stepController.get('components')));
+      },
+
+      error: function (request, ajaxOptions, error) {
+        console.log("TRACE: STep5 -> In error function for the getServiceComponents call");
+        console.log("TRACE: STep5 -> value of the url is: " + url);
+        console.log("TRACE: STep5 -> error code status is: " + request.status);
+        console.log('Step8: Error message is: ' + request.responseText);
+      },
+
+      statusCode: require('data/statusCodes')
+    });
+
+  },
+
+  loadAdvancedConfigs: function () {
+    App.db.getSelectedServiceNames().forEach(function (_serviceName) {
+      this.loadAdvancedConfig(_serviceName);
+    }, this);
+  },
+  /**
+   * Generate serviceProperties and save it to localdata
+   * called form stepController step6WizardController
+   */
+
+  loadAdvancedConfig: function (serviceName) {
+    var self = this;
+    var url = (App.testMode) ? '/data/wizard/stack/hdp/version01/' + serviceName + '.json' : '/api/stacks/HDP/version/0.1/services/' + serviceName; // TODO: get this url from the stack selected by the user in Install Options page
+    var method = 'GET';
+    $.ajax({
+      type: method,
+      url: url,
+      async: false,
+      dataType: 'text',
+      timeout: 5000,
+      success: function (data) {
+        var jsonData = jQuery.parseJSON(data);
+        console.log("TRACE: Step6 submit -> In success function for the loadAdvancedConfig call");
+        console.log("TRACE: Step6 submit -> value of the url is: " + url);
+        var serviceComponents = jsonData.properties;
+        serviceComponents.setEach('serviceName', serviceName);
+        var configs;
+        if (App.db.getAdvancedServiceConfig()) {
+          configs = App.db.getAdvancedServiceConfig();
+        } else {
+          configs = [];
+        }
+        configs = configs.concat(serviceComponents);
+        self.set('content.advancedServiceConfig', configs);
+        App.db.setAdvancedServiceConfig(configs);
+        console.log('TRACE: servicename: ' + serviceName);
+      },
+
+      error: function (request, ajaxOptions, error) {
+        console.log("TRACE: STep6 submit -> In error function for the loadAdvancedConfig call");
+        console.log("TRACE: STep6 submit-> value of the url is: " + url);
+        console.log("TRACE: STep6 submit-> error code status is: " + request.status);
+        console.log('Step6 submit: Error message is: ' + request.responseText);
+      },
+
+      statusCode: require('data/statusCodes')
+    });
+  },
+
+  /**
    * Generate clients list for selected services and save it to model
-   * @param stepController step8WizardController or step9WizardController
+   * called from stepController step8WizardController or step9WizardController
    */
   installServices: function () {
     var self = this;
@@ -726,8 +822,8 @@ App.InstallerController = Em.Controller.
       success: function (data) {
         var jsonData = jQuery.parseJSON(data);
         var installSartTime = new Date().getTime();
-        console.log("TRACE: STep8 -> In success function for the installService call");
-        console.log("TRACE: STep8 -> value of the url is: " + url);
+        console.log("TRACE: In success function for the installService call");
+        console.log("TRACE: value of the url is: " + url);
         if (jsonData) {
           var requestId = jsonData.href.match(/.*\/(.*)$/)[1];
 

Modified: incubator/ambari/branches/AMBARI-666/ambari-web/app/controllers/wizard/step7_controller.js
URL: http://svn.apache.org/viewvc/incubator/ambari/branches/AMBARI-666/ambari-web/app/controllers/wizard/step7_controller.js?rev=1418917&r1=1418916&r2=1418917&view=diff
==============================================================================
--- incubator/ambari/branches/AMBARI-666/ambari-web/app/controllers/wizard/step7_controller.js (original)
+++ incubator/ambari/branches/AMBARI-666/ambari-web/app/controllers/wizard/step7_controller.js Sun Dec  9 12:43:31 2012
@@ -55,6 +55,9 @@ App.WizardStep7Controller = Em.Controlle
   }.property('content.slaveComponentHosts'),
 
   serviceConfigs: require('data/service_configs'),
+  configMapping: require('data/configMapping'),
+  customConfigs: require('data/custom_configs'),
+  customData: [],
 
   clearStep: function () {
     this.get('stepConfigs').clear();
@@ -65,10 +68,28 @@ App.WizardStep7Controller = Em.Controlle
    */
   loadStep: function () {
     console.log("TRACE: Loading step7: Configure Services");
-
     this.clearStep();
-    this.renderServiceConfigs(this.serviceConfigs);
-
+    var serviceConfigs = this.get('serviceConfigs');
+    var advancedConfig = this.get('content.advancedServiceConfig');
+    advancedConfig.forEach(function(_config){
+      var service = serviceConfigs.findProperty('serviceName',_config.serviceName);
+      if(service) {
+        if(this.get('configMapping').someProperty('name',_config.name)) {
+        } else if(!(service.configs.someProperty('name',_config.name))) {
+          _config.id = "site property";
+          _config.category = 'Advanced';
+          _config.displayName = _config.name;
+          _config.defaultValue = _config.value;
+          _config.value = '',
+          _config.isVisible = true;
+          _config.isRequired = false;
+          _config.displayType = 'advanced';
+          service.configs.pushObject(_config);
+        }
+      }
+    },this);
+    this.loadCustomConfig();
+    this.renderServiceConfigs(serviceConfigs);
     var storedServices = this.get('content.serviceConfigProperties');
     if (storedServices) {
       var configs = new Ember.Set();
@@ -81,7 +102,6 @@ App.WizardStep7Controller = Em.Controlle
           var componentVal = storedServices.findProperty('name', _config.get('name'));
           //if we have config for specified component
           if(componentVal){
-
             //set it
             _config.set('value', componentVal.value)
           }
@@ -92,6 +112,18 @@ App.WizardStep7Controller = Em.Controlle
     }
   },
 
+  loadCustomConfig: function() {
+    var serviceConfigs = this.get('serviceConfigs');
+    this.get('customConfigs').forEach(function(_config){
+      var service = serviceConfigs.findProperty('serviceName',_config.serviceName);
+        if(service) {
+          if(!(service.configs.someProperty('name',_config.name))) {
+            service.configs.pushObject(_config);
+          }
+        }
+    },this);
+  },
+
   /**
    * Render configs for active services
    * @param serviceConfigs
@@ -99,6 +131,8 @@ App.WizardStep7Controller = Em.Controlle
   renderServiceConfigs: function (serviceConfigs) {
     serviceConfigs.forEach(function (_serviceConfig) {
       var serviceConfig = App.ServiceConfig.create({
+        id: _serviceConfig.id,
+        filename: _serviceConfig.filename,
         serviceName: _serviceConfig.serviceName,
         displayName: _serviceConfig.displayName,
         configCategories: _serviceConfig.configCategories,
@@ -129,6 +163,7 @@ App.WizardStep7Controller = Em.Controlle
       var serviceConfigProperty = App.ServiceConfigProperty.create(_serviceConfigProperty);
       serviceConfigProperty.serviceConfig = componentConfig;
       serviceConfigProperty.initialValue();
+
       componentConfig.configs.pushObject(serviceConfigProperty);
       serviceConfigProperty.validate();
     }, this);

Modified: incubator/ambari/branches/AMBARI-666/ambari-web/app/controllers/wizard/step8_controller.js
URL: http://svn.apache.org/viewvc/incubator/ambari/branches/AMBARI-666/ambari-web/app/controllers/wizard/step8_controller.js?rev=1418917&r1=1418916&r2=1418917&view=diff
==============================================================================
--- incubator/ambari/branches/AMBARI-666/ambari-web/app/controllers/wizard/step8_controller.js (original)
+++ incubator/ambari/branches/AMBARI-666/ambari-web/app/controllers/wizard/step8_controller.js Sun Dec  9 12:43:31 2012
@@ -24,6 +24,9 @@ App.WizardStep8Controller = Em.Controlle
   totalHosts: [],
   clusterInfo: [],
   services: [],
+  configs: [],
+  globals: [],
+  configMapping: require('data/configMapping'),
 
   selectedServices: function () {
     return this.get('content.services').filterProperty('isSelected', true);
@@ -31,16 +34,168 @@ App.WizardStep8Controller = Em.Controlle
 
   clearStep: function () {
     this.get('services').clear();
+    this.get('configs').clear();
+    this.get('globals').clear();
     this.get('clusterInfo').clear();
   },
 
   loadStep: function () {
     console.log("TRACE: Loading step8: Review Page");
     this.clearStep();
+    this.loadGlobals();
+    this.loadConfigs();
+    this.setCustomConfigs();
     this.loadClusterInfo();
     this.loadServices();
   },
 
+  loadGlobals: function () {
+    var globals = this.get('content.serviceConfigProperties').filterProperty('id', 'puppet var');
+    if (globals.someProperty('name', 'hive_database')) {
+      //TODO: Hive host depends on the type of db selected. Change puppet variable name if postgress is not the default db
+      var hiveDb = globals.findProperty('name', 'hive_database');
+      if (hiveDb.value === 'New PostgreSQL Database') {
+        globals.findProperty('name', 'hive_ambari_host').name = 'hive_mysql_host';
+        globals.without(globals.findProperty('name', 'hive_existing_host'));
+        globals.without(globals.findProperty('name', 'hive_existing_database'));
+      } else {
+        globals.findProperty('name', 'hive_existing_host').name = 'hive_mysql_host';
+        globals.without(globals.findProperty('name', 'hive_ambari_host'));
+        globals.without(globals.findProperty('name', 'hive_ambari_database'));
+      }
+    }
+    this.set('globals', globals);
+  },
+
+  loadConfigs: function () {
+    var storedConfigs = this.get('content.serviceConfigProperties').filterProperty('id', 'site property').filterProperty('value');
+    var uiConfigs = this.loadUiSideConfigs();
+    this.set('configs', storedConfigs.concat(uiConfigs));
+  },
+
+  loadUiSideConfigs: function () {
+    var uiConfig = [];
+    var configs = this.get('configMapping').filterProperty('foreignKey', null);
+    configs.forEach(function (_config) {
+      var value = this.getGlobConfigValue(_config.templateName, _config.value);
+      uiConfig.pushObject({
+        "id": "site property",
+        "name": _config.name,
+        "value": value,
+        "filename": _config.filename
+      });
+    }, this);
+    var dependentConfig = this.get('configMapping').filterProperty('foreignKey');
+    dependentConfig.forEach(function (_config) {
+      this.setConfigValue(uiConfig, _config);
+      uiConfig.pushObject({
+        "id": "site property",
+        "name": _config.name,
+        "value": _config.value,
+        "filename": _config.filename
+      });
+    }, this);
+    return uiConfig;
+  },
+  /**
+   * Set all site property that are derived from other puppet-variable
+   */
+
+  getGlobConfigValue: function (templateName, expression) {
+    var express = expression.match(/<(.*?)>/g);
+    var value = expression;
+    express.forEach(function (_express) {
+      //console.log("The value of template is: " + _express);
+      var index = parseInt(_express.match(/\[([\d]*)(?=\])/)[1]);
+      if (this.get('globals').someProperty('name', templateName[index])) {
+        //console.log("The name of the variable is: " + this.get('content.serviceConfigProperties').findProperty('name', templateName[index]).name);
+        var globValue = this.get('globals').findProperty('name', templateName[index]).value;
+        value = value.replace(_express, globValue);
+      } else {
+        /*
+         console.log("ERROR: The variable name is: " + templateName[index]);
+         console.log("ERROR: mapped config from configMapping file has no corresponding variable in " +
+         "content.serviceConfigProperties. Two possible reasons for the error could be: 1) The service is not selected. " +
+         "and/OR 2) The service_config metadata file has no corresponding global var for the site property variable");
+         */
+        value = null;
+      }
+    }, this);
+    return value;
+  },
+  /**
+   * Set all site property that are derived from other site-properties
+   */
+  setConfigValue: function (uiConfig, config) {
+    var fkValue = config.value.match(/<(foreignKey.*?)>/g);
+    if (fkValue) {
+      fkValue.forEach(function (_fkValue) {
+        var index = parseInt(_fkValue.match(/\[([\d]*)(?=\])/)[1]);
+        if (uiConfig.someProperty('name', config.foreignKey[index])) {
+          var globalValue = uiConfig.findProperty('name', config.foreignKey[index]).value;
+          config.value = config.value.replace(_fkValue, globalValue);
+        } else if (this.get('content.serviceConfigProperties').someProperty('name', config.foreignKey[index])) {
+          var globalValue;
+          if (this.get('content.serviceConfigProperties').findProperty('name', config.foreignKey[index]).value === '') {
+            globalValue = this.get('content.serviceConfigProperties').findProperty('name', config.foreignKey[index]).defaultValue;
+          } else {
+            globalValue = this.get('content.serviceConfigProperties').findProperty('name', config.foreignKey[index]).value;
+          }
+          config.value = config.value.replace(_fkValue, globalValue);
+        }
+      }, this);
+    }
+    if (fkValue = config.name.match(/<(foreignKey.*?)>/g)) {
+      fkValue.forEach(function (_fkValue) {
+        var index = parseInt(_fkValue.match(/\[([\d]*)(?=\])/)[1]);
+        if (uiConfig.someProperty('name', config.foreignKey[index])) {
+          var globalValue = uiConfig.findProperty('name', config.foreignKey[index]).value;
+          config.name = config.name.replace(_fkValue, globalValue);
+        } else if (this.get('content.serviceConfigProperties').someProperty('name', config.foreignKey[index])) {
+          var globalValue;
+          if (this.get('content.serviceConfigProperties').findProperty('name', config.foreignKey[index]).value === '') {
+            globalValue = this.get('content.serviceConfigProperties').findProperty('name', config.foreignKey[index]).defaultValue;
+          } else {
+            globalValue = this.get('content.serviceConfigProperties').findProperty('name', config.foreignKey[index]).value;
+          }
+          config.name = config.name.replace(_fkValue, globalValue);
+        }
+      }, this);
+    }
+    //For properties in the configMapping file having foreignKey and templateName properties.
+    var templateValue = config.value.match(/<(templateName.*?)>/g);
+    if (templateValue) {
+      templateValue.forEach(function (_value) {
+        var index = parseInt(_value.match(/\[([\d]*)(?=\])/)[1]);
+        if (this.get('globals').someProperty('name', config.templateName[index])) {
+          var globalValue = this.get('globals').findProperty('name', config.templateName[index]).value;
+          config.value = config.value.replace(_value, globalValue);
+        }
+      }, this);
+    }
+  },
+
+  /**
+   * override site properties with the entered key-value pair in *-site.xml
+   */
+  setCustomConfigs: function () {
+    var site = this.get('content.serviceConfigProperties').filterProperty('id', 'conf-site');
+    site.forEach(function (_site) {
+      var keys = _site.value.match(/[\s]*(\w*)=/g);
+      var configs = [];
+      if (keys) {
+        keys.forEach(function (_key) {
+          _key = _key.trim();
+          console.log("Value of key is: " + _key.substring(0, _key.length - 1));
+          var configKey = _key.substring(0, _key.length - 1);
+          if (configKey) {
+            configs.pushObject({key: configKey});
+          }
+        }, this);
+      }
+    }, this)
+  },
+
   /**
    * Load all info about cluster to <code>clusterInfo</code> variable
    */
@@ -64,7 +219,7 @@ App.WizardStep8Controller = Em.Controlle
     slaveHosts = hostObj.mapProperty('hostname').uniq();
 
     var totalHosts = masterHosts.concat(slaveHosts).uniq();
-    this.set('totalHosts',totalHosts);
+    this.set('totalHosts', totalHosts);
     var totalHostsObj = this.rawContent.findProperty('config_name', 'hosts');
     totalHostsObj.config_value = totalHosts.length;
     this.get('clusterInfo').pushObject(Ember.Object.create(totalHostsObj));
@@ -123,9 +278,6 @@ App.WizardStep8Controller = Em.Controlle
           default:
         }
       }
-      //serviceObj.displayName = tempObj.service_name;
-      //serviceObj.componentNames =  tempObj.service_components;
-
     }, this);
   },
 
@@ -423,8 +575,8 @@ App.WizardStep8Controller = Em.Controlle
   submit: function () {
 
     if (App.testMode) {
-      App.router.send('next');
-      return;
+      // App.router.send('next');
+      //return;
     }
 
     this.createCluster();
@@ -543,13 +695,13 @@ App.WizardStep8Controller = Em.Controlle
     });
   },
 
-  registerHostsToCluster: function() {
-    this.get('totalHosts').forEach(function(_hostname){
+  registerHostsToCluster: function () {
+    this.get('totalHosts').forEach(function (_hostname) {
       this.registerHostToCluster(_hostname);
-    },this);
+    }, this);
   },
 
-  registerHostToCluster: function(hostname) {
+  registerHostToCluster: function (hostname) {
     var clusterName = this.get('clusterInfo').findProperty('config_name', 'cluster').config_value;
     var url = '/api/clusters/' + clusterName + '/hosts/' + hostname;
     $.ajax({
@@ -642,6 +794,7 @@ App.WizardStep8Controller = Em.Controlle
 
   createConfigurations: function () {
     var selectedServices = this.get('selectedServices');
+    this.createConfigSite(this.createGlobalSiteObj());
     this.createConfigSite(this.createCoreSiteObj());
     this.createConfigSite(this.createHdfsSiteObj('HDFS'));
     if (selectedServices.someProperty('serviceName', 'MAPREDUCE')) {
@@ -649,7 +802,7 @@ App.WizardStep8Controller = Em.Controlle
     }
     if (selectedServices.someProperty('serviceName', 'HBASE')) {
       // TODO
-      // this.createConfigSite(this.createHbaseSiteObj('HBASE'));
+      this.createConfigSite(this.createHbaseSiteObj('HBASE'));
     }
     if (selectedServices.someProperty('serviceName', 'HIVE')) {
       // TODO
@@ -686,23 +839,40 @@ App.WizardStep8Controller = Em.Controlle
     console.log("Exiting createConfigSite");
   },
 
+  createGlobalSiteObj: function () {
+    var globalSiteProperties = {};
+    this.get('globals').forEach(function (_globalSiteObj) {
+      globalSiteProperties[_globalSiteObj.name] = _globalSiteObj.value;
+      console.log("STEP8: name of the global property is: " + _globalSiteObj.name);
+      console.log("STEP8: value of the global property is: " + _globalSiteObj.value);
+    }, this);
+    return {"type": "global", "tag": "version1", "properties": globalSiteProperties};
+  },
+
   createCoreSiteObj: function () {
-    return {"type": "core-site", "tag": "version1", "properties": { "fs.default.name": "localhost:8020"}};
+    var coreSiteObj = this.get('configs').filterProperty('filename', 'core-site.xml');
+    var coreSiteProperties = {};
+    coreSiteObj.forEach(function (_coreSiteObj) {
+      coreSiteProperties[_coreSiteObj.name] = _coreSiteObj.value;
+      console.log("STEP*: name of the property is: " + _coreSiteObj.name);
+      console.log("STEP8: value of the property is: " + _coreSiteObj.value);
+    }, this);
+    return {"type": "core-site", "tag": "version1", "properties": coreSiteProperties};
   },
 
   createHdfsSiteObj: function (serviceName) {
-    var configs = App.db.getServiceConfigProperties().filterProperty('serviceName', serviceName);
+    var hdfsSiteObj = this.get('configs').filterProperty('filename', 'hdfs-site.xml');
     var hdfsProperties = {};
-    configs.forEach(function (_configProperty) {
+    hdfsSiteObj.forEach(function (_configProperty) {
       hdfsProperties[_configProperty.name] = _configProperty.value;
+      console.log("STEP*: name of the property is: " + _configProperty.name);
+      console.log("STEP8: value of the property is: " + _configProperty.value);
     }, this);
-    // TODO: Using hardcoded params until meta data API becomes available
-    hdfsProperties = {"dfs.datanode.data.dir.perm": "750"};
     return {"type": "hdfs-site", "tag": "version1", "properties": hdfsProperties };
   },
 
   createMrSiteObj: function (serviceName) {
-    var configs = App.db.getServiceConfigProperties().filterProperty('serviceName', serviceName);
+    var configs = this.get('configs').filterProperty('filename', 'mapred-site.xml');
     var mrProperties = {};
     configs.forEach(function (_configProperty) {
       mrProperties[_configProperty.name] = _configProperty.value;
@@ -717,7 +887,7 @@ App.WizardStep8Controller = Em.Controlle
   },
 
   createHbaseSiteObj: function (serviceName) {
-    var configs = App.db.getServiceConfigProperties().filterProperty('serviceName', serviceName);
+    var configs = this.get('configs').filterProperty('filename', 'hbase-site.xml');
     var hbaseProperties = {};
     configs.forEach(function (_configProperty) {
       hbaseProperties[_configProperty.name] = _configProperty.value;
@@ -726,7 +896,7 @@ App.WizardStep8Controller = Em.Controlle
   },
 
   createHiveSiteObj: function (serviceName) {
-    var configs = App.db.getServiceConfigProperties().filterProperty('serviceName', serviceName);
+    var configs = this.get('configs').filterProperty('filename', 'hive-site.xml');
     var hiveProperties = {};
     configs.forEach(function (_configProperty) {
       hiveProperties[_configProperty.name] = _configProperty.value;
@@ -775,9 +945,11 @@ App.WizardStep8Controller = Em.Controlle
   getConfigForService: function (serviceName) {
     switch (serviceName) {
       case 'HDFS':
-        return {config: {'core-site': 'version1', 'hdfs-site': 'version1'}};
+        return {config: {'global': 'version1', 'core-site': 'version1', 'hdfs-site': 'version1'}};
       case 'MAPREDUCE':
-        return {config: {'core-site': 'version1', 'mapred-site': 'version1'}};
+        return {config: {'global': 'version1', 'core-site': 'version1', 'mapred-site': 'version1'}};
+      case 'HBASE' :
+        return {config: {'global': 'version1', 'core-site': 'version1', 'hbase-site': 'version1'}};
     }
   }
 



Mime
View raw message