incubator-ambari-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From jai...@apache.org
Subject [1/4] AMBARI-3568: Clean up unnecessary globals from Ambari Web. (jaimin)
Date Tue, 22 Oct 2013 22:56:56 GMT
Updated Branches:
  refs/heads/trunk 63d563854 -> 88f513259


http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/88f51325/ambari-web/app/data/site_properties.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/data/site_properties.js b/ambari-web/app/data/site_properties.js
index bc68d25..502bca1 100644
--- a/ambari-web/app/data/site_properties.js
+++ b/ambari-web/app/data/site_properties.js
@@ -18,5 +18,629 @@
 module.exports =
 {
   "configProperties": [
+  /**********************************************HDFS***************************************/
+    {
+      "id": "site property",
+      "name": "fs.checkpoint.dir",
+      "displayName": "SecondaryNameNode Checkpoint directory",
+      "description": "Directory on the local filesystem where the Secondary NameNode should
store the temporary images to merge",
+      "defaultValue": "",
+      "defaultDirectory": "/hadoop/hdfs/namesecondary",
+      "displayType": "directory",
+      "isOverridable": false,
+      "isVisible": true,
+      "serviceName": "HDFS",
+      "category": "SNameNode",
+      "index": 1
+    },
+    {
+      "id": "site property",
+      "name": "fs.checkpoint.period",
+      "displayName": "HDFS Maximum Checkpoint Delay",
+      "description": "Maximum delay between two consecutive checkpoints for HDFS",
+      "defaultValue": "",
+      "displayType": "int",
+      "unit": "seconds",
+      "isVisible": true,
+      "category": "General",
+      "serviceName": "HDFS",
+      "index": 3
+    },
+    {
+      "id": "site property",
+      "name": "fs.checkpoint.size",
+      "displayName": "HDFS Maximum Edit Log Size for Checkpointing",
+      "description": "Maximum size of the edits log file that forces an urgent checkpoint
even if the maximum checkpoint delay is not reached",
+      "defaultValue": "",
+      "displayType": "int",
+      "unit": "bytes",
+      "isVisible": true,
+      "serviceName": "HDFS",
+      "category": "General",
+      "index": 4
+    },
+    {
+      "id": "site property",
+      "name": "dfs.name.dir",
+      "displayName": "NameNode directories",
+      "description": "NameNode directories for HDFS to store the file system image",
+      "defaultValue": "",
+      "defaultDirectory": "/hadoop/hdfs/namenode",
+      "displayType": "directories",
+      "isOverridable": false,
+      "isVisible": true,
+      "serviceName": "HDFS",
+      "category": "NameNode",
+      "index": 1
+    },
+    {
+      "id": "site property",
+      "name": "dfs.webhdfs.enabled",
+      "displayName": "WebHDFS enabled",
+      "description": "Whether to enable WebHDFS feature",
+      "defaultValue": "",
+      "displayType": "checkbox",
+      "isOverridable": false,
+      "category": "General",
+      "serviceName": "HDFS",
+      "index": 0
+    },
+
+    {
+      "id": "site property",
+      "name": "dfs.datanode.failed.volumes.tolerated",
+      "displayName": "DataNode volumes failure toleration",
+      "description": "The number of volumes that are allowed to fail before a DataNode stops
offering service",
+      "defaultValue": "",
+      "displayType": "int",
+      "isVisible": true,
+      "category": "DataNode",
+      "serviceName": "HDFS",
+      "index": 3
+    },
+    {
+      "id": "site property",
+      "name": "dfs.data.dir",
+      "displayName": "DataNode directories",
+      "description": "DataNode directories for HDFS to store the data blocks",
+      "defaultValue": "",
+      "defaultDirectory": "/hadoop/hdfs/data",
+      "displayType": "directories",
+      "isVisible": true,
+      "category": "DataNode",
+      "serviceName": "HDFS",
+      "index": 1
+    },
+    {
+      "id": "site property",
+      "name": "dfs.datanode.data.dir.perm",
+      "displayName": "DataNode directories permission",
+      "description": "",
+      "defaultValue": "",
+      "displayType": "int",
+      "isVisible": false,
+      "category": "DataNode",
+      "serviceName": "HDFS"
+    },
+    {
+      "id": "site property",
+      "name": "dfs.replication",
+      "displayName": "Block replication",
+      "description": "Default block replication.",
+      "displayType": "int",
+      "defaultValue": "",
+      "isVisible": true,
+      "category": "Advanced",
+      "serviceName": "HDFS"
+    },
+    {
+      "id": "site property",
+      "name": "dfs.datanode.du.reserved",
+      "displayName": "Reserved space for HDFS",
+      "description": "Reserved space in bytes per volume. Always leave this much space free
for non dfs use.",
+      "defaultValue": "",
+      "displayType": "int",
+      "unit": "bytes",
+      "isVisible": true,
+      "category": "General",
+      "serviceName": "HDFS",
+      "index": 2
+    },
+
+  /******************************************MAPREDUCE***************************************/
+    {
+      "id": "site property",
+      "name": "mapred.local.dir",
+      "displayName": "MapReduce local directories",
+      "description": "Directories for MapReduce to store intermediate data files",
+      "defaultValue": "",
+      "defaultDirectory": "/hadoop/mapred",
+      "displayType": "directories",
+      "serviceName": "MAPREDUCE",
+      "category": "TaskTracker",
+      "index": 1
+    },
+    {
+      "id": "site property",
+      "name": "mapred.jobtracker.taskScheduler",
+      "displayName": "MapReduce Capacity Scheduler",
+      "description": "The scheduler to use for scheduling of MapReduce jobs",
+      "defaultValue": "",
+      "displayType": "advanced",
+      "isOverridable": false,
+      "serviceName": "MAPREDUCE",
+      "index": 0
+    },
+    {
+      "id": "site property",
+      "name": "mapred.tasktracker.map.tasks.maximum",
+      "displayName": "Number of Map slots per node",
+      "description": "Number of slots that Map tasks that run simultaneously can occupy on
a TaskTracker",
+      "defaultValue": "",
+      "displayType": "int",
+      "serviceName": "MAPREDUCE",
+      "category": "TaskTracker",
+      "index": 2
+    },
+    {
+      "id": "site property",
+      "name": "mapred.tasktracker.reduce.tasks.maximum",
+      "displayName": "Number of Reduce slots per node",
+      "description": "Number of slots that Reduce tasks that run simultaneously can occupy
on a TaskTracker.",
+      "defaultValue": "",
+      "displayType": "int",
+      "serviceName": "MAPREDUCE",
+      "category": "TaskTracker",
+      "index": 3
+    },
+    {
+      "id": "site property",
+      "name": "mapred.cluster.reduce.memory.mb",
+      "displayName": "Cluster's Reduce slot size (virtual memory)",
+      "description": "The virtual memory size of a single Reduce slot in the MapReduce framework",
+      "defaultValue": "2048",
+      "displayType": "int",
+      "unit": "MB",
+      "serviceName": "MAPREDUCE",
+      "index": 2
+    },
+    {
+      "id": "site property",
+      "name": "mapred.job.map.memory.mb",
+      "displayName": "Default virtual memory for a job's map-task",
+      "description": "Virtual memory for single Map task",
+      "defaultValue": "",
+      "displayType": "int",
+      "unit": "MB",
+      "serviceName": "MAPREDUCE",
+      "index": 5
+    },
+    {
+      "id": "site property",
+      "name": "mapred.cluster.max.map.memory.mb",
+      "displayName": "Upper limit on virtual memory for single Map task",
+      "description": "Upper limit on virtual memory size for a single Map task of any MapReduce
job",
+      "defaultValue": "",
+      "displayType": "int",
+      "unit": "MB",
+      "serviceName": "MAPREDUCE",
+      "index": 3
+    },
+    {
+      "id": "site property",
+      "name": "mapred.cluster.max.reduce.memory.mb",
+      "displayName": "Upper limit on virtual memory for single Reduce task",
+      "description": "Upper limit on virtual memory size for a single Reduce task of any
MapReduce job",
+      "defaultValue": "",
+      "displayType": "int",
+      "unit": "MB",
+      "serviceName": "MAPREDUCE",
+      "index": 4
+    },
+    {
+      "id": "site property",
+      "name": "mapred.job.reduce.memory.mb",
+      "displayName": "Default virtual memory for a job's reduce-task",
+      "description": "Virtual memory for single Reduce task",
+      "defaultValue": "",
+      "displayType": "int",
+      "unit": "MB",
+      "serviceName": "MAPREDUCE",
+      "index": 6
+    },
+    {
+      "id": "site property",
+      "name": "mapred.cluster.map.memory.mb",
+      "displayName": "Cluster's Map slot size (virtual memory)",
+      "description": "The virtual memory size of a single Map slot in the MapReduce framework",
+      "defaultValue": "1536",
+      "displayType": "int",
+      "unit": "MB",
+      "serviceName": "MAPREDUCE",
+      "index": 1
+    },
+    {
+      "id": "site property",
+      "name": "mapred.healthChecker.script.path",
+      "displayName": "Job Status directory",
+      "description": "Directory path to view job status",
+      "defaultValue": "",
+      "displayType": "advanced",
+      "serviceName": "MAPREDUCE",
+      "category": "Advanced"
+    },
+    {
+      "id": "site property",
+      "name": "io.sort.mb",
+      "displayName": "Map-side sort buffer memory",
+      "description": "The total amount of Map-side buffer memory to use while sorting files
(Expert-only configuration)",
+      "defaultValue": "",
+      "displayType": "int",
+      "unit": "MB",
+      "serviceName": "MAPREDUCE",
+      "index": 7
+    },
+    {
+      "id": "site property",
+      "name": "io.sort.spill.percent",
+      "displayName": "Limit on buffer",
+      "description": "Percentage of sort buffer used for record collection",
+      "defaultValue": "",
+      "displayType": "float",
+      "serviceName": "MAPREDUCE",
+      "index": 8
+    },
+    {
+      "id": "site property",
+      "name": "mapred.system.dir",
+      "displayName": "MapReduce system directories",
+      "description": "Path on the HDFS where where the MapReduce framework stores system
files",
+      "defaultValue": "/mapred/system",
+      "displayType": "directories",
+      "serviceName": "MAPREDUCE",
+      "category": "Advanced"
+    },
+    {
+      "id": "site property",
+      "name": "mapred.userlog.retain.hours",
+      "displayName": "Job log retention (hours)",
+      "description": "The maximum time, in hours, for which the user-logs are to be retained
after the job completion.",
+      "defaultValue": "",
+      "displayType": "int",
+      "unit": "hours",
+      "serviceName": "MAPREDUCE",
+      "index": 9
+    },
+    {
+      "id": "site property",
+      "name": "mapred.jobtracker.maxtasks.per.job",
+      "displayName": "Maximum number tasks for a Job",
+      "description": "Maximum number of tasks for a single Job",
+      "defaultValue": "",
+      "displayType": "int",
+      "serviceName": "MAPREDUCE",
+      "index": 10
+    },
+
+  /**********************************************oozie-site***************************************/
+    {
+      "id": "site property",
+      "name": "oozie.db.schema.name",
+      "displayName": "Database Name",
+      "description": "Database name used for the Oozie",
+      "defaultValue": "",
+      "isOverridable": false,
+      "displayType": "host",
+      "isVisible": true,
+      "isObserved": true,
+      "category": "Oozie Server",
+      "serviceName": "OOZIE",
+      "index": 3
+    },
+    {
+      "id": "site property",
+      "name": "oozie.service.JPAService.jdbc.username",
+      "displayName": "Database Username",
+      "description": "Database user name to use to connect to the database",
+      "defaultValue": "",
+      "isOverridable": false,
+      "displayType": "host",
+      "category": "Oozie Server",
+      "serviceName": "OOZIE",
+      "index": 4
+    },
+    {
+      "id": "site property",
+      "name": "oozie.service.JPAService.jdbc.password",
+      "displayName": "Database Password",
+      "description": "Database password to use to connect to the database",
+      "defaultValue": "",
+      "isOverridable": false,
+      "displayType": "password",
+      "isVisible": true,
+      "category": "Oozie Server",
+      "serviceName": "OOZIE",
+      "index": 5
+    },
+    {
+      "id": "site property",
+      "name": "oozie.service.JPAService.jdbc.driver",
+      "displayName": "JDBC driver class",
+      "defaultValue": "",
+      "value": "",     // the value is overwritten in code
+      "isVisible": false,
+      "description": "Database name used for the Oozie",
+      "category": "Advanced",
+      "serviceName": "OOZIE"
+    },
+    {
+      "id": "site property",
+      "name": "oozie.service.JPAService.jdbc.url",
+      "displayName": "Database URL",
+      "description": "The JDBC connection URL to the database",
+      "defaultValue": "",
+      "isOverridable": false,
+      "displayType": "advanced",
+      "category": "Oozie Server",
+      "serviceName": "OOZIE",
+      "index": 6
+    },
+
+  /**********************************************hive-site***************************************/
+    {
+      "id": "site property",
+      "name": "javax.jdo.option.ConnectionDriverName",
+      "displayName": "JDBC driver class",
+      "defaultValue": "",
+      "value": "",     // the value is overwritten in code
+      "isVisible": false,
+      "description": "Driver class name for a JDBC metastore",
+      "category": "Advanced",
+      "serviceName": "HIVE"
+    },
+    {
+      "id": "site property",
+      "name": "javax.jdo.option.ConnectionUserName",
+      "displayName": "Database Username",
+      "description": "Database user name to use to connect to the database",
+      "defaultValue": "hive",
+      "displayType": "host",
+      "isOverridable": false,
+      "category": "Hive Metastore",
+      "serviceName": "HIVE",
+      "index": 5
+    },
+    {
+      "id": "site property",
+      "name": "javax.jdo.option.ConnectionPassword",
+      "displayName": "Database Password",
+      "description": "Database password to use to connect to the metastore database",
+      "defaultValue": "",
+      "displayType": "password",
+      "isOverridable": false,
+      "isVisible": true,
+      "category": "Hive Metastore",
+      "serviceName": "HIVE",
+      "index": 6
+    },
+    {
+      "id": "site property",
+      "name": "javax.jdo.option.ConnectionURL",
+      "displayName": "Database URL",
+      "value": "",
+      "defaultValue": "", // set to a 'jdbc' to not include this in initial error count
+      "description": "The JDBC connection URL to the database",
+      "displayType": "advanced",
+      "isOverridable": false,
+      "isVisible": true,
+      "category": "Hive Metastore",
+      "serviceName": "HIVE",
+      "index": 7
+    },
+
+  /**********************************************hbase-site***************************************/
+    {
+      "id": "site property",
+      "name": "hbase.tmp.dir",
+      "displayName": "HBase local directory",
+      "description": "Temporary directory on the local filesystem",
+      "defaultDirectory": "/hadoop/hbase",
+      "defaultValue": "",
+      "displayType": "directory",
+      "isVisible": true,
+      "category": "Advanced",
+      "serviceName": "HBASE"
+
+    },
+    {
+      "id": "site property",
+      "name": "hbase.regionserver.global.memstore.upperLimit",
+      "displayName": "hbase.regionserver.global.memstore.upperLimit",
+      "description": "",
+      "defaultValue": "",
+      "displayType": "float",
+      "category": "Advanced",
+      "serviceName": "HBASE"
+    },
+    {
+      "id": "site property",
+      "name": "hbase.regionserver.global.memstore.lowerLimit",
+      "displayName": "hbase.regionserver.global.memstore.lowerLimit",
+      "defaultValue": "",
+      "description": "When memstores are being forced to flush to make room in\
+      memory, keep flushing until we hit this mark. Defaults to 35% of heap.\
+      This value equal to hbase.regionserver.global.memstore.upperLimit causes\
+      the minimum possible flushing to occur when updates are blocked due to\
+      memstore limiting.",
+      "displayType": "float",
+      "category": "Advanced",
+      "serviceName": "HBASE"
+    },
+    {
+      "id": "site property",
+      "name": "hbase.hstore.blockingStoreFiles",
+      "displayName": "hstore blocking storefiles",
+      "description": "If more than this number of StoreFiles in any one Store (one StoreFile
is written per flush of " +
+        "MemStore) then updates are blocked for this HRegion until a compaction is completed,
or until " +
+        "hbase.hstore.blockingWaitTime has been exceeded.",
+      "defaultValue": "",
+      "isRequired": true,
+      "displayType": "int",
+      "category": "Advanced",
+      "serviceName": "HBASE"
+    },
+    {
+      "id": "site property",
+      "name": "hbase.hstore.compactionThreshold",
+      "displayName": "HBase HStore compaction threshold",
+      "description": "If more than this number of HStoreFiles in any one HStore then a compaction
is run to rewrite all HStoreFiles files as one.",
+      "defaultValue": "3",
+      "displayType": "int",
+      "category": "General",
+      "serviceName": "HBASE",
+      "index": 0
+    },
+    {
+      "id": "site property",
+      "name": "hfile.block.cache.size",
+      "displayName": "HFile block cache size ",
+      "description": "Percentage of maximum heap (-Xmx setting) to allocate to block cache
used by HFile/StoreFile. Set to 0 to disable but it's not recommended.",
+      "defaultValue": "0.40",
+      "displayType": "float",
+      "category": "General",
+      "serviceName": "HBASE",
+      "index": 1
+    },
+    {
+      "id": "site property",
+      "name": "hbase.hregion.max.filesize",
+      "displayName": "Maximum HStoreFile Size",
+      "description": "If any one of a column families' HStoreFiles has grown to exceed this
value, the hosting HRegion is split in two.",
+      "defaultValue": "",
+      "displayType": "int",
+      "unit": "bytes",
+      "isVisible": true,
+      "category": "General",
+      "serviceName": "HBASE",
+      "index": 2
+    },
+    {
+      "id": "site property",
+      "name": "hbase.regionserver.handler.count",
+      "displayName": "HBase RegionServer Handler",
+      "description": "Count of RPC Listener instances spun up on RegionServers",
+      "defaultValue": "60",
+      "displayType": "int",
+      "category": "RegionServer",
+      "serviceName": "HBASE",
+      "index": 2
+    },
+    {
+      "id": "site property",
+      "name": "hbase.hregion.majorcompaction",
+      "displayName": "HBase Region Major Compaction",
+      "description": "The time between major compactions of all HStoreFiles in a region.
Set to 0 to disable automated major compactions.",
+      "defaultValue": "",
+      "displayType": "int",
+      "unit": "ms",
+      "isVisible": true,
+      "category": "RegionServer",
+      "serviceName": "HBASE",
+      "index": 3
+    },
+    {
+      "id": "site property",
+      "name": "hbase.hregion.memstore.block.multiplier",
+      "displayName": "HBase Region Block Multiplier",
+      "description": "Block updates if memstore has \"Multiplier * HBase Region Memstore
Flush Size\" bytes. Useful preventing runaway memstore during spikes in update traffic",
+      "defaultValue": "",
+      "displayType": "int",
+      "category": "RegionServer",
+      "serviceName": "HBASE",
+      "index": 4
+    },
+    {
+      "id": "site property",
+      "name": "hbase.hregion.memstore.mslab.enabled",
+      "displayName": "hbase.hregion.memstore.mslab.enabled",
+      "description": "Enables the MemStore-Local Allocation Buffer,\
+      a feature which works to prevent heap fragmentation under\
+      heavy write loads. This can reduce the frequency of stop-the-world\
+      GC pauses on large heaps.",
+      "defaultValue": "",
+      "displayType": "checkbox",
+      "category": "Advanced",
+      "serviceName": "HBASE"
+    },
+    {
+      "id": "site property",
+      "name": "hbase.hregion.memstore.flush.size",
+      "displayName": "HBase Region Memstore Flush Size",
+      "description": "Memstore will be flushed to disk if size of the memstore exceeds this
number of bytes.",
+      "defaultValue": "",
+      "displayType": "int",
+      "unit": "bytes",
+      "category": "RegionServer",
+      "serviceName": "HBASE",
+      "index": 5
+    },
+    {
+      "id": "site property",
+      "name": "hbase.client.scanner.caching",
+      "displayName": "HBase Client Scanner Caching",
+      "description": "Number of rows that will be fetched when calling next on a scanner
if it is not served from \
+      (local, client) memory. Do not set this value such that the time between invocations
is greater than the scanner timeout",
+      "defaultValue": "",
+      "displayType": "int",
+      "unit": "rows",
+      "isVisible": true,
+      "category": "General",
+      "serviceName": "HBASE",
+      "index": 3
+    },
+    {
+      "id": "site property",
+      "name": "zookeeper.session.timeout",
+      "displayName": "Zookeeper timeout for HBase Session",
+      "description": "HBase passes this to the zk quorum as suggested maximum time for a
session",
+      "defaultValue": "",
+      "displayType": "int",
+      "unit": "ms",
+      "category": "General",
+      "serviceName": "HBASE",
+      "index": 4
+    },
+    {
+      "id": "site property",
+      "name": "hbase.client.keyvalue.maxsize",
+      "displayName": "HBase Client Maximum key-value Size",
+      "description": "Specifies the combined maximum allowed size of a KeyValue instance.
It should be set to a fraction of the maximum region size.",
+      "defaultValue": "",
+      "displayType": "int",
+      "unit": "bytes",
+      "category": "General",
+      "serviceName": "HBASE",
+      "index": 5
+    },
+    {
+      "id": "site property",
+      "name": "dfs.client.read.shortcircuit",
+      "displayName": "HDFS Short-circuit read",
+      "description": "This configuration parameter turns on short-circuit local reads.",
+      "defaultValue": "",
+      "displayType": "checkbox",
+      "category": "Advanced",
+      "serviceName": "HBASE"
+    },
+    {
+      "id": "site property",
+      "name": "dfs.support.append",
+      "displayName": "HDFS append support",
+      "description": "HDFS append support",
+      "defaultValue": "",
+      "displayType": "checkbox",
+      "serviceName": "HBASE",
+      "category": "Advanced"
+    }
   ]
 };

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/88f51325/ambari-web/app/models/service_config.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/models/service_config.js b/ambari-web/app/models/service_config.js
index e606bd9..ae7df60 100644
--- a/ambari-web/app/models/service_config.js
+++ b/ambari-web/app/models/service_config.js
@@ -287,21 +287,20 @@ App.ServiceConfigProperty = Ember.Object.extend({
       case 'zookeeperserver_hosts':
         this.set('value', masterComponentHostsInDB.filterProperty('component', 'ZOOKEEPER_SERVER').mapProperty('hostName'));
         break;
-      case 'dfs_name_dir':
-      case 'dfs_namenode_name_dir':
-      case 'dfs_data_dir':
-      case 'dfs_datanode_data_dir':
+      case 'dfs.name.dir':
+      case 'dfs.namenode.name.dir':
+      case 'dfs.data.dir':
+      case 'dfs.datanode.data.dir':
       case 'yarn.nodemanager.local-dirs':
       case 'yarn.nodemanager.log-dirs':
-      case 'mapred_local_dir':
-      case 'mapreduce_cluster_local_dir':
+      case 'mapred.local.dir':
         this.unionAllMountPoints(!isOnlyFirstOneNeeded, localDB);
         break;
-      case 'fs_checkpoint_dir':
-      case 'dfs_namenode_checkpoint_dir':
+      case 'fs.checkpoint.dir':
+      case 'dfs.namenode.checkpoint.dir':
       case 'zk_data_dir':
       case 'oozie_data_dir':
-      case 'hbase_tmp_dir':
+      case 'hbase.tmp.dir':
         this.unionAllMountPoints(isOnlyFirstOneNeeded, localDB);
         break;
     }
@@ -329,29 +328,28 @@ App.ServiceConfigProperty = Ember.Object.extend({
     var temp = '';
     var setOfHostNames = [];
     switch (this.get('name')) {
-      case 'dfs_namenode_name_dir':
-      case 'dfs_name_dir':
+      case 'dfs.namenode.name.dir':
+      case 'dfs.name.dir':
         var components = masterComponentHostsInDB.filterProperty('component', 'NAMENODE');
         components.forEach(function (component) {
           setOfHostNames.push(component.hostName);
         }, this);
         break;
-      case 'fs_checkpoint_dir':
-      case 'dfs_namenode_checkpoint_dir':
+      case 'fs.checkpoint.dir':
+      case 'dfs.namenode.checkpoint.dir':
         var components = masterComponentHostsInDB.filterProperty('component', 'SECONDARY_NAMENODE');
         components.forEach(function (component) {
           setOfHostNames.push(component.hostName);
         }, this);
         break;
-      case 'dfs_data_dir':
-      case 'dfs_datanode_data_dir':
+      case 'dfs.data.dir':
+      case 'dfs.datanode.data.dir':
         temp = slaveComponentHostsInDB.findProperty('componentName', 'DATANODE');
         temp.hosts.forEach(function (host) {
           setOfHostNames.push(host.hostName);
         }, this);
         break;
-      case 'mapred_local_dir':
-      case 'mapreduce_cluster_local_dir':
+      case 'mapred.local.dir':
         temp = slaveComponentHostsInDB.findProperty('componentName', 'TASKTRACKER') || slaveComponentHostsInDB.findProperty('componentName', 'NODEMANAGER');
         temp.hosts.forEach(function (host) {
           setOfHostNames.push(host.hostName);
@@ -376,7 +374,7 @@ App.ServiceConfigProperty = Ember.Object.extend({
           setOfHostNames.push(component.hostName);
         }, this);
         break;
-      case 'hbase_tmp_dir':
+      case 'hbase.tmp.dir':
         var temp = slaveComponentHostsInDB.findProperty('componentName', 'HBASE_REGIONSERVER');
         temp.hosts.forEach(function (host) {
           setOfHostNames.push(host.hostName);
@@ -571,7 +569,7 @@ App.ServiceConfigProperty = Ember.Object.extend({
           }
           break;
         case 'advanced':
-          if(this.get('name')=='hive_jdbc_connection_url' || this.get('name')=='oozie_jdbc_connection_url') {
+          if(this.get('name')=='javax.jdo.option.ConnectionURL' || this.get('name')=='oozie.service.JPAService.jdbc.url') {
             if (validator.isNotTrimmed(value)) {
               this.set('errorMessage', Em.I18n.t('host.trimspacesValidation'));
               isError = true;

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/88f51325/ambari-web/app/utils/config.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/utils/config.js b/ambari-web/app/utils/config.js
index 731a988..47c2150 100644
--- a/ambari-web/app/utils/config.js
+++ b/ambari-web/app/utils/config.js
@@ -985,7 +985,7 @@ App.config = Em.Object.create({
       case 'password':
         break;
       case 'advanced':
-        if(name == 'hive_jdbc_connection_url' || name == 'oozie_jdbc_connection_url') {
+        if(name == 'javax.jdo.option.ConnectionURL' || name == 'oozie.service.JPAService.jdbc.url') {
           rez = value.trim();
         }
       default:

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/88f51325/ambari-web/app/views/common/configs/services_config.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/views/common/configs/services_config.js b/ambari-web/app/views/common/configs/services_config.js
index c49e38c..cbda51c 100644
--- a/ambari-web/app/views/common/configs/services_config.js
+++ b/ambari-web/app/views/common/configs/services_config.js
@@ -129,14 +129,14 @@ App.ServiceConfigsByCategoryView = Ember.View.extend({
         }
       } else if (changedProperty.get("name") == "hbase_user" && !App.get('isHadoop2Stack'))
{
         curConfigs = stepConfigs.findProperty("serviceName", "HDFS").get("configs");
-        if (newValue != curConfigs.findProperty("name", "dfs_block_local_path_access_user").get("value")) {
+        if (newValue != curConfigs.findProperty("name", "dfs.block.local-path-access.user").get("value")) {
           this.affectedProperties.push(
             {
               serviceName: "HDFS",
-              propertyName: "dfs_block_local_path_access_user",
+              propertyName: "dfs.block.local-path-access.user",
               propertyDisplayName: "dfs.block.local-path-access.user",
               newValue: newValue,
-              curValue: curConfigs.findProperty("name", "dfs_block_local_path_access_user").get("value"),
+              curValue: curConfigs.findProperty("name", "dfs.block.local-path-access.user").get("value"),
               changedPropertyName: "hbase_user"
             }
           );

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/88f51325/ambari-web/app/views/wizard/controls_view.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/views/wizard/controls_view.js b/ambari-web/app/views/wizard/controls_view.js
index c9f542d..de0ec4f 100644
--- a/ambari-web/app/views/wizard/controls_view.js
+++ b/ambari-web/app/views/wizard/controls_view.js
@@ -250,7 +250,7 @@ App.ServiceConfigRadioButtons = Ember.View.extend({
       case 'HIVE':
         return this.get('categoryConfigsAll').findProperty('name', 'hive_database_name').get('value');
       case 'OOZIE':
-        return this.get('categoryConfigsAll').findProperty('name', 'oozie_database_name').get('value');
+        return this.get('categoryConfigsAll').findProperty('name', 'oozie.db.schema.name').get('value');
       default:
         return null;
     }
@@ -283,9 +283,9 @@ App.ServiceConfigRadioButtons = Ember.View.extend({
 
   connectionUrl: function () {
     if (this.get('serviceConfig.serviceName') === 'HIVE') {
-      return this.get('categoryConfigsAll').findProperty('name', 'hive_jdbc_connection_url');
+      return this.get('categoryConfigsAll').findProperty('name', 'javax.jdo.option.ConnectionURL');
     } else {
-      return this.get('categoryConfigsAll').findProperty('name', 'oozie_jdbc_connection_url');
+      return this.get('categoryConfigsAll').findProperty('name', 'oozie.service.JPAService.jdbc.url');
     }
   }.property('serviceConfig.serviceName'),
 


Mime
View raw message