hadoop-common-commits mailing list archives

From: cdoug...@apache.org
Subject: svn commit: r723855 [3/23] - in /hadoop/core/trunk: ./ src/contrib/ src/contrib/chukwa/ src/contrib/chukwa/bin/ src/contrib/chukwa/conf/ src/contrib/chukwa/docs/ src/contrib/chukwa/docs/paper/ src/contrib/chukwa/hadoop-packaging/ src/contrib/chukwa/lib...
Date: Fri, 05 Dec 2008 20:30:21 GMT
Modified: hadoop/core/trunk/src/contrib/chukwa/conf/mdl.xml.template
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/contrib/chukwa/conf/mdl.xml.template?rev=723855&r1=723854&r2=723855&view=diff
==============================================================================
--- hadoop/core/trunk/src/contrib/chukwa/conf/mdl.xml.template (original)
+++ hadoop/core/trunk/src/contrib/chukwa/conf/mdl.xml.template Fri Dec  5 12:30:14 2008
@@ -4,123 +4,129 @@
 <!-- Put site-specific property overrides in this file. -->
 
 <configuration>
+<!-- database tables -->
 
-<!--- collector properties -->
 <property>
-  <name>chukwa.engine.dsDirectory.rootFolder</name>
-  <value>hdfs://localhost:9000/chukwa/</value>
-  <description>root folder for Chukwa HDFS storage</description>
+  <name>report.db.name.nodeactivity</name>
+  <value>node_activity</value>
+  <description></description>
+</property>
+  
+<property>
+  <name>report.db.primary.key.nodeactivity</name>
+  <value>timestamp</value>
 </property>
 
-<!--- database properties -->
 <property>
-  <name>jdbc.host</name>
-  <value>localhost:3306</value>
-  <description>mysql server hostname and port</description>
+  <name>metric.nodeactivity.down</name>
+  <value>down</value>
 </property>
 
 <property>
-  <name>jdbc.user</name>
-  <value>gmetrics</value>
-  <description> mysql user name</description>
+  <name>metric.nodeactivity.downmachines</name>
+  <value>downMachines</value>
 </property>
 
 <property>
-  <name>jdbc.password</name>
-  <value>gmetrics</value>
-  <description>mysql password</description>
+  <name>metric.nodeactivity.free</name>
+  <value>free</value>
 </property>
 
 <property>
-  <name>jdbc.db</name>
-  <value>flubbergold</value>
-  <description>name of the database</description>
+  <name>metric.nodeactivity.freemachines</name>
+  <value>freeMachines</value>
 </property>
 
-<!-- Torque Data Loader Config -->
 <property>
-  <name>torque.server</name>
-  <value>hostname</value>
+  <name>metric.nodeactivity.used</name>
+  <value>used</value>
 </property>
 
 <property>
-  <name>torque.binDir</name>
-  <value>/grid/0/torque/current/bin</value>
+  <name>metric.nodeactivity.usedmachines</name>
+  <value>usedMachines</value>
 </property>
 
-<!-- Log Data Loader Config -->
+
 <property>
-  <name>job.log.dfs.config</name>
-  <value>/grid/0/hadoop/conf/hadoop-site.xml</value>
+  <name>report.db.name.hod_job_digest</name>
+  <value>hod_job_digest</value>
 </property>
 
 <property>
-  <name>job.log.dfs.log.suffix</name>
-  <value>host.example.com</value>
+  <name>report.db.name.cluster_system_metrics</name>
+  <value>cluster_system_metrics</value>
 </property>
 
 <property>
-  <name>job.log.dfs.path</name>
-  <value>/user</value>
+  <name>report.db.name.systemmetrics</name>
+  <value>system_metrics</value>
+  <description></description>
 </property>
 
 <property>
-  <name>job.log.dfs.path.append.uid</name>
-  <value>on</value>
+  <name>report.db.name.df</name>
+  <value>disk</value>
 </property>
 
-<!--- NodeActivity properties -->
 <property>
-  <name>mdl.plugin.NodeActivityPlugin.cmde</name>
-  <value>/grid/0/torque/current/bin/pbsnodes </value>
-  <description>Command to retrieve the node activity raw data used/free/down</description>
+  <name>report.db.name.cluster_disk</name>
+  <value>cluster_disk</value>
 </property>
 
-<!-- database tables -->
 <property>
-  <name>report.db.name.disks.perdisk</name>
-  <value>SimonData</value>
-  <description></description>
+  <name>report.db.name.hadoop_dfs_namenode</name>
+  <value>dfs_namenode</value>
 </property>
 
 <property>
-  <name>report.db.name.systemstate.pernode</name>
-  <value>SimonData</value>
+  <name>report.db.name.hadoop_dfs_datanode</name>
+  <value>dfs_datanode</value>
 </property>
 
 <property>
-  <name>report.db.name.systemstate.percluster</name>
-  <value>cluster_systemstate</value>
+  <name>report.db.name.hadoop_dfs_throughput</name>
+  <value>dfs_throughput</value>
 </property>
 
 <property>
-  <name>report.db.name.jvm.tmp</name>
-  <value>JVM</value>
+  <name>report.db.name.hadoop_dfs_fsnamesystem</name>
+  <value>dfs_fsnamesystem</value>
 </property>
 
 <property>
-  <name>report.db.name.dfs.hdfs throughput</name>
-  <value>dfs_throughput</value>
+  <name>report.db.name.hadoop_dfs_fsdirectory</name>
+  <value>dfs_namenode</value>
 </property>
 
 <property>
-  <name>report.db.name.dfs.individual datanode throughput</name>
-  <value>dfs_individual</value>
+  <name>report.db.name.hadoop_jvm_metrics</name>
+  <value>hadoop_jvm</value>
 </property>
 
 <property>
-  <name>report.db.name.dfs.namenode operations</name>
-  <value>dfs_namenode</value>
+  <name>report.db.name.hadoop_mapred_jobtracker</name>
+  <value>hadoop_mapred</value>
+</property>
+
+<property>
+  <name>report.db.name.hadoop_rpc_metrics</name>
+  <value>hadoop_rpc</value>
+</property>
+
+<property>
+  <name>report.db.name.cluster_hadoop_rpc</name>
+  <value>cluster_hadoop_rpc</value>
 </property>
 
 <property>
-  <name>report.db.name.dfs.fsnamesystem status</name>
-  <value>dfs_fsnamesystem_status</value>
+  <name>report.db.name.mssrgraph</name>
+  <value>mssrgraph</value>
 </property>
 
 <property>
-  <name>report.db.name.rpcmetrics.rpc metrics</name>
-  <value>rpc_metrics</value>
+  <name>report.db.name.mrjobcounters</name>
+  <value>MRJobCounters</value>
 </property>
 
 <property>
@@ -130,7 +136,7 @@
 
 <property>
   <name>report.db.name.hodmachine</name>
-  <value>HodMachine</value>
+  <value>hod_machine</value>
 </property>
 
 <property>
@@ -163,935 +169,1095 @@
   <value>MRJobCounters</value>
 </property>
 
-<!-- Simon Data Loader Config -->
 <property>
-  <name>normalize.disks.perdisk.device</name>
-  <value>1</value>
+  <name>report.db.name.user_util</name>
+  <value>user_util</value>
 </property>
 
+<!-- System Metrics Config -->
 <property>
-  <name>report.db.primary.key.systemstate.percluster</name>
+  <name>report.db.primary.key.systemmetrics</name>
   <value>timestamp</value>
 </property>
 
 <property>
-  <name>report.db.primary.key.disks.perdisk</name>
-  <value>timestamp</value>
+  <name>metric.systemmetrics.csource</name>
+  <value>host</value>
 </property>
 
 <property>
-  <name>report.db.primary.key.systemstate.pernode</name>
-  <value>timestamp</value>
+  <name>metric.systemmetrics.ldavg-1</name>
+  <value>load_1</value>
 </property>
 
 <property>
-  <name>report.db.primary.key.dfs.hdfs throughput</name>
-  <value>timestamp</value>
+  <name>metric.systemmetrics.ldavg-5</name>
+  <value>load_5</value>
 </property>
 
 <property>
-  <name>report.db.primary.key.dfs.individual datanode throughput</name>
-  <value>timestamp</value>
+  <name>metric.systemmetrics.ldavg-15</name>
+  <value>load_15</value>
 </property>
 
 <property>
-  <name>report.db.primary.key.dfs.namenode operations</name>
-  <value>timestamp</value>
+  <name>metric.systemmetrics.tasks_total</name>
+  <value>task_total</value>
 </property>
-
 <property>
-  <name>report.db.primary.key.dfs.fsnamesystem status</name>
-  <value>timestamp</value>
+  <name>metric.systemmetrics.tasks_running</name>
+  <value>task_running</value>
 </property>
 
 <property>
-  <name>report.db.primary.key.rpcmetrics.rpc metrics</name>
-  <value>timestamp</value>
+  <name>metric.systemmetrics.tasks_sleeping</name>
+  <value>task_sleep</value>
 </property>
 
 <property>
-  <name>metric.disks.perdisk.node</name>
-  <value>machine</value>
+  <name>metric.systemmetrics.tasks_stopped</name>
+  <value>task_stopped</value>
 </property>
 
 <property>
-  <name>metric.systemstate.pernode.node</name>
-  <value>machine</value>
+  <name>metric.systemmetrics.tasks_zombie</name>
+  <value>task_zombie</value>
 </property>
 
 <property>
-  <name>metric.systemstate.pernode.node.cpu_busy%</name>
-  <value>CPUBusy</value>
+  <name>metric.systemmetrics.mem_total</name>
+  <value>mem_total</value>
 </property>
 
 <property>
-  <name>metric.systemstate.pernode.node.mem_free%</name>
-  <value>FreeMemPercentage</value>
+  <name>metric.systemmetrics.mem_buffers</name>
+  <value>mem_buffers</value>
 </property>
 
 <property>
-  <name>metric.systemstate.pernode.node.cpu_idle%</name>
-  <value>CPUIdle</value>
+  <name>metric.systemmetrics.mem_free</name>
+  <value>mem_free</value>
 </property>
 
 <property>
-  <name>metric.systemstate.pernode.node.cpu_nice%</name>
-  <value>CPUNice</value>
+  <name>metric.systemmetrics.mem_used</name>
+  <value>mem_used</value>
 </property>
 
 <property>
-  <name>metric.systemstate.pernode.node.cpu_system%</name>
-  <value>CPUSystem</value>
+  <name>metric.systemmetrics.mem_shared</name>
+  <value>mem_shared</value>
 </property>
 
 <property>
-  <name>metric.systemstate.pernode.node.cpu_user%</name>
-  <value>CPUUser</value>
+  <name>metric.systemmetrics.kbcached</name>
+  <value>mem_cached</value>
 </property>
 
-<property>V
-  <name>metric.systemstate.pernode.node.cpu_iowait%</name>
-  <value>CPUIOWait</value>
+<property>
+  <name>metric.systemmetrics.eth0.rxerr/s</name>
+  <value>eth0_rxerrs</value>
 </property>
 
 <property>
-  <name>metric.systemstate.pernode.node.cpu_irq%</name>
-  <value>CPUIrq</value>
+  <name>metric.systemmetrics.eth0.rxbyt/s</name>
+  <value>eth0_rxbyts</value>
 </property>
 
 <property>
-  <name>metric.systemstate.pernode.node.cpu_softirq%</name>
-  <value>CPUSoftIrq</value>
+  <name>metric.systemmetrics.eth0.rxpck/s</name>
+  <value>eth0_rxpcks</value>
 </property>
 
 <property>
-  <name>metric.systemstate.pernode.node.load_fifteen</name>
-  <value>LoadFifteen</value>
+  <name>metric.systemmetrics.eth0.rxdrop/s</name>
+  <value>eth0_rxdrops</value>
 </property>
 
 <property>
-  <name>metric.systemstate.pernode.node.load_five</name>
-  <value>LoadFive</value>
+  <name>metric.systemmetrics.eth0.txerr/s</name>
+  <value>eth0_txerrs</value>
 </property>
 
 <property>
-  <name>metric.systemstate.pernode.node.load_one</name>
-  <value>LoadOne</value>
+  <name>metric.systemmetrics.eth0.txbyt/s</name>
+  <value>eth0_txbyts</value>
 </property>
 
 <property>
-  <name>metric.systemstate.pernode.node.mem_buffers%</name>
-  <value>MemBuffersPercentage</value>
+  <name>metric.systemmetrics.eth0.txpck/s</name>
+  <value>eth0_txpcks</value>
 </property>
 
 <property>
-  <name>metric.systemstate.pernode.node.mem_cached%</name>
-  <value>MemCachedPercentage</value>
+  <name>metric.systemmetrics.eth0.txdrop/s</name>
+  <value>eth0_txdrops</value>
 </property>
 
 <property>
-  <name>metric.systemstate.pernode.node.mem_user%</name>
-  <value>MemUserPercentage</value>
+  <name>metric.systemmetrics.eth1.rxerr/s</name>
+  <value>eth1_rxerrs</value>
 </property>
 
 <property>
-  <name>metric.systemstate.pernode.node.mem_shared%</name>
-  <value>MemSharedPercentage</value>
+  <name>metric.systemmetrics.eth1.rxbyt/s</name>
+  <value>eth1_rxbyts</value>
 </property>
 
 <property>
-  <name>metric.systemstate.pernode.node.disk_max_busy%</name>
-  <value>MaxDiskBusyPercentage</value>
+  <name>metric.systemmetrics.eth1.rxpck/s</name>
+  <value>eth1_rxpcks</value>
 </property>
 
 <property>
-  <name>metric.systemstate.pernode.node.bytes_in</name>
-  <value>NetworkInKBps</value>
+  <name>metric.systemmetrics.eth1.rxdrop/s</name>
+  <value>eth1_rxdrops</value>
 </property>
 
 <property>
-  <name>metric.systemstate.pernode.node.bytes_out</name>
-  <value>NetworkOutKBps</value>
+  <name>metric.systemmetrics.eth1.txerr/s</name>
+  <value>eth1_txerrs</value>
 </property>
 
 <property>
-  <name>metric.disks.perdisk.node.device.sda.rKb/s</name>
-  <value>DiskAReadKBps</value>
+  <name>metric.systemmetrics.eth1.txbyt/s</name>
+  <value>eth1_txbyts</value>
 </property>
 
 <property>
-  <name>metric.disks.perdisk.node.device.sda.wKb/s</name>
-  <value>DiskAWriteKBps</value>
+  <name>metric.systemmetrics.eth1.txpck/s</name>
+  <value>eth1_txpcks</value>
 </property>
 
 <property>
-  <name>metric.disks.perdisk.node.device.sdb.rKb/s</name>
-  <value>DiskBReadKBps</value>
+  <name>metric.systemmetrics.eth1.txdrop/s</name>
+  <value>eth1_txdrops</value>
 </property>
 
 <property>
-  <name>metric.disks.perdisk.node.device.sdb.wKb/s</name>
-  <value>DiskBWriteKBps</value>
+  <name>metric.systemmetrics.sda.rkb/s</name>
+  <value>sda_rkbs</value>
 </property>
 
 <property>
-  <name>metric.disks.perdisk.node.device.sdc.rKb/s</name>
-  <value>DiskCReadKBps</value>
+  <name>metric.systemmetrics.sda.wkb/s</name>
+  <value>sda_wkbs</value>
 </property>
 
 <property>
-  <name>metric.disks.perdisk.node.device.sdc.wKb/s</name>
-  <value>DiskCWriteKBps</value>
+  <name>metric.systemmetrics.sdb.rkb/s</name>
+  <value>sdb_rkbs</value>
 </property>
 
 <property>
-  <name>metric.disks.perdisk.node.device.sdd.rKb/s</name>
-  <value>DiskDReadKBps</value>
+  <name>metric.systemmetrics.sdb.wkb/s</name>
+  <value>sdb_wkbs</value>
 </property>
 
 <property>
-  <name>metric.disks.perdisk.node.device.sdd.wKb/s</name>
-  <value>DiskDWriteKBps</value>
+  <name>metric.systemmetrics.sdc.rkb/s</name>
+  <value>sdc_rkbs</value>
 </property>
 
 <property>
-  <name>metric.systemstate.pernode.node.disk_total</name>
-  <value>DiskTotalGB</value>
+  <name>metric.systemmetrics.sdc.wkb/s</name>
+  <value>sdc_wkbs</value>
 </property>
 
 <property>
-  <name>metric.systemstate.pernode.node.disk_used%</name>
-  <value>DiskUsedPercentage</value>
+  <name>metric.systemmetrics.sdd.rkb/s</name>
+  <value>sdd_rkbs</value>
 </property>
 
 <property>
-  <name>metric.systemstate.pernode.node.pkts_in</name>
-  <value>PacketsIn</value>
+  <name>metric.systemmetrics.sdd.wkb/s</name>
+  <value>sdd_wkbs</value>
 </property>
 
 <property>
-  <name>metric.systemstate.pernode.node.pkts_out</name>
-  <value>PacketsOut</value>
+  <name>metric.systemmetrics.%idle</name>
+  <value>cpu_idle_pcnt</value>
 </property>
 
 <property>
-  <name>metric.systemstate.pernode.node.swap_in_kBps</name>
-  <value>SwapInKBps</value>
+  <name>metric.systemmetrics.%nice</name>
+  <value>cpu_nice_pcnt</value>
 </property>
 
 <property>
-  <name>metric.systemstate.pernode.node.swap_out_kBps</name>
-  <value>SwapOutKBps</value>
+  <name>metric.systemmetrics.%sys</name>
+  <value>cpu_system_pcnt</value>
 </property>
 
 <property>
-  <name>metric.systemstate.percluster..#nodes</name>
-  <value>nodes</value>
+  <name>metric.systemmetrics.%user</name>
+  <value>cpu_user_pcnt</value>
 </property>
 
 <property>
-  <name>metric.systemstate.percluster..#pids</name>
-  <value>pids</value>
+  <name>metric.systemmetrics.cpu_hi%</name>
+  <value>cpu_hirq_pcnt</value>
 </property>
 
 <property>
-  <name>metric.systemstate.percluster..avgBlurbs</name>
-  <value>avgBlurbs</value>
+  <name>metric.systemmetrics.cpu_si%</name>
+  <value>cpu_sirq_pcnt</value>
 </property>
 
 <property>
-  <name>metric.systemstate.percluster..avgBlurbRate%</name>
-  <value>avgBlurbRate</value>
+  <name>metric.systemmetrics.%iowait</name>
+  <value>iowait_pcnt</value>
 </property>
 
 <property>
-  <name>metric.systemstate.percluster..avgCpuIdle%</name>
-  <value>avgCpuIdle</value>
+  <name>metric.systemmetrics.mem_buffers_pcnt</name>
+  <value>mem_buffers_pcnt</value>
 </property>
 
 <property>
-  <name>metric.systemstate.percluster..avgCpuNice</name>
-  <value>avgCpuNice</value>
+  <name>metric.systemmetrics.mem_cached_pcnt</name>
+  <value>mem_cached_pcnt</value>
 </property>
 
 <property>
-  <name>metric.systemstate.percluster..avgCpuNice%</name>
-  <value>avgCpuNicePercent</value>
+  <name>metric.systemmetrics.%memused</name>
+  <value>mem_used_pcnt</value>
 </property>
 
 <property>
-  <name>metric.systemstate.percluster..avgCpuSystem%</name>
-  <value>avgCpuSystem</value>
+  <name>metric.systemmetrics.eth0_busy_pcnt</name>
+  <value>eth0_busy_pcnt</value>
 </property>
 
 <property>
-  <name>metric.systemstate.percluster..avgCpuUser%</name>
-  <value>avgCpuUser</value>
+  <name>metric.systemmetrics.eth1_busy_pcnt</name>
+  <value>eth1_busy_pcnt</value>
 </property>
- 
+
 <property>
-  <name>metric.systemstate.percluster..avgCpuIowait%</name>
-  <value>avgCpuIowait</value>
+  <name>metric.systemmetrics.sda.%util</name>
+  <value>sda_busy_pcnt</value>
 </property>
 
 <property>
-  <name>metric.systemstate.percluster..avgCpuIrq%</name>
-  <value>avgCpuIrq</value>
+  <name>metric.systemmetrics.sdb.%util</name>
+  <value>sdb_busy_pcnt</value>
 </property>
 
 <property>
-  <name>metric.systemstate.percluster..avgCpuSoftirq%</name>
-  <value>avgCpuSoftirq</value>
+  <name>metric.systemmetrics.sdc.%util</name>
+  <value>sdc_busy_pcnt</value>
 </property>
 
 <property>
-  <name>metric.systemstate.percluster..avgCpuBusy%</name>
-  <value>avgCpuBusy</value>
+  <name>metric.systemmetrics.sdd.%util</name>
+  <value>sdd_busy_pcnt</value>
 </property>
 
 <property>
-  <name>metric.systemstate.percluster..avgLoadOne</name>
-  <value>avgLoadOne</value>
+  <name>metric.systemmetrics.swap_used_pcnt</name>
+  <value>swap_used_pcnt</value>
 </property>
 
 <property>
-  <name>metric.systemstate.percluster..avgLoadFive</name>
-  <value>avgLoadFive</value>
+  <name>report.db.primary.key.df</name>
+  <value>timestamp</value>
 </property>
 
 <property>
-  <name>metric.systemstate.percluster..avgLoadFifteen</name>
-  <value>avgLoadFifteen</value>
+  <name>metric.df.available</name>
+  <value>available</value>
 </property>
 
 <property>
-  <name>metric.systemstate.percluster..avgMemBuffers%</name>
-  <value>avgMemBuffers</value>
+  <name>metric.df.used</name>
+  <value>used</value>
 </property>
 
 <property>
-  <name>metric.systemstate.percluster..avgMemCached</name>
-  <value>avgMemCached</value>
+  <name>metric.df.use%</name>
+  <value>used_percent</value>
 </property>
 
 <property>
-  <name>metric.systemstate.percluster..avgMemCached%</name>
-  <value>avgMemCachedPercent</value>
+  <name>metric.df.mounted-on</name>
+  <value>mount</value>
 </property>
 
 <property>
-  <name>metric.systemstate.percluster..avgMemFree</name>
-  <value>avgMemFree</value>
+  <name>metric.df.filesystem</name>
+  <value>fs</value>
 </property>
 
 <property>
-  <name>metric.systemstate.percluster..avgMemFree%</name>
-  <value>avgMemFreePercent</value>
+  <name>metric.df.csource</name>
+  <value>host</value>
 </property>
 
+<!-- dfs name node metrics -->
 <property>
-  <name>metric.systemstate.percluster..avgMemUser</name>
-  <value>avgMemUser</value>
+  <name>report.db.primary.key.hadoop_dfs_namenode</name>
+  <value>timestamp</value>
 </property>
 
 <property>
-  <name>metric.systemstate.percluster..avgMemUser%</name>
-  <value>avgMemUserPercent</value>
+  <name>metric.hadoop_dfs_namenode.csource</name>
+  <value>host</value>
 </property>
 
 <property>
-  <name>metric.systemstate.percluster..avgMemShared</name>
-  <value>avgMemShared</value>
+  <name>metric.hadoop_dfs_namenode.addblockops</name>
+  <value>add_block_ops</value>
 </property>
 
 <property>
-  <name>metric.systemstate.percluster..avgMemShared%</name>
-  <value>avgMemSharedPercent</value>
+  <name>metric.hadoop_dfs_namenode.blockscorrupted</name>
+  <value>blocks_corrupted</value>
 </property>
 
 <property>
-  <name>metric.systemstate.percluster..avgMemTotal</name>
-  <value>avgMemTotal</value>
+  <name>metric.hadoop_dfs_namenode.createfileops</name>
+  <value>create_file_ops</value>
 </property>
 
 <property>
-  <name>metric.systemstate.percluster..avgSwapTotal</name>
-  <value>avgSwapTotal</value>
+  <name>metric.hadoop_dfs_namenode.deletefileops</name>
+  <value>delete_file_ops</value>
 </property>
 
 <property>
-  <name>metric.systemstate.percluster..avgSwapInKbps</name>
-  <value>avgSwapInKbps</value>
+  <name>metric.hadoop_dfs_namenode.filescreated</name>
+  <value>files_created</value>
 </property>
 
 <property>
-  <name>metric.systemstate.percluster..avgSwapOutKbps</name>
-  <value>avgSwapOutKbps</value>
+  <name>metric.hadoop_dfs_namenode.filesrenamed</name>
+  <value>files_renamed</value>
 </property>
 
 <property>
-  <name>metric.systemstate.percluster..avgBytesIn</name>
-  <value>avgBytesIn</value>
+  <name>metric.hadoop_dfs_namenode.getblocklocations</name>
+  <value>get_block_locations</value>
 </property>
 
 <property>
-  <name>metric.systemstate.percluster..avgBytesOut</name>
-  <value>avgBytesOut</value>
+  <name>metric.hadoop_dfs_namenode.getlistingops</name>
+  <value>get_listing_ops</value>
 </property>
 
 <property>
-  <name>metric.systemstate.percluster..avgPktsIn</name>
-  <value>avgPktsIn</value>
+  <name>metric.hadoop_dfs_namenode.safemodetime</name>
+  <value>safe_mode_time</value>
 </property>
 
 <property>
-  <name>metric.systemstate.percluster..avgPktsOut</name>
-  <value>avgPktsOut</value>
+  <name>metric.hadoop_dfs_namenode.syncs_avg_time</name>
+  <value>syncs_avg_time</value>
 </property>
 
 <property>
-  <name>metric.systemstate.percluster..avgDiskFree</name>
-  <value>avgDiskFree</value>
+  <name>metric.hadoop_dfs_namenode.syncs_num_ops</name>
+  <value>syncs_num_ops</value>
 </property>
 
 <property>
-  <name>metric.systemstate.percluster..avgDiskTotal</name>
-  <value>avgDiskTotal</value>
+  <name>metric.hadoop_dfs_namenode.transactions_avg_time</name>
+  <value>transactions_avg_time</value>
 </property>
 
 <property>
-  <name>metric.systemstate.percluster..avgDiskUsed%</name>
-  <value>avgDiskUsed</value>
+  <name>metric.hadoop_dfs_namenode.transactions_num_ops</name>
+  <value>transactions_num_ops</value>
 </property>
 
 <property>
-  <name>metric.systemstate.percluster..avgPartMaxUsed%</name>
-  <value>avgPartMaxUsed</value>
+  <name>metric.hadoop_dfs_namenode.blockreport_avg_time</name>
+  <value>block_report_avg_time</value>
 </property>
 
 <property>
-  <name>metric.systemstate.percluster..avgDiskMaxBusy%</name>
-  <value>avgDiskMaxBusy</value>
+  <name>metric.hadoop_dfs_namenode.blockreport_num_ops</name>
+  <value>block_report_num_ops</value>
 </property>
 
 <property>
-  <name>metric.dfs.hdfs throughput..#nodes</name>
-  <value>nodes</value>
+  <name>metric.hadoop_dfs_namenode.fsimageloadtime</name>
+  <value>fs_image_load_time</value>
 </property>
 
+<!-- dfs data node -->
 <property>
-  <name>metric.dfs.hdfs throughput..#pids</name>
-  <value>pids</value>
+  <name>report.db.primary.key.hadoop_dfs_datanode</name>
+  <value>timestamp</value>
 </property>
 
 <property>
-  <name>metric.dfs.hdfs throughput..bytesWrittenPerSec</name>
-  <value>bytesWrittenPerSec</value>
+  <name>metric.hadoop_dfs_datanode.hostname</name>
+  <value>host</value>
 </property>
 
 <property>
-  <name>metric.dfs.hdfs throughput..blocksRemovedPerSec</name>
-  <value>blocksRemovedPerSec</value>
+  <name>metric.hadoop_dfs_datanode.blockreports_avg_time</name>
+  <value>block_reports_avg_time</value>
 </property>
 
 <property>
-  <name>metric.dfs.hdfs throughput..readsFromRemoteClientPerSec</name>
-  <value>readsFromLocalRemotePerSec</value>
+  <name>metric.hadoop_dfs_datanode.blockreports_num_ops</name>
+  <value>block_reports_num_ops</value>
 </property>
 
 <property>
-  <name>metric.dfs.hdfs throughput..writesFromLocalClientPerSec</name>
-  <value>writesFromLocalClientPerSec</value>
+  <name>metric.hadoop_dfs_datanode.block_verification_failures</name>
+  <value>block_verification_failures</value>
 </property>
 
 <property>
-  <name>metric.dfs.hdfs throughput..blocksVerifiedPerSec</name>
-  <value>blocksVerifiedPerSec</value>
+  <name>metric.hadoop_dfs_datanode.blocks_read</name>
+  <value>blocks_read</value>
 </property>
 
 <property>
-  <name>metric.dfs.hdfs throughput..blocksWrittenPerSec</name>
-  <value>blocksWrittenPerSec</value>
+  <name>metric.hadoop_dfs_datanode.blocks_removed</name>
+  <value>blocks_removed</value>
 </property>
 
 <property>
-  <name>metric.dfs.hdfs throughput..blockVerificationFailuresPerSec</name>
-  <value>blockVerificationFailuresPerSec</value>
+  <name>metric.hadoop_dfs_datanode.blocks_replicated</name>
+  <value>blocks_replicated</value>
 </property>
 
 <property>
-  <name>metric.dfs.hdfs throughput..#restarts</name>
-  <value>restarts</value>
+  <name>metric.hadoop_dfs_datanode.blocks_verified</name>
+  <value>blocks_verified</value>
 </property>
 
 <property>
-  <name>metric.dfs.hdfs throughput..blocksReplicatedPerSec</name>
-  <value>blocksReplicatedPerSec</value>
+  <name>metric.hadoop_dfs_datanode.blocks_written</name>
+  <value>blocks_written</value>
 </property>
 
 <property>
-  <name>metric.dfs.hdfs throughput..bytesReadPerSec</name>
-  <value>bytesReadPerSec</value>
+  <name>metric.hadoop_dfs_datanode.bytes_read</name>
+  <value>bytes_read</value>
 </property>
 
 <property>
-  <name>metric.dfs.hdfs throughput..writesFromRemoteClientPerSec</name>
-  <value>writesFromLocalRemotePerSec</value>
+  <name>metric.hadoop_dfs_datanode.bytes_written</name>
+  <value>bytes_written</value>
 </property>
 
 <property>
-  <name>metric.dfs.hdfs throughput..readsFromLocalClientPerSec</name>
-  <value>readsFromLocalClientPerSec</value>
+  <name>metric.hadoop_dfs_datanode.copyblockop_avg_time</name>
+  <value>copy_block_op_avg_time</value>
 </property>
 
 <property>
-  <name>metric.dfs.hdfs throughput..blocksReadPerSec</name>
-  <value>blocksReadPerSec</value>
+  <name>metric.hadoop_dfs_datanode.copyblockop_num_ops</name>
+  <value>copy_block_op_num_ops</value>
 </property>
 
 <property>
-  <name>metric.dfs.hdfs throughput..readBlockOperationsPerSec</name>
-  <value>readBlockOperationsPerSec</value>
+  <name>metric.hadoop_dfs_datanode.heartbeats_avg_time</name>
+  <value>heart_beats_avg_time</value>
 </property>
 
 <property>
-  <name>metric.dfs.hdfs throughput..numberReadBlockOperations</name>
-  <value>numberReadBlockOperations</value>
+  <name>metric.hadoop_dfs_datanode.heartbeats_num_ops</name>
+  <value>heart_beats_num_ops</value>
 </property>
 
 <property>
-  <name>metric.dfs.hdfs throughput..writeBlockOperationsPerSec</name>
-  <value>writeBlockOperationsPerSec</value>
+  <name>metric.hadoop_dfs_datanode.readblockop_avg_time</name>
+  <value>read_block_op_avg_time</value>
 </property>
 
 <property>
-  <name>metric.dfs.hdfs throughput..numberWriteBlockOperations</name>
-  <value>numberWriteBlockOperations</value>
+  <name>metric.hadoop_dfs_datanode.readblockop_num_ops</name>
+  <value>read_block_op_num_ops</value>
 </property>
 
 <property>
-  <name>metric.dfs.hdfs throughput..readMetadataOperationsPerSec</name>
-  <value>readMetadataOperationsPerSec</value>
+  <name>metric.hadoop_dfs_datanode.readmetadataop_avg_time</name>
+  <value>read_metadata_op_avg_time</value>
 </property>
 
 <property>
-  <name>metric.dfs.hdfs throughput..numberReadMetadataOperations</name>
-  <value>numberReadMetadataOperations</value>
+  <name>metric.hadoop_dfs_datanode.readmetadataop_num_ops</name>
+  <value>read_metadata_op_num_ops</value>
 </property>
 
 <property>
-  <name>metric.dfs.hdfs throughput..copyBlockOperationsPerSec</name>
-  <value>copyBlockOperationsPerSec</value>
+  <name>metric.hadoop_dfs_datanode.reads_from_local_client</name>
+  <value>reads_from_local_client</value>
 </property>
 
 <property>
-  <name>metric.dfs.hdfs throughput..numberCopyBlockOperations</name>
-  <value>numberCopyBlockOperations</value>
+  <name>metric.hadoop_dfs_datanode.reads_from_remote_client</name>
+  <value>reads_from_remote_client</value>
 </property>
 
 <property>
-  <name>metric.dfs.hdfs throughput..replaceBlockOperationsPerSec</name>
-  <value>replaceBlockOperationsPerSec</value>
+  <name>metric.hadoop_dfs_datanode.replaceblockop_avg_time</name>
+  <value>replace_block_op_avg_time</value>
 </property>
 
 <property>
-  <name>metric.dfs.hdfs throughput..numberReplaceBlockOperations</name>
-  <value>numberReplaceBlockOperations</value>
+  <name>metric.hadoop_dfs_datanode.replaceblockop_num_ops</name>
+  <value>replace_block_op_num_ops</value>
 </property>
 
 <property>
-  <name>metric.dfs.hdfs throughput..heartBeatsPerSec</name>
-  <value>heartBeatsPerSec</value>
+  <name>metric.hadoop_dfs_datanode.writeblockop_avg_time</name>
+  <value>write_block_op_avg_time</value>
 </property>
 
 <property>
-  <name>metric.dfs.hdfs throughput..numberHeartBeats</name>
-  <value>numberHeartBeats</value>
+  <name>metric.hadoop_dfs_datanode.writeblockop_num_ops</name>
+  <value>write_block_op_num_ops</value>
 </property>
 
 <property>
-  <name>metric.dfs.hdfs throughput..blockReportsPerSec</name>
-  <value>blockReportsPerSec</value>
+  <name>metric.hadoop_dfs_datanode.writes_from_local_client</name>
+  <value>writes_from_local_client</value>
 </property>
 
 <property>
-  <name>metric.dfs.hdfs throughput..numberBlockReports</name>
-  <value>numberBlockReports</value>
+  <name>metric.hadoop_dfs_datanode.writes_from_remote_client</name>
+  <value>writes_from_remote_client</value>
 </property>
 
+<!-- dfs fs name system status -->
 <property>
-  <name>metric.dfs.individual datanode throughput.node.#pids</name>
-  <value>pids</value>
+  <name>report.db.primary.key.hadoop_dfs_fsnamesystem</name>
+  <value>timestamp</value>
 </property>
-
 <property>
-  <name>metric.dfs.individual datanode throughput.node.bytesWrittenPerSec</name>
-  <value>bytesWrittenPerSec</value>
+  <name>metric.hadoop_dfs_fsnamesystem.csource</name>
+  <value>host</value>
 </property>
-
 <property>
-  <name>metric.dfs.individual datanode throughput.node.blocksRemovedPerSec</name>
-  <value>blocksRemovedPerSec</value>
+  <name>metric.hadoop_dfs_fsnamesystem.blockstotal</name>
+  <value>blocks_total</value>
 </property>
-
 <property>
-  <name>metric.dfs.individual datanode throughput.node.readsFromRemoteClientPerSec</name>
-  <value>readsFromLocalRemotePerSec</value>
+  <name>metric.hadoop_dfs_fsnamesystem.capacityremaininggb</name>
+  <value>capacity_remaining_gb</value>
 </property>
-
 <property>
-  <name>metric.dfs.individual datanode throughput.node.writesFromLocalClientPerSec</name>
-  <value>writesFromLocalClientPerSec</value>
+  <name>metric.hadoop_dfs_fsnamesystem.capacitytotalgb</name>
+  <value>capacity_total_gb</value>
 </property>
-
 <property>
-  <name>metric.dfs.individual datanode throughput.node.blocksVerifiedPerSec</name>
-  <value>blocksVerifiedPerSec</value>
+  <name>metric.hadoop_dfs_fsnamesystem.capacityusedgb</name>
+  <value>capacity_used_gb</value>
 </property>
-
 <property>
-  <name>metric.dfs.individual datanode throughput.node.blocksWrittenPerSec</name>
-  <value>blocksWrittenPerSec</value>
+  <name>metric.hadoop_dfs_fsnamesystem.filestotal</name>
+  <value>files_total</value>
 </property>
-
 <property>
-  <name>metric.dfs.individual datanode throughput.node.blockVerificationFailuresPerSec</name>
-  <value>blockVerificationFailuresPerSec</value>
+  <name>metric.hadoop_dfs_fsnamesystem.pendingreplicationblocks</name>
+  <value>pending_replication_blocks</value>
 </property>
-
 <property>
-  <name>metric.dfs.individual datanode throughput.node</name>
-  <value>node</value>
+  <name>metric.hadoop_dfs_fsnamesystem.scheduledreplicationblocks</name>
+  <value>scheduled_replication_blocks</value>
 </property>
-
 <property>
-  <name>metric.dfs.individual datanode throughput.node.#restarts</name>
-  <value>restarts</value>
+  <name>metric.hadoop_dfs_fsnamesystem.totalload</name>
+  <value>total_load</value>
 </property>
-
 <property>
-  <name>metric.dfs.individual datanode throughput.node.blocksReplicatedPerSec</name>
-  <value>blocksReplicatedPerSec</value>
+  <name>metric.hadoop_dfs_fsnamesystem.underreplicatedblocks</name>
+  <value>under_replicated_blocks</value>
 </property>
 
+<!-- dfs fsdirectory metrics -->
 <property>
-  <name>metric.dfs.individual datanode throughput.node.bytesReadPerSec</name>
-  <value>bytesReadPerSec</value>
+  <name>report.db.primary.key.hadoop_dfs_fsdirectory</name>
+  <value>timestamp</value>
 </property>
 
 <property>
-  <name>metric.dfs.individual datanode throughput.node.writesFromRemoteClientPerSec</name>
-  <value>writesFromLocalRemotePerSec</value>
+  <name>metric.hadoop_dfs_fsdirectory.csource</name>
+  <value>host</value>
+</property>
+ <property>
+  <name>metric.hadoop_dfs_fsdirectory.files_deleted</name>
+  <value>files_deleted</value>
 </property>
 
+<!-- hadoop jvm metrics -->
 <property>
-  <name>metric.dfs.individual datanode throughput.node.readsFromLocalClientPerSec</name>
-  <value>readsFromLocalClientPerSec</value>
+  <name>report.db.primary.key.hadoop_jvm_metrics</name>
+  <value>timestamp</value>
 </property>
 
 <property>
-  <name>metric.dfs.individual datanode throughput.node.blocksReadPerSec</name>
-  <value>blocksReadPerSec</value>
+  <name>metric.hadoop_jvm_metrics.csource</name>
+  <value>host</value>
 </property>
 
 <property>
-  <name>metric.dfs.namenode operations.node</name>
-  <value>node</value>
+  <name>metric.hadoop_jvm_metrics.gctimemillis</name>
+  <value>gc_timemillis</value>
 </property>
 
 <property>
-  <name>metric.dfs.namenode operations.node.#pids</name>
-  <value>pids</value>
+  <name>metric.hadoop_jvm_metrics.gccount</name>
+  <value>gc_count</value>
 </property>
 
 <property>
-  <name>metric.dfs.namenode operations.node.#restarts</name>
-  <value>restarts</value>
+  <name>metric.hadoop_jvm_metrics.logerror</name>
+  <value>log_error</value>
 </property>
 
 <property>
-  <name>metric.dfs.namenode operations.node.filesDeletedPerSec</name>
-  <value>filesDeletedPerSec</value>
+  <name>metric.hadoop_jvm_metrics.logfatal</name>
+  <value>log_fatal</value>
 </property>
 
 <property>
-  <name>metric.dfs.namenode operations.node.filesCreatedPerSec</name>
-  <value>filesCreatedPerSec</value>
+  <name>metric.hadoop_jvm_metrics.loginfo</name>
+  <value>log_info</value>
 </property>
 
 <property>
-  <name>metric.dfs.namenode operations.node.filesOpenedPerSec</name>
-  <value>filesOpenedPerSec</value>
+  <name>metric.hadoop_jvm_metrics.logwarn</name>
+  <value>log_warn</value>
 </property>
 
 <property>
-  <name>metric.dfs.namenode operations.node.filesRenamedPerSec</name>
-  <value>filesRenamedPerSec</value>
+  <name>metric.hadoop_jvm_metrics.memheapcommittedm</name>
+  <value>mem_heap_committed_m</value>
 </property>
 
 <property>
-  <name>metric.dfs.namenode operations.node.filesListedPerSec</name>
-  <value>filesListedPerSec</value>
+  <name>metric.hadoop_jvm_metrics.memheapusedm</name>
+  <value>mem_heap_used_m</value>
 </property>
 
 <property>
-  <name>metric.dfs.namenode operations.node.numberOfTransactionsPerSec</name>
-  <value>numberOfTransactionsPerSec</value>
+  <name>metric.hadoop_jvm_metrics.memnonheapcommittedm</name>
+  <value>mem_non_heap_committed_m</value>
 </property>
 
 <property>
-  <name>metric.dfs.namenode operations.node.AverageTransactions</name>
-  <value>AverageTransactions</value>
+  <name>metric.hadoop_jvm_metrics.memnonheapusedm</name>
+  <value>mem_non_heap_used_m</value>
 </property>
 
 <property>
-  <name>metric.dfs.namenode operations.node.timeInSafeMode</name>
-  <value>timeInSafeMode</value>
+  <name>metric.hadoop_jvm_metrics.processname</name>
+  <value>process_name</value>
 </property>
 
 <property>
-  <name>metric.dfs.namenode operations.node.numberOfSyncsPerSec</name>
-  <value>numberOfSyncsPerSec</value>
+  <name>metric.hadoop_jvm_metrics.threadsblocked</name>
+  <value>threads_blocked</value>
 </property>
 
 <property>
-  <name>metric.dfs.namenode operations.node.AverageSyncTime</name>
-  <value>AverageSyncTime</value>
+  <name>metric.hadoop_jvm_metrics.threadsnew</name>
+  <value>threads_new</value>
 </property>
 
 <property>
-  <name>metric.dfs.namenode operations.node.fsImageLoadTime</name>
-  <value>fsImageLoadTime</value>
+  <name>metric.hadoop_jvm_metrics.threadsrunnable</name>
+  <value>threads_runnable</value>
 </property>
 
 <property>
-  <name>metric.dfs.namenode operations.node.BlocksCorrupted</name>
-  <value>BlocksCorrupted</value>
+  <name>metric.hadoop_jvm_metrics.threadsterminated</name>
+  <value>threads_terminated</value>
 </property>
 
 <property>
-  <name>metric.dfs.namenode operations.node.numberOfBlockReportPerSec</name>
-  <value>numberOfBlockReportPerSec</value>
+  <name>metric.hadoop_jvm_metrics.threadstimedwaiting</name>
+  <value>threads_timed_waiting</value>
 </property>
 
 <property>
-  <name>metric.dfs.namenode operations.node.AverageBlockReportTime</name>
-  <value>AverageBlockReportTime</value>
+  <name>metric.hadoop_jvm_metrics.threadswaiting</name>
+  <value>threads_waiting</value>
 </property>
 
+<!-- hadoop map/reduce metrics -->
 <property>
-  <name>metric.dfs.fsnamesystem status.node</name>
-  <value>node</value>
+  <name>report.db.primary.key.hadoop_mapred_jobtracker</name>
+  <value>timestamp</value>
 </property>
 
 <property>
-  <name>metric.dfs.fsnamesystem status.node.FilesTotal</name>
-  <value>FilesTotal</value>
+  <name>metric.hadoop_mapred_jobtracker.csource</name>
+  <value>host</value>
 </property>
-
 <property>
-  <name>metric.dfs.fsnamesystem status.node.BlocksTotal</name>
-  <value>BlocksTotal</value>
+  <name>metric.hadoop_mapred_jobtracker.jobs_completed</name>
+  <value>jobs_completed</value>
+</property>
+<property>
+  <name>metric.hadoop_mapred_jobtracker.jobs_submitted</name>
+  <value>jobs_submitted</value>
 </property>
-
 <property>
-  <name>metric.dfs.fsnamesystem status.node.CapacityTotalGB</name>
-  <value>CapacityTotalGB</value>
+  <name>metric.hadoop_mapred_jobtracker.maps_completed</name>
+  <value>maps_completed</value>
 </property>
 
 <property>
-  <name>metric.dfs.fsnamesystem status.node.CapacityUsedGB</name>
-  <value>CapacityUsedGB</value>
+  <name>metric.hadoop_mapred_jobtracker.maps_launched</name>
+  <value>maps_launched</value>
 </property>
 
 <property>
-  <name>metric.dfs.fsnamesystem status.node.CapacityRemainingGB</name>
-  <value>CapacityRemainingGB</value>
+  <name>metric.hadoop_mapred_jobtracker.reduces_completed</name>
+  <value>reduces_completed</value>
 </property>
 
 <property>
-  <name>metric.dfs.fsnamesystem status.node.TotalLoad</name>
-  <value></value>
+  <name>metric.hadoop_mapred_jobtracker.reduces_launched</name>
+  <value>reduces_launched</value>
 </property>
 
+<!-- hadoop rpc metrics -->
 <property>
-  <name>metric.dfs.fsnamesystem status.node.PendingReplicationBlocks</name>
-  <value>PendingReplicationBlocks</value>
+  <name>report.db.primary.key.hadoop_rpc_metrics</name>
+  <value>timestamp</value>
 </property>
 
 <property>
-  <name>metric.dfs.fsnamesystem status.node.UnderReplicatedBlocks</name>
-  <value>UnderReplicatedBlocks</value>
+  <name>metric.hadoop_rpc_metrics.csource</name>
+  <value>host</value>
 </property>
 
 <property>
-  <name>metric.dfs.fsnamesystem status.node.ScheduledReplicationBlocks</name>
-  <value>ScheduledReplicationBlocks</value>
+  <name>metric.hadoop_rpc_metrics.rpcprocessingtime_avg_time</name>
+  <value>rpc_processing_time_avg_time</value>
+</property>
+<property>
+  <name>metric.hadoop_rpc_metrics.rpcprocessingtime_num_ops</name>
+  <value>rpc_processing_time_num_ops</value>
 </property>
 
 <property>
-  <name>metric.rpcmetrics.rpc metrics..#nodes</name>
-  <value>nodes</value>
+  <name>metric.hadoop_rpc_metrics.getbuildversion_avg_time</name>
+  <value>get_build_version_avg_time</value>
+</property>
+<property>
+  <name>metric.hadoop_rpc_metrics.getbuildversion_num_ops</name>
+  <value>get_build_version_num_ops</value>
+</property>
+<property>
+  <name>metric.hadoop_rpc_metrics.getjobcounters_avg_time</name>
+  <value>get_job_counters_avg_time</value>
+</property>
+<property>
+  <name>metric.hadoop_rpc_metrics.getjobcounters_num_ops</name>
+  <value>get_job_counters_num_ops</value>
+</property>
+<property>
+  <name>metric.hadoop_rpc_metrics.getjobprofile_avg_time</name>
+  <value>get_job_profile_avg_time</value>
+</property>
+<property>
+  <name>metric.hadoop_rpc_metrics.getjobprofile_num_ops</name>
+  <value>get_job_profile_num_ops</value>
+</property>
+<property>
+  <name>metric.hadoop_rpc_metrics.getjobstatus_avg_time</name>
+  <value>get_job_status_avg_time</value>
+</property>
+<property>
+  <name>metric.hadoop_rpc_metrics.getjobstatus_num_ops</name>
+  <value>get_job_status_num_ops</value>
+</property>
+<property>
+  <name>metric.hadoop_rpc_metrics.getnewjobid_avg_time</name>
+  <value>get_new_job_id_avg_time</value>
+</property>
+<property>
+  <name>metric.hadoop_rpc_metrics.getnewjobid_num_ops</name>
+  <value>get_new_job_id_num_ops</value>
+</property>
+<property>
+  <name>metric.hadoop_rpc_metrics.getprotocolversion_avg_time</name>
+  <value>get_protocol_version_avg_time</value>
+</property>
+<property>
+  <name>metric.hadoop_rpc_metrics.getprotocolversion_num_ops</name>
+  <value>get_protocol_version_num_ops</value>
+</property>
+<property>
+  <name>metric.hadoop_rpc_metrics.getsystemdir_avg_time</name>
+  <value>get_system_dir_avg_time</value>
+</property>
+<property>
+  <name>metric.hadoop_rpc_metrics.getsystemdir_num_ops</name>
+  <value>get_system_dir_num_ops</value>
+</property>
+<property>
+  <name>metric.hadoop_rpc_metrics.gettaskcompletionevents_avg_time</name>
+  <value>get_task_completion_events_avg_time</value>
+</property>
+<property>
+  <name>metric.hadoop_rpc_metrics.gettaskcompletionevents_num_ops</name>
+  <value>get_task_completion_events_num_ops</value>
+</property>
+<property>
+  <name>metric.hadoop_rpc_metrics.gettaskdiagnostics_avg_time</name>
+  <value>get_task_diagnostics_avg_time</value>
+</property>
+<property>
+  <name>metric.hadoop_rpc_metrics.gettaskdiagnostics_num_ops</name>
+  <value>get_task_diagnostics_num_ops</value>
+</property>
+<property>
+  <name>metric.hadoop_rpc_metrics.heartbeat_avg_time</name>
+  <value>heartbeat_avg_time</value>
+</property>
+<property>
+  <name>metric.hadoop_rpc_metrics.heartbeat_num_ops</name>
+  <value>heartbeat_num_ops</value>
+</property>
+<property>
+  <name>metric.hadoop_rpc_metrics.submitjob_avg_time</name>
+  <value>submit_job_avg_time</value>
+</property>
+<property>
+  <name>metric.hadoop_rpc_metrics.submitjob_num_ops</name>
+  <value>submit_job_num_ops</value>
 </property>
 
+<!-- Hod Machine -->
 <property>
-  <name>metric.rpcmetrics.rpc metrics..AverageRpcQueueTime_num_ops</name>
-  <value>AverageRpcQueueTime_num_ops</value>
+  <name>metric.hodmachine.machine</name>
+  <value>host</value>
 </property>
 
 <property>
-  <name>metric.rpcmetrics.rpc metrics..AverageRpcQueueTime_avg_time</name>
-  <value>AverageRpcQueueTime_avg_time</value>
+  <name>metric.hodmachine.hodid</name>
+  <value>hodid</value>
 </property>
 
+<!-- Hod Job -->
 <property>
-  <name>metric.rpcmetrics.rpc metrics..AverageRpcQueueTime_num_ops</name>
-  <value>AverageRpcQueueTime_num_ops</value>
+  <name>metric.hodjob.hodid</name>
+  <value>HodID</value>
 </property>
 
 <property>
-  <name>metric.rpcmetrics.rpc metrics..AverageRpcProcessingTime_num_ops</name>
-  <value>AverageRpcProcessingTime_num_ops</value>
+  <name>metric.hodjob.userid</name>
+  <value>UserID</value>
 </property>
 
 <property>
-  <name>metric.rpcmetrics.rpc metrics..AverageRpcProcessingTime_avg_time</name>
-  <value>AverageRpcProcessingTime_avg_time</value>
+  <name>metric.hodjob.status</name>
+  <value>Status</value>
 </property>
 
 <property>
-  <name>metric.rpcmetrics.rpc metrics..RpcDiscarded_num_ops</name>
-  <value>RpcDiscarded_num_ops</value>
+  <name>metric.hodjob.timequeued</name>
+  <value>TimeQueued</value>
 </property>
 
 <property>
-  <name>metric.rpcmetrics.rpc metrics..RpcDiscarded_avg_time</name>
-  <value>RpcDiscarded_avg_time</value>
+  <name>metric.hodjob.starttime</name>
+  <value>StartTime</value>
 </property>
 
 <property>
-  <name>metric.rpcmetrics.rpc metrics..register_num_ops</name>
-  <value>register_num_ops</value>
+  <name>metric.hodjob.endtime</name>
+  <value>EndTime</value>
 </property>
 
 <property>
-  <name>metric.rpcmetrics.rpc metrics..register_avg_time</name>
-  <value>register_avg_time</value>
+  <name>metric.hodjob.numofmachines</name>
+  <value>NumOfMachines</value>
 </property>
 
+<!-- MSSRGraph -->
 <property>
-  <name>metric.rpcmetrics.rpc metrics..getProtocolVersion_num_ops</name>
-  <value>getProtocolVersion_num_ops</value>
+  <name>report.db.primary.key.mssrgraph</name>
+  <value>timestamp</value>
 </property>
 
 <property>
-  <name>metric.rpcmetrics.rpc metrics..getProtocolVersion_avg_time</name>
-  <value>getProtocolVersion_avg_time</value>
+  <name>metric.mssrgraph.jobid</name>
+  <value>job_id</value>
+</property>
+<property>
+  <name>metric.mssrgraph.count</name>
+  <value>count</value>
 </property>
 
 <property>
-  <name>metric.rpcmetrics.rpc metrics..sendHeartbeat_num_ops</name>
-  <value>sendHeartbeat_num_ops</value>
+  <name>metric.mssrgraph.type</name>
+  <value>type</value>
 </property>
 
+<!-- Map Reduce Job Counters -->
 <property>
-  <name>metric.rpcmetrics.rpc metrics..sendHeartbeat_avg_time</name>
-  <value>sendHeartbeat_avg_time</value>
+  <name>report.db.primary.key.mrjobcounters</name>
+  <value>timestamp</value>
 </property>
 
 <property>
-  <name>metric.rpcmetrics.rpc metrics..blockReport_num_ops</name>
-  <value>blockReport_num_ops</value>
+  <name>metric.mrjobcounters.file_systems_hdfs_bytes_read</name>
+  <value>hdfs_bytes_read</value>
 </property>
 
 <property>
-  <name>metric.rpcmetrics.rpc metrics..blockReport_avg_time</name>
-  <value>blockReport_avg_time</value>
+  <name>metric.mrjobcounters.file_systems_hdfs_bytes_written</name>
+  <value>hdfs_bytes_written</value>
+</property>
+<property>
+  <name>metric.mrjobcounters.file_systems_local_bytes_read</name>
+  <value>local_bytes_read</value>
+</property>
+<property>
+  <name>metric.mrjobcounters.file_systems_local_bytes_written</name>
+  <value>local_bytes_written</value>
+</property>
+<property>
+  <name>metric.mrjobcounters.job_counters__data-local_map_tasks</name>
+  <value>data_local_map_tasks</value>
+</property>
+<property>
+  <name>metric.mrjobcounters.job_counters__launched_map_tasks</name>
+  <value>launched_map_tasks</value>
+</property>
+<property>
+  <name>metric.mrjobcounters.job_counters__launched_reduce_tasks</name>
+  <value>launched_reduce_tasks</value>
+</property>
+<property>
+  <name>metric.mrjobcounters.jobid</name>
+  <value>job_id</value>
+</property>
+<property>
+  <name>metric.mrjobcounters.map-reduce_framework_combine_input_records</name>
+  <value>combine_input_records</value>
+</property>
+<property>
+  <name>metric.mrjobcounters.map-reduce_framework_combine_output_records</name>
+  <value>combine_output_records</value>
+</property>
+<property>
+  <name>metric.mrjobcounters.map-reduce_framework_map_input_bytes</name>
+  <value>map_input_bytes</value>
+</property>
+<property>
+  <name>metric.mrjobcounters.map-reduce_framework_map_output_bytes</name>
+  <value>map_output_bytes</value>
+</property>
+<property>
+  <name>metric.mrjobcounters.map-reduce_framework_map_input_records</name>
+  <value>map_input_records</value>
+</property>
+<property>
+  <name>metric.mrjobcounters.map-reduce_framework_map_output_records</name>
+  <value>map_output_records</value>
+</property>
+<property>
+  <name>metric.mrjobcounters.map-reduce_framework_reduce_input_groups</name>
+  <value>reduce_input_groups</value>
+</property>
+<property>
+  <name>metric.mrjobcounters.map-reduce_framework_reduce_input_records</name>
+  <value>reduce_input_records</value>
+</property>
+<property>
+  <name>metric.mrjobcounters.map-reduce_framework_reduce_output_records</name>
+  <value>reduce_output_records</value>
 </property>
 
+<!-- Database summarization intervals -->
 <property>
-  <name>metric.rpcmetrics.rpc metrics..getBlockLocations_num_ops</name>
-  <value>getBlockLocations_num_ops</value>
+  <name>consolidator.table.dfs_namenode</name>
+  <value>5,30,180,720</value>
 </property>
 
 <property>
-  <name>metric.rpcmetrics.rpc metrics..heartbeat_num_ops</name>
-  <value>heartbeat_num_ops</value>
+  <name>consolidator.table.dfs_datanode</name>
+  <value>5,30,180,720</value>
 </property>
 
 <property>
-  <name>metric.rpcmetrics.rpc metrics..versionRequest_num_ops</name>
-  <value>versionRequest_num_ops</value>
+  <name>consolidator.table.hadoop_rpc</name>
+  <value>5,30,180,720</value>
 </property>
 
 <property>
-  <name>metric.rpcmetrics.rpc metrics..setPermission_num_ops</name>
-  <value>setPermission_num_ops</value>
+  <name>consolidator.table.cluster_hadoop_rpc</name>
+  <value>5,30,180,720</value>
 </property>
 
 <property>
-  <name>metric.rpcmetrics.rpc metrics..rollFsImage_num_ops</name>
-  <value>rollFsImage_num_ops</value>
+  <name>consolidator.table.hadoop_mapred</name>
+  <value>5,30,180,720</value>
 </property>
 
 <property>
-  <name>conversion.metric.systemstate.pernode.node.bytes_in</name>
-  <value>0.001</value>
+  <name>consolidator.table.hadoop_jvm</name>
+  <value>5,30,180,720</value>
 </property>
 
 <property>
-  <name>conversion.metric.systemstate.pernode.node.bytes_out</name>
-  <value>0.001</value>
+  <name>consolidator.table.system_metrics</name>
+  <value>5,30,180,720</value>
 </property>
 
 <property>
-  <name>util.perfMetrics</name>
-  <value>CPUBusy,FreeMemPercentage,MaxDiskBusyPercentage,NetworkInKBps,NetworkOutKBps,DiskAReadKBps,DiskBReadKBps,DiskCReadKBps,DiskDReadKBps,DiskAWriteKBps,DiskBWriteKBps,DiskCWriteKBps,DiskDWriteKBps,DiskUsedPercentage</value>
+  <name>consolidator.table.dfs_throughput</name>
+  <value>5,30,180,720</value>
 </property>
 
-<!-- Database summarization intervals -->
 <property>
-  <name>consolidator.table.dfs_namenode</name>
-  <value>5,30,120</value>
+  <name>consolidator.table.node_activity</name>
+  <value>5,30,180,720</value>
 </property>
 
 <property>
-  <name>consolidator.table.SimonData</name>
-  <value>5,30,120</value>
+  <name>consolidator.table.dfs_fsnamesystem</name>
+  <value>5,30,180,720</value>
 </property>
 
 <property>
-  <name>consolidator.table.rpc_metrics</name>
-  <value>5,30,120</value>
+  <name>consolidator.table.disk</name>
+  <value>5,30,180,720</value>
 </property>
 
 <property>
-  <name>consolidator.table.dfs_throughput</name>
-  <value>5,30,120</value>
+  <name>consolidator.table.cluster_disk</name>
+  <value>5,30,180,720</value>
 </property>
 
 <property>
-  <name>consolidator.table.dfs_individual</name>
-  <value>5,30,120</value>
+  <name>consolidator.table.cluster_system_metrics</name>
+  <value>5,30,180,720</value>
 </property>
 
 <property>
-  <name>consolidator.table.cluster_systemstate</name>
-  <value>5,30,120</value>
+  <name>consolidator.table.hod_job_digest</name>
+  <value>5,30,180,720</value>
 </property>
 
 <property>
-  <name>consolidator.table.NodeActivity</name>
-  <value>5,30,120</value>
+  <name>consolidator.table.hod_machine</name>
+  <value>5,30,180,720</value>
 </property>
 
 <property>
-  <name>consolidator.table.HodJobDigest</name>
-  <value>5,30,120</value>
+  <name>consolidator.table.HodJob</name>
+  <value>5,30,180,720</value>
 </property>
 
 <property>
-  <name>consolidator.table.dfs_fsnamesystem_status</name>
-  <value>5,30,120</value>
+  <name>consolidator.table.user_util</name>
+  <value>5,30,180,720</value>
 </property>
+
 </configuration>
+
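
The mdl.xml template above follows a naming convention: report.db.name.&lt;recordtype&gt; names the database table a record type loads into, report.db.primary.key.&lt;recordtype&gt; names its primary-key column, and metric.&lt;recordtype&gt;.&lt;metric&gt; maps a raw metric name to a column name. A minimal sketch of reading these mappings with Hadoop's Configuration API follows; the MdlMappingExample class and the choice of the systemmetrics record type are illustrative, not part of this commit.

import org.apache.hadoop.conf.Configuration;

public class MdlMappingExample {
  public static void main(String[] args) {
    // Load only the MDL mappings, not the default Hadoop resources.
    Configuration mdl = new Configuration(false);
    mdl.addResource("mdl.xml");   // file generated from mdl.xml.template, found on the classpath

    String recordType = "systemmetrics";
    // Table the record type loads into, its primary-key column, and one metric-to-column mapping.
    String table  = mdl.get("report.db.name." + recordType);        // "system_metrics"
    String key    = mdl.get("report.db.primary.key." + recordType); // "timestamp"
    String column = mdl.get("metric." + recordType + ".ldavg-1");   // "load_1"

    System.out.println(table + " keyed on " + key + "; ldavg-1 -> " + column);
  }
}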

Modified: hadoop/core/trunk/src/contrib/chukwa/conf/system-data-loader.properties
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/contrib/chukwa/conf/system-data-loader.properties?rev=723855&r1=723854&r2=723855&view=diff
==============================================================================
--- hadoop/core/trunk/src/contrib/chukwa/conf/system-data-loader.properties (original)
+++ hadoop/core/trunk/src/contrib/chukwa/conf/system-data-loader.properties Fri Dec  5 12:30:14 2008
@@ -1,6 +1,6 @@
-log4j.rootLogger=INFO, stdout, R
+log4j.rootLogger=INFO, R
 log4j.appender.R=org.apache.hadoop.chukwa.inputtools.log4j.ChukwaDailyRollingFileAppender
-log4j.appender.R.File=${CHUKWA_HOME}/logs/${RECORD_TYPE}.log
+log4j.appender.R.File=${CHUKWA_LOG_DIR}/${RECORD_TYPE}.log
 log4j.appender.R.recordType=${RECORD_TYPE}
 log4j.appender.R.layout=org.apache.log4j.PatternLayout
 log4j.appender.R.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
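
The change above drops the stdout appender and points the Chukwa appender at ${CHUKWA_LOG_DIR}/${RECORD_TYPE}.log. log4j 1.x resolves such ${...} placeholders from Java system properties, so a data loader would be expected to define them before loading this file. A minimal sketch, assuming a hypothetical DataLoaderLogSetup class and placeholder values for the two properties:

import org.apache.log4j.Logger;
import org.apache.log4j.PropertyConfigurator;

public class DataLoaderLogSetup {
  public static void main(String[] args) {
    // log4j substitutes ${CHUKWA_LOG_DIR} and ${RECORD_TYPE} from system properties.
    System.setProperty("CHUKWA_LOG_DIR", "/tmp/chukwa/logs"); // assumed log directory
    System.setProperty("RECORD_TYPE", "Df");                  // assumed record type
    PropertyConfigurator.configure("conf/system-data-loader.properties");

    Logger log = Logger.getLogger(DataLoaderLogSetup.class);
    log.info("this line goes to " + System.getProperty("CHUKWA_LOG_DIR")
        + "/Df.log via the ChukwaDailyRollingFileAppender");
  }
}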

Modified: hadoop/core/trunk/src/contrib/chukwa/docs/README
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/contrib/chukwa/docs/README?rev=723855&r1=723854&r2=723855&view=diff
==============================================================================
--- hadoop/core/trunk/src/contrib/chukwa/docs/README (original)
+++ hadoop/core/trunk/src/contrib/chukwa/docs/README Fri Dec  5 12:30:14 2008
@@ -78,7 +78,17 @@
   - Edit bin/chukwa-config.sh to match your system configuration
   
   - In the chukwa root directory, say ``bash bin/processSinkFiles.sh'' 
+
+*  Configuring and starting Database
+
+  - Download the MySQL Connector/J from http://dev.mysql.com/downloads/connector/j/5.1.html
+
+  - Copy mysql-connector-*.jar to the Chukwa lib directory.
+
+  - Configure MySQL, then load the schema from the database_create_table script in the Chukwa conf directory by running:  mysql -u root dbname < database_create_table
   
+  - Set the JDBC driver class name in conf/chukwa-env.sh to com.mysql.jdbc.Driver
+
 *  Configuring and starting HICC
 
   - Download Apache Tomcat from http://tomcat.apache.org/download-60.cgi
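
The database steps added to the README above install MySQL Connector/J and load the schema from database_create_table. As a quick way to verify that setup, a hedged JDBC sketch follows; the ChukwaDbSmokeTest class, host, database name, and credentials are placeholders, not values from this commit.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

public class ChukwaDbSmokeTest {
  public static void main(String[] args) throws Exception {
    // Driver class name from the README; mysql-connector-*.jar must be on the classpath.
    Class.forName("com.mysql.jdbc.Driver");
    // Host, database name, and credentials are placeholders.
    Connection conn = DriverManager.getConnection(
        "jdbc:mysql://localhost:3306/chukwa", "user", "password");
    try {
      Statement stmt = conn.createStatement();
      ResultSet rs = stmt.executeQuery("SHOW TABLES");  // tables created by database_create_table
      while (rs.next()) {
        System.out.println(rs.getString(1));
      }
      rs.close();
      stmt.close();
    } finally {
      conn.close();
    }
  }
}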

Modified: hadoop/core/trunk/src/contrib/chukwa/docs/paper/chukwa_08.tex
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/contrib/chukwa/docs/paper/chukwa_08.tex?rev=723855&r1=723854&r2=723855&view=diff
==============================================================================
--- hadoop/core/trunk/src/contrib/chukwa/docs/paper/chukwa_08.tex (original)
+++ hadoop/core/trunk/src/contrib/chukwa/docs/paper/chukwa_08.tex Fri Dec  5 12:30:14 2008
@@ -86,68 +86,90 @@
 
 We did not aim to solve the problem of real-time monitoring for failure detection, which systems such as Ganglia already do well. Rather, we wanted a system that would process large volumes of data, in a timescale of minutes, not seconds, to detect more subtle conditions, and to aid in failure diagnosis. Human engineers do not generally react on a timescale of seconds, and so a processing delay of a few minutes is not a concern for us.
 
-Such a data collection system ought not be developed in isolation. Many problems inherent in monitoring can be solved by relying on existing scalable distributed computing platforms. In particular, we believe that a distributed filesystem is a natural tool for data storage, and that MapReduce is a natural way of processing monitoring data. Leveraging this existing infrastructure drastically reduces the engineering difficulty inherent in large-scale monitoring.
+%In particular, we believe that a distributed filesystem is a natural tool for data storage, and that MapReduce is a natural way of processing monitoring data. Leveraging this existing infrastructure drastically reduces the engineering difficulty inherent in large-scale monitoring.
+
+%One of our overriding considerations was to use existing components as much as possible.  In particular, we sought to leverage the engineering that has gone into Hadoop's distributed filesystem 
  
 We are in the process of building a system, which we call Chukwa, to demonstrate that practical large-scale monitoring can be readily built atop this existing infrastructure. \footnote{In Hindu mythology, Chukwa is the turtle that holds up Maha-pudma, the elephant that holds up the world.  This name is especially appropriate for us, since the Hadoop mascot is a yellow elephant.}   
 It uses Hadoop's distributed file system (HDFS) as its data store, and relies on MapReduce jobs to process the data. By leveraging these existing tools, Chukwa can scale to thousands of nodes in both collection and analysis capacities, while providing a standardized and familiar framework for processing the collected data. Many components of Chukwa are pluggable, allowing easy customization and enhancement.
  
-The core components of Chukwa are largely complete, and we expect the system to enter production use at Yahoo! within the next few months. We have some initial operational experience, and preliminary performance metrics.    We begin by discussing our goals and requirements in some detail.  We then describe our design, explaining our motivation for various decisions. We then present some performance data, and conclude by offering some comparisons with related work.
+The core components of Chukwa are largely complete, and we expect the system to enter production use at Yahoo! within the next few months. We have some initial operational experience, and preliminary performance metrics.    We begin by discussing our goals and requirements in some detail.  We then describe our design, explaining our motivation for various decisions. We next present some performance data, and conclude by offering some comparisons with related work.
 
-\section{Goals and requirements} 
+\section{Motivation and requirements} 
 
+We intend to use Chukwa to monitor multiple clusters of several thousand hosts, potentially generating several terabytes of data per day. Our goals in designing Chukwa were based on a survey of our cluster users' functional requirements and performance demands.
 
-There are two key figures of merit for monitoring systems such as Chukwa: scale and latency.  We intend to use Chukwa to monitor multiple clusters of several thousand hosts, potentially generating several terabytes of data per day.  Our initial goal was a latency of less than ten minutes between the generation of data on cluster nodes and its availability in HDFS for centralized analysis using Hadoop MapReduce. We believe that our design can operate effectively with latencies closer to a minute, as discussed in our conclusion.  Few useful actions can be taken on timescales significantly shorter than that, and therefore we do not believe this limit will be problematic for us.%this statement may need more support
+We expect Chukwa to be used by four different (though overlapping) constituencies:  Hadoop users, cluster operators, cluster managers, and Hadoop developers.  These different groups have different functional requirements:
 
-The initial goal for Chukwa is to monitor and analyze several-thousand-node Hadoop deployments. Nothing in our design restricts us from monitoring other clusters, however our goals are primarily driven by the motivating example of Hadoop.  Analysis encompasses many goals:  
 \begin{itemize}
-\item
-Providing canned reports to meet known needs for accounting, capacity planning, performance characterization, utilization.  Some examples:
-%\begin{itemize}
-%\item 
-%Summarize grid usage: disk usage / CPU total, grouped by account, project, and user.  
-%\item 
-%  Measure performance improvement across Hadoop versions
-%  \item 
-%  Assess the quality of service based on waiting time, node availability, and service uptime
-%\end{itemize}
-These can be done in a straightfoward way via MapReduce jobs, provided that the data has been stored suitably.  
-\item
- Reducing the number and extent of outages through trend analysis and anomaly detection to identify at-risk elements such as compute nodes, name node, switches 
-\item
-  Contributing to Hadoop development in robustness and performance by making possible the analysis of jobs and infrastructure along many different dimensions
-\item
-   Reducing the number of false alerts and increasing the value and confidence level of true alerts through correlation of metrics and environmental information
-\item
-    Reducing the time and effort required to identify and resolve cluster issues by highlighting trouble spots and by correlating metrics and environmental information to provide the required context and history, as well as by bringing together all required tools in one place.
+\item \textbf{Hadoop Users} will ask how far along their jobs are, and what resources are available for future jobs. They need access to the logs and output from their jobs.  
+
+\item \textbf{Operators} need to be notified of hardware failures and performance anomalies. They need to be warned about resource shortages, such as storage exhaustion.  
+
+\item \textbf{Managers} need guidance in provisioning, and in apportioning costs. This means that they need tools for analyzing past usage by users and groups, and for projecting future demands.   They need access to figures of merit, such as average job waiting time.
+
+\item \textbf{Hadoop Developers} need information about the performance in operation, bottlenecks within Hadoop, common failure patterns, and so forth.
 \end{itemize}
 
-Chukwa can collect a large and open-ended set of time series metrics and logs, as well as  slowly changing dimensions such as machine configuration, related to the state, behavior, characterization, and performance of a Hadoop cluster and processes running on it. Stored data will be available indefinitely, limited only by storage capacity, and not by retrieval speed.  
+Fortunately these different demands boil down to a comparatively small set of technical requirements.  Chukwa must collect a large and open-ended set of time series metrics and logs, as well as  slowly changing dimensions such as machine configuration.  Stored data should be available promptly, and should remain available indefinitely. Efficient querying and analysis of large data volumes is essential.
 
-%Chukwa should be robust against agent crashes
+Our initial goal was to be able to monitor Hadoop clusters of 2000 nodes, outputting 5 to 6 MB of data per second, and to have collected data available for processing within ten minutes. Few operational Hadoop clusters today are larger than 2000 nodes, and thus that figure represents a reasonable initial operating capability.  In section 4 of this paper, we report the operational measurements that justify our target data rate.
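+
+For scale, taking the midpoint of that range (about 5.5 MB per second) as a working figure, the target rate corresponds to roughly half a terabyte per monitored cluster per day:
+\[
+5.5\ \textrm{MB/s} \times 86{,}400\ \textrm{s/day} \approx 475\ \textrm{GB/day},
+\]
+which, multiplied across several monitored clusters, accounts for the multi-terabyte daily volumes mentioned above.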
 
-\section{Data Collection}
+While having all data available immediately after collection might be desirable, it is not actually crucial. Systems such as Nagios or Ganglia work well for real-time monitoring of metrics such as CPU load.   Human administrators can take few useful actions on timescales shorter than a few minutes, and so low-latency execution of more complex processing is not a priority.
 
-\begin{figure}
- \includegraphics[width=75mm]{chukwa0.jpg}
-\caption{The Chukwa collection architecture} 
-\end{figure}
 
+\section{Architecture}
+
+\begin{figure*}
+ \includegraphics[width=150mm]{chukwa_pipeline.png}
+\caption{The Chukwa Pipeline, showing how long data is retained at each stage.} 
+\end{figure*}
+
+\iffalse
 \begin{figure}
   \includegraphics[width=75mm]{chukwa1.jpg}
 \caption{The agent side}
 \end{figure}
+\fi
 
-To achieve our scalability goals, we opted early in the design process to make HDFS our chief storage technology. HDFS installations can store petabytes of data, and support fairly high throughput. HDFS also facilitates parallel processing of stored data with MapReduce.
 
-Chukwa thus breaks into two halves -- the pipeline for getting data into a convenient format in HDFS, and a toolkit for analyzing data using MapReduce and displaying it in a helpful way. A pipelined approach to data collection aids flexibility, by providing natural points for adding functionality. We discuss each section of the pipeline in turn, before discussing the user interface and analysis tools offered by Chukwa.
+At the heart of any data collection system is a pipeline to pump data from where it is generated to where it is stored. The requirements at the endpoints dictate the design of the system in the middle. To meet its goals, Chukwa needs flexible, dynamically controllable data sources, and a high performance, large scale storage system. %Generated data ought to be available for processing shortly after generation
+ It also needs a suitable framework for analyzing the large volumes of collected data.
 
 \subsection{Adaptors}
 
-The particular data being collected about a machine will change with time, and from machine to machine. As Hadoop tasks start and stop, different log files must be monitored. We might want to increase our collection rate if we detect anomalies.  And of course, the monitoring package on a given machine must be customized according to the services running on that machine: it makes no sense to collect Hadoop metrics on an NFS server.
+Data sources need to be dynamically controllable because the particular data being collected from a machine changes over time, and varies from machine to machine. For example, as Hadoop tasks start and stop, different log files must be monitored. We might want to increase our collection rate if we detect anomalies.  And of course, it makes no sense to collect Hadoop metrics on an NFS server. 
+
+These dynamically controllable data sources are known in Chukwa as \textit{adaptors}, since they generally wrap some other data source, such as a file or a Unix command-line tool.  At present, Chukwa includes adaptors to collect Hadoop logs, application metrics, and system telemetry. We expect to write adaptors for tasks like counting recoverable disk read errors, retrieving causal logs from X-Trace \cite{xtrace}, and monitoring operating system and Java virtual machine state.
+
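+To make the adaptor contract concrete, the following Java sketch shows roughly what such a dynamically loadable data source looks like; the interface and method names here are illustrative placeholders rather than the actual Chukwa API.
+
+\begin{verbatim}
+import java.io.IOException;
+
+// Hypothetical sketch of the adaptor contract; the real
+// Chukwa interfaces differ in names and details.
+public interface Adaptor {
+  // Begin emitting chunks for the named source, resuming
+  // at the given byte offset after an agent restart.
+  void start(String source, long resumeOffset,
+             ChunkReceiver dest) throws IOException;
+
+  // Stop cleanly and report the offset of the last byte
+  // emitted, so the agent can checkpoint it.
+  long shutdown() throws IOException;
+}
+
+interface ChunkReceiver {
+  // Called by an adaptor to hand a chunk of data, tagged
+  // with its source and format, to the local agent.
+  void add(byte[] data, String source, String dataType)
+      throws IOException;
+}
+\end{verbatim}
+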
+%\subsection{Adaptors}
+% 
+%As a result, we bundle data collection into small dynamically loadable Adaptors which run within a local agent process on each machine. This process is left permanently running, and is restarted automatically if it crashes. The agent process is responsible for starting and stopping adaptors in response to external commands.  It also provides two crucial services to adaptors. First, it is responsible for forwarding chunks over HTTP to the collectors, where they are written to stable storage. Second, it is responsible for making regular checkpoints of adaptor state, and restarting adaptors at the appropriate position after a crash.  This checkpoint mechanism ensures that data is appropriately resent after a crash.
+
+
+\subsection{Storage}
+
+The scalability challenges in large-scale monitoring systems primarily concern the data storage and analysis components, since that is where data from multiple machines is brought together. We determined from the outset to rely on Hadoop's HDFS as our storage component. Hadoop HDFS installations can store petabytes of data, and support high throughput; 20 MB/sec for one writer is typical in operational deployments, with total cluster throughput routinely in excess of a gigabyte per second. HDFS also facilitates parallel processing of stored data with MapReduce.
+
+Unfortunately, HDFS is not designed for the sort of workloads associated with monitoring. HDFS aims to handle large files and high write rates from comparatively small numbers of writers. It is not designed for thousands of concurrent low-rate writers, and millions of small files. Worse, writes to a file are not visible to readers until the file is closed, and stable versions of HDFS do not allow closed files to be reopened for writing. As a result, some care must be taken in using HDFS to support continuous rather than batch processing.
+ Much of the Chukwa design was driven by the need to reconcile our many sporadic data sources with HDFS's performance characteristics and semantics.
+ 
+\subsection{Collectors and Agents}
+
+Chukwa resolves these conflicting demands by adding additional pipeline stages between the adaptors and the HDFS data store: \textit{collectors} and \textit{agents}.
+
+Rather than have each adaptor write directly to HDFS, data is sent across the network to a \textit{collector} process, which does the HDFS writes.  Each collector receives data from several hundred hosts, and writes all this data to a single \textit{sink file}, consisting of chunks of data plus metadata describing each chunk's source and format. Periodically, collectors close their sink files, rename them to mark them available for processing, and begin writing a new one.  Data is sent to collectors over HTTP, since this allows us to write our collector as a Java servlet. This in turn lets us use standard Java servlet containers for connection management. This is in keeping with the Chukwa philosophy of leveraging existing infrastructure when possible. 
+
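+As a rough illustration of this write path, the Java servlet sketched below appends each posted chunk to the currently open sink file in HDFS and rotates that file every few minutes.  The class name, the sink-file path layout, and the omission of per-chunk metadata and error handling are all simplifications; this is not the actual Chukwa collector.
+
+\begin{verbatim}
+import java.io.IOException;
+import java.io.InputStream;
+import javax.servlet.ServletException;
+import javax.servlet.http.*;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.*;
+
+public class SinkFileCollector extends HttpServlet {
+  private static final long ROTATE_MS = 5 * 60 * 1000;
+  private FileSystem fs;
+  private FSDataOutputStream sink;   // current sink file
+  private Path sinkPath;
+  private long openedAt;
+
+  public void init() throws ServletException {
+    try {
+      fs = FileSystem.get(new Configuration());
+      openNewSink();
+    } catch (IOException e) {
+      throw new ServletException(e);
+    }
+  }
+
+  private void openNewSink() throws IOException {
+    // Placeholder naming scheme for sink files.
+    sinkPath = new Path("/chukwa/logs/"
+        + System.currentTimeMillis() + ".chukwa");
+    sink = fs.create(sinkPath);
+    openedAt = System.currentTimeMillis();
+  }
+
+  protected synchronized void doPost(HttpServletRequest req,
+      HttpServletResponse resp) throws IOException {
+    // Append the posted chunk(s) to the open sink file.
+    InputStream in = req.getInputStream();
+    byte[] buf = new byte[8192];
+    for (int n = in.read(buf); n > 0; n = in.read(buf)) {
+      sink.write(buf, 0, n);
+    }
+    if (System.currentTimeMillis() - openedAt > ROTATE_MS) {
+      sink.close();                  // data visible only on close
+      fs.rename(sinkPath,            // mark available for demux
+          new Path(sinkPath.toString() + ".done"));
+      openNewSink();
+    }
+    resp.setStatus(HttpServletResponse.SC_OK);
+  }
+}
+\end{verbatim}
+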
+Collectors thus drastically reduce the number of HDFS files generated by Chukwa, from one per machine or adaptor per unit time, to a handful per cluster.  The decision to put collectors between data sources and the data store has other benefits. Collectors hide the details of the HDFS file system in use, such as its Hadoop version, from the adaptors.  This is a significant aid to configuration.  It is especially helpful when using Chukwa to monitor a development cluster running a different version of Hadoop or when using Chukwa to monitor a non-Hadoop cluster.  
+
+The second of our intermediate stages, agents, are less fundamental to the design. They exist primarily to provide various services to adaptors, and thus to make adaptors easier to write. Agents are long-running processes on each machine being monitored by Chukwa.  Each agent process is restarted automatically if it crashes. The agent provides three chief services to adaptors. First, the agent is responsible for starting and stopping adaptors in response to external commands.
+Second, it is responsible for forwarding chunks over HTTP to the collectors, where they are written to stable storage. 
+Third, it is responsible for making regular checkpoints of adaptor state, and restarting adaptors at the appropriate position after a crash.  
+
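+A minimal sketch of the third of these services, again with hypothetical names, might periodically persist each adaptor's last acknowledged offset to a local file:
+
+\begin{verbatim}
+import java.io.*;
+import java.util.Map;
+import java.util.Properties;
+
+// Illustrative checkpoint writer; the real agent's format
+// and schedule are implementation details of Chukwa.
+public class AdaptorCheckpointer {
+  private final File checkpointFile;
+
+  public AdaptorCheckpointer(File checkpointFile) {
+    this.checkpointFile = checkpointFile;
+  }
+
+  // Record, for each adaptor, the offset of the last byte
+  // successfully delivered to a collector.
+  public synchronized void write(Map<String, Long> offsets)
+      throws IOException {
+    Properties p = new Properties();
+    for (Map.Entry<String, Long> e : offsets.entrySet()) {
+      p.setProperty(e.getKey(), e.getValue().toString());
+    }
+    OutputStream out = new FileOutputStream(checkpointFile);
+    try { p.store(out, "adaptor checkpoint"); }
+    finally { out.close(); }
+  }
+
+  // After a crash, adaptors restart from these offsets, so
+  // any unacknowledged data is resent rather than lost.
+  public synchronized Properties read() throws IOException {
+    Properties p = new Properties();
+    if (checkpointFile.exists()) {
+      InputStream in = new FileInputStream(checkpointFile);
+      try { p.load(in); } finally { in.close(); }
+    }
+    return p;
+  }
+}
+\end{verbatim}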
 
-As a result, we bundle data collection into small dynamically loadable Adaptors.  At present, Chukwa includes adaptors to collect Hadoop logs, application metrics, and system telemetry. We expect to write adaptors for tasks like counting recoverable disk read errors, retrieving causal logs from X-Trace \cite{xtrace}, and monitoring operating system and Java virtual machine state. 
 
 %FIXME: ruby/failmon
+
 %The output of an adaptor consists of chunks, each containing one or more semantically meaningful records.  These records, such as lines from a log file or batches of metrics, must be kept together.  This means that adaptors must parse files intensively enough to detect record boundaries, and they must correctly handle buffering.
 
 %These costs are offset by important advantages. Keeping records intact allows collectors to run filters or triggers against data streams, without having to buffer partial lines. In the presence of load balancing across collectors, these filters will still behave properly. Perhaps most importantly, this allows the map phase of the periodic MapReduce process to extract metadata from records and use this metadata as a sort key: a large performance win.  If records were split across collectors, this metadata could be inaccessible.
@@ -155,25 +177,18 @@
 %FIXME: say something here
 %After a crash, the local agent restarts each adaptor, passing sequence number of the last byte of their output that was successfully sent to a collector.  Adaptors that read from files can seek to that offset before starting.  Adaptors that output ephemeral data, such as CPU load, can simply begin numbering output from this point, to mask  any crash-induced discontinuity from downstream users. 
 
-\subsection{Agent}
-
-Adaptors run within a local agent process on each machine. This process is left permanently running, and is restarted automatically if it crashes. The agent process is responsible for starting and stopping adaptors in response to external commands.  It also provides two crucial services to adaptors. First, it is responsible for forwarding chunks over HTTP to the collectors, where they are written to stable storage. Second, it is responsible for making regular checkpoints of adaptor state, and restarting adaptors at the appropriate position after a crash.  This checkpoint mechanism ensures that data is appropriately resent after a crash.
-
-We opted to put most of the recovery logic in the agents, rather than the collectors. This removes state from the collectors. We considered having agents check with collectors to find out what the last thing they sent was. However, each agent would have to check with each collector, resulting in an exorbitant number of connections if many agents fail and reboot simultaneously.
+%\subsection{Agent}
 
-\subsection{Collector}
+%Adaptors run within a local agent process on each machine. This checkpoint mechanism ensures that data is appropriately resent after a crash.
 
-The collector is a web service (implemented as a Java servlet) that receives chunks via HTTP, and writes them to stable storage in the Hadoop distributed file system.  
+%We opted to put most of the recovery logic in the agents, rather than the collectors. This removes state from the collectors. We considered having agents check with collectors to find out what the last thing they sent was. However, each agent would have to check with each collector, resulting in an exorbitant number of connections if many agents fail and reboot simultaneously.
 
-Each collector writes to a sink files in the Hadoop distributed file system. Each sink file contains a sequence of chunks, potentially from many different streams.  Every few minutes, collectors ``rotate'' sink files, by closing their current sink files, and marking them as available for processing. While it would make sense to use HDFS files as circular buffers, the filesystem semantics forbid this.  Instead, each collector closes its current sink file every few minutes, and marks it available for processing.  
-
-An alternate design would be to have each local agent dump its results directly into HDFS. Unfortunately, HDFS does not perform well for filesystems with large numbers of files, due to contention and space limits at the Namenode.  The many-to-one relationship between hosts and collectors helps significantly, although if Chukwa deployments got big enough, some more drastic step might be needed.
-
-Another advantage of having collectors is that it allows us to hide the details of the Chukwa storage cluster, such as its Hadoop version from the agents.  This is important, for instance, when using Chukwa to monitor a development cluster running an unstable version of Hadoop or when using Chukwa to monitor a non-Hadoop cluster.  
 
 \subsection{Demux and archiving}
 
-A pair of MapReduce jobs run every few minutes, taking these sink files as input.  The first job simply archives all the collected data, without processing or interpreting it.  The second job parses out structured data from some of the logs, and loads this structured data into a data store.   For performance reasons, we do record parsing in the Map phase, and extract key-value pairs. Example records include job history reports, task durations, and so forth. We then use the shuffle and sort phases of the Reduce to organize records semantically by application timestamp and type. This forced us to design our adaptors to have adaptors output meaningful records, and not arbitrary chunks of streams. %% FIXME: recast last sentence
+A pair of MapReduce jobs runs every few minutes, taking all the available sink files as input.  The first job simply archives all the collected data, without processing or interpreting it.  The second job parses out structured data from some of the logs, and loads this structured data into a data store.  
+
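+To give a flavor of this demux pass, the sketch below uses the Hadoop MapReduce API to key parsed records by data type and timestamp, so that the sort phase groups them for the reduce-side writers.  The line format and key layout are illustrative assumptions, not the actual Chukwa demux implementation.
+
+\begin{verbatim}
+import java.io.IOException;
+import org.apache.hadoop.io.LongWritable;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.mapred.MapReduceBase;
+import org.apache.hadoop.mapred.Mapper;
+import org.apache.hadoop.mapred.OutputCollector;
+import org.apache.hadoop.mapred.Reporter;
+
+// Illustrative demux-style mapper.  Assumes a placeholder
+// record format of "TIMESTAMP TYPE payload...".
+public class DemuxSketchMapper extends MapReduceBase
+    implements Mapper<LongWritable, Text, Text, Text> {
+
+  public void map(LongWritable offset, Text line,
+      OutputCollector<Text, Text> out, Reporter reporter)
+      throws IOException {
+    String[] fields = line.toString().split(" ", 3);
+    if (fields.length < 3) {
+      return;                       // skip unparseable records
+    }
+    String timestamp = fields[0];
+    String dataType = fields[1];
+    String payload = fields[2];
+    // Key by type and timestamp; the reduce phase writes each
+    // type to its own per-cluster, per-period output file.
+    out.collect(new Text(dataType + "/" + timestamp),
+                new Text(payload));
+  }
+}
+\end{verbatim}
+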
+% For performance reasons, we do record parsing in the Map phase, and extract key-value pairs. Example records include job history reports, task durations, and so forth. We then use the shuffle and sort phases of the Reduce to organize records semantically by application timestamp and type. This forced us to design our adaptors to have adaptors output meaningful records, and not arbitrary chunks of streams. %% FIXME: recast last sentence
 
 These datastores are also pluggable. For now, we use HDFS files, one file per cluster, per data type, and time period.  So, for instance, there would be one file for all of a particular cluster's datanode logs, for the period from noon to 1pm on a given day.   This is only an interim solution, and we are evaluating various more suitable data stores, with support for structured queries.  Hive, an HDFS-backed data warehouse, might also be a good fit here. \cite{hive} 
  % Hive supports a subset of SQL for queries, which is particularly appealing for the context of ad-hoc analytics. 
@@ -183,11 +198,8 @@
 
 Data stored in HDFS in a structured format can be processed straightforwardly with MapReduce jobs. We envision a library of ``canned'' MapReduce jobs for tasks like finding common failure modes, correlating events in the logs with slowdowns, discovering flakey machines, and so forth.  Since Chukwa data is split into different files based on content, these jobs take as input only a small fraction of the total data volume, and therefore can run relatively quickly.  Most structured storage systems, including Hive and Hypertable, include their own query interfaces. We expect that these interfaces will be used by users who want to do simple ad-hoc queries over stored Chukwa data, with MapReduce being reserved for more complex processing.
 
-
 \section{Data Analysis and Display}
 
-
-
 Collected data is only as useful as the analysis that can be done on it.  To ease analysis of collected data, we've built a flexible, configurable, ``portal-style'' web interface to Chukwa, termed the Hadoop Infrastructure Care Center (HICC). A configurable interface is not simply a frill --- it is necessary, since different users have very different data analysis needs.
 
 \begin{figure}
@@ -197,49 +209,31 @@
 \caption{HICC displaying some DataNode metrics}
 \end{figure}
 
-\subsection{The Use Model}
-
-We expect HICC to be used by four different (though overlapping) constituencies:   users, operators, managers, and developers.  These different groups have different characteristic questions:
-
-
-\begin{itemize}
-\item Users will ask how far along their jobs are, and what resources are available for future jobs. They need access to the logs and output from their jobs.  
-
-\item Operators need to be notified of hardware failures and performance anomalies. They need to be warned about resource shortages, such as storage exhaustion.  
-
-\item Managers need guidance in provisioning, and in apportioning costs. This means that they need tools for analyzing past usage by users and groups, and for projecting future demands.   They need access to figures of merit, such as average job waiting time.
-
-\item Developers need information about the performance in operation, bottlenecks within Hadoop, common failure patterns, and so forth.
-
-\end{itemize}
-
-\subsection{How HICC works}
 
 In practice, a single individual often fulfills more than one of these roles, or some portion of a role. As a result, there is a compelling need to allow individuals to mix and match different components. We chose to do this by bundling each query, or family of queries, into a widget. HICC users can assemble their HICC workspace by selecting widgets from a catalog, in exactly the way that they can customize their personal Yahoo! or Google portal pages.
 
 Some of these components will display the results of canned map-reduce jobs run against data in  Chukwa storage.  Others will perform on-the-fly queries against SQL databases.    Still others might display telemetry collected with Ganglia, or report on recently opened failure tickets.  
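+
+The widget contract itself is small.  As a purely hypothetical sketch (the real HICC widget mechanism is configured differently), a widget needs little more than a title for the catalog and a method that renders its view for the current dashboard:
+
+\begin{verbatim}
+import java.util.Map;
+
+// Hypothetical HICC widget contract, for illustration only.
+public interface Widget {
+  // Human-readable name shown in the widget catalog.
+  String getTitle();
+
+  // Render this widget's HTML fragment, e.g. a chart built
+  // from canned MapReduce output or an on-the-fly SQL query.
+  String render(Map<String, String> dashboardContext);
+}
+\end{verbatim}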
 
-HICC stores several kinds of widget state.  There is a global context, through which different widgets can ``collaborate'' and share data. There is a dashboard view, which stores information about the user's preferred layout, and display settings.  There is a widget descriptor file, for storing widget configuration and parameters.   
+% HICC stores several kinds of widget state.  There is a global context, through which different widgets can ``collaborate'' and share data. There is a dashboard view, which stores information about the user's preferred layout, and display settings.  There is a widget descriptor file, for storing widget configuration and parameters.   
 
 %HICC is not intended for exploratory, ad-hoc queries. For that, we expect to rely on the query interface of our structured data store.  Since this store is not yet available, we have been using a MySQL database.
 
+
 \section{Evaluation}
 
 %Chukwa is currently in development, and we have not yet been able to do large-scale tests.  However, we have reason to be confident that Chukwa will comfortably meet our performance goals.
 
 Using logs from a production cluster at Yahoo!, we found that a 2000-node production cluster would generate around 5.5 MB of data per second.  Of this, the vast bulk (more than 95\%) was task tracker logs.  Metrics data accounted for more than half the remainder, with Namenode, HDFS datanode, and JobTracker logs accounting for the rest. This data rate is small enough that Chukwa should impose only very modest overhead on datacenter networks. 
 
-  We conducted a number of small experiments to verify that Chukwa could handle this sort of load.   All tests were run on an internal development cluster at Yahoo.  Machines had four 2.8 GHz Xeon processors, four IDE disks, and 3 GB of RAM, and ran Linux, with a 2.6.9 kernel.  %kernel version 2.6.9-55
-
-We checked for performance limitations at three places: the collector, the HDFS cluster and the map-reduce job.
-To assess collector throughput, we disabled HDFS writes, and measured the throughput between a single agent and collector, connected by gigabit ethernet.  We found that throughput was around 80 MB/sec, and that this limit was imposed by network latency.  Handling this input took only half of one CPU on the collector side, which is an affordable overhead. 
+We conducted a number of small experiments to verify that Chukwa could handle this load.   All tests were run on an internal development cluster at Yahoo.  Machines had four 2.8 GHz Xeon processors, four IDE disks, and 3 GB of RAM, and ran Linux, with a 2.6.9 kernel.  %kernel version 2.6.9-55
+There are two potential bottlenecks in Chukwa that we evaluated in detail: the collector and the MapReduce job.  At present, collector throughput is more than adequate, and the demux job is the limiting phase in processing.
 
-A single Chukwa collector was easily able to saturate the HDFS back-end with very modest CPU cost. Our five-node HDFS test cluster saturated at about 25 megabytes per second.  Even writing 25 megabytes per second, the collector was only using about 10\% of a single core. This is an encouraging number, since it suggests that even small monitoring clusters have ample write bandwidth for current as well as future data collection demands.  
+To measure collector performance, we ran Chukwa on a 400-node test cluster. We configured nodes in this cluster to report data at many times the normal operational rate, emulating a much larger cluster.  In this configuration, the test cluster generated 14.4 megabytes of monitoring data per second.  A single collector was able to keep up with this data volume, and write it to HDFS; in a 30-minute test run, machine utilization never rose much above 50\%.  At this rate, we are bumping into the single-writer throughput limits imposed by HDFS, rather than any Chukwa-specific limits. Higher Chukwa bandwidth could be achieved by simply adding more writers.
 
-%The bottleneck in our current design is the demux job.  As mentioned, limitations in Hadoop restrict performance 
-%we expect that our write bandwidth would increase if we added additional collectors, since using more than one writer enables the HDFS datanodes to concurrently write across multiple physical disks.  
+%The scalability limits we observed in more intensive experiments were caused by thread limits in Jetty, the Servlet container we are using at present. These limits can likely be overcome by more careful configuration. However, 7.2 MB/sec significantly exceeds both our performance goals and the rate at which we can process incoming data, so we have yet to perform this optimization. 
 
-As mentioned, the bottleneck in our design is the Demux job. We found that our Demux job can easily keep up with incoming data.  Using four worker nodes, we were able to process a gigabyte of data in two and a half minutes; for a throughput of 6.6 MB/sec, which exceeds our target data rate. Larger data volumes improved efficiency significantly:  with 2 GB of data, the processing rate went up to 8.5 MB/sec. Optimizing MapReduce jobs is a reasonably well understood science, and we believe that significant gains can be readily achieved here. 
+At present, the rate-limiting phase of Chukwa is the Demux job. Using five worker nodes, our MapReduce job can process two gigabytes of metrics data in around three and a half minutes. We conducted five trials on the same 2 GB of test data.  Completion times ranged from 3:25 to 3:34 minutes, with a mean of 3:30.  This means that we can process six minutes' worth of incoming data in three and a half minutes, thus keeping up with the incoming data flow and achieving our ten-minute target latency.  
+Optimizing MapReduce jobs is fairly routine engineering at this point, and we believe that significant gains can be achieved here. 
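+
+In other words, the measured demux rate comfortably exceeds the incoming data rate:
+\[
+\frac{2048\ \textrm{MB}}{210\ \textrm{s}} \approx 9.8\ \textrm{MB/s} > 5.5\ \textrm{MB/s},
+\]
+so each batch of sink files is processed well before the next one accumulates.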
 
 These results show that Chukwa can maintain latencies well under our ten minute target, while imposing very modest overheads on the cluster: five Chukwa nodes are only 0.25\% of our notional 2000-node cluster. We expect to be able to maintain these latency targets as we scale up the number of nodes being monitored.  Ramping up the size of MapReduce jobs is routine, and the engineering issues are well understood. Even for monitoring hundreds of thousands of nodes, Chukwa's data volumes would be significantly smaller than those seen in our production web indexing clusters.
 
@@ -249,14 +243,21 @@
 
 Chukwa represents a design point in between two existing classes of systems: log collection frameworks on the one hand, and network management systems on the other.  Chukwa intends to combine the abundance of data display tools of existing NMS systems, with the high throughput and robustness expected of log collection frameworks.
 
-The syslog protocol supported streaming logs across the network as long ago as the late 1980s.  However, syslog had serious defects: no clear solution to the discovery, load balancing, or failure handing problems.  The Scribe system apparently solves some of these problems, but unfortunately, no details of scribe have been published.
+The syslog protocol supported streaming logs across the network as long ago as the late 1980s.  However, syslog had serious defects: it offered no clear solution to the discovery, load balancing, or failure handling problems.  Facebook's Scribe \cite{scribe} system apparently solves some of these problems, but unfortunately, no details of Scribe have been published.
 
 Chukwa has some similarity with network monitoring systems such as Nagios, Ganglia, or Tivoli Monitoring \cite{Ganglia, Nagios, tivmon}. The three systems differ in emphasis, but have important commonalities.  All are capable of collecting and storing substantial volumes of metrics data. All include tools for displaying this data.  Nagios and Tivoli monitoring have centralized architectures, while Ganglia is decentralized.  Ganglia, unfortunately, is heavily adapted towards numeric time-series data, and provides minimal support for the sort of complex text-processing necessary for our applications.
 
+Chukwa, however, differs in crucial respects from these current systems. Today's monitoring systems are focused primarily on collection, with storage being a secondary priority.  Chukwa is designed for far higher data rates; metrics data, which is essentially all that Ganglia and Nagios are used to collect, is only a few percent of the data we will capture in operational settings. 
+
+With hundreds of gigabytes of data being collected per day, processing the stored data becomes a key bottleneck.  Chukwa's design was optimized precisely for storage and batch processing of collected data.  While MapReduce is routinely used at these scales, no currently available monitoring system makes provision for large-scale data intensive processing. 
+
+
 \section{Conclusion}
 %\label{sec:conclusion}
 
-Chukwa provides a flexible and powerful toolkit for analyzing displaying collected data.  We expect Chukwa to be very useful in monitoring and managing large system deployments.
+Chukwa demonstrates that a high-performance distributed monitoring system can readily be built atop existing distributed storage and processing frameworks. The Hadoop distributed file system supports petabytes of stored data and hundreds of megabytes per second of write throughput, enough for even very demanding monitoring applications. MapReduce provides a suitable framework for organizing and analyzing these data volumes.
+
+% Chukwa provides flexible and powerful tools for analyzing and displaying collected monitoring data.  We expect Chukwa to be very useful in monitoring and managing large system deployments.
 
 Building Chukwa on top of Hadoop resulted in a few design quirks, and a modest latency penalty.  However, it greatly simplified implementation, and leverages the substantial amount of work going into Hadoop.  Hadoop 0.19, which will be released within a few months, should significantly improve the performance of short-running Map tasks, which will allow us to efficiently operate Chukwa on short timescales.
 
@@ -285,6 +286,10 @@
 \bibitem{xtrace}
 Rodrigo Fonseca, George Porter, Randy H. Katz, Scott Shenker, and Ion Stoica.  X-Trace: A Pervasive Network Tracing Framework.
 \newblock In \textit{4th USENIX Symposium on Networked Systems Design \& Implementation (NSDI'07)}, Cambridge, MA, USA, April 2007.
+\bibitem{scribe}
+Jeff Hammerbacher.  Description of Facebook's Scribe logfile aggregation system.
+\newblock \url{https://issues.apache.org/jira/browse/HADOOP-2206?focusedCommentId=12542775#action_12542775}
+
 \end{thebibliography}
 
 \end{document}


