chukwa-commits mailing list archives

From ey...@apache.org
Subject svn commit: r783442 [4/5] - in /hadoop/chukwa/trunk: ./ contrib/chukwa-pig/ ivy/ lib/ src/java/org/apache/hadoop/chukwa/database/ src/java/org/apache/hadoop/chukwa/datacollection/adaptor/ src/java/org/apache/hadoop/chukwa/rest/ src/java/org/apache/hado...
Date Wed, 10 Jun 2009 18:31:26 GMT
Added: hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/rest/services/ClientTraceHome.java
URL: http://svn.apache.org/viewvc/hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/rest/services/ClientTraceHome.java?rev=783442&view=auto
==============================================================================
--- hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/rest/services/ClientTraceHome.java (added)
+++ hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/rest/services/ClientTraceHome.java Wed Jun 10 18:31:05 2009
@@ -0,0 +1,105 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.chukwa.rest.services;
+
+import java.util.*;
+import java.sql.*;
+import javax.naming.InitialContext;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.chukwa.database.DatabaseConfig;
+import org.apache.hadoop.chukwa.database.Macro;
+import org.apache.hadoop.chukwa.util.DatabaseWriter;
+
+import org.apache.hadoop.chukwa.rest.objects.ClientTrace;
+import org.apache.hadoop.chukwa.rest.services.RestHome;
+
+/**
+ * Home object for domain model class ClientTrace.
+ * @see org.apache.hadoop.chukwa.rest.objects.ClientTrace
+ * @author Hibernate Tools
+ */
+public class ClientTraceHome extends RestHome {
+    private static String table="[ClientTrace]";
+    private static final Log log = LogFactory
+	.getLog(ClientTraceHome.class);
+
+    private static ClientTrace createClientTrace(ResultSet rs) {
+	ClientTrace obj=null;
+	try {
+	    obj = new ClientTrace(
+				  rs.getTimestamp("Timestamp"),
+				  rs.getDouble("local_hdfs_read"),
+				  rs.getDouble("intra_rack_hdfs_read"),
+				  rs.getDouble("inter_rack_hdfs_read"),
+				  rs.getDouble("local_hdfs_write"),
+				  rs.getDouble("intra_rack_hdfs_write"),
+				  rs.getDouble("inter_rack_hdfs_write"),
+				  rs.getDouble("local_mapred_shuffle"),
+				  rs.getDouble("intra_rack_mapred_shuffle"),
+				  rs.getDouble("inter_rack_mapred_shuffle")
+				  );
+	} catch (Exception e) {	    
+	}
+	return obj;
+    }
+    
+    public static ClientTrace find(String timestamp) {
+	String cluster = getCluster();
+	DatabaseWriter dbw = new DatabaseWriter(cluster);
+
+	if (timestamp != null) {
+	    // get simple value
+            try {
+		String query = getSingleQuery(ClientTraceHome.table,"timestamp",timestamp);
+		log.debug(query);
+	    	ResultSet rs = dbw.query(query);
+	    	if (rs.next()) {
+		    log.debug("found matching ClientTrace record.");
+		    ClientTrace obj = createClientTrace(rs);
+		    return obj;
+		}
+	    } catch (Exception e) {
+		log.error("exception:"+e.toString());
+	    }
+	} else {
+	    // check start time and end time
+	}
+	return null;
+    }
+
+    public static Collection<ClientTrace> findBetween(String starttime, String endtime) {
+	String cluster = getCluster();
+	DatabaseWriter dbw = new DatabaseWriter(cluster);
+
+	Collection<ClientTrace> collection = new Vector<ClientTrace>();
+
+	try {
+	    String query = getTimeBetweenQuery(ClientTraceHome.table,starttime,endtime);	    
+	    ResultSet rs = dbw.query(query);
+	    while (rs.next()) {
+		ClientTrace obj = createClientTrace(rs);
+		collection.add(obj);
+	    }
+	} catch (Exception e) {
+	    log.error("exception:"+e.toString());
+	}
+	return collection;
+    }
+}
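
For context, a minimal caller sketch for the single-key and time-range lookups above. It assumes the cluster name is resolvable via RestHome.getCluster() and that timestamps are passed as epoch milliseconds in string form (inferred from the composite-key finders elsewhere in this commit, not confirmed for the one-argument find); the concrete values are hypothetical.

    import java.util.Collection;
    import org.apache.hadoop.chukwa.rest.objects.ClientTrace;
    import org.apache.hadoop.chukwa.rest.services.ClientTraceHome;

    public class ClientTraceLookupExample {
        public static void main(String[] args) {
            // Single-row lookup by timestamp; returns null when nothing matches
            // or when the query fails.
            ClientTrace one = ClientTraceHome.find("1244657465000");
            System.out.println(one != null ? "found a ClientTrace row" : "no match");

            // Range lookup; returns an empty collection rather than null on failure.
            Collection<ClientTrace> rows =
                ClientTraceHome.findBetween("1244650000000", "1244660000000");
            System.out.println("rows in range: " + rows.size());
        }
    }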

Added: hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/rest/services/ClusterDiskHome.java
URL: http://svn.apache.org/viewvc/hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/rest/services/ClusterDiskHome.java?rev=783442&view=auto
==============================================================================
--- hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/rest/services/ClusterDiskHome.java (added)
+++ hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/rest/services/ClusterDiskHome.java Wed Jun 10 18:31:05 2009
@@ -0,0 +1,137 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.chukwa.rest.services;
+
+import java.util.*;
+import java.sql.*;
+import javax.naming.InitialContext;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.chukwa.database.DatabaseConfig;
+import org.apache.hadoop.chukwa.database.Macro;
+import org.apache.hadoop.chukwa.util.DatabaseWriter;
+
+import org.apache.hadoop.chukwa.rest.objects.ClusterDisk;
+import org.apache.hadoop.chukwa.rest.services.RestHome;
+
+/**
+ * Home object for domain model class ClusterDisk.
+ * @see org.apache.hadoop.chukwa.rest.objects.ClusterDisk
+ * @author Hibernate Tools
+ */
+public class ClusterDiskHome extends RestHome {
+
+    private static String table="[cluster_disk]";
+    private static final Log log = LogFactory
+	.getLog(ClusterDiskHome.class);
+
+    /*
+     * convert from a result set record to an object
+     */
+    private static ClusterDisk createClusterDisk(ResultSet rs) {
+	ClusterDisk obj = null;
+	try {
+	    obj = new ClusterDisk(
+				  rs.getTimestamp("timestamp"),
+				  rs.getString("mount"),
+				  rs.getDouble("used"),
+				  rs.getDouble("available"),
+				  rs.getDouble("used_percent")
+				  );
+	} catch (Exception e) {
+	}
+	return obj;
+    }
+    
+    /*
+     * find by timestamp
+     */
+    public static ClusterDisk find(String timestamp) {
+	String cluster = getCluster();
+	DatabaseWriter dbw = new DatabaseWriter(cluster);
+
+	if (timestamp != null) {
+	    // get simple value
+            try {
+		String query = getSingleQuery(ClusterDiskHome.table,"timestamp",timestamp);
+	    	ResultSet rs = dbw.query(query);
+	    	if (rs.next()) {
+		    ClusterDisk obj = createClusterDisk(rs);
+		    return obj;
+		}
+	    } catch (Exception e) {
+		log.error("exception:"+e.toString());
+	    }
+	} else {
+	    // check start time and end time
+	}
+	return null;
+    }
+
+    /*
+     * find by key 
+     */
+    public static ClusterDisk find(String timestamp, String mount) {
+	String cluster = getCluster();
+	DatabaseWriter dbw = new DatabaseWriter(cluster);
+
+	if (timestamp != null) {
+	    // get simple value
+            try {
+		Map<String, String> criteriaMap = new HashMap<String,String>();
+		criteriaMap.put("timestamp",convertLongToDateString(Long.parseLong(timestamp)));
+		criteriaMap.put("mount",mount);
+
+		String query = getCriteriaQuery(ClusterDiskHome.table,criteriaMap);
+	    	ResultSet rs = dbw.query(query);
+	    	if (rs.next()) {
+		    ClusterDisk obj = createClusterDisk(rs);
+		    return obj;
+		}
+	    } catch (Exception e) {
+		log.error("exception:"+e.toString());
+	    }
+	} else {
+	    // check start time and end time
+	}
+	return null;
+    }
+
+    /*
+     * find within the start time and end time
+     */
+    public static Collection<ClusterDisk> findBetween(String starttime, String endtime) {
+	String cluster = getCluster();
+	DatabaseWriter dbw = new DatabaseWriter(cluster);
+
+	Collection<ClusterDisk> collection = new Vector<ClusterDisk>();
+
+	try {
+	    String query = getTimeBetweenQuery(ClusterDiskHome.table,starttime,endtime);	    
+	    ResultSet rs = dbw.query(query);
+	    while (rs.next()) {
+		ClusterDisk obj = createClusterDisk(rs);
+		collection.add(obj);
+	    }
+	} catch (Exception e) {
+	    log.error("exception:"+e.toString());
+	}
+	return collection;
+    }
+}
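
The two-argument finder above selects a single row by the (timestamp, mount) key via getCriteriaQuery. A hedged usage sketch, with illustrative values (the epoch-millisecond timestamp and the "/" mount point are assumptions, not taken from this commit):

    import org.apache.hadoop.chukwa.rest.objects.ClusterDisk;
    import org.apache.hadoop.chukwa.rest.services.ClusterDiskHome;

    public class ClusterDiskLookupExample {
        public static void main(String[] args) {
            // Composite-key lookup: timestamp (epoch milliseconds as a string)
            // plus the mount point; both values here are illustrative.
            ClusterDisk row = ClusterDiskHome.find("1244657465000", "/");
            System.out.println(row != null ? "disk row found" : "no match");
        }
    }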

Added: hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/rest/services/ClusterHadoopRpcHome.java
URL: http://svn.apache.org/viewvc/hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/rest/services/ClusterHadoopRpcHome.java?rev=783442&view=auto
==============================================================================
--- hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/rest/services/ClusterHadoopRpcHome.java (added)
+++ hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/rest/services/ClusterHadoopRpcHome.java Wed Jun 10 18:31:05 2009
@@ -0,0 +1,161 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.chukwa.rest.services;
+
+import java.util.*;
+import java.sql.*;
+import javax.naming.InitialContext;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.chukwa.database.DatabaseConfig;
+import org.apache.hadoop.chukwa.database.Macro;
+import org.apache.hadoop.chukwa.util.DatabaseWriter;
+
+import org.apache.hadoop.chukwa.rest.objects.ClusterHadoopRpc;
+import org.apache.hadoop.chukwa.rest.services.RestHome;
+
+/**
+ * Home object for domain model class ClusterHadoopRpc.
+ * @see org.apache.hadoop.chukwa.rest.objects.ClusterHadoopRpc
+ * @author Hibernate Tools
+ */
+public class ClusterHadoopRpcHome extends RestHome {
+    private static String table="[cluster_hadoop_rpc]";
+    private static final Log log = LogFactory
+	.getLog(ClusterHadoopRpcHome.class);
+
+    /*
+     * convert from a result set record to an object
+     */
+    private static ClusterHadoopRpc createClusterHadoopRpc(ResultSet rs) {
+	ClusterHadoopRpc obj = null;
+	try {
+	    obj = new ClusterHadoopRpc(
+						    rs.getTimestamp("timestamp"),
+						    rs.getInt("host"),
+						    rs.getDouble("rpc_processing_time_avg_time"),
+						    rs.getDouble("rpc_processing_time_num_ops"),
+						    rs.getDouble("rpc_queue_time_avg_time"),
+						    rs.getDouble("rpc_queue_time_num_ops"),
+						    rs.getDouble("get_build_version_avg_time"),
+						    rs.getDouble("get_build_version_num_ops"),
+						    rs.getDouble("get_job_counters_avg_time"),
+						    rs.getDouble("get_job_counters_num_ops"),
+						    rs.getDouble("get_job_profile_avg_time"),
+						    rs.getDouble("get_job_profile_num_ops"),
+						    rs.getDouble("get_job_status_avg_time"),
+						    rs.getDouble("get_job_status_num_ops"),
+						    rs.getDouble("get_new_job_id_avg_time"),
+						    rs.getDouble("get_new_job_id_num_ops"),
+						    rs.getDouble("get_protocol_version_avg_time"),
+						    rs.getDouble("get_protocol_version_num_ops"),
+						    rs.getDouble("get_system_dir_avg_time"),
+						    rs.getDouble("get_system_dir_num_ops"),
+						    rs.getDouble("get_task_completion_events_avg_time"),
+						    rs.getDouble("get_task_completion_events_num_ops"),
+						    rs.getDouble("get_task_diagnostics_avg_time"),
+						    rs.getDouble("get_task_diagnostics_num_ops"),
+						    rs.getDouble("heartbeat_avg_time"),
+						    rs.getDouble("heartbeat_num_ops"),
+						    rs.getDouble("killJob_avg_time"),
+						    rs.getDouble("killJob_num_ops"),
+						    rs.getDouble("submit_job_avg_time"),
+						    rs.getDouble("submit_job_num_ops")
+						    );
+	} catch (Exception e) {
+	}
+	return obj;
+    }
+    
+    /*
+     * find by timestamp
+     */
+    public static ClusterHadoopRpc find(String timestamp) {
+	String cluster = getCluster();
+	DatabaseWriter dbw = new DatabaseWriter(cluster);
+
+	if (timestamp != null) {
+	    // get simple value
+            try {
+		String query = getSingleQuery(ClusterHadoopRpcHome.table,"timestamp",timestamp);
+	    	ResultSet rs = dbw.query(query);
+	    	if (rs.next()) {
+		    ClusterHadoopRpc obj = createClusterHadoopRpc(rs);
+		    return obj;
+		}
+	    } catch (Exception e) {
+		log.error("exception:"+e.toString());
+	    }
+	} else {
+	    // check start time and end time
+	}
+	return null;
+    }
+
+    /*
+     * find by key 
+     */
+    public static ClusterHadoopRpc find(String timestamp, String host) {
+	String cluster = getCluster();
+	DatabaseWriter dbw = new DatabaseWriter(cluster);
+
+	if (timestamp != null) {
+	    // get simple value
+            try {
+		Map<String, String> criteriaMap = new HashMap<String,String>();
+		criteriaMap.put("timestamp",convertLongToDateString(Long.parseLong(timestamp)));
+		criteriaMap.put("host",host);
+
+		String query = getCriteriaQuery(ClusterHadoopRpcHome.table,criteriaMap);
+	    	ResultSet rs = dbw.query(query);
+	    	if (rs.next()) {
+		    ClusterHadoopRpc obj = createClusterHadoopRpc(rs);
+		    return obj;
+		}
+	    } catch (Exception e) {
+		log.error("exception:"+e.toString());
+	    }
+	} else {
+	    // check start time and end time
+	}
+	return null;
+    }
+
+    /*
+     * find within the start time and end time
+     */
+    public static Collection<ClusterHadoopRpc> findBetween(String starttime, String endtime) {
+	String cluster = getCluster();
+	DatabaseWriter dbw = new DatabaseWriter(cluster);
+
+	Collection<ClusterHadoopRpc> collection = new Vector<ClusterHadoopRpc>();
+
+	try {
+	    String query = getTimeBetweenQuery(ClusterHadoopRpcHome.table,starttime,endtime);	    
+	    ResultSet rs = dbw.query(query);
+	    while (rs.next()) {
+		ClusterHadoopRpc obj = createClusterHadoopRpc(rs);
+		collection.add(obj);
+	    }
+	} catch (Exception e) {
+	    log.error("exception:"+e.toString());
+	}
+	return collection;
+    }
+}

Added: hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/rest/services/ClusterSystemMetricsHome.java
URL: http://svn.apache.org/viewvc/hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/rest/services/ClusterSystemMetricsHome.java?rev=783442&view=auto
==============================================================================
--- hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/rest/services/ClusterSystemMetricsHome.java (added)
+++ hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/rest/services/ClusterSystemMetricsHome.java Wed Jun 10 18:31:05 2009
@@ -0,0 +1,148 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.chukwa.rest.services;
+
+import java.util.*;
+import java.sql.*;
+import javax.naming.InitialContext;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.chukwa.database.DatabaseConfig;
+import org.apache.hadoop.chukwa.database.Macro;
+import org.apache.hadoop.chukwa.util.DatabaseWriter;
+
+import org.apache.hadoop.chukwa.rest.objects.ClusterSystemMetrics;
+import org.apache.hadoop.chukwa.rest.services.RestHome;
+
+/**
+ * Home object for domain model class ClusterSystemMetrics.
+ * @see org.apache.hadoop.chukwa.rest.objects.ClusterSystemMetrics
+ * @author Hibernate Tools
+ */
+public class ClusterSystemMetricsHome extends RestHome {
+    private static String table="[cluster_system_metrics]";
+    private static final Log log = LogFactory
+	.getLog(ClusterSystemMetricsHome.class);
+
+    private static ClusterSystemMetrics createClusterSystemMetrics(ResultSet rs) {
+	ClusterSystemMetrics obj=null;
+	try {
+	    obj= new ClusterSystemMetrics(
+					  rs.getTimestamp("Timestamp"),
+					  rs.getInt("host"),
+					  rs.getDouble("load_15"),
+					  rs.getDouble("load_5"),
+					  rs.getDouble("load_1"),
+					  rs.getDouble("task_total"),
+					  rs.getDouble("task_running"),
+					  rs.getDouble("task_sleep"),
+					  rs.getDouble("task_stopped"),
+					  rs.getDouble("task_zombie"),
+					  rs.getDouble("mem_total"),
+					  rs.getDouble("mem_buffers"),
+					  rs.getDouble("mem_cached"),
+					  rs.getDouble("mem_used"),
+					  rs.getDouble("mem_free"),
+					  rs.getDouble("eth0_rxerrs"),
+					  rs.getDouble("eth0_rxbyts"),
+					  rs.getDouble("eth0_rxpcks"),
+					  rs.getDouble("eth0_rxdrops"),
+					  rs.getDouble("eth0_txerrs"),
+					  rs.getDouble("eth0_txbyts"),
+					  rs.getDouble("eth0_txpcks"),
+					  rs.getDouble("eth0_txdrops"),
+					  rs.getDouble("eth1_rxerrs"),
+					  rs.getDouble("eth1_rxbyts"),
+					  rs.getDouble("eth1_rxpcks"),
+					  rs.getDouble("eth1_rxdrops"),
+					  rs.getDouble("eth1_txerrs"),
+					  rs.getDouble("eth1_txbyts"),
+					  rs.getDouble("eth1_txpcks"),
+					  rs.getDouble("eth1_txdrops"),
+					  rs.getDouble("sda_rkbs"),
+					  rs.getDouble("sda_wkbs"),
+					  rs.getDouble("sdb_rkbs"),
+					  rs.getDouble("sdb_wkbs"),
+					  rs.getDouble("sdc_rkbs"),
+					  rs.getDouble("sdc_wkbs"),
+					  rs.getDouble("sdd_rkbs"),
+					  rs.getDouble("sdd_wkbs"),
+					  rs.getFloat("cpu_idle_pcnt"),
+					  rs.getFloat("cpu_nice_pcnt"),
+					  rs.getFloat("cpu_system_pcnt"),
+					  rs.getFloat("cpu_user_pcnt"),
+					  rs.getFloat("cpu_hirq_pcnt"),
+					  rs.getFloat("cpu_sirq_pcnt"),
+					  rs.getFloat("iowait_pcnt"),
+					  rs.getFloat("mem_buffers_pcnt"),
+					  rs.getFloat("mem_used_pcnt"),
+					  rs.getFloat("eth0_busy_pcnt"),
+					  rs.getFloat("eth1_busy_pcnt"),
+					  rs.getFloat("sda_busy_pcnt"),
+					  rs.getFloat("sdb_busy_pcnt"),
+					  rs.getFloat("sdc_busy_pcnt"),
+					  rs.getFloat("sdd_busy_pcnt"),
+					  rs.getFloat("swap_used_pcnt")
+					  );
+	} catch (Exception e) {
+	}
+	return obj;
+    }
+    
+    public static ClusterSystemMetrics find(String timestamp) {
+	String cluster = getCluster();
+	DatabaseWriter dbw = new DatabaseWriter(cluster);
+
+	if (timestamp != null) {
+	    // get simple value
+            try {
+		String query = getSingleQuery(ClusterSystemMetricsHome.table,"timestamp",timestamp);
+	    	ResultSet rs = dbw.query(query);
+	    	if (rs.next()) {
+		    ClusterSystemMetrics obj = createClusterSystemMetrics(rs);
+		    return obj;
+		}
+	    } catch (Exception e) {
+		log.error("exception:"+e.toString());
+	    }
+	} else {
+	    // check start time and end time
+	}
+	return null;
+    }
+
+    public static Collection<ClusterSystemMetrics> findBetween(String starttime, String endtime) {
+	String cluster = getCluster();
+	DatabaseWriter dbw = new DatabaseWriter(cluster);
+
+	Collection<ClusterSystemMetrics> collection = new Vector<ClusterSystemMetrics>();
+
+	try {
+	    String query = getTimeBetweenQuery(ClusterSystemMetricsHome.table,starttime,endtime);	    
+	    ResultSet rs = dbw.query(query);
+	    while (rs.next()) {
+		ClusterSystemMetrics obj = createClusterSystemMetrics(rs);
+		collection.add(obj);
+	    }
+	} catch (Exception e) {
+	    log.error("exception:"+e.toString());
+	}
+	return collection;
+    }
+}

Added: hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/rest/services/DfsDataNodeHome.java
URL: http://svn.apache.org/viewvc/hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/rest/services/DfsDataNodeHome.java?rev=783442&view=auto
==============================================================================
--- hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/rest/services/DfsDataNodeHome.java (added)
+++ hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/rest/services/DfsDataNodeHome.java Wed Jun 10 18:31:05 2009
@@ -0,0 +1,160 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.chukwa.rest.services;
+
+import java.util.*;
+import java.sql.*;
+import javax.naming.InitialContext;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.chukwa.database.DatabaseConfig;
+import org.apache.hadoop.chukwa.database.Macro;
+import org.apache.hadoop.chukwa.util.DatabaseWriter;
+
+import org.apache.hadoop.chukwa.rest.objects.DfsDataNode;
+import org.apache.hadoop.chukwa.rest.services.RestHome;
+
+/**
+ * Home object for domain model class DfsDataNode.
+ * @see org.apache.hadoop.chukwa.rest.objects.DfsDataNode
+ * @author Hibernate Tools
+ */
+public class DfsDataNodeHome extends RestHome {
+    private static String table="[dfs_datanode]";
+    private static final Log log = LogFactory
+	.getLog(DfsDataNodeHome.class);
+
+    /*
+     * convert from a result set record to an object
+     */
+    private static DfsDataNode createDfsDataNode(ResultSet rs) {
+	DfsDataNode obj = null;
+	try {
+	    obj = new DfsDataNode(
+					  rs.getTimestamp("timestamp"),
+					  rs.getString("host"),
+					  rs.getDouble("block_reports_avg_time"),
+					  rs.getDouble("block_reports_num_ops"),
+					  rs.getDouble("block_verification_failures"),
+					  rs.getDouble("blocks_read"),
+					  rs.getDouble("blocks_removed"),
+					  rs.getDouble("blocks_replicated"),
+					  rs.getDouble("blocks_verified"),
+					  rs.getDouble("blocks_written"),
+					  rs.getDouble("bytes_read"),
+					  rs.getDouble("bytes_written"),
+					  rs.getDouble("copy_block_op_avg_time"),
+					  rs.getDouble("copy_block_op_num_ops"),
+					  rs.getDouble("heart_beats_avg_time"),
+					  rs.getDouble("heart_beats_num_ops"),
+					  rs.getDouble("read_block_op_avg_time"),
+					  rs.getDouble("read_block_op_num_ops"),
+					  rs.getDouble("read_metadata_op_avg_time"),
+					  rs.getDouble("read_metadata_op_num_ops"),
+					  rs.getDouble("reads_from_local_client"),
+					  rs.getDouble("reads_from_remote_client"),
+					  rs.getDouble("replace_block_op_avg_time"),
+					  rs.getDouble("replace_block_op_num_ops"),
+					  rs.getDouble("session_id"),
+					  rs.getDouble("write_block_op_avg_time"),
+					  rs.getDouble("write_block_op_num_ops"),
+					  rs.getDouble("writes_from_local_client"),
+					  rs.getDouble("writes_from_remote_client")
+					  );
+	} catch (Exception e) {
+	}
+	return obj;
+    }
+    
+    /*
+     * find by timestamp
+     */
+    public static DfsDataNode find(String timestamp) {
+	String cluster = getCluster();
+	DatabaseWriter dbw = new DatabaseWriter(cluster);
+
+	if (timestamp != null) {
+	    // get simple value
+            try {
+		String query = getSingleQuery(DfsDataNodeHome.table,"timestamp",timestamp);
+	    	ResultSet rs = dbw.query(query);
+	    	if (rs.next()) {
+		    DfsDataNode obj = createDfsDataNode(rs);
+		    return obj;
+		}
+	    } catch (Exception e) {
+		log.error("exception:"+e.toString());
+	    }
+	} else {
+	    // check start time and end time
+	}
+	return null;
+    }
+
+    /*
+     * find by key 
+     */
+    public static DfsDataNode find(String timestamp, String host) {
+	String cluster = getCluster();
+	DatabaseWriter dbw = new DatabaseWriter(cluster);
+
+	if (timestamp != null) {
+	    // get simple value
+            try {
+		Map<String, String> criteriaMap = new HashMap<String,String>();
+		criteriaMap.put("timestamp",convertLongToDateString(Long.parseLong(timestamp)));
+		criteriaMap.put("host",host);
+
+		String query = getCriteriaQuery(DfsDataNodeHome.table,criteriaMap);
+	    	ResultSet rs = dbw.query(query);
+	    	if (rs.next()) {
+		    DfsDataNode obj = createDfsDataNode(rs);
+		    return obj;
+		}
+	    } catch (Exception e) {
+		log.error("exception:"+e.toString());
+	    }
+	} else {
+	    // check start time and end time
+	}
+	return null;
+    }
+
+    /*
+     * find within the start time and end time
+     */
+    public static Collection<DfsDataNode> findBetween(String starttime, String endtime) {
+	String cluster = getCluster();
+	DatabaseWriter dbw = new DatabaseWriter(cluster);
+
+	Collection<DfsDataNode> collection = new Vector<DfsDataNode>();
+
+	try {
+	    String query = getTimeBetweenQuery(DfsDataNodeHome.table,starttime,endtime);	    
+	    ResultSet rs = dbw.query(query);
+	    while (rs.next()) {
+		DfsDataNode obj = createDfsDataNode(rs);
+		collection.add(obj);
+	    }
+	} catch (Exception e) {
+	    log.error("exception:"+e.toString());
+	}
+	return collection;
+    }
+}

Added: hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/rest/services/DfsFsNameSystemHome.java
URL: http://svn.apache.org/viewvc/hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/rest/services/DfsFsNameSystemHome.java?rev=783442&view=auto
==============================================================================
--- hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/rest/services/DfsFsNameSystemHome.java (added)
+++ hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/rest/services/DfsFsNameSystemHome.java Wed Jun 10 18:31:05 2009
@@ -0,0 +1,142 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.chukwa.rest.services;
+
+import java.util.*;
+import java.sql.*;
+import javax.naming.InitialContext;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.chukwa.database.DatabaseConfig;
+import org.apache.hadoop.chukwa.database.Macro;
+import org.apache.hadoop.chukwa.util.DatabaseWriter;
+
+import org.apache.hadoop.chukwa.rest.objects.DfsFsNameSystem;
+import org.apache.hadoop.chukwa.rest.services.RestHome;
+
+/**
+ * Home object for domain model class DfsFsNameSystem.
+ * @see org.apache.hadoop.chukwa.rest.objects.DfsFsNameSystem
+ * @author Hibernate Tools
+ */
+public class DfsFsNameSystemHome extends RestHome {
+    private static String table="[dfs_fsnamesystem]";
+    private static final Log log = LogFactory
+	.getLog(DfsFsNameSystemHome.class);
+
+    /*
+     * convert from a result set record to an object
+     */
+    private static DfsFsNameSystem createDfsFsNameSystem(ResultSet rs) {
+	DfsFsNameSystem obj = null;
+	try {
+	    obj = new DfsFsNameSystem(
+						  rs.getTimestamp("timestamp"),
+						  rs.getString("host"),
+						  rs.getDouble("blocks_total"),
+						  rs.getDouble("capacity_remaining_gb"),
+						  rs.getDouble("capacity_total_gb"),
+						  rs.getDouble("capacity_used_gb"),
+						  rs.getDouble("files_total"),
+						  rs.getDouble("pending_replication_blocks"),
+						  rs.getDouble("scheduled_replication_blocks"),
+						  rs.getDouble("total_load"),
+						  rs.getDouble("under_replicated_blocks")
+						  );
+	} catch (Exception e) {
+	}
+	return obj;
+    }
+    
+    /*
+     * find by timestamp
+     */
+    public static DfsFsNameSystem find(String timestamp) {
+	String cluster = getCluster();
+	DatabaseWriter dbw = new DatabaseWriter(cluster);
+
+	if (timestamp != null) {
+	    // get simple value
+            try {
+		String query = getSingleQuery(DfsFsNameSystemHome.table,"timestamp",timestamp);
+	    	ResultSet rs = dbw.query(query);
+	    	if (rs.next()) {
+		    DfsFsNameSystem obj = createDfsFsNameSystem(rs);
+		    return obj;
+		}
+	    } catch (Exception e) {
+		log.error("exception:"+e.toString());
+	    }
+	} else {
+	    // check start time and end time
+	}
+	return null;
+    }
+
+    /*
+     * find by key 
+     */
+    public static DfsFsNameSystem find(String timestamp, String host) {
+	String cluster = getCluster();
+	DatabaseWriter dbw = new DatabaseWriter(cluster);
+
+	if (timestamp != null) {
+	    // get simple value
+            try {
+		Map<String, String> criteriaMap = new HashMap<String,String>();
+		criteriaMap.put("timestamp",convertLongToDateString(Long.parseLong(timestamp)));
+		criteriaMap.put("host",host);
+
+		String query = getCriteriaQuery(DfsFsNameSystemHome.table,criteriaMap);
+	    	ResultSet rs = dbw.query(query);
+	    	if (rs.next()) {
+		    DfsFsNameSystem obj = createDfsFsNameSystem(rs);
+		    return obj;
+		}
+	    } catch (Exception e) {
+		log.error("exception:"+e.toString());
+	    }
+	} else {
+	    // check start time and end time
+	}
+	return null;
+    }
+
+    /*
+     * find within the start time and end time
+     */
+    public static Collection<DfsFsNameSystem> findBetween(String starttime, String endtime) {
+	String cluster = getCluster();
+	DatabaseWriter dbw = new DatabaseWriter(cluster);
+
+	Collection<DfsFsNameSystem> collection = new Vector<DfsFsNameSystem>();
+
+	try {
+	    String query = getTimeBetweenQuery(DfsFsNameSystemHome.table,starttime,endtime);	    
+	    ResultSet rs = dbw.query(query);
+	    while (rs.next()) {
+		DfsFsNameSystem obj = createDfsFsNameSystem(rs);
+		collection.add(obj);
+	    }
+	} catch (Exception e) {
+	    log.error("exception:"+e.toString());
+	}
+	return collection;
+    }
+}

Added: hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/rest/services/DfsNameNodeHome.java
URL: http://svn.apache.org/viewvc/hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/rest/services/DfsNameNodeHome.java?rev=783442&view=auto
==============================================================================
--- hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/rest/services/DfsNameNodeHome.java (added)
+++ hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/rest/services/DfsNameNodeHome.java Wed Jun 10 18:31:05 2009
@@ -0,0 +1,150 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.chukwa.rest.services;
+
+import java.util.*;
+import java.sql.*;
+import javax.naming.InitialContext;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.chukwa.database.DatabaseConfig;
+import org.apache.hadoop.chukwa.database.Macro;
+import org.apache.hadoop.chukwa.util.DatabaseWriter;
+
+import org.apache.hadoop.chukwa.rest.objects.DfsNameNode;
+import org.apache.hadoop.chukwa.rest.services.RestHome;
+
+/**
+ * Home object for domain model class DfsNameNode.
+ * @see org.apache.hadoop.chukwa.rest.objects.DfsNameNode
+ * @author Hibernate Tools
+ */
+public class DfsNameNodeHome extends RestHome {
+    private static String table="[dfs_namenode]";
+    private static final Log log = LogFactory
+	.getLog(DfsNameNodeHome.class);
+
+    /*
+     * convert from a result set record to an object
+     */
+    private static DfsNameNode createDfsNameNode(ResultSet rs) {
+	DfsNameNode obj = null;
+	try {
+	    obj = new DfsNameNode(
+					  rs.getTimestamp("timestamp"),
+					  rs.getString("host"),
+					  rs.getDouble("add_block_ops"),
+					  rs.getDouble("blocks_corrupted"),
+					  rs.getDouble("create_file_ops"),
+					  rs.getDouble("delete_file_ops"),
+					  rs.getDouble("files_created"),
+					  rs.getDouble("files_renamed"),
+					  rs.getDouble("files_deleted"),
+					  rs.getDouble("get_block_locations"),
+					  rs.getDouble("get_listing_ops"),
+					  rs.getDouble("safe_mode_time"),
+					  rs.getDouble("syncs_avg_time"),
+					  rs.getDouble("syncs_num_ops"),
+					  rs.getDouble("transactions_avg_time"),
+					  rs.getDouble("transactions_num_ops"),
+					  rs.getDouble("block_report_avg_time"),
+					  rs.getDouble("block_report_num_ops"),
+					  rs.getDouble("fs_image_load_time")
+					  );
+	} catch (Exception e) {
+	}
+	return obj;
+    }
+    
+    /*
+     * find by timestamp
+     */
+    public static DfsNameNode find(String timestamp) {
+	String cluster = getCluster();
+	DatabaseWriter dbw = new DatabaseWriter(cluster);
+
+	if (timestamp != null) {
+	    // get simple value
+            try {
+		String query = getSingleQuery(DfsNameNodeHome.table,"timestamp",timestamp);
+	    	ResultSet rs = dbw.query(query);
+	    	if (rs.next()) {
+		    DfsNameNode obj = createDfsNameNode(rs);
+		    return obj;
+		}
+	    } catch (Exception e) {
+		log.error("exception:"+e.toString());
+	    }
+	} else {
+	    // check start time and end time
+	}
+	return null;
+    }
+
+    /*
+     * find by key 
+     */
+    public static DfsNameNode find(String timestamp, String host) {
+	String cluster = getCluster();
+	DatabaseWriter dbw = new DatabaseWriter(cluster);
+
+	if (timestamp != null) {
+	    // get simple value
+            try {
+		Map<String, String> criteriaMap = new HashMap<String,String>();
+		criteriaMap.put("timestamp",convertLongToDateString(Long.parseLong(timestamp)));
+		criteriaMap.put("host",host);
+
+		String query = getCriteriaQuery(DfsNameNodeHome.table,criteriaMap);
+	    	ResultSet rs = dbw.query(query);
+	    	if (rs.next()) {
+		    DfsNameNode obj = createDfsNameNode(rs);
+		    return obj;
+		}
+	    } catch (Exception e) {
+		log.error("exception:"+e.toString());
+	    }
+	} else {
+	    // check start time and end time
+	}
+	return null;
+    }
+
+    /*
+     * find within the start time and end time
+     */
+    public static Collection<DfsNameNode> findBetween(String starttime, String endtime) {
+	String cluster = getCluster();
+	DatabaseWriter dbw = new DatabaseWriter(cluster);
+
+	Collection<DfsNameNode> collection = new Vector<DfsNameNode>();
+
+	try {
+	    String query = getTimeBetweenQuery(DfsNameNodeHome.table,starttime,endtime);	    
+	    ResultSet rs = dbw.query(query);
+	    while (rs.next()) {
+		DfsNameNode obj = createDfsNameNode(rs);
+		collection.add(obj);
+	    }
+	} catch (Exception e) {
+	    log.error("exception:"+e.toString());
+	}
+	return collection;
+    }
+}

Added: hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/rest/services/DfsThroughputHome.java
URL: http://svn.apache.org/viewvc/hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/rest/services/DfsThroughputHome.java?rev=783442&view=auto
==============================================================================
--- hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/rest/services/DfsThroughputHome.java (added)
+++ hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/rest/services/DfsThroughputHome.java Wed Jun 10 18:31:05 2009
@@ -0,0 +1,160 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.chukwa.rest.services;
+
+import java.util.*;
+import java.sql.*;
+import javax.naming.InitialContext;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.chukwa.database.DatabaseConfig;
+import org.apache.hadoop.chukwa.database.Macro;
+import org.apache.hadoop.chukwa.util.DatabaseWriter;
+
+import org.apache.hadoop.chukwa.rest.objects.DfsThroughput;
+import org.apache.hadoop.chukwa.rest.services.RestHome;
+
+/**
+ * Home object for domain model class DfsThroughput.
+ * @see org.apache.hadoop.chukwa.rest.objects.DfsThroughput
+ * @author Hibernate Tools
+ */
+public class DfsThroughputHome extends RestHome {
+    private static String table="[dfs_throughput]";
+    private static final Log log = LogFactory
+	.getLog(DfsThroughputHome.class);
+
+    /*
+     * convert from a result set record to an object
+     */
+    private static DfsThroughput createDfsThroughput(ResultSet rs) {
+	DfsThroughput obj = null;
+	try {
+	    obj = new DfsThroughput(
+					      rs.getTimestamp("timestamp"),
+					      rs.getInt("host"),
+					      rs.getDouble("block_reports_avg_time"),
+					      rs.getDouble("block_reports_num_ops"),
+					      rs.getDouble("block_verification_failures"),
+					      rs.getDouble("blocks_read"),
+					      rs.getDouble("blocks_removed"),
+					      rs.getDouble("blocks_replicated"),
+					      rs.getDouble("blocks_verified"),
+					      rs.getDouble("blocks_written"),
+					      rs.getDouble("bytes_read"),
+					      rs.getDouble("bytes_written"),
+					      rs.getDouble("copy_block_op_avg_time"),
+					      rs.getDouble("copy_block_op_num_ops"),
+					      rs.getDouble("heart_beats_avg_time"),
+					      rs.getDouble("heart_beats_num_ops"),
+					      rs.getDouble("read_block_op_avg_time"),
+					      rs.getDouble("read_block_op_num_ops"),
+					      rs.getDouble("read_metadata_op_avg_time"),
+					      rs.getDouble("read_metadata_op_num_ops"),
+					      rs.getDouble("reads_from_local_client"),
+					      rs.getDouble("reads_from_remote_client"),
+					      rs.getDouble("replace_block_op_avg_time"),
+					      rs.getDouble("replace_block_op_num_ops"),
+					      rs.getDouble("session_id"),
+					      rs.getDouble("write_block_op_avg_time"),
+					      rs.getDouble("write_block_op_num_ops"),
+					      rs.getDouble("writes_from_local_client"),
+					      rs.getDouble("writes_from_remote_client")
+					      );
+	} catch (Exception e) {
+	}
+	return obj;
+    }
+    
+    /*
+     * find by timestamp
+     */
+    public static DfsThroughput find(String timestamp) {
+	String cluster = getCluster();
+	DatabaseWriter dbw = new DatabaseWriter(cluster);
+
+	if (timestamp != null) {
+	    // get simple value
+            try {
+		String query = getSingleQuery(DfsThroughputHome.table,"timestamp",timestamp);
+	    	ResultSet rs = dbw.query(query);
+	    	if (rs.next()) {
+		    DfsThroughput obj = createDfsThroughput(rs);
+		    return obj;
+		}
+	    } catch (Exception e) {
+		log.error("exception:"+e.toString());
+	    }
+	} else {
+	    // check start time and end time
+	}
+	return null;
+    }
+
+    /*
+     * find by key 
+     */
+    public static DfsThroughput find(String timestamp, String host) {
+	String cluster = getCluster();
+	DatabaseWriter dbw = new DatabaseWriter(cluster);
+
+	if (timestamp != null) {
+	    // get simple value
+            try {
+		Map<String, String> criteriaMap = new HashMap<String,String>();
+		criteriaMap.put("timestamp",convertLongToDateString(Long.parseLong(timestamp)));
+		criteriaMap.put("host",host);
+
+		String query = getCriteriaQuery(DfsThroughputHome.table,criteriaMap);
+	    	ResultSet rs = dbw.query(query);
+	    	if (rs.next()) {
+		    DfsThroughput obj = createDfsThroughput(rs);
+		    return obj;
+		}
+	    } catch (Exception e) {
+		log.error("exception:"+e.toString());
+	    }
+	} else {
+	    // check start time and end time
+	}
+	return null;
+    }
+
+    /*
+     * find within the start time and end time
+     */
+    public static Collection<DfsThroughput> findBetween(String starttime, String endtime) {
+	String cluster = getCluster();
+	DatabaseWriter dbw = new DatabaseWriter(cluster);
+
+	Collection<DfsThroughput> collection = new Vector<DfsThroughput>();
+
+	try {
+	    String query = getTimeBetweenQuery(DfsThroughputHome.table,starttime,endtime);	    
+	    ResultSet rs = dbw.query(query);
+	    while (rs.next()) {
+		DfsThroughput obj = createDfsThroughput(rs);
+		collection.add(obj);
+	    }
+	} catch (Exception e) {
+	    log.error("exception:"+e.toString());
+	}
+	return collection;
+    }
+}

Added: hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/rest/services/DiskHome.java
URL: http://svn.apache.org/viewvc/hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/rest/services/DiskHome.java?rev=783442&view=auto
==============================================================================
--- hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/rest/services/DiskHome.java (added)
+++ hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/rest/services/DiskHome.java Wed Jun 10 18:31:05 2009
@@ -0,0 +1,139 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.chukwa.rest.services;
+
+import java.util.*;
+import java.sql.*;
+import javax.naming.InitialContext;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.chukwa.database.DatabaseConfig;
+import org.apache.hadoop.chukwa.database.Macro;
+import org.apache.hadoop.chukwa.util.DatabaseWriter;
+
+import org.apache.hadoop.chukwa.rest.objects.Disk;
+import org.apache.hadoop.chukwa.rest.services.RestHome;
+
+/**
+ * Home object for domain model class Disk.
+ * @see org.apache.hadoop.chukwa.rest.objects.Disk
+ * @author Hibernate Tools
+ */
+public class DiskHome extends RestHome {
+    private static String table="[disk]";
+    private static final Log log = LogFactory
+	.getLog(DiskHome.class);
+
+    /*
+     * convert from a result set record to an object
+     */
+    private static Disk createDisk(ResultSet rs) {
+	Disk obj = null;
+	try {
+	    obj = new Disk(
+			    rs.getTimestamp("timestamp"),
+			    rs.getString("host"),
+			    rs.getString("mount"),
+			    rs.getDouble("used"),
+			    rs.getDouble("available"),
+			    rs.getDouble("used_percent"),
+			    rs.getString("fs")
+			    );
+	} catch (Exception e) {
+	}
+	return obj;
+    }
+    
+    /*
+     * find by timestamp
+     */
+    public static Disk find(String timestamp) {
+	String cluster = getCluster();
+	DatabaseWriter dbw = new DatabaseWriter(cluster);
+
+	if (timestamp != null) {
+	    // get simple value
+            try {
+		String query = getSingleQuery(DiskHome.table,"timestamp",timestamp);
+	    	ResultSet rs = dbw.query(query);
+	    	if (rs.next()) {
+		    Disk obj = createDisk(rs);
+		    return obj;
+		}
+	    } catch (Exception e) {
+		log.error("exception:"+e.toString());
+	    }
+	} else {
+	    // check start time and end time
+	}
+	return null;
+    }
+
+    /*
+     * find by key 
+     */
+    public static Disk find(String timestamp, String host, String mount) {
+	String cluster = getCluster();
+	DatabaseWriter dbw = new DatabaseWriter(cluster);
+
+	if (timestamp != null) {
+	    // get simple value
+            try {
+		Map<String, String> criteriaMap = new HashMap<String,String>();
+		criteriaMap.put("timestamp",convertLongToDateString(Long.parseLong(timestamp)));
+		criteriaMap.put("host",host);
+		criteriaMap.put("mount",mount);
+
+		String query = getCriteriaQuery(DiskHome.table,criteriaMap);
+	    	ResultSet rs = dbw.query(query);
+	    	if (rs.next()) {
+		    Disk obj = createDisk(rs);
+		    return obj;
+		}
+	    } catch (Exception e) {
+		log.error("exception:"+e.toString());
+	    }
+	} else {
+	    // check start time and end time
+	}
+	return null;
+    }
+
+    /*
+     * find within the start time and end time
+     */
+    public static Collection<Disk> findBetween(String starttime, String endtime) {
+	String cluster = getCluster();
+	DatabaseWriter dbw = new DatabaseWriter(cluster);
+
+	Collection<Disk> collection = new Vector<Disk>();
+
+	try {
+	    String query = getTimeBetweenQuery(DiskHome.table,starttime,endtime);	    
+	    ResultSet rs = dbw.query(query);
+	    while (rs.next()) {
+		Disk obj = createDisk(rs);
+		collection.add(obj);
+	    }
+	} catch (Exception e) {
+	    log.error("exception:"+e.toString());
+	}
+	return collection;
+    }
+}

Added: hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/rest/services/HadoopJvmHome.java
URL: http://svn.apache.org/viewvc/hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/rest/services/HadoopJvmHome.java?rev=783442&view=auto
==============================================================================
--- hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/rest/services/HadoopJvmHome.java (added)
+++ hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/rest/services/HadoopJvmHome.java Wed Jun 10 18:31:05 2009
@@ -0,0 +1,152 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.chukwa.rest.services;
+
+import java.util.*;
+import java.sql.*;
+import javax.naming.InitialContext;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.chukwa.database.DatabaseConfig;
+import org.apache.hadoop.chukwa.database.Macro;
+import org.apache.hadoop.chukwa.util.DatabaseWriter;
+
+import org.apache.hadoop.chukwa.rest.objects.HadoopJvm;
+import org.apache.hadoop.chukwa.rest.services.RestHome;
+
+/**
+ * Home object for domain model class HadoopJvm.
+ * @see org.apache.hadoop.chukwa.rest.objects.HadoopJvm
+ * @author Hibernate Tools
+ */
+public class HadoopJvmHome extends RestHome {
+    private static String table="[hadoop_jvm]";
+    private static final Log log = LogFactory
+	.getLog(HadoopJvmHome.class);
+
+    /*
+     * convert from a result set record to an object
+     */
+    private static HadoopJvm createHadoopJvm(ResultSet rs) {
+	HadoopJvm obj = null;
+	try {
+	    obj = new HadoopJvm(
+				      rs.getTimestamp("timestamp"),
+				      rs.getString("host"),
+				      rs.getString("process_name"),
+
+				      rs.getDouble("gc_timemillis"),
+				      rs.getDouble("gc_count"),
+				      rs.getDouble("log_error"),
+				      rs.getDouble("log_fatal"),
+				      rs.getDouble("log_info"),
+				      rs.getDouble("log_warn"),
+				      rs.getDouble("mem_heap_committed_m"),
+				      rs.getDouble("mem_heap_used_m"),
+				      rs.getDouble("mem_non_heap_committed_m"),
+				      rs.getDouble("mem_non_heap_used_m"),
+				      rs.getDouble("threads_blocked"),
+				      rs.getDouble("threads_new"),
+				      rs.getDouble("threads_runnable"),
+				      rs.getDouble("threads_terminated"),
+				      rs.getDouble("threads_timed_waiting"),
+				      rs.getDouble("threads_waiting")
+				      );
+	} catch (Exception e) {
+	}
+	return obj;
+    }
+    
+    /*
+     * find by timestamp
+     */
+    public static HadoopJvm find(String timestamp) {
+	String cluster = getCluster();
+	DatabaseWriter dbw = new DatabaseWriter(cluster);
+
+	if (timestamp != null) {
+	    // get simple value
+            try {
+		String query = getSingleQuery(HadoopJvmHome.table,"timestamp",timestamp);
+	    	ResultSet rs = dbw.query(query);
+	    	if (rs.next()) {
+		    HadoopJvm obj = createHadoopJvm(rs);
+		    return obj;
+		}
+	    } catch (Exception e) {
+		log.error("exception:"+e.toString());
+	    }
+	} else {
+	    // check start time and end time
+	}
+	return null;
+    }
+
+    /*
+     * find by key 
+     */
+    public static HadoopJvm find(String timestamp, String host, String process_name) {
+	String cluster = getCluster();
+	DatabaseWriter dbw = new DatabaseWriter(cluster);
+
+	if (timestamp != null) {
+	    // get simple value
+            try {
+		Map<String, String> criteriaMap = new HashMap<String,String>();
+		criteriaMap.put("timestamp",convertLongToDateString(Long.parseLong(timestamp)));
+		criteriaMap.put("host",host);
+		criteriaMap.put("process_name",process_name);
+
+		String query = getCriteriaQuery(HadoopJvmHome.table,criteriaMap);
+	    	ResultSet rs = dbw.query(query);
+	    	if (rs.next()) {
+		    HadoopJvm obj = createHadoopJvm(rs);
+		    return obj;
+		}
+	    } catch (Exception e) {
+		log.error("exception:"+e.toString());
+	    }
+	} else {
+	    // check start time and end time
+	}
+	return null;
+    }
+
+    /*
+     * find within the start time and end time
+     */
+    public static Collection<HadoopJvm> findBetween(String starttime, String endtime) {
+	String cluster = getCluster();
+	DatabaseWriter dbw = new DatabaseWriter(cluster);
+
+	Collection<HadoopJvm> collection = new Vector<HadoopJvm>();
+
+	try {
+	    String query = getTimeBetweenQuery(HadoopJvmHome.table,starttime,endtime);	    
+	    ResultSet rs = dbw.query(query);
+	    while (rs.next()) {
+		HadoopJvm obj = createHadoopJvm(rs);
+		collection.add(obj);
+	    }
+	} catch (Exception e) {
+	    log.error("exception:"+e.toString());
+	}
+	return collection;
+    }
+}
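
HadoopJvmHome widens the key to three parts (timestamp, host, process_name). A minimal sketch of that call, with hypothetical host and process-name values:

    import org.apache.hadoop.chukwa.rest.objects.HadoopJvm;
    import org.apache.hadoop.chukwa.rest.services.HadoopJvmHome;

    public class HadoopJvmLookupExample {
        public static void main(String[] args) {
            // Three-part key: timestamp, host, and the JVM's process name.
            // "node1.example.com" and "DataNode" are illustrative values only.
            HadoopJvm jvm = HadoopJvmHome.find("1244657465000", "node1.example.com", "DataNode");
            System.out.println(jvm != null ? "jvm metrics row found" : "no match");
        }
    }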

Added: hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/rest/services/HadoopRpcHome.java
URL: http://svn.apache.org/viewvc/hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/rest/services/HadoopRpcHome.java?rev=783442&view=auto
==============================================================================
--- hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/rest/services/HadoopRpcHome.java (added)
+++ hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/rest/services/HadoopRpcHome.java Wed Jun 10 18:31:05 2009
@@ -0,0 +1,161 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.chukwa.rest.services;
+
+import java.util.*;
+import java.sql.*;
+import javax.naming.InitialContext;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.chukwa.database.DatabaseConfig;
+import org.apache.hadoop.chukwa.database.Macro;
+import org.apache.hadoop.chukwa.util.DatabaseWriter;
+
+import org.apache.hadoop.chukwa.rest.objects.HadoopRpc;
+import org.apache.hadoop.chukwa.rest.services.RestHome;
+
+/**
+ * Home object for domain model class HadoopRpc.
+ * @see org.apache.hadoop.chukwa.rest.objects.HadoopRpc
+ * @author Hibernate Tools
+ */
+public class HadoopRpcHome extends RestHome {
+    private static String table="[hadoop_rpc]";
+    private static final Log log = LogFactory
+	.getLog(HadoopRpcHome.class);
+
+    /*
+     * convert from a result set record to an object
+     */
+    private static HadoopRpc createHadoopRpc(ResultSet rs) {
+	HadoopRpc obj = null;
+	try {
+	    obj = new HadoopRpc(
+				      rs.getTimestamp("timestamp"),
+				      rs.getString("host"),
+				      rs.getDouble("rpc_processing_time_avg_time"),
+				      rs.getDouble("rpc_processing_time_num_ops"),
+				      rs.getDouble("rpc_queue_time_avg_time"),
+				      rs.getDouble("rpc_queue_time_num_ops"),
+				      rs.getDouble("get_build_version_avg_time"),
+				      rs.getDouble("get_build_version_num_ops"),
+				      rs.getDouble("get_job_counters_avg_time"),
+				      rs.getDouble("get_job_counters_num_ops"),
+				      rs.getDouble("get_job_profile_avg_time"),
+				      rs.getDouble("get_job_profile_num_ops"),
+				      rs.getDouble("get_job_status_avg_time"),
+				      rs.getDouble("get_job_status_num_ops"),
+				      rs.getDouble("get_new_job_id_avg_time"),
+				      rs.getDouble("get_new_job_id_num_ops"),
+				      rs.getDouble("get_protocol_version_avg_time"),
+				      rs.getDouble("get_protocol_version_num_ops"),
+				      rs.getDouble("get_system_dir_avg_time"),
+				      rs.getDouble("get_system_dir_num_ops"),
+				      rs.getDouble("get_task_completion_events_avg_time"),
+				      rs.getDouble("get_task_completion_events_num_ops"),
+				      rs.getDouble("get_task_diagnostics_avg_time"),
+				      rs.getDouble("get_task_diagnostics_num_ops"),
+				      rs.getDouble("heartbeat_avg_time"),
+				      rs.getDouble("heartbeat_num_ops"),
+				      rs.getDouble("killJob_avg_time"),
+				      rs.getDouble("killJob_num_ops"),
+				      rs.getDouble("submit_job_avg_time"),
+				      rs.getDouble("submit_job_num_ops")
+				      );
+	} catch (Exception e) {
+	    log.error("exception:"+e.toString());
+	}
+	return obj;
+    }
+    
+    /*
+     * find by timestamp
+     */
+    public static HadoopRpc find(String timestamp) {
+	String cluster = getCluster();
+	DatabaseWriter dbw = new DatabaseWriter(cluster);
+
+	if (timestamp != null) {
+	    // get simple value
+            try {
+		String query = getSingleQuery(HadoopRpcHome.table,"timestamp",timestamp);
+	    	ResultSet rs = dbw.query(query);
+	    	if (rs.next()) {
+		    HadoopRpc obj = createHadoopRpc(rs);
+		    return obj;
+		}
+	    } catch (Exception e) {
+		log.error("exception:"+e.toString());
+	    }
+	} else {
+	    // check start time and end time
+	}
+	return null;
+    }
+
+    /*
+     * find by key 
+     */
+    public static HadoopRpc find(String timestamp, String host) {
+	String cluster = getCluster();
+	DatabaseWriter dbw = new DatabaseWriter(cluster);
+
+	if (timestamp != null) {
+	    // get simple value
+            try {
+		Map<String, String> criteriaMap = new HashMap<String,String>();
+		criteriaMap.put("timestamp",convertLongToDateString(Long.parseLong(timestamp)));
+		criteriaMap.put("host",host);
+
+		String query = getCriteriaQuery(HadoopRpcHome.table,criteriaMap);
+	    	ResultSet rs = dbw.query(query);
+	    	if (rs.next()) {
+		    HadoopRpc obj = createHadoopRpc(rs);
+		    return obj;
+		}
+	    } catch (Exception e) {
+		log.error("exception:"+e.toString());
+	    }
+	} else {
+	    // check start time and end time
+	}
+	return null;
+    }
+
+    /*
+     * find within the start time and end time
+     */
+    public static Collection<HadoopRpc> findBetween(String starttime, String endtime) {
+	String cluster = getCluster();
+	DatabaseWriter dbw = new DatabaseWriter(cluster);
+
+	Collection<HadoopRpc> collection = new Vector<HadoopRpc>();
+
+	try {
+	    String query = getTimeBetweenQuery(HadoopRpcHome.table,starttime,endtime);	    
+	    ResultSet rs = dbw.query(query);
+	    while (rs.next()) {
+		HadoopRpc obj = createHadoopRpc(rs);
+		collection.add(obj);
+	    }
+	} catch (Exception e) {
+	    log.error("exception:"+e.toString());
+	}
+	return collection;
+    }
+}

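A hedged usage sketch for the lookups above; the host name and the use of the current time are made-up illustration values. find(String) parses its argument as a long and formats it with RestHome.convertLongToDateString, so an epoch-millisecond timestamp is assumed, and find(String, String) additionally filters on the host column.

    import org.apache.hadoop.chukwa.rest.objects.HadoopRpc;
    import org.apache.hadoop.chukwa.rest.services.HadoopRpcHome;

    public class HadoopRpcLookupExample {
        public static void main(String[] args) {
            System.setProperty("CLUSTER", "demo");              // illustrative cluster name

            String ts = String.valueOf(System.currentTimeMillis());   // epoch milliseconds assumed
            HadoopRpc byTime = HadoopRpcHome.find(ts);
            HadoopRpc byTimeAndHost = HadoopRpcHome.find(ts, "node-001.example.com");  // hypothetical host

            System.out.println("by timestamp:      " + (byTime == null ? "no row" : "found"));
            System.out.println("by timestamp+host: " + (byTimeAndHost == null ? "no row" : "found"));
        }
    }
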
Added: hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/rest/services/MrJobConfHome.java
URL: http://svn.apache.org/viewvc/hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/rest/services/MrJobConfHome.java?rev=783442&view=auto
==============================================================================
--- hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/rest/services/MrJobConfHome.java (added)
+++ hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/rest/services/MrJobConfHome.java Wed Jun 10 18:31:05 2009
@@ -0,0 +1,83 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.chukwa.rest.services;
+
+import java.util.*;
+import java.sql.*;
+import javax.naming.InitialContext;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.chukwa.database.DatabaseConfig;
+import org.apache.hadoop.chukwa.database.Macro;
+import org.apache.hadoop.chukwa.util.DatabaseWriter;
+
+import org.apache.hadoop.chukwa.rest.objects.MrJobConf;
+import org.apache.hadoop.chukwa.rest.services.RestHome;
+
+/**
+ * Home object for domain model class MrJobConf.
+ * @see org.apache.hadoop.chukwa.rest.objects.MrJobConf
+ * @author Hibernate Tools
+ */
+public class MrJobConfHome extends RestHome {
+    private static String table="[mr_job_conf]";
+    private static final Log log = LogFactory
+	.getLog(MrJobConfHome.class);
+
+    private static MrJobConf createMrJobConf(ResultSet rs) {
+	MrJobConf obj = null;
+	try {
+	    obj = new MrJobConf(
+				      rs.getString("job_id"),
+				      rs.getTimestamp("ts"),
+				      rs.getString("mr_output_key_cls"),
+				      rs.getString("mr_runner_cls"),
+				      rs.getString("mr_output_value_cls"),
+				      rs.getString("mr_input_fmt_cls"),
+				      rs.getString("mr_output_fmt_cls"),
+				      rs.getString("mr_reducer_cls"),
+				      rs.getString("mr_mapper_cls")
+				      );
+	} catch (Exception e) {
+	    log.error("exception:"+e.toString());
+	}
+	return obj;
+    }
+    
+    public static MrJobConf find(String job_id) {
+	String cluster = getCluster();
+	DatabaseWriter dbw = new DatabaseWriter(cluster);
+	
+	if (job_id != null) {
+	    // get simple value
+            try {
+		String query = getSingleQuery(MrJobConfHome.table,"job_id",job_id);
+	    	ResultSet rs = dbw.query(query);
+	    	if (rs.next()) {
+		    MrJobConf obj = createMrJobConf(rs);
+		    return obj;
+		}
+	    } catch (Exception e) {
+		log.error("exception:"+e.toString());
+	    }
+	} else {
+	    // check start time and end time
+	}
+	return null;
+    }
+}

Added: hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/rest/services/MrJobHome.java
URL: http://svn.apache.org/viewvc/hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/rest/services/MrJobHome.java?rev=783442&view=auto
==============================================================================
--- hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/rest/services/MrJobHome.java (added)
+++ hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/rest/services/MrJobHome.java Wed Jun 10 18:31:05 2009
@@ -0,0 +1,109 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.chukwa.rest.services;
+
+import java.util.*;
+import java.sql.*;
+import javax.naming.InitialContext;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.chukwa.database.DatabaseConfig;
+import org.apache.hadoop.chukwa.database.Macro;
+import org.apache.hadoop.chukwa.util.DatabaseWriter;
+
+import org.apache.hadoop.chukwa.rest.objects.MrJob;
+import org.apache.hadoop.chukwa.rest.services.RestHome;
+
+/**
+ * Home object for domain model class MrJob.
+ * @see org.apache.hadoop.chukwa.rest.objects.MrJob
+ * @author Hibernate Tools
+ */
+public class MrJobHome extends RestHome {
+    private static String table="[mr_job]";
+    private static final Log log = LogFactory
+	.getLog(MrJobHome.class);
+
+    private static MrJob createMrJob(ResultSet rs) {
+	MrJob obj = null;
+	try {
+	    obj = new MrJob(
+			      rs.getString("job_id"),
+			      rs.getString("user"),
+			      rs.getString("queue"),
+			      rs.getString("status"),
+			      rs.getTimestamp("submit_time"),
+			      rs.getTimestamp("launch_time"),
+			      rs.getTimestamp("finish_time"),
+			      rs.getLong("hdfs_bytes_read"),
+			      rs.getLong("hdfs_bytes_written"),
+			      rs.getLong("local_bytes_read"),
+			      rs.getLong("local_bytes_written"),
+			      rs.getLong("launched_map_tasks"),
+			      rs.getLong("launched_reduce_tasks"),
+			      rs.getLong("data_local_map_tasks"),
+			      rs.getLong("data_local_reduce_tasks"),
+			      rs.getLong("map_input_bytes"),
+			      rs.getLong("map_output_bytes"),
+			      rs.getLong("map_input_records"),
+			      rs.getLong("map_output_records"),
+			      rs.getLong("combine_input_records"),
+			      rs.getLong("combine_output_records"),
+			      rs.getLong("spilled_records"),
+			      rs.getLong("reduce_input_groups"),
+			      rs.getLong("reduce_output_groups"),
+			      rs.getLong("reduce_input_records"),
+			      rs.getLong("reduce_output_records"),
+			      rs.getString("jobconf"),
+			      rs.getLong("finished_maps"),
+			      rs.getLong("finished_reduces"),
+			      rs.getLong("failed_maps"),
+			      rs.getLong("failed_reduces"),
+			      rs.getLong("total_maps"),
+			      rs.getLong("total_reduces"),
+			      rs.getLong("reduce_shuffle_bytes")
+			      );
+	} catch (Exception e) {
+	    log.error("exception:"+e.toString());
+	}
+	return obj;
+    }
+    
+    public static MrJob find(String job_id) {
+	String cluster = getCluster();
+	DatabaseWriter dbw = new DatabaseWriter(cluster);
+
+	if (job_id != null) {
+	    // get simple value
+            try {
+		String query = getSingleQuery(MrJobHome.table,"job_id",job_id);
+	    	ResultSet rs = dbw.query(query);
+	    	if (rs.next()) {
+		    MrJob obj = createMrJob(rs);
+		    return obj;
+		}
+	    } catch (Exception e) {
+		log.error("exception:"+e.toString());
+	    }
+	} else {
+	    // check start time and end time
+	}
+	return null;
+    }
+}

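As a rough illustration of the job-id lookups, the sketch below pairs MrJobHome with the MrJobConfHome class added earlier in this commit; the job id and cluster name are placeholders, and both finders return null when no row matches.

    import org.apache.hadoop.chukwa.rest.objects.MrJob;
    import org.apache.hadoop.chukwa.rest.objects.MrJobConf;
    import org.apache.hadoop.chukwa.rest.services.MrJobConfHome;
    import org.apache.hadoop.chukwa.rest.services.MrJobHome;

    public class MrJobLookupExample {
        public static void main(String[] args) {
            System.setProperty("CLUSTER", "demo");            // illustrative cluster name
            String jobId = "job_200906100001_0001";           // placeholder job id

            MrJob job = MrJobHome.find(jobId);                // row from [mr_job], or null
            MrJobConf conf = MrJobConfHome.find(jobId);       // row from [mr_job_conf], or null

            System.out.println("mr_job row:      " + (job == null ? "missing" : "present"));
            System.out.println("mr_job_conf row: " + (conf == null ? "missing" : "present"));
        }
    }
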
Added: hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/rest/services/MrTaskHome.java
URL: http://svn.apache.org/viewvc/hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/rest/services/MrTaskHome.java?rev=783442&view=auto
==============================================================================
--- hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/rest/services/MrTaskHome.java (added)
+++ hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/rest/services/MrTaskHome.java Wed Jun 10 18:31:05 2009
@@ -0,0 +1,104 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.chukwa.rest.services;
+
+import java.util.*;
+import java.sql.*;
+import javax.naming.InitialContext;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.chukwa.database.DatabaseConfig;
+import org.apache.hadoop.chukwa.database.Macro;
+import org.apache.hadoop.chukwa.util.DatabaseWriter;
+
+import org.apache.hadoop.chukwa.rest.objects.MrTask;
+import org.apache.hadoop.chukwa.rest.services.RestHome;
+
+/**
+ * Home object for domain model class MrTask.
+ * @see org.apache.hadoop.chukwa.rest.objects.MrTask
+ * @author Hibernate Tools
+ */
+public class MrTaskHome extends RestHome {
+    private static String table="[mr_task]";
+    private static final Log log = LogFactory
+	.getLog(MrTaskHome.class);
+
+    private static MrTask createMrTask(ResultSet rs) {
+	MrTask obj = null;
+	try {
+	    obj = new MrTask(
+
+				rs.getString("task_id"),
+				rs.getString("job_id"),
+				rs.getTimestamp("start_time"),
+				rs.getTimestamp("finish_time"),
+				rs.getString("status"),
+				rs.getByte("attempts"),
+				rs.getLong("hdfs_bytes_read"),
+				rs.getLong("hdfs_bytes_written"),
+				rs.getLong("local_bytes_read"),
+				rs.getLong("local_bytes_written"),
+				rs.getLong("map_input_bytes"),
+				rs.getLong("map_output_bytes"),
+				rs.getLong("map_input_records"),
+				rs.getLong("map_output_records"),
+				rs.getLong("combine_input_records"),
+				rs.getLong("combine_output_records"),
+				rs.getLong("spilled_records"),
+				rs.getLong("reduce_input_groups"),
+				rs.getLong("reduce_output_groups"),
+				rs.getLong("reduce_input_records"),
+				rs.getLong("reduce_output_records"),
+				rs.getLong("reduce_input_bytes"),
+				rs.getLong("reduce_output_bytes"),
+				rs.getString("type"),
+				rs.getLong("reduce_shuffle_bytes"),
+				rs.getString("hostname"),
+				rs.getTimestamp("shuffle_finished"),
+				rs.getTimestamp("sort_finished"),
+				rs.getLong("spilts")
+			      );
+	} catch (Exception e) {
+	    log.error("exception:"+e.toString());
+	}
+	return obj;
+    }
+    
+    public static MrTask find(String task_id) {
+	String cluster = getCluster();
+	DatabaseWriter dbw = new DatabaseWriter(cluster);
+
+	if (task_id != null) {
+	    // get simple value
+            try {
+		String query = getSingleQuery(MrTaskHome.table,"task_id",task_id);
+	    	ResultSet rs = dbw.query(query);
+	    	if (rs.next()) {
+		    MrTask obj = createMrTask(rs);
+		    return obj;
+		}
+	    } catch (Exception e) {
+		log.error("exception:"+e.toString());
+	    }
+	} else {
+	    // check start time and end time
+	}
+	return null;
+    }
+}

Added: hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/rest/services/RestHome.java
URL: http://svn.apache.org/viewvc/hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/rest/services/RestHome.java?rev=783442&view=auto
==============================================================================
--- hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/rest/services/RestHome.java (added)
+++ hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/rest/services/RestHome.java Wed Jun 10 18:31:05 2009
@@ -0,0 +1,132 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.chukwa.rest.services;
+
+import java.util.*;
+import java.text.SimpleDateFormat;
+import org.apache.hadoop.chukwa.hicc.ClusterConfig;
+import org.apache.commons.logging.*;
+import org.apache.hadoop.chukwa.database.DatabaseConfig;
+import org.apache.hadoop.chukwa.database.Macro;
+import org.apache.hadoop.chukwa.util.DatabaseWriter;
+
+public class RestHome  {
+    private static final String DATE_FORMAT_NOW = "yyyy-MM-dd HH:mm:ss";
+    protected static Log log = LogFactory.getLog(RestHome.class);
+
+    public static String getCluster() {
+	String cluster=System.getProperty("CLUSTER", "");
+	if (cluster.compareTo("")==0)  {
+	    ClusterConfig cc=new ClusterConfig();
+	    Iterator<String> keys = cc.getClusters();
+	    if (keys.hasNext()) {
+		cluster=keys.next();
+	    }
+	    System.setProperty("CLUSTER",cluster);
+	}
+
+        if (cluster == null) {
+	    cluster = "demo";
+	}
+
+	return cluster;
+    }
+
+    public static String convertLongToDateString(long date) {
+	SimpleDateFormat sdf = new SimpleDateFormat(DATE_FORMAT_NOW);
+	log.error("date:"+sdf.format(date));
+	return sdf.format(date);	
+    }
+
+    public static long convertDateLongStringToLong(String d) {
+	Long l=Long.parseLong(d);
+	return l;
+    }
+
+    public static String getSingleQuery(String table, String field, String timestamp) {
+	Calendar now = Calendar.getInstance();
+
+	// default time
+	long startTime = now.getTimeInMillis();
+	long endTime = now.getTimeInMillis();
+
+	if (field.compareTo("timestamp")==0) {
+	    startTime = Long.parseLong(timestamp);
+	    endTime = Long.parseLong(timestamp);
+	}
+
+	Macro mp = new Macro(startTime, endTime, table, null);
+	String query = mp.toString();
+	StringBuilder queryBuilder = new StringBuilder();
+	queryBuilder.append("select * from ");
+	queryBuilder.append(query);
+	if (field.compareTo("timestamp")==0) {
+	    queryBuilder.append(" where "+field+" = \""+convertLongToDateString(Long.parseLong(timestamp))+"\"");
+	} else {
+	    queryBuilder.append(" where "+field+" = \""+timestamp+"\"");
+	}
+	query = queryBuilder.toString();
+	log.error("query:"+query);
+	return query;
+    }
+
+    public static String getTimeBetweenQuery(String table, String starttime, String endtime) {
+	long startTime = convertDateLongStringToLong(starttime)*1000;
+	long endTime = convertDateLongStringToLong(endtime)*1000;
+
+	StringBuilder queryBuilder = new StringBuilder();
+	queryBuilder.append("select * from ");
+	queryBuilder.append(table);
+	queryBuilder.append(" where timestamp between \"[start]\" and \"[end]\"");
+	String query = queryBuilder.toString();
+	Macro mp = new Macro(startTime, endTime, query, null);
+	query = mp.toString();
+	log.error("query:"+query);
+	return query;
+    }
+
+    public static String getCriteriaQuery(String table, Map<String, String> criteria) {
+	Calendar now = Calendar.getInstance();
+
+	// default time
+	long startTime = now.getTimeInMillis()-(7*24*60*60*1000);
+	long endTime = now.getTimeInMillis();
+
+	Macro mp = new Macro(startTime, endTime, table, null);
+	String query = mp.toString();
+	StringBuilder queryBuilder = new StringBuilder();
+	queryBuilder.append("select * from ");
+	queryBuilder.append(query);
+	queryBuilder.append(" where ");
+	int count=0;
+	for (Map.Entry<String, String> e : criteria.entrySet()) {
+	    if (count != 0) {
+		queryBuilder.append(" and ");
+	    }
+	    queryBuilder.append(e.getKey()+"=\""+e.getValue()+"\"");
+	    count++;
+	}
+	query = queryBuilder.toString();
+	log.error("query:"+query);
+	return query;
+    }
+
+}

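To show what the query helpers above produce, here is a small sketch that prints the generated SQL instead of executing it. It assumes the Chukwa configuration that Macro relies on to expand bracketed table names is available; the table name, column values and host are illustrative.

    import java.util.HashMap;
    import java.util.Map;
    import org.apache.hadoop.chukwa.rest.services.RestHome;

    public class RestHomeQueryExample {
        public static void main(String[] args) {
            System.setProperty("CLUSTER", "demo");            // illustrative cluster name
            long now = System.currentTimeMillis();

            // Epoch milliseconds formatted as "yyyy-MM-dd HH:mm:ss".
            System.out.println(RestHome.convertLongToDateString(now));

            // WHERE clause built from a column/value map; the bracketed table name
            // is expanded by Macro over the default seven-day window ending now.
            Map<String, String> criteria = new HashMap<String, String>();
            criteria.put("timestamp", RestHome.convertLongToDateString(now));
            criteria.put("host", "node-001.example.com");     // hypothetical host
            System.out.println(RestHome.getCriteriaQuery("[hadoop_rpc]", criteria));
        }
    }
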
Added: hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/rest/services/SystemMetricsHome.java
URL: http://svn.apache.org/viewvc/hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/rest/services/SystemMetricsHome.java?rev=783442&view=auto
==============================================================================
--- hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/rest/services/SystemMetricsHome.java (added)
+++ hadoop/chukwa/trunk/src/java/org/apache/hadoop/chukwa/rest/services/SystemMetricsHome.java Wed Jun 10 18:31:05 2009
@@ -0,0 +1,187 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.chukwa.rest.services;
+
+import java.util.*;
+import java.sql.*;
+import javax.naming.InitialContext;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.chukwa.database.DatabaseConfig;
+import org.apache.hadoop.chukwa.database.Macro;
+import org.apache.hadoop.chukwa.util.DatabaseWriter;
+
+import org.apache.hadoop.chukwa.rest.objects.SystemMetrics;
+import org.apache.hadoop.chukwa.rest.services.RestHome;
+
+/**
+ * Home object for domain model class SystemMetrics.
+ * @see org.apache.hadoop.chukwa.rest.objects.SystemMetrics
+ * @author Hibernate Tools
+ */
+public class SystemMetricsHome extends RestHome {
+    private static String table="[system_metrics]";
+    private static final Log log = LogFactory
+	.getLog(SystemMetricsHome.class);
+
+    /*
+     * convert from a result set record to an object
+     */
+    private static SystemMetrics createSystemMetrics(ResultSet rs) {
+	SystemMetrics obj = null;
+	try {
+	    obj = new SystemMetrics(
+					      rs.getTimestamp("timestamp"),
+					      rs.getString("host"),
+					      
+					      rs.getDouble("load_15"),
+					      rs.getDouble("load_5"),
+					      rs.getDouble("load_1"),
+					      rs.getDouble("task_total"),
+					      rs.getDouble("task_running"),
+					      rs.getDouble("task_sleep"),
+					      rs.getDouble("task_stopped"),
+					      rs.getDouble("task_zombie"),
+					      rs.getDouble("mem_total"),
+					      rs.getDouble("mem_buffers"),
+					      rs.getDouble("mem_cached"),
+					      rs.getDouble("mem_used"),
+					      rs.getDouble("mem_free"),
+					      rs.getDouble("eth0_rxerrs"),
+					      rs.getDouble("eth0_rxbyts"),
+					      rs.getDouble("eth0_rxpcks"),
+					      rs.getDouble("eth0_rxdrops"),
+					      rs.getDouble("eth0_txerrs"),
+					      rs.getDouble("eth0_txbyts"),
+					      rs.getDouble("eth0_txpcks"),
+					      rs.getDouble("eth0_txdrops"),
+					      rs.getDouble("eth1_rxerrs"),
+					      rs.getDouble("eth1_rxbyts"),
+					      rs.getDouble("eth1_rxpcks"),
+					      rs.getDouble("eth1_rxdrops"),
+					      rs.getDouble("eth1_txerrs"),
+					      rs.getDouble("eth1_txbyts"),
+					      rs.getDouble("eth1_txpcks"),
+					      rs.getDouble("eth1_txdrops"),
+					      rs.getDouble("sda_rkbs"),
+					      rs.getDouble("sda_wkbs"),
+					      rs.getDouble("sdb_rkbs"),
+					      rs.getDouble("sdb_wkbs"),
+					      rs.getDouble("sdc_rkbs"),
+					      rs.getDouble("sdc_wkbs"),
+					      rs.getDouble("sdd_rkbs"),
+					      rs.getDouble("sdd_wkbs"),
+					      rs.getFloat("cpu_idle_pcnt"),
+					      rs.getFloat("cpu_nice_pcnt"),
+					      rs.getFloat("cpu_system_pcnt"),
+					      rs.getFloat("cpu_user_pcnt"),
+					      rs.getFloat("cpu_hirq_pcnt"),
+					      rs.getFloat("cpu_sirq_pcnt"),
+					      rs.getFloat("iowait_pcnt"),
+					      rs.getFloat("mem_buffers_pcnt"),
+					      rs.getFloat("mem_used_pcnt"),
+					      rs.getFloat("eth0_busy_pcnt"),
+					      rs.getFloat("eth1_busy_pcnt"),
+					      rs.getFloat("sda_busy_pcnt"),
+					      rs.getFloat("sdb_busy_pcnt"),
+					      rs.getFloat("sdc_busy_pcnt"),
+					      rs.getFloat("sdd_busy_pcnt"),
+					      rs.getFloat("swap_used_pcnt")
+					      );
+	} catch (Exception e) {
+	    log.error("exception:"+e.toString());
+	}
+	return obj;
+    }
+    
+    /*
+     * find by timestamp
+     */
+    public static SystemMetrics find(String timestamp) {
+	String cluster = getCluster();
+	DatabaseWriter dbw = new DatabaseWriter(cluster);
+
+	if (timestamp != null) {
+	    // get simple value
+            try {
+		String query = getSingleQuery(SystemMetricsHome.table,"timestamp",timestamp);
+	    	ResultSet rs = dbw.query(query);
+	    	if (rs.next()) {
+		    SystemMetrics obj = createSystemMetrics(rs);
+		    return obj;
+		}
+	    } catch (Exception e) {
+		log.error("exception:"+e.toString());
+	    }
+	} else {
+	    // check start time and end time
+	}
+	return null;
+    }
+
+    /*
+     * find by key 
+     */
+    public static SystemMetrics find(String timestamp, String host) {
+	String cluster = getCluster();
+	DatabaseWriter dbw = new DatabaseWriter(cluster);
+
+	if (timestamp != null) {
+	    // get simple value
+            try {
+		Map<String, String> criteriaMap = new HashMap<String,String>();
+		criteriaMap.put("timestamp",convertLongToDateString(Long.parseLong(timestamp)));
+		criteriaMap.put("host",host);
+
+		String query = getCriteriaQuery(SystemMetricsHome.table,criteriaMap);
+	    	ResultSet rs = dbw.query(query);
+	    	if (rs.next()) {
+		    SystemMetrics obj = createSystemMetrics(rs);
+		    return obj;
+		}
+	    } catch (Exception e) {
+		log.error("exception:"+e.toString());
+	    }
+	} else {
+	    // check start time and end time
+	}
+	return null;
+    }
+
+    /*
+     * find within the start time and end time
+     */
+    public static Collection<SystemMetrics> findBetween(String starttime, String endtime) {
+	String cluster = getCluster();
+	DatabaseWriter dbw = new DatabaseWriter(cluster);
+
+	Collection<SystemMetrics> collection = new Vector<SystemMetrics>();
+
+	try {
+	    String query = getTimeBetweenQuery(SystemMetricsHome.table,starttime,endtime);	    
+	    ResultSet rs = dbw.query(query);
+	    while (rs.next()) {
+		SystemMetrics obj = createSystemMetrics(rs);
+		collection.add(obj);
+	    }
+	} catch (Exception e) {
+	    log.error("exception:"+e.toString());
+	}
+	return collection;
+    }
+}

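One usage note for the range query above: createSystemMetrics returns null when a row fails to convert, and findBetween adds that null to the collection anyway, so callers should be prepared to skip null entries. A hedged sketch (cluster name and ten-minute window are illustrative, epoch seconds assumed as in getTimeBetweenQuery):

    import java.util.Collection;
    import org.apache.hadoop.chukwa.rest.objects.SystemMetrics;
    import org.apache.hadoop.chukwa.rest.services.SystemMetricsHome;

    public class SystemMetricsRangeExample {
        public static void main(String[] args) {
            System.setProperty("CLUSTER", "demo");            // illustrative cluster name

            long nowSec = System.currentTimeMillis() / 1000L;
            Collection<SystemMetrics> rows =
                SystemMetricsHome.findBetween(String.valueOf(nowSec - 600), String.valueOf(nowSec));

            int valid = 0;
            for (SystemMetrics row : rows) {
                if (row != null) {    // rows that failed conversion come back as null
                    valid++;
                }
            }
            System.out.println(valid + " of " + rows.size() + " rows converted");
        }
    }
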
Modified: hadoop/chukwa/trunk/src/test/org/apache/hadoop/chukwa/database/TestDatabaseAggregator.java
URL: http://svn.apache.org/viewvc/hadoop/chukwa/trunk/src/test/org/apache/hadoop/chukwa/database/TestDatabaseAggregator.java?rev=783442&r1=783441&r2=783442&view=diff
==============================================================================
--- hadoop/chukwa/trunk/src/test/org/apache/hadoop/chukwa/database/TestDatabaseAggregator.java (original)
+++ hadoop/chukwa/trunk/src/test/org/apache/hadoop/chukwa/database/TestDatabaseAggregator.java Wed Jun 10 18:31:05 2009
@@ -34,62 +34,23 @@
 import java.util.ArrayList;
 
 public class TestDatabaseAggregator extends TestCase {
+    public TestDatabaseSetup dbSetup = new TestDatabaseSetup();
 
-  long[] timeWindow = {7, 30, 91, 365, 3650};
-  String[] tables = {"system_metrics","disk","cluster_system_metrics","cluster_disk","mr_job","mr_task","dfs_namenode","dfs_datanode","dfs_fsnamesystem","dfs_throughput","hadoop_jvm","hadoop_mapred","hdfs_usage"};
-  String cluster = "demo";
-  long current = Calendar.getInstance().getTimeInMillis();
-
-  public void setUp() {
-    System.setProperty("CLUSTER","demo");
-    DatabaseWriter db = new DatabaseWriter(cluster);
-    String buffer = "";
-    File aFile = new File(System.getenv("CHUKWA_CONF_DIR")
-                 + File.separator + "database_create_tables.sql");
-    buffer = readFile(aFile);
-    String tables[] = buffer.split(";");
-    for(String table : tables) {
-      if(table.length()>5) {
-        db.execute(table);
-      }
-    }
-    db.close();
-    for(int i=0;i<timeWindow.length;i++) {
-      TableCreator tc = new TableCreator();
-      long start = current;
-      long end = current + (timeWindow[i]*1440*60*1000);
-      tc.createTables(start, end);
+    public void setUp() {
+	dbSetup.setUpDatabase();
     }
-  }
 
-  public void tearDown() {
-    DatabaseWriter db = null;
-    try {
-      db = new DatabaseWriter(cluster);
-      ResultSet rs = db.query("show tables");
-      ArrayList<String> list = new ArrayList<String>();
-      while(rs.next()) {
-        String table = rs.getString(1);
-        list.add(table);
-      }
-      for(String table : list) {
-        db.execute("drop table "+table);
-      }
-    } catch(Throwable ex) {
-    } finally {
-      if(db!=null) {
-        db.close();
-      }
+    public void tearDown() {
+	dbSetup.tearDownDatabase();
     }
-  }
 
   public void verifyTable(String table) {
     ChukwaConfiguration cc = new ChukwaConfiguration();
     String query = "select * from ["+table+"];";
-    Macro mp = new Macro(current,query);
+    Macro mp = new Macro(dbSetup.current,query);
     query = mp.toString();
     try {
-      DatabaseWriter db = new DatabaseWriter(cluster);
+      DatabaseWriter db = new DatabaseWriter(dbSetup.cluster);
       ResultSet rs = db.query(query);
       while(rs.next()) {
         int i = 1;
@@ -101,28 +62,9 @@
     }
   }
 
-  public String readFile(File aFile) {
-    StringBuffer contents = new StringBuffer();
-    try {
-      BufferedReader input = new BufferedReader(new FileReader(aFile));
-      try {
-        String line = null; // not declared within while loop
-        while ((line = input.readLine()) != null) {
-          contents.append(line);
-          contents.append(System.getProperty("line.separator"));
-        }
-      } finally {
-        input.close();
-      }
-    } catch (IOException ex) {
-      ex.printStackTrace();
-    }
-    return contents.toString();
-  }
-
   public void testAggregator() {
     Aggregator dba = new Aggregator();
-    DatabaseWriter db = new DatabaseWriter(cluster);
+    DatabaseWriter db = new DatabaseWriter(dbSetup.cluster);
     dba.setWriter(db);
     String queries = Aggregator.getContents(new File(System
         .getenv("CHUKWA_CONF_DIR")

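The test refactoring above moves table creation and teardown into a shared TestDatabaseSetup helper. As a sketch of how another database test could reuse that pattern (the test class and its assertion are hypothetical; only the members visible in this diff, setUpDatabase, tearDownDatabase, cluster and current, are assumed):

    import java.sql.ResultSet;
    import junit.framework.TestCase;
    import org.apache.hadoop.chukwa.database.Macro;
    import org.apache.hadoop.chukwa.util.DatabaseWriter;

    public class TestDatabaseSmoke extends TestCase {
        public TestDatabaseSetup dbSetup = new TestDatabaseSetup();

        public void setUp() {
            dbSetup.setUpDatabase();          // create the per-window tables
        }

        public void tearDown() {
            dbSetup.tearDownDatabase();       // drop everything created in setUp
        }

        public void testSystemMetricsTableExists() throws Exception {
            String query = "select * from [system_metrics];";
            Macro mp = new Macro(dbSetup.current, query);     // expand [table] for the current time
            DatabaseWriter db = new DatabaseWriter(dbSetup.cluster);
            ResultSet rs = db.query(mp.toString());
            assertNotNull(rs);
            db.close();
        }
    }
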

