hdt-commits mailing list archives

From ad...@apache.org
Subject [14/17] git commit: added everything necessary for the server (now cluster) view in the new layout
Date Mon, 28 Jan 2013 03:44:44 GMT
added everything necessary for the server (now cluster) view in the new
layout

Project: http://git-wip-us.apache.org/repos/asf/incubator-hdt/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-hdt/commit/6f00a463
Tree: http://git-wip-us.apache.org/repos/asf/incubator-hdt/tree/6f00a463
Diff: http://git-wip-us.apache.org/repos/asf/incubator-hdt/diff/6f00a463

Branch: refs/heads/master
Commit: 6f00a4632cf7e2a78d6c4f95ac0ecca3beec288b
Parents: ed088df
Author: adamb <adamb@apache.org>
Authored: Fri Jan 25 14:30:41 2013 -0600
Committer: adamb <adamb@apache.org>
Committed: Fri Jan 25 14:30:41 2013 -0600

----------------------------------------------------------------------
 org.apache.hdt.ui/.classpath                       |    1 +
 org.apache.hdt.ui/plugin.xml                       |   16 +
 .../apache/hdt/ui/actions/EditLocationAction.java  |   73 ++
 .../apache/hdt/ui/actions/NewLocationAction.java   |   64 +
 .../src/org/apache/hdt/ui/cluster/ConfProp.java    |  147 +++
 .../org/apache/hdt/ui/cluster/HadoopCluster.java   |  518 ++++++++
 .../src/org/apache/hdt/ui/cluster/HadoopJob.java   |  349 ++++++
 .../hdt/ui/cluster/IHadoopClusterListener.java     |   28 +
 .../org/apache/hdt/ui/cluster/IJobListener.java    |   38 +
 .../org/apache/hdt/ui/cluster/ServerRegistry.java  |  203 +++
 .../org/apache/hdt/ui/cluster/utils/JarModule.java |  146 +++
 .../apache/hdt/ui/dialogs/ErrorMessageDialog.java  |   45 +
 .../src/org/apache/hdt/ui/views/ClusterView.java   |  460 +++++++
 .../hdt/ui/wizards/HadoopLocationWizard.java       |  973 +++++++++++++++
 14 files changed, 3061 insertions(+), 0 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/6f00a463/org.apache.hdt.ui/.classpath
----------------------------------------------------------------------
diff --git a/org.apache.hdt.ui/.classpath b/org.apache.hdt.ui/.classpath
index ad32c83..355df07 100644
--- a/org.apache.hdt.ui/.classpath
+++ b/org.apache.hdt.ui/.classpath
@@ -3,5 +3,6 @@
 	<classpathentry kind="con" path="org.eclipse.jdt.launching.JRE_CONTAINER/org.eclipse.jdt.internal.debug.ui.launcher.StandardVMType/JavaSE-1.6"/>
 	<classpathentry kind="con" path="org.eclipse.pde.core.requiredPlugins"/>
 	<classpathentry kind="src" path="src"/>
+	<classpathentry kind="lib" path="/Users/amberry/tools/hadoop-1.0.4/hadoop-core-1.0.4.jar"/>
 	<classpathentry kind="output" path="bin"/>
 </classpath>

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/6f00a463/org.apache.hdt.ui/plugin.xml
----------------------------------------------------------------------
diff --git a/org.apache.hdt.ui/plugin.xml b/org.apache.hdt.ui/plugin.xml
index 56bcf3a..1beda8c 100644
--- a/org.apache.hdt.ui/plugin.xml
+++ b/org.apache.hdt.ui/plugin.xml
@@ -17,5 +17,21 @@
             name="Hadoop">
       </perspective>
    </extension>
+   <extension
+         point="org.eclipse.ui.views">
+      <category
+            id="org.apache.hdt.ui.views"
+            name="Hadoop">
+      </category>
+      <view
+            allowMultiple="false"
+            category="org.apache.hdt.ui.views"
+            class="org.apache.hdt.ui.views.ClusterView"
+            icon="resources/hadoop-logo-16x16.png"
+            id="org.apache.hdt.ui.ClusterView"
+            name="Hadoop Clusters"
+            restorable="true">
+      </view>
+   </extension>
 
 </plugin>
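
The extension above contributes the "Hadoop Clusters" view to the workbench. For reference, a view registered this way can be opened programmatically through the standard workbench API; a minimal sketch (the view ID comes from the plugin.xml above, the surrounding class is illustrative):

    import org.eclipse.ui.IWorkbenchPage;
    import org.eclipse.ui.PartInitException;
    import org.eclipse.ui.PlatformUI;

    public class OpenClusterViewExample {
      /** Opens the "Hadoop Clusters" view contributed by plugin.xml. */
      public static void openClusterView() {
        IWorkbenchPage page =
            PlatformUI.getWorkbench().getActiveWorkbenchWindow().getActivePage();
        try {
          page.showView("org.apache.hdt.ui.ClusterView"); // id from plugin.xml
        } catch (PartInitException e) {
          e.printStackTrace();
        }
      }
    }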

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/6f00a463/org.apache.hdt.ui/src/org/apache/hdt/ui/actions/EditLocationAction.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.ui/src/org/apache/hdt/ui/actions/EditLocationAction.java b/org.apache.hdt.ui/src/org/apache/hdt/ui/actions/EditLocationAction.java
new file mode 100644
index 0000000..6287449
--- /dev/null
+++ b/org.apache.hdt.ui/src/org/apache/hdt/ui/actions/EditLocationAction.java
@@ -0,0 +1,73 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hdt.ui.actions;
+
+import org.apache.hdt.ui.ImageLibrary;
+import org.apache.hdt.ui.cluster.HadoopCluster;
+import org.apache.hdt.ui.wizards.HadoopLocationWizard;
+import org.apache.hdt.ui.views.ClusterView;
+import org.eclipse.jface.action.Action;
+import org.eclipse.jface.wizard.Wizard;
+import org.eclipse.jface.wizard.WizardDialog;
+
+/**
+ * Action for editing server properties
+ */
+public class EditLocationAction extends Action {
+
+  private ClusterView serverView;
+
+  public EditLocationAction(ClusterView serverView) {
+    this.serverView = serverView;
+
+    setText("Edit Hadoop location...");
+    setImageDescriptor(ImageLibrary.get("server.view.action.location.edit"));
+  }
+
+  @Override
+  public void run() {
+
+    final HadoopCluster server = serverView.getSelectedServer();
+    if (server == null)
+      return;
+
+    WizardDialog dialog = new WizardDialog(null, new Wizard() {
+      private HadoopLocationWizard page = new HadoopLocationWizard(server);
+
+      @Override
+      public void addPages() {
+        super.addPages();
+        setWindowTitle("Edit Hadoop location...");
+        addPage(page);
+      }
+
+      @Override
+      public boolean performFinish() {
+        page.performFinish();
+        return true;
+      }
+    });
+
+    dialog.create();
+    dialog.setBlockOnOpen(true);
+    dialog.open();
+
+    super.run();
+  }
+}
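
Both location actions follow the same pattern: a HadoopLocationWizard page is wrapped in an anonymous Wizard and run through a modal WizardDialog. A minimal sketch of how these actions might be contributed to the cluster view's toolbar (hypothetical helper assumed to live inside ClusterView, imports elided):

    // Hypothetical helper inside ClusterView (a ViewPart): contribute the
    // location actions so they appear next to the cluster tree.
    private void contributeActions() {
      IActionBars bars = getViewSite().getActionBars();
      IToolBarManager toolbar = bars.getToolBarManager();
      toolbar.add(new NewLocationAction());
      toolbar.add(new EditLocationAction(this)); // edits the selected location
    }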

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/6f00a463/org.apache.hdt.ui/src/org/apache/hdt/ui/actions/NewLocationAction.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.ui/src/org/apache/hdt/ui/actions/NewLocationAction.java b/org.apache.hdt.ui/src/org/apache/hdt/ui/actions/NewLocationAction.java
new file mode 100644
index 0000000..a7c5c81
--- /dev/null
+++ b/org.apache.hdt.ui/src/org/apache/hdt/ui/actions/NewLocationAction.java
@@ -0,0 +1,64 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hdt.ui.actions;
+
+import org.apache.hdt.ui.ImageLibrary;
+import org.apache.hdt.ui.wizards.HadoopLocationWizard;
+import org.eclipse.jface.action.Action;
+import org.eclipse.jface.wizard.Wizard;
+import org.eclipse.jface.wizard.WizardDialog;
+
+
+/**
+ * Action corresponding to creating a new MapReduce Server.
+ */
+
+public class NewLocationAction extends Action {
+  public NewLocationAction() {
+    setText("New Hadoop location...");
+    setImageDescriptor(ImageLibrary.get("server.view.action.location.new"));
+  }
+
+  @Override
+  public void run() {
+    WizardDialog dialog = new WizardDialog(null, new Wizard() {
+      private HadoopLocationWizard page = new HadoopLocationWizard();
+
+      @Override
+      public void addPages() {
+        super.addPages();
+        setWindowTitle("New Hadoop location...");
+        addPage(page);
+      }
+
+      @Override
+      public boolean performFinish() {
+        page.performFinish();
+        return true;
+      }
+
+    });
+
+    dialog.create();
+    dialog.setBlockOnOpen(true);
+    dialog.open();
+
+    super.run();
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/6f00a463/org.apache.hdt.ui/src/org/apache/hdt/ui/cluster/ConfProp.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.ui/src/org/apache/hdt/ui/cluster/ConfProp.java b/org.apache.hdt.ui/src/org/apache/hdt/ui/cluster/ConfProp.java
new file mode 100644
index 0000000..1468b01
--- /dev/null
+++ b/org.apache.hdt.ui/src/org/apache/hdt/ui/cluster/ConfProp.java
@@ -0,0 +1,147 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hdt.ui.cluster;
+
+import java.util.HashMap;
+import java.util.Map;
+
+import org.apache.hadoop.conf.Configuration;
+
+public enum ConfProp {
+  /**
+   * Property name for the Hadoop location name
+   */
+  PI_LOCATION_NAME(true, "location.name", "New Hadoop location"),
+
+  /**
+   * Property name for the master host name (the Job tracker)
+   */
+  PI_JOB_TRACKER_HOST(true, "jobtracker.host", "localhost"),
+
+  /**
+   * Property name for the DFS master host name (the Name node)
+   */
+  PI_NAME_NODE_HOST(true, "namenode.host", "localhost"),
+
+  /**
+   * Property name for the installation directory on the master node
+   */
+  // PI_INSTALL_DIR(true, "install.dir", "/dir/hadoop-version/"),
+  /**
+   * User name to use for Hadoop operations
+   */
+  PI_USER_NAME(true, "user.name", System.getProperty("user.name",
+      "who are you?")),
+
+  /**
+   * Property name for SOCKS proxy activation
+   */
+  PI_SOCKS_PROXY_ENABLE(true, "socks.proxy.enable", "no"),
+
+  /**
+   * Property name for the SOCKS proxy host
+   */
+  PI_SOCKS_PROXY_HOST(true, "socks.proxy.host", "host"),
+
+  /**
+   * Property name for the SOCKS proxy port
+   */
+  PI_SOCKS_PROXY_PORT(true, "socks.proxy.port", "1080"),
+
+  /**
+   * TCP port number for the name node
+   */
+  PI_NAME_NODE_PORT(true, "namenode.port", "50040"),
+
+  /**
+   * TCP port number for the job tracker
+   */
+  PI_JOB_TRACKER_PORT(true, "jobtracker.port", "50020"),
+
+  /**
+   * Are the Map/Reduce and the Distributed FS masters hosted on the same
+   * machine?
+   */
+  PI_COLOCATE_MASTERS(true, "masters.colocate", "yes"),
+
+  /**
+   * Property name for naming the job tracker (URI). This property is related
+   * to {@link #PI_JOB_TRACKER_HOST}
+   */
+  JOB_TRACKER_URI(false, "mapreduce.jobtracker.address", "localhost:50020"),
+
+  /**
+   * Property name for naming the default file system (URI).
+   */
+  FS_DEFAULT_URI(false, "fs.default.name", "hdfs://localhost:50040/"),
+
+  /**
+   * Property name for the default socket factory:
+   */
+  SOCKET_FACTORY_DEFAULT(false, "hadoop.rpc.socket.factory.class.default",
+      "org.apache.hadoop.net.StandardSocketFactory"),
+
+  /**
+   * Property name for the SOCKS server URI.
+   */
+  SOCKS_SERVER(false, "hadoop.socks.server", "host:1080"),
+
+  ;
+
+  /**
+   * Map <property name> -> ConfProp
+   */
+  private static Map<String, ConfProp> map;
+
+  private static synchronized void registerProperty(String name,
+      ConfProp prop) {
+
+    if (ConfProp.map == null)
+      ConfProp.map = new HashMap<String, ConfProp>();
+
+    ConfProp.map.put(name, prop);
+  }
+
+  public static ConfProp getByName(String propName) {
+    return map.get(propName);
+  }
+
+  public final String name;
+
+  public final String defVal;
+
+  ConfProp(boolean internal, String name, String defVal) {
+    if (internal)
+      name = "eclipse.plug-in." + name;
+    this.name = name;
+    this.defVal = defVal;
+
+    ConfProp.registerProperty(name, this);
+  }
+
+  String get(Configuration conf) {
+    return conf.get(name);
+  }
+
+  void set(Configuration conf, String value) {
+    assert value != null;
+    conf.set(name, value);
+  }
+
+}
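
A short sketch of how ConfProp is used against a Hadoop Configuration. The get/set methods are package-private, so the sketch assumes code living in the same org.apache.hdt.ui.cluster package; the URI is hypothetical:

    package org.apache.hdt.ui.cluster; // get/set are package-private

    import org.apache.hadoop.conf.Configuration;

    public class ConfPropExample {
      public static void main(String[] args) {
        Configuration conf = new Configuration();

        // Internal plug-in properties live under the eclipse.plug-in. prefix.
        ConfProp.PI_LOCATION_NAME.set(conf, "dev-cluster");
        System.out.println(conf.get("eclipse.plug-in.location.name")); // dev-cluster

        // Plain Hadoop keys keep their name and resolve back by name.
        ConfProp prop = ConfProp.getByName("fs.default.name");
        prop.set(conf, "hdfs://namenode.example.com:8020/"); // hypothetical URI
        System.out.println(ConfProp.FS_DEFAULT_URI.get(conf));
      }
    }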

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/6f00a463/org.apache.hdt.ui/src/org/apache/hdt/ui/cluster/HadoopCluster.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.ui/src/org/apache/hdt/ui/cluster/HadoopCluster.java b/org.apache.hdt.ui/src/org/apache/hdt/ui/cluster/HadoopCluster.java
new file mode 100644
index 0000000..588dd4e
--- /dev/null
+++ b/org.apache.hdt.ui/src/org/apache/hdt/ui/cluster/HadoopCluster.java
@@ -0,0 +1,518 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hdt.ui.cluster;
+
+import java.io.File;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+import java.util.TreeMap;
+import java.util.logging.Logger;
+
+import javax.xml.parsers.DocumentBuilder;
+import javax.xml.parsers.DocumentBuilderFactory;
+import javax.xml.parsers.ParserConfigurationException;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hdt.ui.Activator;
+import org.apache.hdt.ui.cluster.utils.JarModule;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.mapred.JobClient;
+import org.apache.hadoop.mapred.JobConf;
+import org.apache.hadoop.mapred.JobID;
+import org.apache.hadoop.mapred.JobStatus;
+import org.apache.hadoop.mapred.RunningJob;
+import org.eclipse.core.runtime.IProgressMonitor;
+import org.eclipse.core.runtime.IStatus;
+import org.eclipse.core.runtime.Status;
+import org.eclipse.core.runtime.jobs.Job;
+import org.eclipse.swt.widgets.Display;
+import org.w3c.dom.Document;
+import org.w3c.dom.Element;
+import org.w3c.dom.Node;
+import org.w3c.dom.NodeList;
+import org.w3c.dom.Text;
+import org.xml.sax.SAXException;
+
+/**
+ * Representation of a Hadoop location, i.e. the master node (NameNode,
+ * JobTracker).
+ * 
+ * <p>
+ * This class does not create any SSH connection anymore. Tunneling must be
+ * set up outside of Eclipse for now (using PuTTY or <tt>ssh -D&lt;port&gt;
+ * &lt;host&gt;</tt>)
+ * 
+ * <p>
+ * <em> TODO </em>
+ * <li> Disable the updater if a location becomes unreachable or fails for
+ * too long
+ * <li> Stop the updater on location's disposal/removal
+ */
+
+public class HadoopCluster {
+
+  /**
+   * Frequency of location status observations expressed as the delay in ms
+   * between each observation
+   * 
+   * TODO Add a preference parameter for this
+   */
+  protected static final long STATUS_OBSERVATION_DELAY = 1500;
+
+  /**
+   * Background job that periodically polls the location for the status of
+   * its running jobs
+   */
+  public class LocationStatusUpdater extends Job {
+
+    JobClient client = null;
+
+    /**
+     * Set up the updater
+     */
+    public LocationStatusUpdater() {
+      super("Map/Reduce location status updater");
+      this.setSystem(true);
+    }
+
+    /* @inheritDoc */
+    @Override
+    protected IStatus run(IProgressMonitor monitor) {
+      if (client == null) {
+        try {
+          client = HadoopCluster.this.getJobClient();
+
+        } catch (IOException ioe) {
+          client = null;
+          return new Status(Status.ERROR, Activator.PLUGIN_ID, 0,
+              "Cannot connect to the Map/Reduce location: "
+                            + HadoopCluster.this.getLocationName(),
+                            ioe);
+        }
+      }
+
+      try {
+        // Set of all known existing Job IDs for which we want fresh info
+        Set<JobID> missingJobIds =
+            new HashSet<JobID>(runningJobs.keySet());
+
+        JobStatus[] jstatus = client.jobsToComplete();
+        for (JobStatus status : jstatus) {
+
+          JobID jobId = status.getJobID();
+          missingJobIds.remove(jobId);
+
+          HadoopJob hJob;
+          synchronized (HadoopCluster.this.runningJobs) {
+            hJob = runningJobs.get(jobId);
+            if (hJob == null) {
+              // Unknown job, create an entry
+              RunningJob running = client.getJob(jobId);
+              hJob =
+                  new HadoopJob(HadoopCluster.this, jobId, running, status);
+              newJob(hJob);
+            }
+          }
+
+          // Update the HadoopJob with fresh info
+          updateJob(hJob, status);
+        }
+
+        // Ask explicitly for fresh info for these Job IDs
+        for (JobID jobId : missingJobIds) {
+          HadoopJob hJob = runningJobs.get(jobId);
+          if (!hJob.isCompleted())
+            updateJob(hJob, null);
+        }
+
+      } catch (IOException ioe) {
+        client = null;
+        return new Status(Status.ERROR, Activator.PLUGIN_ID, 0,
+            "Cannot retrieve running Jobs on location: "
+                          + HadoopCluster.this.getLocationName(), ioe);
+      }
+
+      // Schedule the next observation
+      schedule(STATUS_OBSERVATION_DELAY);
+
+      return Status.OK_STATUS;
+    }
+
+    /**
+     * Stores the new job and makes it available to listeners
+     * 
+     * @param data the new job
+     */
+    private void newJob(final HadoopJob data) {
+      runningJobs.put(data.getJobID(), data);
+
+      Display.getDefault().asyncExec(new Runnable() {
+        public void run() {
+          fireJobAdded(data);
+        }
+      });
+    }
+
+    /**
+     * Updates the status of a job
+     * 
+     * @param job the job to update
+     * @param status the fresh status, or null to poll the job directly
+     */
+    private void updateJob(final HadoopJob job, JobStatus status) {
+      job.update(status);
+
+      Display.getDefault().asyncExec(new Runnable() {
+        public void run() {
+          fireJobChanged(job);
+        }
+      });
+    }
+
+  }
+
+  static Logger log = Logger.getLogger(HadoopCluster.class.getName());
+
+  /**
+   * Hadoop configuration of the location. Also contains specific parameters
+   * for the plug-in. These parameters are prefixed with eclipse.plug-in.*
+   */
+  private Configuration conf;
+
+  /**
+   * Jobs listeners
+   */
+  private Set<IJobListener> jobListeners = new HashSet<IJobListener>();
+
+  /**
+   * Jobs running on this location. The keys of this map are the Job IDs.
+   */
+  private transient Map<JobID, HadoopJob> runningJobs =
+      Collections.synchronizedMap(new TreeMap<JobID, HadoopJob>());
+
+  /**
+   * Status updater for this location
+   */
+  private LocationStatusUpdater statusUpdater;
+
+  // state and status - transient
+  private transient String state = "";
+
+  /**
+   * Creates a new default Hadoop location
+   */
+  public HadoopCluster() {
+    this.conf = new Configuration();
+    this.addPluginConfigDefaultProperties();
+  }
+
+  /**
+   * Creates a location from a file
+   * 
+   * @throws IOException
+   * @throws SAXException
+   * @throws ParserConfigurationException
+   */
+  public HadoopCluster(File file) throws ParserConfigurationException,
+      SAXException, IOException {
+
+    this.conf = new Configuration();
+    this.addPluginConfigDefaultProperties();
+    this.loadFromXML(file);
+  }
+
+  /**
+   * Create a new Hadoop location by copying an already existing one.
+   * 
+   * @param existing the location to copy
+   */
+  public HadoopCluster(HadoopCluster existing) {
+    this();
+    this.load(existing);
+  }
+
+  public void addJobListener(IJobListener l) {
+    jobListeners.add(l);
+  }
+
+  public void dispose() {
+    // TODO close DFS connections?
+  }
+
+  /**
+   * List all elements that should be present in the Server window (all
+   * servers and all jobs running on each server)
+   * 
+   * @return collection of jobs for this location
+   */
+  public Collection<HadoopJob> getJobs() {
+    startStatusUpdater();
+    return this.runningJobs.values();
+  }
+
+  /**
+   * Remove the given job from the currently running jobs map
+   * 
+   * @param job the job to remove
+   */
+  public void purgeJob(final HadoopJob job) {
+    runningJobs.remove(job.getJobID());
+    Display.getDefault().asyncExec(new Runnable() {
+      public void run() {
+        fireJobRemoved(job);
+      }
+    });
+  }
+
+  /**
+   * Returns the {@link Configuration} defining this location.
+   * 
+   * @return the location configuration
+   */
+  public Configuration getConfiguration() {
+    return this.conf;
+  }
+
+  /**
+   * Gets a Hadoop configuration property value
+   * 
+   * @param prop the configuration property
+   * @return the property value
+   */
+  public String getConfProp(ConfProp prop) {
+    return prop.get(conf);
+  }
+
+  /**
+   * Gets a Hadoop configuration property value
+   * 
+   * @param propName the property name
+   * @return the property value
+   */
+  public String getConfProp(String propName) {
+    return this.conf.get(propName);
+  }
+
+  public String getLocationName() {
+    return ConfProp.PI_LOCATION_NAME.get(conf);
+  }
+
+  /**
+   * Returns the master host name of the Hadoop location (the Job tracker)
+   * 
+   * @return the host name of the Job tracker
+   */
+  public String getMasterHostName() {
+    return getConfProp(ConfProp.PI_JOB_TRACKER_HOST);
+  }
+
+  public String getState() {
+    return state;
+  }
+
+  /**
+   * Overwrite this location with the given existing location
+   * 
+   * @param existing the existing location
+   */
+  public void load(HadoopCluster existing) {
+    this.conf = new Configuration(existing.conf);
+  }
+
+  /**
+   * Overwrite this location with settings available in the given XML file.
+   * The existing configuration is preserved if the XML file is invalid.
+   * 
+   * @param file the file path of the XML file
+   * @return validity of the XML file
+   * @throws ParserConfigurationException
+   * @throws IOException
+   * @throws SAXException
+   */
+  public boolean loadFromXML(File file) throws ParserConfigurationException,
+      SAXException, IOException {
+
+    Configuration newConf = new Configuration(this.conf);
+
+    DocumentBuilder builder =
+        DocumentBuilderFactory.newInstance().newDocumentBuilder();
+    Document document = builder.parse(file);
+
+    Element root = document.getDocumentElement();
+    if (!"configuration".equals(root.getTagName()))
+      return false;
+    NodeList props = root.getChildNodes();
+    for (int i = 0; i < props.getLength(); i++) {
+      Node propNode = props.item(i);
+      if (!(propNode instanceof Element))
+        continue;
+      Element prop = (Element) propNode;
+      if (!"property".equals(prop.getTagName()))
+        return false;
+      NodeList fields = prop.getChildNodes();
+      String attr = null;
+      String value = null;
+      for (int j = 0; j < fields.getLength(); j++) {
+        Node fieldNode = fields.item(j);
+        if (!(fieldNode instanceof Element))
+          continue;
+        Element field = (Element) fieldNode;
+        if ("name".equals(field.getTagName()))
+          attr = ((Text) field.getFirstChild()).getData();
+        if ("value".equals(field.getTagName()) && field.hasChildNodes())
+          value = ((Text) field.getFirstChild()).getData();
+      }
+      if (attr != null && value != null)
+        newConf.set(attr, value);
+    }
+
+    this.conf = newConf;
+    return true;
+  }
+
+  /**
+   * Sets a Hadoop configuration property value
+   * 
+   * @param prop the property
+   * @param propValue the property value
+   */
+  public void setConfProp(ConfProp prop, String propValue) {
+    prop.set(conf, propValue);
+  }
+
+  /**
+   * Sets a Hadoop configuration property value
+   * 
+   * @param propName the property name
+   * @param propValue the property value
+   */
+  public void setConfProp(String propName, String propValue) {
+    this.conf.set(propName, propValue);
+  }
+
+  public void setLocationName(String newName) {
+    ConfProp.PI_LOCATION_NAME.set(conf, newName);
+  }
+
+  /**
+   * Write this location's settings to the given file
+   * 
+   * @param file the destination file
+   * @throws IOException
+   */
+  public void storeSettingsToFile(File file) throws IOException {
+    FileOutputStream fos = new FileOutputStream(file);
+    try {
+      this.conf.writeXml(fos);
+      fos.close();
+      fos = null;
+    } finally {
+      IOUtils.closeStream(fos);
+    }
+
+  }
+
+  /* @inheritDoc */
+  @Override
+  public String toString() {
+    return this.getLocationName();
+  }
+
+  /**
+   * Fill the configuration with valid default values
+   */
+  private void addPluginConfigDefaultProperties() {
+    for (ConfProp prop : ConfProp.values()) {
+      if (conf.get(prop.name) == null)
+        conf.set(prop.name, prop.defVal);
+    }
+  }
+
+  /**
+   * Starts the location status updater
+   */
+  private synchronized void startStatusUpdater() {
+    if (statusUpdater == null) {
+      statusUpdater = new LocationStatusUpdater();
+      statusUpdater.schedule();
+    }
+  }
+
+  /*
+   * Rewrite of the connecting and tunneling to the Hadoop location
+   */
+
+  /**
+   * Provides access to the default file system of this location.
+   * 
+   * @return a {@link FileSystem}
+   */
+  public FileSystem getDFS() throws IOException {
+    return FileSystem.get(this.conf);
+  }
+
+  /**
+   * Provides access to the Job tracking system of this location
+   * 
+   * @return a {@link JobClient}
+   */
+  public JobClient getJobClient() throws IOException {
+    JobConf jconf = new JobConf(this.conf);
+    return new JobClient(jconf);
+  }
+
+  /*
+   * Listeners handling
+   */
+
+  protected void fireJarPublishDone(JarModule jar) {
+    for (IJobListener listener : jobListeners) {
+      listener.publishDone(jar);
+    }
+  }
+
+  protected void fireJarPublishStart(JarModule jar) {
+    for (IJobListener listener : jobListeners) {
+      listener.publishStart(jar);
+    }
+  }
+
+  protected void fireJobAdded(HadoopJob job) {
+    for (IJobListener listener : jobListeners) {
+      listener.jobAdded(job);
+    }
+  }
+
+  protected void fireJobRemoved(HadoopJob job) {
+    for (IJobListener listener : jobListeners) {
+      listener.jobRemoved(job);
+    }
+  }
+
+  protected void fireJobChanged(HadoopJob job) {
+    for (IJobListener listener : jobListeners) {
+      listener.jobChanged(job);
+    }
+  }
+
+}
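
A minimal sketch of the location lifecycle this class supports: create a location, configure it, persist it to XML, and read it back (host and file names are hypothetical, exception handling elided):

    import java.io.File;

    import org.apache.hdt.ui.cluster.ConfProp;
    import org.apache.hdt.ui.cluster.HadoopCluster;

    public class HadoopClusterExample {
      public static void main(String[] args) throws Exception {
        HadoopCluster cluster = new HadoopCluster(); // seeded with ConfProp defaults
        cluster.setLocationName("dev-cluster");
        cluster.setConfProp(ConfProp.PI_JOB_TRACKER_HOST, "jt.example.com"); // hypothetical
        cluster.setConfProp(ConfProp.PI_NAME_NODE_HOST, "nn.example.com");   // hypothetical

        // Persist in the same XML format that loadFromXML() reads back.
        File file = new File("dev-cluster.xml");
        cluster.storeSettingsToFile(file);

        // Round-trip: build a new location from the stored file.
        HadoopCluster copy = new HadoopCluster(file);
        System.out.println(copy.getLocationName()); // dev-cluster
      }
    }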

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/6f00a463/org.apache.hdt.ui/src/org/apache/hdt/ui/cluster/HadoopJob.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.ui/src/org/apache/hdt/ui/cluster/HadoopJob.java b/org.apache.hdt.ui/src/org/apache/hdt/ui/cluster/HadoopJob.java
new file mode 100644
index 0000000..78f3fc6
--- /dev/null
+++ b/org.apache.hdt.ui/src/org/apache/hdt/ui/cluster/HadoopJob.java
@@ -0,0 +1,349 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hdt.ui.cluster;
+
+import java.io.File;
+import java.io.IOException;
+
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.mapred.Counters;
+import org.apache.hadoop.mapred.JobConf;
+import org.apache.hadoop.mapred.JobID;
+import org.apache.hadoop.mapred.JobStatus;
+import org.apache.hadoop.mapred.RunningJob;
+
+/**
+ * Representation of a Map/Reduce running job on a given location
+ */
+
+public class HadoopJob {
+
+  /**
+   * Enum representation of a Job state
+   */
+  public enum JobState {
+    PREPARE(JobStatus.PREP), RUNNING(JobStatus.RUNNING), FAILED(
+        JobStatus.FAILED), SUCCEEDED(JobStatus.SUCCEEDED);
+
+    final int state;
+
+    JobState(int state) {
+      this.state = state;
+    }
+
+    static JobState ofInt(int state) {
+      if (state == JobStatus.PREP) {
+        return PREPARE;
+      }
+      else if (state == JobStatus.RUNNING) {
+        return RUNNING;
+      }
+      else if (state == JobStatus.FAILED) {
+        return FAILED;
+      }
+      else if (state == JobStatus.SUCCEEDED) {
+        return SUCCEEDED;
+      }
+      else {
+        return null;
+      }
+    }
+  }
+
+  /**
+   * Location this Job runs on
+   */
+  private final HadoopCluster location;
+
+  /**
+   * Unique identifier of this Job
+   */
+  final JobID jobId;
+
+  /**
+   * Status representation of a running job. This actually contains a
+   * reference to a JobClient. Its methods might block.
+   */
+  RunningJob running;
+
+  /**
+   * Last polled status
+   * 
+   * @deprecated should apparently not be used
+   */
+  JobStatus status;
+
+  /**
+   * Last polled counters
+   */
+  Counters counters;
+
+  /**
+   * Job Configuration
+   */
+  JobConf jobConf = null;
+
+  boolean completed = false;
+
+  boolean successful = false;
+
+  boolean killed = false;
+
+  int totalMaps;
+
+  int totalReduces;
+
+  int completedMaps;
+
+  int completedReduces;
+
+  float mapProgress;
+
+  float reduceProgress;
+
+  /**
+   * Constructor for a Hadoop job representation
+   * 
+   * @param location the location the job runs on
+   * @param id the unique identifier of the job
+   * @param running the RunningJob handle from the JobClient
+   * @param status the initial job status
+   */
+  public HadoopJob(HadoopCluster location, JobID id, RunningJob running,
+      JobStatus status) {
+
+    this.location = location;
+    this.jobId = id;
+    this.running = running;
+
+    loadJobFile();
+
+    update(status);
+  }
+
+  /**
+   * Try to locate and load the JobConf file for this job so as to get more
+   * details on the job (number of map and reduce tasks)
+   */
+  private void loadJobFile() {
+    try {
+      String jobFile = getJobFile();
+      FileSystem fs = location.getDFS();
+      File tmp = File.createTempFile(getJobID().toString(), ".xml");
+      if (FileUtil.copy(fs, new Path(jobFile), tmp, false, location
+          .getConfiguration())) {
+        this.jobConf = new JobConf(tmp.toString());
+
+        this.totalMaps = jobConf.getNumMapTasks();
+        this.totalReduces = jobConf.getNumReduceTasks();
+      }
+
+    } catch (IOException ioe) {
+      ioe.printStackTrace();
+    }
+  }
+
+  /* @inheritDoc */
+  @Override
+  public int hashCode() {
+    final int prime = 31;
+    int result = 1;
+    result = prime * result + ((jobId == null) ? 0 : jobId.hashCode());
+    result = prime * result + ((location == null) ? 0 : location.hashCode());
+    return result;
+  }
+
+  /* @inheritDoc */
+  @Override
+  public boolean equals(Object obj) {
+    if (this == obj)
+      return true;
+    if (obj == null)
+      return false;
+    if (!(obj instanceof HadoopJob))
+      return false;
+    final HadoopJob other = (HadoopJob) obj;
+    if (jobId == null) {
+      if (other.jobId != null)
+        return false;
+    } else if (!jobId.equals(other.jobId))
+      return false;
+    if (location == null) {
+      if (other.location != null)
+        return false;
+    } else if (!location.equals(other.location))
+      return false;
+    return true;
+  }
+
+  /**
+   * Get the running state of the Job (see {@link JobStatus}).
+   * 
+   * @return the current {@link JobState}
+   */
+  public JobState getState() {
+    if (this.completed) {
+      if (this.successful) {
+        return JobState.SUCCEEDED;
+      } else {
+        return JobState.FAILED;
+      }
+    } else {
+      return JobState.RUNNING;
+    }
+    // return JobState.ofInt(this.status.getRunState());
+  }
+
+  /**
+   * @return the unique identifier of this job
+   */
+  public JobID getJobID() {
+    return this.jobId;
+  }
+
+  /**
+   * @return the location this job runs on
+   */
+  public HadoopCluster getLocation() {
+    return this.location;
+  }
+
+  /**
+   * @return true if this job is completed
+   */
+  public boolean isCompleted() {
+    return this.completed;
+  }
+
+  /**
+   * @return the name of this job
+   */
+  public String getJobName() {
+    return this.running.getJobName();
+  }
+
+  /**
+   * @return the path of this job's job file
+   */
+  public String getJobFile() {
+    return this.running.getJobFile();
+  }
+
+  /**
+   * Return the tracking URL for this Job.
+   * 
+   * @return string representation of the tracking URL for this Job
+   */
+  public String getTrackingURL() {
+    return this.running.getTrackingURL();
+  }
+
+  /**
+   * Returns a string representation of this job status
+   * 
+   * @return string representation of this job status
+   */
+  public String getStatus() {
+
+    StringBuffer s = new StringBuffer();
+
+    s.append("Maps : " + completedMaps + "/" + totalMaps);
+    s.append(" (" + mapProgress + ")");
+    s.append("  Reduces : " + completedReduces + "/" + totalReduces);
+    s.append(" (" + reduceProgress + ")");
+
+    return s.toString();
+  }
+
+  /**
+   * Update this job status according to the given JobStatus
+   * 
+   * @param status
+   */
+  void update(JobStatus status) {
+    this.status = status;
+    try {
+      this.counters = running.getCounters();
+      this.completed = running.isComplete();
+      this.successful = running.isSuccessful();
+      this.mapProgress = running.mapProgress();
+      this.reduceProgress = running.reduceProgress();
+      // running.getTaskCompletionEvents(fromEvent);
+
+    } catch (IOException ioe) {
+      ioe.printStackTrace();
+    }
+
+    this.completedMaps = (int) (this.totalMaps * this.mapProgress);
+    this.completedReduces = (int) (this.totalReduces * this.reduceProgress);
+  }
+
+  /**
+   * Print this job's counters (for debugging purposes)
+   */
+  void printCounters() {
+    System.out.printf("New Job:\n", counters);
+    for (String groupName : counters.getGroupNames()) {
+      Counters.Group group = counters.getGroup(groupName);
+      System.out.printf("\t%s[%s]\n", groupName, group.getDisplayName());
+
+      for (Counters.Counter counter : group) {
+        System.out.printf("\t\t%s: %s\n", counter.getDisplayName(),
+                                          counter.getCounter());
+      }
+    }
+    System.out.printf("\n");
+  }
+
+  /**
+   * Kill this job
+   */
+  public void kill() {
+    try {
+      this.running.killJob();
+      this.killed = true;
+
+    } catch (IOException e) {
+      e.printStackTrace();
+    }
+  }
+
+  /**
+   * Print this job's status (for debugging purposes)
+   */
+  public void display() {
+    System.out.printf("Job id=%s, name=%s\n", getJobID(), getJobName());
+    System.out.printf("Configuration file: %s\n", getJobID());
+    System.out.printf("Tracking URL: %s\n", getTrackingURL());
+
+    System.out.printf("Completion: map: %f reduce %f\n",
+        100.0 * this.mapProgress, 100.0 * this.reduceProgress);
+
+    System.out.println("Job total maps = " + totalMaps);
+    System.out.println("Job completed maps = " + completedMaps);
+    System.out.println("Map percentage complete = " + mapProgress);
+    System.out.println("Job total reduces = " + totalReduces);
+    System.out.println("Job completed reduces = " + completedReduces);
+    System.out.println("Reduce percentage complete = " + reduceProgress);
+    System.out.flush();
+  }
+
+}
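
A sketch of how HadoopJob instances are typically consumed: enumerate the jobs of a location (which lazily starts the status updater) and kill the ones still running (the sweep criterion is illustrative only):

    import org.apache.hdt.ui.cluster.HadoopCluster;
    import org.apache.hdt.ui.cluster.HadoopJob;

    public class JobSweepExample {
      /** Kills every running job of the given location (illustrative only). */
      public static void killRunningJobs(HadoopCluster cluster) {
        // getJobs() starts the LocationStatusUpdater on first call.
        for (HadoopJob job : cluster.getJobs()) {
          System.out.println(job.getJobID() + "  " + job.getStatus());
          if (!job.isCompleted()) {
            job.kill(); // asks the JobTracker to kill the job
          }
        }
      }
    }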

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/6f00a463/org.apache.hdt.ui/src/org/apache/hdt/ui/cluster/IHadoopClusterListener.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.ui/src/org/apache/hdt/ui/cluster/IHadoopClusterListener.java b/org.apache.hdt.ui/src/org/apache/hdt/ui/cluster/IHadoopClusterListener.java
new file mode 100644
index 0000000..2f7a245
--- /dev/null
+++ b/org.apache.hdt.ui/src/org/apache/hdt/ui/cluster/IHadoopClusterListener.java
@@ -0,0 +1,28 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hdt.ui.cluster;
+
+import org.apache.hdt.ui.cluster.HadoopCluster;
+
+/**
+ * Interface for monitoring server changes
+ */
+public interface IHadoopClusterListener {
+  void serverChanged(HadoopCluster location, int type);
+}

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/6f00a463/org.apache.hdt.ui/src/org/apache/hdt/ui/cluster/IJobListener.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.ui/src/org/apache/hdt/ui/cluster/IJobListener.java b/org.apache.hdt.ui/src/org/apache/hdt/ui/cluster/IJobListener.java
new file mode 100644
index 0000000..5d001c4
--- /dev/null
+++ b/org.apache.hdt.ui/src/org/apache/hdt/ui/cluster/IJobListener.java
@@ -0,0 +1,38 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hdt.ui.cluster;
+
+import org.apache.hdt.ui.cluster.utils.JarModule;
+
+/**
+ * Interface for updating/adding jobs to the MapReduce Server view.
+ */
+public interface IJobListener {
+
+  void jobChanged(HadoopJob job);
+
+  void jobAdded(HadoopJob job);
+
+  void jobRemoved(HadoopJob job);
+
+  void publishStart(JarModule jar);
+
+  void publishDone(JarModule jar);
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/6f00a463/org.apache.hdt.ui/src/org/apache/hdt/ui/cluster/ServerRegistry.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.ui/src/org/apache/hdt/ui/cluster/ServerRegistry.java b/org.apache.hdt.ui/src/org/apache/hdt/ui/cluster/ServerRegistry.java
new file mode 100644
index 0000000..a1a990e
--- /dev/null
+++ b/org.apache.hdt.ui/src/org/apache/hdt/ui/cluster/ServerRegistry.java
@@ -0,0 +1,203 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hdt.ui.cluster;
+
+import java.io.File;
+import java.io.FilenameFilter;
+import java.io.IOException;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+import java.util.TreeMap;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hdt.ui.Activator;
+import org.apache.hdt.ui.cluster.HadoopCluster;
+import org.eclipse.jface.dialogs.MessageDialog;
+
+/**
+ * Register of Hadoop locations.
+ * 
+ * Each location corresponds to a Hadoop {@link Configuration} stored as an
+ * XML file in the workspace plug-in configuration directory:
+ * <p>
+ * <tt>
+ * &lt;workspace-dir&gt;/.metadata/.plugins/org.apache.hdt.ui/locations/*.xml
+ * </tt>
+ * 
+ */
+public class ServerRegistry {
+
+  private static final ServerRegistry INSTANCE = new ServerRegistry();
+
+  public static final int SERVER_ADDED = 0;
+
+  public static final int SERVER_REMOVED = 1;
+
+  public static final int SERVER_STATE_CHANGED = 2;
+
+  private final File baseDir =
+      Activator.getDefault().getStateLocation().toFile();
+
+  private final File saveDir = new File(baseDir, "locations");
+
+  private ServerRegistry() {
+    if (saveDir.exists() && !saveDir.isDirectory())
+      saveDir.delete();
+    if (!saveDir.exists())
+      saveDir.mkdirs();
+
+    load();
+  }
+
+  private Map<String, HadoopCluster> servers;
+
+  private Set<IHadoopClusterListener> listeners =
+      new HashSet<IHadoopClusterListener>();
+
+  public static ServerRegistry getInstance() {
+    return INSTANCE;
+  }
+
+  public synchronized Collection<HadoopCluster> getServers() {
+    return Collections.unmodifiableCollection(servers.values());
+  }
+
+  /**
+   * Load all available locations from the workspace configuration directory.
+   */
+  private synchronized void load() {
+    Map<String, HadoopCluster> map = new TreeMap<String, HadoopCluster>();
+    for (File file : saveDir.listFiles()) {
+      try {
+        HadoopCluster server = new HadoopCluster(file);
+        map.put(server.getLocationName(), server);
+
+      } catch (Exception exn) {
+        System.err.println(exn);
+      }
+    }
+    this.servers = map;
+  }
+
+  private synchronized void store() {
+    try {
+      File dir = File.createTempFile("locations", "new", baseDir);
+      dir.delete();
+      dir.mkdirs();
+
+      for (HadoopCluster server : servers.values()) {
+        server.storeSettingsToFile(new File(dir, server.getLocationName()
+            + ".xml"));
+      }
+
+      FilenameFilter XMLFilter = new FilenameFilter() {
+        public boolean accept(File dir, String name) {
+          String lower = name.toLowerCase();
+          return lower.endsWith(".xml");
+        }
+      };
+
+      File backup = new File(baseDir, "locations.backup");
+      if (backup.exists()) {
+        for (File file : backup.listFiles(XMLFilter))
+          if (!file.delete())
+            throw new IOException("Unable to delete backup location file: "
+                + file);
+        if (!backup.delete())
+          throw new IOException(
+              "Unable to delete backup location directory: " + backup);
+      }
+
+      saveDir.renameTo(backup);
+      dir.renameTo(saveDir);
+
+    } catch (IOException ioe) {
+      ioe.printStackTrace();
+      MessageDialog.openError(null,
+          "Saving configuration of Hadoop locations failed", ioe.toString());
+    }
+  }
+
+  public void dispose() {
+    for (HadoopCluster server : getServers()) {
+      server.dispose();
+    }
+  }
+
+  public synchronized HadoopCluster getServer(String location) {
+    return servers.get(location);
+  }
+
+  /*
+   * HadoopCluster map listeners
+   */
+
+  public void addListener(IHadoopClusterListener l) {
+    synchronized (listeners) {
+      listeners.add(l);
+    }
+  }
+
+  public void removeListener(IHadoopClusterListener l) {
+    synchronized (listeners) {
+      listeners.remove(l);
+    }
+  }
+
+  private void fireListeners(HadoopCluster location, int kind) {
+    synchronized (listeners) {
+      for (IHadoopClusterListener listener : listeners) {
+        listener.serverChanged(location, kind);
+      }
+    }
+  }
+
+  public synchronized void removeServer(HadoopCluster server) {
+    this.servers.remove(server.getLocationName());
+    store();
+    fireListeners(server, SERVER_REMOVED);
+  }
+
+  public synchronized void addServer(HadoopCluster server) {
+    this.servers.put(server.getLocationName(), server);
+    store();
+    fireListeners(server, SERVER_ADDED);
+  }
+
+  /**
+   * Update one Hadoop location
+   * 
+   * @param originalName the original location name (might have changed)
+   * @param server the location
+   */
+  public synchronized void updateServer(String originalName,
+      HadoopCluster server) {
+
+    // Update the map if the location name has changed
+    if (!server.getLocationName().equals(originalName)) {
+      servers.remove(originalName);
+      servers.put(server.getLocationName(), server);
+    }
+    store();
+    fireListeners(server, SERVER_STATE_CHANGED);
+  }
+}
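
A sketch of observing registry changes through IHadoopClusterListener; the type constants distinguish additions, removals and updates:

    import org.apache.hdt.ui.cluster.HadoopCluster;
    import org.apache.hdt.ui.cluster.IHadoopClusterListener;
    import org.apache.hdt.ui.cluster.ServerRegistry;

    public class RegistryListenerExample {
      public static void watchRegistry() {
        ServerRegistry.getInstance().addListener(new IHadoopClusterListener() {
          public void serverChanged(HadoopCluster location, int type) {
            switch (type) {
            case ServerRegistry.SERVER_ADDED:
              System.out.println("added " + location.getLocationName());
              break;
            case ServerRegistry.SERVER_REMOVED:
              System.out.println("removed " + location.getLocationName());
              break;
            case ServerRegistry.SERVER_STATE_CHANGED:
              System.out.println("changed " + location.getLocationName());
              break;
            }
          }
        });
      }
    }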

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/6f00a463/org.apache.hdt.ui/src/org/apache/hdt/ui/cluster/utils/JarModule.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.ui/src/org/apache/hdt/ui/cluster/utils/JarModule.java b/org.apache.hdt.ui/src/org/apache/hdt/ui/cluster/utils/JarModule.java
new file mode 100644
index 0000000..71d5559
--- /dev/null
+++ b/org.apache.hdt.ui/src/org/apache/hdt/ui/cluster/utils/JarModule.java
@@ -0,0 +1,146 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hdt.ui.cluster.utils;
+
+import java.io.File;
+import java.util.logging.Logger;
+
+import org.apache.hdt.ui.Activator;
+import org.apache.hdt.ui.dialogs.ErrorMessageDialog;
+import org.eclipse.core.resources.IResource;
+import org.eclipse.core.runtime.IProgressMonitor;
+import org.eclipse.core.runtime.Path;
+import org.eclipse.jdt.core.ICompilationUnit;
+import org.eclipse.jdt.core.IJavaElement;
+import org.eclipse.jdt.core.IType;
+import org.eclipse.jdt.ui.jarpackager.IJarExportRunnable;
+import org.eclipse.jdt.ui.jarpackager.JarPackageData;
+import org.eclipse.jface.operation.IRunnableWithProgress;
+import org.eclipse.swt.widgets.Display;
+import org.eclipse.ui.PlatformUI;
+
+/**
+ * Methods for interacting with the jar file containing the
+ * Mapper/Reducer/Driver classes for a MapReduce job.
+ */
+
+public class JarModule implements IRunnableWithProgress {
+
+  static Logger log = Logger.getLogger(JarModule.class.getName());
+
+  private IResource resource;
+
+  private File jarFile;
+
+  public JarModule(IResource resource) {
+    this.resource = resource;
+  }
+
+  public String getName() {
+    return resource.getProject().getName() + "/" + resource.getName();
+  }
+
+  /**
+   * Creates a JAR file containing this module's resource (a Java class
+   * with a main() method) and all associated resources; the result is
+   * available through {@link #getJarFile()}
+   * 
+   * @param monitor the progress monitor
+   */
+  public void run(IProgressMonitor monitor) {
+
+    log.fine("Build jar");
+    JarPackageData jarrer = new JarPackageData();
+
+    jarrer.setExportJavaFiles(true);
+    jarrer.setExportClassFiles(true);
+    jarrer.setExportOutputFolders(true);
+    jarrer.setOverwrite(true);
+
+    try {
+      // IJavaProject project =
+      // (IJavaProject) resource.getProject().getNature(JavaCore.NATURE_ID);
+
+      // check this is the case before letting this method get called
+      Object element = resource.getAdapter(IJavaElement.class);
+      IType type = ((ICompilationUnit) element).findPrimaryType();
+      jarrer.setManifestMainClass(type);
+
+      // Create a temporary JAR file name
+      File baseDir = Activator.getDefault().getStateLocation().toFile();
+
+      String prefix =
+          String.format("%s_%s-", resource.getProject().getName(), resource
+              .getName());
+      File jarFile = File.createTempFile(prefix, ".jar", baseDir);
+      jarrer.setJarLocation(new Path(jarFile.getAbsolutePath()));
+
+      jarrer.setElements(resource.getProject().members(IResource.FILE));
+      IJarExportRunnable runnable =
+          jarrer.createJarExportRunnable(Display.getDefault()
+              .getActiveShell());
+      runnable.run(monitor);
+
+      this.jarFile = jarFile;
+
+    } catch (Exception e) {
+      e.printStackTrace();
+      throw new RuntimeException(e);
+    }
+  }
+
+  /**
+   * Allow the retrieval of the resulting JAR file
+   * 
+   * @return the generated JAR file
+   */
+  public File getJarFile() {
+    return this.jarFile;
+  }
+
+  /**
+   * Static helper creating a JAR package for the given resource while
+   * showing a progress bar
+   * 
+   * @param resource the resource to package
+   * @return the created JAR file, or null if the packaging failed
+   */
+  public static File createJarPackage(IResource resource) {
+
+    JarModule jarModule = new JarModule(resource);
+    try {
+      PlatformUI.getWorkbench().getProgressService().run(false, true,
+          jarModule);
+
+    } catch (Exception e) {
+      e.printStackTrace();
+      return null;
+    }
+
+    File jarFile = jarModule.getJarFile();
+    if (jarFile == null) {
+      ErrorMessageDialog.display("Run on Hadoop",
+          "Unable to create or locate the JAR file for the Job");
+      return null;
+    }
+
+    return jarFile;
+  }
+
+}
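
A sketch of driving the static helper from, say, a hypothetical "Run on Hadoop" action; the resource is assumed to be the Java file holding the job's driver class:

    import java.io.File;

    import org.apache.hdt.ui.cluster.utils.JarModule;
    import org.eclipse.core.resources.IResource;

    public class RunOnHadoopExample {
      /** Packages the selected driver; returns null if the export failed. */
      public static File packageDriver(IResource driverFile) {
        // Runs the jar export with a progress dialog; on failure the helper
        // itself pops an ErrorMessageDialog and returns null.
        return JarModule.createJarPackage(driverFile);
      }
    }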

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/6f00a463/org.apache.hdt.ui/src/org/apache/hdt/ui/dialogs/ErrorMessageDialog.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.ui/src/org/apache/hdt/ui/dialogs/ErrorMessageDialog.java b/org.apache.hdt.ui/src/org/apache/hdt/ui/dialogs/ErrorMessageDialog.java
new file mode 100644
index 0000000..bb0137a
--- /dev/null
+++ b/org.apache.hdt.ui/src/org/apache/hdt/ui/dialogs/ErrorMessageDialog.java
@@ -0,0 +1,45 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hdt.ui.dialogs;
+
+import org.eclipse.jface.dialogs.MessageDialog;
+import org.eclipse.swt.widgets.Display;
+
+/**
+ * Error dialog helper
+ */
+public class ErrorMessageDialog {
+
+  public static void display(final String title, final String message) {
+    Display.getDefault().syncExec(new Runnable() {
+
+      public void run() {
+        MessageDialog.openError(Display.getDefault().getActiveShell(),
+            title, message);
+      }
+
+    });
+  }
+
+  public static void display(Exception e) {
+    display("An exception has occured!", "Exception description:\n"
+        + e.getLocalizedMessage());
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/6f00a463/org.apache.hdt.ui/src/org/apache/hdt/ui/views/ClusterView.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.ui/src/org/apache/hdt/ui/views/ClusterView.java b/org.apache.hdt.ui/src/org/apache/hdt/ui/views/ClusterView.java
new file mode 100644
index 0000000..6b81b8d
--- /dev/null
+++ b/org.apache.hdt.ui/src/org/apache/hdt/ui/views/ClusterView.java
@@ -0,0 +1,460 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hdt.ui.views;
+
+import java.util.Collection;
+
+import org.apache.hdt.ui.ImageLibrary;
+import org.apache.hdt.ui.actions.EditLocationAction;
+import org.apache.hdt.ui.actions.NewLocationAction;
+import org.apache.hdt.ui.cluster.HadoopJob;
+import org.apache.hdt.ui.cluster.HadoopCluster;
+import org.apache.hdt.ui.cluster.IJobListener;
+import org.apache.hdt.ui.cluster.utils.JarModule;
+import org.apache.hdt.ui.cluster.IHadoopClusterListener;
+import org.apache.hdt.ui.cluster.ServerRegistry;
+import org.eclipse.jface.action.Action;
+import org.eclipse.jface.action.IAction;
+import org.eclipse.jface.action.IMenuListener;
+import org.eclipse.jface.action.IMenuManager;
+import org.eclipse.jface.action.MenuManager;
+import org.eclipse.jface.dialogs.MessageDialog;
+import org.eclipse.jface.viewers.ILabelProviderListener;
+import org.eclipse.jface.viewers.ISelection;
+import org.eclipse.jface.viewers.ISelectionChangedListener;
+import org.eclipse.jface.viewers.IStructuredSelection;
+import org.eclipse.jface.viewers.ITableLabelProvider;
+import org.eclipse.jface.viewers.ITreeContentProvider;
+import org.eclipse.jface.viewers.ITreeSelection;
+import org.eclipse.jface.viewers.SelectionChangedEvent;
+import org.eclipse.jface.viewers.TreeViewer;
+import org.eclipse.jface.viewers.Viewer;
+import org.eclipse.swt.SWT;
+import org.eclipse.swt.graphics.Image;
+import org.eclipse.swt.layout.GridData;
+import org.eclipse.swt.widgets.Composite;
+import org.eclipse.swt.widgets.Display;
+import org.eclipse.swt.widgets.Menu;
+import org.eclipse.swt.widgets.Tree;
+import org.eclipse.swt.widgets.TreeColumn;
+import org.eclipse.ui.IViewSite;
+import org.eclipse.ui.PartInitException;
+import org.eclipse.ui.actions.ActionFactory;
+import org.eclipse.ui.part.ViewPart;
+
+/**
+ * Cluster view: displays all available Hadoop locations (clusters) and the
+ * jobs running or finished on those locations.
+ */
+public class ClusterView extends ViewPart implements ITreeContentProvider,
+    ITableLabelProvider, IJobListener, IHadoopClusterListener {
+
+  /**
+   * Deletion action: delete a Hadoop location, kill a running job or remove
+   * a finished job entry
+   */
+  class DeleteAction extends Action {
+
+    DeleteAction() {
+      setText("Delete");
+      setImageDescriptor(ImageLibrary.get("server.view.action.delete"));
+    }
+
+    /* @inheritDoc */
+    @Override
+    public void run() {
+      ISelection selection =
+          getViewSite().getSelectionProvider().getSelection();
+      if (selection instanceof IStructuredSelection) {
+        Object selItem =
+            ((IStructuredSelection) selection).getFirstElement();
+
+        if (selItem instanceof HadoopCluster) {
+          HadoopCluster location = (HadoopCluster) selItem;
+          if (MessageDialog.openConfirm(Display.getDefault()
+              .getActiveShell(), "Confirm delete Hadoop location",
+              "Do you really want to remove the Hadoop location: "
+                  + location.getLocationName())) {
+            ServerRegistry.getInstance().removeServer(location);
+          }
+
+        } else if (selItem instanceof HadoopJob) {
+
+          // kill the job
+          HadoopJob job = (HadoopJob) selItem;
+          if (job.isCompleted()) {
+            // Job already finished, remove the entry
+            job.getLocation().purgeJob(job);
+
+          } else {
+            // Job is running, kill the job?
+            if (MessageDialog.openConfirm(Display.getDefault()
+                .getActiveShell(), "Confirm kill running Job",
+                "Do you really want to kill running Job: " + job.getJobID())) {
+              job.kill();
+            }
+          }
+        }
+      }
+    }
+  }
+
+  /**
+   * This object is the root content for this content provider
+   */
+  private static final Object CONTENT_ROOT = new Object();
+
+  private final IAction deleteAction = new DeleteAction();
+
+  private final IAction editServerAction = new EditLocationAction(this);
+
+  private final IAction newLocationAction = new NewLocationAction();
+
+  private TreeViewer viewer;
+
+  public ClusterView() {
+  }
+
+  /* @inheritDoc */
+  @Override
+  public void init(IViewSite site) throws PartInitException {
+    super.init(site);
+  }
+
+  /* @inheritDoc */
+  @Override
+  public void dispose() {
+    ServerRegistry.getInstance().removeListener(this);
+    super.dispose();
+  }
+
+  /**
+   * Creates the columns for the view
+   */
+  @Override
+  public void createPartControl(Composite parent) {
+    Tree main =
+        new Tree(parent, SWT.SINGLE | SWT.FULL_SELECTION | SWT.H_SCROLL
+            | SWT.V_SCROLL);
+    main.setHeaderVisible(true);
+    main.setLinesVisible(false);
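+    // Note: the GridData below only takes effect if the parent composite
+    // uses a GridLayout; workbench view parents typically use a fill
+    // layout, so this is defensive.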
+    main.setLayoutData(new GridData(GridData.FILL_BOTH));
+
+    TreeColumn serverCol = new TreeColumn(main, SWT.SINGLE);
+    serverCol.setText("Location");
+    serverCol.setWidth(300);
+    serverCol.setResizable(true);
+
+    TreeColumn locationCol = new TreeColumn(main, SWT.SINGLE);
+    locationCol.setText("Master node");
+    locationCol.setWidth(185);
+    locationCol.setResizable(true);
+
+    TreeColumn stateCol = new TreeColumn(main, SWT.SINGLE);
+    stateCol.setText("State");
+    stateCol.setWidth(95);
+    stateCol.setResizable(true);
+
+    TreeColumn statusCol = new TreeColumn(main, SWT.SINGLE);
+    statusCol.setText("Status");
+    statusCol.setWidth(300);
+    statusCol.setResizable(true);
+
+    viewer = new TreeViewer(main);
+    viewer.setContentProvider(this);
+    viewer.setLabelProvider(this);
+    viewer.setInput(CONTENT_ROOT); // placeholder input; the real roots come from ServerRegistry
+
+    getViewSite().setSelectionProvider(viewer);
+    
+    getViewSite().getActionBars().setGlobalActionHandler(
+        ActionFactory.DELETE.getId(), deleteAction);
+    getViewSite().getActionBars().getToolBarManager().add(editServerAction);
+    getViewSite().getActionBars().getToolBarManager().add(newLocationAction);
+
+    createActions();
+    createContextMenu();
+  }
+
+  /**
+   * Actions
+   */
+  private void createActions() {
+    /*
+     * addItemAction = new Action("Add...") { public void run() { addItem(); } };
+     * addItemAction.setImageDescriptor(ImageLibrary
+     * .get("server.view.location.new"));
+     */
+    /*
+     * deleteItemAction = new Action("Delete") { public void run() {
+     * deleteItem(); } };
+     * deleteItemAction.setImageDescriptor(getImageDescriptor("delete.gif"));
+     * 
+     * selectAllAction = new Action("Select All") { public void run() {
+     * selectAll(); } };
+     */
+    // Add selection listener.
+    viewer.addSelectionChangedListener(new ISelectionChangedListener() {
+      public void selectionChanged(SelectionChangedEvent event) {
+        updateActionEnablement();
+      }
+    });
+  }
+
+  private void addItem() {
+    // Debug stub; not yet wired to any action (see the commented-out
+    // addItemAction in createActions()).
+    System.out.println("ADD ITEM");
+  }
+
+  private void updateActionEnablement() {
+    // Currently a no-op: 'sel' is only needed once the commented-out
+    // deleteItemAction above is restored.
+    IStructuredSelection sel = (IStructuredSelection) viewer.getSelection();
+    // deleteItemAction.setEnabled(sel.size() > 0);
+  }
+
+  /**
+   * Contextual menu
+   */
+  private void createContextMenu() {
+    // Create menu manager.
+    MenuManager menuMgr = new MenuManager();
+    menuMgr.setRemoveAllWhenShown(true);
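+    // Rebuild the menu each time it is shown so its contents reflect the
+    // current selection (see menuAboutToShow below).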
+    menuMgr.addMenuListener(new IMenuListener() {
+      public void menuAboutToShow(IMenuManager mgr) {
+        fillContextMenu(mgr);
+      }
+    });
+
+    // Create menu.
+    Menu menu = menuMgr.createContextMenu(viewer.getControl());
+    viewer.getControl().setMenu(menu);
+
+    // Register menu for extension.
+    getSite().registerContextMenu(menuMgr, viewer);
+  }
+
+  private void fillContextMenu(IMenuManager mgr) {
+    mgr.add(newLocationAction);
+    mgr.add(editServerAction);
+    mgr.add(deleteAction);
+    /*
+     * mgr.add(new GroupMarker(IWorkbenchActionConstants.MB_ADDITIONS));
+     * mgr.add(deleteItemAction); mgr.add(new Separator());
+     * mgr.add(selectAllAction);
+     */
+  }
+
+  /* @inheritDoc */
+  @Override
+  public void setFocus() {
+    // Give focus to the tree so keyboard actions (e.g. Delete) work.
+    if (viewer != null)
+      viewer.getControl().setFocus();
+  }
+
+  /*
+   * IHadoopServerListener implementation
+   */
+
+  /* @inheritDoc */
+  public void serverChanged(HadoopCluster location, int type) {
+    Display.getDefault().syncExec(new Runnable() {
+      public void run() {
+        ClusterView.this.viewer.refresh();
+      }
+    });
+  }
+
+  /*
+   * IStructuredContentProvider implementation
+   */
+
+  /* @inheritDoc */
+  public void inputChanged(final Viewer viewer, Object oldInput,
+      Object newInput) {
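+    // Listen to the ServerRegistry only while this provider is attached
+    // to its content root.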
+    if (oldInput == CONTENT_ROOT)
+      ServerRegistry.getInstance().removeListener(this);
+    if (newInput == CONTENT_ROOT)
+      ServerRegistry.getInstance().addListener(this);
+  }
+
+  /**
+   * The root elements displayed by this view are the existing Hadoop
+   * locations
+   */
+  /* @inheritDoc */
+  public Object[] getElements(Object inputElement) {
+    return ServerRegistry.getInstance().getServers().toArray();
+  }
+
+  /*
+   * ITreeStructuredContentProvider implementation
+   */
+
+  /**
+   * Each location contains a child entry for each job it runs.
+   */
+  /* @inheritDoc */
+  public Object[] getChildren(Object parent) {
+
+    if (parent instanceof HadoopCluster) {
+      HadoopCluster location = (HadoopCluster) parent;
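+      // Register lazily for job updates the first time a location's
+      // children are fetched; HadoopCluster.addJobListener() is presumably
+      // idempotent, since this runs on every expansion.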
+      location.addJobListener(this);
+      Collection<HadoopJob> jobs = location.getJobs();
+      return jobs.toArray();
+    }
+
+    return null;
+  }
+
+  /* @inheritDoc */
+  public Object getParent(Object element) {
+    if (element instanceof HadoopCluster) {
+      return CONTENT_ROOT;
+
+    } else if (element instanceof HadoopJob) {
+      return ((HadoopJob) element).getLocation();
+    }
+
+    return null;
+  }
+
+  /* @inheritDoc */
+  public boolean hasChildren(Object element) {
+    /* Only server entries have children */
+    return (element instanceof HadoopCluster);
+  }
+
+  /*
+   * ITableLabelProvider implementation
+   */
+
+  /* @inheritDoc */
+  public void addListener(ILabelProviderListener listener) {
+    // no listeners handling
+  }
+
+  /* @inheritDoc */
+  public boolean isLabelProperty(Object element, String property) {
+    return false;
+  }
+
+  /* @inheritDoc */
+  public void removeListener(ILabelProviderListener listener) {
+    // no listener handling
+  }
+
+  /* @inheritDoc */
+  public Image getColumnImage(Object element, int columnIndex) {
+    if ((columnIndex == 0) && (element instanceof HadoopCluster)) {
+      return ImageLibrary.getImage("server.view.location.entry");
+
+    } else if ((columnIndex == 0) && (element instanceof HadoopJob)) {
+      return ImageLibrary.getImage("server.view.job.entry");
+    }
+    return null;
+  }
+
+  /* @inheritDoc */
+  public String getColumnText(Object element, int columnIndex) {
+    if (element instanceof HadoopCluster) {
+      HadoopCluster server = (HadoopCluster) element;
+
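+      // Columns: 0 = Location, 1 = Master node, 2 = State, 3 = Status
+      // (matching the TreeColumns created in createPartControl()).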
+      switch (columnIndex) {
+        case 0:
+          return server.getLocationName();
+        case 1:
+          return server.getMasterHostName().toString();
+        case 2:
+          return server.getState();
+        case 3:
+          return "";
+      }
+    } else if (element instanceof HadoopJob) {
+      HadoopJob job = (HadoopJob) element;
+
+      switch (columnIndex) {
+        case 0:
+          return job.getJobID().toString();
+        case 1:
+          return "";
+        case 2:
+          return job.getState().toString();
+        case 3:
+          return job.getStatus();
+      }
+    } else if (element instanceof JarModule) {
+      JarModule jar = (JarModule) element;
+
+      switch (columnIndex) {
+        case 0:
+          return jar.toString();
+        case 1:
+          return "Publishing jar to server...";
+        case 2:
+          return "";
+      }
+    }
+
+    return null;
+  }
+
+  /*
+   * IJobListener (Map/Reduce Jobs listener) implementation
+   */
+
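+  // Note: unlike serverChanged() above, these callbacks refresh the
+  // viewer directly and therefore assume they are invoked on the UI
+  // thread; otherwise they would need Display.syncExec() as well.
+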
+  /* @inheritDoc */
+  public void jobAdded(HadoopJob job) {
+    viewer.refresh();
+  }
+
+  /* @inheritDoc */
+  public void jobRemoved(HadoopJob job) {
+    viewer.refresh();
+  }
+
+  /* @inheritDoc */
+  public void jobChanged(HadoopJob job) {
+    viewer.refresh(job);
+  }
+
+  /* @inheritDoc */
+  public void publishDone(JarModule jar) {
+    viewer.refresh();
+  }
+
+  /* @inheritDoc */
+  public void publishStart(JarModule jar) {
+    viewer.refresh();
+  }
+
+  /*
+   * Miscellaneous
+   */
+
+  /**
+   * Return the currently selected server (null if there is no selection or
+   * if the selection is not a server)
+   * 
+   * @return the currently selected server entry
+   */
+  public HadoopCluster getSelectedServer() {
+    ITreeSelection selection = (ITreeSelection) viewer.getSelection();
+    Object first = selection.getFirstElement();
+    if (first instanceof HadoopCluster) {
+      return (HadoopCluster) first;
+    }
+    return null;
+  }
+
+}

