hadoop-common-commits mailing list archives

From cutt...@apache.org
Subject svn commit: r566838 [4/4] - in /lucene/hadoop/trunk: ./ src/contrib/ src/contrib/eclipse-plugin/ src/contrib/eclipse-plugin/.settings/ src/contrib/eclipse-plugin/META-INF/ src/contrib/eclipse-plugin/resources/ src/contrib/eclipse-plugin/src/ src/contri...
Date Thu, 16 Aug 2007 20:43:16 GMT
Added: lucene/hadoop/trunk/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/server/HadoopServer.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/server/HadoopServer.java?view=auto&rev=566838
==============================================================================
--- lucene/hadoop/trunk/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/server/HadoopServer.java (added)
+++ lucene/hadoop/trunk/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/server/HadoopServer.java Thu Aug 16 13:43:12 2007
@@ -0,0 +1,683 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.eclipse.server;
+
+import java.io.BufferedInputStream;
+import java.io.BufferedReader;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.StringReader;
+import java.net.HttpURLConnection;
+import java.net.MalformedURLException;
+import java.net.Socket;
+import java.net.URL;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.TreeMap;
+import java.util.Vector;
+import java.util.logging.Logger;
+
+import javax.net.SocketFactory;
+
+import org.apache.hadoop.eclipse.JSchUtilities;
+import org.apache.hadoop.eclipse.launch.SWTUserInfo;
+import org.apache.hadoop.eclipse.servers.ServerRegistry;
+import org.eclipse.core.runtime.CoreException;
+import org.eclipse.core.runtime.IProgressMonitor;
+import org.eclipse.core.runtime.IStatus;
+import org.eclipse.core.runtime.Status;
+import org.eclipse.core.runtime.jobs.Job;
+import org.eclipse.debug.core.DebugPlugin;
+import org.eclipse.debug.core.ILaunchConfiguration;
+import org.eclipse.debug.core.ILaunchConfigurationType;
+import org.eclipse.debug.core.ILaunchConfigurationWorkingCopy;
+import org.eclipse.debug.core.model.ILaunchConfigurationDelegate;
+import org.eclipse.debug.ui.DebugUITools;
+import org.eclipse.swt.widgets.Display;
+
+import com.jcraft.jsch.JSchException;
+import com.jcraft.jsch.Session;
+
+/**
+ * Methods for defining and interacting with a Hadoop MapReduce server
+ */
+
+public class HadoopServer {
+
+  private static final int JOB_TRACKER_PORT = 50030;
+
+  private PingJob ping;
+
+  protected static final long PING_DELAY = 1500;
+
+  /**
+   * Location of Hadoop jars on the server
+   */
+  private String installPath;
+
+  /**
+   * User name to use to connect to the server
+   */
+  private String userName;
+
+  /**
+   * Host name of the hadoop server
+   */
+  private String hostName;
+
+  private String password;
+
+  // state and status - transient
+  private transient String state = "";
+
+  private transient Map<String, HadoopJob> jobs =
+      Collections.synchronizedMap(new TreeMap<String, HadoopJob>());
+
+  private transient List<JarModule> jars =
+      Collections.synchronizedList(new ArrayList<JarModule>());
+
+  /**
+   * User-defined name for the server (set from Eclipse)
+   */
+  private String name;
+
+  // the machine that we are tunneling through to get to the Hadoop server
+
+  /**
+   * Host name of the tunneling machine
+   */
+  private String tunnelHostName;
+
+  /**
+   * User name to use to connect to the tunneling machine
+   */
+  private String tunnelUserName;
+
+  private String tunnelPassword;
+
+  static Logger log = Logger.getLogger(HadoopServer.class.getName());
+
+  public HadoopServer(String uri, String name) {
+    this.name = name;
+
+    String[] hostInfo = uri.split(":");
+    String[] loginInfo = hostInfo[0].split("@");
+
+    installPath = hostInfo[1];
+    userName = loginInfo[0];
+    hostName = loginInfo[1];
+  }
+
+  public HadoopServer(String uri, String name, String tunnelVia,
+      String tunnelUserName) {
+    this(uri, name);
+    this.tunnelHostName = tunnelVia;
+    this.tunnelUserName = tunnelUserName;
+  }
+
+  /**
+   * Create an SSH session with no timeout
+   * 
+   * @return Session object with no timeout
+   * @throws JSchException
+   */
+  public Session createSessionNoTimeout() throws JSchException {
+    return createSession(0);
+  }
+
+  /**
+   * Create an SSH session with no timeout
+   * 
+   * @return Session object with no timeout
+   * @throws JSchException
+   */
+  public Session createSession() throws JSchException {
+    return createSession(0);
+  }
+
+  /**
+   * Creates an SSH session with a specified timeout
+   * 
+   * @param timeout the amount of time before the session expires
+   * @return the created Session object representing the SSH session
+   * @throws JSchException
+   */
+  public Session createSession(int timeout) throws JSchException {
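+    // Direct connection when no tunnel host is configured; otherwise an
+    // SSH tunnel is opened first and the session connects to its local
+    // forwarded port.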
+    if (tunnelHostName == null) {
+      Session session =
+          JSchUtilities.createJSch().getSession(userName, hostName, 22);
+      session.setUserInfo(new SWTUserInfo() {
+        @Override
+        public String getPassword() {
+          return password;
+        }
+
+        @Override
+        public void setPassword(String pass) {
+          HadoopServer.this.password = pass;
+        }
+
+      });
+      if (!session.isConnected()) {
+        try {
+          session.connect();
+        } catch (JSchException jse) {
+          if (jse.getMessage().equals("Auth fail"))
+            this.password = null;
+          throw jse;
+        }
+      }
+
+      if (timeout > -1) {
+        session.setTimeout(timeout);
+      }
+      return session;
+    } else {
+      createSshTunnel();
+
+      Session session =
+          JSchUtilities.createJSch().getSession(userName, "localhost",
+              tunnelPort);
+      session.setUserInfo(new SWTUserInfo() {
+        @Override
+        public String getPassword() {
+          return HadoopServer.this.password;
+        }
+
+        @Override
+        public void setPassword(String pass) {
+          HadoopServer.this.password = pass;
+        }
+      });
+      if (!session.isConnected()) {
+        try {
+          session.connect();
+        } catch (JSchException jse) {
+          if (jse.getMessage().equals("Auth fail"))
+            this.password = null;
+          throw jse;
+        }
+      }
+      if (timeout > -1) {
+        session.setTimeout(timeout);
+      }
+      return session;
+    }
+  }
+
+  private Session createTunnel(int port) throws JSchException {
+    Session tunnel;
+
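+    // Probe for a free local port by binding an ephemeral socket,
+    // retrying up to five times.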
+    tunnelPort = -1;
+    for (int i = 0; (i <= 4) && (tunnelPort == -1); i++) {
+      try {
+        Socket socket = SocketFactory.getDefault().createSocket();
+        socket.bind(null);
+        tunnelPort = socket.getLocalPort();
+        socket.close();
+      } catch (IOException e) {
+        // ignore, retry
+      }
+    }
+
+    if (tunnelPort == -1) {
+      throw new JSchException("No free local port found to bind to");
+    }
+
+    tunnel =
+        JSchUtilities.createJSch().getSession(tunnelUserName,
+            tunnelHostName, 22);
+    tunnel.setTimeout(0);
+    tunnel.setPortForwardingL(tunnelPort, hostName, port);
+    tunnel.setUserInfo(new SWTUserInfo() {
+      @Override
+      public String getPassword() {
+        return tunnelPassword;
+      }
+
+      @Override
+      public void setPassword(String password) {
+        tunnelPassword = password;
+      }
+    });
+    try {
+      tunnel.connect();
+    } catch (JSchException jse) {
+      if (jse.getMessage().equals("Auth fail"))
+        this.tunnelPassword = null;
+      throw jse;
+    }
+
+    return tunnel;
+  }
+
+  private void createSshTunnel() throws JSchException {
+    if ((sshTunnel != null) && sshTunnel.isConnected()) {
+      sshTunnel.disconnect();
+    }
+
+    sshTunnel = createTunnel(22);
+  }
+
+  private void createHttpTunnel(int port) throws JSchException {
+    if ((httpTunnel == null) || !httpTunnel.isConnected()) {
+      httpTunnel = createTunnel(port);
+    }
+  }
+
+  public String getHostName() {
+    if ((tunnelHostName != null) && (tunnelHostName.length() > 0)) {
+      return "localhost";
+    }
+
+    return hostName;
+  }
+
+  public void setHostname(String hostname) {
+    this.hostName = hostname;
+  }
+
+  /**
+   * Gets the path where the hadoop jars are stored.
+   * 
+   * @return String containing the path to the hadoop jars.
+   */
+  public String getInstallPath() {
+    return installPath;
+  }
+
+  /**
+   * Sets the path where the hadoop jars are stored.
+   * 
+   * @param path The directory where the hadoop jars are stored.
+   */
+  public void setPath(String path) {
+    this.installPath = path;
+  }
+
+  public String getUserName() {
+    return userName;
+  }
+
+  public void setUser(String user) {
+    this.userName = user;
+  }
+
+  public String getPassword() {
+    return password;
+  }
+
+  public void setPassword(String password) {
+    log.fine("Server password set to " + password);
+    this.password = password;
+  }
+
+  @Override
+  public String toString() {
+    return this.userName + "@" + this.hostName + ":" + this.installPath;
+  }
+
+  public String getName() {
+    return this.name;
+  }
+
+  /**
+   * Returns the URL for the Job Tracker (default is port 50030)
+   * 
+   * @return URL for the Job Tracker
+   * @throws MalformedURLException
+   */
+  public URL getJobTrackerUrl() throws MalformedURLException {
+    if (tunnelHostName == null) {
+      return new URL("http://" + getHostName() + ":" + JOB_TRACKER_PORT
+          + "/jobtracker.jsp");
+    } else {
+      try {
+        createHttpTunnel(JOB_TRACKER_PORT);
+
+        String port = httpTunnel.getPortForwardingL()[0].split(":")[0];
+        return new URL("http://localhost:" + port + "/jobtracker.jsp");
+      } catch (JSchException e) {
+        // BUG(jz) -- need to display error here
+        return null;
+      }
+    }
+  }
+
+  public String getState() {
+    return state;
+  }
+
+  public Object[] getChildren() {
+    /*
+     * List all elements that should be present in the Server window (all
+     * servers and all jobs running on each server)
+     */
+    checkPingJobRunning();
+    Collection<Object> collection =
+        new ArrayList<Object>(this.jobs.values());
+    collection.addAll(jars);
+    return collection.toArray();
+  }
+
+  private synchronized void checkPingJobRunning() {
+    if (ping == null) {
+      ping = new PingJob();
+      ping.setSystem(true);
+      ping.schedule();
+    }
+  }
+
+  private HashSet<IJobListener> jobListeners = new HashSet<IJobListener>();
+
+  private Session sshTunnel;
+
+  private Session httpTunnel;
+
+  private int tunnelPort;
+
+  private int id;
+
+  public void addJobListener(IJobListener l) {
+    jobListeners.add(l);
+  }
+
+  protected void fireJobChanged(HadoopJob job) {
+    for (IJobListener listener : jobListeners) {
+      listener.jobChanged(job);
+    }
+  }
+
+  protected void fireJobAdded(HadoopJob job) {
+    for (IJobListener listener : jobListeners) {
+      listener.jobAdded(job);
+    }
+  }
+
+  protected void fireJarPublishStart(JarModule jar) {
+    for (IJobListener listener : jobListeners) {
+      listener.publishStart(jar);
+    }
+  }
+
+  protected void fireJarPublishDone(JarModule jar) {
+    for (IJobListener listener : jobListeners) {
+      listener.publishDone(jar);
+    }
+  }
+
+  public void runJar(JarModule jar, IProgressMonitor monitor) {
+    log.fine("Run Jar: " + jar);
+    ILaunchConfigurationType launchConfigType =
+        DebugPlugin.getDefault().getLaunchManager()
+            .getLaunchConfigurationType(
+                "org.apache.hadoop.eclipse.launch.StartServer");
+
+    jars.add(jar);
+    fireJarPublishStart(jar);
+
+    try {
+      ILaunchConfiguration[] matchingConfigs =
+          DebugPlugin.getDefault().getLaunchManager()
+              .getLaunchConfigurations(launchConfigType);
+      ILaunchConfiguration launchConfig = null;
+
+      // TODO(jz) allow choosing correct config, for now we're always
+      // going to use the first
+      if (matchingConfigs.length == 1) {
+        launchConfig = matchingConfigs[0];
+      } else {
+        launchConfig =
+            launchConfigType
+                .newInstance(null, DebugPlugin.getDefault()
+                    .getLaunchManager()
+                    .generateUniqueLaunchConfigurationNameFrom(
+                        "Run Hadoop Jar"));
+      }
+
+      ILaunchConfigurationWorkingCopy copy =
+          launchConfig
+              .copy("Run " + jar.getName() + " on " + this.getName());
+
+      // COMMENTED(jz) - perform the jarring in the launch delegate now
+      // copy.setAttribute("hadoop.jar",
+      // jar.buildJar(monitor).toString());
+
+      copy.setAttribute("hadoop.jarrable", jar.toMemento());
+      copy.setAttribute("hadoop.host", this.getHostName());
+      copy.setAttribute("hadoop.user", this.getUserName());
+      copy.setAttribute("hadoop.serverid", this.id);
+      copy.setAttribute("hadoop.path", this.getInstallPath());
+      ILaunchConfiguration saved = copy.doSave();
+
+      // NOTE(jz) getDelegate(String) became deprecated in 3.3, replaced
+      // with the getDelegates (plural) method; as the new method is
+      // marked experimental, leaving as-is for now
+      ILaunchConfigurationDelegate delegate =
+          launchConfigType.getDelegate("run");
+      // only support run for now
+      DebugUITools.launch(saved, "run");
+    } catch (CoreException e) {
+      // TODO(jz) autogen
+      e.printStackTrace();
+    } finally {
+      jars.remove(jar);
+      fireJarPublishDone(jar);
+    }
+  }
+
+  public class PingJob extends Job {
+    public PingJob() {
+      super("Get MapReduce server status");
+    }
+
+    @Override
+    protected IStatus run(IProgressMonitor monitor) {
+      HttpURLConnection connection = null;
+
+      try {
+        connection = (HttpURLConnection) getJobTrackerUrl().openConnection();
+        connection.connect();
+
+        String previousState = state;
+
+        if (connection.getResponseCode() == 200) {
+          state = "Started";
+
+          StringBuffer string = new StringBuffer();
+          byte[] buffer = new byte[1024];
+          InputStream in =
+              new BufferedInputStream(connection.getInputStream());
+          int bytes = 0;
+          while ((bytes = in.read(buffer)) != -1) {
+            string.append(new String(buffer, 0, bytes));
+          }
+
+          HadoopJob[] jobData = getJobData(string.toString());
+          for (int i = 0; i < jobData.length; i++) {
+            HadoopJob job = jobData[i];
+            if (jobs.containsKey(job.getId())) {
+              updateJob(job);
+            } else {
+              addJob(job);
+            }
+          }
+        } else {
+          state = "Stopped";
+        }
+
+        if (!state.equals(previousState)) {
+          ServerRegistry.getInstance().stateChanged(HadoopServer.this);
+        }
+      } catch (Exception e) {
+        state = "Stopped (Connection Error)";
+      }
+
+      schedule(PING_DELAY);
+      return Status.OK_STATUS;
+    }
+  }
+
+  private void updateJob(final HadoopJob data) {
+    jobs.put(data.getId(), data);
+    // TODO(jz) only if it has changed
+    Display.getDefault().syncExec(new Runnable() {
+      public void run() {
+        fireJobChanged(data);
+      }
+    });
+  }
+
+  private void addJob(final HadoopJob data) {
+    jobs.put(data.getId(), data);
+
+    Display.getDefault().syncExec(new Runnable() {
+      public void run() {
+        fireJobAdded(data);
+      }
+    });
+  }
+
+  /**
+   * Parse the job tracker data to display currently running and completed
+   * jobs.
+   * 
+   * @param jobTrackerHtml The HTML returned from the Job Tracker port
+   * @return an array of HadoopJob objects containing job status info
+   */
+  public HadoopJob[] getJobData(String jobTrackerHtml) {
+    try {
+      Vector<HadoopJob> jobsVector = new Vector<HadoopJob>();
+
+      BufferedReader in =
+          new BufferedReader(new StringReader(jobTrackerHtml));
+
+      String inputLine;
+
+      boolean completed = false;
+      while ((inputLine = in.readLine()) != null) {
+        // stop once we reach failed jobs (which are after running and
+        // completed jobs)
+        if (inputLine.indexOf("Failed Jobs") != -1) {
+          break;
+        }
+
+        if (inputLine.indexOf("Completed Jobs") != -1) {
+          completed = true;
+        }
+
+        // skip lines without data (stored in a table)
+        if (!inputLine.startsWith("<tr><td><a")) {
+          // log.debug (" > " + inputLine, verbose);
+          continue;
+        }
+
+        HadoopJob jobData = new HadoopJob(HadoopServer.this);
+
+        String[] values = inputLine.split("</td><td>");
+
+        String jobId = values[0].trim();
+        String realJobId =
+            jobId.substring(jobId.lastIndexOf("_") + 1, jobId
+                .lastIndexOf("_") + 5);
+        String name = values[2].trim();
+        if (name.equals("&nbsp;")) {
+          name = "(untitled)";
+        }
+        jobData.name = name + "(" + realJobId + ")";
+        jobData.jobId = "job_" + realJobId;
+        jobData.completed = completed;
+
+        jobData.mapPercentage = values[3].trim();
+        jobData.totalMaps = values[4].trim();
+        jobData.completedMaps = values[5].trim();
+        jobData.reducePercentage = values[6].trim();
+        jobData.totalReduces = values[7].trim();
+        jobData.completedReduces =
+            values[8].substring(0, values[8].indexOf("<")).trim();
+
+        jobsVector.addElement(jobData);
+      }
+
+      in.close();
+
+      // convert vector to array
+      HadoopJob[] jobArray = new HadoopJob[jobsVector.size()];
+      for (int j = 0; j < jobsVector.size(); j++) {
+        jobArray[j] = jobsVector.elementAt(j);
+      }
+
+      return jobArray;
+    } catch (Exception e) {
+      e.printStackTrace();
+    }
+
+    return null;
+  }
+
+  public void dispose() {
+    if ((sshTunnel != null) && sshTunnel.isConnected()) {
+      sshTunnel.disconnect();
+    }
+
+    if ((httpTunnel != null) && httpTunnel.isConnected()) {
+      httpTunnel.disconnect();
+    }
+  }
+
+  public String getTunnelHostName() {
+    return tunnelHostName;
+  }
+
+  public String getTunnelUserName() {
+    return tunnelUserName;
+  }
+
+  public void setId(int i) {
+    this.id = i;
+  }
+
+  public void setName(String newName) {
+    this.name = newName;
+  }
+
+  public void setURI(String newURI) {
+    String[] hostInfo = newURI.split(":");
+    String[] loginInfo = hostInfo[0].split("@");
+
+    installPath = hostInfo[1];
+    userName = loginInfo[0];
+    hostName = loginInfo[1];
+  }
+  
+  public void setTunnel(String tunnelHostName, String tunnelUserName) {
+    this.tunnelHostName = tunnelHostName;
+    this.tunnelUserName = tunnelUserName;
+  }
+  
+  /**
+   * Returns whether this server uses SSH tunneling or not
+   * @return whether this server uses SSH tunneling or not
+   */
+  public boolean useTunneling() {
+    return (this.tunnelHostName != null);
+  }
+  
+}
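
A minimal usage sketch for the class above (illustrative, not part of this
patch): the URI follows the user@host:path form parsed by the constructor,
and the host name and install path below are hypothetical.

import org.apache.hadoop.eclipse.server.HadoopServer;

import com.jcraft.jsch.Session;

public class HadoopServerSketch {
  public static void main(String[] args) throws Exception {
    // "user@host:path" is split on ':' and '@' by the constructor
    HadoopServer server =
        new HadoopServer("hadoop@cluster.example.com:/opt/hadoop",
            "Example Server");

    System.out.println(server); // hadoop@cluster.example.com:/opt/hadoop
    System.out.println(server.getJobTrackerUrl()); // port 50030 by default

    // createSession() prompts for the password via SWTUserInfo and
    // connects over SSH port 22; this needs a reachable server and an
    // SWT display for the prompt.
    Session session = server.createSession();
    session.disconnect();
    server.dispose();
  }
}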

Added: lucene/hadoop/trunk/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/server/IJobListener.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/server/IJobListener.java?view=auto&rev=566838
==============================================================================
--- lucene/hadoop/trunk/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/server/IJobListener.java (added)
+++ lucene/hadoop/trunk/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/server/IJobListener.java Thu Aug 16 13:43:12 2007
@@ -0,0 +1,33 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.eclipse.server;
+
+/**
+ * Interface for updating/adding jobs to the MapReduce Server view.
+ */
+
+public interface IJobListener {
+  void jobChanged(HadoopJob job);
+
+  void jobAdded(HadoopJob job);
+
+  void publishStart(JarModule jar);
+
+  void publishDone(JarModule jar);
+}
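
A sketch of an IJobListener implementation (illustrative only, not in this
patch); it relies on the HadoopJob.getId() and JarModule.getName()
accessors used elsewhere in this commit, and simply logs the events that a
real listener would render in the MapReduce Servers view.

import org.apache.hadoop.eclipse.server.HadoopJob;
import org.apache.hadoop.eclipse.server.IJobListener;
import org.apache.hadoop.eclipse.server.JarModule;

public class LoggingJobListener implements IJobListener {
  public void jobChanged(HadoopJob job) {
    System.out.println("job changed: " + job.getId());
  }

  public void jobAdded(HadoopJob job) {
    System.out.println("job added: " + job.getId());
  }

  public void publishStart(JarModule jar) {
    System.out.println("publishing jar: " + jar.getName());
  }

  public void publishDone(JarModule jar) {
    System.out.println("published jar: " + jar.getName());
  }
}

Such a listener would be registered with
HadoopServer.addJobListener(new LoggingJobListener()).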

Added: lucene/hadoop/trunk/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/server/JarModule.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/server/JarModule.java?view=auto&rev=566838
==============================================================================
--- lucene/hadoop/trunk/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/server/JarModule.java (added)
+++ lucene/hadoop/trunk/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/server/JarModule.java Thu Aug 16 13:43:12 2007
@@ -0,0 +1,102 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.eclipse.server;
+
+import java.io.File;
+import java.util.logging.Logger;
+
+import org.apache.hadoop.eclipse.Activator;
+import org.eclipse.core.resources.IResource;
+import org.eclipse.core.resources.ResourcesPlugin;
+import org.eclipse.core.runtime.IProgressMonitor;
+import org.eclipse.core.runtime.Path;
+import org.eclipse.jdt.core.ICompilationUnit;
+import org.eclipse.jdt.core.IJavaElement;
+import org.eclipse.jdt.core.IJavaProject;
+import org.eclipse.jdt.core.IType;
+import org.eclipse.jdt.core.JavaCore;
+import org.eclipse.jdt.ui.jarpackager.IJarExportRunnable;
+import org.eclipse.jdt.ui.jarpackager.JarPackageData;
+
+
+/**
+ * Methods for interacting with the jar file containing the 
+ * Mapper/Reducer/Driver classes for a MapReduce job.
+ */
+
+public class JarModule {
+  static Logger log = Logger.getLogger(JarModule.class.getName());
+
+  private final IResource resource;
+
+  public JarModule(IResource resource) {
+    this.resource = resource;
+  }
+
+  /**
+   * Create the jar file containing all the MapReduce job classes.
+   */
+  public File buildJar(IProgressMonitor monitor) {
+    log.fine("Build jar");
+    JarPackageData jarrer = new JarPackageData();
+
+    jarrer.setExportJavaFiles(true);
+    jarrer.setExportClassFiles(true);
+    jarrer.setExportOutputFolders(true);
+    jarrer.setOverwrite(true);
+
+    Path path;
+
+    try {
+      IJavaProject project = (IJavaProject) resource.getProject().getNature(
+          JavaCore.NATURE_ID); // todo(jz)
+      // check this is the case before letting this method get called
+      Object element = resource.getAdapter(IJavaElement.class);
+      IType type = ((ICompilationUnit) element).findPrimaryType();
+      jarrer.setManifestMainClass(type);
+      path = new Path(new File(Activator.getDefault().getStateLocation()
+          .toFile(), resource.getProject().getName() + "_project_hadoop_"
+          + resource.getName() + "_" + System.currentTimeMillis() + ".jar")
+          .getAbsolutePath());
+      jarrer.setJarLocation(path);
+
+      jarrer.setElements(resource.getProject().members(IResource.FILE));
+      IJarExportRunnable runnable = jarrer.createJarExportRunnable(null);
+      runnable.run(monitor);
+    } catch (Exception e) {
+      e.printStackTrace();
+      throw new RuntimeException(e);
+    }
+
+    return path.toFile();
+  }
+
+  public String getName() {
+    return resource.getProject().getName() + "/" + resource.getName();
+  }
+
+  public static JarModule fromMemento(String memento) {
+    return new JarModule(ResourcesPlugin.getWorkspace().getRoot().findMember(
+        Path.fromPortableString(memento)));
+  }
+
+  public String toMemento() {
+    return resource.getFullPath().toPortableString();
+  }
+}
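
A sketch of the memento round-trip that runJar() relies on (not part of
this patch): a JarModule is flattened to its workspace path with
toMemento() and reconstructed in the launch delegate with fromMemento().
The workspace path below is hypothetical, and the code assumes a running
Eclipse workspace.

import org.apache.hadoop.eclipse.server.JarModule;

import org.eclipse.core.resources.IResource;
import org.eclipse.core.resources.ResourcesPlugin;
import org.eclipse.core.runtime.Path;

public class JarModuleMementoSketch {
  public static void sketch() {
    // hypothetical driver class in a workspace project named MyJob
    IResource driver =
        ResourcesPlugin.getWorkspace().getRoot().findMember(
            Path.fromPortableString("/MyJob/src/WordCount.java"));

    String memento = new JarModule(driver).toMemento(); // "/MyJob/src/WordCount.java"
    JarModule restored = JarModule.fromMemento(memento);
    System.out.println(restored.getName());             // "MyJob/WordCount.java"
  }
}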

Added: lucene/hadoop/trunk/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/servers/DefineHadoopServerLocWizardPage.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/servers/DefineHadoopServerLocWizardPage.java?view=auto&rev=566838
==============================================================================
--- lucene/hadoop/trunk/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/servers/DefineHadoopServerLocWizardPage.java (added)
+++ lucene/hadoop/trunk/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/servers/DefineHadoopServerLocWizardPage.java Thu Aug 16 13:43:12 2007
@@ -0,0 +1,445 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.eclipse.servers;
+
+import java.io.BufferedReader;
+import java.io.IOException;
+import java.io.InputStreamReader;
+import java.lang.reflect.InvocationTargetException;
+
+import org.apache.hadoop.eclipse.server.HadoopServer;
+import org.eclipse.core.runtime.IProgressMonitor;
+import org.eclipse.jface.dialogs.IMessageProvider;
+import org.eclipse.jface.dialogs.ProgressMonitorDialog;
+import org.eclipse.jface.operation.IRunnableWithProgress;
+import org.eclipse.jface.wizard.WizardPage;
+import org.eclipse.swt.SWT;
+import org.eclipse.swt.events.SelectionEvent;
+import org.eclipse.swt.layout.GridData;
+import org.eclipse.swt.layout.GridLayout;
+import org.eclipse.swt.widgets.Button;
+import org.eclipse.swt.widgets.Composite;
+import org.eclipse.swt.widgets.Display;
+import org.eclipse.swt.widgets.Event;
+import org.eclipse.swt.widgets.Group;
+import org.eclipse.swt.widgets.Label;
+import org.eclipse.swt.widgets.Listener;
+import org.eclipse.swt.widgets.Text;
+
+import com.jcraft.jsch.ChannelExec;
+import com.jcraft.jsch.JSchException;
+import com.jcraft.jsch.Session;
+
+/**
+ * Wizard for defining the location of a Hadoop server
+ */
+
+public class DefineHadoopServerLocWizardPage extends WizardPage {
+
+  /**
+   * User-defined name for the hadoop server
+   */
+  private Text serverName;
+
+  /**
+   * Host name of the Hadoop server
+   */
+  private Text hostName;
+
+  /**
+   * Location of Hadoop jars on the server
+   */
+  private Text installPath;
+
+  /**
+   * User name to use to connect to the Hadoop server
+   */
+  private Text userName;
+
+  private Button useSSHTunnel;
+
+  /**
+   * User name to use to connect to the tunneling machine
+   */
+  private Text tunnelUserName;
+
+  /**
+   * Host name of the tunneling machine
+   */
+  private Text tunnelHostName;
+
+  /**
+   * HadoopServer instance currently edited by this form (null if we create a
+   * new one)
+   */
+  private HadoopServer editedServer;
+
+  /**
+   * Constructor to create a new Hadoop server
+   */
+  public DefineHadoopServerLocWizardPage() {
+    super("Hadoop Server", "Define Hadoop Server Location", null);
+    this.editedServer = null;
+  }
+
+  /**
+   * Constructor to edit the parameters of an existing Hadoop server
+   * 
+   * @param server
+   */
+  public DefineHadoopServerLocWizardPage(HadoopServer server) {
+    super("Hadoop Server", "Edit Hadoop Server Location", null);
+    this.editedServer = server;
+  }
+
+  /**
+   * Fill the server values from the form values
+   * 
+   * @return the created or updated HadoopServer instance
+   */
+  private HadoopServer defineServerFromValues() {
+    String uri =
+        userName.getText() + "@" + hostName.getText() + ":"
+            + installPath.getText();
+
+    if (editedServer == null) {
+      // Create and register the new HadoopServer
+      this.editedServer =
+          new HadoopServer(uri, serverName.getText(), (useSSHTunnel
+              .getSelection()) ? tunnelHostName.getText() : null,
+              (useSSHTunnel.getSelection()) ? tunnelUserName.getText()
+                  : null);
+      ServerRegistry.getInstance().addServer(this.editedServer);
+
+    } else {
+
+      // Update values of the already existing HadoopServer
+      editedServer.setName(this.serverName.getText());
+      editedServer.setURI(uri);
+      if (useSSHTunnel.getSelection())
+        editedServer.setTunnel(tunnelHostName.getText(), tunnelUserName
+            .getText());
+      else
+        editedServer.setTunnel(null, null);
+
+      ServerRegistry.getInstance().stateChanged(this.editedServer);
+    }
+
+    return this.editedServer;
+  }
+
+  /**
+   * Fill the form values from the server instance
+   */
+  private void defineValuesFromServer() {
+    if (this.editedServer == null) {
+      // Setup values for a new empty instance
+      // Do nothing as it may trigger listeners!!!
+      /*
+       * serverName.setText(""); userName.setText(""); hostName.setText("");
+       * installPath.setText(""); useSSHTunnel.setSelection(false);
+       * tunnelHostName.setText(""); tunnelUserName.setText("");
+       */
+    } else {
+      // Setup values from the server instance
+      serverName.setText(editedServer.getName());
+      userName.setText(editedServer.getUserName());
+      hostName.setText(editedServer.getHostName());
+      installPath.setText(editedServer.getInstallPath());
+      if (editedServer.useTunneling()) {
+        useSSHTunnel.setSelection(true);
+        tunnelHostName.setText(editedServer.getTunnelHostName());
+        tunnelUserName.setText(editedServer.getTunnelUserName());
+      } else {
+        useSSHTunnel.setSelection(false);
+        tunnelHostName.setText("");
+        tunnelUserName.setText("");
+      }
+    }
+  }
+
+  /**
+   * Performs any actions appropriate in response to the user having pressed
+   * the Finish button, or refuse if finishing now is not permitted.
+   * 
+   * @return Object containing information about the Hadoop server
+   */
+  public HadoopServer performFinish() {
+    try {
+      return defineServerFromValues();
+    } catch (Exception e) {
+      e.printStackTrace();
+      setMessage("Invalid server location values", IMessageProvider.ERROR);
+      return null;
+    }
+  }
+
+  /**
+   * Validates whether Hadoop exists at the specified server location
+   * 
+   */
+  private void testLocation() {
+    ProgressMonitorDialog dialog = new ProgressMonitorDialog(getShell());
+    dialog.setOpenOnRun(true);
+
+    try {
+      final HadoopServer location = defineServerFromValues();
+
+      try {
+        dialog.run(true, false, new IRunnableWithProgress() {
+          public void run(IProgressMonitor monitor)
+              throws InvocationTargetException, InterruptedException {
+            Session session = null;
+            try {
+              session = location.createSession();
+              try {
+                ChannelExec channel =
+                    (ChannelExec) session.openChannel("exec");
+                channel.setCommand(location.getInstallPath()
+                    + "/bin/hadoop version");
+                BufferedReader response =
+                    new BufferedReader(new InputStreamReader(channel
+                        .getInputStream()));
+                channel.connect();
+                final String versionLine = response.readLine();
+
+                if ((versionLine != null)
+                    && versionLine.startsWith("Hadoop")) {
+                  Display.getDefault().syncExec(new Runnable() {
+                    public void run() {
+                      setMessage("Found " + versionLine,
+                          IMessageProvider.INFORMATION);
+                    }
+                  });
+                } else {
+                  Display.getDefault().syncExec(new Runnable() {
+                    public void run() {
+                      setMessage("No Hadoop Found in this location",
+                          IMessageProvider.WARNING);
+                    }
+                  });
+                }
+              } finally {
+                session.disconnect();
+                location.dispose();
+              }
+            } catch (final JSchException e) {
+              Display.getDefault().syncExec(new Runnable() {
+                public void run() {
+                  System.err.println(e.getMessage());
+                  setMessage("Problems connecting to server: "
+                      + e.getLocalizedMessage(), IMessageProvider.WARNING);
+                }
+              });
+            } catch (final IOException e) {
+              Display.getDefault().syncExec(new Runnable() {
+                public void run() {
+                  setMessage("Problems communicating with server: "
+                      + e.getLocalizedMessage(), IMessageProvider.WARNING);
+                }
+              });
+            } catch (final Exception e) {
+              Display.getDefault().syncExec(new Runnable() {
+                public void run() {
+                  setMessage("Errors encountered connecting to server: "
+                      + e.getLocalizedMessage(), IMessageProvider.WARNING);
+                }
+              });
+            } finally {
+              if (session != null) {
+                session.disconnect();
+              }
+            }
+          }
+
+        });
+      } catch (InvocationTargetException e) {
+        // TODO Auto-generated catch block
+        e.printStackTrace();
+      } catch (InterruptedException e) {
+        // TODO Auto-generated catch block
+        e.printStackTrace();
+      }
+    } catch (Exception e) {
+      setMessage("Invalid server location", IMessageProvider.WARNING);
+      return;
+    }
+  }
+
+  /**
+   * Location is not complete (and finish button not available) until a
+   * hostname is specified.
+   */
+  @Override
+  public boolean isPageComplete() {
+    return hostName.getText().length() > 0;
+  }
+
+  public boolean canFinish() {
+    return isPageComplete();
+  }
+
+  /**
+   * Create the overall wizard
+   */
+  public void createControl(Composite parent) {
+
+    setTitle("Define Hadoop Server Location");
+    setDescription("Define the location of a Hadoop server for running MapReduce applications.");
+    Composite panel = new Composite(parent, SWT.NONE);
+    GridLayout layout = new GridLayout();
+    layout.numColumns = 3;
+    layout.makeColumnsEqualWidth = false;
+    panel.setLayout(layout);
+    panel.setLayoutData(new GridData(GridData.FILL_BOTH));
+
+    Label serverNameLabel = new Label(panel, SWT.NONE);
+    serverNameLabel.setText("&Server name:");
+
+    serverName = new Text(panel, SWT.SINGLE | SWT.BORDER);
+    GridData data = new GridData(GridData.FILL_HORIZONTAL);
+    serverName.setLayoutData(data);
+    serverName.setText("Hadoop Server");
+
+    new Label(panel, SWT.NONE).setText(" ");
+
+    // serverName.addModifyListener(this);
+
+    Label hostNameLabel = new Label(panel, SWT.NONE);
+    hostNameLabel.setText("&Hostname:");
+
+    hostName = new Text(panel, SWT.SINGLE | SWT.BORDER);
+    hostName.setLayoutData(new GridData(GridData.FILL_HORIZONTAL));
+    hostName.addListener(SWT.Modify, new Listener() {
+      public void handleEvent(Event e) {
+        refreshButtons();
+      }
+    });
+
+    // COMMENTED(jz) seems to cause issues on my version of eclipse
+    // hostName.setText(server.getHost());
+
+    // hostName.addModifyListener(this);
+
+    new Label(panel, SWT.NONE).setText(" ");
+
+    Label installationPathLabel = new Label(panel, SWT.NONE);
+    installationPathLabel.setText("&Installation directory:");
+
+    installPath = new Text(panel, SWT.SINGLE | SWT.BORDER);
+    installPath.setLayoutData(new GridData(GridData.FILL_HORIZONTAL));
+
+    // installationPath.addModifyListener(this);
+
+    new Label(panel, SWT.NONE).setText(" ");
+
+    Label usernameLabel = new Label(panel, SWT.NONE);
+    usernameLabel.setText("&Username:");
+
+    // new Label(panel, SWT.NONE).setText(" ");
+
+    userName = new Text(panel, SWT.SINGLE | SWT.BORDER);
+    userName.setLayoutData(new GridData(GridData.FILL_HORIZONTAL));
+    // username.addModifyListener(this);
+
+    Label spacer = new Label(panel, SWT.NONE);
+    spacer.setText(" ");
+
+    Label spacer2 = new Label(panel, SWT.NONE);
+    spacer2.setText(" ");
+
+    /*
+     * Label label = new Label(panel, SWT.NONE); GridData data2 = new
+     * GridData(); data2.horizontalSpan = 2; label.setLayoutData(data2);
+     * label.setText("Example: user@host:/path/to/hadoop");
+     */
+
+    Group sshTunnelGroup = new Group(panel, SWT.NONE);
+    GridData gridData = new GridData(GridData.FILL_HORIZONTAL);
+    gridData.horizontalSpan = 3;
+    sshTunnelGroup.setLayoutData(gridData);
+    sshTunnelGroup.setLayout(new GridLayout(2, false));
+    useSSHTunnel = new Button(sshTunnelGroup, SWT.CHECK);
+    useSSHTunnel.setText("Tunnel Connections");
+    GridData span2 = new GridData(GridData.FILL_HORIZONTAL);
+    span2.horizontalSpan = 2;
+    useSSHTunnel.setLayoutData(span2);
+    Label label = new Label(sshTunnelGroup, SWT.NONE);
+    label.setText("Tunnel via");
+    tunnelHostName = new Text(sshTunnelGroup, SWT.BORDER | SWT.SINGLE);
+    tunnelHostName.setLayoutData(new GridData(GridData.FILL_HORIZONTAL));
+
+    Label label2 = new Label(sshTunnelGroup, SWT.NONE);
+    label2.setText("Tunnel username");
+    tunnelUserName = new Text(sshTunnelGroup, SWT.BORDER | SWT.SINGLE);
+    tunnelUserName.setLayoutData(new GridData(GridData.FILL_HORIZONTAL));
+
+    Listener refreshButtonsListener = new Listener() {
+      public void handleEvent(Event event) {
+        refreshButtons();
+      }
+    };
+    useSSHTunnel.addListener(SWT.Selection, refreshButtonsListener);
+    tunnelHostName.setEnabled(useSSHTunnel.getSelection());
+    tunnelUserName.setEnabled(useSSHTunnel.getSelection());
+
+    ((GridLayout) sshTunnelGroup.getLayout()).marginBottom = 20;
+
+    Label label4 = new Label(panel, SWT.NONE);
+    GridData span3 = new GridData(GridData.FILL_HORIZONTAL);
+    span3.horizontalSpan = 3;
+    label4.setLayoutData(span3);
+
+    final Button validate = new Button(panel, SWT.NONE);
+    validate.setText("&Validate location");
+    validate.addListener(SWT.Selection, new Listener() {
+      public void handleEvent(Event e) {
+        testLocation();
+      }
+    });
+
+    new Label(panel, SWT.NONE).setText(" ");
+
+    setControl(panel);
+
+    defineValuesFromServer();
+  }
+
+  public void refreshButtons() {
+    if (useSSHTunnel == null)
+      return;
+
+    if (tunnelHostName != null)
+      tunnelHostName.setEnabled(useSSHTunnel.getSelection());
+    if (tunnelUserName != null)
+      tunnelUserName.setEnabled(useSSHTunnel.getSelection());
+
+    getContainer().updateButtons();
+  }
+}

Added: lucene/hadoop/trunk/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/servers/HadoopServerSelectionListContentProvider.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/servers/HadoopServerSelectionListContentProvider.java?view=auto&rev=566838
==============================================================================
--- lucene/hadoop/trunk/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/servers/HadoopServerSelectionListContentProvider.java (added)
+++ lucene/hadoop/trunk/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/servers/HadoopServerSelectionListContentProvider.java Thu Aug 16 13:43:12 2007
@@ -0,0 +1,75 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.eclipse.servers;
+
+import org.apache.hadoop.eclipse.server.HadoopServer;
+import org.eclipse.jface.viewers.IContentProvider;
+import org.eclipse.jface.viewers.ILabelProviderListener;
+import org.eclipse.jface.viewers.IStructuredContentProvider;
+import org.eclipse.jface.viewers.ITableLabelProvider;
+import org.eclipse.jface.viewers.Viewer;
+import org.eclipse.swt.graphics.Image;
+
+
+/**
+ * Provider that enables selection of a predefined Hadoop server.
+ */
+
+public class HadoopServerSelectionListContentProvider implements
+    IContentProvider, ITableLabelProvider, IStructuredContentProvider {
+  public void dispose() {
+
+  }
+
+  public void inputChanged(Viewer viewer, Object oldInput, Object newInput) {
+
+  }
+
+  public Image getColumnImage(Object element, int columnIndex) {
+    return null;
+  }
+
+  public String getColumnText(Object element, int columnIndex) {
+    if (element instanceof HadoopServer) {
+      if (columnIndex == 0) {
+        return ((HadoopServer) element).getName();
+      } else if (columnIndex == 1) {
+        return ((HadoopServer) element).toString();
+      }
+    }
+
+    return element.toString();
+  }
+
+  public void addListener(ILabelProviderListener listener) {
+
+  }
+
+  public boolean isLabelProperty(Object element, String property) {
+    return false;
+  }
+
+  public void removeListener(ILabelProviderListener listener) {
+
+  }
+
+  public Object[] getElements(Object inputElement) {
+    return ServerRegistry.getInstance().getServers().toArray();
+  }
+}

Added: lucene/hadoop/trunk/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/servers/IHadoopServerListener.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/servers/IHadoopServerListener.java?view=auto&rev=566838
==============================================================================
--- lucene/hadoop/trunk/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/servers/IHadoopServerListener.java (added)
+++ lucene/hadoop/trunk/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/servers/IHadoopServerListener.java Thu Aug 16 13:43:12 2007
@@ -0,0 +1,28 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.eclipse.servers;
+
+import org.apache.hadoop.eclipse.server.HadoopServer;
+
+/**
+ * Interface for monitoring server changes
+ */
+public interface IHadoopServerListener {
+  void serverChanged(HadoopServer location, int type);
+}
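
A sketch of a listener for these events (illustrative only): the type
codes are the SERVER_ADDED / SERVER_REMOVED / SERVER_STATE_CHANGED
constants defined on ServerRegistry later in this commit.

import org.apache.hadoop.eclipse.server.HadoopServer;
import org.apache.hadoop.eclipse.servers.IHadoopServerListener;
import org.apache.hadoop.eclipse.servers.ServerRegistry;

public class ConsoleServerListener implements IHadoopServerListener {
  public void serverChanged(HadoopServer location, int type) {
    switch (type) {
    case ServerRegistry.SERVER_ADDED:
      System.out.println("server added: " + location.getName());
      break;
    case ServerRegistry.SERVER_REMOVED:
      System.out.println("server removed: " + location.getName());
      break;
    case ServerRegistry.SERVER_STATE_CHANGED:
      System.out.println(location.getName() + " is now " + location.getState());
      break;
    }
  }
}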

Added: lucene/hadoop/trunk/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/servers/RunOnHadoopWizard.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/servers/RunOnHadoopWizard.java?view=auto&rev=566838
==============================================================================
--- lucene/hadoop/trunk/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/servers/RunOnHadoopWizard.java (added)
+++ lucene/hadoop/trunk/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/servers/RunOnHadoopWizard.java Thu Aug 16 13:43:12 2007
@@ -0,0 +1,235 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.eclipse.servers;
+
+import org.apache.hadoop.eclipse.server.HadoopServer;
+import org.apache.hadoop.eclipse.server.JarModule;
+import org.eclipse.core.runtime.IProgressMonitor;
+import org.eclipse.jface.viewers.TableViewer;
+import org.eclipse.jface.wizard.Wizard;
+import org.eclipse.jface.wizard.WizardPage;
+import org.eclipse.swt.SWT;
+import org.eclipse.swt.events.SelectionEvent;
+import org.eclipse.swt.events.SelectionListener;
+import org.eclipse.swt.layout.GridData;
+import org.eclipse.swt.layout.GridLayout;
+import org.eclipse.swt.widgets.Button;
+import org.eclipse.swt.widgets.Composite;
+import org.eclipse.swt.widgets.Label;
+import org.eclipse.swt.widgets.Table;
+import org.eclipse.swt.widgets.TableColumn;
+
+/**
+ * Wizard for publishing a job to a Hadoop server.
+ */
+
+public class RunOnHadoopWizard extends Wizard implements SelectionListener {
+
+  private DefineHadoopServerLocWizardPage createNewPage;
+
+  private MainPage mainPage;
+
+  private final JarModule jar;
+
+  private boolean complete = false;
+
+  private IProgressMonitor progressMonitor;
+
+  public RunOnHadoopWizard(JarModule jar) {
+    this.jar = jar;
+    setForcePreviousAndNextButtons(true);
+    setNeedsProgressMonitor(true);
+    setWindowTitle("Run on Hadoop");
+  }
+
+  @Override
+  public void addPages() {
+    super.addPages();
+    mainPage = new MainPage();
+    addPage(mainPage);
+    createNewPage = new DefineHadoopServerLocWizardPage();
+    addPage(createNewPage);
+  }
+
+  /**
+   * Performs any actions appropriate in response to the user having pressed
+   * the Finish button, or refuse if finishing now is not permitted.
+   */
+  @Override
+  public boolean performFinish() {
+    HadoopServer location = null;
+    if (mainPage.createNew.getSelection()) {
+      location = createNewPage.performFinish();
+    } else if (mainPage.table.getSelection().length == 1) {
+      location = (HadoopServer) mainPage.table.getSelection()[0].getData();
+    }
+
+    if (location != null) {
+      location.runJar(jar, progressMonitor);
+
+      return true;
+    }
+
+    return false;
+  }
+
+  public void refreshButtons() {
+    getContainer().updateButtons();
+  }
+
+  /**
+   * Allows finish when an existing server is selected or when a new server
+   * location is defined
+   */
+  @Override
+  public boolean canFinish() {
+
+    if (mainPage.chooseExisting.getSelection()
+        && (mainPage.table.getSelectionCount() > 0)) {
+      return true;
+    } else {
+      return (createNewPage.isPageComplete());
+      // check first
+    }
+  }
+
+  public class MainPage extends WizardPage {
+
+    private Button createNew;
+
+    private Table table;
+
+    public Button chooseExisting;
+
+    public MainPage() {
+      super("Select or define server to run on");
+      setTitle("Select Hadoop Server");
+      setDescription("Select a Hadoop Server to run on.");
+    }
+
+    @Override
+    public boolean canFlipToNextPage() {
+      return createNew.getSelection();
+    }
+
+    public void createControl(Composite parent) {
+      Composite control = new Composite(parent, SWT.NONE);
+      control.setLayout(new GridLayout(4, false));
+
+      Label label = new Label(control, SWT.FILL);
+      label.setText("Select a Hadoop Server to run on.");
+      GridData data = new GridData(GridData.FILL_BOTH);
+      data.grabExcessVerticalSpace = false;
+      data.horizontalSpan = 4;
+      label.setLayoutData(data);
+
+      createNew = new Button(control, SWT.RADIO);
+      createNew.setText("Define a new Hadoop server location");
+      createNew.setLayoutData(data);
+      createNew.addSelectionListener(RunOnHadoopWizard.this);
+
+      createNew.setSelection(true);
+
+      chooseExisting = new Button(control, SWT.RADIO);
+      chooseExisting
+          .setText("Choose an existing server from the list below");
+      chooseExisting.setLayoutData(data);
+      chooseExisting.addSelectionListener(RunOnHadoopWizard.this);
+
+      chooseExisting.addSelectionListener(new SelectionListener() {
+
+        public void widgetSelected(SelectionEvent e) {
+          if (chooseExisting.getSelection()
+              && (table.getSelectionCount() == 0)) {
+            if (table.getItems().length > 0) {
+              table.setSelection(0);
+            }
+          }
+        }
+
+        public void widgetDefaultSelected(SelectionEvent e) {
+        }
+
+      });
+
+      Composite serverList = new Composite(control, SWT.NONE);
+      GridData span = new GridData(GridData.FILL_BOTH);
+      span.horizontalSpan = 4;
+      serverList.setLayoutData(span);
+      GridLayout layout = new GridLayout(4, false);
+      layout.marginTop = 12;
+      serverList.setLayout(layout);
+
+      table =
+          new Table(serverList, SWT.SINGLE | SWT.H_SCROLL | SWT.V_SCROLL
+              | SWT.FULL_SELECTION);
+      table.setHeaderVisible(true);
+      table.setLinesVisible(true);
+      GridData d = new GridData(GridData.FILL_HORIZONTAL);
+      d.horizontalSpan = 4;
+      d.heightHint = 300;
+      table.setLayoutData(d);
+
+      TableColumn nameColumn = new TableColumn(table, SWT.SINGLE);
+      nameColumn.setText("Name");
+      nameColumn.setWidth(160);
+
+      TableColumn hostColumn = new TableColumn(table, SWT.SINGLE);
+      hostColumn.setText("Location");
+      hostColumn.setWidth(200);
+
+      table.addSelectionListener(new SelectionListener() {
+        public void widgetSelected(SelectionEvent e) {
+          chooseExisting.setSelection(true);
+          createNew.setSelection(false); // shouldn't be necessary,
+          // but got a visual bug once
+
+          refreshButtons();
+        }
+
+        public void widgetDefaultSelected(SelectionEvent e) {
+
+        }
+      });
+
+      TableViewer viewer = new TableViewer(table);
+      HadoopServerSelectionListContentProvider provider =
+          new HadoopServerSelectionListContentProvider();
+      viewer.setContentProvider(provider);
+      viewer.setLabelProvider(provider);
+      viewer.setInput(new Object()); // don't care, get from singleton
+      // server registry
+
+      setControl(control);
+    }
+  }
+
+  public void widgetDefaultSelected(SelectionEvent e) {
+    // TODO Auto-generated method stub
+
+  }
+
+  public void widgetSelected(SelectionEvent e) {
+    refreshButtons();
+  }
+
+  public void setProgressMonitor(IProgressMonitor progressMonitor) {
+    this.progressMonitor = progressMonitor;
+  }
+}

Added: lucene/hadoop/trunk/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/servers/ServerRegistry.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/servers/ServerRegistry.java?view=auto&rev=566838
==============================================================================
--- lucene/hadoop/trunk/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/servers/ServerRegistry.java (added)
+++ lucene/hadoop/trunk/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/servers/ServerRegistry.java Thu Aug 16 13:43:12 2007
@@ -0,0 +1,229 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.eclipse.servers;
+
+import java.io.BufferedReader;
+import java.io.BufferedWriter;
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.io.FileReader;
+import java.io.FileWriter;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+
+import org.apache.hadoop.eclipse.Activator;
+import org.apache.hadoop.eclipse.server.HadoopServer;
+
+/**
+ * Registry for storing Hadoop Servers
+ */
+public class ServerRegistry {
+
+  private static final ServerRegistry INSTANCE = new ServerRegistry();
+
+  public static final int SERVER_ADDED = 0;
+
+  public static final int SERVER_REMOVED = 1;
+
+  public static final int SERVER_STATE_CHANGED = 2;
+
+  private ServerRegistry() {
+  }
+
+  private List<HadoopServer> servers;
+
+  private Set<IHadoopServerListener> listeners =
+      new HashSet<IHadoopServerListener>();
+
+  public static ServerRegistry getInstance() {
+    return INSTANCE;
+  }
+
+  public List<HadoopServer> getServers() {
+    return Collections.unmodifiableList(getServersInternal());
+  }
+
+  /**
+   * Returns the list of currently defined servers. The list is read from the
+   * file if it is not in memory.
+   * 
+   * @return the list of hadoop servers
+   */
+  private List<HadoopServer> getServersInternal() {
+
+    if (servers == null) {
+      servers = new ArrayList<HadoopServer>();
+
+      File store =
+          Activator.getDefault().getStateLocation().append("SERVERS.txt")
+              .toFile();
+
+      if (!store.exists()) {
+        try {
+          store.createNewFile();
+        } catch (IOException e) {
+          // pretty fatal error here - we can't save or restore
+          throw new RuntimeException(e);
+        }
+      }
+
+      BufferedReader reader = null;
+      try {
+        reader = new BufferedReader(new FileReader(store));
+        String line;
+        while ((line = reader.readLine()) != null) {
+          try {
+            String[] parts = line.split("\t");
+            if (parts.length == 1) {
+              String location = parts[0];
+              parts = new String[] { location, "Hadoop Server" };
+            }
+
+            if (parts.length >= 4) {
+              // four-field record: location, name, tunnel host, tunnel user
+              servers.add(new HadoopServer(parts[0], parts[1], parts[2],
+                  parts[3]));
+            } else {
+              servers.add(new HadoopServer(parts[0], parts[1]));
+            }
+
+            servers.get(servers.size() - 1).setId(servers.size() - 1);
+
+          } catch (Exception e) {
+            // TODO(jz) show message and ignore - still want rest of
+            // servers if we can get them
+            e.printStackTrace();
+          }
+        }
+      } catch (FileNotFoundException e) {
+        e.printStackTrace();
+      } catch (IOException e) {
+        // TODO(jz) show message and ignore - may have corrupt
+        // configuration
+        e.printStackTrace();
+      } finally {
+        if (reader != null) {
+          try {
+            reader.close();
+          } catch (IOException e) {
+            /* nothing we can do */
+          }
+        }
+      }
+    }
+
+    return servers;
+  }
+
+  public synchronized void removeServer(HadoopServer server) {
+    getServersInternal().remove(server);
+    fireListeners(server, SERVER_REMOVED);
+    save();
+  }
+
+  public synchronized void addServer(HadoopServer server) {
+    getServersInternal().add(server);
+    fireListeners(server, SERVER_ADDED);
+    save();
+  }
+
+  /**
+   * Save the list of servers to the plug-in configuration file, currently
+   * SERVERS.txt in
+   * <workspace-dir>/.metadata/.plugins/org.apache.hadoop.eclipse/SERVERS.txt
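+   * 
+   * Each record is a tab-separated line: the server location and display
+   * name, optionally followed by a tunnel host name and tunnel user name
+   * (format inferred from this class's load and save code).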
+   */
+  private synchronized void save() {
+    File store =
+        Activator.getDefault().getStateLocation().append("SERVERS.txt")
+            .toFile();
+    BufferedWriter writer = null;
+
+    if (!store.exists()) {
+      try {
+        store.createNewFile();
+      } catch (IOException e) {
+        // pretty fatal error here - we can't save or restore
+        throw new RuntimeException(e);
+      }
+    }
+
+    try {
+      writer = new BufferedWriter(new FileWriter(store));
+      int i = 0;
+      for (HadoopServer server : servers) {
+        server.setId(i++);
+        writer.append(server.toString() + "\t" + server.getName());
+        if (server.getTunnelHostName() != null) {
+          writer.append("\t" + server.getTunnelHostName() + "\t"
+              + server.getTunnelUserName());
+        }
+        writer.newLine();
+      }
+    } catch (IOException e) {
+      // TODO(jz) show error message
+      e.printStackTrace();
+    } finally {
+      if (writer != null) {
+        try {
+          writer.close();
+        } catch (IOException e) {
+          /* nothing we can do */
+        }
+      }
+    }
+  }
+
+  public void addListener(IHadoopServerListener l) {
+    synchronized (listeners) {
+      listeners.add(l);
+    }
+  }
+
+  private void fireListeners(HadoopServer location, int kind) {
+    synchronized (listeners) {
+      for (IHadoopServerListener listener : listeners) {
+        listener.serverChanged(location, kind);
+      }
+    }
+  }
+
+  public void stateChanged(HadoopServer job) {
+    fireListeners(job, SERVER_STATE_CHANGED);
+  }
+
+  public void removeListener(IHadoopServerListener l) {
+    synchronized (listeners) {
+      listeners.remove(l);
+    }
+  }
+
+  public void dispose() {
+    for (HadoopServer server : getServers()) {
+      server.dispose();
+    }
+  }
+
+  public HadoopServer getServer(int serverid) {
+    return servers.get(serverid);
+  }
+
+}
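
Client code elsewhere in the plug-in goes through the singleton: register a
listener, then add or remove servers and let the registry persist SERVERS.txt
and broadcast the change. A minimal usage sketch follows; it is runnable only
inside a running Eclipse workbench (save() resolves the plug-in state
location), and the location and name strings are made up:

  import org.apache.hadoop.eclipse.server.HadoopServer;
  import org.apache.hadoop.eclipse.servers.IHadoopServerListener;
  import org.apache.hadoop.eclipse.servers.ServerRegistry;

  public class RegistryUsageSketch {
    public static void register() {
      ServerRegistry registry = ServerRegistry.getInstance();

      // react to SERVER_ADDED / SERVER_REMOVED / SERVER_STATE_CHANGED
      registry.addListener(new IHadoopServerListener() {
        public void serverChanged(HadoopServer location, int kind) {
          System.out.println(location.getName() + " changed, kind=" + kind);
        }
      });

      // persists the list and fires SERVER_ADDED; the two-argument
      // constructor matches the two-field records read at load time
      registry.addServer(new HadoopServer("hostname.example.com", "My Cluster"));
    }
  }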

Added: lucene/hadoop/trunk/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/view/servers/ServerView.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/view/servers/ServerView.java?view=auto&rev=566838
==============================================================================
--- lucene/hadoop/trunk/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/view/servers/ServerView.java (added)
+++ lucene/hadoop/trunk/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/view/servers/ServerView.java Thu Aug 16 13:43:12 2007
@@ -0,0 +1,383 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.eclipse.view.servers;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.Map;
+
+import org.apache.hadoop.eclipse.Activator;
+import org.apache.hadoop.eclipse.actions.EditServerAction;
+import org.apache.hadoop.eclipse.actions.NewServerAction;
+import org.apache.hadoop.eclipse.server.HadoopJob;
+import org.apache.hadoop.eclipse.server.HadoopServer;
+import org.apache.hadoop.eclipse.server.IJobListener;
+import org.apache.hadoop.eclipse.server.JarModule;
+import org.apache.hadoop.eclipse.servers.IHadoopServerListener;
+import org.apache.hadoop.eclipse.servers.ServerRegistry;
+import org.eclipse.core.runtime.FileLocator;
+import org.eclipse.core.runtime.Path;
+import org.eclipse.debug.internal.ui.DebugPluginImages;
+import org.eclipse.debug.ui.IDebugUIConstants;
+import org.eclipse.jface.action.Action;
+import org.eclipse.jface.action.IAction;
+import org.eclipse.jface.resource.ImageDescriptor;
+import org.eclipse.jface.viewers.IContentProvider;
+import org.eclipse.jface.viewers.ILabelProviderListener;
+import org.eclipse.jface.viewers.ISelection;
+import org.eclipse.jface.viewers.IStructuredContentProvider;
+import org.eclipse.jface.viewers.IStructuredSelection;
+import org.eclipse.jface.viewers.ITableLabelProvider;
+import org.eclipse.jface.viewers.ITreeContentProvider;
+import org.eclipse.jface.viewers.ITreeSelection;
+import org.eclipse.jface.viewers.TreeViewer;
+import org.eclipse.jface.viewers.Viewer;
+import org.eclipse.swt.SWT;
+import org.eclipse.swt.graphics.Image;
+import org.eclipse.swt.layout.GridData;
+import org.eclipse.swt.widgets.Composite;
+import org.eclipse.swt.widgets.Display;
+import org.eclipse.swt.widgets.Tree;
+import org.eclipse.swt.widgets.TreeColumn;
+import org.eclipse.ui.IViewSite;
+import org.eclipse.ui.PartInitException;
+import org.eclipse.ui.actions.ActionFactory;
+import org.eclipse.ui.part.ViewPart;
+
+import com.jcraft.jsch.Channel;
+import com.jcraft.jsch.ChannelExec;
+import com.jcraft.jsch.JSchException;
+import com.jcraft.jsch.Session;
+
+/**
+ * Code for displaying/updating the MapReduce Servers view panel
+ */
+public class ServerView extends ViewPart implements IContentProvider,
+    IStructuredContentProvider, ITreeContentProvider, ITableLabelProvider,
+    IJobListener, IHadoopServerListener {
+
+  /**
+   * This object is the root content for this content provider
+   */
+  private static final Object CONTENT_ROOT = new Object();
+
+  private final IAction DELETE = new DeleteAction();
+
+  private final IAction PROPERTIES = new EditServerAction(this);
+
+  private final IAction NEWSERVER = new NewServerAction();
+
+  private Map<String, Image> images = new HashMap<String, Image>();
+
+  private TreeViewer viewer;
+
+  public ServerView() {
+  }
+
+  /* @inheritDoc */
+  @Override
+  public void init(IViewSite site) throws PartInitException {
+    super.init(site);
+
+    try {
+      images.put("hadoop", ImageDescriptor.createFromURL(
+          (FileLocator.toFileURL(FileLocator.find(Activator.getDefault()
+              .getBundle(), new Path("resources/hadoop_small.gif"), null))))
+          .createImage(true));
+      images.put("job", ImageDescriptor.createFromURL(
+          (FileLocator.toFileURL(FileLocator.find(Activator.getDefault()
+              .getBundle(), new Path("resources/job.gif"), null))))
+          .createImage(true));
+    } catch (IOException e) {
+      e.printStackTrace();
+    }
+  }
+
+  /* @inheritDoc */
+  @Override
+  public void dispose() {
+    for (Image image : images.values()) {
+      image.dispose();
+    }
+
+    ServerRegistry.getInstance().removeListener(this);
+
+    images.clear();
+  }
+
+  /**
+   * Creates the columns for the view
+   */
+  @Override
+  public void createPartControl(Composite parent) {
+    Tree main =
+        new Tree(parent, SWT.SINGLE | SWT.FULL_SELECTION | SWT.H_SCROLL
+            | SWT.V_SCROLL);
+    main.setHeaderVisible(true);
+    main.setLinesVisible(false);
+    main.setLayoutData(new GridData(GridData.FILL_BOTH));
+
+    TreeColumn serverCol = new TreeColumn(main, SWT.SINGLE);
+    serverCol.setText("Server");
+    serverCol.setWidth(185);
+    serverCol.setResizable(true);
+
+    TreeColumn locationCol = new TreeColumn(main, SWT.SINGLE);
+    locationCol.setText("Location");
+    locationCol.setWidth(185);
+    locationCol.setResizable(true);
+
+    TreeColumn stateCol = new TreeColumn(main, SWT.SINGLE);
+    stateCol.setText("State");
+    stateCol.setWidth(95);
+    stateCol.setResizable(true);
+
+    TreeColumn statusCol = new TreeColumn(main, SWT.SINGLE);
+    statusCol.setText("Status");
+    statusCol.setWidth(300);
+    statusCol.setResizable(true);
+
+    viewer = new TreeViewer(main);
+    viewer.setContentProvider(this);
+    viewer.setLabelProvider(this);
+    viewer.setInput(CONTENT_ROOT); // don't care; content comes from the server registry
+
+    getViewSite().setSelectionProvider(viewer);
+    getViewSite().getActionBars().setGlobalActionHandler(
+        ActionFactory.DELETE.getId(), DELETE);
+
+    getViewSite().getActionBars().getToolBarManager().add(PROPERTIES);
+
+    // getViewSite().getActionBars().getToolBarManager().add(new
+    // StartAction());
+    getViewSite().getActionBars().getToolBarManager().add(NEWSERVER);
+  }
+
+  // NewServerAction moved to actions package for cheat sheet access --
+  // eyhung
+
+  public class DeleteAction extends Action {
+    @Override
+    public void run() {
+      ISelection selection =
+          getViewSite().getSelectionProvider().getSelection();
+      if ((selection != null) && (selection instanceof IStructuredSelection)) {
+        Object selItem =
+            ((IStructuredSelection) selection).getFirstElement();
+
+        if (selItem instanceof HadoopServer) {
+          HadoopServer location = (HadoopServer) selItem;
+          ServerRegistry.getInstance().removeServer(location);
+
+        } else if (selItem instanceof HadoopJob) {
+
+          // kill the job
+          HadoopJob job = (HadoopJob) selItem;
+          HadoopServer server = job.getServer();
+          String jobId = job.getJobId();
+
+          if (job.isCompleted())
+            return;
+
+          try {
+            Session session = server.createSession();
+
+            String command =
+                server.getInstallPath() + "/bin/hadoop job -kill " + jobId;
+            Channel channel = session.openChannel("exec");
+            ((ChannelExec) channel).setCommand(command);
+            channel.connect();
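+            // the channel is torn down immediately below: the kill
+            // command is issued but its completion is not awaited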
+            channel.disconnect();
+
+            session.disconnect();
+          } catch (JSchException e) {
+            e.printStackTrace();
+          }
+        }
+      }
+    }
+  }
+
+  public static class StartAction extends Action {
+    public StartAction() {
+      setText("Start");
+
+      // NOTE(jz) - all below from internal api, worst case no images
+      setImageDescriptor(DebugPluginImages
+          .getImageDescriptor(IDebugUIConstants.IMG_ACT_RUN));
+    }
+  }
+
+  /* @inheritDoc */
+  @Override
+  public void setFocus() {
+
+  }
+
+  /* @inheritDoc */
+  public void serverChanged(HadoopServer location, int type) {
+    Display.getDefault().syncExec(new Runnable() {
+      public void run() {
+        ServerView.this.viewer.refresh();
+      }
+    });
+  }
+
+  /* @inheritDoc */
+  public void inputChanged(final Viewer viewer, Object oldInput,
+      Object newInput) {
+    if (oldInput == CONTENT_ROOT)
+      ServerRegistry.getInstance().removeListener(this);
+    if (newInput == CONTENT_ROOT)
+      ServerRegistry.getInstance().addListener(this);
+  }
+
+  /* @inheritDoc */
+  public Object[] getElements(Object inputElement) {
+    return ServerRegistry.getInstance().getServers().toArray();
+  }
+
+  /* @inheritDoc */
+  public Object[] getChildren(Object parentElement) {
+    if (parentElement instanceof HadoopServer) {
+      ((HadoopServer) parentElement).addJobListener(this);
+
+      return ((HadoopServer) parentElement).getChildren();
+    }
+
+    return null;
+  }
+
+  /* @inheritDoc */
+  public Object getParent(Object element) {
+    if (element instanceof HadoopServer) {
+      return CONTENT_ROOT;
+    } else if (element instanceof HadoopJob) {
+      return ((HadoopJob) element).getServer();
+    }
+    return null;
+  }
+
+  /* @inheritDoc */
+  public boolean hasChildren(Object element) {
+    /* Only server entries have children */
+    return (element instanceof HadoopServer);
+  }
+
+  /* @inheritDoc */
+  public void addListener(ILabelProviderListener listener) {
+    // no listener handling
+  }
+
+  public boolean isLabelProperty(Object element, String property) {
+    return false;
+  }
+
+  /* @inheritDoc */
+  public void removeListener(ILabelProviderListener listener) {
+    // no listener handling
+  }
+
+  /* @inheritDoc */
+  public Image getColumnImage(Object element, int columnIndex) {
+    if ((columnIndex == 0) && (element instanceof HadoopServer)) {
+      return images.get("hadoop");
+    } else if ((columnIndex == 0) && (element instanceof HadoopJob)) {
+      return images.get("job");
+    }
+    return null;
+  }
+
+  /* @inheritDoc */
+  public String getColumnText(Object element, int columnIndex) {
+    if (element instanceof HadoopServer) {
+      HadoopServer server = (HadoopServer) element;
+
+      switch (columnIndex) {
+        case 0:
+          return server.getName();
+        case 1:
+          return server.getHostName().toString();
+        case 2:
+          return server.getState();
+        case 3:
+          return "";
+      }
+    } else if (element instanceof HadoopJob) {
+      HadoopJob job = (HadoopJob) element;
+
+      switch (columnIndex) {
+        case 0:
+          return job.getId();
+        case 1:
+          return "";
+        case 2:
+          return job.getState();
+        case 3:
+          return job.getStatus();
+      }
+    } else if (element instanceof JarModule) {
+      JarModule jar = (JarModule) element;
+
+      switch (columnIndex) {
+        case 0:
+          return jar.toString();
+        case 1:
+          return "Publishing jar to server..";
+        case 2:
+          return "";
+      }
+    }
+
+    return null;
+  }
+
+  public void jobAdded(HadoopJob job) {
+    viewer.refresh();
+  }
+
+  public void jobChanged(HadoopJob job) {
+    viewer.refresh(job);
+  }
+
+  public void publishDone(JarModule jar) {
+    viewer.refresh();
+  }
+
+  public void publishStart(JarModule jar) {
+    viewer.refresh();
+  }
+
+  /**
+   * Return the currently selected server (null if there is no selection or
+   * if the selection is not a server)
+   * 
+   * @return the currently selected server entry
+   */
+  public HadoopServer getSelectedServer() {
+    ITreeSelection selection = (ITreeSelection) viewer.getSelection();
+    Object first = selection.getFirstElement();
+    if (first instanceof HadoopServer) {
+      return (HadoopServer) first;
+    }
+    return null;
+  }
+
+}
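
The DeleteAction above issues bin/hadoop job -kill over a JSch exec channel
and disconnects without waiting. Where the caller needs the command's output
or exit status, a variant along the following lines could drain stdout and
poll for channel closure. This is a sketch with placeholder host, credentials,
install path, and job id, not code from this commit; in the plug-in,
HadoopServer.createSession() would supply an already-configured Session:

  import java.io.InputStream;

  import com.jcraft.jsch.ChannelExec;
  import com.jcraft.jsch.JSch;
  import com.jcraft.jsch.Session;

  public class RemoteKillSketch {
    public static void main(String[] args) throws Exception {
      // placeholder connection details
      Session session = new JSch().getSession("user", "hostname.example.com", 22);
      session.setPassword("secret");
      session.setConfig("StrictHostKeyChecking", "no");
      session.connect();

      ChannelExec channel = (ChannelExec) session.openChannel("exec");
      channel.setCommand("/opt/hadoop/bin/hadoop job -kill job_0001");

      InputStream out = channel.getInputStream(); // open before connect()
      channel.connect();

      // drain stdout so the remote side is not blocked, then wait for
      // the channel to close and report the command's exit status
      byte[] buf = new byte[1024];
      while (true) {
        while (out.available() > 0) {
          int n = out.read(buf);
          if (n < 0) break;
          System.out.write(buf, 0, n);
        }
        if (channel.isClosed()) break;
        Thread.sleep(250);
      }
      System.out.println("exit status: " + channel.getExitStatus());

      channel.disconnect();
      session.disconnect();
    }
  }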

Added: lucene/hadoop/trunk/src/contrib/eclipse-plugin/todo.txt
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/eclipse-plugin/todo.txt?view=auto&rev=566838
==============================================================================
--- lucene/hadoop/trunk/src/contrib/eclipse-plugin/todo.txt (added)
+++ lucene/hadoop/trunk/src/contrib/eclipse-plugin/todo.txt Thu Aug 16 13:43:12 2007
@@ -0,0 +1,149 @@
+-- DONE --------------------------
+	* Pref wizard page for hadoop libraries (eugene) -- DONE
+	* running wrong jar bug (julz) -- not using WTP any more DONE
+	* DFS only for hadoop servers (julz) -- DONE
+	* allow per-project hadoop dir, moved selection of hadoop path to first page of wizard (julz) -- DONE
+	* allow creation of new driver as part of new project wizard (julz) -- DONE
+	* BUG: ssh console sometimes drops (eugene) -- DONE
+	* Server Selection wizard - finish button should not be clickable if radio is on create server (eugene) -- DONE
+	* module icons for jar and job (dennis) -- DONE (sort of)
+	
+			 
+--- Bugs ---
+
+	* Server Selection wizard has identical name and location -- 
+	
+--- Features ----
+
+	* Limit type searches in driver wizard to current project (eugene) 
+	
+	* new.. dialogs on mapred perspective (julz) 
+	
+	* show cheat sheet, more wizardy goodness (julz) 
+
+
+--- Documentation ---
+
+	* cheat sheets (dennis)
+
+
+--- Testing ---
+
+	* test on mac osx (julz)
+
+
+--- Everything ------------------
+
+* Run/Debug.. on Hadoop runs the project on a local hadoop cloud; this will involve finding
+	the appropriate Map/Reduce classes. As a first pass I suggest we have the user specify these in the Run.. dialog,
+	so this task breaks down to at least:
+	
+	* hadoop new proj. size
+	
+	* generate mapper/reducer screen on new project wizard
+	* title bar, titles on new X wizards
+	* hadoop perspective show cheat sheet
+	* status on server view
+	* double click on jobs, go to associated console
+	* icons for jobs
+	
+	* copy resources directory or similar to dfs, allow configurable resources directory
+	
+	* test installation directory on new server screen (i.e. ssh in and check files are present)
+	
+	* (Eugene) if server has user:pass@hostname in location, ssh file and run it on remote hadoop client
+	
+	* (Daniel) make launch on local hadoop scenario properly work, deploy jar to the server when run
+	
+	* (Julz) read info from 50030 to show jobs running on server
+
+	* contribute Format action for fs, suggest this when a server is first created
+	
+	* Possibly DFS navigator view?
+
+	*	(and to specify input and output? - how should we handle this?)
+	
+	* Restrict browse classes dialog above to subclass of Mapper, Reducer etc., add proposals to text fields
+	
+	* Make launch dialog look pretty
+	
+	* Run the specified Mapper and Reducer on a local server
+	
+	* Allow the user to Run on a server defined in a servers view (i.e. so you can run locally, and on cloud A or B with the same settings)
+	
+	* Allow the user to configure the hadoop server from this view as appropriate
+	
+	* When the job runs, keep the tracker interface and put it into a view in the perspective (see next task!) so the user
+	can track the state
+
+* Add a Hadouken perspective with
+	* the Hadoop targets view (analogous to servers view in WTP project)
+	
+	* the running jobs view which shows the status of running jobs
+	
+	* a Current Lesson/API panel showing html text from the lecturer?
+	
+	* any jazz stuff?
+	
+* JUnit support, specify expected inputs and outputs and run on server, collecting results and presenting a unified view
+ similar to the junit component.
+-- DONE --------------------------
+
+-- Current priorities ------------
+
+ ... Dennis, maybe you could move stuff from below up here?
+
+--- Everything ------------------
+
+* Run/Debug.. on Hadoop runs the project on a local hadoop cloud; this will involve finding
+	the appropriate Map/Reduce classes. As a first pass I suggest we have the user specify these in the Run.. dialog,
+	so this task breaks down to at least:
+	
+	* hadoop new proj. size
+	
+	* generate mapper/reducer screen on new project wizard
+	* title bar, titles on new X wizards
+	* auto-focus on main on X wizards, auto show newly created stuff
+	* on new driver screen, specify mapper (allow creation for bonus points)
+	* hadoop perspective show cheat sheet
+	* remove browse button
+	* status on server view
+	* double click on jobs, go to associated console
+	* icons for jobs
+	
+	* (Eugene) if server has user:pass@hostname in location, ssh file and run it on remote hadoop client
+	
+	* (Daniel) make launch on local hadoop scenario properly work, deploy jar to the server when run
+	
+	* (Julz) read info from 50030 to show jobs running on server
+
+	* contribute Format action for fs, suggest this when a server is first created
+	
+	* Possibly DFS navigator view?
+
+	*	(and to specify input and output? - how should we handle this?)
+	
+	* Restrict browse classes dialog above to subclass of Mapper, Reducer etc., add proposals to text fields
+	
+	* Make launch dialog look pretty
+	
+	* Run the specified Mapper and Reducer on a local server
+	
+	* Allow the user to Run on a server defined in a servers view (i.e. so you can run locally, and on cloud A or B with the same settings)
+	
+	* Allow the user to configure the hadoop server from this view as appropriate
+	
+	* When the job runs, keep the tracker interface and put it into a view in the perspective (see next task!) so the user
+	can track the state
+
+* Add a Hadouken perspective with
+	* the Hadoop targets view (analogous to servers view in WTP project)
+	
+	* the running jobs view which shows the status of running jobs
+	
+	* a Current Lesson/API panel showing html text from the lecturer?
+	
+	* any jazz stuff?
+	
+* JUnit support, specify expected inputs and outputs and run on server, collecting results and presenting a unified view
+ similar to the junit component.
\ No newline at end of file


