hadoop-common-commits mailing list archives

From ta...@apache.org
Subject svn commit: r588310 [3/4] - in /lucene/hadoop/trunk/src/contrib/eclipse-plugin: ./ .settings/ META-INF/ resources/ resources/Components/ resources/Old/ src/java/org/apache/hadoop/eclipse/ src/java/org/apache/hadoop/eclipse/actions/ src/java/org/apache/...
Date Thu, 25 Oct 2007 18:58:39 GMT
Added: lucene/hadoop/trunk/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/server/ConfProp.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/server/ConfProp.java?rev=588310&view=auto
==============================================================================
--- lucene/hadoop/trunk/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/server/ConfProp.java (added)
+++ lucene/hadoop/trunk/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/server/ConfProp.java Thu Oct 25 11:58:32 2007
@@ -0,0 +1,147 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.eclipse.server;
+
+import java.util.HashMap;
+import java.util.Map;
+
+import org.apache.hadoop.conf.Configuration;
+
+public enum ConfProp {
+  /**
+   * Property name for the Hadoop location name
+   */
+  PI_LOCATION_NAME(true, "location.name", "New Hadoop location"),
+
+  /**
+   * Property name for the master host name (the Job tracker)
+   */
+  PI_JOB_TRACKER_HOST(true, "jobtracker.host", "localhost"),
+
+  /**
+   * Property name for the DFS master host name (the Name node)
+   */
+  PI_NAME_NODE_HOST(true, "namenode.host", "localhost"),
+
+  /**
+   * Property name for the installation directory on the master node
+   */
+  // PI_INSTALL_DIR(true, "install.dir", "/dir/hadoop-version/"),
+  /**
+   * User name to use for Hadoop operations
+   */
+  PI_USER_NAME(true, "user.name", System.getProperty("user.name",
+      "who are you?")),
+
+  /**
+   * Property name for SOCKS proxy activation
+   */
+  PI_SOCKS_PROXY_ENABLE(true, "socks.proxy.enable", "no"),
+
+  /**
+   * Property name for the SOCKS proxy host
+   */
+  PI_SOCKS_PROXY_HOST(true, "socks.proxy.host", "host"),
+
+  /**
+   * Property name for the SOCKS proxy port
+   */
+  PI_SOCKS_PROXY_PORT(true, "socks.proxy.port", "1080"),
+
+  /**
+   * TCP port number for the name node
+   */
+  PI_NAME_NODE_PORT(true, "namenode.port", "50040"),
+
+  /**
+   * TCP port number for the job tracker
+   */
+  PI_JOB_TRACKER_PORT(true, "jobtracker.port", "50020"),
+
+  /**
+   * Are the Map/Reduce and the Distributed FS masters hosted on the same
+   * machine?
+   */
+  PI_COLOCATE_MASTERS(true, "masters.colocate", "yes"),
+
+  /**
+   * Property name for naming the job tracker (URI). This property is related
+   * to {@link #PI_JOB_TRACKER_HOST}
+   */
+  JOB_TRACKER_URI(false, "mapred.job.tracker", "localhost:50020"),
+
+  /**
+   * Property name for naming the default file system (URI).
+   */
+  FS_DEFAULT_URI(false, "fs.default.name", "hdfs://localhost:50040/"),
+
+  /**
+   * Property name for the default socket factory.
+   */
+  SOCKET_FACTORY_DEFAULT(false, "hadoop.rpc.socket.factory.class.default",
+      "org.apache.hadoop.net.StandardSocketFactory"),
+
+  /**
+   * Property name for the SOCKS server URI.
+   */
+  SOCKS_SERVER(false, "hadoop.socks.server", "host:1080"),
+
+  ;
+
+  /**
+   * Map from property name to ConfProp constant
+   */
+  private static Map<String, ConfProp> map;
+
+  private static synchronized void registerProperty(String name,
+      ConfProp prop) {
+
+    if (ConfProp.map == null)
+      ConfProp.map = new HashMap<String, ConfProp>();
+
+    ConfProp.map.put(name, prop);
+  }
+
+  public static ConfProp getByName(String propName) {
+    return map.get(propName);
+  }
+
+  public final String name;
+
+  public final String defVal;
+
+  ConfProp(boolean internal, String name, String defVal) {
+    if (internal)
+      name = "eclipse.plug-in." + name;
+    this.name = name;
+    this.defVal = defVal;
+
+    ConfProp.registerProperty(name, this);
+  }
+
+  String get(Configuration conf) {
+    return conf.get(name);
+  }
+
+  void set(Configuration conf, String value) {
+    assert value != null;
+    conf.set(name, value);
+  }
+
+}

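Each ConfProp constant registers its final property name in the static map from its constructor: internal plug-in parameters get the "eclipse.plug-in." prefix prepended, while Hadoop-level parameters keep their raw name, and getByName() resolves a raw Configuration key back to its constant. A minimal sketch of that round trip (not part of the commit; the demo class sits in the same package so the package-private get()/set() accessors are visible, and all values are made up):

package org.apache.hadoop.eclipse.server;

import org.apache.hadoop.conf.Configuration;

public class ConfPropDemo {
  public static void main(String[] args) {
    Configuration conf = new Configuration();

    // Internal property: the constructor prepended "eclipse.plug-in."
    ConfProp.PI_LOCATION_NAME.set(conf, "demo-cluster");
    System.out.println(ConfProp.PI_LOCATION_NAME.name);
    // -> eclipse.plug-in.location.name

    // Hadoop property: the raw name is kept as-is
    ConfProp.JOB_TRACKER_URI.set(conf, "master:50020");

    // Reverse lookup from the raw key to the enum constant, which works
    // because every constant registered itself at construction time
    ConfProp prop = ConfProp.getByName("mapred.job.tracker");
    System.out.println(prop.get(conf)); // -> master:50020
  }
}
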
Modified: lucene/hadoop/trunk/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/server/HadoopJob.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/server/HadoopJob.java?rev=588310&r1=588309&r2=588310&view=diff
==============================================================================
--- lucene/hadoop/trunk/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/server/HadoopJob.java (original)
+++ lucene/hadoop/trunk/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/server/HadoopJob.java Thu Oct 25 11:58:32 2007
@@ -18,83 +18,327 @@
 
 package org.apache.hadoop.eclipse.server;
 
+import java.io.File;
+import java.io.IOException;
+
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.mapred.Counters;
+import org.apache.hadoop.mapred.JobConf;
+import org.apache.hadoop.mapred.JobStatus;
+import org.apache.hadoop.mapred.RunningJob;
+
 /**
- * Helper class to pretty-print status for a hadoop job running on a MapReduce server.
+ * Representation of a Map/Reduce job running on a given location
  */
 
 public class HadoopJob {
-  String name;
-  
+
+  /**
+   * Enum representation of a Job state
+   */
+  public enum JobState {
+    PREPARE(JobStatus.PREP), RUNNING(JobStatus.RUNNING), FAILED(
+        JobStatus.FAILED), SUCCEEDED(JobStatus.SUCCEEDED);
+
+    final int state;
+
+    JobState(int state) {
+      this.state = state;
+    }
+
+    static JobState ofInt(int state) {
+      switch (state) {
+        case JobStatus.PREP:
+          return PREPARE;
+        case JobStatus.RUNNING:
+          return RUNNING;
+        case JobStatus.FAILED:
+          return FAILED;
+        case JobStatus.SUCCEEDED:
+          return SUCCEEDED;
+        default:
+          return null;
+      }
+    }
+  }
+
+  /**
+   * Location this Job runs on
+   */
+  private final HadoopServer location;
+
+  /**
+   * Unique identifier of this Job
+   */
+  final String jobId;
+
+  /**
+   * Status representation of a running job. This actually contains a
+   * reference to a JobClient. Its methods might block.
+   */
+  RunningJob running;
+
+  /**
+   * Last polled status
+   * 
+   * @deprecated should apparently not be used
+   */
+  JobStatus status;
+
+  /**
+   * Last polled counters
+   */
+  Counters counters;
+
   /**
-   * Hadoop Job Id (useful to kill the job)
+   * Job Configuration
    */
-  String jobId;
+  JobConf jobConf = null;
 
-  boolean completed;
+  boolean completed = false;
 
-  String totalMaps;
+  boolean successful = false;
 
-  String totalReduces;
+  boolean killed = false;
 
-  String completedMaps;
+  int totalMaps;
 
-  String completedReduces;
+  int totalReduces;
 
-  String mapPercentage;
+  int completedMaps;
 
-  String reducePercentage;
+  int completedReduces;
 
-  private HadoopServer server;
+  float mapProgress;
 
-  public HadoopJob(HadoopServer server) {
-    this.server = server;
+  float reduceProgress;
+
+  /**
+   * Constructor for a Hadoop job representation
+   * 
+   * @param location the location this job runs on
+   * @param id the unique identifier of this job
+   * @param running the RunningJob handle for this job
+   * @param status the last polled status of this job
+   */
+  public HadoopJob(HadoopServer location, String id, RunningJob running,
+      JobStatus status) {
+
+    this.location = location;
+    this.jobId = id;
+    this.running = running;
+
+    loadJobFile();
+
+    update(status);
   }
 
-  public void print() {
-    System.out.println("Job name = " + name);
-    System.out.println("Job id = " + jobId);
-    System.out.println("Job total maps = " + totalMaps);
-    System.out.println("Job completed maps = " + completedMaps);
-    System.out.println("Map percentage complete = " + mapPercentage);
-    System.out.println("Job total reduces = " + totalReduces);
-    System.out.println("Job completed reduces = " + completedReduces);
-    System.out.println("Reduce percentage complete = " + reducePercentage);
-    System.out.flush();
+  /**
+   * Try to locate and load the JobConf file for this job so as to get
+   * more details on the job (number of maps and of reduces)
+   */
+  private void loadJobFile() {
+    try {
+      String jobFile = getJobFile();
+      FileSystem fs = location.getDFS();
+      File tmp = File.createTempFile(getJobId(), ".xml");
+      if (FileUtil.copy(fs, new Path(jobFile), tmp, false, location
+          .getConfiguration())) {
+        this.jobConf = new JobConf(tmp.toString());
+
+        this.totalMaps = jobConf.getNumMapTasks();
+        this.totalReduces = jobConf.getNumReduceTasks();
+      }
+
+    } catch (IOException ioe) {
+      ioe.printStackTrace();
+    }
+  }
+
+  /* @inheritDoc */
+  @Override
+  public int hashCode() {
+    final int prime = 31;
+    int result = 1;
+    result = prime * result + ((jobId == null) ? 0 : jobId.hashCode());
+    result = prime * result + ((location == null) ? 0 : location.hashCode());
+    return result;
   }
 
-  public String getId() {
-    return this.name;
+  /* @inheritDoc */
+  @Override
+  public boolean equals(Object obj) {
+    if (this == obj)
+      return true;
+    if (obj == null)
+      return false;
+    if (!(obj instanceof HadoopJob))
+      return false;
+    final HadoopJob other = (HadoopJob) obj;
+    if (jobId == null) {
+      if (other.jobId != null)
+        return false;
+    } else if (!jobId.equals(other.jobId))
+      return false;
+    if (location == null) {
+      if (other.location != null)
+        return false;
+    } else if (!location.equals(other.location))
+      return false;
+    return true;
   }
-  
+
+  /**
+   * Get the running state of the Job (see {@link JobState}).
+   * 
+   * @return the current state of this job
+   */
+  public JobState getState() {
+    if (this.completed) {
+      if (this.successful) {
+        return JobState.SUCCEEDED;
+      } else {
+        return JobState.FAILED;
+      }
+    } else {
+      return JobState.RUNNING;
+    }
+    // return JobState.ofInt(this.status.getRunState());
+  }
+
+  /**
+   * @return the unique identifier of this job
+   */
   public String getJobId() {
     return this.jobId;
   }
-  
+
+  /**
+   * @return the location this job runs on
+   */
+  public HadoopServer getLocation() {
+    return this.location;
+  }
+
+  /**
+   * @return true if this job is completed
+   */
   public boolean isCompleted() {
     return this.completed;
   }
 
-  @Override
-  public boolean equals(Object o) {
-    return (o instanceof HadoopJob) && ((HadoopJob) o).name.equals(name);
+  /**
+   * @return the name of this job
+   */
+  public String getJobName() {
+    return this.running.getJobName();
   }
 
-  public String getState() {
-    return (!completed) ? "Running" : "Completed";
+  /**
+   * @return the path of the job file (job configuration)
+   */
+  public String getJobFile() {
+    return this.running.getJobFile();
   }
 
+  /**
+   * Return the tracking URL for this Job.
+   * 
+   * @return string representation of the tracking URL for this Job
+   */
+  public String getTrackingURL() {
+    return this.running.getTrackingURL();
+  }
+
+  /**
+   * Returns a string representation of this job status
+   * 
+   * @return string representation of this job status
+   */
   public String getStatus() {
+
     StringBuffer s = new StringBuffer();
 
     s.append("Maps : " + completedMaps + "/" + totalMaps);
-    s.append(" (" + mapPercentage + ")");
+    s.append(" (" + mapProgress + ")");
     s.append("  Reduces : " + completedReduces + "/" + totalReduces);
-    s.append(" (" + reducePercentage + ")");
+    s.append(" (" + reduceProgress + ")");
 
     return s.toString();
   }
 
-  public HadoopServer getServer() {
-    return this.server;
+  /**
+   * Update this job status according to the given JobStatus
+   * 
+   * @param status the freshly polled status of this job
+   */
+  void update(JobStatus status) {
+    this.status = status;
+    try {
+      this.counters = running.getCounters();
+      this.completed = running.isComplete();
+      this.successful = running.isSuccessful();
+      this.mapProgress = running.mapProgress();
+      this.reduceProgress = running.reduceProgress();
+      // running.getTaskCompletionEvents(fromEvent);
+
+    } catch (IOException ioe) {
+      ioe.printStackTrace();
+    }
+
+    this.completedMaps = (int) (this.totalMaps * this.mapProgress);
+    this.completedReduces = (int) (this.totalReduces * this.reduceProgress);
+  }
+
+  /**
+   * Print this job's counters (for debugging purposes)
+   */
+  void printCounters() {
+    System.out.printf("New Job:\n");
+    for (String groupName : counters.getGroupNames()) {
+      Counters.Group group = counters.getGroup(groupName);
+      System.out.printf("\t%s[%s]\n", groupName, group.getDisplayName());
+
+      for (String name : group.getCounterNames()) {
+        System.out.printf("\t\t%s: %s\n", name, group.getCounter(name));
+      }
+    }
+    System.out.printf("\n");
+  }
+
+  /**
+   * Kill this job
+   */
+  public void kill() {
+    try {
+      this.running.killJob();
+      this.killed = true;
+
+    } catch (IOException e) {
+      e.printStackTrace();
+    }
+  }
+
+  /**
+   * Print this job's status (for debugging purposes)
+   */
+  public void display() {
+    System.out.printf("Job id=%s, name=%s\n", getJobId(), getJobName());
+    System.out.printf("Configuration file: %s\n", getJobFile());
+    System.out.printf("Tracking URL: %s\n", getTrackingURL());
+
+    System.out.printf("Completion: map: %f reduce %f\n",
+        100.0 * this.mapProgress, 100.0 * this.reduceProgress);
+
+    System.out.println("Job total maps = " + totalMaps);
+    System.out.println("Job completed maps = " + completedMaps);
+    System.out.println("Map percentage complete = " + mapProgress);
+    System.out.println("Job total reduces = " + totalReduces);
+    System.out.println("Job completed reduces = " + completedReduces);
+    System.out.println("Reduce percentage complete = " + reduceProgress);
+    System.out.flush();
   }
+
 }

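HadoopJob.update() derives the completed-task counts from the progress fractions returned by RunningJob rather than from per-task reports, and the int cast truncates toward zero. The same arithmetic in isolation, with made-up numbers:

public class ProgressMath {
  public static void main(String[] args) {
    int totalMaps = 40;        // hypothetical: JobConf.getNumMapTasks()
    float mapProgress = 0.62f; // hypothetical: RunningJob.mapProgress()

    // Same derivation as HadoopJob.update(): truncation, not rounding
    int completedMaps = (int) (totalMaps * mapProgress);
    System.out.println(completedMaps + "/" + totalMaps); // -> 24/40
  }
}
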
Modified: lucene/hadoop/trunk/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/server/HadoopServer.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/server/HadoopServer.java?rev=588310&r1=588309&r2=588310&view=diff
==============================================================================
--- lucene/hadoop/trunk/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/server/HadoopServer.java (original)
+++ lucene/hadoop/trunk/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/server/HadoopServer.java Thu Oct 25 11:58:32 2007
@@ -18,679 +18,491 @@
 
 package org.apache.hadoop.eclipse.server;
 
-import java.io.BufferedInputStream;
-import java.io.BufferedReader;
+import java.io.File;
+import java.io.FileOutputStream;
 import java.io.IOException;
-import java.io.InputStream;
-import java.io.StringReader;
-import java.net.HttpURLConnection;
-import java.net.MalformedURLException;
-import java.net.Socket;
-import java.net.URL;
-import java.util.ArrayList;
 import java.util.Collection;
 import java.util.Collections;
 import java.util.HashSet;
-import java.util.List;
 import java.util.Map;
+import java.util.Set;
 import java.util.TreeMap;
-import java.util.Vector;
 import java.util.logging.Logger;
 
-import javax.net.SocketFactory;
-
-import org.apache.hadoop.eclipse.JSchUtilities;
-import org.apache.hadoop.eclipse.launch.SWTUserInfo;
-import org.apache.hadoop.eclipse.servers.ServerRegistry;
-import org.eclipse.core.runtime.CoreException;
+import javax.xml.parsers.DocumentBuilder;
+import javax.xml.parsers.DocumentBuilderFactory;
+import javax.xml.parsers.ParserConfigurationException;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.eclipse.Activator;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.mapred.JobClient;
+import org.apache.hadoop.mapred.JobConf;
+import org.apache.hadoop.mapred.JobStatus;
+import org.apache.hadoop.mapred.RunningJob;
 import org.eclipse.core.runtime.IProgressMonitor;
 import org.eclipse.core.runtime.IStatus;
 import org.eclipse.core.runtime.Status;
 import org.eclipse.core.runtime.jobs.Job;
-import org.eclipse.debug.core.DebugPlugin;
-import org.eclipse.debug.core.ILaunchConfiguration;
-import org.eclipse.debug.core.ILaunchConfigurationType;
-import org.eclipse.debug.core.ILaunchConfigurationWorkingCopy;
-import org.eclipse.debug.core.model.ILaunchConfigurationDelegate;
-import org.eclipse.debug.ui.DebugUITools;
 import org.eclipse.swt.widgets.Display;
-
-import com.jcraft.jsch.JSchException;
-import com.jcraft.jsch.Session;
+import org.w3c.dom.Document;
+import org.w3c.dom.Element;
+import org.w3c.dom.Node;
+import org.w3c.dom.NodeList;
+import org.w3c.dom.Text;
+import org.xml.sax.SAXException;
 
 /**
- * Methods for defining and interacting with a Hadoop MapReduce server
+ * Representation of a Hadoop location, i.e. of the master node (NameNode,
+ * JobTracker).
+ * 
+ * <p>
+ * This class does not create any SSH connection anymore. Tunneling must be
+ * set up outside of Eclipse for now (using Putty or <tt>ssh -D&lt;port&gt;
+ * &lt;host&gt;</tt>)
+ * 
+ * <p>
+ * <em> TODO </em>
+ * <li> Disable the updater if a location becomes unreachable or fails for
+ * too long
+ * <li> Stop the updater on location's disposal/removal
  */
 
 public class HadoopServer {
 
-  private static final int JOB_TRACKER_PORT = 50030;
-
-  private PingJob ping;
-
-  protected static final long PING_DELAY = 1500;
-
-  /**
-   * Location of Hadoop jars on the server
-   */
-  private String installPath;
-
   /**
-   * User name to use to connect to the server
+   * Frequency of location status observations expressed as the delay in ms
+   * between each observation
+   * 
+   * TODO Add a preference parameter for this
    */
-  private String userName;
+  protected static final long STATUS_OBSERVATION_DELAY = 1500;
 
   /**
-   * Host name of the hadoop server
-   */
-  private String hostName;
-
-  private String password;
-
-  // state and status - transient
-  private transient String state = "";
-
-  private transient Map<String, HadoopJob> jobs =
-      Collections.synchronizedMap(new TreeMap<String, HadoopJob>());
-
-  private transient List<JarModule> jars =
-      Collections.synchronizedList(new ArrayList<JarModule>());
-
-  /**
-   * User-defined name for the server (set from Eclipse)
+   * Background job that regularly polls this location and updates the
+   * status of the running jobs
    */
-  private String name;
+  public class LocationStatusUpdater extends Job {
 
-  /**
-   * Host name of the tunneling machine
-   */
-  private String tunnelHostName;
+    JobClient client = null;
 
-  /**
-   * User name to use to connect to the tunneling machine
-   */
-  private String tunnelUserName;
+    /**
+     * Set up the updater
+     */
+    public LocationStatusUpdater() {
+      super("Map/Reduce location status updater");
+      this.setSystem(true);
+    }
 
-  private String tunnelPassword;
+    /* @inheritDoc */
+    @Override
+    protected IStatus run(IProgressMonitor monitor) {
+      if (client == null) {
+        try {
+          client = HadoopServer.this.getJobClient();
 
-  static Logger log = Logger.getLogger(HadoopServer.class.getName());
+        } catch (IOException ioe) {
+          client = null;
+          return new Status(Status.ERROR, Activator.PLUGIN_ID,
+              "Cannot connect to the Map/Reduce location: "
+                  + HadoopServer.this.getLocationName());
+        }
+      }
 
-  public HadoopServer(String uri, String name) {
-    this.name = name;
+      try {
+        // Set of all known existing Job IDs we want fresh info of
+        Set<String> missingJobIds =
+            new HashSet<String>(runningJobs.keySet());
+
+        JobStatus[] jstatus = client.jobsToComplete();
+        for (JobStatus status : jstatus) {
+
+          String jobId = status.getJobId();
+          missingJobIds.remove(jobId);
+
+          HadoopJob hJob;
+          synchronized (HadoopServer.this.runningJobs) {
+            hJob = runningJobs.get(jobId);
+            if (hJob == null) {
+              // Unknown job, create an entry
+              RunningJob running = client.getJob(jobId);
+              hJob =
+                  new HadoopJob(HadoopServer.this, jobId, running, status);
+              newJob(hJob);
+            }
+          }
 
-    String[] hostInfo = uri.split(":");
-    String[] loginInfo = hostInfo[0].split("@");
+          // Update HadoopJob with fresh infos
+          updateJob(hJob, status);
+        }
 
-    installPath = hostInfo[1];
-    userName = loginInfo[0];
-    hostName = loginInfo[1];
-  }
+        // Ask explicitly for fresh info for these Job IDs
+        for (String jobId : missingJobIds) {
+          HadoopJob hJob = runningJobs.get(jobId);
+          if (!hJob.isCompleted())
+            updateJob(hJob, null);
+        }
 
-  public HadoopServer(String uri, String name, String tunnelVia,
-      String tunnelUserName) {
-    this(uri, name);
-    this.tunnelHostName = tunnelVia;
-    this.tunnelUserName = tunnelUserName;
-  }
+      } catch (IOException ioe) {
+        client = null;
+        return new Status(Status.ERROR, Activator.PLUGIN_ID,
+            "Cannot retrieve running Jobs on location: "
+                + HadoopServer.this.getLocationName());
+      }
 
-  /**
-   * Create an SSH session with no timeout
-   * 
-   * @return Session object with no timeout
-   * @throws JSchException
-   */
-  public Session createSessionNoTimeout() throws JSchException {
-    return createSession(0);
-  }
+      // Schedule the next observation
+      schedule(STATUS_OBSERVATION_DELAY);
 
-  /**
-   * Create an SSH session with no timeout
-   * 
-   * @return Session object with no timeout
-   * @throws JSchException
-   */
-  public Session createSession() throws JSchException {
-    return createSession(0);
-  }
+      return Status.OK_STATUS;
+    }
 
-  /**
-   * Creates a SSH session with a specified timeout
-   * 
-   * @param timeout the amount of time before the session expires
-   * @return Returns the created session object representing the SSH session.
-   * @throws JSchException
-   */
-  public Session createSession(int timeout) throws JSchException {
-    if (tunnelHostName == null) {
-      Session session =
-          JSchUtilities.createJSch().getSession(userName, hostName, 22);
-      session.setUserInfo(new SWTUserInfo() {
-        @Override
-        public String getPassword() {
-          return HadoopServer.this.password;
-        }
+    /**
+     * Stores the new job and makes it available to listeners
+     * 
+     * @param data the new job
+     */
+    private void newJob(final HadoopJob data) {
+      runningJobs.put(data.getJobId(), data);
 
-        @Override
-        public void setPassword(String pass) {
-          HadoopServer.this.password = pass;
+      Display.getDefault().asyncExec(new Runnable() {
+        public void run() {
+          fireJobAdded(data);
         }
-
       });
-      if (!session.isConnected()) {
-        try {
-          session.connect();
-        } catch (JSchException jse) {
-          // Reset password in case the authentication failed
-          if (jse.getMessage().equals("Auth fail"))
-            this.password = null;
-          throw jse;
-        }
-      }
+    }
 
-      return session;
-    } else {
-      createSshTunnel();
-
-      Session session =
-          JSchUtilities.createJSch().getSession(userName, "localhost",
-              tunnelPort);
-      session.setUserInfo(new SWTUserInfo() {
-        @Override
-        public String getPassword() {
-          return HadoopServer.this.password;
-        }
+    /**
+     * Updates the status of a job
+     * 
+     * @param job the job to update
+     * @param status the new status of the job (may be null)
+     */
+    private void updateJob(final HadoopJob job, JobStatus status) {
+      job.update(status);
 
-        @Override
-        public void setPassword(String pass) {
-          HadoopServer.this.password = pass;
+      Display.getDefault().asyncExec(new Runnable() {
+        public void run() {
+          fireJobChanged(job);
         }
       });
-      if (!session.isConnected()) {
-        try {
-          session.connect();
-        } catch (JSchException jse) {
-          // Reset password in case the authentication failed
-          if (jse.getMessage().equals("Auth fail"))
-            this.password = null;
-          throw jse;
-        }
-      }
-      if (timeout > -1) {
-        session.setTimeout(timeout);
-      }
-      return session;
     }
+
   }
 
-  private Session createTunnel(int port) throws JSchException {
-    Session tunnel;
+  static Logger log = Logger.getLogger(HadoopServer.class.getName());
 
-    tunnelPort = -1;
-    for (int i = 0; !((i > 4) || (tunnelPort > -1)); i++) {
-      try {
-        Socket socket = SocketFactory.getDefault().createSocket();
-        socket.bind(null);
-        tunnelPort = socket.getLocalPort();
-        socket.close();
-      } catch (IOException e) {
-        // ignore, retry
-      }
-    }
+  /**
+   * Hadoop configuration of the location. Also contains specific parameters
+   * for the plug-in. These parameters are prefixed with eclipse.plug-in.*
+   */
+  private Configuration conf;
 
-    if (tunnelPort == -1) {
-      throw new JSchException("No free local port found to bound to");
-    }
+  /**
+   * Job listeners
+   */
+  private Set<IJobListener> jobListeners = new HashSet<IJobListener>();
 
-    tunnel =
-        JSchUtilities.createJSch().getSession(tunnelUserName,
-            tunnelHostName, 22);
-    tunnel.setTimeout(0);
-    tunnel.setPortForwardingL(tunnelPort, hostName, port);
-
-    tunnel.setUserInfo(new SWTUserInfo() {
-      @Override
-      public String getPassword() {
-        return HadoopServer.this.tunnelPassword;
-      }
+  /**
+   * Jobs running on this location. The keys of this map are the Job IDs.
+   */
+  private transient Map<String, HadoopJob> runningJobs =
+      Collections.synchronizedMap(new TreeMap<String, HadoopJob>());
 
-      @Override
-      public void setPassword(String password) {
-        HadoopServer.this.tunnelPassword = password;
-      }
-    });
+  /**
+   * Status updater for this location
+   */
+  private LocationStatusUpdater statusUpdater;
 
-    try {
-      tunnel.connect();
-    } catch (JSchException jse) {
-      // Reset password in case the authentication failed
-      if (jse.getMessage().equals("Auth fail"))
-        this.tunnelPassword = null;
-      throw jse;
-    }
+  // state and status - transient
+  private transient String state = "";
 
-    return tunnel;
+  /**
+   * Creates a new default Hadoop location
+   */
+  public HadoopServer() {
+    this.conf = new Configuration();
+    this.addPluginConfigDefaultProperties();
   }
 
-  private void createSshTunnel() throws JSchException {
-    if ((sshTunnel != null) && sshTunnel.isConnected()) {
-      sshTunnel.disconnect();
-    }
-
-    sshTunnel = createTunnel(22);
-  }
+  /**
+   * Creates a location from a file
+   * 
+   * @throws IOException
+   * @throws SAXException
+   * @throws ParserConfigurationException
+   */
+  public HadoopServer(File file) throws ParserConfigurationException,
+      SAXException, IOException {
 
-  private void createHttpTunnel(int port) throws JSchException {
-    if ((httpTunnel == null) || !httpTunnel.isConnected()) {
-      httpTunnel = createTunnel(port);
-    }
+    this.conf = new Configuration();
+    this.addPluginConfigDefaultProperties();
+    this.loadFromXML(file);
   }
 
   /**
-   * Return the effective host name to use to contact the server. The
-   * effective host name might be "localhost" if we setup a tunnel.
+   * Create a new Hadoop location by copying an already existing one.
    * 
-   * @return the effective host name to use contact the server
+   * @param source the location to copy
    */
-  public String getEffectiveHostName() {
-    if ((tunnelHostName != null) && (tunnelHostName.length() > 0)) {
-      return "localhost";
-    }
-
-    return this.hostName;
+  public HadoopServer(HadoopServer existing) {
+    this();
+    this.load(existing);
   }
-  
-  public String getHostName() {
-    return this.hostName;
+
+  public void addJobListener(IJobListener l) {
+    jobListeners.add(l);
   }
 
-  public void setHostname(String hostname) {
-    this.hostName = hostname;
+  public void dispose() {
+    // TODO close DFS connections?
   }
 
   /**
-   * Gets the path where the hadoop jars are stored.
+   * Lists all elements that should be present in the Server window (all
+   * servers and all jobs running on each server)
    * 
-   * @return String containing the path to the hadoop jars.
+   * @return collection of jobs for this location
    */
-  public String getInstallPath() {
-    return installPath;
+  public Collection<HadoopJob> getJobs() {
+    startStatusUpdater();
+    return this.runningJobs.values();
   }
 
   /**
-   * Sets the path where the hadoop jars are stored.
+   * Remove the given job from the currently running jobs map
    * 
-   * @param path The directory where the hadoop jars are stored.
+   * @param job the job to remove
    */
-  public void setPath(String path) {
-    this.installPath = path;
-  }
-
-  public String getUserName() {
-    return userName;
-  }
-
-  public void setUser(String user) {
-    this.userName = user;
+  public void purgeJob(final HadoopJob job) {
+    runningJobs.remove(job.getJobId());
+    Display.getDefault().asyncExec(new Runnable() {
+      public void run() {
+        fireJobRemoved(job);
+      }
+    });
   }
 
-  public String getPassword() {
-    return password;
+  /**
+   * Returns the {@link Configuration} defining this location.
+   * 
+   * @return the location configuration
+   */
+  public Configuration getConfiguration() {
+    return this.conf;
   }
 
-  public void setPassword(String password) {
-    log.fine("Server password set to " + password);
-    this.password = password;
+  /**
+   * Gets a Hadoop configuration property value
+   * 
+   * @param prop the configuration property
+   * @return the property value
+   */
+  public String getConfProp(ConfProp prop) {
+    return prop.get(conf);
   }
 
-  @Override
-  public String toString() {
-    return this.userName + "@" + this.hostName + ":" + this.installPath;
+  /**
+   * Gets a Hadoop configuration property value
+   * 
+   * @param propName the property name
+   * @return the property value
+   */
+  public String getConfProp(String propName) {
+    return this.conf.get(propName);
   }
 
-  public String getName() {
-    return this.name;
+  public String getLocationName() {
+    return ConfProp.PI_LOCATION_NAME.get(conf);
   }
 
   /**
-   * Returns the URL for the Job Tracker (default is port 50030)
+   * Returns the master host name of the Hadoop location (the Job tracker)
    * 
-   * @return URL for the Job Tracker
-   * @throws MalformedURLException
+   * @return the host name of the Job tracker
    */
-  public URL getJobTrackerUrl() throws MalformedURLException {
-    if (tunnelHostName == null) {
-      return new URL("http://" + getEffectiveHostName() + ":"
-          + JOB_TRACKER_PORT + "/jobtracker.jsp");
-    } else {
-      try {
-        createHttpTunnel(JOB_TRACKER_PORT);
-
-        String port = httpTunnel.getPortForwardingL()[0].split(":")[0];
-        return new URL("http://localhost:" + port + "/jobtracker.jsp");
-      } catch (JSchException e) {
-        // / BUG(jz) -- need to display error here
-        return null;
-      }
-    }
+  public String getMasterHostName() {
+    return getConfProp(ConfProp.PI_JOB_TRACKER_HOST);
   }
 
   public String getState() {
     return state;
   }
 
-  public Object[] getChildren() {
-    /*
-     * List all elements that should be present in the Server window (all
-     * servers and all jobs running on each servers)
-     */
-    checkPingJobRunning();
-    Collection<Object> collection =
-        new ArrayList<Object>(this.jobs.values());
-    collection.addAll(jars);
-    return collection.toArray();
+  /**
+   * Overwrite this location with the given existing location
+   * 
+   * @param existing the existing location
+   */
+  public void load(HadoopServer existing) {
+    this.conf = new Configuration(existing.conf);
   }
 
-  private synchronized void checkPingJobRunning() {
-    if (ping == null) {
-      ping = new PingJob();
-      ping.setSystem(true);
-      ping.schedule();
+  /**
+   * Overwrite this location with settings available in the given XML file.
+   * The existing configuration is preserved if the XML file is invalid.
+   * 
+   * @param file the file path of the XML file
+   * @return validity of the XML file
+   * @throws ParserConfigurationException
+   * @throws IOException
+   * @throws SAXException
+   */
+  public boolean loadFromXML(File file) throws ParserConfigurationException,
+      SAXException, IOException {
+
+    Configuration newConf = new Configuration(this.conf);
+
+    DocumentBuilder builder =
+        DocumentBuilderFactory.newInstance().newDocumentBuilder();
+    Document document = builder.parse(file);
+
+    Element root = document.getDocumentElement();
+    if (!"configuration".equals(root.getTagName()))
+      return false;
+    NodeList props = root.getChildNodes();
+    for (int i = 0; i < props.getLength(); i++) {
+      Node propNode = props.item(i);
+      if (!(propNode instanceof Element))
+        continue;
+      Element prop = (Element) propNode;
+      if (!"property".equals(prop.getTagName()))
+        return false;
+      NodeList fields = prop.getChildNodes();
+      String attr = null;
+      String value = null;
+      for (int j = 0; j < fields.getLength(); j++) {
+        Node fieldNode = fields.item(j);
+        if (!(fieldNode instanceof Element))
+          continue;
+        Element field = (Element) fieldNode;
+        if ("name".equals(field.getTagName()))
+          attr = ((Text) field.getFirstChild()).getData();
+        if ("value".equals(field.getTagName()) && field.hasChildNodes())
+          value = ((Text) field.getFirstChild()).getData();
+      }
+      if (attr != null && value != null)
+        newConf.set(attr, value);
     }
-  }
-
-  private HashSet<IJobListener> jobListeners = new HashSet<IJobListener>();
-
-  private Session sshTunnel;
-
-  private Session httpTunnel;
 
-  private int tunnelPort;
-
-  private int id;
-
-  public void addJobListener(IJobListener l) {
-    jobListeners.add(l);
+    this.conf = newConf;
+    return true;
   }
 
-  protected void fireJobChanged(HadoopJob job) {
-    for (IJobListener listener : jobListeners) {
-      listener.jobChanged(job);
-    }
+  /**
+   * Sets a Hadoop configuration property value
+   * 
+   * @param prop the property
+   * @param propValue the property value
+   */
+  public void setConfProp(ConfProp prop, String propValue) {
+    prop.set(conf, propValue);
   }
 
-  protected void fireJobAdded(HadoopJob job) {
-    for (IJobListener listener : jobListeners) {
-      listener.jobAdded(job);
-    }
+  /**
+   * Sets a Hadoop configuration property value
+   * 
+   * @param propName the property name
+   * @param propValue the property value
+   */
+  public void setConfProp(String propName, String propValue) {
+    this.conf.set(propName, propValue);
   }
 
-  protected void fireJarPublishStart(JarModule jar) {
-    for (IJobListener listener : jobListeners) {
-      listener.publishStart(jar);
-    }
+  public void setLocationName(String newName) {
+    ConfProp.PI_LOCATION_NAME.set(conf, newName);
   }
 
-  protected void fireJarPublishDone(JarModule jar) {
-    for (IJobListener listener : jobListeners) {
-      listener.publishDone(jar);
-    }
+  /**
+   * Write this location's settings to the given file
+   * 
+   * @param file the destination file
+   * @throws IOException
+   */
+  public void storeSettingsToFile(File file) throws IOException {
+    FileOutputStream fos = new FileOutputStream(file);
+    try {
+      this.conf.write(fos);
+    } finally {
+      fos.close();
+    }
   }
 
-  public void runJar(JarModule jar, IProgressMonitor monitor) {
-    log.fine("Run Jar: " + jar);
-    ILaunchConfigurationType launchConfigType =
-        DebugPlugin.getDefault().getLaunchManager()
-            .getLaunchConfigurationType(
-                "org.apache.hadoop.eclipse.launch.StartServer");
-
-    jars.add(jar);
-    fireJarPublishStart(jar);
-
-    try {
-      ILaunchConfiguration[] matchingConfigs =
-          DebugPlugin.getDefault().getLaunchManager()
-              .getLaunchConfigurations(launchConfigType);
-      ILaunchConfiguration launchConfig = null;
-
-      // TODO(jz) allow choosing correct config, for now we're always
-      // going to use the first
-      if (matchingConfigs.length == 1) {
-        launchConfig = matchingConfigs[0];
-      } else {
-        launchConfig =
-            launchConfigType
-                .newInstance(null, DebugPlugin.getDefault()
-                    .getLaunchManager()
-                    .generateUniqueLaunchConfigurationNameFrom(
-                        "Run Hadoop Jar"));
-      }
-
-      ILaunchConfigurationWorkingCopy copy =
-          launchConfig
-              .copy("Run " + jar.getName() + " on " + this.getName());
-
-      // COMMENTED(jz) - perform the jarring in the launch delegate now
-      // copy.setAttribute("hadoop.jar",
-      // jar.buildJar(monitor).toString());
-
-      copy.setAttribute("hadoop.jarrable", jar.toMemento());
-      copy.setAttribute("hadoop.host", this.getEffectiveHostName());
-      copy.setAttribute("hadoop.user", this.getUserName());
-      copy.setAttribute("hadoop.serverid", this.id);
-      copy.setAttribute("hadoop.path", this.getInstallPath());
-      ILaunchConfiguration saved = copy.doSave();
-
-      // NOTE(jz) became deprecated in 3.3, replaced with getDelegates
-      // (plural) method,
-      // as this new method is marked experimental leaving as-is for now
-      ILaunchConfigurationDelegate delegate =
-          launchConfigType.getDelegate("run");
-      // only support run for now
-      DebugUITools.launch(saved, "run");
-    } catch (CoreException e) {
-      // TODO(jz) autogen
-      e.printStackTrace();
-    } finally {
-      jars.remove(jar);
-      fireJarPublishDone(jar);
-    }
+  /* @inheritDoc */
+  @Override
+  public String toString() {
+    return this.getLocationName();
   }
 
-  public class PingJob extends Job {
-    public PingJob() {
-      super("Get MapReduce server status");
-    }
-
-    @Override
-    protected IStatus run(IProgressMonitor monitor) {
-      HttpURLConnection connection = null;
-
-      try {
-        connection = (HttpURLConnection) getJobTrackerUrl().openConnection();
-        connection.connect();
-
-        String previousState = state;
-
-        if (connection.getResponseCode() == 200) {
-          state = "Started";
-
-          StringBuffer string = new StringBuffer();
-          byte[] buffer = new byte[1024];
-          InputStream in =
-              new BufferedInputStream(connection.getInputStream());
-          int bytes = 0;
-          while ((bytes = in.read(buffer)) != -1) {
-            string.append(new String(buffer, 0, bytes));
-          }
-
-          HadoopJob[] jobData = getJobData(string.toString());
-          for (int i = 0; i < jobData.length; i++) {
-            HadoopJob job = jobData[i];
-            if (jobs.containsKey((job.getId()))) {
-              updateJob(job);
-            } else {
-              addJob(job);
-            }
-          }
-        } else {
-          state = "Stopped";
-        }
-
-        if (!state.equals(previousState)) {
-          ServerRegistry.getInstance().stateChanged(HadoopServer.this);
-        }
-      } catch (Exception e) {
-        state = "Stopped (Connection Error)";
-      }
-
-      schedule(PING_DELAY);
-      return Status.OK_STATUS;
+  /**
+   * Fill the configuration with valid default values
+   */
+  private void addPluginConfigDefaultProperties() {
+    for (ConfProp prop : ConfProp.values()) {
+      if (conf.get(prop.name) == null)
+        conf.set(prop.name, prop.defVal);
     }
   }
 
-  private void updateJob(final HadoopJob data) {
-    jobs.put(data.getId(), data);
-    // TODO(jz) only if it has changed
-    Display.getDefault().syncExec(new Runnable() {
-      public void run() {
-        fireJobChanged(data);
-      }
-    });
+  /**
+   * Starts the location status updater
+   */
+  private synchronized void startStatusUpdater() {
+    if (statusUpdater == null) {
+      statusUpdater = new LocationStatusUpdater();
+      statusUpdater.schedule();
+    }
   }
 
-  private void addJob(final HadoopJob data) {
-    jobs.put(data.getId(), data);
+  /*
+   * Rewrite of the connection and tunneling to the Hadoop location
+   */
 
-    Display.getDefault().syncExec(new Runnable() {
-      public void run() {
-        fireJobAdded(data);
-      }
-    });
+  /**
+   * Provides access to the default file system of this location.
+   * 
+   * @return a {@link FileSystem}
+   */
+  public FileSystem getDFS() throws IOException {
+    return FileSystem.get(this.conf);
   }
 
   /**
-   * Parse the job tracker data to display currently running and completed
-   * jobs.
+   * Provides access to the Job tracking system of this location
    * 
-   * @param jobTrackerHtml The HTML returned from the Job Tracker port
-   * @return an array of Strings that contain job status info
+   * @return a {@link JobClient}
    */
-  public HadoopJob[] getJobData(String jobTrackerHtml) {
-    try {
-      Vector<HadoopJob> jobsVector = new Vector<HadoopJob>();
-
-      BufferedReader in =
-          new BufferedReader(new StringReader(jobTrackerHtml));
-
-      String inputLine;
-
-      boolean completed = false;
-      while ((inputLine = in.readLine()) != null) {
-        // stop once we reach failed jobs (which are after running and
-        // completed jobs)
-        if (inputLine.indexOf("Failed Jobs") != -1) {
-          break;
-        }
-
-        if (inputLine.indexOf("Completed Jobs") != -1) {
-          completed = true;
-        }
-
-        // skip lines without data (stored in a table)
-        if (!inputLine.startsWith("<tr><td><a")) {
-          // log.debug (" > " + inputLine, verbose);
-          continue;
-        }
-
-        HadoopJob jobData = new HadoopJob(HadoopServer.this);
-
-        String[] values = inputLine.split("</td><td>");
-
-        String jobId = values[0].trim();
-        String realJobId =
-            jobId.substring(jobId.lastIndexOf("_") + 1, jobId
-                .lastIndexOf("_") + 5);
-        String name = values[2].trim();
-        if (name.equals("&nbsp;")) {
-          name = "(untitled)";
-        }
-        jobData.name = name + "(" + realJobId + ")";
-        jobData.jobId = "job_" + realJobId;
-        jobData.completed = completed;
-
-        jobData.mapPercentage = values[3].trim();
-        jobData.totalMaps = values[4].trim();
-        jobData.completedMaps = values[5].trim();
-        jobData.reducePercentage = values[6].trim();
-        jobData.totalReduces = values[7].trim();
-        jobData.completedReduces =
-            values[8].substring(0, values[8].indexOf("<")).trim();
-
-        jobsVector.addElement(jobData);
-      }
-
-      in.close();
-
-      // convert vector to array
-      HadoopJob[] jobArray = new HadoopJob[jobsVector.size()];
-      for (int j = 0; j < jobsVector.size(); j++) {
-        jobArray[j] = jobsVector.elementAt(j);
-      }
-
-      return jobArray;
-    } catch (Exception e) {
-      e.printStackTrace();
-    }
-
-    return null;
+  public JobClient getJobClient() throws IOException {
+    JobConf jconf = new JobConf(this.conf);
+    return new JobClient(jconf);
   }
 
-  public void dispose() {
-    if ((sshTunnel != null) && sshTunnel.isConnected()) {
-      sshTunnel.disconnect();
-    }
+  /*
+   * Listeners handling
+   */
 
-    if ((httpTunnel != null) && httpTunnel.isConnected()) {
-      httpTunnel.disconnect();
+  protected void fireJarPublishDone(JarModule jar) {
+    for (IJobListener listener : jobListeners) {
+      listener.publishDone(jar);
     }
   }
 
-  public String getTunnelHostName() {
-    return tunnelHostName;
+  protected void fireJarPublishStart(JarModule jar) {
+    for (IJobListener listener : jobListeners) {
+      listener.publishStart(jar);
+    }
   }
 
-  public String getTunnelUserName() {
-    return tunnelUserName;
+  protected void fireJobAdded(HadoopJob job) {
+    for (IJobListener listener : jobListeners) {
+      listener.jobAdded(job);
+    }
   }
 
-  public void setId(int i) {
-    this.id = i;
+  protected void fireJobRemoved(HadoopJob job) {
+    for (IJobListener listener : jobListeners) {
+      listener.jobRemoved(job);
+    }
   }
 
-  public void setName(String newName) {
-    this.name = newName;
+  protected void fireJobChanged(HadoopJob job) {
+    for (IJobListener listener : jobListeners) {
+      listener.jobChanged(job);
+    }
   }
 
-  public void setURI(String newURI) {
-    String[] hostInfo = newURI.split(":");
-    String[] loginInfo = hostInfo[0].split("@");
-
-    installPath = hostInfo[1];
-    userName = loginInfo[0];
-    hostName = loginInfo[1];
-  }
-  
-  public void setTunnel(String tunnelHostName, String tunnelUserName) {
-    this.tunnelHostName = tunnelHostName;
-    this.tunnelUserName = tunnelUserName;
-  }
-  
-  /**
-   * Returns whether this server uses SSH tunneling or not
-   * @return whether this server uses SSH tunneling or not
-   */
-  public boolean useTunneling() {
-    return (this.tunnelHostName != null);
-  }
-  
 }

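storeSettingsToFile() delegates to Configuration.write(), which emits the usual <configuration>/<property>/<name>/<value> XML, and that is exactly the shape loadFromXML() parses back. A round-trip sketch using only the public API above; it assumes the Hadoop and plug-in classes are on the classpath and uses made-up values:

import java.io.File;

import org.apache.hadoop.eclipse.server.ConfProp;
import org.apache.hadoop.eclipse.server.HadoopServer;

public class LocationRoundTrip {
  public static void main(String[] args) throws Exception {
    HadoopServer location = new HadoopServer();
    location.setLocationName("demo-cluster");
    location.setConfProp(ConfProp.FS_DEFAULT_URI, "hdfs://master:50040/");

    // Serialize to the <configuration>/<property> XML format
    File file = File.createTempFile("location-", ".xml");
    location.storeSettingsToFile(file);

    // The HadoopServer(File) constructor runs loadFromXML() on the file
    HadoopServer restored = new HadoopServer(file);
    System.out.println(restored.getLocationName()); // -> demo-cluster
  }
}
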
Modified: lucene/hadoop/trunk/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/server/IJobListener.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/server/IJobListener.java?rev=588310&r1=588309&r2=588310&view=diff
==============================================================================
--- lucene/hadoop/trunk/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/server/IJobListener.java (original)
+++ lucene/hadoop/trunk/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/server/IJobListener.java Thu Oct 25 11:58:32 2007
@@ -21,13 +21,16 @@
 /**
  * Interface for updating/adding jobs to the MapReduce Server view.
  */
-
 public interface IJobListener {
+
   void jobChanged(HadoopJob job);
 
   void jobAdded(HadoopJob job);
 
+  void jobRemoved(HadoopJob job);
+
   void publishStart(JarModule jar);
 
   void publishDone(JarModule jar);
+
 }

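With jobRemoved() added, views that only care about a subset of the events can implement the interface with empty methods for the rest. A possible listener sketch (hypothetical class, registered through HadoopServer.addJobListener()):

import org.apache.hadoop.eclipse.server.HadoopJob;
import org.apache.hadoop.eclipse.server.IJobListener;
import org.apache.hadoop.eclipse.server.JarModule;

/** Logs job lifecycle events and ignores the publish notifications. */
public class LoggingJobListener implements IJobListener {

  public void jobAdded(HadoopJob job) {
    System.out.println("added: " + job.getJobId());
  }

  public void jobRemoved(HadoopJob job) {
    System.out.println("removed: " + job.getJobId());
  }

  public void jobChanged(HadoopJob job) {
    System.out.println("changed: " + job.getStatus());
  }

  public void publishStart(JarModule jar) { /* not interested */ }

  public void publishDone(JarModule jar) { /* not interested */ }
}

Note that HadoopServer fires these notifications through Display.asyncExec(), i.e. on the SWT display thread, so a real view implementation can update widgets directly from the callbacks.
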
Modified: lucene/hadoop/trunk/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/server/JarModule.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/server/JarModule.java?rev=588310&r1=588309&r2=588310&view=diff
==============================================================================
--- lucene/hadoop/trunk/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/server/JarModule.java (original)
+++ lucene/hadoop/trunk/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/server/JarModule.java Thu Oct 25 11:58:32 2007
@@ -22,37 +22,49 @@
 import java.util.logging.Logger;
 
 import org.apache.hadoop.eclipse.Activator;
+import org.apache.hadoop.eclipse.ErrorMessageDialog;
 import org.eclipse.core.resources.IResource;
-import org.eclipse.core.resources.ResourcesPlugin;
 import org.eclipse.core.runtime.IProgressMonitor;
 import org.eclipse.core.runtime.Path;
 import org.eclipse.jdt.core.ICompilationUnit;
 import org.eclipse.jdt.core.IJavaElement;
-import org.eclipse.jdt.core.IJavaProject;
 import org.eclipse.jdt.core.IType;
-import org.eclipse.jdt.core.JavaCore;
 import org.eclipse.jdt.ui.jarpackager.IJarExportRunnable;
 import org.eclipse.jdt.ui.jarpackager.JarPackageData;
-
+import org.eclipse.jface.operation.IRunnableWithProgress;
+import org.eclipse.swt.widgets.Display;
+import org.eclipse.ui.PlatformUI;
 
 /**
- * Methods for interacting with the jar file containing the 
+ * Methods for interacting with the jar file containing the
  * Mapper/Reducer/Driver classes for a MapReduce job.
  */
 
-public class JarModule {
+public class JarModule implements IRunnableWithProgress {
+
   static Logger log = Logger.getLogger(JarModule.class.getName());
 
-  private final IResource resource;
+  private IResource resource;
+
+  private File jarFile;
 
   public JarModule(IResource resource) {
     this.resource = resource;
   }
 
+  public String getName() {
+    return resource.getProject().getName() + "/" + resource.getName();
+  }
+
   /**
-   * Create the jar file containing all the MapReduce job classes.
+   * Creates a JAR file containing the resource of this module (a Java
+   * class with a main() method) and all associated resources; the
+   * resulting file is then available through getJarFile()
+   * 
+   * @param monitor the progress monitor
    */
-  public File buildJar(IProgressMonitor monitor) {
+  public void run(IProgressMonitor monitor) {
+
     log.fine("Build jar");
     JarPackageData jarrer = new JarPackageData();
 
@@ -61,42 +73,74 @@
     jarrer.setExportOutputFolders(true);
     jarrer.setOverwrite(true);
 
-    Path path;
-
     try {
-      IJavaProject project = (IJavaProject) resource.getProject().getNature(
-          JavaCore.NATURE_ID); // todo(jz)
+      // IJavaProject project =
+      // (IJavaProject) resource.getProject().getNature(JavaCore.NATURE_ID);
+
       // check this is the case before letting this method get called
       Object element = resource.getAdapter(IJavaElement.class);
       IType type = ((ICompilationUnit) element).findPrimaryType();
       jarrer.setManifestMainClass(type);
-      path = new Path(new File(Activator.getDefault().getStateLocation()
-          .toFile(), resource.getProject().getName() + "_project_hadoop_"
-          + resource.getName() + "_" + System.currentTimeMillis() + ".jar")
-          .getAbsolutePath());
-      jarrer.setJarLocation(path);
+
+      // Create a temporary JAR file name
+      File baseDir = Activator.getDefault().getStateLocation().toFile();
+
+      String prefix =
+          String.format("%s_%s-", resource.getProject().getName(), resource
+              .getName());
+      File jarFile = File.createTempFile(prefix, ".jar", baseDir);
+      jarrer.setJarLocation(new Path(jarFile.getAbsolutePath()));
 
       jarrer.setElements(resource.getProject().members(IResource.FILE));
-      IJarExportRunnable runnable = jarrer.createJarExportRunnable(null);
+      IJarExportRunnable runnable =
+          jarrer.createJarExportRunnable(Display.getDefault()
+              .getActiveShell());
       runnable.run(monitor);
+
+      this.jarFile = jarFile;
+
     } catch (Exception e) {
       e.printStackTrace();
       throw new RuntimeException(e);
     }
-
-    return path.toFile();
   }
 
-  public String getName() {
-    return resource.getProject().getName() + "/" + resource.getName();
+  /**
+   * Allow the retrieval of the resulting JAR file
+   * 
+   * @return the generated JAR file
+   */
+  public File getJarFile() {
+    return this.jarFile;
   }
 
-  public static JarModule fromMemento(String memento) {
-    return new JarModule(ResourcesPlugin.getWorkspace().getRoot().findMember(
-        Path.fromPortableString(memento)));
-  }
+  /**
+   * Static way to create a JAR package for the given resource, showing a
+   * progress bar
+   * 
+   * @param resource the resource to package
+   * @return the created JAR file, or null if the packaging failed
+   */
+  public static File createJarPackage(IResource resource) {
+
+    JarModule jarModule = new JarModule(resource);
+    try {
+      PlatformUI.getWorkbench().getProgressService().run(false, true,
+          jarModule);
 
-  public String toMemento() {
-    return resource.getFullPath().toPortableString();
+    } catch (Exception e) {
+      e.printStackTrace();
+      return null;
+    }
+
+    File jarFile = jarModule.getJarFile();
+    if (jarFile == null) {
+      ErrorMessageDialog.display("Run on Hadoop",
+          "Unable to create or locate the JAR file for the Job");
+      return null;
+    }
+
+    return jarFile;
   }
+
 }

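createJarPackage() is now the single entry point for packaging: it runs the JarModule under the workbench progress service with fork=false (so it blocks the calling thread behind a progress dialog) and reports failures through ErrorMessageDialog. A hypothetical call site, assuming it is invoked from the UI thread with a selected IResource in hand:

import java.io.File;

import org.apache.hadoop.eclipse.server.JarModule;
import org.eclipse.core.resources.IResource;

public class RunOnHadoopHelper {

  /** Packages the selected resource; returns the JAR or null on failure. */
  public static File packageForSubmission(IResource selected) {
    // Shows a progress dialog and blocks until the JAR is built
    File jar = JarModule.createJarPackage(selected);
    if (jar == null) {
      // createJarPackage() has already shown an error dialog
      return null;
    }
    System.out.println("Job JAR: " + jar.getAbsolutePath());
    return jar;
  }
}
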
Added: lucene/hadoop/trunk/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/servers/HadoopLocationWizard.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/servers/HadoopLocationWizard.java?rev=588310&view=auto
==============================================================================
--- lucene/hadoop/trunk/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/servers/HadoopLocationWizard.java (added)
+++ lucene/hadoop/trunk/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/servers/HadoopLocationWizard.java Thu Oct 25 11:58:32 2007
@@ -0,0 +1,968 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.eclipse.servers;
+
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.Set;
+import java.util.SortedMap;
+import java.util.TreeMap;
+import java.util.Map.Entry;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.eclipse.server.ConfProp;
+import org.apache.hadoop.eclipse.server.HadoopServer;
+import org.eclipse.jface.dialogs.IMessageProvider;
+import org.eclipse.jface.wizard.WizardPage;
+import org.eclipse.swt.SWT;
+import org.eclipse.swt.custom.ScrolledComposite;
+import org.eclipse.swt.events.ModifyEvent;
+import org.eclipse.swt.events.ModifyListener;
+import org.eclipse.swt.events.SelectionEvent;
+import org.eclipse.swt.events.SelectionListener;
+import org.eclipse.swt.graphics.Image;
+import org.eclipse.swt.layout.GridData;
+import org.eclipse.swt.layout.GridLayout;
+import org.eclipse.swt.widgets.Button;
+import org.eclipse.swt.widgets.Composite;
+import org.eclipse.swt.widgets.Control;
+import org.eclipse.swt.widgets.Display;
+import org.eclipse.swt.widgets.Event;
+import org.eclipse.swt.widgets.Group;
+import org.eclipse.swt.widgets.Label;
+import org.eclipse.swt.widgets.Listener;
+import org.eclipse.swt.widgets.TabFolder;
+import org.eclipse.swt.widgets.TabItem;
+import org.eclipse.swt.widgets.Text;
+
+/**
+ * Wizard for editing the settings of a Hadoop location
+ * 
+ * The wizard contains 3 tabs: General, Tunneling and Advanced. It edits
+ * parameters of the location member, which is either a new location or a
+ * copy of an existing registered location.
+ */
+
+public class HadoopLocationWizard extends WizardPage {
+
+  Image circle;
+
+  /**
+   * The location effectively edited by the wizard. This location is a copy
+   * or a new one.
+   */
+  private HadoopServer location;
+
+  /**
+   * The original location being edited by the wizard (null if we create a
+   * new instance).
+   */
+  private HadoopServer original;
+
+  /**
+   * New Hadoop location wizard
+   */
+  public HadoopLocationWizard() {
+    super("Hadoop Server", "New Hadoop Location", null);
+
+    this.original = null;
+    this.location = new HadoopServer();
+    this.location.setLocationName("");
+  }
+
+  /**
+   * Constructor to edit the parameters of an existing Hadoop server
+   * 
+   * @param server
+   */
+  public HadoopLocationWizard(HadoopServer server) {
+    super("Create a new Hadoop location", "Edit Hadoop Location", null);
+
+    this.original = server;
+    this.location = new HadoopServer(server);
+  }
+
+  /**
+   * Performs any actions appropriate in response to the user having pressed
+   * the Finish button, or refuses if finishing now is not permitted.
+   * 
+   * @return the created or updated Hadoop location, or null on failure
+   */
+
+  public HadoopServer performFinish() {
+    try {
+      if (this.original == null) {
+        // New location
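+        // syncExec() runs the registration on the SWT display thread, as
+        // registry listeners are assumed to update widgets.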
+        Display.getDefault().syncExec(new Runnable() {
+          public void run() {
+            ServerRegistry.getInstance().addServer(
+                HadoopLocationWizard.this.location);
+          }
+        });
+        return this.location;
+
+      } else {
+        // Update location
+        final String originalName = this.original.getLocationName();
+        this.original.load(this.location);
+
+        Display.getDefault().syncExec(new Runnable() {
+          public void run() {
+            ServerRegistry.getInstance().updateServer(originalName,
+                HadoopLocationWizard.this.location);
+          }
+        });
+        return this.original;
+
+      }
+    } catch (Exception e) {
+      e.printStackTrace();
+      setMessage("Invalid server location values", IMessageProvider.ERROR);
+      return null;
+    }
+  }
+
+  /**
+   * Validates the current Hadoop location settings (looks for a Hadoop
+   * installation directory).
+   */
+  private void testLocation() {
+    setMessage("Not implemented yet", IMessageProvider.WARNING);
+  }
+
+  /**
+   * The location is not complete (and the Finish button is not available)
+   * until a host name is specified.
+   * 
+   * @inheritDoc
+   */
+  @Override
+  public boolean isPageComplete() {
+
+    {
+      String locName = location.getConfProp(ConfProp.PI_LOCATION_NAME);
+      if ((locName == null) || (locName.length() == 0)
+          || locName.contains("/")) {
+
+        setMessage("Bad location name: "
+            + "the location name should not contain "
+            + "any character prohibited in a file name.", WARNING);
+
+        return false;
+      }
+    }
+
+    {
+      String master = location.getConfProp(ConfProp.PI_JOB_TRACKER_HOST);
+      if ((master == null) || (master.length() == 0)) {
+
+        setMessage("Bad master host name: "
+            + "the master host name refers to the machine "
+            + "that runs the Job tracker.", WARNING);
+
+        return false;
+      }
+    }
+
+    {
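+      // Expected form is "host:port" with 0 <= port < 65536: for example
+      // "localhost:9001" passes, while "localhost" or "localhost:abc" fails.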
+      String jobTracker = location.getConfProp(ConfProp.JOB_TRACKER_URI);
+      String[] strs = jobTracker.split(":");
+      boolean ok = (strs.length == 2);
+      if (ok) {
+        try {
+          int port = Integer.parseInt(strs[1]);
+          ok = (port >= 0) && (port < 65536);
+        } catch (NumberFormatException nfe) {
+          ok = false;
+        }
+      }
+      if (!ok) {
+        setMessage("The job tracker information ("
+            + ConfProp.JOB_TRACKER_URI.name + ") is invalid. "
+            + "This usually looks like \"host:port\"", WARNING);
+        return false;
+      }
+    }
+
+    {
+      String fsDefaultURI = location.getConfProp(ConfProp.FS_DEFAULT_URI);
+      try {
+        new URI(fsDefaultURI);
+      } catch (URISyntaxException e) {
+        setMessage("The default file system URI is invalid. "
+            + "This usually looks like \"hdfs://host:port/\" "
+            + "or \"file:///dir/\"", WARNING);
+        return false;
+      }
+    }
+
+    setMessage("Define the location of a Hadoop infrastructure "
+        + "for running MapReduce applications.");
+    return true;
+  }
+
+  /**
+   * Create the wizard
+   */
+  /* @inheritDoc */
+  public void createControl(Composite parent) {
+    setTitle("Define Hadoop location");
+    setDescription("Define the location of a Hadoop infrastructure "
+        + "for running MapReduce applications.");
+
+    Composite panel = new Composite(parent, SWT.FILL);
+    GridLayout glayout = new GridLayout(2, false);
+    panel.setLayout(glayout);
+
+    TabMediator mediator = new TabMediator(panel);
+    {
+      GridData gdata = new GridData(GridData.FILL_BOTH);
+      gdata.horizontalSpan = 2;
+      mediator.folder.setLayoutData(gdata);
+    }
+    this.setControl(panel /* mediator.folder */);
+    {
+      final Button validate = new Button(panel, SWT.NONE);
+      validate.setText("&Load from file");
+      validate.addListener(SWT.Selection, new Listener() {
+        public void handleEvent(Event e) {
+          // TODO
+        }
+      });
+    }
+    {
+      final Button validate = new Button(panel, SWT.NONE);
+      validate.setText("&Validate location");
+      validate.addListener(SWT.Selection, new Listener() {
+        public void handleEvent(Event e) {
+          testLocation();
+        }
+      });
+    }
+  }
+
+  private interface TabListener {
+    void notifyChange(ConfProp prop, String propValue);
+  }
+
+  /*
+   * Mediator pattern to keep tabs synchronized with each other and with the
+   * location state.
+   */
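+  /*
+   * Flow sketch: a widget edit in one tab calls
+   * mediator.notifyChange(sourceTab, prop, value); the mediator stores the
+   * new value in the location, then fireChange() pushes it to every other
+   * tab through TabListener.notifyChange().
+   */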
+
+  private class TabMediator {
+    TabFolder folder;
+
+    private Set<TabListener> tabs = new HashSet<TabListener>();
+
+    TabMediator(Composite parent) {
+      folder = new TabFolder(parent, SWT.NONE);
+      tabs.add(new TabMain(this));
+      tabs.add(new TabAdvanced(this));
+    }
+
+    /**
+     * Access to current configuration settings
+     * 
+     * @param propName the property name
+     * @return the current property value
+     */
+    String get(String propName) {
+      return location.getConfProp(propName);
+    }
+
+    String get(ConfProp prop) {
+      return location.getConfProp(prop);
+    }
+
+    /**
+     * Implements change notifications from any tab: updates the location
+     * state and the other tabs.
+     * 
+     * @param source origin of the notification (one of the tabs, or null)
+     * @param prop the modified property
+     * @param propValue the new value
+     */
+    void notifyChange(TabListener source, final ConfProp prop,
+        final String propValue) {
+      // Ignore notification when no change
+      String oldValue = location.getConfProp(prop);
+      if ((oldValue != null) && oldValue.equals(propValue))
+        return;
+
+      location.setConfProp(prop, propValue);
+      Display.getDefault().asyncExec(new Runnable() {
+        public void run() {
+          getContainer().updateButtons();
+        }
+      });
+
+      this.fireChange(source, prop, propValue);
+
+      /*
+       * Now we deal with dependencies between settings
+       */
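+      // Example with hypothetical values: setting PI_JOB_TRACKER_HOST to
+      // "master.example.com" with PI_JOB_TRACKER_PORT "9001" propagates
+      // JOB_TRACKER_URI = "master.example.com:9001" below, and also
+      // PI_NAME_NODE_HOST when the masters are colocated.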
+      final String jobTrackerHost =
+          location.getConfProp(ConfProp.PI_JOB_TRACKER_HOST);
+      final String jobTrackerPort =
+          location.getConfProp(ConfProp.PI_JOB_TRACKER_PORT);
+      final String nameNodeHost =
+          location.getConfProp(ConfProp.PI_NAME_NODE_HOST);
+      final String nameNodePort =
+          location.getConfProp(ConfProp.PI_NAME_NODE_PORT);
+      final boolean colocate =
+          location.getConfProp(ConfProp.PI_COLOCATE_MASTERS)
+              .equalsIgnoreCase("yes");
+      final String jobTrackerURI =
+          location.getConfProp(ConfProp.JOB_TRACKER_URI);
+      final String fsDefaultURI =
+          location.getConfProp(ConfProp.FS_DEFAULT_URI);
+      final String socksServerURI =
+          location.getConfProp(ConfProp.SOCKS_SERVER);
+      final boolean socksProxyEnable =
+          location.getConfProp(ConfProp.PI_SOCKS_PROXY_ENABLE)
+              .equalsIgnoreCase("yes");
+      final String socksProxyHost =
+          location.getConfProp(ConfProp.PI_SOCKS_PROXY_HOST);
+      final String socksProxyPort =
+          location.getConfProp(ConfProp.PI_SOCKS_PROXY_PORT);
+
+      Display.getDefault().asyncExec(new Runnable() {
+        public void run() {
+          switch (prop) {
+            case PI_JOB_TRACKER_HOST: {
+              if (colocate)
+                notifyChange(null, ConfProp.PI_NAME_NODE_HOST,
+                    jobTrackerHost);
+              String newJobTrackerURI =
+                  String.format("%s:%s", jobTrackerHost, jobTrackerPort);
+              notifyChange(null, ConfProp.JOB_TRACKER_URI, newJobTrackerURI);
+              break;
+            }
+            case PI_JOB_TRACKER_PORT: {
+              String newJobTrackerURI =
+                  String.format("%s:%s", jobTrackerHost, jobTrackerPort);
+              notifyChange(null, ConfProp.JOB_TRACKER_URI, newJobTrackerURI);
+              break;
+            }
+            case PI_NAME_NODE_HOST: {
+              String newHDFSURI =
+                  String.format("hdfs://%s:%s/", nameNodeHost, nameNodePort);
+              notifyChange(null, ConfProp.FS_DEFAULT_URI, newHDFSURI);
+
+              // Break colocation if someone forces the DFS master host
+              if (!colocate && !nameNodeHost.equals(jobTrackerHost))
+                notifyChange(null, ConfProp.PI_COLOCATE_MASTERS, "no");
+              break;
+            }
+            case PI_NAME_NODE_PORT: {
+              String newHDFSURI =
+                  String.format("hdfs://%s:%s/", nameNodeHost, nameNodePort);
+              notifyChange(null, ConfProp.FS_DEFAULT_URI, newHDFSURI);
+              break;
+            }
+            case PI_SOCKS_PROXY_HOST: {
+              String newSocksProxyURI =
+                  String.format("%s:%s", socksProxyHost, socksProxyPort);
+              notifyChange(null, ConfProp.SOCKS_SERVER, newSocksProxyURI);
+              break;
+            }
+            case PI_SOCKS_PROXY_PORT: {
+              String newSocksProxyURI =
+                  String.format("%s:%s", socksProxyHost, socksProxyPort);
+              notifyChange(null, ConfProp.SOCKS_SERVER, newSocksProxyURI);
+              break;
+            }
+            case JOB_TRACKER_URI: {
+              String[] strs = jobTrackerURI.split(":", 2);
+              String host = strs[0];
+              String port = (strs.length == 2) ? strs[1] : "";
+              notifyChange(null, ConfProp.PI_JOB_TRACKER_HOST, host);
+              notifyChange(null, ConfProp.PI_JOB_TRACKER_PORT, port);
+              break;
+            }
+            case FS_DEFAULT_URI: {
+              try {
+                URI uri = new URI(fsDefaultURI);
+                if (uri.getScheme().equals("hdfs")) {
+                  String host = uri.getHost();
+                  String port = Integer.toString(uri.getPort());
+                  notifyChange(null, ConfProp.PI_NAME_NODE_HOST, host);
+                  notifyChange(null, ConfProp.PI_NAME_NODE_PORT, port);
+                }
+              } catch (URISyntaxException use) {
+                // Ignore the update!
+              }
+              break;
+            }
+            case SOCKS_SERVER: {
+              String[] strs = socksServerURI.split(":", 2);
+              String host = strs[0];
+              String port = (strs.length == 2) ? strs[1] : "";
+              notifyChange(null, ConfProp.PI_SOCKS_PROXY_HOST, host);
+              notifyChange(null, ConfProp.PI_SOCKS_PROXY_PORT, port);
+              break;
+            }
+            case PI_COLOCATE_MASTERS: {
+              if (colocate)
+                notifyChange(null, ConfProp.PI_NAME_NODE_HOST,
+                    jobTrackerHost);
+              break;
+            }
+            case PI_SOCKS_PROXY_ENABLE: {
+              if (socksProxyEnable) {
+                notifyChange(null, ConfProp.SOCKET_FACTORY_DEFAULT,
+                    "org.apache.hadoop.net.SocksSocketFactory");
+              } else {
+                notifyChange(null, ConfProp.SOCKET_FACTORY_DEFAULT,
+                    "org.apache.hadoop.net.StandardSocketFactory");
+              }
+              break;
+            }
+          }
+        }
+      });
+
+    }
+
+    /**
+     * Change notifications on properties (by name). A property might not be
+     * reflected as a ConfProp enum. If it is, the notification is forwarded
+     * to the ConfProp notifyChange method. If not, it is processed here.
+     * 
+     * @param source origin of the notification
+     * @param propName the modified property name
+     * @param propValue the new value
+     */
+    void notifyChange(TabListener source, String propName, String propValue) {
+
+      ConfProp prop = ConfProp.getByName(propName);
+      if (prop != null)
+        notifyChange(source, prop, propValue);
+      else
+        location.setConfProp(propName, propValue);
+    }
+
+    /**
+     * Broadcast a property change to all registered tabs. If a tab is
+     * identified as the source of the change, this tab will not be notified.
+     * 
+     * @param source the tab at the origin of the change (not notified)
+     * @param prop the modified property
+     * @param value the new value
+     */
+    private void fireChange(TabListener source, ConfProp prop, String value) {
+      for (TabListener tab : tabs) {
+        if (tab != source)
+          tab.notifyChange(prop, value);
+      }
+    }
+
+  }
+
+  /**
+   * Create a SWT Text component for the given {@link ConfProp} text
+   * configuration property.
+   * 
+   * @param listener the modify listener to attach to the field
+   * @param parent the SWT parent container
+   * @param prop the property the field edits
+   * @return the new SWT Text field
+   */
+  private Text createConfText(ModifyListener listener, Composite parent,
+      ConfProp prop) {
+
+    Text text = new Text(parent, SWT.SINGLE | SWT.BORDER);
+    GridData data = new GridData(GridData.FILL_HORIZONTAL);
+    text.setLayoutData(data);
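+    // Stash the property on the widget; modifyText() reads it back through
+    // getData("hProp") to know which property was edited.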
+    text.setData("hProp", prop);
+    text.setText(location.getConfProp(prop));
+    text.addModifyListener(listener);
+
+    return text;
+  }
+
+  /**
+   * Create a SWT Checked Button component for the given {@link ConfProp}
+   * boolean configuration property.
+   * 
+   * @param listener the selection listener to attach to the button
+   * @param parent the SWT parent container
+   * @param prop the boolean ("yes"/"no") property the button edits
+   * @param text the button label
+   * @return the new SWT Button
+   */
+  private Button createConfCheckButton(SelectionListener listener,
+      Composite parent, ConfProp prop, String text) {
+
+    Button button = new Button(parent, SWT.CHECK);
+    button.setText(text);
+    button.setData("hProp", prop);
+    button.setSelection(location.getConfProp(prop).equalsIgnoreCase("yes"));
+    button.addSelectionListener(listener);
+
+    return button;
+  }
+
+  /**
+   * Create an editor entry for the given configuration property. The editor
+   * is a (Label, Text) pair.
+   * 
+   * @param listener the listener to trigger on property change
+   * @param parent the SWT parent container
+   * @param prop the property to create an editor for
+   * @param labelText a label (null defaults to the property name)
+   * 
+   * @return a SWT Text field
+   */
+  private Text createConfLabelText(ModifyListener listener,
+      Composite parent, ConfProp prop, String labelText) {
+
+    Label label = new Label(parent, SWT.NONE);
+    if (labelText == null)
+      labelText = prop.name;
+    label.setText(labelText);
+
+    return createConfText(listener, parent, prop);
+  }
+
+  /**
+   * Create an editor entry for the given configuration name
+   * 
+   * @param listener the listener to trigger on property change
+   * @param parent the SWT parent container
+   * @param propName the name of the property to create an editor for
+   * @param labelText a label (null defaults to the property name)
+   * 
+   * @return a SWT Text field
+   */
+  private Text createConfNameEditor(ModifyListener listener,
+      Composite parent, String propName, String labelText) {
+
+    {
+      ConfProp prop = ConfProp.getByName(propName);
+      if (prop != null)
+        return createConfLabelText(listener, parent, prop, labelText);
+    }
+
+    Label label = new Label(parent, SWT.NONE);
+    if (labelText == null)
+      labelText = propName;
+    label.setText(labelText);
+
+    Text text = new Text(parent, SWT.SINGLE | SWT.BORDER);
+    GridData data = new GridData(GridData.FILL_HORIZONTAL);
+    text.setLayoutData(data);
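+    // Properties with no ConfProp counterpart are tracked by raw name;
+    // modifyText() reads it back through getData("hPropName").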
+    text.setData("hPropName", propName);
+    text.setText(location.getConfProp(propName));
+    text.addModifyListener(listener);
+
+    return text;
+  }
+
+  /**
+   * Main parameters of the Hadoop location:
+   * <ul>
+   * <li> host and port of the Map/Reduce master (Job tracker)
+   * <li> host and port of the DFS master (Name node)
+   * <li> SOCKS proxy
+   * </ul>
+   */
+  private class TabMain implements TabListener, ModifyListener,
+      SelectionListener {
+
+    TabMediator mediator;
+
+    Text locationName;
+
+    Text textJTHost;
+
+    Text textNNHost;
+
+    Button colocateMasters;
+
+    Text textJTPort;
+
+    Text textNNPort;
+
+    Text userName;
+
+    Button useSocksProxy;
+
+    Text socksProxyHost;
+
+    Text socksProxyPort;
+
+    TabMain(TabMediator mediator) {
+      this.mediator = mediator;
+      TabItem tab = new TabItem(mediator.folder, SWT.NONE);
+      tab.setText("General");
+      tab.setToolTipText("General location parameters");
+      tab.setImage(circle);
+      tab.setControl(createControl(mediator.folder));
+    }
+
+    private Control createControl(Composite parent) {
+
+      Composite panel = new Composite(parent, SWT.FILL);
+      panel.setLayout(new GridLayout(2, false));
+
+      GridData data;
+
+      /*
+       * Location name
+       */
+      {
+        Composite subpanel = new Composite(panel, SWT.FILL);
+        subpanel.setLayout(new GridLayout(2, false));
+        data = new GridData();
+        data.horizontalSpan = 2;
+        data.horizontalAlignment = SWT.FILL;
+        subpanel.setLayoutData(data);
+
+        locationName =
+            createConfLabelText(this, subpanel, ConfProp.PI_LOCATION_NAME,
+                "&Location name:");
+      }
+
+      /*
+       * Map/Reduce group
+       */
+      {
+        Group groupMR = new Group(panel, SWT.SHADOW_NONE);
+        groupMR.setText("Map/Reduce Master");
+        groupMR.setToolTipText("Address of the Map/Reduce master node "
+            + "(the Job Tracker).");
+        GridLayout layout = new GridLayout(2, false);
+        groupMR.setLayout(layout);
+        data = new GridData();
+        data.verticalAlignment = SWT.FILL;
+        data.horizontalAlignment = SWT.CENTER;
+        data.widthHint = 250;
+        groupMR.setLayoutData(data);
+
+        // Job Tracker host
+        Label label = new Label(groupMR, SWT.NONE);
+        label.setText("Host:");
+        data =
+            new GridData(GridData.BEGINNING, GridData.CENTER, false, true);
+        label.setLayoutData(data);
+
+        textJTHost =
+            createConfText(this, groupMR, ConfProp.PI_JOB_TRACKER_HOST);
+        data = new GridData(GridData.FILL, GridData.CENTER, true, true);
+        textJTHost.setLayoutData(data);
+
+        // Job Tracker port
+        label = new Label(groupMR, SWT.NONE);
+        label.setText("Port:");
+        data =
+            new GridData(GridData.BEGINNING, GridData.CENTER, false, true);
+        label.setLayoutData(data);
+
+        textJTPort =
+            createConfText(this, groupMR, ConfProp.PI_JOB_TRACKER_PORT);
+        data = new GridData(GridData.FILL, GridData.CENTER, true, true);
+        textJTPort.setLayoutData(data);
+      }
+
+      /*
+       * DFS group
+       */
+      {
+        Group groupDFS = new Group(panel, SWT.SHADOW_NONE);
+        groupDFS.setText("DFS Master");
+        groupDFS.setToolTipText("Address of the Distributed FileSystem "
+            + "master node (the Name Node).");
+        GridLayout layout = new GridLayout(2, false);
+        groupDFS.setLayout(layout);
+        data = new GridData();
+        data.horizontalAlignment = SWT.CENTER;
+        data.widthHint = 250;
+        groupDFS.setLayoutData(data);
+
+        colocateMasters =
+            createConfCheckButton(this, groupDFS,
+                ConfProp.PI_COLOCATE_MASTERS, "Use M/R Master host");
+        data = new GridData();
+        data.horizontalSpan = 2;
+        colocateMasters.setLayoutData(data);
+
+        // Name Node host
+        Label label = new Label(groupDFS, SWT.NONE);
+        data = new GridData();
+        label.setText("Host:");
+        label.setLayoutData(data);
+
+        textNNHost =
+            createConfText(this, groupDFS, ConfProp.PI_NAME_NODE_HOST);
+
+        // Name Node port
+        label = new Label(groupDFS, SWT.NONE);
+        data = new GridData();
+        label.setText("Port:");
+        label.setLayoutData(data);
+
+        textNNPort =
+            createConfText(this, groupDFS, ConfProp.PI_NAME_NODE_PORT);
+      }
+
+      {
+        Composite subpanel = new Composite(panel, SWT.FILL);
+        subpanel.setLayout(new GridLayout(2, false));
+        data = new GridData();
+        data.horizontalSpan = 2;
+        data.horizontalAlignment = SWT.FILL;
+        subpanel.setLayoutData(data);
+
+        userName =
+            createConfLabelText(this, subpanel, ConfProp.PI_USER_NAME,
+                "&User name:");
+      }
+
+      // SOCKS proxy group
+      {
+        Group groupSOCKS = new Group(panel, SWT.SHADOW_NONE);
+        groupSOCKS.setText("SOCKS proxy");
+        groupSOCKS.setToolTipText("Address of the SOCKS proxy to use "
+            + "to connect to the infrastructure.");
+        GridLayout layout = new GridLayout(2, false);
+        groupSOCKS.setLayout(layout);
+        data = new GridData();
+        data.horizontalAlignment = SWT.CENTER;
+        data.horizontalSpan = 2;
+        data.widthHint = 250;
+        groupSOCKS.setLayoutData(data);
+
+        useSocksProxy =
+            createConfCheckButton(this, groupSOCKS,
+                ConfProp.PI_SOCKS_PROXY_ENABLE, "Enable SOCKS proxy");
+        data = new GridData();
+        data.horizontalSpan = 2;
+        useSocksProxy.setLayoutData(data);
+
+        // SOCKS proxy host
+        Label label = new Label(groupSOCKS, SWT.NONE);
+        data = new GridData();
+        label.setText("Host:");
+        label.setLayoutData(data);
+
+        socksProxyHost =
+            createConfText(this, groupSOCKS, ConfProp.PI_SOCKS_PROXY_HOST);
+
+        // SOCKS proxy port
+        label = new Label(groupSOCKS, SWT.NONE);
+        data = new GridData();
+        label.setText("Port:");
+        label.setLayoutData(data);
+
+        socksProxyPort =
+            createConfText(this, groupSOCKS, ConfProp.PI_SOCKS_PROXY_PORT);
+      }
+
+      // Update the state of all widgets according to the current values!
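+      // (the colocation and proxy flags also drive the enabled/disabled
+      // state of the dependent host and port fields)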
+      reloadConfProp(ConfProp.PI_COLOCATE_MASTERS);
+      reloadConfProp(ConfProp.PI_SOCKS_PROXY_ENABLE);
+      reloadConfProp(ConfProp.PI_JOB_TRACKER_HOST);
+
+      return panel;
+    }
+
+    /**
+     * Reload the given configuration property value
+     * 
+     * @param prop
+     */
+    private void reloadConfProp(ConfProp prop) {
+      this.notifyChange(prop, location.getConfProp(prop));
+    }
+
+    public void notifyChange(ConfProp prop, String propValue) {
+      switch (prop) {
+        case PI_JOB_TRACKER_HOST: {
+          textJTHost.setText(propValue);
+          break;
+        }
+        case PI_JOB_TRACKER_PORT: {
+          textJTPort.setText(propValue);
+          break;
+        }
+        case PI_LOCATION_NAME: {
+          locationName.setText(propValue);
+          break;
+        }
+        case PI_USER_NAME: {
+          userName.setText(propValue);
+          break;
+        }
+        case PI_COLOCATE_MASTERS: {
+          if (colocateMasters != null) {
+            boolean colocate = propValue.equalsIgnoreCase("yes");
+            colocateMasters.setSelection(colocate);
+            if (textNNHost != null) {
+              textNNHost.setEnabled(!colocate);
+            }
+          }
+          break;
+        }
+        case PI_NAME_NODE_HOST: {
+          textNNHost.setText(propValue);
+          break;
+        }
+        case PI_NAME_NODE_PORT: {
+          textNNPort.setText(propValue);
+          break;
+        }
+        case PI_SOCKS_PROXY_ENABLE: {
+          if (useSocksProxy != null) {
+            boolean useProxy = propValue.equalsIgnoreCase("yes");
+            useSocksProxy.setSelection(useProxy);
+            if (socksProxyHost != null)
+              socksProxyHost.setEnabled(useProxy);
+            if (socksProxyPort != null)
+              socksProxyPort.setEnabled(useProxy);
+          }
+          break;
+        }
+        case PI_SOCKS_PROXY_HOST: {
+          socksProxyHost.setText(propValue);
+          break;
+        }
+        case PI_SOCKS_PROXY_PORT: {
+          socksProxyPort.setText(propValue);
+          break;
+        }
+      }
+    }
+
+    /* @inheritDoc */
+    public void modifyText(ModifyEvent e) {
+      final Text text = (Text) e.widget;
+      final ConfProp prop = (ConfProp) text.getData("hProp");
+      Display.getDefault().asyncExec(new Runnable() {
+        public void run() {
+          mediator.notifyChange(TabMain.this, prop, text.getText());
+        }
+      });
+    }
+
+    /* @inheritDoc */
+    public void widgetDefaultSelected(SelectionEvent e) {
+      this.widgetSelected(e);
+    }
+
+    /* @inheritDoc */
+    public void widgetSelected(SelectionEvent e) {
+      final Button button = (Button) e.widget;
+      final ConfProp prop = (ConfProp) button.getData("hProp");
+
+      Display.getDefault().asyncExec(new Runnable() {
+        public void run() {
+          // We want to receive the update also!
+          mediator.notifyChange(null, prop, button.getSelection() ? "yes"
+              : "no");
+        }
+      });
+    }
+
+  }
+
+  private class TabAdvanced implements TabListener, ModifyListener {
+    TabMediator mediator;
+
+    private Composite panel;
+
+    private Map<String, Text> textMap = new TreeMap<String, Text>();
+
+    TabAdvanced(TabMediator mediator) {
+      this.mediator = mediator;
+      TabItem tab = new TabItem(mediator.folder, SWT.NONE);
+      tab.setText("Advanced parameters");
+      tab.setToolTipText("Access to advanced Hadoop parameters");
+      tab.setImage(circle);
+      tab.setControl(createControl(mediator.folder));
+
+    }
+
+    private Control createControl(Composite parent) {
+      ScrolledComposite sc =
+          new ScrolledComposite(parent, SWT.BORDER | SWT.H_SCROLL
+              | SWT.V_SCROLL);
+
+      panel = new Composite(sc, SWT.NONE);
+      sc.setContent(panel);
+
+      sc.setExpandHorizontal(true);
+      sc.setExpandVertical(true);
+
+      sc.setMinSize(640, 480);
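+      // Provisional minimum size; recomputed from the actual content below.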
+
+      GridLayout layout = new GridLayout();
+      layout.numColumns = 2;
+      layout.makeColumnsEqualWidth = false;
+      panel.setLayout(layout);
+      panel.setLayoutData(new GridData(GridData.FILL, GridData.FILL, true,
+          true, 1, 1));
+
+      // Sort by property name
+      Configuration config = location.getConfiguration();
+      SortedMap<String, String> map = new TreeMap<String, String>();
+      Iterator<Entry<String, String>> it = config.iterator();
+      while (it.hasNext()) {
+        Entry<String, String> entry = it.next();
+        map.put(entry.getKey(), entry.getValue());
+      }
+
+      for (Entry<String, String> entry : map.entrySet()) {
+        Text text = createConfNameEditor(this, panel, entry.getKey(), null);
+        textMap.put(entry.getKey(), text);
+      }
+
+      sc.setMinSize(panel.computeSize(SWT.DEFAULT, SWT.DEFAULT));
+
+      return sc;
+    }
+
+    public void notifyChange(ConfProp prop, final String propValue) {
+      Text text = textMap.get(prop.name);
+      if (text != null)
+        text.setText(propValue);
+    }
+
+    public void modifyText(ModifyEvent e) {
+      final Text text = (Text) e.widget;
+      Object hProp = text.getData("hProp");
+      final ConfProp prop = (hProp != null) ? (ConfProp) hProp : null;
+      Object hPropName = text.getData("hPropName");
+      final String propName =
+          (hPropName != null) ? (String) hPropName : null;
+
+      Display.getDefault().asyncExec(new Runnable() {
+        public void run() {
+          if (prop != null)
+            mediator.notifyChange(TabAdvanced.this, prop, text.getText());
+          else
+            mediator
+                .notifyChange(TabAdvanced.this, propName, text.getText());
+        }
+      });
+    }
+  }
+
+}

Modified: lucene/hadoop/trunk/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/servers/HadoopServerSelectionListContentProvider.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/servers/HadoopServerSelectionListContentProvider.java?rev=588310&r1=588309&r2=588310&view=diff
==============================================================================
--- lucene/hadoop/trunk/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/servers/HadoopServerSelectionListContentProvider.java (original)
+++ lucene/hadoop/trunk/src/contrib/eclipse-plugin/src/java/org/apache/hadoop/eclipse/servers/HadoopServerSelectionListContentProvider.java Thu Oct 25 11:58:32 2007
@@ -26,7 +26,6 @@
 import org.eclipse.jface.viewers.Viewer;
 import org.eclipse.swt.graphics.Image;
 
-
 /**
  * Provider that enables selection of a predefined Hadoop server.
  */
@@ -47,10 +46,12 @@
 
   public String getColumnText(Object element, int columnIndex) {
     if (element instanceof HadoopServer) {
+      HadoopServer location = (HadoopServer) element;
       if (columnIndex == 0) {
-        return ((HadoopServer) element).getName();
+        return location.getLocationName();
+
       } else if (columnIndex == 1) {
-        return ((HadoopServer) element).toString();
+        return location.getMasterHostName();
       }
     }
 


