hdt-commits mailing list archives

From rsha...@apache.org
Subject [13/27] - Adding hadoop2 based on 2.2 version - Changing fragments to plugins (fragment classpath is appended to host classpath, which causes issues, thus making it a plugin) - Loading classes in different context loaders (http://wiki.eclipse.org/FAQ_How_
Date Thu, 26 Jun 2014 08:36:36 GMT
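
The class-loading change named in the subject follows the standard Eclipse context-class-loader idiom, and it appears throughout the new HadoopCluster class below. A minimal sketch of the swap-and-restore pattern (names here are illustrative only, not taken from this patch):

    Thread current = Thread.currentThread();
    ClassLoader oldLoader = current.getContextClassLoader();
    try {
        // Hadoop resolves implementation classes through the thread context
        // loader, so point it at the plug-in's own loader for the call.
        current.setContextClassLoader(SomePluginClass.class.getClassLoader());
        // ... invoke Hadoop APIs here ...
    } finally {
        current.setContextClassLoader(oldLoader); // always restore
    }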
http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/34799cec/org.apache.hdt.hadoop2.release/src/org/apache/hdt/hadoop2/release/HadoopCluster.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.hadoop2.release/src/org/apache/hdt/hadoop2/release/HadoopCluster.java b/org.apache.hdt.hadoop2.release/src/org/apache/hdt/hadoop2/release/HadoopCluster.java
new file mode 100644
index 0000000..b200a9f
--- /dev/null
+++ b/org.apache.hdt.hadoop2.release/src/org/apache/hdt/hadoop2/release/HadoopCluster.java
@@ -0,0 +1,619 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hdt.hadoop2.release;
+
+import java.io.BufferedInputStream;
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Set;
+import java.util.TreeMap;
+import java.util.concurrent.Callable;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.Future;
+import java.util.concurrent.TimeUnit;
+import java.util.logging.Logger;
+
+import javax.xml.parsers.DocumentBuilder;
+import javax.xml.parsers.DocumentBuilderFactory;
+import javax.xml.parsers.ParserConfigurationException;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.mapred.JobClient;
+import org.apache.hadoop.mapred.JobConf;
+import org.apache.hadoop.mapred.JobID;
+import org.apache.hadoop.mapred.JobStatus;
+import org.apache.hadoop.mapred.RunningJob;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hdt.core.Activator;
+import org.apache.hdt.core.launch.AbstractHadoopCluster;
+import org.apache.hdt.core.launch.ConfProp;
+import org.apache.hdt.core.launch.IHadoopJob;
+import org.apache.hdt.core.launch.IJarModule;
+import org.apache.hdt.core.launch.IJobListener;
+import org.eclipse.core.runtime.CoreException;
+import org.eclipse.core.runtime.IProgressMonitor;
+import org.eclipse.core.runtime.IStatus;
+import org.eclipse.core.runtime.Status;
+import org.eclipse.core.runtime.jobs.Job;
+import org.eclipse.swt.widgets.Display;
+import org.w3c.dom.Document;
+import org.w3c.dom.Element;
+import org.w3c.dom.Node;
+import org.w3c.dom.NodeList;
+import org.w3c.dom.Text;
+import org.xml.sax.SAXException;
+
+/**
+ * Representation of a Hadoop location, that is, the master node (NameNode,
+ * JobTracker).
+ * 
+ * <p>
+ * This class does not create any SSH connection anymore. Tunneling must be
+ * set up outside of Eclipse for now (using PuTTY or <tt>ssh -D&lt;port&gt;
+ * &lt;host&gt;</tt>)
+ * 
+ * <p>
+ * <em> TODO </em>
+ * <li>Disable the updater if a location becomes unreachable or fails for too
+ * long
+ * <li>Stop the updater on location's disposal/removal
+ */
+
+public class HadoopCluster extends AbstractHadoopCluster {
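+	/** Single-thread executor used to bound cluster-connection checks with a timeout (see isAvailable()). */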
+	private ExecutorService service = Executors.newSingleThreadExecutor();
+
+	/**
+	 * Frequency of location status observations expressed as the delay in ms
+	 * between each observation
+	 * 
+	 * TODO Add a preference parameter for this
+	 */
+	protected static final long STATUS_OBSERVATION_DELAY = 1500;
+
+	/**
+	 * Background job that periodically polls this location for the status of
+	 * running jobs and notifies the registered listeners of changes.
+	 */
+	public class LocationStatusUpdater extends Job {
+
+		JobClient client = null;
+
+		/**
+		 * Setup the updater
+		 */
+		public LocationStatusUpdater() {
+			super("Map/Reduce location status updater");
+			this.setSystem(true);
+		}
+
+		/* @inheritDoc */
+		@Override
+		protected IStatus run(IProgressMonitor monitor) {
+			if (client == null) {
+				try {
+					client = HadoopCluster.this.getJobClient();
+
+				} catch (IOException ioe) {
+					client = null;
+					return new Status(Status.ERROR, Activator.BUNDLE_ID, 0, "Cannot connect to the Map/Reduce location: "
+							+ HadoopCluster.this.getLocationName(), ioe);
+				}
+			}
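+			// Hadoop looks up implementation classes through the thread
+			// context class loader; swap in this plug-in's loader for the
+			// duration of the calls below and restore it in the finally block.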
+			Thread current = Thread.currentThread();
+			ClassLoader oldLoader = current.getContextClassLoader();
+			try {
+			current.setContextClassLoader(HadoopCluster.class.getClassLoader());
+				// Set of all known existing Job IDs we want fresh info of
+				Set<JobID> missingJobIds = new HashSet<JobID>(runningJobs.keySet());
+
+				JobStatus[] jstatus = client.jobsToComplete();
+				jstatus = jstatus == null ? new JobStatus[0] : jstatus;
+				for (JobStatus status : jstatus) {
+
+					JobID jobId = status.getJobID();
+					missingJobIds.remove(jobId);
+
+					HadoopJob hJob;
+					synchronized (HadoopCluster.this.runningJobs) {
+						hJob = runningJobs.get(jobId);
+						if (hJob == null) {
+							// Unknown job, create an entry
+							RunningJob running = client.getJob(jobId);
+							hJob = new HadoopJob(HadoopCluster.this, jobId, running, status);
+							newJob(hJob);
+						}
+					}
+
+					// Update HadoopJob with fresh info
+					updateJob(hJob, status);
+				}
+
+				// Ask explicitly for fresh info for these Job IDs
+				for (JobID jobId : missingJobIds) {
+					HadoopJob hJob = runningJobs.get(jobId);
+					if (!hJob.isCompleted())
+						updateJob(hJob, null);
+				}
+
+			} catch (IOException ioe) {
+				client = null;
+				return new Status(Status.ERROR, Activator.BUNDLE_ID, 0, "Cannot retrieve running Jobs on location: " + HadoopCluster.this.getLocationName(),
+						ioe);
+			} finally {
+				current.setContextClassLoader(oldLoader);
+			}
+
+			// Schedule the next observation
+			schedule(STATUS_OBSERVATION_DELAY);
+
+			return Status.OK_STATUS;
+		}
+
+		/**
+		 * Stores and makes the new job available
+		 * 
+		 * @param data
+		 *            the new job
+		 */
+		private void newJob(final HadoopJob data) {
+			runningJobs.put(data.jobId, data);
+
+			Display.getDefault().asyncExec(new Runnable() {
+				public void run() {
+					fireJobAdded(data);
+				}
+			});
+		}
+
+		/**
+		 * Updates the status of a job
+		 * 
+		 * @param job
+		 *            the job to update
+		 */
+		private void updateJob(final HadoopJob job, JobStatus status) {
+			job.update(status);
+
+			Display.getDefault().asyncExec(new Runnable() {
+				public void run() {
+					fireJobChanged(job);
+				}
+			});
+		}
+
+	}
+
+	static Logger log = Logger.getLogger(HadoopCluster.class.getName());
+
+	/**
+	 * Hadoop configuration of the location. Also contains specific parameters
+	 * for the plug-in. These parameters are prefixed with eclipse.plug-in.*
+	 */
+	private Configuration conf;
+
+	/**
+	 * Jobs listeners
+	 */
+	private Set<IJobListener> jobListeners = new HashSet<IJobListener>();
+
+	/**
+	 * Jobs running on this location. The keys of this map are the Job IDs.
+	 */
+	private transient Map<JobID, HadoopJob> runningJobs = Collections.synchronizedMap(new TreeMap<JobID, HadoopJob>());
+
+	/**
+	 * Status updater for this location
+	 */
+	private LocationStatusUpdater statusUpdater;
+
+	// state and status - transient
+	private transient String state = "";
+
+	/**
+	 * Creates a new default Hadoop location
+	 */
+	public HadoopCluster() {
+		this.conf = new Configuration();
+		this.addPluginConfigDefaultProperties();
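+		// Default to the YARN (MRv2) framework and the stock local ports:
+		// ResourceManager on 8032, Job History Server on 10020.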
+		conf.set("mapreduce.framework.name", "yarn");
+		conf.set(YarnConfiguration.RM_ADDRESS, "localhost:8032");
+		conf.set(getConfPropName(ConfProp.PI_JOB_TRACKER_PORT), "8032");
+		conf.set("mapreduce.jobhistory.address", "localhost:10020");
+	}
+
+	/**
+	 * Creates a location from a file
+	 * 
+	 * @throws IOException
+	 * @throws SAXException
+	 * @throws ParserConfigurationException
+	 */
+	public HadoopCluster(File file) throws ParserConfigurationException, SAXException, IOException {
+		this();
+		this.loadFromXML(file);
+	}
+
+	/**
+	 * Create a new Hadoop location by copying an already existing one.
+	 * 
+	 * @param existing
+	 *            the location to copy
+	 */
+	public HadoopCluster(HadoopCluster existing) {
+		this();
+		this.load(existing);
+	}
+
+	public void addJobListener(IJobListener l) {
+		jobListeners.add(l);
+	}
+
+	public void dispose() {
+		// TODO close DFS connections?
+	}
+
+	/**
+	 * List all elements that should be present in the Server window (all
+	 * servers and all jobs running on each server)
+	 * 
+	 * @return collection of jobs for this location
+	 */
+	public Collection<? extends IHadoopJob> getJobs() {
+		startStatusUpdater();
+		return this.runningJobs.values();
+	}
+
+	/**
+	 * Remove the given job from the currently running jobs map
+	 * 
+	 * @param job
+	 *            the job to remove
+	 */
+	public void purgeJob(final IHadoopJob job) {
+		runningJobs.remove(job.getJobID());
+		Display.getDefault().asyncExec(new Runnable() {
+			public void run() {
+				fireJobRemoved(job);
+			}
+		});
+	}
+
+	/**
+	 * Returns the {@link Configuration} defining this location.
+	 * 
+	 * @return the location configuration
+	 */
+	public Iterator<Entry<String, String>> getConfiguration() {
+		return this.conf.iterator();
+	}
+
+	/**
+	 * @return the conf
+	 */
+	public Configuration getConf() {
+		return conf;
+	}
+
+	/**
+	 * Gets a Hadoop configuration property value
+	 * 
+	 * @param prop
+	 *            the configuration property
+	 * @return the property value
+	 */
+	public String getConfPropValue(ConfProp prop) {
+		String confPropName = getConfPropName(prop);
+		return conf.get(confPropName);
+	}
+
+	/**
+	 * Gets a Hadoop configuration property value
+	 * 
+	 * @param propName
+	 *            the property name
+	 * @return the property value
+	 */
+	public String getConfPropValue(String propName) {
+		return this.conf.get(propName);
+	}
+
+	public String getLocationName() {
+		return getConfPropValue(ConfProp.PI_LOCATION_NAME);
+	}
+
+	/**
+	 * Returns the master host name of the Hadoop location (the Job tracker)
+	 * 
+	 * @return the host name of the Job tracker
+	 */
+	public String getMasterHostName() {
+		return getConfPropValue(ConfProp.PI_JOB_TRACKER_HOST);
+	}
+
+	public String getState() {
+		return state;
+	}
+
+	/**
+	 * Overwrite this location with the given existing location
+	 * 
+	 * @param existing
+	 *            the existing location
+	 */
+	public void load(AbstractHadoopCluster existing) {
+		this.conf = new Configuration(((HadoopCluster) existing).conf);
+	}
+
+	/**
+	 * Overwrite this location with settings available in the given XML file.
+	 * The existing configuration is preserved if the XML file is invalid.
+	 * 
+	 * @param file
+	 *            the file path of the XML file
+	 * @return validity of the XML file
+	 */
+	public boolean loadFromXML(File file) {
+
+		Configuration newConf = new Configuration(this.conf);
+		DocumentBuilder builder;
+		Document document;
+		try {
+			builder = DocumentBuilderFactory.newInstance().newDocumentBuilder();
+			document = builder.parse(file);
+		} catch (ParserConfigurationException e) {
+			e.printStackTrace();
+			return false;
+		} catch (SAXException e) {
+			e.printStackTrace();
+			return false;
+		} catch (IOException e) {
+			e.printStackTrace();
+			return false;
+		}
+		Element root = document.getDocumentElement();
+		if (!"configuration".equals(root.getTagName()))
+			return false;
+		NodeList props = root.getChildNodes();
+		for (int i = 0; i < props.getLength(); i++) {
+			Node propNode = props.item(i);
+			if (!(propNode instanceof Element))
+				continue;
+			Element prop = (Element) propNode;
+			if (!"property".equals(prop.getTagName()))
+				return false;
+			NodeList fields = prop.getChildNodes();
+			String attr = null;
+			String value = null;
+			for (int j = 0; j < fields.getLength(); j++) {
+				Node fieldNode = fields.item(j);
+				if (!(fieldNode instanceof Element))
+					continue;
+				Element field = (Element) fieldNode;
+				if ("name".equals(field.getTagName()))
+					attr = ((Text) field.getFirstChild()).getData();
+				if ("value".equals(field.getTagName()) && field.hasChildNodes())
+					value = ((Text) field.getFirstChild()).getData();
+			}
+			if (attr != null && value != null)
+				newConf.set(attr, value);
+		}
+
+		this.conf = newConf;
+		return true;
+	}
+
+	/**
+	 * Sets a Hadoop configuration property value
+	 * 
+	 * @param prop
+	 *            the property
+	 * @param propValue
+	 *            the property value
+	 */
+	public void setConfPropValue(ConfProp prop, String propValue) {
+		if (propValue != null)
+			setConfPropValue(getConfPropName(prop), propValue);
+	}
+
+	@Override
+	public void setConfPropValue(String propName, String propValue) {
+		conf.set(propName, propValue);
+	}
+
+	public void setLocationName(String newName) {
+		setConfPropValue(ConfProp.PI_LOCATION_NAME, newName);
+	}
+
+	@Override
+	public String getConfPropName(ConfProp prop) {
+		if (ConfProp.JOB_TRACKER_URI.equals(prop))
+			return YarnConfiguration.RM_ADDRESS;
+		return super.getConfPropName(prop);
+	}
+
+	@Override
+	public ConfProp getConfPropForName(String propName) {
+		if (YarnConfiguration.RM_ADDRESS.equals(propName))
+			return ConfProp.JOB_TRACKER_URI;
+		if ("mapred.job.tracker".equals(propName))
+			return null;
+		return super.getConfPropForName(propName);
+	}
+
+	/**
+	 * Write this location's settings to the given file
+	 * 
+	 * @param file
+	 *            the destination file
+	 * @throws IOException
+	 */
+	public void storeSettingsToFile(File file) throws IOException {
+		FileOutputStream fos = new FileOutputStream(file);
+		try {
+			this.conf.writeXml(fos);
+			fos.close();
+			fos = null;
+		} finally {
+			IOUtils.closeStream(fos);
+		}
+
+	}
+
+	/* @inheritDoc */
+	@Override
+	public String toString() {
+		return this.getLocationName();
+	}
+
+	/**
+	 * Fill the configuration with valid default values
+	 */
+	private void addPluginConfigDefaultProperties() {
+		for (ConfProp prop : ConfProp.values()) {
+			conf.set(getConfPropName(prop), prop.defVal);
+		}
+	}
+
+	/**
+	 * Starts the location status updater
+	 */
+	private synchronized void startStatusUpdater() {
+		if (statusUpdater == null) {
+			statusUpdater = new LocationStatusUpdater();
+			statusUpdater.schedule();
+		}
+	}
+
+	/*
+	 * Rewrite of the connecting and tunneling to the Hadoop location
+	 */
+
+	/**
+	 * Provides access to the default file system of this location.
+	 * 
+	 * @return a {@link FileSystem}
+	 */
+	public FileSystem getDFS() throws IOException {
+		return FileSystem.get(this.conf);
+	}
+
+	/**
+	 * Provides access to the Job tracking system of this location
+	 * 
+	 * @return a {@link JobClient}
+	 */
+	public JobClient getJobClient() throws IOException {
+		JobConf jconf = new JobConf(this.conf);
+		return new JobClient(jconf);
+	}
+
+	/*
+	 * Listeners handling
+	 */
+
+	protected void fireJarPublishDone(IJarModule jar) {
+		for (IJobListener listener : jobListeners) {
+			listener.publishDone(jar);
+		}
+	}
+
+	protected void fireJarPublishStart(IJarModule jar) {
+		for (IJobListener listener : jobListeners) {
+			listener.publishStart(jar);
+		}
+	}
+
+	protected void fireJobAdded(HadoopJob job) {
+		for (IJobListener listener : jobListeners) {
+			listener.jobAdded(job);
+		}
+	}
+
+	protected void fireJobRemoved(IHadoopJob job) {
+		for (IJobListener listener : jobListeners) {
+			listener.jobRemoved(job);
+		}
+	}
+
+	protected void fireJobChanged(HadoopJob job) {
+		for (IJobListener listener : jobListeners) {
+			listener.jobChanged(job);
+		}
+	}
+
+	@Override
+	public void saveConfiguration(File confDir, String jarFilePath) throws IOException {
+		// Prepare the Hadoop configuration
+		JobConf conf = new JobConf(this.conf);
+		conf.setJar(jarFilePath);
+		// Write it to core-site.xml, then duplicate that file as yarn-site.xml
+		File coreSiteFile = new File(confDir, "core-site.xml");
+		File yarnSiteFile = new File(confDir, "yarn-site.xml");
+		FileOutputStream fos = new FileOutputStream(coreSiteFile);
+		FileInputStream fis = null;
+		try {
+			conf.writeXml(fos);
+			fos.close();
+			fos = new FileOutputStream(yarnSiteFile);
+			fis = new FileInputStream(coreSiteFile);
+			IOUtils.copyBytes(new BufferedInputStream(fis), fos, 4096);
+		} finally {
+			IOUtils.closeStream(fos);
+			IOUtils.closeStream(fis);
+		}
+
+	}
+
+	/* (non-Javadoc)
+	 * @see org.apache.hdt.core.launch.AbstractHadoopCluster#isAvailable()
+	 */
+	@Override
+	public boolean isAvailable() throws CoreException {
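+		// Build the JobClient on the executor thread so a hung connection
+		// cannot block the caller; the get(...) below bounds the wait.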
+		Callable<JobClient> task = new Callable<JobClient>() {
+			@Override
+			public JobClient call() throws Exception {
+				return getJobClient();
+			}
+		};
+		Future<JobClient> jobClientFuture = service.submit(task);
+		try {
+			jobClientFuture.get(500, TimeUnit.SECONDS);
+			return true;
+		} catch (Exception e) {
+			e.printStackTrace();
+			throw new CoreException(new Status(Status.ERROR,
+					Activator.BUNDLE_ID, "unable to connect to server", e));
+		}
+	}
+
+    @Override
+    public String getVersion() {
+        return "2.2";
+    }
+}

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/34799cec/org.apache.hdt.hadoop2.release/src/org/apache/hdt/hadoop2/release/HadoopJob.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.hadoop2.release/src/org/apache/hdt/hadoop2/release/HadoopJob.java b/org.apache.hdt.hadoop2.release/src/org/apache/hdt/hadoop2/release/HadoopJob.java
new file mode 100644
index 0000000..a648cae
--- /dev/null
+++ b/org.apache.hdt.hadoop2.release/src/org/apache/hdt/hadoop2/release/HadoopJob.java
@@ -0,0 +1,338 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hdt.hadoop2.release;
+
+import java.io.File;
+import java.io.IOException;
+
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.mapred.Counters;
+import org.apache.hadoop.mapred.JobConf;
+import org.apache.hadoop.mapred.JobID;
+import org.apache.hadoop.mapred.JobStatus;
+import org.apache.hadoop.mapred.RunningJob;
+import org.apache.hdt.core.launch.AbstractHadoopCluster;
+import org.apache.hdt.core.launch.IHadoopJob;
+
+/**
+ * Representation of a Map/Reduce running job on a given location
+ */
+
+public class HadoopJob implements IHadoopJob {
+
+	/**
+	 * Enum representation of a Job state
+	 */
+	public enum JobState {
+		PREPARE(JobStatus.PREP), RUNNING(JobStatus.RUNNING), FAILED(JobStatus.FAILED), SUCCEEDED(JobStatus.SUCCEEDED);
+
+		final int state;
+
+		JobState(int state) {
+			this.state = state;
+		}
+
+		static JobState ofInt(int state) {
+			if (state == JobStatus.PREP) {
+				return PREPARE;
+			} else if (state == JobStatus.RUNNING) {
+				return RUNNING;
+			} else if (state == JobStatus.FAILED) {
+				return FAILED;
+			} else if (state == JobStatus.SUCCEEDED) {
+				return SUCCEEDED;
+			} else {
+				return null;
+			}
+		}
+	}
+
+	/**
+	 * Location this Job runs on
+	 */
+	private final HadoopCluster location;
+
+	/**
+	 * Unique identifier of this Job
+	 */
+	final JobID jobId;
+
+	/**
+	 * Status representation of a running job. This actually contains a
+	 * reference to a JobClient. Its methods might block.
+	 */
+	RunningJob running;
+
+	/**
+	 * Last polled status
+	 * 
+	 * @deprecated should apparently not be used
+	 */
+	JobStatus status;
+
+	/**
+	 * Last polled counters
+	 */
+	Counters counters;
+
+	/**
+	 * Job Configuration
+	 */
+	JobConf jobConf = null;
+
+	boolean completed = false;
+
+	boolean successful = false;
+
+	boolean killed = false;
+
+	int totalMaps;
+
+	int totalReduces;
+
+	int completedMaps;
+
+	int completedReduces;
+
+	float mapProgress;
+
+	float reduceProgress;
+
+	/**
+	 * Constructor for a Hadoop job representation
+	 * 
+	 * @param location the cluster the job runs on
+	 * @param id the unique job identifier
+	 * @param running the RunningJob handle obtained from the JobClient
+	 * @param status the last known status of the job
+	 */
+	public HadoopJob(HadoopCluster location, JobID id, RunningJob running, JobStatus status) {
+		this.location = location;
+		this.jobId = id;
+		this.running = running;
+		loadJobFile();
+		update(status);
+	}
+
+	/**
+	 * Try to locate and load the JobConf file for this job so as to get more
+	 * details on the job (number of map and reduce tasks)
+	 */
+	private void loadJobFile() {
+		try {
+			String jobFile = getJobFile();
+			FileSystem fs = location.getDFS();
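+			// Copy this job's job.xml from the cluster's file system to a
+			// local temp file so it can be parsed as a JobConf.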
+			File tmp = File.createTempFile(getJobID().toString(), ".xml");
+			if (FileUtil.copy(fs, new Path(jobFile), tmp, false, location.getConf())) {
+				this.jobConf = new JobConf(tmp.toString());
+
+				this.totalMaps = jobConf.getNumMapTasks();
+				this.totalReduces = jobConf.getNumReduceTasks();
+			}
+
+		} catch (IOException ioe) {
+			ioe.printStackTrace();
+		}
+	}
+
+	/* @inheritDoc */
+	@Override
+	public int hashCode() {
+		final int prime = 31;
+		int result = 1;
+		result = prime * result + ((jobId == null) ? 0 : jobId.hashCode());
+		result = prime * result + ((location == null) ? 0 : location.hashCode());
+		return result;
+	}
+
+	/* @inheritDoc */
+	@Override
+	public boolean equals(Object obj) {
+		if (this == obj)
+			return true;
+		if (obj == null)
+			return false;
+		if (!(obj instanceof HadoopJob))
+			return false;
+		final HadoopJob other = (HadoopJob) obj;
+		if (jobId == null) {
+			if (other.jobId != null)
+				return false;
+		} else if (!jobId.equals(other.jobId))
+			return false;
+		if (location == null) {
+			if (other.location != null)
+				return false;
+		} else if (!location.equals(other.location))
+			return false;
+		return true;
+	}
+
+	/**
+	 * Get the running status of the Job (@see {@link JobStatus}).
+	 * 
+	 * @return the job state as a string
+	 */
+	public String getState() {
+		if (this.completed) {
+			if (this.successful) {
+				return JobState.SUCCEEDED.toString();
+			} else {
+				return JobState.FAILED.toString();
+			}
+		} else {
+			return JobState.RUNNING.toString();
+		}
+	}
+
+	/**
+	 * @return the job ID as a string
+	 */
+	public String getJobID() {
+		return this.jobId.toString();
+	}
+
+	/**
+	 * @return the cluster this job runs on
+	 */
+	public AbstractHadoopCluster getLocation() {
+		return this.location;
+	}
+
+	/**
+	 * @return true if the job has completed, successfully or not
+	 */
+	public boolean isCompleted() {
+		return this.completed;
+	}
+
+	/**
+	 * @return the name of the job
+	 */
+	public String getJobName() {
+		return this.running.getJobName();
+	}
+
+	/**
+	 * @return the path of the job configuration file
+	 */
+	public String getJobFile() {
+		return this.running.getJobFile();
+	}
+
+	/**
+	 * Return the tracking URL for this Job.
+	 * 
+	 * @return string representation of the tracking URL for this Job
+	 */
+	public String getTrackingURL() {
+		return this.running.getTrackingURL();
+	}
+
+	/**
+	 * Returns a string representation of this job status
+	 * 
+	 * @return string representation of this job status
+	 */
+	public String getStatus() {
+
+		StringBuffer s = new StringBuffer();
+
+		s.append("Maps : " + completedMaps + "/" + totalMaps);
+		s.append(" (" + mapProgress + ")");
+		s.append("  Reduces : " + completedReduces + "/" + totalReduces);
+		s.append(" (" + reduceProgress + ")");
+
+		return s.toString();
+	}
+
+	/**
+	 * Update this job status according to the given JobStatus
+	 * 
+	 * @param status
+	 */
+	void update(JobStatus status) {
+		this.status = status;
+		try {
+			this.counters = running.getCounters();
+			this.completed = running.isComplete();
+			this.successful = running.isSuccessful();
+			this.mapProgress = running.mapProgress();
+			this.reduceProgress = running.reduceProgress();
+			// running.getTaskCompletionEvents(fromEvent);
+
+		} catch (IOException ioe) {
+			ioe.printStackTrace();
+		}
+
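+		// Approximate completed task counts as total tasks * progress ratio.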
+		this.completedMaps = (int) (this.totalMaps * this.mapProgress);
+		this.completedReduces = (int) (this.totalReduces * this.reduceProgress);
+	}
+
+	/**
+	 * Print this job counters (for debugging purpose)
+	 */
+	void printCounters() {
+		System.out.printf("New Job:\n", counters);
+		for (String groupName : counters.getGroupNames()) {
+			Counters.Group group = counters.getGroup(groupName);
+			System.out.printf("\t%s[%s]\n", groupName, group.getDisplayName());
+
+			for (Counters.Counter counter : group) {
+				System.out.printf("\t\t%s: %s\n", counter.getDisplayName(), counter.getCounter());
+			}
+		}
+		System.out.printf("\n");
+	}
+
+	/**
+	 * Kill this job
+	 */
+	public void kill() {
+		try {
+			this.running.killJob();
+			this.killed = true;
+
+		} catch (IOException e) {
+			e.printStackTrace();
+		}
+	}
+
+	/**
+	 * Print this job status (for debugging purpose)
+	 */
+	public void display() {
+		System.out.printf("Job id=%s, name=%s\n", getJobID(), getJobName());
+		System.out.printf("Configuration file: %s\n", getJobID());
+		System.out.printf("Tracking URL: %s\n", getTrackingURL());
+
+		System.out.printf("Completion: map: %f reduce %f\n", 100.0 * this.mapProgress, 100.0 * this.reduceProgress);
+
+		System.out.println("Job total maps = " + totalMaps);
+		System.out.println("Job completed maps = " + completedMaps);
+		System.out.println("Map percentage complete = " + mapProgress);
+		System.out.println("Job total reduces = " + totalReduces);
+		System.out.println("Job completed reduces = " + completedReduces);
+		System.out.println("Reduce percentage complete = " + reduceProgress);
+		System.out.flush();
+	}
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/34799cec/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/hdfs/HDFSLightweightLabelDecorator.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/hdfs/HDFSLightweightLabelDecorator.java b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/hdfs/HDFSLightweightLabelDecorator.java
index 9424a45..f0d01f8 100644
--- a/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/hdfs/HDFSLightweightLabelDecorator.java
+++ b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/hdfs/HDFSLightweightLabelDecorator.java
@@ -111,7 +111,7 @@ public class HDFSLightweightLabelDecorator implements ILightweightLabelDecorator
 							String userId = server.getUserId();
 							if (userId == null) {
 								try {
-									userId = hdfsManager.getClient(serverUrl).getDefaultUserAndGroupIds().get(0);
+									userId = hdfsManager.getClient(serverUrl, server.getVersion()).getDefaultUserAndGroupIds().get(0);
 								} catch (Throwable e) {
 									userId = null;
 								}

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/34799cec/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/hdfs/NewHDFSServerWizardPage.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/hdfs/NewHDFSServerWizardPage.java b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/hdfs/NewHDFSServerWizardPage.java
index f5eca4d..f0a68af 100644
--- a/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/hdfs/NewHDFSServerWizardPage.java
+++ b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/hdfs/NewHDFSServerWizardPage.java
@@ -26,7 +26,9 @@ import java.util.StringTokenizer;
 
 import org.apache.hdt.core.hdfs.HDFSClient;
 import org.apache.hdt.core.internal.hdfs.HDFSManager;
+import org.apache.hdt.core.launch.ConfProp;
 import org.apache.hdt.ui.Activator;
+import org.apache.hdt.ui.internal.launch.HadoopLocationWizard;
 import org.apache.log4j.Logger;
 import org.eclipse.core.runtime.CoreException;
 import org.eclipse.jface.wizard.WizardPage;
@@ -41,8 +43,10 @@ import org.eclipse.swt.widgets.Button;
 import org.eclipse.swt.widgets.Combo;
 import org.eclipse.swt.widgets.Composite;
 import org.eclipse.swt.widgets.Display;
+import org.eclipse.swt.widgets.Event;
 import org.eclipse.swt.widgets.Group;
 import org.eclipse.swt.widgets.Label;
+import org.eclipse.swt.widgets.Listener;
 import org.eclipse.swt.widgets.Text;
 
 public class NewHDFSServerWizardPage extends WizardPage {
@@ -55,6 +59,8 @@ public class NewHDFSServerWizardPage extends WizardPage {
 	private String hdfsServerName = null;
 	private boolean overrideDefaultSecurity = false;
 	private String userId = null;
+	private Combo hdfsVersionOptions;
+	private String hdfsVersion;
 	private List<String> groupIds = new ArrayList<String>();
 
 	protected NewHDFSServerWizardPage() {
@@ -115,6 +121,30 @@ public class NewHDFSServerWizardPage extends WizardPage {
 		Label exampleLabel = new Label(c, SWT.NONE);
 		exampleLabel.setText("Example: hdfs://hdfs.server.hostname:8020");
 		exampleLabel.setForeground(Display.getCurrent().getSystemColor(SWT.COLOR_DARK_GRAY));
+		
+		/*
+		 * HDFS version
+		 */
+		{
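+			// The version choice decides which version-specific HDFS client
+			// plug-in will service this server (see getHDFSVersion()).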
+			Label label = new Label(c, SWT.NONE);
+			label.setText("&HDFS Version:");
+			Combo options = new Combo(c, SWT.SINGLE | SWT.BORDER | SWT.READ_ONLY);
+			options.setLayoutData(new GridData(GridData.FILL_HORIZONTAL));
+			options.add(HadoopLocationWizard.HADOOP_1);
+			options.add(HadoopLocationWizard.HADOOP_2);
+			options.addListener(SWT.Selection, new Listener() {
+
+				@Override
+				public void handleEvent(Event arg0) {
+					hdfsVersion = hdfsVersionOptions.getText();
+				}
+			});
+			options.select(0);
+			hdfsVersion = options.getItem(0);
+			hdfsVersionOptions = options;
+		}
+		
 		// Security
 		Group securityGroup = new Group(c, SWT.SHADOW_ETCHED_IN);
 		GridData gd = new GridData(GridData.FILL_HORIZONTAL);
@@ -191,7 +221,7 @@ public class NewHDFSServerWizardPage extends WizardPage {
 	private List<String> getUserAndGroupIds() {
 		List<String> list = new ArrayList<String>();
 		try {
-			HDFSClient client = HDFSManager.INSTANCE.getClient(hdfsServerLocation);
+			HDFSClient client = HDFSManager.INSTANCE.getClient(hdfsServerLocation, ConfProp.PI_HADOOP_VERSION.defVal);
 			List<String> defaultUserAndGroupIds = client.getDefaultUserAndGroupIds();
 			if (defaultUserAndGroupIds != null)
 				list.addAll(defaultUserAndGroupIds);
@@ -239,4 +269,8 @@ public class NewHDFSServerWizardPage extends WizardPage {
 	public List<String> getGroupIds() {
 		return groupIds;
 	}
+
+	public String getHDFSVersion() {
+		return hdfsVersion;
+	}
 }

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/34799cec/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/hdfs/NewHDFSWizard.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/hdfs/NewHDFSWizard.java b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/hdfs/NewHDFSWizard.java
index 9b0706c..e66c9c4 100644
--- a/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/hdfs/NewHDFSWizard.java
+++ b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/hdfs/NewHDFSWizard.java
@@ -81,7 +81,8 @@ public class NewHDFSWizard extends Wizard implements INewWizard,IExecutableExten
 					protected org.eclipse.core.runtime.IStatus run(org.eclipse.core.runtime.IProgressMonitor monitor) {
 						return HDFSManager.addServer(serverLocationWizardPage.getHdfsServerName(),serverLocationWizardPage.getHdfsServerLocation(),
 								serverLocationWizardPage.isOverrideDefaultSecurity() ? serverLocationWizardPage.getUserId() : null,
-								serverLocationWizardPage.isOverrideDefaultSecurity() ? serverLocationWizardPage.getGroupIds() : null);
+								serverLocationWizardPage.isOverrideDefaultSecurity() ? serverLocationWizardPage.getGroupIds() : null,
+										serverLocationWizardPage.getHDFSVersion());
 					};
 				};
 				j.schedule();

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/34799cec/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/launch/HadoopLocationWizard.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/launch/HadoopLocationWizard.java b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/launch/HadoopLocationWizard.java
index 3757c05..7f3cbfb 100644
--- a/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/launch/HadoopLocationWizard.java
+++ b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/launch/HadoopLocationWizard.java
@@ -36,6 +36,7 @@ import org.eclipse.core.resources.IWorkspaceRoot;
 import org.eclipse.core.resources.ResourcesPlugin;
 import org.eclipse.core.runtime.CoreException;
 import org.eclipse.jface.dialogs.IMessageProvider;
+import org.eclipse.jface.dialogs.MessageDialog;
 import org.eclipse.jface.wizard.WizardPage;
 import org.eclipse.swt.SWT;
 import org.eclipse.swt.custom.ScrolledComposite;
@@ -47,6 +48,7 @@ import org.eclipse.swt.graphics.Image;
 import org.eclipse.swt.layout.GridData;
 import org.eclipse.swt.layout.GridLayout;
 import org.eclipse.swt.widgets.Button;
+import org.eclipse.swt.widgets.Combo;
 import org.eclipse.swt.widgets.Composite;
 import org.eclipse.swt.widgets.Control;
 import org.eclipse.swt.widgets.Display;
@@ -68,6 +70,8 @@ import org.eclipse.swt.widgets.Text;
 
 public class HadoopLocationWizard extends WizardPage {
 
+	public  static final String HADOOP_1 = "1.1";
+	public  static final String HADOOP_2 = "2.2";
 	Image circle;
 
 	/**
@@ -90,7 +94,7 @@ public class HadoopLocationWizard extends WizardPage {
 
 		this.original = null;
 		try {
-			this.location = AbstractHadoopCluster.createCluster();
+			this.location = AbstractHadoopCluster.createCluster(ConfProp.PI_HADOOP_VERSION.defVal);
 		} catch (CoreException e) {
 			e.printStackTrace();
 		}
@@ -125,8 +129,8 @@ public class HadoopLocationWizard extends WizardPage {
 				Display.getDefault().syncExec(new Runnable() {
 					public void run() {
 						HDFSManager.addServer(location.getLocationName(),
-								location.getConfProp(ConfProp.FS_DEFAULT_URI), location
-								.getConfProp(ConfProp.PI_USER_NAME), null);
+								location.getConfPropValue(ConfProp.FS_DEFAULT_URI), location
+								.getConfPropValue(ConfProp.PI_USER_NAME), null, location.getVersion());
 					}
 				});
 				// New location
@@ -141,9 +145,9 @@ public class HadoopLocationWizard extends WizardPage {
 				
 				// Update location
 				final String originalName = this.original.getLocationName();
-				final String originalLoc = this.original.getConfProp(ConfProp.FS_DEFAULT_URI);
+				final String originalLoc = this.original.getConfPropValue(ConfProp.FS_DEFAULT_URI);
 				final String newName = this.location.getLocationName();
-				final String newLoc = this.location.getConfProp(ConfProp.FS_DEFAULT_URI);
+				final String newLoc = this.location.getConfPropValue(ConfProp.FS_DEFAULT_URI);
 				
 				if (!originalName.equals(newName) || !originalLoc.equals(newLoc)){
 					IWorkspaceRoot root = ResourcesPlugin.getWorkspace().getRoot();
@@ -160,8 +164,8 @@ public class HadoopLocationWizard extends WizardPage {
 								}
 							}
 							HDFSManager.addServer(location.getLocationName(),
-									location.getConfProp(ConfProp.FS_DEFAULT_URI), location
-									.getConfProp(ConfProp.PI_USER_NAME), null);
+									location.getConfPropValue(ConfProp.FS_DEFAULT_URI), location
+									.getConfPropValue(ConfProp.PI_USER_NAME), null, location.getVersion());
 						}
 					});
 				}
@@ -204,7 +208,7 @@ public class HadoopLocationWizard extends WizardPage {
 	public boolean isPageComplete() {
 
 		{
-			String locName = location.getConfProp(ConfProp.PI_LOCATION_NAME);
+			String locName = location.getConfPropValue(ConfProp.PI_LOCATION_NAME);
 			if ((locName == null) || (locName.length() == 0) || locName.contains("/")) {
 
 				setMessage("Bad location name: " + "the location name should not contain " + "any character prohibited in a file name.", WARNING);
@@ -214,7 +218,7 @@ public class HadoopLocationWizard extends WizardPage {
 		}
 
 		{
-			String master = location.getConfProp(ConfProp.PI_JOB_TRACKER_HOST);
+			String master = location.getConfPropValue(ConfProp.PI_JOB_TRACKER_HOST);
 			if ((master == null) || (master.length() == 0)) {
 
 				setMessage("Bad master host name: " + "the master host name refers to the machine " + "that runs the Job tracker.", WARNING);
@@ -224,7 +228,7 @@ public class HadoopLocationWizard extends WizardPage {
 		}
 
 		{
-			String jobTracker = location.getConfProp(ConfProp.JOB_TRACKER_URI);
+			String jobTracker = location.getConfPropValue(ConfProp.JOB_TRACKER_URI);
 			String[] strs = jobTracker.split(":");
 			boolean ok = (strs.length == 2);
 			if (ok) {
@@ -236,14 +240,14 @@ public class HadoopLocationWizard extends WizardPage {
 				}
 			}
 			if (!ok) {
-				setMessage("The job tracker information (" + ConfProp.JOB_TRACKER_URI.name + ") is invalid. " + "This usually looks like \"host:port\"",
+				setMessage("The job tracker information is invalid. " + "This usually looks like \"host:port\"",
 						WARNING);
 				return false;
 			}
 		}
 
 		{
-			String fsDefaultURI = location.getConfProp(ConfProp.FS_DEFAULT_URI);
+			String fsDefaultURI = location.getConfPropValue(ConfProp.FS_DEFAULT_URI);
 			try {
 				URI uri = new URI(fsDefaultURI);
 			} catch (URISyntaxException e) {
@@ -301,6 +305,7 @@ public class HadoopLocationWizard extends WizardPage {
 
 	private interface TabListener {
 		void notifyChange(ConfProp prop, String propValue);
+		void reloadData();
 	}
 
 	/*
@@ -320,21 +325,6 @@ public class HadoopLocationWizard extends WizardPage {
 		}
 
 		/**
-		 * Access to current configuration settings
-		 * 
-		 * @param propName
-		 *            the property name
-		 * @return the current property value
-		 */
-		String get(String propName) {
-			return location.getConfProp(propName);
-		}
-
-		String get(ConfProp prop) {
-			return location.getConfProp(prop);
-		}
-
-		/**
 		 * Implements change notifications from any tab: update the location
 		 * state and other tabs
 		 * 
@@ -347,11 +337,11 @@ public class HadoopLocationWizard extends WizardPage {
 		 */
 		void notifyChange(TabListener source, final ConfProp prop, final String propValue) {
 			// Ignore notification when no change
-			String oldValue = location.getConfProp(prop);
+			String oldValue = location.getConfPropValue(prop);
 			if ((oldValue != null) && oldValue.equals(propValue))
 				return;
 
-			location.setConfProp(prop, propValue);
+			location.setConfPropValue(prop, propValue);
 			Display.getDefault().syncExec(new Runnable() {
 				public void run() {
 					getContainer().updateButtons();
@@ -363,17 +353,17 @@ public class HadoopLocationWizard extends WizardPage {
 			/*
 			 * Now we deal with dependencies between settings
 			 */
-			final String jobTrackerHost = location.getConfProp(ConfProp.PI_JOB_TRACKER_HOST);
-			final String jobTrackerPort = location.getConfProp(ConfProp.PI_JOB_TRACKER_PORT);
-			final String nameNodeHost = location.getConfProp(ConfProp.PI_NAME_NODE_HOST);
-			final String nameNodePort = location.getConfProp(ConfProp.PI_NAME_NODE_PORT);
-			final boolean colocate = location.getConfProp(ConfProp.PI_COLOCATE_MASTERS).equalsIgnoreCase("yes");
-			final String jobTrackerURI = location.getConfProp(ConfProp.JOB_TRACKER_URI);
-			final String fsDefaultURI = location.getConfProp(ConfProp.FS_DEFAULT_URI);
-			final String socksServerURI = location.getConfProp(ConfProp.SOCKS_SERVER);
-			final boolean socksProxyEnable = location.getConfProp(ConfProp.PI_SOCKS_PROXY_ENABLE).equalsIgnoreCase("yes");
-			final String socksProxyHost = location.getConfProp(ConfProp.PI_SOCKS_PROXY_HOST);
-			final String socksProxyPort = location.getConfProp(ConfProp.PI_SOCKS_PROXY_PORT);
+			final String jobTrackerHost = location.getConfPropValue(ConfProp.PI_JOB_TRACKER_HOST);
+			final String jobTrackerPort = location.getConfPropValue(ConfProp.PI_JOB_TRACKER_PORT);
+			final String nameNodeHost = location.getConfPropValue(ConfProp.PI_NAME_NODE_HOST);
+			final String nameNodePort = location.getConfPropValue(ConfProp.PI_NAME_NODE_PORT);
+			final boolean colocate = location.getConfPropValue(ConfProp.PI_COLOCATE_MASTERS).equalsIgnoreCase("yes");
+			final String jobTrackerURI = location.getConfPropValue(ConfProp.JOB_TRACKER_URI);
+			final String fsDefaultURI = location.getConfPropValue(ConfProp.FS_DEFAULT_URI);
+			final String socksServerURI = location.getConfPropValue(ConfProp.SOCKS_SERVER);
+			final boolean socksProxyEnable = location.getConfPropValue(ConfProp.PI_SOCKS_PROXY_ENABLE).equalsIgnoreCase("yes");
+			final String socksProxyHost = location.getConfPropValue(ConfProp.PI_SOCKS_PROXY_HOST);
+			final String socksProxyPort = location.getConfPropValue(ConfProp.PI_SOCKS_PROXY_PORT);
 
 			Display.getDefault().syncExec(new Runnable() {
 				public void run() {
@@ -456,7 +446,7 @@ public class HadoopLocationWizard extends WizardPage {
 							notifyChange(null, ConfProp.SOCKET_FACTORY_DEFAULT, "org.apache.hadoop.net.StandardSocketFactory");
 						}
 						break;
-					}
+					}					
 					}
 				}
 			});
@@ -473,12 +463,11 @@ public class HadoopLocationWizard extends WizardPage {
 		 * @param propValue
 		 */
 		void notifyChange(TabListener source, String propName, String propValue) {
-
-			ConfProp prop = ConfProp.getByName(propName);
+			ConfProp prop = location.getConfPropForName(propName);
 			if (prop != null)
 				notifyChange(source, prop, propValue);
-
-			location.setConfProp(propName, propValue);
+			else
+				location.setConfPropValue(propName, propValue);
 		}
 
 		/**
@@ -510,12 +499,11 @@ public class HadoopLocationWizard extends WizardPage {
 	 * @return
 	 */
 	private Text createConfText(ModifyListener listener, Composite parent, ConfProp prop) {
-
 		Text text = new Text(parent, SWT.SINGLE | SWT.BORDER);
 		GridData data = new GridData(GridData.FILL_HORIZONTAL);
 		text.setLayoutData(data);
-		text.setData("hProp", prop);
-		text.setText(location.getConfProp(prop));
+		text.setData("hProp",prop);
+		text.setText(location.getConfPropValue(prop));
 		text.addModifyListener(listener);
 
 		return text;
@@ -531,13 +519,11 @@ public class HadoopLocationWizard extends WizardPage {
 	 * @return
 	 */
 	private Button createConfCheckButton(SelectionListener listener, Composite parent, ConfProp prop, String text) {
-
 		Button button = new Button(parent, SWT.CHECK);
 		button.setText(text);
 		button.setData("hProp", prop);
-		button.setSelection(location.getConfProp(prop).equalsIgnoreCase("yes"));
+		button.setSelection(location.getConfPropValue(prop).equalsIgnoreCase("yes"));
 		button.addSelectionListener(listener);
-
 		return button;
 	}
 
@@ -557,12 +543,10 @@ public class HadoopLocationWizard extends WizardPage {
 	 * @return a SWT Text field
 	 */
 	private Text createConfLabelText(ModifyListener listener, Composite parent, ConfProp prop, String labelText) {
-
 		Label label = new Label(parent, SWT.NONE);
 		if (labelText == null)
-			labelText = prop.name;
+			labelText = location.getConfPropName(prop);
 		label.setText(labelText);
-
 		return createConfText(listener, parent, prop);
 	}
 
@@ -583,7 +567,7 @@ public class HadoopLocationWizard extends WizardPage {
 	private Text createConfNameEditor(ModifyListener listener, Composite parent, String propName, String labelText) {
 
 		{
-			ConfProp prop = ConfProp.getByName(propName);
+			ConfProp prop = location.getConfPropForName(propName);
 			if (prop != null)
 				return createConfLabelText(listener, parent, prop, labelText);
 		}
@@ -597,7 +581,7 @@ public class HadoopLocationWizard extends WizardPage {
 		GridData data = new GridData(GridData.FILL_HORIZONTAL);
 		text.setLayoutData(data);
 		text.setData("hPropName", propName);
-		text.setText(location.getConfProp(propName));
+		text.setText(location.getConfPropValue(propName));
 		text.addModifyListener(listener);
 
 		return text;
@@ -610,11 +594,19 @@ public class HadoopLocationWizard extends WizardPage {
 	 */
 	private class TabMain implements TabListener, ModifyListener, SelectionListener {
 
 		TabMediator mediator;
 
 		Text locationName;
+
+		Combo hadoopVersion;
 
 		Text textJTHost;
+
 		Text textNNHost;
 
@@ -632,6 +624,8 @@ public class HadoopLocationWizard extends WizardPage {
 
 		Text socksProxyPort;
 
+		private Group groupMR;
+
 		TabMain(TabMediator mediator) {
 			this.mediator = mediator;
 			TabItem tab = new TabItem(mediator.folder, SWT.NONE);
@@ -661,12 +655,56 @@ public class HadoopLocationWizard extends WizardPage {
 
 				locationName = createConfLabelText(this, subpanel, ConfProp.PI_LOCATION_NAME, "&Location name:");
 			}
+			/*
+			 * Hadoop version
+			 */
+			{
+				Composite subpanel = new Composite(panel, SWT.FILL);
+				subpanel.setLayout(new GridLayout(2, false));
+				data = new GridData();
+				data.horizontalSpan = 2;
+				data.horizontalAlignment = SWT.FILL;
+				subpanel.setLayoutData(data);
+				
+				Label label = new Label(subpanel, SWT.NONE);
+				label.setText("&Hadoop Version:");
+				Combo options = new Combo(subpanel, SWT.BORDER | SWT.READ_ONLY);
+				options.add(HADOOP_1);
+				options.add(HADOOP_2);
+				options.select(0);
+				options.setLayoutData(new GridData(GridData.FILL_HORIZONTAL));
+				options.addListener(SWT.Selection, new Listener() {
+					@Override
+					public void handleEvent(Event event) {
+						final String selection = hadoopVersion.getText();
+						if (location == null || !selection.equals(location.getVersion())) {
+							Display.getDefault().syncExec(new Runnable() {
+
+								@Override
+								public void run() {
+									try {
+										location = AbstractHadoopCluster.createCluster(selection);
+										for (TabListener tab : mediator.tabs) {
+											tab.reloadData();
+										}
+									} catch (CoreException e) {
+										MessageDialog.openError(Display.getDefault().getActiveShell(), "HDFS Error", "Unable to create HDFS site :"
+												+ e.getMessage());
+									}
+								}
+							});
+						}
 
+					}
+				});
+				hadoopVersion = options;
+			}
+			
 			/*
 			 * Map/Reduce group
 			 */
 			{
-				Group groupMR = new Group(panel, SWT.SHADOW_NONE);
+				groupMR = new Group(panel, SWT.SHADOW_NONE);
 				groupMR.setText("Map/Reduce Master");
 				groupMR.setToolTipText("Address of the Map/Reduce master node " + "(the Job Tracker).");
 				GridLayout layout = new GridLayout(2, false);
@@ -783,7 +821,7 @@ public class HadoopLocationWizard extends WizardPage {
 			// Update the state of all widgets according to the current values!
 			reloadConfProp(ConfProp.PI_COLOCATE_MASTERS);
 			reloadConfProp(ConfProp.PI_SOCKS_PROXY_ENABLE);
-			reloadConfProp(ConfProp.PI_JOB_TRACKER_HOST);
+			reloadConfProp(ConfProp.PI_HADOOP_VERSION);
 
 			return panel;
 		}
@@ -794,7 +832,28 @@ public class HadoopLocationWizard extends WizardPage {
 		 * @param prop
 		 */
 		private void reloadConfProp(ConfProp prop) {
-			this.notifyChange(prop, location.getConfProp(prop));
+			this.notifyChange(prop, location.getConfPropValue(prop));
+		}
+		
+		@Override
+		public void reloadData() {
+			if (HADOOP_2.equals(hadoopVersion.getText())) {
+				groupMR.setText("Resource Manager Master");
+				groupMR.setToolTipText("Address of the Resouce manager node ");
+			} else {
+				groupMR.setText("Map/Reduce Master");
+				groupMR.setToolTipText("Address of the Map/Reduce master node " + "(the Job Tracker).");
+			}
+			groupMR.layout(true);
+			notifyChange(ConfProp.PI_JOB_TRACKER_HOST, location.getConfPropValue(ConfProp.PI_JOB_TRACKER_HOST));
+			notifyChange(ConfProp.PI_JOB_TRACKER_PORT, location.getConfPropValue(ConfProp.PI_JOB_TRACKER_PORT));
+			notifyChange(ConfProp.PI_USER_NAME, location.getConfPropValue(ConfProp.PI_USER_NAME));
+			notifyChange(ConfProp.PI_NAME_NODE_HOST, location.getConfPropValue(ConfProp.PI_NAME_NODE_HOST));
+			notifyChange(ConfProp.PI_COLOCATE_MASTERS, location.getConfPropValue(ConfProp.PI_COLOCATE_MASTERS));
+			notifyChange(ConfProp.PI_SOCKS_PROXY_ENABLE, location.getConfPropValue(ConfProp.PI_SOCKS_PROXY_ENABLE));
+			notifyChange(ConfProp.PI_SOCKS_PROXY_HOST, location.getConfPropValue(ConfProp.PI_SOCKS_PROXY_HOST));
+			notifyChange(ConfProp.PI_SOCKS_PROXY_PORT, location.getConfPropValue(ConfProp.PI_SOCKS_PROXY_PORT));
 		}
 
 		public void notifyChange(ConfProp prop, String propValue) {
@@ -851,10 +910,11 @@ public class HadoopLocationWizard extends WizardPage {
 			case PI_SOCKS_PROXY_PORT: {
 				socksProxyPort.setText(propValue);
 				break;
-			}
+			}			
 			}
 		}
 
 		/* @inheritDoc */
 		public void modifyText(ModifyEvent e) {
 			final Text text = (Text) e.widget;
@@ -888,9 +948,7 @@ public class HadoopLocationWizard extends WizardPage {
 
 	private class TabAdvanced implements TabListener, ModifyListener {
 		TabMediator mediator;
-
 		private Composite panel;
-
 		private Map<String, Text> textMap = new TreeMap<String, Text>();
 
 		TabAdvanced(TabMediator mediator) {
@@ -905,15 +963,29 @@ public class HadoopLocationWizard extends WizardPage {
 
 		private Control createControl(Composite parent) {
 			ScrolledComposite sc = new ScrolledComposite(parent, SWT.BORDER | SWT.H_SCROLL | SWT.V_SCROLL);
-
-			panel = new Composite(sc, SWT.NONE);
+			panel = buildPanel(sc);
 			sc.setContent(panel);
-
 			sc.setExpandHorizontal(true);
 			sc.setExpandVertical(true);
-
 			sc.setMinSize(640, 480);
+			sc.setMinSize(panel.computeSize(SWT.DEFAULT, SWT.DEFAULT));
+			return sc;
+		}
+		
+		@Override
+		public void reloadData() {
+			ScrolledComposite parent = (ScrolledComposite) panel.getParent();
+			panel.dispose();
+			Composite panel = buildPanel(parent);
+			parent.setContent(panel);
+			parent.setMinSize(panel.computeSize(SWT.DEFAULT, SWT.DEFAULT));
+			parent.pack();
+			parent.layout(true);
+			this.panel = panel;
+		}
 
+		private Composite buildPanel(Composite parent) {
+			Composite panel = new Composite(parent, SWT.NONE);
 			GridLayout layout = new GridLayout();
 			layout.numColumns = 2;
 			layout.makeColumnsEqualWidth = false;
@@ -932,14 +1004,12 @@ public class HadoopLocationWizard extends WizardPage {
 				Text text = createConfNameEditor(this, panel, entry.getKey(), null);
 				textMap.put(entry.getKey(), text);
 			}
-
-			sc.setMinSize(panel.computeSize(SWT.DEFAULT, SWT.DEFAULT));
-
-			return sc;
+			return panel;
 		}
+		
 
 		public void notifyChange(ConfProp prop, final String propValue) {
-			Text text = textMap.get(prop.name);
+			Text text = textMap.get(location.getConfPropName(prop));
 			text.setText(propValue);
 		}
 
@@ -959,6 +1029,8 @@ public class HadoopLocationWizard extends WizardPage {
 				}
 			});
 		}
 	}
 
 }

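A note on the version dispatch above: the wizard now routes the selected version string through AbstractHadoopCluster.createCluster(String). A hypothetical sketch of such a factory (illustrative only; the real implementation lives in org.apache.hdt.core and is not part of this patch):

    // Illustrative sketch; names and structure are assumptions, not code from this patch.
    public static AbstractHadoopCluster createCluster(String version) throws CoreException {
        if ("2.2".equals(version))
            return new org.apache.hdt.hadoop2.release.HadoopCluster();
        if ("1.1".equals(version))
            return new org.apache.hdt.hadoop.release.HadoopCluster();
        throw new CoreException(new Status(Status.ERROR, "org.apache.hdt.core",
                "Unsupported Hadoop version: " + version, null));
    }

In practice the core plug-in cannot depend on the version-specific plug-ins directly, so it would likely resolve these classes via extension points or dedicated class loaders, which is the motivation for the fragment-to-plugin change in this series.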
http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/34799cec/org.apache.hdt.updateSite/.classpath
----------------------------------------------------------------------
diff --git a/org.apache.hdt.updateSite/.classpath b/org.apache.hdt.updateSite/.classpath
index 4c2b7c4..36851f4 100644
--- a/org.apache.hdt.updateSite/.classpath
+++ b/org.apache.hdt.updateSite/.classpath
@@ -1,9 +1,10 @@
 <?xml version="1.0" encoding="UTF-8"?>
 <classpath>
-  <classpathentry kind="src" path="target/maven-shared-archive-resources" excluding="**/*.java"/>
-  <classpathentry kind="output" path="target/classes"/>
-  <classpathentry kind="con" path="org.eclipse.jdt.launching.JRE_CONTAINER"/>
-  <classpathentry kind="src" path="/org.apache.hdt.core"/>
-  <classpathentry kind="src" path="/org.apache.hdt.hadoop.release"/>
-  <classpathentry kind="src" path="/org.apache.hdt.ui"/>
-</classpath>
\ No newline at end of file
+	<classpathentry excluding="**/*.java" kind="src" path="target/maven-shared-archive-resources"/>
+	<classpathentry kind="con" path="org.eclipse.jdt.launching.JRE_CONTAINER"/>
+	<classpathentry kind="src" path="/org.apache.hdt.core"/>
+	<classpathentry kind="src" path="/org.apache.hdt.hadoop.release"/>
+	<classpathentry kind="src" path="/org.apache.hdt.ui"/>
+	<classpathentry combineaccessrules="false" kind="src" path="/org.apache.hdt.hadoop2.release"/>
+	<classpathentry kind="output" path="target/classes"/>
+</classpath>

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/34799cec/org.apache.hdt.updateSite/.project
----------------------------------------------------------------------
diff --git a/org.apache.hdt.updateSite/.project b/org.apache.hdt.updateSite/.project
index 99c4771..b94eb36 100644
--- a/org.apache.hdt.updateSite/.project
+++ b/org.apache.hdt.updateSite/.project
@@ -6,6 +6,7 @@
 		<project>org.apache.hdt.core</project>
 		<project>org.apache.hdt.feature</project>
 		<project>org.apache.hdt.hadoop.release</project>
+		<project>org.apache.hdt.hadoop2.release</project>
 		<project>org.apache.hdt.ui</project>
 	</projects>
 	<buildSpec>

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/34799cec/pom.xml
----------------------------------------------------------------------
diff --git a/pom.xml b/pom.xml
index 19b9ec5..4005645 100644
--- a/pom.xml
+++ b/pom.xml
@@ -127,6 +127,7 @@ under the License.
     <module>org.apache.hdt.core</module>
     <module>org.apache.hdt.ui</module>
     <module>org.apache.hdt.hadoop.release</module>
+    <module>org.apache.hdt.hadoop2.release</module>
     <module>org.apache.hdt.feature</module>
     <module>org.apache.hdt.updateSite</module>
     <module>org.apache.hdt.ui.test</module>

