hdt-commits mailing list archives

From rsha...@apache.org
Subject [7/8] HDT-32: Merge the code base of Hadoop-Eclipse project into HDT. Contributed by Srimanth Gunturi
Date Thu, 25 Jul 2013 04:29:23 GMT
http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/63bec260/org.apache.hdt.core/src/org/apache/hdt/core/internal/hdfs/HDFSFileStore.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.core/src/org/apache/hdt/core/internal/hdfs/HDFSFileStore.java b/org.apache.hdt.core/src/org/apache/hdt/core/internal/hdfs/HDFSFileStore.java
new file mode 100644
index 0000000..b84221c
--- /dev/null
+++ b/org.apache.hdt.core/src/org/apache/hdt/core/internal/hdfs/HDFSFileStore.java
@@ -0,0 +1,594 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hdt.core.internal.hdfs;
+
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.FileNotFoundException;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.hdt.core.Activator;
+import org.apache.hdt.core.hdfs.HDFSClient;
+import org.apache.hdt.core.hdfs.ResourceInformation;
+import org.apache.hdt.core.internal.model.HDFSServer;
+import org.apache.log4j.Level;
+import org.apache.log4j.Logger;
+import org.eclipse.core.filesystem.EFS;
+import org.eclipse.core.filesystem.IFileInfo;
+import org.eclipse.core.filesystem.IFileStore;
+import org.eclipse.core.filesystem.provider.FileInfo;
+import org.eclipse.core.filesystem.provider.FileStore;
+import org.eclipse.core.resources.ResourcesPlugin;
+import org.eclipse.core.runtime.CoreException;
+import org.eclipse.core.runtime.IProgressMonitor;
+import org.eclipse.core.runtime.IStatus;
+import org.eclipse.core.runtime.Status;
+import org.eclipse.core.runtime.URIUtil;
+
+/**
+ * Represents a file or folder in the Hadoop Distributed File System. This
+ * {@link IFileStore} knows about the remote HDFS resource, and the local
+ * resource. Based on this, it is able to tell a lot about each file and its
+ * sync status.
+ * 
+ * @author Srimanth Gunturi
+ */
+public class HDFSFileStore extends FileStore {
+
+	private static final Logger logger = Logger.getLogger(HDFSFileStore.class);
+	private final HDFSURI uri;
+	private File localFile = null;
+	private FileInfo serverFileInfo = null;
+	private FileInfo localFileInfo = null;
+	private ResourceInformation serverResourceInfo = null;
+	private HDFSServer hdfsServer;
+	private ResourceInformation.Permissions effectivePermissions = null;
+	private List<String> systemDefaultUserIdAndGroupIds = null;
+
+	public HDFSFileStore(HDFSURI uri) {
+		this.uri = uri;
+	}
+
+	protected HDFSServer getServer() {
+		if (hdfsServer == null) {
+			hdfsServer = HDFSManager.INSTANCE.getServer(this.uri.getURI().toString());
+		}
+		return hdfsServer;
+	}
+
+	@Override
+	public String[] childNames(int options, IProgressMonitor monitor) throws CoreException {
+		List<String> childNamesList = new ArrayList<String>();
+		if (getServer() != null) {
+			try {
+				List<ResourceInformation> listResources = getClient().listResources(uri.getURI(), getServer().getUserId());
+				for (ResourceInformation lr : listResources) {
+					if (lr != null)
+						childNamesList.add(lr.getName());
+				}
+			} catch (IOException e) {
+				throw new CoreException(new Status(IStatus.ERROR, Activator.BUNDLE_ID, e.getMessage(), e));
+			} catch (InterruptedException e) {
+				throw new CoreException(new Status(IStatus.ERROR, Activator.BUNDLE_ID, e.getMessage(), e));
+			}
+			if (isLocalFile()) {
+				// If there is a local folder also, then the local children
+				// belong to the server as well. Skip names already reported
+				// by the server to avoid duplicate entries.
+				File local = getLocalFile();
+				if (local.isDirectory()) {
+					for (String localChild : local.list()) {
+						if (!childNamesList.contains(localChild))
+							childNamesList.add(localChild);
+					}
+				}
+			}
+		}
+		if (logger.isDebugEnabled())
+			logger.debug("[" + uri + "]: childNames():" + childNamesList);
+		return childNamesList.toArray(new String[childNamesList.size()]);
+	}
+
+	/**
+	 * @return the {@link HDFSClient} for this store's server
+	 * @throws CoreException
+	 */
+	private HDFSClient getClient() throws CoreException {
+		return HDFSManager.INSTANCE.getClient(getServer().getUri());
+	}
+
+	/**
+	 * The file information for this resource has two parts: server file
+	 * information and local file information. Either one, or both, may be
+	 * available:
+	 * <ul>
+	 * <li>Server only</li>
+	 * <li>Local only</li>
+	 * <li>Server and local</li>
+	 * </ul>
+	 * 
+	 * This method attempts to determine whichever of the server and local
+	 * file information is not yet available. Stale information can be
+	 * cleared by calling {@link #clearServerFileInfo()} and
+	 * {@link #clearLocalFileInfo()}.
+	 * 
+	 */
+	@Override
+	public IFileInfo fetchInfo(int options, IProgressMonitor monitor) throws CoreException {
+		if (serverFileInfo == null) {
+			serverResourceInfo = null;
+			this.effectivePermissions = null;
+			FileInfo fi = new FileInfo(getName());
+			HDFSServer server = getServer();
+			if (server != null) {
+				try {
+					if (".project".equals(getName())) {
+						fi.setExists(getLocalFile().exists());
+						fi.setLength(getLocalFile().length());
+					} else {
+						ResourceInformation fileInformation = getClient().getResourceInformation(uri.getURI(), server.getUserId());
+						if (fileInformation != null) {
+							serverResourceInfo = fileInformation;
+							fi.setDirectory(fileInformation.isFolder());
+							fi.setExists(true);
+							fi.setLastModified(fileInformation.getLastModifiedTime());
+							fi.setLength(fileInformation.getSize());
+							fi.setName(fileInformation.getName());
+							String userId = server.getUserId();
+							List<String> groupIds = server.getGroupIds();
+							if (userId == null) {
+								userId = getDefaultUserId();
+								groupIds = getDefaultGroupIds();
+							}
+							fileInformation.updateEffectivePermissions(userId, groupIds);
+							this.effectivePermissions = fileInformation.getEffectivePermissions();
+							fi.setAttribute(EFS.ATTRIBUTE_OWNER_READ, fileInformation.getUserPermissions().read);
+							fi.setAttribute(EFS.ATTRIBUTE_OWNER_WRITE, fileInformation.getUserPermissions().write);
+							fi.setAttribute(EFS.ATTRIBUTE_OWNER_EXECUTE, fileInformation.getUserPermissions().execute);
+							fi.setAttribute(EFS.ATTRIBUTE_GROUP_READ, fileInformation.getGroupPermissions().read);
+							fi.setAttribute(EFS.ATTRIBUTE_GROUP_WRITE, fileInformation.getGroupPermissions().write);
+							fi.setAttribute(EFS.ATTRIBUTE_GROUP_EXECUTE, fileInformation.getGroupPermissions().execute);
+							fi.setAttribute(EFS.ATTRIBUTE_OTHER_READ, fileInformation.getOtherPermissions().read);
+							fi.setAttribute(EFS.ATTRIBUTE_OTHER_WRITE, fileInformation.getOtherPermissions().write);
+							fi.setAttribute(EFS.ATTRIBUTE_OTHER_EXECUTE, fileInformation.getOtherPermissions().execute);
+						}
+					}
+				} catch (IOException e) {
+					throw new CoreException(new Status(IStatus.ERROR, Activator.BUNDLE_ID, e.getMessage(), e));
+				} catch (InterruptedException e) {
+					throw new CoreException(new Status(IStatus.ERROR, Activator.BUNDLE_ID, e.getMessage(), e));
+				}
+			} else {
+				// No server definition
+				fi.setExists(false);
+			}
+			serverFileInfo = fi;
+		}
+		if (localFileInfo == null) {
+			if (isLocalFile()) {
+				File file = getLocalFile();
+				localFileInfo = new FileInfo(file.getName());
+				if (file.exists()) {
+					localFileInfo.setExists(true);
+					localFileInfo.setLastModified(file.lastModified());
+					localFileInfo.setLength(file.length());
+					localFileInfo.setDirectory(file.isDirectory());
+					localFileInfo.setAttribute(EFS.ATTRIBUTE_READ_ONLY, file.exists() && !file.canWrite());
+					localFileInfo.setAttribute(EFS.ATTRIBUTE_HIDDEN, file.isHidden());
+				} else
+					localFileInfo.setExists(false);
+			}
+		}
+		if (logger.isDebugEnabled())
+			logger.debug("[" + uri + "]: fetchInfo(): " + HDFSUtilites.getDebugMessage(serverFileInfo));
+		if (localFileInfo != null)
+			return localFileInfo;
+		return serverFileInfo;
+	}
+
+	protected String getDefaultUserId() {
+		if (systemDefaultUserIdAndGroupIds == null) {
+			try {
+				this.systemDefaultUserIdAndGroupIds = getClient().getDefaultUserAndGroupIds();
+			} catch (IOException e) {
+				logger.debug(e.getMessage(), e);
+			} catch (CoreException e) {
+				logger.debug(e.getMessage(), e);
+			} catch (InterruptedException e) {
+				logger.debug(e.getMessage(), e);
+			}
+		}
+		if (this.systemDefaultUserIdAndGroupIds != null && this.systemDefaultUserIdAndGroupIds.size() > 0)
+			return this.systemDefaultUserIdAndGroupIds.get(0);
+		return null;
+	}
+
+	protected List<String> getDefaultGroupIds() {
+		if (systemDefaultUserIdAndGroupIds == null) {
+			try {
+				this.systemDefaultUserIdAndGroupIds = getClient().getDefaultUserAndGroupIds();
+			} catch (IOException e) {
+				logger.debug(e.getMessage(), e);
+			} catch (CoreException e) {
+				logger.debug(e.getMessage(), e);
+			} catch (InterruptedException e) {
+				logger.debug(e.getMessage(), e);
+			}
+		}
+		if (this.systemDefaultUserIdAndGroupIds != null && this.systemDefaultUserIdAndGroupIds.size() > 1)
+			// Entries after the first (the user id) are the group ids; subList's
+			// end index is exclusive, so use size() to include the last group.
+			return this.systemDefaultUserIdAndGroupIds.subList(1, this.systemDefaultUserIdAndGroupIds.size());
+		return null;
+	}
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see
+	 * org.eclipse.core.filesystem.provider.FileStore#putInfo(org.eclipse.core
+	 * .filesystem.IFileInfo, int, org.eclipse.core.runtime.IProgressMonitor)
+	 */
+	@Override
+	public void putInfo(IFileInfo info, int options, IProgressMonitor monitor) throws CoreException {
+		try {
+			if (isLocalFile()) {
+				File file = getLocalFile();
+				if ((options & EFS.SET_LAST_MODIFIED) != 0)
+					file.setLastModified(info.getLastModified());
+				if ((options & EFS.SET_ATTRIBUTES) != 0) {
+					file.setReadable(info.getAttribute(EFS.ATTRIBUTE_OWNER_READ), true);
+					file.setWritable(info.getAttribute(EFS.ATTRIBUTE_OWNER_WRITE), true);
+					file.setExecutable(info.getAttribute(EFS.ATTRIBUTE_OWNER_EXECUTE), true);
+				}
+			} else {
+				ResourceInformation ri = new ResourceInformation();
+				ri.setFolder(info.isDirectory());
+				if ((options & EFS.SET_LAST_MODIFIED) != 0)
+					ri.setLastModifiedTime(info.getLastModified());
+				HDFSServer server = getServer();
+				getClient().setResourceInformation(uri.getURI(), ri, server == null ? null : server.getUserId());
+			}
+		} catch (IOException e) {
+			throw new CoreException(new Status(IStatus.ERROR, Activator.BUNDLE_ID, e.getMessage(), e));
+		} catch (InterruptedException e) {
+			throw new CoreException(new Status(IStatus.ERROR, Activator.BUNDLE_ID, e.getMessage(), e));
+		}
+	}
+
+	/**
+	 * When this file store makes a change that invalidates the cached server
+	 * information, it should clear that cache by calling this method.
+	 */
+	protected void clearServerFileInfo() {
+		if (logger.isDebugEnabled())
+			logger.debug("[" + uri + "]: clearServerFileInfo()");
+		this.serverFileInfo = null;
+	}
+
+	/**
+	 * When this file store makes a change that invalidates the cached local
+	 * information, it should clear that cache by calling this method.
+	 */
+	protected void clearLocalFileInfo() {
+		if (logger.isDebugEnabled())
+			logger.debug("[" + uri + "]: clearLocalFileInfo()");
+		this.localFileInfo = null;
+	}
+
+	@Override
+	public IFileStore getChild(String name) {
+		if (logger.isDebugEnabled())
+			logger.debug("[" + uri + "]: getChild():" + name);
+		return new HDFSFileStore(uri.append(name));
+	}
+
+	@Override
+	public String getName() {
+		String lastSegment = uri.lastSegment();
+		if (lastSegment == null)
+			lastSegment = "/";
+		if (logger.isDebugEnabled())
+			logger.debug("[" + uri + "]: getName():" + lastSegment);
+		return lastSegment;
+	}
+
+	@Override
+	public IFileStore getParent() {
+		if (logger.isDebugEnabled())
+			logger.debug("[" + uri + "]: getParent()");
+		try {
+			return new HDFSFileStore(uri.removeLastSegment());
+		} catch (URISyntaxException e) {
+			logger.log(Level.WARN, e.getMessage(), e);
+		}
+		return null;
+	}
+
+	@Override
+	public InputStream openInputStream(int options, IProgressMonitor monitor) throws CoreException {
+		if (logger.isDebugEnabled())
+			logger.debug("[" + uri + "]: openInputStream()");
+		if (".project".equals(getName())) {
+			try {
+				final File localFile = getLocalFile();
+				if (!localFile.exists())
+					localFile.createNewFile();
+				return new FileInputStream(localFile);
+			} catch (FileNotFoundException e) {
+				throw new CoreException(new Status(IStatus.ERROR, Activator.BUNDLE_ID, e.getMessage(), e));
+			} catch (IOException e) {
+				throw new CoreException(new Status(IStatus.ERROR, Activator.BUNDLE_ID, e.getMessage(), e));
+			}
+		} else {
+			File lFile = getLocalFile();
+			if (lFile.exists()) {
+				try {
+					return new FileInputStream(lFile);
+				} catch (FileNotFoundException e) {
+					throw new CoreException(new Status(IStatus.ERROR, Activator.BUNDLE_ID, e.getMessage(), e));
+				}
+			} else {
+				return openRemoteInputStream(options, monitor);
+			}
+		}
+	}
+
+	public InputStream openRemoteInputStream(int options, IProgressMonitor monitor) throws CoreException {
+		if (logger.isDebugEnabled())
+			logger.debug("[" + uri + "]: openRemoteInputStream()");
+		if (".project".equals(getName())) {
+			return null;
+		} else {
+			try {
+				HDFSServer server = getServer();
+				return getClient().openInputStream(uri.getURI(), server == null ? null : server.getUserId());
+			} catch (IOException e) {
+				throw new CoreException(new Status(IStatus.ERROR, Activator.BUNDLE_ID, e.getMessage(), e));
+			} catch (InterruptedException e) {
+				throw new CoreException(new Status(IStatus.ERROR, Activator.BUNDLE_ID, e.getMessage(), e));
+			}
+		}
+	}
+
+	@Override
+	public URI toURI() {
+		return uri.getURI();
+	}
+
+	/**
+	 * @return the localFile
+	 * @throws CoreException
+	 */
+	public File getLocalFile() throws CoreException {
+		if (localFile == null) {
+			final HDFSManager hdfsManager = HDFSManager.INSTANCE;
+			final String uriString = uri.getURI().toString();
+			HDFSServer server = hdfsManager.getServer(uriString);
+			if (server != null) {
+				File workspaceFolder = ResourcesPlugin.getWorkspace().getRoot().getLocation().toFile();
+				try {
+					URI relativeURI = URIUtil.makeRelative(uri.getURI(), new URI(server.getUri()));
+					String relativePath = hdfsManager.getProjectName(server) + "/" + relativeURI.toString();
+					localFile = new File(workspaceFolder, relativePath);
+				} catch (URISyntaxException e) {
+					throw new CoreException(new Status(IStatus.ERROR, Activator.BUNDLE_ID, e.getMessage(), e));
+				}
+			} else
+				logger.error("No server associated with uri: " + uriString);
+		}
+		if (logger.isDebugEnabled())
+			logger.debug("[" + uri + "]: getLocalFile():" + localFile);
+		return localFile;
+	}
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see org.eclipse.core.filesystem.provider.FileStore#mkdir(int,
+	 * org.eclipse.core.runtime.IProgressMonitor)
+	 */
+	@Override
+	public IFileStore mkdir(int options, IProgressMonitor monitor) throws CoreException {
+		if (logger.isDebugEnabled())
+			logger.debug("[" + uri + "]: mkdir()");
+		try {
+			clearServerFileInfo();
+			HDFSServer server = getServer();
+			if (getClient().mkdirs(uri.getURI(), server == null ? null : server.getUserId())) {
+				return this;
+			} else {
+				return null;
+			}
+		} catch (IOException e) {
+			logger.error("Unable to mkdir: " + uri);
+			throw new CoreException(new Status(IStatus.ERROR, Activator.BUNDLE_ID, e.getMessage(), e));
+		} catch (InterruptedException e) {
+			logger.error("Unable to mkdir: " + uri);
+			throw new CoreException(new Status(IStatus.ERROR, Activator.BUNDLE_ID, e.getMessage(), e));
+		}
+	}
+
+	/**
+	 * Determines if the file exists in the local workspace.
+	 * 
+	 * @return <code>true</code> if a local copy of this file exists
+	 */
+	public boolean isLocalFile() {
+		try {
+			File localFile = getLocalFile();
+			return localFile != null && localFile.exists();
+		} catch (CoreException e) {
+			logger.debug("Unable to determine if file is local", e);
+		}
+		return false;
+	}
+
+	/**
+	 * Determines if the file exists only in the local workspace.
+	 * 
+	 * @return <code>true</code> only when {@link #isLocalFile()} is
+	 *         <code>true</code> and {@link #isRemoteFile()} is not
+	 */
+	public boolean isLocalOnly() {
+		return isLocalFile() && !isRemoteFile();
+	}
+
+	/**
+	 * Determines if the file exists on the server side.
+	 * 
+	 * @return <code>true</code> if this file exists on the HDFS server
+	 */
+	public boolean isRemoteFile() {
+		if (this.serverFileInfo == null)
+			this.fetchInfo();
+		return this.serverFileInfo != null && this.serverFileInfo.exists();
+	}
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see org.eclipse.core.filesystem.provider.FileStore#openOutputStream(int,
+	 * org.eclipse.core.runtime.IProgressMonitor)
+	 */
+	@Override
+	public OutputStream openOutputStream(int options, IProgressMonitor monitor) throws CoreException {
+		if (logger.isDebugEnabled())
+			logger.debug("[" + uri + "]: openOutputStream()");
+		if (".project".equals(getName())) {
+			try {
+				File dotProjectFile = getLocalFile();
+				if (!dotProjectFile.exists()) {
+					dotProjectFile.getParentFile().mkdirs();
+					dotProjectFile.createNewFile();
+				}
+				return new FileOutputStream(dotProjectFile);
+			} catch (FileNotFoundException e) {
+				throw new CoreException(new Status(IStatus.ERROR, Activator.BUNDLE_ID, e.getMessage(), e));
+			} catch (IOException e) {
+				throw new CoreException(new Status(IStatus.ERROR, Activator.BUNDLE_ID, e.getMessage(), e));
+			}
+		} else {
+			File lFile = getLocalFile();
+			if (!lFile.exists()) {
+				lFile.getParentFile().mkdirs();
+				try {
+					lFile.createNewFile();
+				} catch (IOException e) {
+					throw new CoreException(new Status(IStatus.ERROR, Activator.BUNDLE_ID, "Cannot create new file to save", e));
+				}
+			}
+			if (lFile.exists()) {
+				try {
+					clearLocalFileInfo();
+					return new FileOutputStream(lFile);
+				} catch (FileNotFoundException e) {
+					throw new CoreException(new Status(IStatus.ERROR, Activator.BUNDLE_ID, e.getMessage(), e));
+				}
+			} else
+				throw new CoreException(new Status(IStatus.ERROR, Activator.BUNDLE_ID, "Local file does not exist to write to: " + lFile.getAbsolutePath()));
+		}
+	}
+
+	public OutputStream openRemoteOutputStream(int options, IProgressMonitor monitor) throws CoreException {
+		if (logger.isDebugEnabled())
+			logger.debug("[" + uri + "]: openRemoteOutputStream()");
+		try {
+			HDFSServer server = getServer();
+			clearServerFileInfo();
+			if (fetchInfo().exists())
+				return getClient().openOutputStream(uri.getURI(), server == null ? null : server.getUserId());
+			else
+				return getClient().createOutputStream(uri.getURI(), server == null ? null : server.getUserId());
+		} catch (IOException e) {
+			throw new CoreException(new Status(IStatus.ERROR, Activator.BUNDLE_ID, e.getMessage(), e));
+		} catch (InterruptedException e) {
+			throw new CoreException(new Status(IStatus.ERROR, Activator.BUNDLE_ID, e.getMessage(), e));
+		}
+	}
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see org.eclipse.core.filesystem.provider.FileStore#delete(int,
+	 * org.eclipse.core.runtime.IProgressMonitor)
+	 */
+	@Override
+	public void delete(int options, IProgressMonitor monitor) throws CoreException {
+		if (logger.isDebugEnabled())
+			logger.debug("[" + uri + "]: delete()");
+		try {
+			if (isLocalFile()) {
+				clearLocalFileInfo();
+				final File lf = getLocalFile();
+				final File plf = lf.getParentFile();
+				lf.delete();
+				UploadFileJob.deleteFoldersIfEmpty(plf);
+			}
+			if (isRemoteFile()) {
+				final HDFSServer server = getServer();
+				if (server != null) {
+					if (server.getUri().equals(uri.getURI().toString())) {
+						// Server location is the same as the project, so we
+						// just disconnect instead of actually deleting the
+						// root folder on HDFS.
+					} else {
+						clearServerFileInfo();
+						getClient().delete(uri.getURI(), server == null ? null : server.getUserId());
+					}
+				} else {
+					// Not associated with any server, we just disconnect.
+				}
+			}
+		} catch (IOException e) {
+			logger.error("Unable to delete: " + uri);
+			throw new CoreException(new Status(IStatus.ERROR, Activator.BUNDLE_ID, e.getMessage(), e));
+		} catch (InterruptedException e) {
+			logger.error("Unable to delete: " + uri);
+			throw new CoreException(new Status(IStatus.ERROR, Activator.BUNDLE_ID, e.getMessage(), e));
+		}
+	}
+
+	/**
+	 * Effective permissions are available only when both the accessing user
+	 * and the permissions from the server are known. If any of the data
+	 * needed to determine them is missing, <code>null</code> is returned.
+	 * 
+	 * @return the effectivePermissions
+	 */
+	public ResourceInformation.Permissions getEffectivePermissions() {
+		if (effectivePermissions == null)
+			fetchInfo();
+		return effectivePermissions;
+	}
+
+	/**
+	 * @return the serverResourceInfo
+	 */
+	public ResourceInformation getServerResourceInfo() {
+		return serverResourceInfo;
+	}
+}
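
The store above is an EFS provider, so callers never construct it directly; they resolve it from an hdfs:// URI through the platform. A minimal sketch of driving the store through the EFS API, assuming an Eclipse/OSGi runtime where the "hdfs" scheme is registered (the namenode host, port, and path are made-up examples):

    import java.net.URI;
    import org.eclipse.core.filesystem.EFS;
    import org.eclipse.core.filesystem.IFileInfo;
    import org.eclipse.core.filesystem.IFileStore;
    import org.eclipse.core.runtime.CoreException;

    public class FetchInfoSketch {
        public static void demo() throws CoreException {
            // Resolves to an HDFSFileStore via the file system registered for "hdfs".
            IFileStore store = EFS.getStore(URI.create("hdfs://namenode:8020/user/demo/data.txt"));
            // fetchInfo() merges server-side and local information as documented above.
            IFileInfo info = store.fetchInfo();
            System.out.println(store.getName() + " exists=" + info.exists() + " length=" + info.getLength());
        }
    }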

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/63bec260/org.apache.hdt.core/src/org/apache/hdt/core/internal/hdfs/HDFSFileSystem.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.core/src/org/apache/hdt/core/internal/hdfs/HDFSFileSystem.java b/org.apache.hdt.core/src/org/apache/hdt/core/internal/hdfs/HDFSFileSystem.java
new file mode 100644
index 0000000..cc55493
--- /dev/null
+++ b/org.apache.hdt.core/src/org/apache/hdt/core/internal/hdfs/HDFSFileSystem.java
@@ -0,0 +1,57 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hdt.core.internal.hdfs;
+
+import java.net.URI;
+
+import org.eclipse.core.filesystem.IFileStore;
+import org.eclipse.core.filesystem.provider.FileSystem;
+
+/**
+ * 
+ * @author Srimanth Gunturi
+ */
+public class HDFSFileSystem extends FileSystem {
+	
+	public static final String SCHEME = "hdfs";
+
+	@Override
+	public IFileStore getStore(URI uri) {
+		if(SCHEME.equals(uri.getScheme()))
+			return new HDFSFileStore(new HDFSURI(uri));
+		return null;
+	}
+	
+	/* (non-Javadoc)
+	 * @see org.eclipse.core.filesystem.provider.FileSystem#canDelete()
+	 */
+	@Override
+	public boolean canDelete() {
+		return true;
+	}
+	
+	/* (non-Javadoc)
+	 * @see org.eclipse.core.filesystem.provider.FileSystem#canWrite()
+	 */
+	@Override
+	public boolean canWrite() {
+		return true;
+	}
+
+}
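
HDFSFileSystem is the scheme-level entry point; registration for the "hdfs" scheme presumably happens through the org.eclipse.core.filesystem.filesystems extension point in the bundle's plugin.xml, which is not part of this diff. Once registered, lookups dispatch through it; a hedged sketch (URI is a made-up example):

    import java.net.URI;
    import org.eclipse.core.filesystem.EFS;
    import org.eclipse.core.filesystem.IFileStore;
    import org.eclipse.core.filesystem.IFileSystem;
    import org.eclipse.core.runtime.CoreException;

    public class SchemeLookupSketch {
        public static void demo() throws CoreException {
            // Looks up the file system registered for HDFSFileSystem.SCHEME ("hdfs").
            IFileSystem hdfs = EFS.getFileSystem("hdfs");
            // Per getStore() above, non-hdfs URIs would yield null.
            IFileStore store = hdfs.getStore(URI.create("hdfs://namenode:8020/tmp"));
            System.out.println(store.getName());
        }
    }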

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/63bec260/org.apache.hdt.core/src/org/apache/hdt/core/internal/hdfs/HDFSManager.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.core/src/org/apache/hdt/core/internal/hdfs/HDFSManager.java b/org.apache.hdt.core/src/org/apache/hdt/core/internal/hdfs/HDFSManager.java
new file mode 100644
index 0000000..93f0696
--- /dev/null
+++ b/org.apache.hdt.core/src/org/apache/hdt/core/internal/hdfs/HDFSManager.java
@@ -0,0 +1,285 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hdt.core.internal.hdfs;
+
+import java.net.URISyntaxException;
+import java.util.HashMap;
+import java.util.LinkedHashMap;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.hdt.core.Activator;
+import org.apache.hdt.core.hdfs.HDFSClient;
+import org.apache.hdt.core.internal.HadoopManager;
+import org.apache.hdt.core.internal.model.HDFSServer;
+import org.apache.hdt.core.internal.model.HadoopFactory;
+import org.apache.hdt.core.internal.model.ServerStatus;
+import org.apache.log4j.Logger;
+import org.eclipse.core.resources.IProject;
+import org.eclipse.core.resources.IProjectDescription;
+import org.eclipse.core.resources.IResource;
+import org.eclipse.core.resources.IWorkspace;
+import org.eclipse.core.resources.IWorkspaceRoot;
+import org.eclipse.core.resources.ResourcesPlugin;
+import org.eclipse.core.runtime.CoreException;
+import org.eclipse.core.runtime.IConfigurationElement;
+import org.eclipse.core.runtime.IStatus;
+import org.eclipse.core.runtime.NullProgressMonitor;
+import org.eclipse.core.runtime.Platform;
+import org.eclipse.core.runtime.Status;
+import org.eclipse.emf.common.util.EList;
+import org.eclipse.team.core.RepositoryProvider;
+
+/**
+ * Manages the association between workspace projects and HDFS servers, and
+ * provides {@link HDFSClient} instances for server URIs.
+ * 
+ * @author Srimanth Gunturi
+ * 
+ */
+public class HDFSManager {
+
+	public static final HDFSManager INSTANCE = new HDFSManager();
+	private static final Logger logger = Logger.getLogger(HDFSManager.class);
+
+	public static void disconnectProject(IProject project) {
+		HDFSServer server = HDFSManager.INSTANCE.getServer(project.getLocationURI().toString());
+		if (server != null && server.getStatusCode() != ServerStatus.DISCONNECTED_VALUE)
+			server.setStatusCode(ServerStatus.DISCONNECTED_VALUE);
+		try {
+			project.refreshLocal(IResource.DEPTH_INFINITE, new NullProgressMonitor());
+		} catch (CoreException e) {
+			logger.warn(e.getMessage(), e);
+		}
+	}
+
+	public static void reconnectProject(IProject project) {
+		HDFSServer server = HDFSManager.INSTANCE.getServer(project.getLocationURI().toString());
+		if (server != null && server.getStatusCode() == ServerStatus.DISCONNECTED_VALUE)
+			server.setStatusCode(0);
+		try {
+			project.refreshLocal(IResource.DEPTH_INFINITE, new NullProgressMonitor());
+		} catch (CoreException e) {
+			logger.warn(e.getMessage(), e);
+		}
+	}
+
+	private Map<HDFSServer, String> serverToProjectMap = new HashMap<HDFSServer, String>();
+	private Map<String, HDFSServer> projectToServerMap = new HashMap<String, HDFSServer>();
+	private final Map<String, HDFSClient> hdfsClientsMap = new HashMap<String, HDFSClient>();
+	/**
+	 * URI should always end with a '/'
+	 */
+	private Map<String, HDFSServer> uriToServerMap = new HashMap<String, HDFSServer>();
+
+	private Map<String, HDFSServer> uriToServerCacheMap = new LinkedHashMap<String, HDFSServer>() {
+		private static final long serialVersionUID = 1L;
+		private static final int MAX_ENTRIES = 1 << 10;
+
+		@Override
+		protected boolean removeEldestEntry(Map.Entry<String, HDFSServer> eldest) {
+			return size() > MAX_ENTRIES;
+		}
+	};
+
+	/**
+	 * Singleton
+	 */
+	private HDFSManager() {
+	}
+
+	public EList<HDFSServer> getHdfsServers() {
+		return HadoopManager.INSTANCE.getServers().getHdfsServers();
+	}
+
+	public void loadServers() {
+		final IWorkspaceRoot workspaceRoot = ResourcesPlugin.getWorkspace().getRoot();
+		for (HDFSServer server : getHdfsServers()) {
+			uriToServerMap.put(server.getUri(), server);
+			final IProject project = workspaceRoot.getProject(server.getName());
+			if (!project.exists()) {
+				server.setStatusCode(ServerStatus.NO_PROJECT_VALUE);
+			}
+			serverToProjectMap.put(server, server.getName());
+			projectToServerMap.put(server.getName(), server);
+		}
+		IProject[] projects = workspaceRoot.getProjects();
+		if (projects != null) {
+			for (IProject p : projects) {
+				if (p.getLocationURI() != null && HDFSFileSystem.SCHEME.equals(p.getLocationURI().getScheme())) {
+					// The map is keyed by project name, not by IProject.
+					if (!projectToServerMap.containsKey(p.getName())) {
+						logger.error("HDFS project with no server associated being closed:" + p.getName());
+						try {
+							p.close(new NullProgressMonitor());
+							logger.error("HDFS project with no server associated closed:" + p.getName());
+						} catch (CoreException e) {
+							logger.error("HDFS project with no server associated cannot be closed:" + p.getName(), e);
+						}
+					}
+				}
+			}
+		}
+	}
+
+	/**
+	 * Creates and adds an HDFS server definition. This also creates a local
+	 * project which represents the server file system via EFS.
+	 * 
+	 * @param name the name of the workspace project to create
+	 * @param hdfsURI the root URI of the HDFS server
+	 * @param userId the user id to access the server as, or <code>null</code>
+	 * @param groupIds the group ids for the user, or <code>null</code>
+	 * @return the created server definition
+	 * @throws CoreException
+	 */
+	public HDFSServer createServer(String name, java.net.URI hdfsURI, String userId, List<String> groupIds) throws CoreException {
+		if (hdfsURI.getPath() == null || hdfsURI.getPath().length() < 1) {
+			try {
+				hdfsURI = new java.net.URI(hdfsURI.toString() + "/");
+			} catch (URISyntaxException e) {
+				logger.warn("Could not append '/' to server URI: " + hdfsURI, e);
+			}
+		}
+		if (ResourcesPlugin.getWorkspace().getRoot().getProject(name).exists())
+			throw new CoreException(new Status(IStatus.ERROR, Activator.BUNDLE_ID, "Project with name '" + name + "' already exists"));
+		HDFSServer hdfsServer = HadoopFactory.eINSTANCE.createHDFSServer();
+		hdfsServer.setName(name);
+		hdfsServer.setUri(hdfsURI.toString());
+		hdfsServer.setLoaded(true);
+		if (userId != null)
+			hdfsServer.setUserId(userId);
+		if (groupIds != null)
+			for (String groupId : groupIds)
+				hdfsServer.getGroupIds().add(groupId);
+		getHdfsServers().add(hdfsServer);
+		HadoopManager.INSTANCE.saveServers();
+		uriToServerMap.put(hdfsServer.getUri(), hdfsServer);
+		serverToProjectMap.put(hdfsServer, name);
+		projectToServerMap.put(name, hdfsServer);
+		createIProject(name, hdfsURI);
+		return hdfsServer;
+	}
+
+	/**
+	 * Creates and opens the workspace project located at the HDFS server URI.
+	 * 
+	 * @param name the project name
+	 * @param hdfsURI the project location URI
+	 * @return the created project
+	 * @throws CoreException
+	 */
+	private IProject createIProject(String name, java.net.URI hdfsURI) throws CoreException {
+		final IWorkspace workspace = ResourcesPlugin.getWorkspace();
+		IProject project = workspace.getRoot().getProject(name);
+		IProjectDescription pd = workspace.newProjectDescription(name);
+		pd.setLocationURI(hdfsURI);
+		project.create(pd, new NullProgressMonitor());
+		project.open(new NullProgressMonitor());
+		RepositoryProvider.map(project, HDFSTeamRepositoryProvider.ID);
+		return project;
+	}
+
+	public HDFSServer getServer(String uri) {
+		if (uri != null && !uriToServerCacheMap.containsKey(uri)) {
+			String tmpUri = uri;
+			HDFSServer serverU = uriToServerMap.get(tmpUri);
+			while (serverU == null) {
+				int lastSlashIndex = tmpUri.lastIndexOf('/');
+				tmpUri = lastSlashIndex < 0 ? null : tmpUri.substring(0, lastSlashIndex);
+				if (tmpUri != null)
+					serverU = uriToServerMap.get(tmpUri + "/");
+				else
+					break;
+			}
+			if (serverU != null)
+				uriToServerCacheMap.put(uri, serverU);
+		}
+		return uriToServerCacheMap.get(uri);
+	}
+
+	public String getProjectName(HDFSServer server) {
+		return serverToProjectMap.get(server);
+	}
+
+	/**
+	 * @param uri the URI on which a server operation is starting
+	 */
+	public void startServerOperation(String uri) {
+		HDFSServer server = getServer(uri);
+		if (server != null && !server.getOperationURIs().contains(uri)) {
+			server.getOperationURIs().add(uri);
+		}
+	}
+
+	/**
+	 * @param uri the URI whose server operation has finished
+	 */
+	public void stopServerOperation(String uri) {
+		HDFSServer server = getServer(uri);
+		if (server != null) {
+			server.getOperationURIs().remove(uri);
+		}
+	}
+
+	public boolean isServerOperationRunning(String uri) {
+		HDFSServer server = getServer(uri);
+		if (server != null) {
+			return server.getOperationURIs().contains(uri);
+		}
+		return false;
+	}
+
+	/**
+	 * @param server the server definition to remove
+	 */
+	public void deleteServer(HDFSServer server) {
+		getHdfsServers().remove(server);
+		String projectName = this.serverToProjectMap.remove(server);
+		this.projectToServerMap.remove(projectName);
+		this.uriToServerMap.remove(server.getUri());
+		// Drop any cached lookups that resolved to this server.
+		this.uriToServerCacheMap.values().removeAll(java.util.Collections.singleton(server));
+		HadoopManager.INSTANCE.saveServers();
+	}
+
+	/**
+	 * Provides the {@link HDFSClient} instance for the given server URI.
+	 * 
+	 * @param serverURI the URI of the server
+	 * @return the client, or <code>null</code> if no registered client
+	 *         handles the URI's scheme
+	 * @throws CoreException
+	 */
+	public HDFSClient getClient(String serverURI) throws CoreException {
+		if (logger.isDebugEnabled())
+			logger.debug("getClient(" + serverURI + "): Server=" + serverURI);
+		HDFSServer server = getServer(serverURI);
+		if (server != null && server.getStatusCode() == ServerStatus.DISCONNECTED_VALUE) {
+			if (logger.isDebugEnabled())
+				logger.debug("getClient(" + serverURI + "): Server timed out. Not returning client");
+			throw new CoreException(new Status(IStatus.WARNING, Activator.BUNDLE_ID, "Server disconnected due to timeout. Please reconnect to server."));
+		}
+		if (hdfsClientsMap.containsKey(serverURI))
+			return hdfsClientsMap.get(serverURI);
+		else {
+			try {
+				java.net.URI sUri = serverURI == null ? new java.net.URI("hdfs://server") : new java.net.URI(serverURI);
+				IConfigurationElement[] elementsFor = Platform.getExtensionRegistry().getConfigurationElementsFor("org.apache.hdt.core.hdfsClient");
+				for (IConfigurationElement element : elementsFor) {
+					if (sUri.getScheme().equals(element.getAttribute("protocol"))) {
+						HDFSClient client = (HDFSClient) element.createExecutableExtension("class");
+						hdfsClientsMap.put(serverURI, new InterruptableHDFSClient(serverURI, client));
+					}
+				}
+			} catch (URISyntaxException e) {
+				throw new CoreException(new Status(IStatus.ERROR, Activator.BUNDLE_ID, "Invalid server URI", e));
+			}
+			return hdfsClientsMap.get(serverURI);
+		}
+	}
+}
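
A note on getServer(): server roots are stored with a trailing '/', and lookups walk the URI up one path segment at a time until a registered root matches, caching results in the bounded LRU map above. A sketch of the expected resolution behavior (the URIs are hypothetical):

    import org.apache.hdt.core.internal.model.HDFSServer;

    public class ServerLookupSketch {
        public static void demo() {
            HDFSManager manager = HDFSManager.INSTANCE;
            // Assuming createServer() registered a server at "hdfs://namenode:8020/",
            // any URI under that root resolves to it via the prefix walk.
            HDFSServer owner = manager.getServer("hdfs://namenode:8020/user/demo/file.txt");
            // No registered root is a prefix of this URI, so the result is null.
            HDFSServer none = manager.getServer("hdfs://elsewhere:9000/x");
            System.out.println(owner + " / " + none);
        }
    }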

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/63bec260/org.apache.hdt.core/src/org/apache/hdt/core/internal/hdfs/HDFSMoveDeleteHook.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.core/src/org/apache/hdt/core/internal/hdfs/HDFSMoveDeleteHook.java b/org.apache.hdt.core/src/org/apache/hdt/core/internal/hdfs/HDFSMoveDeleteHook.java
new file mode 100644
index 0000000..0ca0df4
--- /dev/null
+++ b/org.apache.hdt.core/src/org/apache/hdt/core/internal/hdfs/HDFSMoveDeleteHook.java
@@ -0,0 +1,130 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hdt.core.internal.hdfs;
+
+import org.eclipse.core.resources.IFile;
+import org.eclipse.core.resources.IFolder;
+import org.eclipse.core.resources.IProject;
+import org.eclipse.core.resources.IProjectDescription;
+import org.eclipse.core.resources.IResource;
+import org.eclipse.core.resources.team.IMoveDeleteHook;
+import org.eclipse.core.resources.team.IResourceTree;
+import org.eclipse.core.runtime.IProgressMonitor;
+
+/**
+ * @author Srimanth Gunturi
+ * 
+ */
+public class HDFSMoveDeleteHook implements IMoveDeleteHook {
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see
+	 * org.eclipse.core.resources.team.IMoveDeleteHook#deleteFile(org.eclipse
+	 * .core.resources.team.IResourceTree, org.eclipse.core.resources.IFile,
+	 * int, org.eclipse.core.runtime.IProgressMonitor)
+	 */
+	@Override
+	public boolean deleteFile(IResourceTree tree, IFile file, int updateFlags, IProgressMonitor monitor) {
+		return false;
+	}
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see
+	 * org.eclipse.core.resources.team.IMoveDeleteHook#deleteFolder(org.eclipse
+	 * .core.resources.team.IResourceTree, org.eclipse.core.resources.IFolder,
+	 * int, org.eclipse.core.runtime.IProgressMonitor)
+	 */
+	@Override
+	public boolean deleteFolder(IResourceTree tree, IFolder folder, int updateFlags, IProgressMonitor monitor) {
+		return false;
+	}
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see
+	 * org.eclipse.core.resources.team.IMoveDeleteHook#deleteProject(org.eclipse
+	 * .core.resources.team.IResourceTree, org.eclipse.core.resources.IProject,
+	 * int, org.eclipse.core.runtime.IProgressMonitor)
+	 */
+	@Override
+	public boolean deleteProject(IResourceTree tree, IProject project, int updateFlags, IProgressMonitor monitor) {
+		if (HDFSFileSystem.SCHEME.equals(project.getLocationURI().getScheme())) {
+			// Deleting an HDFS project root folder *and* its contents is not
+			// supported. The caller has to uncheck the 'Delete project
+			// contents' checkbox.
+			if ((IResource.ALWAYS_DELETE_PROJECT_CONTENT & updateFlags) > 0) {
+				throw new RuntimeException(
+						"Deletion of the HDFS project root folder is not supported. To remove the project, uncheck the 'Delete project contents on disk' checkbox");
+			}
+		}
+		return false;
+	}
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see
+	 * org.eclipse.core.resources.team.IMoveDeleteHook#moveFile(org.eclipse.
+	 * core.resources.team.IResourceTree, org.eclipse.core.resources.IFile,
+	 * org.eclipse.core.resources.IFile, int,
+	 * org.eclipse.core.runtime.IProgressMonitor)
+	 */
+	@Override
+	public boolean moveFile(IResourceTree tree, IFile source, IFile destination, int updateFlags, IProgressMonitor monitor) {
+		return false;
+	}
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see
+	 * org.eclipse.core.resources.team.IMoveDeleteHook#moveFolder(org.eclipse
+	 * .core.resources.team.IResourceTree, org.eclipse.core.resources.IFolder,
+	 * org.eclipse.core.resources.IFolder, int,
+	 * org.eclipse.core.runtime.IProgressMonitor)
+	 */
+	@Override
+	public boolean moveFolder(IResourceTree tree, IFolder source, IFolder destination, int updateFlags, IProgressMonitor monitor) {
+		return false;
+	}
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see
+	 * org.eclipse.core.resources.team.IMoveDeleteHook#moveProject(org.eclipse
+	 * .core.resources.team.IResourceTree, org.eclipse.core.resources.IProject,
+	 * org.eclipse.core.resources.IProjectDescription, int,
+	 * org.eclipse.core.runtime.IProgressMonitor)
+	 */
+	@Override
+	public boolean moveProject(IResourceTree tree, IProject source, IProjectDescription description, int updateFlags, IProgressMonitor monitor) {
+		if (HDFSFileSystem.SCHEME.equals(source.getLocationURI().getScheme())) {
+			// Moving an HDFS project is not supported.
+			throw new RuntimeException("Moving an HDFS project root folder is not supported.");
+		}
+		return false;
+	}
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/63bec260/org.apache.hdt.core/src/org/apache/hdt/core/internal/hdfs/HDFSTeamRepositoryProvider.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.core/src/org/apache/hdt/core/internal/hdfs/HDFSTeamRepositoryProvider.java b/org.apache.hdt.core/src/org/apache/hdt/core/internal/hdfs/HDFSTeamRepositoryProvider.java
new file mode 100644
index 0000000..e09e456
--- /dev/null
+++ b/org.apache.hdt.core/src/org/apache/hdt/core/internal/hdfs/HDFSTeamRepositoryProvider.java
@@ -0,0 +1,41 @@
+package org.apache.hdt.core.internal.hdfs;
+
+import org.eclipse.core.resources.team.IMoveDeleteHook;
+import org.eclipse.core.runtime.CoreException;
+import org.eclipse.team.core.RepositoryProvider;
+
+public class HDFSTeamRepositoryProvider extends RepositoryProvider {
+
+	public static final String ID = "org.apache.hadoop.hdfs";
+	private HDFSMoveDeleteHook moveDeleteHook = new HDFSMoveDeleteHook();
+	
+	public HDFSTeamRepositoryProvider() {
+		// TODO Auto-generated constructor stub
+	}
+
+	@Override
+	public void deconfigure() throws CoreException {
+		// TODO Auto-generated method stub
+
+	}
+
+	@Override
+	public void configureProject() throws CoreException {
+		// TODO Auto-generated method stub
+
+	}
+
+	@Override
+	public String getID() {
+		return ID;
+	}
+	
+	/* (non-Javadoc)
+	 * @see org.eclipse.team.core.RepositoryProvider#getMoveDeleteHook()
+	 */
+	@Override
+	public IMoveDeleteHook getMoveDeleteHook() {
+		return moveDeleteHook;
+	}
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/63bec260/org.apache.hdt.core/src/org/apache/hdt/core/internal/hdfs/HDFSURI.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.core/src/org/apache/hdt/core/internal/hdfs/HDFSURI.java b/org.apache.hdt.core/src/org/apache/hdt/core/internal/hdfs/HDFSURI.java
new file mode 100644
index 0000000..e958646
--- /dev/null
+++ b/org.apache.hdt.core/src/org/apache/hdt/core/internal/hdfs/HDFSURI.java
@@ -0,0 +1,73 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hdt.core.internal.hdfs;
+
+import java.net.URI;
+import java.net.URISyntaxException;
+
+import org.eclipse.core.runtime.IPath;
+import org.eclipse.core.runtime.Path;
+import org.eclipse.core.runtime.URIUtil;
+
+/**
+ * 
+ * @author Srimanth Gunturi
+ */
+public class HDFSURI {
+	public static final String SCHEME = "hdfs";
+	private final URI uri;
+	private IPath path;
+
+	public HDFSURI(URI uri) {
+		this.uri = uri;
+		String pathString = uri.getPath();
+		path = new Path(pathString);
+	}
+
+	public HDFSURI append(String name) {
+		return new HDFSURI(URIUtil.append(uri, name));
+	}
+
+	public String lastSegment() {
+		return URIUtil.lastSegment(uri);
+	}
+
+	public HDFSURI removeLastSegment() throws URISyntaxException {
+		if (path.segmentCount() > 0) {
+			String parentPath = path.removeLastSegments(1).toString();
+			URI parentURI = new URI(uri.getScheme(), uri.getUserInfo(), uri.getHost(), uri.getPort(), parentPath, uri.getQuery(), uri.getFragment());
+			return new HDFSURI(parentURI);
+		}
+		return null;
+	}
+
+	public URI getURI() {
+		return uri;
+	}
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see java.lang.Object#toString()
+	 */
+	@Override
+	public String toString() {
+		return uri == null ? "null" : uri.toString();
+	}
+}
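
HDFSURI pairs a java.net.URI with an IPath so segment operations stay simple. A short sketch of the append/removeLastSegment round trip (the URI is a made-up example):

    import java.net.URI;
    import java.net.URISyntaxException;

    public class HDFSURISketch {
        public static void demo() throws URISyntaxException {
            HDFSURI base = new HDFSURI(URI.create("hdfs://namenode:8020/user/demo"));
            HDFSURI child = base.append("data.txt");    // .../user/demo/data.txt
            System.out.println(child.lastSegment());    // prints "data.txt"
            HDFSURI parent = child.removeLastSegment(); // back to .../user/demo
            System.out.println(parent.getURI());
        }
    }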

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/63bec260/org.apache.hdt.core/src/org/apache/hdt/core/internal/hdfs/HDFSUtilites.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.core/src/org/apache/hdt/core/internal/hdfs/HDFSUtilites.java b/org.apache.hdt.core/src/org/apache/hdt/core/internal/hdfs/HDFSUtilites.java
new file mode 100644
index 0000000..e2e387b
--- /dev/null
+++ b/org.apache.hdt.core/src/org/apache/hdt/core/internal/hdfs/HDFSUtilites.java
@@ -0,0 +1,86 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hdt.core.internal.hdfs;
+
+import java.text.DateFormat;
+import java.util.Date;
+
+import org.eclipse.core.filesystem.EFS;
+import org.eclipse.core.filesystem.IFileInfo;
+
+/**
+ * @author Srimanth Gunturi
+ * 
+ */
+public class HDFSUtilites {
+
+	public static String getDebugMessage(IFileInfo fi) {
+		if (fi != null) {
+			String lastMod = DateFormat.getDateTimeInstance(DateFormat.SHORT, DateFormat.LONG).format(new Date(fi.getLastModified()));
+			
+			String userPerms = "user(";
+			if (fi.getAttribute(EFS.ATTRIBUTE_OWNER_READ))
+				userPerms+="r";
+			else
+				userPerms+="-";
+			if (fi.getAttribute(EFS.ATTRIBUTE_OWNER_WRITE))
+				userPerms+="w";
+			else
+				userPerms+="-";
+			if (fi.getAttribute(EFS.ATTRIBUTE_OWNER_EXECUTE))
+				userPerms+="x";
+			else
+				userPerms+="-";
+			userPerms += ")";
+
+			String groupPerms = "group(";
+			if (fi.getAttribute(EFS.ATTRIBUTE_GROUP_READ))
+				groupPerms+="r";
+			else
+				groupPerms+="-";
+			if (fi.getAttribute(EFS.ATTRIBUTE_GROUP_WRITE))
+				groupPerms+="w";
+			else
+				groupPerms+="-";
+			if (fi.getAttribute(EFS.ATTRIBUTE_GROUP_EXECUTE))
+				groupPerms+="x";
+			else
+				groupPerms+="-";
+			groupPerms += ")";
+
+			String otherPerms = "other(";
+			if (fi.getAttribute(EFS.ATTRIBUTE_OTHER_READ))
+				otherPerms+="r";
+			else
+				otherPerms+="-";
+			if (fi.getAttribute(EFS.ATTRIBUTE_OTHER_WRITE))
+				otherPerms+="w";
+			else
+				otherPerms+="-";
+			if (fi.getAttribute(EFS.ATTRIBUTE_OTHER_EXECUTE))
+				otherPerms+="x";
+			else
+				otherPerms+="-";
+			otherPerms += ")";
+
+			return "Exists=" + fi.exists() + ", Length=" + fi.getLength() + ", LastMod=" + lastMod + ", "+userPerms+", "+groupPerms+", "+otherPerms;
+		}
+		return "null";
+	}
+}
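
For reference, the formatter above produces one line per file info; for a readable file with owner write access the output would look roughly like the following (the exact date rendering depends on the default locale):

    Exists=true, Length=1024, LastMod=7/25/13 4:29:23 AM GMT, user(rw-), group(r--), other(r--)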

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/63bec260/org.apache.hdt.core/src/org/apache/hdt/core/internal/hdfs/InterruptableHDFSClient.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.core/src/org/apache/hdt/core/internal/hdfs/InterruptableHDFSClient.java b/org.apache.hdt.core/src/org/apache/hdt/core/internal/hdfs/InterruptableHDFSClient.java
new file mode 100644
index 0000000..0301d5f
--- /dev/null
+++ b/org.apache.hdt.core/src/org/apache/hdt/core/internal/hdfs/InterruptableHDFSClient.java
@@ -0,0 +1,260 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hdt.core.internal.hdfs;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.net.URI;
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.hdt.core.hdfs.HDFSClient;
+import org.apache.hdt.core.hdfs.ResourceInformation;
+import org.apache.hdt.core.internal.model.HDFSServer;
+import org.apache.hdt.core.internal.model.ServerStatus;
+import org.apache.log4j.Logger;
+import org.eclipse.core.resources.IProject;
+import org.eclipse.core.resources.ResourcesPlugin;
+
+/**
+ * 
+ * @author Srimanth Gunturi
+ * 
+ */
+public class InterruptableHDFSClient extends HDFSClient {
+	private static final int DEFAULT_TIMEOUT = 5000;
+	private static final Logger logger = Logger.getLogger(InterruptableHDFSClient.class);
+	// private static ExecutorService threadPool =
+	// Executors.newFixedThreadPool(10);
+
+	private final HDFSClient client;
+	private final int timeoutMillis = DEFAULT_TIMEOUT;
+	private final String serverURI;
+
+	/**
+	 * @param serverURI
+	 * 
+	 */
+	public InterruptableHDFSClient(String serverURI, HDFSClient client) {
+		this.serverURI = serverURI;
+		this.client = client;
+	}
+
+	private interface CustomRunnable<V> {
+		V run() throws IOException, InterruptedException;
+	}
+
+	protected <T> T executeWithTimeout(final CustomRunnable<T> runnable) throws IOException, InterruptedException {
+		final List<T> data = new ArrayList<T>();
+		final IOException[] ioE = new IOException[1];
+		final InterruptedException[] inE = new InterruptedException[1];
+		Thread runnerThread = new Thread(new Runnable() {
+			public void run() {
+				try {
+					data.add(runnable.run());
+				} catch (IOException e) {
+					ioE[0] = e;
+				} catch (InterruptedException e) {
+					inE[0] = e;
+				}
+			}
+		});
+		boolean interrupted = false;
+		runnerThread.start();
+		runnerThread.join(timeoutMillis);
+		if (runnerThread.isAlive()) {
+			if(logger.isDebugEnabled())
+				logger.debug("executeWithTimeout(): Interrupting server call");
+			runnerThread.interrupt();
+			interrupted = true;
+		}
+		if (ioE[0] != null)
+			throw ioE[0];
+		if (inE[0] != null)
+			throw inE[0];
+		if (interrupted) {
+			// Tell HDFS manager that the server timed out
+			if(logger.isDebugEnabled())
+				logger.debug("executeWithTimeout(): Server timed out: "+serverURI);
+			HDFSServer server = HDFSManager.INSTANCE.getServer(serverURI);
+			String projectName = HDFSManager.INSTANCE.getProjectName(server);
+			IProject project = ResourcesPlugin.getWorkspace().getRoot().getProject(projectName);
+			HDFSManager.disconnectProject(project);
+			throw new InterruptedException();
+		}
+		if (data.size() > 0)
+			return data.get(0);
+		return null;
+	}
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see
+	 * org.apache.hdt.core.hdfs.HDFSClient#getDefaultUserAndGroupIds()
+	 */
+	@Override
+	public List<String> getDefaultUserAndGroupIds() throws IOException, InterruptedException {
+		return executeWithTimeout(new CustomRunnable<List<String>>() {
+			@Override
+			public List<String> run() throws IOException, InterruptedException {
+				return client.getDefaultUserAndGroupIds();
+			}
+		});
+	}
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see
+	 * org.apache.hdt.core.hdfs.HDFSClient#getResourceInformation(java
+	 * .net.URI, java.lang.String)
+	 */
+	@Override
+	public ResourceInformation getResourceInformation(final URI uri, final String user) throws IOException, InterruptedException {
+		return executeWithTimeout(new CustomRunnable<ResourceInformation>() {
+			@Override
+			public ResourceInformation run() throws IOException, InterruptedException {
+				return client.getResourceInformation(uri, user);
+			}
+		});
+	}
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see
+	 * org.apache.hdt.core.hdfs.HDFSClient#setResourceInformation(java
+	 * .net.URI, org.apache.hdt.core.hdfs.ResourceInformation,
+	 * java.lang.String)
+	 */
+	@Override
+	public void setResourceInformation(final URI uri, final ResourceInformation information, final String user) throws IOException, InterruptedException {
+		executeWithTimeout(new CustomRunnable<Object>() {
+			@Override
+			public Object run() throws IOException, InterruptedException {
+				client.setResourceInformation(uri, information, user);
+				return null;
+			}
+		});
+	}
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see
+	 * org.apache.hdt.core.hdfs.HDFSClient#listResources(java.net.URI,
+	 * java.lang.String)
+	 */
+	@Override
+	public List<ResourceInformation> listResources(final URI uri, final String user) throws IOException, InterruptedException {
+		return executeWithTimeout(new CustomRunnable<List<ResourceInformation>>() {
+			@Override
+			public List<ResourceInformation> run() throws IOException, InterruptedException {
+				return client.listResources(uri, user);
+			}
+		});
+	}
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see
+	 * org.apache.hdt.core.hdfs.HDFSClient#openInputStream(java.net.URI,
+	 * java.lang.String)
+	 */
+	@Override
+	public InputStream openInputStream(final URI uri, final String user) throws IOException, InterruptedException {
+		return executeWithTimeout(new CustomRunnable<InputStream>() {
+			@Override
+			public InputStream run() throws IOException, InterruptedException {
+				return client.openInputStream(uri, user);
+			}
+		});
+	}
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see org.apache.hdt.core.hdfs.HDFSClient#mkdirs(java.net.URI,
+	 * java.lang.String)
+	 */
+	@Override
+	public boolean mkdirs(final URI uri, final String user) throws IOException, InterruptedException {
+		return executeWithTimeout(new CustomRunnable<Boolean>() {
+			@Override
+			public Boolean run() throws IOException, InterruptedException {
+				return client.mkdirs(uri, user);
+			}
+		});
+	}
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see
+	 * org.apache.hdt.core.hdfs.HDFSClient#openOutputStream(java.net.URI,
+	 * java.lang.String)
+	 */
+	@Override
+	public OutputStream openOutputStream(final URI uri, final String user) throws IOException, InterruptedException {
+		return executeWithTimeout(new CustomRunnable<OutputStream>() {
+			@Override
+			public OutputStream run() throws IOException, InterruptedException {
+				return client.openOutputStream(uri, user);
+			}
+		});
+	}
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see
+	 * org.apache.hdt.core.hdfs.HDFSClient#createOutputStream(java.net
+	 * .URI, java.lang.String)
+	 */
+	@Override
+	public OutputStream createOutputStream(final URI uri, final String user) throws IOException, InterruptedException {
+		return executeWithTimeout(new CustomRunnable<OutputStream>() {
+			@Override
+			public OutputStream run() throws IOException, InterruptedException {
+				// Delegate to createOutputStream, not openOutputStream, so the
+				// remote file is created if it does not exist.
+				return client.createOutputStream(uri, user);
+			}
+		});
+	}
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see org.apache.hdt.core.hdfs.HDFSClient#delete(java.net.URI,
+	 * java.lang.String)
+	 */
+	@Override
+	public void delete(final URI uri, final String user) throws IOException, InterruptedException {
+		executeWithTimeout(new CustomRunnable<Object>() {
+			@Override
+			public Object run() throws IOException, InterruptedException {
+				client.delete(uri, user);
+				return null;
+			}
+		});
+	}
+
+}
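
The heart of this class is executeWithTimeout(): each delegate call runs on a fresh thread, the caller joins with a fixed 5-second timeout, and on expiry the worker is interrupted and the owning project disconnected. The same pattern, reduced to a self-contained sketch outside the HDT code base (the names here are illustrative, not part of the project):

    import java.io.IOException;
    import java.util.concurrent.atomic.AtomicReference;

    public class TimeoutCallSketch {
        interface Call<V> {
            V run() throws IOException, InterruptedException;
        }

        static <T> T callWithTimeout(final Call<T> call, long timeoutMillis)
                throws IOException, InterruptedException {
            final AtomicReference<T> result = new AtomicReference<T>();
            final AtomicReference<Exception> error = new AtomicReference<Exception>();
            Thread worker = new Thread(new Runnable() {
                public void run() {
                    try {
                        result.set(call.run());
                    } catch (Exception e) {
                        error.set(e);
                    }
                }
            });
            worker.start();
            worker.join(timeoutMillis); // wait at most timeoutMillis
            if (worker.isAlive()) {
                // Call overran its budget: interrupt it and report the timeout,
                // mirroring what executeWithTimeout() does before disconnecting.
                worker.interrupt();
                throw new InterruptedException("call timed out");
            }
            if (error.get() instanceof IOException)
                throw (IOException) error.get();
            if (error.get() instanceof InterruptedException)
                throw (InterruptedException) error.get();
            return result.get();
        }
    }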

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/63bec260/org.apache.hdt.core/src/org/apache/hdt/core/internal/hdfs/UploadFileJob.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.core/src/org/apache/hdt/core/internal/hdfs/UploadFileJob.java b/org.apache.hdt.core/src/org/apache/hdt/core/internal/hdfs/UploadFileJob.java
new file mode 100644
index 0000000..a369776
--- /dev/null
+++ b/org.apache.hdt.core/src/org/apache/hdt/core/internal/hdfs/UploadFileJob.java
@@ -0,0 +1,150 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hdt.core.internal.hdfs;
+
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.IOException;
+import java.io.OutputStream;
+import java.net.URI;
+
+import org.apache.hdt.core.Activator;
+import org.apache.log4j.Logger;
+import org.eclipse.core.filesystem.EFS;
+import org.eclipse.core.resources.IResource;
+import org.eclipse.core.runtime.CoreException;
+import org.eclipse.core.runtime.IProgressMonitor;
+import org.eclipse.core.runtime.IStatus;
+import org.eclipse.core.runtime.NullProgressMonitor;
+import org.eclipse.core.runtime.Status;
+import org.eclipse.core.runtime.jobs.Job;
+
+/**
+ * Background {@link Job} that uploads the locally cached copy of a workspace
+ * resource to HDFS, then deletes the local copy (and any emptied parent
+ * folders) once the upload succeeds.
+ * 
+ * @author Srimanth Gunturi
+ */
+public class UploadFileJob extends Job {
+
+	private final static Logger logger = Logger.getLogger(UploadFileJob.class);
+	private final HDFSFileStore store;
+	private final IResource resource;
+
+	/**
+	 * @param resource
+	 *            the workspace resource whose contents should be uploaded
+	 * @throws CoreException
+	 *             if no file store can be resolved for the resource location
+	 */
+	public UploadFileJob(IResource resource) throws CoreException {
+		super("Uploading " + resource.getLocationURI());
+		this.resource = resource;
+		this.store = (HDFSFileStore) EFS.getStore(resource.getLocationURI());
+	}
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see
+	 * org.eclipse.core.runtime.jobs.Job#run(org.eclipse.core.runtime
+	 * .IProgressMonitor)
+	 */
+	@Override
+	public IStatus run(IProgressMonitor monitor) {
+		IStatus status = Status.OK_STATUS;
+		if (store != null) {
+			URI uri = store.toURI();
+			try {
+				File localFile = store.getLocalFile();
+				if (logger.isDebugEnabled())
+					logger.debug("[" + uri + "]: Uploading from " + (localFile == null ? "(null)" : localFile.toString()));
+				HDFSManager.INSTANCE.startServerOperation(uri.toString());
+				if (localFile != null && localFile.exists()) {
+					boolean uploaded = false;
+					monitor.beginTask("Uploading " + localFile.getAbsolutePath(), (int) localFile.length());
+					FileInputStream fis = new FileInputStream(localFile);
+					OutputStream fos = null;
+					try {
+						fos = store.openRemoteOutputStream(EFS.NONE, new NullProgressMonitor());
+						if (!monitor.isCanceled()) {
+							byte[] data = new byte[8 * 1024];
+							int read = fis.read(data);
+							int totalRead = 0;
+							while (read > -1) {
+								if (monitor.isCanceled())
+									throw new InterruptedException();
+								fos.write(data, 0, read);
+								totalRead += read;
+								monitor.worked(read);
+								read = fis.read(data);
+								if (logger.isDebugEnabled())
+									logger.debug("Uploaded " + totalRead + " out of " + localFile.length() + " [" + (((float)totalRead*100.0f) / (float)localFile.length())
+											+ "]");
+							}
+							uploaded = true;
+						}
+					} catch (InterruptedException e) {
+						throw e;
+					} finally {
+						try {
+							fis.close();
+						} catch (Throwable t) {
+						}
+						try {
+							if (fos != null)
+								fos.close();
+						} catch (Throwable t) {
+						}
+						if (uploaded) {
+							// Delete parent folders if empty.
+							File parentFolder = localFile.getParentFile();
+							localFile.delete();
+							deleteFoldersIfEmpty(parentFolder);
+						}
+						monitor.done();
+					}
+				} else
+					status = new Status(IStatus.ERROR, Activator.BUNDLE_ID, "Local file not found [" + localFile + "]");
+				resource.refreshLocal(IResource.DEPTH_ONE, new NullProgressMonitor());
+			} catch (InterruptedException e) {
+				logger.debug("Uploading file [" + uri + "] cancelled by user");
+			} catch (IOException e) {
+				status = new Status(IStatus.ERROR, Activator.BUNDLE_ID, "Error uploading file " + uri, e);
+			} catch (CoreException e) {
+				status = new Status(IStatus.ERROR, Activator.BUNDLE_ID, e.getMessage(), e);
+			} finally {
+				HDFSManager.INSTANCE.stopServerOperation(uri.toString());
+			}
+		}
+		return status;
+	}
+
+	/**
+	 * Attempts to delete the provided folder and each of its ancestors, as
+	 * long as they are empty.
+	 * 
+	 * @param folder
+	 *            the deepest folder to try deleting first
+	 */
+	public static void deleteFoldersIfEmpty(File folder) {
+		File toDeleteFolder = folder;
+		String[] children = toDeleteFolder.list();
+		while (children == null || children.length < 1) {
+			// Folder is empty (or no longer listable); delete it and move up.
+			File parentFolder = toDeleteFolder.getParentFile();
+			toDeleteFolder.delete();
+			if (parentFolder == null)
+				break; // Reached the filesystem root; stop.
+			toDeleteFolder = parentFolder;
+			children = toDeleteFolder.list();
+		}
+	}
+}
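
A caller typically constructs the job and hands it to the Eclipse jobs
framework rather than running it inline; a minimal usage sketch (the
priority choice here is illustrative, not taken from this patch):

    // Hypothetical caller: schedule an upload for an HDFS-backed resource.
    static void scheduleUpload(IResource resource) {
        try {
            UploadFileJob job = new UploadFileJob(resource);
            job.setPriority(Job.LONG); // long-running background work
            job.schedule();
        } catch (CoreException e) {
            // EFS.getStore(...) failed: the resource is not HDFS-backed.
            logger.error("Could not create upload job for " + resource, e);
        }
    }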

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/63bec260/org.apache.hdt.core/src/org/apache/hdt/core/internal/model/HDFSServer.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.core/src/org/apache/hdt/core/internal/model/HDFSServer.java b/org.apache.hdt.core/src/org/apache/hdt/core/internal/model/HDFSServer.java
new file mode 100644
index 0000000..be04f74
--- /dev/null
+++ b/org.apache.hdt.core/src/org/apache/hdt/core/internal/model/HDFSServer.java
@@ -0,0 +1,127 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *  
+ */
+package org.apache.hdt.core.internal.model;
+
+import org.eclipse.emf.common.util.EList;
+import org.eclipse.emf.ecore.EObject;
+
+/**
+ * <!-- begin-user-doc -->
+ * A representation of the model object '<em><b>HDFS Server</b></em>'.
+ * <!-- end-user-doc -->
+ *
+ * <p>
+ * The following features are supported:
+ * <ul>
+ *   <li>{@link org.apache.hdt.core.internal.model.HDFSServer#isLoaded <em>Loaded</em>}</li>
+ *   <li>{@link org.apache.hdt.core.internal.model.HDFSServer#getOperationURIs <em>Operation URIs</em>}</li>
+ *   <li>{@link org.apache.hdt.core.internal.model.HDFSServer#getUserId <em>User Id</em>}</li>
+ *   <li>{@link org.apache.hdt.core.internal.model.HDFSServer#getGroupIds <em>Group Ids</em>}</li>
+ * </ul>
+ * </p>
+ *
+ * @see org.apache.hdt.core.internal.model.HadoopPackage#getHDFSServer()
+ * @model
+ * @generated
+ */
+public interface HDFSServer extends Server {
+	/**
+	 * Returns the value of the '<em><b>Loaded</b></em>' attribute.
+	 * <!-- begin-user-doc -->
+	 * <p>
+	 * If the meaning of the '<em>Loaded</em>' attribute isn't clear,
+	 * there really should be more of a description here...
+	 * </p>
+	 * <!-- end-user-doc -->
+	 * @return the value of the '<em>Loaded</em>' attribute.
+	 * @see #setLoaded(boolean)
+	 * @see org.apache.hdt.core.internal.model.HadoopPackage#getHDFSServer_Loaded()
+	 * @model
+	 * @generated
+	 */
+	boolean isLoaded();
+
+	/**
+	 * Sets the value of the '{@link org.apache.hdt.core.internal.model.HDFSServer#isLoaded <em>Loaded</em>}' attribute.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @param value the new value of the '<em>Loaded</em>' attribute.
+	 * @see #isLoaded()
+	 * @generated
+	 */
+	void setLoaded(boolean value);
+
+	/**
+	 * Returns the value of the '<em><b>Operation URIs</b></em>' attribute list.
+	 * The list contents are of type {@link java.lang.String}.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * <!-- begin-model-doc -->
+	 * List of HDFS URIs on which operations are currently being performed.
+	 * <!-- end-model-doc -->
+	 * @return the value of the '<em>Operation URIs</em>' attribute list.
+	 * @see org.apache.hdt.core.internal.model.HadoopPackage#getHDFSServer_OperationURIs()
+	 * @model transient="true"
+	 * @generated
+	 */
+	EList<String> getOperationURIs();
+
+	/**
+	 * Returns the value of the '<em><b>User Id</b></em>' attribute.
+	 * <!-- begin-user-doc -->
+	 * <p>
+	 * If the meaning of the '<em>User Id</em>' attribute isn't clear,
+	 * there really should be more of a description here...
+	 * </p>
+	 * <!-- end-user-doc -->
+	 * @return the value of the '<em>User Id</em>' attribute.
+	 * @see #setUserId(String)
+	 * @see org.apache.hdt.core.internal.model.HadoopPackage#getHDFSServer_UserId()
+	 * @model
+	 * @generated
+	 */
+	String getUserId();
+
+	/**
+	 * Sets the value of the '{@link org.apache.hdt.core.internal.model.HDFSServer#getUserId <em>User Id</em>}' attribute.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @param value the new value of the '<em>User Id</em>' attribute.
+	 * @see #getUserId()
+	 * @generated
+	 */
+	void setUserId(String value);
+
+	/**
+	 * Returns the value of the '<em><b>Group Ids</b></em>' attribute list.
+	 * The list contents are of type {@link java.lang.String}.
+	 * <!-- begin-user-doc -->
+	 * <p>
+	 * If the meaning of the '<em>Group Ids</em>' attribute list isn't clear,
+	 * there really should be more of a description here...
+	 * </p>
+	 * <!-- end-user-doc -->
+	 * @return the value of the '<em>Group Ids</em>' attribute list.
+	 * @see org.apache.hdt.core.internal.model.HadoopPackage#getHDFSServer_GroupIds()
+	 * @model
+	 * @generated
+	 */
+	EList<String> getGroupIds();
+
+} // HDFSServer
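
An illustrative fragment exercising these generated accessors (the
default-user fallback is an assumption, not behavior defined by this model):

    // Illustrative only: record an in-flight operation on a server model.
    void recordOperation(HDFSServer server, String operationUri) {
        server.getOperationURIs().add(operationUri); // transient list of active URIs
        if (server.getUserId() == null)
            server.setUserId(System.getProperty("user.name")); // assumed fallback
        server.setLoaded(true);
    }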

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/63bec260/org.apache.hdt.core/src/org/apache/hdt/core/internal/model/HadoopFactory.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.core/src/org/apache/hdt/core/internal/model/HadoopFactory.java b/org.apache.hdt.core/src/org/apache/hdt/core/internal/model/HadoopFactory.java
new file mode 100644
index 0000000..bb79ecc
--- /dev/null
+++ b/org.apache.hdt.core/src/org/apache/hdt/core/internal/model/HadoopFactory.java
@@ -0,0 +1,85 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *  
+ */
+package org.apache.hdt.core.internal.model;
+
+import org.eclipse.emf.ecore.EFactory;
+
+/**
+ * <!-- begin-user-doc -->
+ * The <b>Factory</b> for the model.
+ * It provides a create method for each non-abstract class of the model.
+ * <!-- end-user-doc -->
+ * @see org.apache.hdt.core.internal.model.HadoopPackage
+ * @generated
+ */
+public interface HadoopFactory extends EFactory {
+	/**
+	 * The singleton instance of the factory.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	HadoopFactory eINSTANCE = org.apache.hdt.core.internal.model.impl.HadoopFactoryImpl.init();
+
+	/**
+	 * Returns a new object of class '<em>HDFS Server</em>'.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @return a new object of class '<em>HDFS Server</em>'.
+	 * @generated
+	 */
+	HDFSServer createHDFSServer();
+
+	/**
+	 * Returns a new object of class '<em>Servers</em>'.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @return a new object of class '<em>Servers</em>'.
+	 * @generated
+	 */
+	Servers createServers();
+
+	/**
+	 * Returns a new object of class '<em>Zoo Keeper Server</em>'.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @return a new object of class '<em>Zoo Keeper Server</em>'.
+	 * @generated
+	 */
+	ZooKeeperServer createZooKeeperServer();
+
+	/**
+	 * Returns a new object of class '<em>ZNode</em>'.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @return a new object of class '<em>ZNode</em>'.
+	 * @generated
+	 */
+	ZNode createZNode();
+
+	/**
+	 * Returns the package supported by this factory.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @return the package supported by this factory.
+	 * @generated
+	 */
+	HadoopPackage getHadoopPackage();
+
+} //HadoopFactory
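
As with any EMF model, instances are created through the eINSTANCE singleton
rather than by direct construction; a minimal sketch (the user id is a
placeholder):

    // Illustrative only: create model objects via the generated factory.
    HDFSServer server = HadoopFactory.eINSTANCE.createHDFSServer();
    server.setUserId("hdfs"); // placeholder user id
    ZooKeeperServer zkServer = HadoopFactory.eINSTANCE.createZooKeeperServer();
    ZNode root = HadoopFactory.eINSTANCE.createZNode();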

