hdt-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From rsha...@apache.org
Subject [4/8] HDT-32: Merge the code base of Hadoop-Eclipse project into HDT. Contributed by Srimanth Gunturi
Date Thu, 25 Jul 2013 04:29:20 GMT
http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/63bec260/org.apache.hdt.core/src/org/apache/hdt/core/internal/model/impl/ZooKeeperServerImpl.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.core/src/org/apache/hdt/core/internal/model/impl/ZooKeeperServerImpl.java b/org.apache.hdt.core/src/org/apache/hdt/core/internal/model/impl/ZooKeeperServerImpl.java
new file mode 100644
index 0000000..d09bc18
--- /dev/null
+++ b/org.apache.hdt.core/src/org/apache/hdt/core/internal/model/impl/ZooKeeperServerImpl.java
@@ -0,0 +1,1109 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *  
+ */
+package org.apache.hdt.core.internal.model.impl;
+
+import java.util.Collection;
+
+import org.apache.hdt.core.internal.model.HadoopPackage;
+import org.apache.hdt.core.internal.model.ZNode;
+import org.apache.hdt.core.internal.model.ZNodeType;
+import org.apache.hdt.core.internal.model.ZooKeeperServer;
+
+import org.eclipse.emf.common.notify.Notification;
+import org.eclipse.emf.common.notify.NotificationChain;
+import org.eclipse.emf.common.util.EList;
+import org.eclipse.emf.ecore.EClass;
+import org.eclipse.emf.ecore.InternalEObject;
+import org.eclipse.emf.ecore.impl.ENotificationImpl;
+import org.eclipse.emf.ecore.util.EObjectContainmentEList;
+import org.eclipse.emf.ecore.util.InternalEList;
+import org.eclipse.emf.ecore.util.EObjectResolvingEList;
+
+/**
+ * <!-- begin-user-doc -->
+ * An implementation of the model object '<em><b>Zoo Keeper Server</b></em>'.
+ * <!-- end-user-doc -->
+ * <p>
+ * The following features are implemented:
+ * <ul>
+ *   <li>{@link org.apache.hdt.core.internal.model.impl.ZooKeeperServerImpl#getChildren <em>Children</em>}</li>
+ *   <li>{@link org.apache.hdt.core.internal.model.impl.ZooKeeperServerImpl#getLastRefresh <em>Last Refresh</em>}</li>
+ *   <li>{@link org.apache.hdt.core.internal.model.impl.ZooKeeperServerImpl#isRefreshing <em>Refreshing</em>}</li>
+ *   <li>{@link org.apache.hdt.core.internal.model.impl.ZooKeeperServerImpl#isEphermeral <em>Ephermeral</em>}</li>
+ *   <li>{@link org.apache.hdt.core.internal.model.impl.ZooKeeperServerImpl#getCreationId <em>Creation Id</em>}</li>
+ *   <li>{@link org.apache.hdt.core.internal.model.impl.ZooKeeperServerImpl#getModifiedId <em>Modified Id</em>}</li>
+ *   <li>{@link org.apache.hdt.core.internal.model.impl.ZooKeeperServerImpl#getCreationTime <em>Creation Time</em>}</li>
+ *   <li>{@link org.apache.hdt.core.internal.model.impl.ZooKeeperServerImpl#getModifiedTime <em>Modified Time</em>}</li>
+ *   <li>{@link org.apache.hdt.core.internal.model.impl.ZooKeeperServerImpl#getVersion <em>Version</em>}</li>
+ *   <li>{@link org.apache.hdt.core.internal.model.impl.ZooKeeperServerImpl#getChildrenVersion <em>Children Version</em>}</li>
+ *   <li>{@link org.apache.hdt.core.internal.model.impl.ZooKeeperServerImpl#getAclVersion <em>Acl Version</em>}</li>
+ *   <li>{@link org.apache.hdt.core.internal.model.impl.ZooKeeperServerImpl#getEphermalOwnerSessionId <em>Ephermal Owner Session Id</em>}</li>
+ *   <li>{@link org.apache.hdt.core.internal.model.impl.ZooKeeperServerImpl#getDataLength <em>Data Length</em>}</li>
+ *   <li>{@link org.apache.hdt.core.internal.model.impl.ZooKeeperServerImpl#getChildrenCount <em>Children Count</em>}</li>
+ *   <li>{@link org.apache.hdt.core.internal.model.impl.ZooKeeperServerImpl#getParent <em>Parent</em>}</li>
+ *   <li>{@link org.apache.hdt.core.internal.model.impl.ZooKeeperServerImpl#getNodeName <em>Node Name</em>}</li>
+ *   <li>{@link org.apache.hdt.core.internal.model.impl.ZooKeeperServerImpl#isSequential <em>Sequential</em>}</li>
+ * </ul>
+ * </p>
+ *
+ * @generated
+ */
+public class ZooKeeperServerImpl extends ServerImpl implements ZooKeeperServer {
+	// The fields below cache the ZNode-derived features listed in the class Javadoc.
+	// NOTE(review): the -1 / false / null defaults presumably mean "not yet read from
+	// the ZooKeeper server" — confirm against the refresh logic that populates them.
+	/**
+	 * The cached value of the '{@link #getChildren() <em>Children</em>}' containment reference list.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @see #getChildren()
+	 * @generated
+	 * @ordered
+	 */
+	protected EList<ZNode> children;
+	/**
+	 * The default value of the '{@link #getLastRefresh() <em>Last Refresh</em>}' attribute.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @see #getLastRefresh()
+	 * @generated
+	 * @ordered
+	 */
+	protected static final long LAST_REFRESH_EDEFAULT = -1L;
+	/**
+	 * The cached value of the '{@link #getLastRefresh() <em>Last Refresh</em>}' attribute.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @see #getLastRefresh()
+	 * @generated
+	 * @ordered
+	 */
+	protected long lastRefresh = LAST_REFRESH_EDEFAULT;
+	/**
+	 * The default value of the '{@link #isRefreshing() <em>Refreshing</em>}' attribute.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @see #isRefreshing()
+	 * @generated
+	 * @ordered
+	 */
+	protected static final boolean REFRESHING_EDEFAULT = false;
+	/**
+	 * The cached value of the '{@link #isRefreshing() <em>Refreshing</em>}' attribute.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @see #isRefreshing()
+	 * @generated
+	 * @ordered
+	 */
+	protected boolean refreshing = REFRESHING_EDEFAULT;
+	/**
+	 * The default value of the '{@link #isEphermeral() <em>Ephermeral</em>}' attribute.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @see #isEphermeral()
+	 * @generated
+	 * @ordered
+	 */
+	protected static final boolean EPHERMERAL_EDEFAULT = false;
+	/**
+	 * The cached value of the '{@link #isEphermeral() <em>Ephermeral</em>}' attribute.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @see #isEphermeral()
+	 * @generated
+	 * @ordered
+	 */
+	protected boolean ephermeral = EPHERMERAL_EDEFAULT;
+	/**
+	 * The default value of the '{@link #getCreationId() <em>Creation Id</em>}' attribute.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @see #getCreationId()
+	 * @generated
+	 * @ordered
+	 */
+	protected static final long CREATION_ID_EDEFAULT = -1L;
+	/**
+	 * The cached value of the '{@link #getCreationId() <em>Creation Id</em>}' attribute.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @see #getCreationId()
+	 * @generated
+	 * @ordered
+	 */
+	protected long creationId = CREATION_ID_EDEFAULT;
+	/**
+	 * The default value of the '{@link #getModifiedId() <em>Modified Id</em>}' attribute.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @see #getModifiedId()
+	 * @generated
+	 * @ordered
+	 */
+	protected static final long MODIFIED_ID_EDEFAULT = -1L;
+	/**
+	 * The cached value of the '{@link #getModifiedId() <em>Modified Id</em>}' attribute.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @see #getModifiedId()
+	 * @generated
+	 * @ordered
+	 */
+	protected long modifiedId = MODIFIED_ID_EDEFAULT;
+	/**
+	 * The default value of the '{@link #getCreationTime() <em>Creation Time</em>}' attribute.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @see #getCreationTime()
+	 * @generated
+	 * @ordered
+	 */
+	protected static final long CREATION_TIME_EDEFAULT = -1L;
+	/**
+	 * The cached value of the '{@link #getCreationTime() <em>Creation Time</em>}' attribute.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @see #getCreationTime()
+	 * @generated
+	 * @ordered
+	 */
+	protected long creationTime = CREATION_TIME_EDEFAULT;
+	/**
+	 * The default value of the '{@link #getModifiedTime() <em>Modified Time</em>}' attribute.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @see #getModifiedTime()
+	 * @generated
+	 * @ordered
+	 */
+	protected static final long MODIFIED_TIME_EDEFAULT = -1L;
+	/**
+	 * The cached value of the '{@link #getModifiedTime() <em>Modified Time</em>}' attribute.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @see #getModifiedTime()
+	 * @generated
+	 * @ordered
+	 */
+	protected long modifiedTime = MODIFIED_TIME_EDEFAULT;
+	/**
+	 * The default value of the '{@link #getVersion() <em>Version</em>}' attribute.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @see #getVersion()
+	 * @generated
+	 * @ordered
+	 */
+	protected static final int VERSION_EDEFAULT = -1;
+	/**
+	 * The cached value of the '{@link #getVersion() <em>Version</em>}' attribute.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @see #getVersion()
+	 * @generated
+	 * @ordered
+	 */
+	protected int version = VERSION_EDEFAULT;
+	/**
+	 * The default value of the '{@link #getChildrenVersion() <em>Children Version</em>}' attribute.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @see #getChildrenVersion()
+	 * @generated
+	 * @ordered
+	 */
+	protected static final int CHILDREN_VERSION_EDEFAULT = -1;
+	/**
+	 * The cached value of the '{@link #getChildrenVersion() <em>Children Version</em>}' attribute.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @see #getChildrenVersion()
+	 * @generated
+	 * @ordered
+	 */
+	protected int childrenVersion = CHILDREN_VERSION_EDEFAULT;
+	/**
+	 * The default value of the '{@link #getAclVersion() <em>Acl Version</em>}' attribute.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @see #getAclVersion()
+	 * @generated
+	 * @ordered
+	 */
+	protected static final int ACL_VERSION_EDEFAULT = -1;
+	/**
+	 * The cached value of the '{@link #getAclVersion() <em>Acl Version</em>}' attribute.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @see #getAclVersion()
+	 * @generated
+	 * @ordered
+	 */
+	protected int aclVersion = ACL_VERSION_EDEFAULT;
+	/**
+	 * The default value of the '{@link #getEphermalOwnerSessionId() <em>Ephermal Owner Session Id</em>}' attribute.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @see #getEphermalOwnerSessionId()
+	 * @generated
+	 * @ordered
+	 */
+	protected static final long EPHERMAL_OWNER_SESSION_ID_EDEFAULT = -1L;
+	/**
+	 * The cached value of the '{@link #getEphermalOwnerSessionId() <em>Ephermal Owner Session Id</em>}' attribute.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @see #getEphermalOwnerSessionId()
+	 * @generated
+	 * @ordered
+	 */
+	protected long ephermalOwnerSessionId = EPHERMAL_OWNER_SESSION_ID_EDEFAULT;
+	/**
+	 * The default value of the '{@link #getDataLength() <em>Data Length</em>}' attribute.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @see #getDataLength()
+	 * @generated
+	 * @ordered
+	 */
+	protected static final int DATA_LENGTH_EDEFAULT = -1;
+	/**
+	 * The cached value of the '{@link #getDataLength() <em>Data Length</em>}' attribute.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @see #getDataLength()
+	 * @generated
+	 * @ordered
+	 */
+	protected int dataLength = DATA_LENGTH_EDEFAULT;
+	/**
+	 * The default value of the '{@link #getChildrenCount() <em>Children Count</em>}' attribute.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @see #getChildrenCount()
+	 * @generated
+	 * @ordered
+	 */
+	protected static final int CHILDREN_COUNT_EDEFAULT = 0;
+	/**
+	 * The cached value of the '{@link #getChildrenCount() <em>Children Count</em>}' attribute.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @see #getChildrenCount()
+	 * @generated
+	 * @ordered
+	 */
+	protected int childrenCount = CHILDREN_COUNT_EDEFAULT;
+	/**
+	 * The cached value of the '{@link #getParent() <em>Parent</em>}' reference.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @see #getParent()
+	 * @generated
+	 * @ordered
+	 */
+	protected ZNode parent;
+	/**
+	 * The default value of the '{@link #getNodeName() <em>Node Name</em>}' attribute.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @see #getNodeName()
+	 * @generated
+	 * @ordered
+	 */
+	protected static final String NODE_NAME_EDEFAULT = null;
+	/**
+	 * The cached value of the '{@link #getNodeName() <em>Node Name</em>}' attribute.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @see #getNodeName()
+	 * @generated
+	 * @ordered
+	 */
+	protected String nodeName = NODE_NAME_EDEFAULT;
+	/**
+	 * The default value of the '{@link #isSequential() <em>Sequential</em>}' attribute.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @see #isSequential()
+	 * @generated
+	 * @ordered
+	 */
+	protected static final boolean SEQUENTIAL_EDEFAULT = false;
+	/**
+	 * The cached value of the '{@link #isSequential() <em>Sequential</em>}' attribute.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @see #isSequential()
+	 * @generated
+	 * @ordered
+	 */
+	protected boolean sequential = SEQUENTIAL_EDEFAULT;
+	/**
+	 * <!-- begin-user-doc -->
+	 * Protected default constructor; instances are created via the generated factory.
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	protected ZooKeeperServerImpl() {
+		super();
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * Identifies this object's EClass within the Hadoop model package.
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	@Override
+	protected EClass eStaticClass() {
+		return HadoopPackage.Literals.ZOO_KEEPER_SERVER;
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * Lazily creates the containment list of child {@link ZNode}s on first access;
+	 * never returns null.
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	public EList<ZNode> getChildren() {
+		if (children == null) {
+			children = new EObjectContainmentEList<ZNode>(ZNode.class, this, HadoopPackage.ZOO_KEEPER_SERVER__CHILDREN);
+		}
+		return children;
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * Returns the cached {@code lastRefresh} value ({@code -1L} by default).
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	public long getLastRefresh() {
+		return lastRefresh;
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * Sets {@code lastRefresh} and emits a SET notification to adapters when required.
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	public void setLastRefresh(long newLastRefresh) {
+		long oldLastRefresh = lastRefresh;
+		lastRefresh = newLastRefresh;
+		if (eNotificationRequired())
+			eNotify(new ENotificationImpl(this, Notification.SET, HadoopPackage.ZOO_KEEPER_SERVER__LAST_REFRESH, oldLastRefresh, lastRefresh));
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * Returns the cached {@code refreshing} flag ({@code false} by default).
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	public boolean isRefreshing() {
+		return refreshing;
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * Sets {@code refreshing} and emits a SET notification to adapters when required.
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	public void setRefreshing(boolean newRefreshing) {
+		boolean oldRefreshing = refreshing;
+		refreshing = newRefreshing;
+		if (eNotificationRequired())
+			eNotify(new ENotificationImpl(this, Notification.SET, HadoopPackage.ZOO_KEEPER_SERVER__REFRESHING, oldRefreshing, refreshing));
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * Returns the cached {@code ephermeral} flag ({@code false} by default).
+	 * (Spelling follows the generated model's feature name.)
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	public boolean isEphermeral() {
+		return ephermeral;
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * Sets {@code ephermeral} and emits a SET notification to adapters when required.
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	public void setEphermeral(boolean newEphermeral) {
+		boolean oldEphermeral = ephermeral;
+		ephermeral = newEphermeral;
+		if (eNotificationRequired())
+			eNotify(new ENotificationImpl(this, Notification.SET, HadoopPackage.ZOO_KEEPER_SERVER__EPHERMERAL, oldEphermeral, ephermeral));
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * Returns the cached {@code creationId} value ({@code -1L} by default).
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	public long getCreationId() {
+		return creationId;
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * Sets {@code creationId} and emits a SET notification to adapters when required.
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	public void setCreationId(long newCreationId) {
+		long oldCreationId = creationId;
+		creationId = newCreationId;
+		if (eNotificationRequired())
+			eNotify(new ENotificationImpl(this, Notification.SET, HadoopPackage.ZOO_KEEPER_SERVER__CREATION_ID, oldCreationId, creationId));
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * Returns the cached {@code modifiedId} value ({@code -1L} by default).
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	public long getModifiedId() {
+		return modifiedId;
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * Sets {@code modifiedId} and emits a SET notification to adapters when required.
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	public void setModifiedId(long newModifiedId) {
+		long oldModifiedId = modifiedId;
+		modifiedId = newModifiedId;
+		if (eNotificationRequired())
+			eNotify(new ENotificationImpl(this, Notification.SET, HadoopPackage.ZOO_KEEPER_SERVER__MODIFIED_ID, oldModifiedId, modifiedId));
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * Returns the cached {@code creationTime} value ({@code -1L} by default).
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	public long getCreationTime() {
+		return creationTime;
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * Sets {@code creationTime} and emits a SET notification to adapters when required.
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	public void setCreationTime(long newCreationTime) {
+		long oldCreationTime = creationTime;
+		creationTime = newCreationTime;
+		if (eNotificationRequired())
+			eNotify(new ENotificationImpl(this, Notification.SET, HadoopPackage.ZOO_KEEPER_SERVER__CREATION_TIME, oldCreationTime, creationTime));
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * Returns the cached {@code modifiedTime} value ({@code -1L} by default).
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	public long getModifiedTime() {
+		return modifiedTime;
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * Sets {@code modifiedTime} and emits a SET notification to adapters when required.
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	public void setModifiedTime(long newModifiedTime) {
+		long oldModifiedTime = modifiedTime;
+		modifiedTime = newModifiedTime;
+		if (eNotificationRequired())
+			eNotify(new ENotificationImpl(this, Notification.SET, HadoopPackage.ZOO_KEEPER_SERVER__MODIFIED_TIME, oldModifiedTime, modifiedTime));
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * Returns the cached {@code version} value ({@code -1} by default).
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	public int getVersion() {
+		return version;
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * Sets {@code version} and emits a SET notification to adapters when required.
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	public void setVersion(int newVersion) {
+		int oldVersion = version;
+		version = newVersion;
+		if (eNotificationRequired())
+			eNotify(new ENotificationImpl(this, Notification.SET, HadoopPackage.ZOO_KEEPER_SERVER__VERSION, oldVersion, version));
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * Returns the cached {@code childrenVersion} value ({@code -1} by default).
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	public int getChildrenVersion() {
+		return childrenVersion;
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * Sets {@code childrenVersion} and emits a SET notification to adapters when required.
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	public void setChildrenVersion(int newChildrenVersion) {
+		int oldChildrenVersion = childrenVersion;
+		childrenVersion = newChildrenVersion;
+		if (eNotificationRequired())
+			eNotify(new ENotificationImpl(this, Notification.SET, HadoopPackage.ZOO_KEEPER_SERVER__CHILDREN_VERSION, oldChildrenVersion, childrenVersion));
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * Returns the cached {@code aclVersion} value ({@code -1} by default).
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	public int getAclVersion() {
+		return aclVersion;
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * Sets {@code aclVersion} and emits a SET notification to adapters when required.
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	public void setAclVersion(int newAclVersion) {
+		int oldAclVersion = aclVersion;
+		aclVersion = newAclVersion;
+		if (eNotificationRequired())
+			eNotify(new ENotificationImpl(this, Notification.SET, HadoopPackage.ZOO_KEEPER_SERVER__ACL_VERSION, oldAclVersion, aclVersion));
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * Returns the cached {@code ephermalOwnerSessionId} value ({@code -1L} by default).
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	public long getEphermalOwnerSessionId() {
+		return ephermalOwnerSessionId;
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * Sets {@code ephermalOwnerSessionId} and emits a SET notification to adapters when required.
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	public void setEphermalOwnerSessionId(long newEphermalOwnerSessionId) {
+		long oldEphermalOwnerSessionId = ephermalOwnerSessionId;
+		ephermalOwnerSessionId = newEphermalOwnerSessionId;
+		if (eNotificationRequired())
+			eNotify(new ENotificationImpl(this, Notification.SET, HadoopPackage.ZOO_KEEPER_SERVER__EPHERMAL_OWNER_SESSION_ID, oldEphermalOwnerSessionId, ephermalOwnerSessionId));
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * Returns the cached {@code dataLength} value ({@code -1} by default).
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	public int getDataLength() {
+		return dataLength;
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * Sets {@code dataLength} and emits a SET notification to adapters when required.
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	public void setDataLength(int newDataLength) {
+		int oldDataLength = dataLength;
+		dataLength = newDataLength;
+		if (eNotificationRequired())
+			eNotify(new ENotificationImpl(this, Notification.SET, HadoopPackage.ZOO_KEEPER_SERVER__DATA_LENGTH, oldDataLength, dataLength));
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * Returns the cached {@code childrenCount} value ({@code 0} by default).
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	public int getChildrenCount() {
+		return childrenCount;
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * Sets {@code childrenCount} and emits a SET notification to adapters when required.
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	public void setChildrenCount(int newChildrenCount) {
+		int oldChildrenCount = childrenCount;
+		childrenCount = newChildrenCount;
+		if (eNotificationRequired())
+			eNotify(new ENotificationImpl(this, Notification.SET, HadoopPackage.ZOO_KEEPER_SERVER__CHILDREN_COUNT, oldChildrenCount, childrenCount));
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * Returns the parent {@link ZNode}, resolving a cross-resource proxy first if
+	 * necessary; a successful resolution emits a RESOLVE notification.
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	public ZNode getParent() {
+		if (parent != null && parent.eIsProxy()) {
+			InternalEObject oldParent = (InternalEObject)parent;
+			parent = (ZNode)eResolveProxy(oldParent);
+			if (parent != oldParent) {
+				if (eNotificationRequired())
+					eNotify(new ENotificationImpl(this, Notification.RESOLVE, HadoopPackage.ZOO_KEEPER_SERVER__PARENT, oldParent, parent));
+			}
+		}
+		return parent;
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * Returns the raw cached parent reference without proxy resolution.
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	public ZNode basicGetParent() {
+		return parent;
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * Sets {@code parent} and emits a SET notification to adapters when required.
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	public void setParent(ZNode newParent) {
+		ZNode oldParent = parent;
+		parent = newParent;
+		if (eNotificationRequired())
+			eNotify(new ENotificationImpl(this, Notification.SET, HadoopPackage.ZOO_KEEPER_SERVER__PARENT, oldParent, parent));
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * Returns the cached {@code nodeName} value ({@code null} by default).
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	public String getNodeName() {
+		return nodeName;
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * Sets {@code nodeName} and emits a SET notification to adapters when required.
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	public void setNodeName(String newNodeName) {
+		String oldNodeName = nodeName;
+		nodeName = newNodeName;
+		if (eNotificationRequired())
+			eNotify(new ENotificationImpl(this, Notification.SET, HadoopPackage.ZOO_KEEPER_SERVER__NODE_NAME, oldNodeName, nodeName));
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * Returns the cached {@code sequential} flag ({@code false} by default).
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	public boolean isSequential() {
+		return sequential;
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * Sets {@code sequential} and emits a SET notification to adapters when required.
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	public void setSequential(boolean newSequential) {
+		boolean oldSequential = sequential;
+		sequential = newSequential;
+		if (eNotificationRequired())
+			eNotify(new ENotificationImpl(this, Notification.SET, HadoopPackage.ZOO_KEEPER_SERVER__SEQUENTIAL, oldSequential, sequential));
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * Walks up the parent chain to the owning ZooKeeperServer.
+	 * NOTE(review): this class implements ZooKeeperServer, so the instanceof check
+	 * below is always true here and the else branch is unreachable in this class;
+	 * the body appears to be the shared generated implementation used by all ZNode
+	 * subtypes — confirm before simplifying.
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	public ZooKeeperServer getServer() {
+		if(this instanceof org.apache.hdt.core.internal.model.ZooKeeperServer)
+					return (org.apache.hdt.core.internal.model.ZooKeeperServer) this;
+				else
+					return getParent().getServer();
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * Builds the absolute ZooKeeper path of this node.
+	 * NOTE(review): as with getServer(), the instanceof is always true for this
+	 * class, so this implementation always returns "/" (the server is the root);
+	 * the else branch serves other ZNode subtypes sharing this generated body.
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	public String getPath() {
+		if (this instanceof org.apache.hdt.core.internal.model.ZooKeeperServer)
+			return "/";
+		else {
+			String parentPath = getParent().getPath();
+			return parentPath.endsWith("/") ? parentPath + getNodeName() : parentPath + "/" + getNodeName();
+		}
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * Removes an object from the CHILDREN containment list during inverse
+	 * reference maintenance; other features defer to the superclass.
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	@Override
+	public NotificationChain eInverseRemove(InternalEObject otherEnd, int featureID, NotificationChain msgs) {
+		switch (featureID) {
+			case HadoopPackage.ZOO_KEEPER_SERVER__CHILDREN:
+				return ((InternalEList<?>)getChildren()).basicRemove(otherEnd, msgs);
+		}
+		return super.eInverseRemove(otherEnd, featureID, msgs);
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * Reflective feature getter: dispatches on the featureID to the typed
+	 * accessor; PARENT honors the {@code resolve} flag to skip proxy resolution.
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	@Override
+	public Object eGet(int featureID, boolean resolve, boolean coreType) {
+		switch (featureID) {
+			case HadoopPackage.ZOO_KEEPER_SERVER__CHILDREN:
+				return getChildren();
+			case HadoopPackage.ZOO_KEEPER_SERVER__LAST_REFRESH:
+				return getLastRefresh();
+			case HadoopPackage.ZOO_KEEPER_SERVER__REFRESHING:
+				return isRefreshing();
+			case HadoopPackage.ZOO_KEEPER_SERVER__EPHERMERAL:
+				return isEphermeral();
+			case HadoopPackage.ZOO_KEEPER_SERVER__CREATION_ID:
+				return getCreationId();
+			case HadoopPackage.ZOO_KEEPER_SERVER__MODIFIED_ID:
+				return getModifiedId();
+			case HadoopPackage.ZOO_KEEPER_SERVER__CREATION_TIME:
+				return getCreationTime();
+			case HadoopPackage.ZOO_KEEPER_SERVER__MODIFIED_TIME:
+				return getModifiedTime();
+			case HadoopPackage.ZOO_KEEPER_SERVER__VERSION:
+				return getVersion();
+			case HadoopPackage.ZOO_KEEPER_SERVER__CHILDREN_VERSION:
+				return getChildrenVersion();
+			case HadoopPackage.ZOO_KEEPER_SERVER__ACL_VERSION:
+				return getAclVersion();
+			case HadoopPackage.ZOO_KEEPER_SERVER__EPHERMAL_OWNER_SESSION_ID:
+				return getEphermalOwnerSessionId();
+			case HadoopPackage.ZOO_KEEPER_SERVER__DATA_LENGTH:
+				return getDataLength();
+			case HadoopPackage.ZOO_KEEPER_SERVER__CHILDREN_COUNT:
+				return getChildrenCount();
+			case HadoopPackage.ZOO_KEEPER_SERVER__PARENT:
+				if (resolve) return getParent();
+				return basicGetParent();
+			case HadoopPackage.ZOO_KEEPER_SERVER__NODE_NAME:
+				return getNodeName();
+			case HadoopPackage.ZOO_KEEPER_SERVER__SEQUENTIAL:
+				return isSequential();
+		}
+		return super.eGet(featureID, resolve, coreType);
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * Reflective feature setter: dispatches on the featureID to the typed setter,
+	 * unboxing {@code newValue} as needed; CHILDREN is replaced wholesale
+	 * (clear then addAll). Unknown IDs defer to the superclass.
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	@SuppressWarnings("unchecked")
+	@Override
+	public void eSet(int featureID, Object newValue) {
+		switch (featureID) {
+			case HadoopPackage.ZOO_KEEPER_SERVER__CHILDREN:
+				getChildren().clear();
+				getChildren().addAll((Collection<? extends ZNode>)newValue);
+				return;
+			case HadoopPackage.ZOO_KEEPER_SERVER__LAST_REFRESH:
+				setLastRefresh((Long)newValue);
+				return;
+			case HadoopPackage.ZOO_KEEPER_SERVER__REFRESHING:
+				setRefreshing((Boolean)newValue);
+				return;
+			case HadoopPackage.ZOO_KEEPER_SERVER__EPHERMERAL:
+				setEphermeral((Boolean)newValue);
+				return;
+			case HadoopPackage.ZOO_KEEPER_SERVER__CREATION_ID:
+				setCreationId((Long)newValue);
+				return;
+			case HadoopPackage.ZOO_KEEPER_SERVER__MODIFIED_ID:
+				setModifiedId((Long)newValue);
+				return;
+			case HadoopPackage.ZOO_KEEPER_SERVER__CREATION_TIME:
+				setCreationTime((Long)newValue);
+				return;
+			case HadoopPackage.ZOO_KEEPER_SERVER__MODIFIED_TIME:
+				setModifiedTime((Long)newValue);
+				return;
+			case HadoopPackage.ZOO_KEEPER_SERVER__VERSION:
+				setVersion((Integer)newValue);
+				return;
+			case HadoopPackage.ZOO_KEEPER_SERVER__CHILDREN_VERSION:
+				setChildrenVersion((Integer)newValue);
+				return;
+			case HadoopPackage.ZOO_KEEPER_SERVER__ACL_VERSION:
+				setAclVersion((Integer)newValue);
+				return;
+			case HadoopPackage.ZOO_KEEPER_SERVER__EPHERMAL_OWNER_SESSION_ID:
+				setEphermalOwnerSessionId((Long)newValue);
+				return;
+			case HadoopPackage.ZOO_KEEPER_SERVER__DATA_LENGTH:
+				setDataLength((Integer)newValue);
+				return;
+			case HadoopPackage.ZOO_KEEPER_SERVER__CHILDREN_COUNT:
+				setChildrenCount((Integer)newValue);
+				return;
+			case HadoopPackage.ZOO_KEEPER_SERVER__PARENT:
+				setParent((ZNode)newValue);
+				return;
+			case HadoopPackage.ZOO_KEEPER_SERVER__NODE_NAME:
+				setNodeName((String)newValue);
+				return;
+			case HadoopPackage.ZOO_KEEPER_SERVER__SEQUENTIAL:
+				setSequential((Boolean)newValue);
+				return;
+		}
+		super.eSet(featureID, newValue);
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * Reflective feature unsetter: restores each feature to its EDEFAULT value
+	 * (CHILDREN is cleared, PARENT nulled). Unknown IDs defer to the superclass.
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	@Override
+	public void eUnset(int featureID) {
+		switch (featureID) {
+			case HadoopPackage.ZOO_KEEPER_SERVER__CHILDREN:
+				getChildren().clear();
+				return;
+			case HadoopPackage.ZOO_KEEPER_SERVER__LAST_REFRESH:
+				setLastRefresh(LAST_REFRESH_EDEFAULT);
+				return;
+			case HadoopPackage.ZOO_KEEPER_SERVER__REFRESHING:
+				setRefreshing(REFRESHING_EDEFAULT);
+				return;
+			case HadoopPackage.ZOO_KEEPER_SERVER__EPHERMERAL:
+				setEphermeral(EPHERMERAL_EDEFAULT);
+				return;
+			case HadoopPackage.ZOO_KEEPER_SERVER__CREATION_ID:
+				setCreationId(CREATION_ID_EDEFAULT);
+				return;
+			case HadoopPackage.ZOO_KEEPER_SERVER__MODIFIED_ID:
+				setModifiedId(MODIFIED_ID_EDEFAULT);
+				return;
+			case HadoopPackage.ZOO_KEEPER_SERVER__CREATION_TIME:
+				setCreationTime(CREATION_TIME_EDEFAULT);
+				return;
+			case HadoopPackage.ZOO_KEEPER_SERVER__MODIFIED_TIME:
+				setModifiedTime(MODIFIED_TIME_EDEFAULT);
+				return;
+			case HadoopPackage.ZOO_KEEPER_SERVER__VERSION:
+				setVersion(VERSION_EDEFAULT);
+				return;
+			case HadoopPackage.ZOO_KEEPER_SERVER__CHILDREN_VERSION:
+				setChildrenVersion(CHILDREN_VERSION_EDEFAULT);
+				return;
+			case HadoopPackage.ZOO_KEEPER_SERVER__ACL_VERSION:
+				setAclVersion(ACL_VERSION_EDEFAULT);
+				return;
+			case HadoopPackage.ZOO_KEEPER_SERVER__EPHERMAL_OWNER_SESSION_ID:
+				setEphermalOwnerSessionId(EPHERMAL_OWNER_SESSION_ID_EDEFAULT);
+				return;
+			case HadoopPackage.ZOO_KEEPER_SERVER__DATA_LENGTH:
+				setDataLength(DATA_LENGTH_EDEFAULT);
+				return;
+			case HadoopPackage.ZOO_KEEPER_SERVER__CHILDREN_COUNT:
+				setChildrenCount(CHILDREN_COUNT_EDEFAULT);
+				return;
+			case HadoopPackage.ZOO_KEEPER_SERVER__PARENT:
+				setParent((ZNode)null);
+				return;
+			case HadoopPackage.ZOO_KEEPER_SERVER__NODE_NAME:
+				setNodeName(NODE_NAME_EDEFAULT);
+				return;
+			case HadoopPackage.ZOO_KEEPER_SERVER__SEQUENTIAL:
+				setSequential(SEQUENTIAL_EDEFAULT);
+				return;
+		}
+		super.eUnset(featureID);
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * Reflective "is set" check: a feature counts as set when its cached value
+	 * differs from its EDEFAULT (non-empty list / non-null reference for
+	 * CHILDREN and PARENT). Unknown IDs defer to the superclass.
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	@Override
+	public boolean eIsSet(int featureID) {
+		switch (featureID) {
+			case HadoopPackage.ZOO_KEEPER_SERVER__CHILDREN:
+				return children != null && !children.isEmpty();
+			case HadoopPackage.ZOO_KEEPER_SERVER__LAST_REFRESH:
+				return lastRefresh != LAST_REFRESH_EDEFAULT;
+			case HadoopPackage.ZOO_KEEPER_SERVER__REFRESHING:
+				return refreshing != REFRESHING_EDEFAULT;
+			case HadoopPackage.ZOO_KEEPER_SERVER__EPHERMERAL:
+				return ephermeral != EPHERMERAL_EDEFAULT;
+			case HadoopPackage.ZOO_KEEPER_SERVER__CREATION_ID:
+				return creationId != CREATION_ID_EDEFAULT;
+			case HadoopPackage.ZOO_KEEPER_SERVER__MODIFIED_ID:
+				return modifiedId != MODIFIED_ID_EDEFAULT;
+			case HadoopPackage.ZOO_KEEPER_SERVER__CREATION_TIME:
+				return creationTime != CREATION_TIME_EDEFAULT;
+			case HadoopPackage.ZOO_KEEPER_SERVER__MODIFIED_TIME:
+				return modifiedTime != MODIFIED_TIME_EDEFAULT;
+			case HadoopPackage.ZOO_KEEPER_SERVER__VERSION:
+				return version != VERSION_EDEFAULT;
+			case HadoopPackage.ZOO_KEEPER_SERVER__CHILDREN_VERSION:
+				return childrenVersion != CHILDREN_VERSION_EDEFAULT;
+			case HadoopPackage.ZOO_KEEPER_SERVER__ACL_VERSION:
+				return aclVersion != ACL_VERSION_EDEFAULT;
+			case HadoopPackage.ZOO_KEEPER_SERVER__EPHERMAL_OWNER_SESSION_ID:
+				return ephermalOwnerSessionId != EPHERMAL_OWNER_SESSION_ID_EDEFAULT;
+			case HadoopPackage.ZOO_KEEPER_SERVER__DATA_LENGTH:
+				return dataLength != DATA_LENGTH_EDEFAULT;
+			case HadoopPackage.ZOO_KEEPER_SERVER__CHILDREN_COUNT:
+				return childrenCount != CHILDREN_COUNT_EDEFAULT;
+			case HadoopPackage.ZOO_KEEPER_SERVER__PARENT:
+				return parent != null;
+			case HadoopPackage.ZOO_KEEPER_SERVER__NODE_NAME:
+				return NODE_NAME_EDEFAULT == null ? nodeName != null : !NODE_NAME_EDEFAULT.equals(nodeName);
+			case HadoopPackage.ZOO_KEEPER_SERVER__SEQUENTIAL:
+				return sequential != SEQUENTIAL_EDEFAULT;
+		}
+		return super.eIsSet(featureID);
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * Maps a ZooKeeperServer feature ID onto the corresponding feature ID of
+	 * the ZNode base interface; returns -1 for features that are not
+	 * inherited from ZNode.
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	@Override
+	public int eBaseStructuralFeatureID(int derivedFeatureID, Class<?> baseClass) {
+		if (baseClass == ZNode.class) {
+			switch (derivedFeatureID) {
+				case HadoopPackage.ZOO_KEEPER_SERVER__CHILDREN: return HadoopPackage.ZNODE__CHILDREN;
+				case HadoopPackage.ZOO_KEEPER_SERVER__LAST_REFRESH: return HadoopPackage.ZNODE__LAST_REFRESH;
+				case HadoopPackage.ZOO_KEEPER_SERVER__REFRESHING: return HadoopPackage.ZNODE__REFRESHING;
+				case HadoopPackage.ZOO_KEEPER_SERVER__EPHERMERAL: return HadoopPackage.ZNODE__EPHERMERAL;
+				case HadoopPackage.ZOO_KEEPER_SERVER__CREATION_ID: return HadoopPackage.ZNODE__CREATION_ID;
+				case HadoopPackage.ZOO_KEEPER_SERVER__MODIFIED_ID: return HadoopPackage.ZNODE__MODIFIED_ID;
+				case HadoopPackage.ZOO_KEEPER_SERVER__CREATION_TIME: return HadoopPackage.ZNODE__CREATION_TIME;
+				case HadoopPackage.ZOO_KEEPER_SERVER__MODIFIED_TIME: return HadoopPackage.ZNODE__MODIFIED_TIME;
+				case HadoopPackage.ZOO_KEEPER_SERVER__VERSION: return HadoopPackage.ZNODE__VERSION;
+				case HadoopPackage.ZOO_KEEPER_SERVER__CHILDREN_VERSION: return HadoopPackage.ZNODE__CHILDREN_VERSION;
+				case HadoopPackage.ZOO_KEEPER_SERVER__ACL_VERSION: return HadoopPackage.ZNODE__ACL_VERSION;
+				case HadoopPackage.ZOO_KEEPER_SERVER__EPHERMAL_OWNER_SESSION_ID: return HadoopPackage.ZNODE__EPHERMAL_OWNER_SESSION_ID;
+				case HadoopPackage.ZOO_KEEPER_SERVER__DATA_LENGTH: return HadoopPackage.ZNODE__DATA_LENGTH;
+				case HadoopPackage.ZOO_KEEPER_SERVER__CHILDREN_COUNT: return HadoopPackage.ZNODE__CHILDREN_COUNT;
+				case HadoopPackage.ZOO_KEEPER_SERVER__PARENT: return HadoopPackage.ZNODE__PARENT;
+				case HadoopPackage.ZOO_KEEPER_SERVER__NODE_NAME: return HadoopPackage.ZNODE__NODE_NAME;
+				case HadoopPackage.ZOO_KEEPER_SERVER__SEQUENTIAL: return HadoopPackage.ZNODE__SEQUENTIAL;
+				default: return -1;
+			}
+		}
+		return super.eBaseStructuralFeatureID(derivedFeatureID, baseClass);
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * Inverse of {@link #eBaseStructuralFeatureID(int, Class)}: maps a ZNode
+	 * base-interface feature ID onto the corresponding ZooKeeperServer
+	 * feature ID; returns -1 for unknown IDs.
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	@Override
+	public int eDerivedStructuralFeatureID(int baseFeatureID, Class<?> baseClass) {
+		if (baseClass == ZNode.class) {
+			switch (baseFeatureID) {
+				case HadoopPackage.ZNODE__CHILDREN: return HadoopPackage.ZOO_KEEPER_SERVER__CHILDREN;
+				case HadoopPackage.ZNODE__LAST_REFRESH: return HadoopPackage.ZOO_KEEPER_SERVER__LAST_REFRESH;
+				case HadoopPackage.ZNODE__REFRESHING: return HadoopPackage.ZOO_KEEPER_SERVER__REFRESHING;
+				case HadoopPackage.ZNODE__EPHERMERAL: return HadoopPackage.ZOO_KEEPER_SERVER__EPHERMERAL;
+				case HadoopPackage.ZNODE__CREATION_ID: return HadoopPackage.ZOO_KEEPER_SERVER__CREATION_ID;
+				case HadoopPackage.ZNODE__MODIFIED_ID: return HadoopPackage.ZOO_KEEPER_SERVER__MODIFIED_ID;
+				case HadoopPackage.ZNODE__CREATION_TIME: return HadoopPackage.ZOO_KEEPER_SERVER__CREATION_TIME;
+				case HadoopPackage.ZNODE__MODIFIED_TIME: return HadoopPackage.ZOO_KEEPER_SERVER__MODIFIED_TIME;
+				case HadoopPackage.ZNODE__VERSION: return HadoopPackage.ZOO_KEEPER_SERVER__VERSION;
+				case HadoopPackage.ZNODE__CHILDREN_VERSION: return HadoopPackage.ZOO_KEEPER_SERVER__CHILDREN_VERSION;
+				case HadoopPackage.ZNODE__ACL_VERSION: return HadoopPackage.ZOO_KEEPER_SERVER__ACL_VERSION;
+				case HadoopPackage.ZNODE__EPHERMAL_OWNER_SESSION_ID: return HadoopPackage.ZOO_KEEPER_SERVER__EPHERMAL_OWNER_SESSION_ID;
+				case HadoopPackage.ZNODE__DATA_LENGTH: return HadoopPackage.ZOO_KEEPER_SERVER__DATA_LENGTH;
+				case HadoopPackage.ZNODE__CHILDREN_COUNT: return HadoopPackage.ZOO_KEEPER_SERVER__CHILDREN_COUNT;
+				case HadoopPackage.ZNODE__PARENT: return HadoopPackage.ZOO_KEEPER_SERVER__PARENT;
+				case HadoopPackage.ZNODE__NODE_NAME: return HadoopPackage.ZOO_KEEPER_SERVER__NODE_NAME;
+				case HadoopPackage.ZNODE__SEQUENTIAL: return HadoopPackage.ZOO_KEEPER_SERVER__SEQUENTIAL;
+				default: return -1;
+			}
+		}
+		return super.eDerivedStructuralFeatureID(baseFeatureID, baseClass);
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * Renders this node's attribute state for debugging; delegates to the
+	 * EMF proxy representation when this object is a proxy.
+	 * <!-- end-user-doc -->
+	 * @generated NOT
+	 */
+	@Override
+	public String toString() {
+		if (eIsProxy()) return super.toString();
+
+		// StringBuilder instead of the generated StringBuffer: the builder is
+		// method-local, so StringBuffer's per-call synchronization is pure cost.
+		StringBuilder result = new StringBuilder(super.toString());
+		result.append(" (lastRefresh: ").append(lastRefresh);
+		result.append(", refreshing: ").append(refreshing);
+		result.append(", ephermeral: ").append(ephermeral);
+		result.append(", creationId: ").append(creationId);
+		result.append(", modifiedId: ").append(modifiedId);
+		result.append(", creationTime: ").append(creationTime);
+		result.append(", modifiedTime: ").append(modifiedTime);
+		result.append(", version: ").append(version);
+		result.append(", childrenVersion: ").append(childrenVersion);
+		result.append(", aclVersion: ").append(aclVersion);
+		result.append(", ephermalOwnerSessionId: ").append(ephermalOwnerSessionId);
+		result.append(", dataLength: ").append(dataLength);
+		result.append(", childrenCount: ").append(childrenCount);
+		result.append(", nodeName: ").append(nodeName);
+		result.append(", sequential: ").append(sequential);
+		result.append(')');
+		return result.toString();
+	}
+
+} //ZooKeeperServerImpl

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/63bec260/org.apache.hdt.core/src/org/apache/hdt/core/internal/model/util/HadoopAdapterFactory.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.core/src/org/apache/hdt/core/internal/model/util/HadoopAdapterFactory.java b/org.apache.hdt.core/src/org/apache/hdt/core/internal/model/util/HadoopAdapterFactory.java
new file mode 100644
index 0000000..417d51a
--- /dev/null
+++ b/org.apache.hdt.core/src/org/apache/hdt/core/internal/model/util/HadoopAdapterFactory.java
@@ -0,0 +1,208 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *  
+ */
+package org.apache.hdt.core.internal.model.util;
+
+import org.apache.hdt.core.internal.model.*;
+
+import org.eclipse.emf.common.notify.Adapter;
+import org.eclipse.emf.common.notify.Notifier;
+
+import org.eclipse.emf.common.notify.impl.AdapterFactoryImpl;
+
+import org.eclipse.emf.ecore.EObject;
+
+/**
+ * <!-- begin-user-doc -->
+ * The <b>Adapter Factory</b> for the model.
+ * It provides an adapter <code>createXXX</code> method for each class of the model.
+ * <!-- end-user-doc -->
+ * @see org.apache.hdt.core.internal.model.HadoopPackage
+ * @generated
+ */
+public class HadoopAdapterFactory extends AdapterFactoryImpl {
+	/**
+	 * The cached model package.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	protected static HadoopPackage modelPackage;
+
+	/**
+	 * Creates an instance of the adapter factory.
+	 * <!-- begin-user-doc -->
+	 * Lazily caches the package singleton shared by all factory instances.
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	public HadoopAdapterFactory() {
+		if (modelPackage == null) {
+			modelPackage = HadoopPackage.eINSTANCE;
+		}
+	}
+
+	/**
+	 * Returns whether this factory is applicable for the type of the object.
+	 * <!-- begin-user-doc -->
+	 * This implementation returns <code>true</code> if the object is either the model's package or is an instance object of the model.
+	 * <!-- end-user-doc -->
+	 * @return whether this factory is applicable for the type of the object.
+	 * @generated
+	 */
+	@Override
+	public boolean isFactoryForType(Object object) {
+		if (object == modelPackage) {
+			return true;
+		}
+		if (object instanceof EObject) {
+			return ((EObject)object).eClass().getEPackage() == modelPackage;
+		}
+		return false;
+	}
+
+	/**
+	 * The switch that delegates to the <code>createXXX</code> methods.
+	 * <!-- begin-user-doc -->
+	 * Routes each model object to its type-specific <code>createXXX</code>
+	 * method; <code>defaultCase</code> handles any other EObject.
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	protected HadoopSwitch<Adapter> modelSwitch =
+		new HadoopSwitch<Adapter>() {
+			@Override
+			public Adapter caseHDFSServer(HDFSServer object) {
+				return createHDFSServerAdapter();
+			}
+			@Override
+			public Adapter caseServers(Servers object) {
+				return createServersAdapter();
+			}
+			@Override
+			public Adapter caseServer(Server object) {
+				return createServerAdapter();
+			}
+			@Override
+			public Adapter caseZooKeeperServer(ZooKeeperServer object) {
+				return createZooKeeperServerAdapter();
+			}
+			@Override
+			public Adapter caseZNode(ZNode object) {
+				return createZNodeAdapter();
+			}
+			@Override
+			public Adapter defaultCase(EObject object) {
+				return createEObjectAdapter();
+			}
+		};
+
+	/**
+	 * Creates an adapter for the <code>target</code>.
+	 * <!-- begin-user-doc -->
+	 * The target must be an EObject of this model; the cast below enforces it.
+	 * <!-- end-user-doc -->
+	 * @param target the object to adapt.
+	 * @return the adapter for the <code>target</code>.
+	 * @generated
+	 */
+	@Override
+	public Adapter createAdapter(Notifier target) {
+		return modelSwitch.doSwitch((EObject)target);
+	}
+
+
+	/**
+	 * Creates a new adapter for an object of class '{@link org.apache.hdt.core.internal.model.HDFSServer <em>HDFS Server</em>}'.
+	 * <!-- begin-user-doc -->
+	 * This default implementation returns null so that we can easily ignore cases;
+	 * it's useful to ignore a case when inheritance will catch all the cases anyway.
+	 * <!-- end-user-doc -->
+	 * @return the new adapter.
+	 * @see org.apache.hdt.core.internal.model.HDFSServer
+	 * @generated
+	 */
+	public Adapter createHDFSServerAdapter() {
+		return null;
+	}
+
+	/**
+	 * Creates a new adapter for an object of class '{@link org.apache.hdt.core.internal.model.Servers <em>Servers</em>}'.
+	 * <!-- begin-user-doc -->
+	 * This default implementation returns null so that we can easily ignore cases;
+	 * it's useful to ignore a case when inheritance will catch all the cases anyway.
+	 * <!-- end-user-doc -->
+	 * @return the new adapter.
+	 * @see org.apache.hdt.core.internal.model.Servers
+	 * @generated
+	 */
+	public Adapter createServersAdapter() {
+		return null;
+	}
+
+	/**
+	 * Creates a new adapter for an object of class '{@link org.apache.hdt.core.internal.model.Server <em>Server</em>}'.
+	 * <!-- begin-user-doc -->
+	 * This default implementation returns null so that we can easily ignore cases;
+	 * it's useful to ignore a case when inheritance will catch all the cases anyway.
+	 * <!-- end-user-doc -->
+	 * @return the new adapter.
+	 * @see org.apache.hdt.core.internal.model.Server
+	 * @generated
+	 */
+	public Adapter createServerAdapter() {
+		return null;
+	}
+
+	/**
+	 * Creates a new adapter for an object of class '{@link org.apache.hdt.core.internal.model.ZooKeeperServer <em>Zoo Keeper Server</em>}'.
+	 * <!-- begin-user-doc -->
+	 * This default implementation returns null so that we can easily ignore cases;
+	 * it's useful to ignore a case when inheritance will catch all the cases anyway.
+	 * <!-- end-user-doc -->
+	 * @return the new adapter.
+	 * @see org.apache.hdt.core.internal.model.ZooKeeperServer
+	 * @generated
+	 */
+	public Adapter createZooKeeperServerAdapter() {
+		return null;
+	}
+
+	/**
+	 * Creates a new adapter for an object of class '{@link org.apache.hdt.core.internal.model.ZNode <em>ZNode</em>}'.
+	 * <!-- begin-user-doc -->
+	 * This default implementation returns null so that we can easily ignore cases;
+	 * it's useful to ignore a case when inheritance will catch all the cases anyway.
+	 * <!-- end-user-doc -->
+	 * @return the new adapter.
+	 * @see org.apache.hdt.core.internal.model.ZNode
+	 * @generated
+	 */
+	public Adapter createZNodeAdapter() {
+		return null;
+	}
+
+	/**
+	 * Creates a new adapter for the default case.
+	 * <!-- begin-user-doc -->
+	 * This default implementation returns null.
+	 * <!-- end-user-doc -->
+	 * @return the new adapter.
+	 * @generated
+	 */
+	public Adapter createEObjectAdapter() {
+		return null;
+	}
+
+} //HadoopAdapterFactory

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/63bec260/org.apache.hdt.core/src/org/apache/hdt/core/internal/model/util/HadoopSwitch.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.core/src/org/apache/hdt/core/internal/model/util/HadoopSwitch.java b/org.apache.hdt.core/src/org/apache/hdt/core/internal/model/util/HadoopSwitch.java
new file mode 100644
index 0000000..6f0b337
--- /dev/null
+++ b/org.apache.hdt.core/src/org/apache/hdt/core/internal/model/util/HadoopSwitch.java
@@ -0,0 +1,229 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *  
+ */
+package org.apache.hdt.core.internal.model.util;
+
+import java.util.List;
+
+import org.apache.hdt.core.internal.model.*;
+
+import org.eclipse.emf.ecore.EClass;
+import org.eclipse.emf.ecore.EObject;
+
+/**
+ * <!-- begin-user-doc -->
+ * The <b>Switch</b> for the model's inheritance hierarchy.
+ * It supports the call {@link #doSwitch(EObject) doSwitch(object)}
+ * to invoke the <code>caseXXX</code> method for each class of the model,
+ * starting with the actual class of the object
+ * and proceeding up the inheritance hierarchy
+ * until a non-null result is returned,
+ * which is the result of the switch.
+ * <!-- end-user-doc -->
+ * @see org.apache.hdt.core.internal.model.HadoopPackage
+ * @generated
+ */
+public class HadoopSwitch<T> {
+	/**
+	 * The cached model package
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	protected static HadoopPackage modelPackage;
+
+	/**
+	 * Creates an instance of the switch.
+	 * <!-- begin-user-doc -->
+	 * Lazily caches the package singleton shared by all switch instances.
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	public HadoopSwitch() {
+		if (modelPackage == null) {
+			modelPackage = HadoopPackage.eINSTANCE;
+		}
+	}
+
+	/**
+	 * Calls <code>caseXXX</code> for each class of the model until one returns a non null result; it yields that result.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @return the first non-null result returned by a <code>caseXXX</code> call.
+	 * @generated
+	 */
+	public T doSwitch(EObject theEObject) {
+		return doSwitch(theEObject.eClass(), theEObject);
+	}
+
+	/**
+	 * Calls <code>caseXXX</code> for each class of the model until one returns a non null result; it yields that result.
+	 * <!-- begin-user-doc -->
+	 * Classes from other packages are handled by walking up their first
+	 * supertype chain until a class from this package is found (or the chain
+	 * ends, in which case <code>defaultCase</code> is applied).
+	 * <!-- end-user-doc -->
+	 * @return the first non-null result returned by a <code>caseXXX</code> call.
+	 * @generated
+	 */
+	protected T doSwitch(EClass theEClass, EObject theEObject) {
+		if (theEClass.eContainer() == modelPackage) {
+			return doSwitch(theEClass.getClassifierID(), theEObject);
+		}
+		else {
+			List<EClass> eSuperTypes = theEClass.getESuperTypes();
+			return
+				eSuperTypes.isEmpty() ?
+					defaultCase(theEObject) :
+					doSwitch(eSuperTypes.get(0), theEObject);
+		}
+	}
+
+	/**
+	 * Calls <code>caseXXX</code> for each class of the model until one returns a non null result; it yields that result.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @return the first non-null result returned by a <code>caseXXX</code> call.
+	 * @generated
+	 */
+	protected T doSwitch(int classifierID, EObject theEObject) {
+		switch (classifierID) {
+			case HadoopPackage.HDFS_SERVER: {
+				HDFSServer hdfsServer = (HDFSServer)theEObject;
+				T result = caseHDFSServer(hdfsServer);
+				// HDFSServer falls back to its Server supertype, then default.
+				if (result == null) result = caseServer(hdfsServer);
+				if (result == null) result = defaultCase(theEObject);
+				return result;
+			}
+			case HadoopPackage.SERVERS: {
+				Servers servers = (Servers)theEObject;
+				T result = caseServers(servers);
+				if (result == null) result = defaultCase(theEObject);
+				return result;
+			}
+			case HadoopPackage.SERVER: {
+				Server server = (Server)theEObject;
+				T result = caseServer(server);
+				if (result == null) result = defaultCase(theEObject);
+				return result;
+			}
+			case HadoopPackage.ZOO_KEEPER_SERVER: {
+				ZooKeeperServer zooKeeperServer = (ZooKeeperServer)theEObject;
+				T result = caseZooKeeperServer(zooKeeperServer);
+				// ZooKeeperServer falls back to both Server and ZNode, in order.
+				if (result == null) result = caseServer(zooKeeperServer);
+				if (result == null) result = caseZNode(zooKeeperServer);
+				if (result == null) result = defaultCase(theEObject);
+				return result;
+			}
+			case HadoopPackage.ZNODE: {
+				ZNode zNode = (ZNode)theEObject;
+				T result = caseZNode(zNode);
+				if (result == null) result = defaultCase(theEObject);
+				return result;
+			}
+			default: return defaultCase(theEObject);
+		}
+	}
+
+	/**
+	 * Returns the result of interpreting the object as an instance of '<em>HDFS Server</em>'.
+	 * <!-- begin-user-doc -->
+	 * This implementation returns null;
+	 * returning a non-null result will terminate the switch.
+	 * <!-- end-user-doc -->
+	 * @param object the target of the switch.
+	 * @return the result of interpreting the object as an instance of '<em>HDFS Server</em>'.
+	 * @see #doSwitch(org.eclipse.emf.ecore.EObject) doSwitch(EObject)
+	 * @generated
+	 */
+	public T caseHDFSServer(HDFSServer object) {
+		return null;
+	}
+
+	/**
+	 * Returns the result of interpreting the object as an instance of '<em>Servers</em>'.
+	 * <!-- begin-user-doc -->
+	 * This implementation returns null;
+	 * returning a non-null result will terminate the switch.
+	 * <!-- end-user-doc -->
+	 * @param object the target of the switch.
+	 * @return the result of interpreting the object as an instance of '<em>Servers</em>'.
+	 * @see #doSwitch(org.eclipse.emf.ecore.EObject) doSwitch(EObject)
+	 * @generated
+	 */
+	public T caseServers(Servers object) {
+		return null;
+	}
+
+	/**
+	 * Returns the result of interpreting the object as an instance of '<em>Server</em>'.
+	 * <!-- begin-user-doc -->
+	 * This implementation returns null;
+	 * returning a non-null result will terminate the switch.
+	 * <!-- end-user-doc -->
+	 * @param object the target of the switch.
+	 * @return the result of interpreting the object as an instance of '<em>Server</em>'.
+	 * @see #doSwitch(org.eclipse.emf.ecore.EObject) doSwitch(EObject)
+	 * @generated
+	 */
+	public T caseServer(Server object) {
+		return null;
+	}
+
+	/**
+	 * Returns the result of interpreting the object as an instance of '<em>Zoo Keeper Server</em>'.
+	 * <!-- begin-user-doc -->
+	 * This implementation returns null;
+	 * returning a non-null result will terminate the switch.
+	 * <!-- end-user-doc -->
+	 * @param object the target of the switch.
+	 * @return the result of interpreting the object as an instance of '<em>Zoo Keeper Server</em>'.
+	 * @see #doSwitch(org.eclipse.emf.ecore.EObject) doSwitch(EObject)
+	 * @generated
+	 */
+	public T caseZooKeeperServer(ZooKeeperServer object) {
+		return null;
+	}
+
+	/**
+	 * Returns the result of interpreting the object as an instance of '<em>ZNode</em>'.
+	 * <!-- begin-user-doc -->
+	 * This implementation returns null;
+	 * returning a non-null result will terminate the switch.
+	 * <!-- end-user-doc -->
+	 * @param object the target of the switch.
+	 * @return the result of interpreting the object as an instance of '<em>ZNode</em>'.
+	 * @see #doSwitch(org.eclipse.emf.ecore.EObject) doSwitch(EObject)
+	 * @generated
+	 */
+	public T caseZNode(ZNode object) {
+		return null;
+	}
+
+	/**
+	 * Returns the result of interpreting the object as an instance of '<em>EObject</em>'.
+	 * <!-- begin-user-doc -->
+	 * This implementation returns null;
+	 * returning a non-null result will terminate the switch, but this is the last case anyway.
+	 * <!-- end-user-doc -->
+	 * @param object the target of the switch.
+	 * @return the result of interpreting the object as an instance of '<em>EObject</em>'.
+	 * @see #doSwitch(org.eclipse.emf.ecore.EObject)
+	 * @generated
+	 */
+	public T defaultCase(EObject object) {
+		return null;
+	}
+
+} //HadoopSwitch

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/63bec260/org.apache.hdt.core/src/org/apache/hdt/core/internal/zookeeper/InterruptableZooKeeperClient.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.core/src/org/apache/hdt/core/internal/zookeeper/InterruptableZooKeeperClient.java b/org.apache.hdt.core/src/org/apache/hdt/core/internal/zookeeper/InterruptableZooKeeperClient.java
new file mode 100644
index 0000000..133b9dd
--- /dev/null
+++ b/org.apache.hdt.core/src/org/apache/hdt/core/internal/zookeeper/InterruptableZooKeeperClient.java
@@ -0,0 +1,233 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hdt.core.internal.zookeeper;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.hdt.core.internal.model.ZNode;
+import org.apache.hdt.core.internal.model.ZooKeeperServer;
+import org.apache.hdt.core.zookeeper.ZooKeeperClient;
+import org.apache.log4j.Logger;
+
+/**
+ * 
+ * @author Srimanth Gunturi
+ * 
+ */
+public class InterruptableZooKeeperClient extends ZooKeeperClient {
+	private static final int DEFAULT_TIMEOUT = 60000;
+	private static final Logger logger = Logger.getLogger(InterruptableZooKeeperClient.class);
+
+	/** Delegate that performs the actual ZooKeeper calls. */
+	private final ZooKeeperClient client;
+	/** Maximum time, in milliseconds, to wait for a single server call. */
+	private final int timeoutMillis = DEFAULT_TIMEOUT;
+	/** Server model object, used to report disconnects to the manager. */
+	private final ZooKeeperServer server;
+
+	/**
+	 * Wraps <code>client</code> so that every delegated call is bounded by
+	 * a timeout and can be abandoned if the server stops responding.
+	 *
+	 * @param server the model object representing the ZooKeeper server
+	 * @param client the delegate that performs the actual calls
+	 */
+	public InterruptableZooKeeperClient(ZooKeeperServer server, ZooKeeperClient client) {
+		this.server = server;
+		this.client = client;
+	}
+
+	/** One unit of server work that may fail with I/O or interruption. */
+	private static interface CustomRunnable<V> {
+		public V run() throws IOException, InterruptedException;
+	}
+
+	/**
+	 * Runs <code>runnable</code> on a worker thread and waits at most
+	 * {@link #DEFAULT_TIMEOUT} milliseconds for it to finish.
+	 * <p>
+	 * On timeout the worker is interrupted, the server is reported
+	 * disconnected to {@link ZooKeeperManager}, and an
+	 * {@link InterruptedException} is thrown. If the worker threw an
+	 * {@link IOException}, the server is disconnected (when the delegate
+	 * reports it is no longer connected) and the exception is rethrown.
+	 *
+	 * @param runnable the work to execute
+	 * @return the worker's result, or <code>null</code> if it produced none
+	 * @throws IOException if the worker failed with an I/O error
+	 * @throws InterruptedException if the worker was interrupted or timed out
+	 */
+	protected <T> T executeWithTimeout(final CustomRunnable<T> runnable) throws IOException, InterruptedException {
+		final List<T> data = new ArrayList<T>();
+		final IOException[] ioE = new IOException[1];
+		final InterruptedException[] inE = new InterruptedException[1];
+		Thread runnerThread = new Thread(new Runnable() {
+			public void run() {
+				try {
+					data.add(runnable.run());
+				} catch (IOException e) {
+					ioE[0] = e;
+				} catch (InterruptedException e) {
+					inE[0] = e;
+				}
+			}
+		});
+		runnerThread.start();
+		runnerThread.join(timeoutMillis);
+		if (runnerThread.isAlive()) {
+			// Timed out. Interrupt the worker and bail out WITHOUT touching
+			// data/ioE/inE: the worker may still be writing them, and only a
+			// completed join() gives us a happens-before edge on those writes.
+			if (logger.isDebugEnabled())
+				logger.debug("executeWithTimeout(): Interrupting server call");
+			runnerThread.interrupt();
+			// Tell the manager that the server timed out
+			if (logger.isDebugEnabled())
+				logger.debug("executeWithTimeout(): Server timed out: " + server);
+			ZooKeeperManager.INSTANCE.disconnect(server);
+			throw new InterruptedException();
+		}
+		// Worker finished before the timeout; join() made its writes visible.
+		if (ioE[0] != null) {
+			try {
+				if (!client.isConnected())
+					ZooKeeperManager.INSTANCE.disconnect(server);
+			} catch (Throwable t) {
+				// Best-effort cleanup only; the original I/O failure below is
+				// the error callers need to see.
+				logger.debug("executeWithTimeout(): Error during disconnect cleanup", t);
+			}
+			throw ioE[0];
+		}
+		if (inE[0] != null)
+			throw inE[0];
+		return data.isEmpty() ? null : data.get(0);
+	}
+
+	/**
+	 * Connects the delegate if it does not already report a live connection.
+	 */
+	protected void connectIfNecessary() throws IOException, InterruptedException {
+		if (!client.isConnected())
+			client.connect();
+	}
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see
+	 * org.apache.hdt.core.zookeeper.ZooKeeperClient#initialize(java.lang
+	 * .String)
+	 */
+	@Override
+	public void initialize(final String serverLocation) {
+		try {
+			executeWithTimeout(new CustomRunnable<Object>() {
+				@Override
+				public Object run() throws IOException, InterruptedException {
+					client.initialize(serverLocation);
+					return null;
+				}
+			});
+		} catch (IOException e) {
+			// initialize() declares no checked exceptions; wrap with cause intact.
+			throw new RuntimeException(e.getMessage(), e);
+		} catch (InterruptedException e) {
+			throw new RuntimeException(e.getMessage(), e);
+		}
+	}
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see org.apache.hdt.core.zookeeper.ZooKeeperClient#isConnected()
+	 */
+	@Override
+	public boolean isConnected() throws IOException, InterruptedException {
+		return executeWithTimeout(new CustomRunnable<Boolean>() {
+			@Override
+			public Boolean run() throws IOException, InterruptedException {
+				return client.isConnected();
+			}
+		});
+	}
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see
+	 * org.apache.hdt.core.zookeeper.ZooKeeperClient#connect(java.lang
+	 * .String)
+	 */
+	@Override
+	public void connect() throws IOException, InterruptedException {
+		executeWithTimeout(new CustomRunnable<Object>() {
+			@Override
+			public Object run() throws IOException, InterruptedException {
+				client.connect();
+				return null;
+			}
+		});
+	}
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see
+	 * org.apache.hdt.core.zookeeper.ZooKeeperClient#getChildren(java.
+	 * lang.String)
+	 */
+	@Override
+	public List<ZNode> getChildren(final ZNode path) throws IOException, InterruptedException {
+		connectIfNecessary();
+		return executeWithTimeout(new CustomRunnable<List<ZNode>>() {
+			@Override
+			public List<ZNode> run() throws IOException, InterruptedException {
+				return client.getChildren(path);
+			}
+		});
+	}
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see org.apache.hdt.core.zookeeper.ZooKeeperClient#disconnect()
+	 */
+	@Override
+	public void disconnect() throws IOException, InterruptedException {
+		executeWithTimeout(new CustomRunnable<Object>() {
+			@Override
+			public Object run() throws IOException, InterruptedException {
+				client.disconnect();
+				return null;
+			}
+		});
+	}
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see org.apache.hdt.core.zookeeper.ZooKeeperClient#delete(ZNode)
+	 */
+	@Override
+	public void delete(final ZNode node) throws IOException, InterruptedException {
+		executeWithTimeout(new CustomRunnable<Object>() {
+			@Override
+			public Object run() throws IOException, InterruptedException {
+				client.delete(node);
+				return null;
+			}
+		});
+	}
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see
+	 * org.apache.hdt.core.zookeeper.ZooKeeperClient#open(java.lang.String
+	 * )
+	 */
+	@Override
+	public byte[] open(final ZNode path) throws InterruptedException, IOException {
+		connectIfNecessary();
+		return executeWithTimeout(new CustomRunnable<byte[]>() {
+			@Override
+			public byte[] run() throws IOException, InterruptedException {
+				return client.open(path);
+			}
+		});
+	}
+}

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/63bec260/org.apache.hdt.core/src/org/apache/hdt/core/internal/zookeeper/ZooKeeperManager.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.core/src/org/apache/hdt/core/internal/zookeeper/ZooKeeperManager.java b/org.apache.hdt.core/src/org/apache/hdt/core/internal/zookeeper/ZooKeeperManager.java
new file mode 100644
index 0000000..4c36259
--- /dev/null
+++ b/org.apache.hdt.core/src/org/apache/hdt/core/internal/zookeeper/ZooKeeperManager.java
@@ -0,0 +1,162 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hdt.core.internal.zookeeper;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.Map;
+
+import org.apache.hdt.core.Activator;
+import org.apache.hdt.core.internal.HadoopManager;
+import org.apache.hdt.core.internal.model.HadoopFactory;
+import org.apache.hdt.core.internal.model.ServerStatus;
+import org.apache.hdt.core.internal.model.ZooKeeperServer;
+import org.apache.hdt.core.zookeeper.ZooKeeperClient;
+import org.apache.log4j.Logger;
+import org.eclipse.core.runtime.CoreException;
+import org.eclipse.core.runtime.IConfigurationElement;
+import org.eclipse.core.runtime.IStatus;
+import org.eclipse.core.runtime.Platform;
+import org.eclipse.core.runtime.Status;
+import org.eclipse.emf.common.util.EList;
+
+/**
+ * @author Srimanth Gunturi
+ * 
+ */
+public class ZooKeeperManager {
+	private static final Logger logger = Logger.getLogger(ZooKeeperManager.class);
+	public static ZooKeeperManager INSTANCE = new ZooKeeperManager();
+	private Map<String, ZooKeeperClient> clientsMap = new HashMap<String, ZooKeeperClient>();
+
+	private ZooKeeperManager() {
+	}
+
+	/**
+	 * 
+	 */
+	public void loadServers() {
+
+	}
+
+	public EList<ZooKeeperServer> getServers() {
+		return HadoopManager.INSTANCE.getServers().getZookeeperServers();
+	}
+
+	/**
+	 * @param zkServerName
+	 * @param uri
+	 */
+	public ZooKeeperServer createServer(String zkServerName, String zkServerLocation) {
+		ZooKeeperServer zkServer = HadoopFactory.eINSTANCE.createZooKeeperServer();
+		zkServer.setName(zkServerName);
+		zkServer.setUri(zkServerLocation);
+		getServers().add(zkServer);
+		HadoopManager.INSTANCE.saveServers();
+		return zkServer;
+	}
+
+	/**
+	 * @param r
+	 */
+	public void disconnect(ZooKeeperServer server) {
+		try {
+			if (ServerStatus.DISCONNECTED_VALUE != server.getStatusCode()) {
+				getClient(server).disconnect();
+				server.setStatusCode(ServerStatus.DISCONNECTED_VALUE);
+			}
+		} catch (IOException e) {
+			// TODO Auto-generated catch block
+			e.printStackTrace();
+		} catch (InterruptedException e) {
+			// TODO Auto-generated catch block
+			e.printStackTrace();
+		} catch (CoreException e) {
+			// TODO Auto-generated catch block
+			e.printStackTrace();
+		}
+	}
+
+	/**
+	 * Provides a ZooKeeper instance using plugin extensions.
+	 * 
+	 * @param r
+	 */
+	public void reconnect(ZooKeeperServer server) {
+		try {
+			if (logger.isDebugEnabled())
+				logger.debug("reconnect(): Reconnecting: " + server);
+			server.setStatusCode(0);
+			getClient(server).connect();
+			if (!getClient(server).isConnected()) {
+				if (logger.isDebugEnabled())
+					logger.debug("reconnect(): Client not connected. Setting to disconnected: " + server);
+				server.setStatusCode(ServerStatus.DISCONNECTED_VALUE);
+			}
+			if (logger.isDebugEnabled())
+				logger.debug("reconnect(): Reconnected: " + server);
+		} catch (IOException e) {
+			server.setStatusCode(ServerStatus.DISCONNECTED_VALUE);
+			// TODO Auto-generated catch block
+			e.printStackTrace();
+		} catch (InterruptedException e) {
+			server.setStatusCode(ServerStatus.DISCONNECTED_VALUE);
+			// TODO Auto-generated catch block
+			e.printStackTrace();
+		} catch (CoreException e) {
+			server.setStatusCode(ServerStatus.DISCONNECTED_VALUE);
+			// TODO Auto-generated catch block
+			e.printStackTrace();
+		}
+	}
+
+	public ZooKeeperClient getClient(ZooKeeperServer server) throws CoreException {
+		if (server != null && server.getStatusCode() == ServerStatus.DISCONNECTED_VALUE) {
+			if (logger.isDebugEnabled())
+				logger.debug("getClient(" + server.getUri() + "): Server disconnected. Not returning client");
+			throw new CoreException(new Status(IStatus.WARNING, Activator.BUNDLE_ID, "Server disconnected. Please reconnect to server."));
+		}
+		if (clientsMap.containsKey(server.getUri()))
+			return clientsMap.get(server.getUri());
+		else {
+			IConfigurationElement[] elementsFor = Platform.getExtensionRegistry().getConfigurationElementsFor("org.apache.hdt.core.zookeeperClient");
+			for (IConfigurationElement element : elementsFor) {
+				ZooKeeperClient client = (ZooKeeperClient) element.createExecutableExtension("class");
+				client.initialize(server.getUri());
+				clientsMap.put(server.getUri(), new InterruptableZooKeeperClient(server, client));
+			}
+			return clientsMap.get(server.getUri());
+		}
+	}
+
+	/**
+	 * @param r
+	 * @throws CoreException
+	 */
+	public void delete(ZooKeeperServer server) throws CoreException {
+		if (server != null && server.getStatusCode() != ServerStatus.DISCONNECTED_VALUE) {
+			if (logger.isDebugEnabled())
+				logger.debug("getClient(" + server.getUri() + "): Cannot delete a connected server.");
+			throw new CoreException(new Status(IStatus.WARNING, Activator.BUNDLE_ID, "Cannot delete a connected server."));
+		}
+		if (clientsMap.containsKey(server.getUri()))
+			clientsMap.remove(server.getUri());
+		getServers().remove(server);
+	}
+}

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/63bec260/org.apache.hdt.core/src/org/apache/hdt/core/zookeeper/ZooKeeperClient.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.core/src/org/apache/hdt/core/zookeeper/ZooKeeperClient.java b/org.apache.hdt.core/src/org/apache/hdt/core/zookeeper/ZooKeeperClient.java
new file mode 100644
index 0000000..cec9a73
--- /dev/null
+++ b/org.apache.hdt.core/src/org/apache/hdt/core/zookeeper/ZooKeeperClient.java
@@ -0,0 +1,45 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hdt.core.zookeeper;
+
+import java.io.IOException;
+import java.util.List;
+
+import org.apache.hdt.core.internal.model.ZNode;
+
+/**
+ * Abstraction over a client connection to a ZooKeeper server. Concrete
+ * implementations are contributed through the
+ * <code>org.apache.hdt.core.zookeeperClient</code> extension point and are
+ * instantiated reflectively, so an implementation needs a public no-argument
+ * constructor; {@link #initialize(String)} is invoked with the server
+ * location immediately after construction.
+ * 
+ * @author Srimanth Gunturi
+ * 
+ */
+public abstract class ZooKeeperClient {
+
+	/**
+	 * Records the location of the ZooKeeper server this client will talk to.
+	 * Invoked once, before any other method on this client.
+	 * 
+	 * @param serverLocation
+	 *            location of the ZooKeeper server
+	 */
+	public abstract void initialize(String serverLocation);
+
+	/**
+	 * Reports whether this client currently has a live connection to the
+	 * server.
+	 * 
+	 * @return <code>true</code> if connected
+	 * @throws IOException
+	 *             on a communication failure
+	 * @throws InterruptedException
+	 *             if the calling thread is interrupted
+	 */
+	public abstract boolean isConnected() throws IOException, InterruptedException;
+
+	/**
+	 * Establishes a connection to the server location given to
+	 * {@link #initialize(String)}.
+	 * 
+	 * @throws IOException
+	 *             on a communication failure
+	 * @throws InterruptedException
+	 *             if the calling thread is interrupted
+	 */
+	public abstract void connect() throws IOException, InterruptedException;
+
+	/**
+	 * Lists the child znodes of the given znode.
+	 * 
+	 * @param path
+	 *            the parent znode
+	 * @return the children of the given znode
+	 * @throws IOException
+	 *             on a communication failure
+	 * @throws InterruptedException
+	 *             if the calling thread is interrupted
+	 */
+	public abstract List<ZNode> getChildren(ZNode path) throws IOException, InterruptedException;
+
+	/**
+	 * Closes the connection to the server, if one is open.
+	 * 
+	 * @throws IOException
+	 *             on a communication failure
+	 * @throws InterruptedException
+	 *             if the calling thread is interrupted
+	 */
+	public abstract void disconnect() throws IOException, InterruptedException;
+
+	/**
+	 * Deletes the given znode on the server.
+	 * 
+	 * @param zkn
+	 *            the znode to delete
+	 * @throws IOException
+	 *             on a communication failure
+	 * @throws InterruptedException
+	 *             if the calling thread is interrupted
+	 */
+	public abstract void delete(ZNode zkn) throws IOException, InterruptedException;
+
+	/**
+	 * Reads the given znode; presumably returns the node's stored data bytes
+	 * — confirm against implementations.
+	 * 
+	 * @param path
+	 *            the znode to read
+	 * @return the bytes read from the znode
+	 * @throws InterruptedException
+	 *             if the calling thread is interrupted
+	 * @throws IOException
+	 *             on a communication failure
+	 */
+	public abstract byte[] open(ZNode path) throws InterruptedException, IOException;
+}

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/63bec260/org.apache.hdt.feature/.project
----------------------------------------------------------------------
diff --git a/org.apache.hdt.feature/.project b/org.apache.hdt.feature/.project
new file mode 100644
index 0000000..bbf2949
--- /dev/null
+++ b/org.apache.hdt.feature/.project
@@ -0,0 +1,17 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- Eclipse project descriptor for the HDT feature project. Only the PDE
+     feature builder and nature apply, since this project contains no Java
+     code (no javabuilder/javanature entries). -->
+<projectDescription>
+	<name>org.apache.hdt.feature</name>
+	<comment></comment>
+	<projects>
+	</projects>
+	<buildSpec>
+		<buildCommand>
+			<name>org.eclipse.pde.FeatureBuilder</name>
+			<arguments>
+			</arguments>
+		</buildCommand>
+	</buildSpec>
+	<natures>
+		<nature>org.eclipse.pde.FeatureNature</nature>
+	</natures>
+</projectDescription>

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/63bec260/org.apache.hdt.feature/build.properties
----------------------------------------------------------------------
diff --git a/org.apache.hdt.feature/build.properties b/org.apache.hdt.feature/build.properties
new file mode 100644
index 0000000..64f93a9
--- /dev/null
+++ b/org.apache.hdt.feature/build.properties
@@ -0,0 +1 @@
+# PDE binary build: package only the feature manifest into the built feature.
+bin.includes = feature.xml

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/63bec260/org.apache.hdt.feature/feature.xml
----------------------------------------------------------------------
diff --git a/org.apache.hdt.feature/feature.xml b/org.apache.hdt.feature/feature.xml
new file mode 100644
index 0000000..96add65
--- /dev/null
+++ b/org.apache.hdt.feature/feature.xml
@@ -0,0 +1,68 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<feature
+      id="org.apache.hadoop.eclipse.feature"
+      label="Apache Hadoop Eclipse"
+      version="1.0.0.qualifier">
+
+   <!-- NOTE(review): the feature id above and the <plugin> ids below still use
+        the old org.apache.hadoop.eclipse namespace, while the containing
+        project is org.apache.hdt.feature. Confirm these match the actual
+        plug-in bundle symbolic names; otherwise the feature will not
+        resolve at build time. -->
+
+   <description url="http://people.apache.org/~srimanth/hadoop-eclipse">
+      Apache Hadoop Eclipse feature provides useful Hadoop capabilities from the Eclipse platform.
+   </description>
+
+   <copyright url="http://www.apache.org/licenses/LICENSE-2.0">
+      Copyright 2013 Srimanth Gunturi
+
+Licensed under the Apache License, Version 2.0 (the &quot;License&quot;);
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an &quot;AS IS&quot; BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
+or implied. See the License for the specific language governing
+permissions and limitations under the License.
+   </copyright>
+
+   <license url="http://www.apache.org/licenses/LICENSE-2.0">
+      Copyright 2013 Srimanth Gunturi
+Licensed under the Apache License, Version 2.0 (the &quot;License&quot;);
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+http://www.apache.org/licenses/LICENSE-2.0
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an &quot;AS IS&quot; BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
+or implied. See the License for the specific language governing
+permissions and limitations under the License.
+   </license>
+
+   <url>
+      <update label="Apache Hadoop Eclipse Update Site" url="http://people.apache.org/~srimanth/hadoop-eclipse/update-site"/>
+      <discovery label="Apache Hadoop Eclipse Update Site" url="http://people.apache.org/~srimanth/hadoop-eclipse/update-site/"/>
+      <discovery label="Apache Hadoop Eclipse WebSite" url="http://people.apache.org/~srimanth/hadoop-eclipse"/>
+   </url>
+
+   <!-- version="0.0.0" is replaced with the actual plug-in version when the
+        feature is built; unpack="false" keeps the plug-ins installed as jars. -->
+   <plugin
+         id="org.apache.hadoop.eclipse"
+         download-size="0"
+         install-size="0"
+         version="0.0.0"
+         unpack="false"/>
+
+   <plugin
+         id="org.apache.hadoop.eclipse.release"
+         download-size="0"
+         install-size="0"
+         version="0.0.0"
+         fragment="true"
+         unpack="false"/>
+
+   <plugin
+         id="org.apache.hadoop.eclipse.ui"
+         download-size="0"
+         install-size="0"
+         version="0.0.0"
+         unpack="false"/>
+
+</feature>

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/63bec260/org.apache.hdt.hadoop.release/.classpath
----------------------------------------------------------------------
diff --git a/org.apache.hdt.hadoop.release/.classpath b/org.apache.hdt.hadoop.release/.classpath
new file mode 100644
index 0000000..178956b
--- /dev/null
+++ b/org.apache.hdt.hadoop.release/.classpath
@@ -0,0 +1,25 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- Development-time Eclipse classpath for the org.apache.hdt.hadoop.release
+     plug-in: bundled ZooKeeper 3.4.5 and Hadoop 1.1.2.21 jars, plus the JRE
+     and PDE required-plugins containers.
+     NOTE(review): both hadoop-core-1.1.1.jar and hadoop-core-1.1.2.21.jar are
+     on the classpath; two versions of hadoop-core can cause class-resolution
+     conflicts. Confirm whether the 1.1.1 jar is intentional. -->
+<classpath>
+	<classpathentry exported="true" kind="lib" path="lib/zookeeper-3.4.5/jline-0.9.94.jar"/>
+	<classpathentry exported="true" kind="lib" path="lib/zookeeper-3.4.5/log4j-1.2.15.jar"/>
+	<classpathentry exported="true" kind="lib" path="lib/zookeeper-3.4.5/netty-3.2.2.Final.jar"/>
+	<classpathentry exported="true" kind="lib" path="lib/zookeeper-3.4.5/slf4j-api-1.6.1.jar"/>
+	<classpathentry exported="true" kind="lib" path="lib/zookeeper-3.4.5/slf4j-log4j12-1.6.1.jar"/>
+	<classpathentry exported="true" kind="lib" path="lib/zookeeper-3.4.5/zookeeper-3.4.5.21.jar"/>
+	<classpathentry exported="true" kind="lib" path="lib/hadoop-1.1.2.21/commons-configuration-1.6.jar"/>
+	<classpathentry exported="true" kind="lib" path="lib/hadoop-1.1.2.21/commons-lang-2.4.jar"/>
+	<classpathentry exported="true" kind="lib" path="lib/hadoop-1.1.2.21/commons-logging-1.1.1.jar"/>
+	<classpathentry exported="true" kind="lib" path="lib/hadoop-1.1.2.21/commons-logging-api-1.0.4.jar"/>
+	<classpathentry exported="true" kind="lib" path="lib/hadoop-1.1.2.21/hadoop-ant-1.1.2.21.jar"/>
+	<classpathentry exported="true" kind="lib" path="lib/hadoop-1.1.2.21/hadoop-client-1.1.2.21.jar"/>
+	<classpathentry exported="true" kind="lib" path="lib/hadoop-1.1.2.21/hadoop-core-1.1.1.jar" sourcepath="/release-1.1.2-rc5"/>
+	<classpathentry exported="true" kind="lib" path="lib/hadoop-1.1.2.21/hadoop-core-1.1.2.21.jar" sourcepath="/release-1.1.2-rc5/src"/>
+	<classpathentry exported="true" kind="lib" path="lib/hadoop-1.1.2.21/hadoop-examples-1.1.2.21.jar"/>
+	<classpathentry exported="true" kind="lib" path="lib/hadoop-1.1.2.21/hadoop-minicluster-1.1.2.21.jar"/>
+	<classpathentry exported="true" kind="lib" path="lib/hadoop-1.1.2.21/hadoop-test-1.1.2.21.jar"/>
+	<classpathentry exported="true" kind="lib" path="lib/hadoop-1.1.2.21/hadoop-tools-1.1.2.21.jar"/>
+	<classpathentry kind="con" path="org.eclipse.jdt.launching.JRE_CONTAINER"/>
+	<classpathentry kind="con" path="org.eclipse.pde.core.requiredPlugins"/>
+	<classpathentry kind="src" path="src"/>
+	<classpathentry kind="output" path="bin"/>
+</classpath>

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/63bec260/org.apache.hdt.hadoop.release/.project
----------------------------------------------------------------------
diff --git a/org.apache.hdt.hadoop.release/.project b/org.apache.hdt.hadoop.release/.project
new file mode 100644
index 0000000..1759ed6
--- /dev/null
+++ b/org.apache.hdt.hadoop.release/.project
@@ -0,0 +1,28 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- Eclipse project descriptor for the org.apache.hdt.hadoop.release
+     plug-in: a PDE plug-in project with Java sources, so it carries the
+     Java builder plus the PDE manifest and schema builders. -->
+<projectDescription>
+	<name>org.apache.hdt.hadoop.release</name>
+	<comment></comment>
+	<projects>
+	</projects>
+	<buildSpec>
+		<buildCommand>
+			<name>org.eclipse.jdt.core.javabuilder</name>
+			<arguments>
+			</arguments>
+		</buildCommand>
+		<buildCommand>
+			<name>org.eclipse.pde.ManifestBuilder</name>
+			<arguments>
+			</arguments>
+		</buildCommand>
+		<buildCommand>
+			<name>org.eclipse.pde.SchemaBuilder</name>
+			<arguments>
+			</arguments>
+		</buildCommand>
+	</buildSpec>
+	<natures>
+		<nature>org.eclipse.pde.PluginNature</nature>
+		<nature>org.eclipse.jdt.core.javanature</nature>
+	</natures>
+</projectDescription>


Mime
View raw message