hadoop-common-commits mailing list archives

From whe...@apache.org
Subject [2/2] hadoop git commit: HDFS-8218. Move classes that are used by ClientProtocol into hdfs-client. Contributed by Haohui Mai.
Date Wed, 22 Apr 2015 18:33:39 GMT
HDFS-8218. Move classes that are used by ClientProtocol into hdfs-client. Contributed by Haohui Mai.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/12f4df04
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/12f4df04
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/12f4df04

Branch: refs/heads/trunk
Commit: 12f4df043fb6922c6ce1c470a2e020b4111f8739
Parents: 252e69f
Author: Haohui Mai <wheat9@apache.org>
Authored: Wed Apr 22 09:10:12 2015 -0700
Committer: Haohui Mai <wheat9@apache.org>
Committed: Wed Apr 22 11:33:21 2015 -0700

----------------------------------------------------------------------
 .../dev-support/findbugsExcludeFile.xml         |   7 +
 .../java/org/apache/hadoop/fs/CacheFlag.java    |  44 ++
 .../main/java/org/apache/hadoop/fs/XAttr.java   | 163 ++++++
 .../org/apache/hadoop/hdfs/inotify/Event.java   | 545 +++++++++++++++++++
 .../apache/hadoop/hdfs/inotify/EventBatch.java  |  41 ++
 .../hadoop/hdfs/inotify/EventBatchList.java     |  63 +++
 .../protocol/AlreadyBeingCreatedException.java  |  37 ++
 .../hdfs/protocol/BlockStoragePolicy.java       | 271 +++++++++
 .../hadoop/hdfs/protocol/CorruptFileBlocks.java |  75 +++
 .../hdfs/protocol/DSQuotaExceededException.java |  51 ++
 .../hadoop/hdfs/protocol/DatanodeLocalInfo.java |  64 +++
 .../hadoop/hdfs/protocol/DirectoryListing.java  |  85 +++
 .../hadoop/hdfs/protocol/EncryptionZone.java    | 110 ++++
 .../hdfs/protocol/LastBlockWithStatus.java      |  46 ++
 .../hdfs/protocol/NSQuotaExceededException.java |  60 ++
 .../hdfs/protocol/QuotaExceededException.java   |  65 +++
 .../hdfs/protocol/RollingUpgradeInfo.java       | 139 +++++
 .../hdfs/protocol/RollingUpgradeStatus.java     |  66 +++
 .../SnapshotAccessControlException.java         |  33 ++
 .../security/token/block/DataEncryptionKey.java |  50 ++
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |   3 +
 .../java/org/apache/hadoop/fs/CacheFlag.java    |  44 --
 .../main/java/org/apache/hadoop/fs/XAttr.java   | 163 ------
 .../org/apache/hadoop/hdfs/inotify/Event.java   | 545 -------------------
 .../apache/hadoop/hdfs/inotify/EventBatch.java  |  41 --
 .../hadoop/hdfs/inotify/EventBatchList.java     |  63 ---
 .../protocol/AlreadyBeingCreatedException.java  |  37 --
 .../hdfs/protocol/BlockStoragePolicy.java       | 271 ---------
 .../hadoop/hdfs/protocol/CorruptFileBlocks.java |  75 ---
 .../hdfs/protocol/DSQuotaExceededException.java |  51 --
 .../hadoop/hdfs/protocol/DatanodeLocalInfo.java |  64 ---
 .../hadoop/hdfs/protocol/DirectoryListing.java  |  85 ---
 .../hadoop/hdfs/protocol/EncryptionZone.java    | 110 ----
 .../hdfs/protocol/LastBlockWithStatus.java      |  46 --
 .../hdfs/protocol/NSQuotaExceededException.java |  60 --
 .../hdfs/protocol/QuotaExceededException.java   |  65 ---
 .../hdfs/protocol/RollingUpgradeInfo.java       | 139 -----
 .../hdfs/protocol/RollingUpgradeStatus.java     |  66 ---
 .../SnapshotAccessControlException.java         |  33 --
 .../security/token/block/DataEncryptionKey.java |  50 --
 40 files changed, 2018 insertions(+), 2008 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/12f4df04/hadoop-hdfs-project/hadoop-hdfs-client/dev-support/findbugsExcludeFile.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/dev-support/findbugsExcludeFile.xml b/hadoop-hdfs-project/hadoop-hdfs-client/dev-support/findbugsExcludeFile.xml
index 478a931..7aade70 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/dev-support/findbugsExcludeFile.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/dev-support/findbugsExcludeFile.xml
@@ -1,9 +1,16 @@
 <FindBugsFilter>
   <Match>
     <Or>
+      <Class name="org.apache.hadoop.fs.XAttr"/>
+      <Class name="org.apache.hadoop.fs.XAttr$Builder"/>
+      <Class name="org.apache.hadoop.hdfs.inotify.EventBatch"/>
       <Class name="org.apache.hadoop.hdfs.protocol.HdfsFileStatus"/>
       <Class name="org.apache.hadoop.hdfs.protocol.LocatedBlock"/>
+      <Class name="org.apache.hadoop.hdfs.protocol.BlockStoragePolicy"/>
+      <Class name="org.apache.hadoop.hdfs.protocol.CorruptFileBlocks"/>
+      <Class name="org.apache.hadoop.hdfs.protocol.DirectoryListing"/>
       <Class name="org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier"/>
+      <Class name="org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey"/>
     </Or>
     <Bug pattern="EI_EXPOSE_REP,EI_EXPOSE_REP2" />
   </Match>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/12f4df04/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/fs/CacheFlag.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/fs/CacheFlag.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/fs/CacheFlag.java
new file mode 100644
index 0000000..f76fcaa
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/fs/CacheFlag.java
@@ -0,0 +1,44 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+/**
+ * Specifies semantics for CacheDirective operations. Multiple flags can
+ * be combined in an EnumSet.
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public enum CacheFlag {
+
+  /**
+   * Ignore cache pool resource limits when performing this operation.
+   */
+  FORCE((short) 0x01);
+  private final short mode;
+
+  private CacheFlag(short mode) {
+    this.mode = mode;
+  }
+
+  short getMode() {
+    return mode;
+  }
+}
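
For illustration only (not part of this commit): a minimal sketch of combining
the flag in an EnumSet when adding a cache directive. The
DistributedFileSystem.addCacheDirective call and the path/pool names are
assumptions, not taken from this diff.

    import java.util.EnumSet;

    import org.apache.hadoop.fs.CacheFlag;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;
    import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;

    // Sketch: cache a file even if the pool is at its resource limit.
    static void forceCache(DistributedFileSystem dfs) throws java.io.IOException {
      CacheDirectiveInfo directive = new CacheDirectiveInfo.Builder()
          .setPath(new Path("/data/hot-table"))   // hypothetical path
          .setPool("analytics")                   // hypothetical pool name
          .build();
      dfs.addCacheDirective(directive, EnumSet.of(CacheFlag.FORCE));
    }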

http://git-wip-us.apache.org/repos/asf/hadoop/blob/12f4df04/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/fs/XAttr.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/fs/XAttr.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/fs/XAttr.java
new file mode 100644
index 0000000..f688c91
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/fs/XAttr.java
@@ -0,0 +1,163 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs;
+
+import java.util.Arrays;
+
+import org.apache.commons.lang.builder.EqualsBuilder;
+import org.apache.commons.lang.builder.HashCodeBuilder;
+import org.apache.hadoop.classification.InterfaceAudience;
+
+/**
+ * XAttr is the POSIX Extended Attribute model, similar to that found in
+ * traditional operating systems.  Extended Attributes consist of one
+ * or more name/value pairs associated with a file or directory. Five
+ * namespaces are defined: user, trusted, security, system and raw.
+ *   1) USER namespace attributes may be used by any user to store
+ *   arbitrary information. Access permissions in this namespace are
+ *   defined by a file or directory's permission bits. For sticky directories,
+ *   only the owner and privileged user can write attributes.
+ * <br>
+ *   2) TRUSTED namespace attributes are only visible and accessible to
+ *   privileged users. This namespace is available from both user space
+ *   (filesystem API) and fs kernel.
+ * <br>
+ *   3) SYSTEM namespace attributes are used by the fs kernel to store
+ *   system objects.  This namespace is only available in the fs
+ *   kernel. It is not visible to users.
+ * <br>
+ *   4) SECURITY namespace attributes are used by the fs kernel for
+ *   security features. This namespace is not visible to users.
+ * <br>
+ *   5) RAW namespace attributes are used for internal system attributes that
+ *   sometimes need to be exposed. Like SYSTEM namespace attributes they are
+ *   not visible to the user except when getXAttr/getXAttrs is called on a file
+ *   or directory in the /.reserved/raw HDFS directory hierarchy.  These
+ *   attributes can only be accessed by the superuser.
+ * <p/>
+ * @see <a href="http://en.wikipedia.org/wiki/Extended_file_attributes">
+ * http://en.wikipedia.org/wiki/Extended_file_attributes</a>
+ *
+ */
+@InterfaceAudience.Private
+public class XAttr {
+
+  public static enum NameSpace {
+    USER,
+    TRUSTED,
+    SECURITY,
+    SYSTEM,
+    RAW;
+  }
+
+  private final NameSpace ns;
+  private final String name;
+  private final byte[] value;
+
+  public static class Builder {
+    private NameSpace ns = NameSpace.USER;
+    private String name;
+    private byte[] value;
+
+    public Builder setNameSpace(NameSpace ns) {
+      this.ns = ns;
+      return this;
+    }
+
+    public Builder setName(String name) {
+      this.name = name;
+      return this;
+    }
+
+    public Builder setValue(byte[] value) {
+      this.value = value;
+      return this;
+    }
+
+    public XAttr build() {
+      return new XAttr(ns, name, value);
+    }
+  }
+
+  private XAttr(NameSpace ns, String name, byte[] value) {
+    this.ns = ns;
+    this.name = name;
+    this.value = value;
+  }
+
+  public NameSpace getNameSpace() {
+    return ns;
+  }
+
+  public String getName() {
+    return name;
+  }
+
+  public byte[] getValue() {
+    return value;
+  }
+
+  @Override
+  public int hashCode() {
+    return new HashCodeBuilder(811, 67)
+        .append(name)
+        .append(ns)
+        .append(value)
+        .toHashCode();
+  }
+
+  @Override
+  public boolean equals(Object obj) {
+    if (obj == null) { return false; }
+    if (obj == this) { return true; }
+    if (obj.getClass() != getClass()) {
+      return false;
+    }
+    XAttr rhs = (XAttr) obj;
+    return new EqualsBuilder()
+        .append(ns, rhs.ns)
+        .append(name, rhs.name)
+        .append(value, rhs.value)
+        .isEquals();
+  }
+
+  /**
+   * Similar to {@link #equals(Object)}, except ignores the XAttr value.
+   *
+   * @param obj to compare equality
+   * @return if the XAttrs are equal, ignoring the XAttr value
+   */
+  public boolean equalsIgnoreValue(Object obj) {
+    if (obj == null) { return false; }
+    if (obj == this) { return true; }
+    if (obj.getClass() != getClass()) {
+      return false;
+    }
+    XAttr rhs = (XAttr) obj;
+    return new EqualsBuilder()
+        .append(ns, rhs.ns)
+        .append(name, rhs.name)
+        .isEquals();
+  }
+
+  @Override
+  public String toString() {
+    return "XAttr [ns=" + ns + ", name=" + name + ", value="
+        + Arrays.toString(value) + "]";
+  }
+}
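
A quick sketch (illustration only) of the Builder and of equalsIgnoreValue,
using nothing beyond the class above; the attribute name and value are made up.

    import org.apache.hadoop.fs.XAttr;

    // Two attributes with the same namespace and name but different values.
    static void compareXAttrs() {
      XAttr a = new XAttr.Builder()
          .setNameSpace(XAttr.NameSpace.USER)
          .setName("project")
          .setValue(new byte[] {1, 2, 3})
          .build();
      XAttr b = new XAttr.Builder()
          .setNameSpace(XAttr.NameSpace.USER)
          .setName("project")
          .build();                              // value left null

      boolean fullyEqual = a.equals(b);           // false: values differ
      boolean sameKey = a.equalsIgnoreValue(b);   // true: namespace and name match
    }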

http://git-wip-us.apache.org/repos/asf/hadoop/blob/12f4df04/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/inotify/Event.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/inotify/Event.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/inotify/Event.java
new file mode 100644
index 0000000..53eefa0
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/inotify/Event.java
@@ -0,0 +1,545 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdfs.inotify;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.fs.XAttr;
+import org.apache.hadoop.fs.permission.AclEntry;
+import org.apache.hadoop.fs.permission.FsPermission;
+
+import java.util.List;
+
+/**
+ * Events sent by the inotify system. Note that no events are necessarily sent
+ * when a file is opened for read (although a MetadataUpdateEvent will be sent
+ * if the atime is updated).
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Unstable
+public abstract class Event {
+  public static enum EventType {
+    CREATE, CLOSE, APPEND, RENAME, METADATA, UNLINK
+  }
+
+  private EventType eventType;
+
+  public EventType getEventType() {
+    return eventType;
+  }
+
+  public Event(EventType eventType) {
+    this.eventType = eventType;
+  }
+
+  /**
+   * Sent when a file is closed after append or create.
+   */
+  public static class CloseEvent extends Event {
+    private String path;
+    private long fileSize;
+    private long timestamp;
+
+    public CloseEvent(String path, long fileSize, long timestamp) {
+      super(EventType.CLOSE);
+      this.path = path;
+      this.fileSize = fileSize;
+      this.timestamp = timestamp;
+    }
+
+    public String getPath() {
+      return path;
+    }
+
+    /**
+     * The size of the closed file in bytes. May be -1 if the size is not
+     * available (e.g. in the case of a close generated by a concat operation).
+     */
+    public long getFileSize() {
+      return fileSize;
+    }
+
+    /**
+     * The time when this event occurred, in milliseconds since the epoch.
+     */
+    public long getTimestamp() {
+      return timestamp;
+    }
+  }
+
+  /**
+   * Sent when a new file is created (including overwrite).
+   */
+  public static class CreateEvent extends Event {
+
+    public static enum INodeType {
+      FILE, DIRECTORY, SYMLINK;
+    }
+
+    private INodeType iNodeType;
+    private String path;
+    private long ctime;
+    private int replication;
+    private String ownerName;
+    private String groupName;
+    private FsPermission perms;
+    private String symlinkTarget;
+    private boolean overwrite;
+    private long defaultBlockSize;
+
+    public static class Builder {
+      private INodeType iNodeType;
+      private String path;
+      private long ctime;
+      private int replication;
+      private String ownerName;
+      private String groupName;
+      private FsPermission perms;
+      private String symlinkTarget;
+      private boolean overwrite;
+      private long defaultBlockSize = 0;
+
+      public Builder iNodeType(INodeType type) {
+        this.iNodeType = type;
+        return this;
+      }
+
+      public Builder path(String path) {
+        this.path = path;
+        return this;
+      }
+
+      public Builder ctime(long ctime) {
+        this.ctime = ctime;
+        return this;
+      }
+
+      public Builder replication(int replication) {
+        this.replication = replication;
+        return this;
+      }
+
+      public Builder ownerName(String ownerName) {
+        this.ownerName = ownerName;
+        return this;
+      }
+
+      public Builder groupName(String groupName) {
+        this.groupName = groupName;
+        return this;
+      }
+
+      public Builder perms(FsPermission perms) {
+        this.perms = perms;
+        return this;
+      }
+
+      public Builder symlinkTarget(String symlinkTarget) {
+        this.symlinkTarget = symlinkTarget;
+        return this;
+      }
+
+      public Builder overwrite(boolean overwrite) {
+        this.overwrite = overwrite;
+        return this;
+      }
+
+      public Builder defaultBlockSize(long defaultBlockSize) {
+        this.defaultBlockSize = defaultBlockSize;
+        return this;
+      }
+
+      public CreateEvent build() {
+        return new CreateEvent(this);
+      }
+    }
+
+    private CreateEvent(Builder b) {
+      super(EventType.CREATE);
+      this.iNodeType = b.iNodeType;
+      this.path = b.path;
+      this.ctime = b.ctime;
+      this.replication = b.replication;
+      this.ownerName = b.ownerName;
+      this.groupName = b.groupName;
+      this.perms = b.perms;
+      this.symlinkTarget = b.symlinkTarget;
+      this.overwrite = b.overwrite;
+      this.defaultBlockSize = b.defaultBlockSize;
+    }
+
+    public INodeType getiNodeType() {
+      return iNodeType;
+    }
+
+    public String getPath() {
+      return path;
+    }
+
+    /**
+     * Creation time of the file, directory, or symlink.
+     */
+    public long getCtime() {
+      return ctime;
+    }
+
+    /**
+     * Replication is zero if the CreateEvent iNodeType is directory or symlink.
+     */
+    public int getReplication() {
+      return replication;
+    }
+
+    public String getOwnerName() {
+      return ownerName;
+    }
+
+    public String getGroupName() {
+      return groupName;
+    }
+
+    public FsPermission getPerms() {
+      return perms;
+    }
+
+    /**
+     * Symlink target is null if the CreateEvent iNodeType is not symlink.
+     */
+    public String getSymlinkTarget() {
+      return symlinkTarget;
+    }
+
+    public boolean getOverwrite() {
+      return overwrite;
+    }
+
+    public long getDefaultBlockSize() {
+      return defaultBlockSize;
+    }
+  }
+
+  /**
+   * Sent when there is an update to a directory or file (none of the metadata
+   * tracked here applies to symlinks) that is not associated with another
+   * inotify event. The tracked metadata includes atime/mtime, replication,
+   * owner/group, permissions, ACLs, and XAttributes. Fields not relevant to
+   * the metadataType of the MetadataUpdateEvent will be null or will have
+   * their default values.
+   */
+  public static class MetadataUpdateEvent extends Event {
+
+    public static enum MetadataType {
+      TIMES, REPLICATION, OWNER, PERMS, ACLS, XATTRS;
+    }
+
+    private String path;
+    private MetadataType metadataType;
+    private long mtime;
+    private long atime;
+    private int replication;
+    private String ownerName;
+    private String groupName;
+    private FsPermission perms;
+    private List<AclEntry> acls;
+    private List<XAttr> xAttrs;
+    private boolean xAttrsRemoved;
+
+    public static class Builder {
+      private String path;
+      private MetadataType metadataType;
+      private long mtime;
+      private long atime;
+      private int replication;
+      private String ownerName;
+      private String groupName;
+      private FsPermission perms;
+      private List<AclEntry> acls;
+      private List<XAttr> xAttrs;
+      private boolean xAttrsRemoved;
+
+      public Builder path(String path) {
+        this.path = path;
+        return this;
+      }
+
+      public Builder metadataType(MetadataType type) {
+        this.metadataType = type;
+        return this;
+      }
+
+      public Builder mtime(long mtime) {
+        this.mtime = mtime;
+        return this;
+      }
+
+      public Builder atime(long atime) {
+        this.atime = atime;
+        return this;
+      }
+
+      public Builder replication(int replication) {
+        this.replication = replication;
+        return this;
+      }
+
+      public Builder ownerName(String ownerName) {
+        this.ownerName = ownerName;
+        return this;
+      }
+
+      public Builder groupName(String groupName) {
+        this.groupName = groupName;
+        return this;
+      }
+
+      public Builder perms(FsPermission perms) {
+        this.perms = perms;
+        return this;
+      }
+
+      public Builder acls(List<AclEntry> acls) {
+        this.acls = acls;
+        return this;
+      }
+
+      public Builder xAttrs(List<XAttr> xAttrs) {
+        this.xAttrs = xAttrs;
+        return this;
+      }
+
+      public Builder xAttrsRemoved(boolean xAttrsRemoved) {
+        this.xAttrsRemoved = xAttrsRemoved;
+        return this;
+      }
+
+      public MetadataUpdateEvent build() {
+        return new MetadataUpdateEvent(this);
+      }
+    }
+
+    private MetadataUpdateEvent(Builder b) {
+      super(EventType.METADATA);
+      this.path = b.path;
+      this.metadataType = b.metadataType;
+      this.mtime = b.mtime;
+      this.atime = b.atime;
+      this.replication = b.replication;
+      this.ownerName = b.ownerName;
+      this.groupName = b.groupName;
+      this.perms = b.perms;
+      this.acls = b.acls;
+      this.xAttrs = b.xAttrs;
+      this.xAttrsRemoved = b.xAttrsRemoved;
+    }
+
+    public String getPath() {
+      return path;
+    }
+
+    public MetadataType getMetadataType() {
+      return metadataType;
+    }
+
+    public long getMtime() {
+      return mtime;
+    }
+
+    public long getAtime() {
+      return atime;
+    }
+
+    public int getReplication() {
+      return replication;
+    }
+
+    public String getOwnerName() {
+      return ownerName;
+    }
+
+    public String getGroupName() {
+      return groupName;
+    }
+
+    public FsPermission getPerms() {
+      return perms;
+    }
+
+    /**
+     * The full set of ACLs currently associated with this file or directory.
+     * May be null if all ACLs were removed.
+     */
+    public List<AclEntry> getAcls() {
+      return acls;
+    }
+
+    public List<XAttr> getxAttrs() {
+      return xAttrs;
+    }
+
+    /**
+     * Whether the xAttrs returned by getxAttrs() were removed (as opposed to
+     * added).
+     */
+    public boolean isxAttrsRemoved() {
+      return xAttrsRemoved;
+    }
+
+  }
+
+  /**
+   * Sent when a file, directory, or symlink is renamed.
+   */
+  public static class RenameEvent extends Event {
+    private String srcPath;
+    private String dstPath;
+    private long timestamp;
+
+    public static class Builder {
+      private String srcPath;
+      private String dstPath;
+      private long timestamp;
+
+      public Builder srcPath(String srcPath) {
+        this.srcPath = srcPath;
+        return this;
+      }
+
+      public Builder dstPath(String dstPath) {
+        this.dstPath = dstPath;
+        return this;
+      }
+
+      public Builder timestamp(long timestamp) {
+        this.timestamp = timestamp;
+        return this;
+      }
+
+      public RenameEvent build() {
+        return new RenameEvent(this);
+      }
+    }
+
+    private RenameEvent(Builder builder) {
+      super(EventType.RENAME);
+      this.srcPath = builder.srcPath;
+      this.dstPath = builder.dstPath;
+      this.timestamp = builder.timestamp;
+    }
+
+    public String getSrcPath() {
+      return srcPath;
+    }
+
+    public String getDstPath() {
+      return dstPath;
+    }
+
+    /**
+     * The time when this event occurred, in milliseconds since the epoch.
+     */
+    public long getTimestamp() {
+      return timestamp;
+    }
+  }
+
+  /**
+   * Sent when an existing file is opened for append.
+   */
+  public static class AppendEvent extends Event {
+    private String path;
+    private boolean newBlock;
+
+    public static class Builder {
+      private String path;
+      private boolean newBlock;
+
+      public Builder path(String path) {
+        this.path = path;
+        return this;
+      }
+
+      public Builder newBlock(boolean newBlock) {
+        this.newBlock = newBlock;
+        return this;
+      }
+
+      public AppendEvent build() {
+        return new AppendEvent(this);
+      }
+    }
+
+    private AppendEvent(Builder b) {
+      super(EventType.APPEND);
+      this.path = b.path;
+      this.newBlock = b.newBlock;
+    }
+
+    public String getPath() {
+      return path;
+    }
+
+    public boolean toNewBlock() {
+      return newBlock;
+    }
+  }
+
+  /**
+   * Sent when a file, directory, or symlink is deleted.
+   */
+  public static class UnlinkEvent extends Event {
+    private String path;
+    private long timestamp;
+
+    public static class Builder {
+      private String path;
+      private long timestamp;
+
+      public Builder path(String path) {
+        this.path = path;
+        return this;
+      }
+
+      public Builder timestamp(long timestamp) {
+        this.timestamp = timestamp;
+        return this;
+      }
+
+      public UnlinkEvent build() {
+        return new UnlinkEvent(this);
+      }
+    }
+
+    private UnlinkEvent(Builder builder) {
+      super(EventType.UNLINK);
+      this.path = builder.path;
+      this.timestamp = builder.timestamp;
+    }
+
+    public String getPath() {
+      return path;
+    }
+
+    /**
+     * The time when this event occurred, in milliseconds since the epoch.
+     */
+    public long getTimestamp() {
+      return timestamp;
+    }
+  }
+}
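
Since Event is an abstract base with one subclass per EventType, a consumer
typically switches on getEventType() and downcasts. An illustrative sketch
using only the classes in this diff:

    import org.apache.hadoop.hdfs.inotify.Event;
    import org.apache.hadoop.hdfs.inotify.EventBatch;

    // Dispatch on the concrete event type within one batch; all events in
    // the batch share the same transaction id.
    static void handle(EventBatch batch) {
      System.out.println("txid " + batch.getTxid());
      for (Event e : batch.getEvents()) {
        switch (e.getEventType()) {
        case CREATE:
          Event.CreateEvent ce = (Event.CreateEvent) e;
          System.out.println("created " + ce.getPath());
          break;
        case RENAME:
          Event.RenameEvent re = (Event.RenameEvent) e;
          System.out.println(re.getSrcPath() + " -> " + re.getDstPath());
          break;
        default:
          break;
        }
      }
    }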

http://git-wip-us.apache.org/repos/asf/hadoop/blob/12f4df04/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/inotify/EventBatch.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/inotify/EventBatch.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/inotify/EventBatch.java
new file mode 100644
index 0000000..0ad1070
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/inotify/EventBatch.java
@@ -0,0 +1,41 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdfs.inotify;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+
+/**
+ * A batch of events that all happened on the same transaction ID.
+ */
+@InterfaceAudience.Public
+public class EventBatch {
+  private final long txid;
+  private final Event[] events;
+
+  public EventBatch(long txid, Event[] events) {
+    this.txid = txid;
+    this.events = events;
+  }
+
+  public long getTxid() {
+    return txid;
+  }
+
+  public Event[] getEvents() { return events; }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/12f4df04/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/inotify/EventBatchList.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/inotify/EventBatchList.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/inotify/EventBatchList.java
new file mode 100644
index 0000000..9c97038
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/inotify/EventBatchList.java
@@ -0,0 +1,63 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdfs.inotify;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+
+import java.util.List;
+
+/**
+ * Contains a list of event batches, the transaction ID in the edit log up to
+ * which we read to produce these events, and the first txid we observed when
+ * producing these events (the latter is used to determine whether we have
+ * missed events due to edit log deletion). Also contains the most recent
+ * txid that the NameNode has sync'ed, so the client can determine how far
+ * behind in the edit log it is.
+ */
+@InterfaceAudience.Private
+public class EventBatchList {
+  private List<EventBatch> batches;
+  private long firstTxid;
+  private long lastTxid;
+  private long syncTxid;
+
+  public EventBatchList(List<EventBatch> batches, long firstTxid,
+                         long lastTxid, long syncTxid) {
+    this.batches = batches;
+    this.firstTxid = firstTxid;
+    this.lastTxid = lastTxid;
+    this.syncTxid = syncTxid;
+  }
+
+  public List<EventBatch> getBatches() {
+    return batches;
+  }
+
+  public long getFirstTxid() {
+    return firstTxid;
+  }
+
+  public long getLastTxid() {
+    return lastTxid;
+  }
+
+  public long getSyncTxid() {
+    return syncTxid;
+  }
+}
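
One reading of the javadoc above, as a sketch: a client that tracks the last
txid it processed can use getFirstTxid() to detect purged edits. The exact
check the HDFS client performs may differ; this is an assumption.

    import org.apache.hadoop.hdfs.inotify.EventBatchList;

    // lastReadTxid is the highest txid this client has already processed; if
    // the first txid returned is not the next one expected, the intervening
    // edits were deleted and events were missed.
    static boolean missedEvents(EventBatchList list, long lastReadTxid) {
      return !list.getBatches().isEmpty()
          && list.getFirstTxid() != lastReadTxid + 1;
    }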

http://git-wip-us.apache.org/repos/asf/hadoop/blob/12f4df04/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/AlreadyBeingCreatedException.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/AlreadyBeingCreatedException.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/AlreadyBeingCreatedException.java
new file mode 100644
index 0000000..cfa4f10
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/AlreadyBeingCreatedException.java
@@ -0,0 +1,37 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdfs.protocol;
+
+import java.io.IOException;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+/**
+ * The exception that is thrown when a request attempts to create a file that
+ * is already being created but has not yet been closed.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public class AlreadyBeingCreatedException extends IOException {
+  static final long serialVersionUID = 0x12308AD009L;
+  public AlreadyBeingCreatedException(String msg) {
+    super(msg);
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/12f4df04/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/BlockStoragePolicy.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/BlockStoragePolicy.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/BlockStoragePolicy.java
new file mode 100644
index 0000000..9ecf6e8
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/BlockStoragePolicy.java
@@ -0,0 +1,271 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdfs.protocol;
+
+import java.util.Arrays;
+import java.util.EnumSet;
+import java.util.LinkedList;
+import java.util.List;
+
+import com.google.common.annotations.VisibleForTesting;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.fs.StorageType;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * A block storage policy describes how to select the storage types
+ * for the replicas of a block.
+ */
+@InterfaceAudience.Private
+public class BlockStoragePolicy {
+  public static final Logger LOG = LoggerFactory.getLogger(BlockStoragePolicy
+      .class);
+
+  /** A 4-bit policy ID */
+  private final byte id;
+  /** Policy name */
+  private final String name;
+
+  /** The storage types to store the replicas of a new block. */
+  private final StorageType[] storageTypes;
+  /** The fallback storage types for block creation. */
+  private final StorageType[] creationFallbacks;
+  /** The fallback storage types for replication. */
+  private final StorageType[] replicationFallbacks;
+  /**
+   * Whether the policy is inherited during file creation.
+   * If set then the policy cannot be changed after file creation.
+   */
+  private boolean copyOnCreateFile;
+
+  @VisibleForTesting
+  public BlockStoragePolicy(byte id, String name, StorageType[] storageTypes,
+      StorageType[] creationFallbacks, StorageType[] replicationFallbacks) {
+    this(id, name, storageTypes, creationFallbacks, replicationFallbacks,
+         false);
+  }
+
+  @VisibleForTesting
+  public BlockStoragePolicy(byte id, String name, StorageType[] storageTypes,
+      StorageType[] creationFallbacks, StorageType[] replicationFallbacks,
+      boolean copyOnCreateFile) {
+    this.id = id;
+    this.name = name;
+    this.storageTypes = storageTypes;
+    this.creationFallbacks = creationFallbacks;
+    this.replicationFallbacks = replicationFallbacks;
+    this.copyOnCreateFile = copyOnCreateFile;
+  }
+
+  /**
+   * @return a list of {@link StorageType}s for storing the replicas of a block.
+   */
+  public List<StorageType> chooseStorageTypes(final short replication) {
+    final List<StorageType> types = new LinkedList<StorageType>();
+    int i = 0, j = 0;
+
+    // Do not return transient storage types. We will not have accurate
+    // usage information for transient types.
+    for (; i < replication && j < storageTypes.length; ++j) {
+      if (!storageTypes[j].isTransient()) {
+        types.add(storageTypes[j]);
+        ++i;
+      }
+    }
+
+    final StorageType last = storageTypes[storageTypes.length - 1];
+    if (!last.isTransient()) {
+      for (; i < replication; i++) {
+        types.add(last);
+      }
+    }
+    return types;
+  }
+
+  /**
+   * Choose the storage types for storing the remaining replicas, given the
+   * replication number and the storage types of the chosen replicas.
+   *
+   * @param replication the replication number.
+   * @param chosen the storage types of the chosen replicas.
+   * @return a list of {@link StorageType}s for storing the replicas of a block.
+   */
+  public List<StorageType> chooseStorageTypes(final short replication,
+      final Iterable<StorageType> chosen) {
+    return chooseStorageTypes(replication, chosen, null);
+  }
+
+  private List<StorageType> chooseStorageTypes(final short replication,
+      final Iterable<StorageType> chosen, final List<StorageType> excess) {
+    final List<StorageType> types = chooseStorageTypes(replication);
+    diff(types, chosen, excess);
+    return types;
+  }
+
+  /**
+   * Choose the storage types for storing the remaining replicas, given the
+   * replication number, the storage types of the chosen replicas and
+   * the unavailable storage types. It uses fallback storage in case that
+   * the desired storage type is unavailable.
+   *
+   * @param replication the replication number.
+   * @param chosen the storage types of the chosen replicas.
+   * @param unavailables the unavailable storage types.
+   * @param isNewBlock Is it for new block creation?
+   * @return a list of {@link StorageType}s for storing the replicas of a block.
+   */
+  public List<StorageType> chooseStorageTypes(final short replication,
+      final Iterable<StorageType> chosen,
+      final EnumSet<StorageType> unavailables,
+      final boolean isNewBlock) {
+    final List<StorageType> excess = new LinkedList<StorageType>();
+    final List<StorageType> storageTypes = chooseStorageTypes(
+        replication, chosen, excess);
+    final int expectedSize = storageTypes.size() - excess.size();
+    final List<StorageType> removed = new LinkedList<StorageType>();
+    for(int i = storageTypes.size() - 1; i >= 0; i--) {
+      // replace/remove unavailable storage types.
+      final StorageType t = storageTypes.get(i);
+      if (unavailables.contains(t)) {
+        final StorageType fallback = isNewBlock
+            ? getCreationFallback(unavailables)
+            : getReplicationFallback(unavailables);
+        if (fallback == null) {
+          removed.add(storageTypes.remove(i));
+        } else {
+          storageTypes.set(i, fallback);
+        }
+      }
+    }
+    // remove excess storage types after fallback replacement.
+    diff(storageTypes, excess, null);
+    if (storageTypes.size() < expectedSize) {
+      LOG.warn("Failed to place enough replicas: expected size is " + expectedSize
+          + " but only " + storageTypes.size() + " storage types can be selected "
+          + "(replication=" + replication
+          + ", selected=" + storageTypes
+          + ", unavailable=" + unavailables
+          + ", removed=" + removed
+          + ", policy=" + this + ")");
+    }
+    return storageTypes;
+  }
+
+  /**
+   * Compute the difference between two lists t and c so that after the diff
+   * computation we have: t = t - c;
+   * Further, if e is not null, set e = e + c - t;
+   */
+  private static void diff(List<StorageType> t, Iterable<StorageType> c,
+      List<StorageType> e) {
+    for(StorageType storagetype : c) {
+      final int i = t.indexOf(storagetype);
+      if (i >= 0) {
+        t.remove(i);
+      } else if (e != null) {
+        e.add(storagetype);
+      }
+    }
+  }
+
+  /**
+   * Choose excess storage types for deletion, given the
+   * replication number and the storage types of the chosen replicas.
+   *
+   * @param replication the replication number.
+   * @param chosen the storage types of the chosen replicas.
+   * @return a list of {@link StorageType}s for deletion.
+   */
+  public List<StorageType> chooseExcess(final short replication,
+      final Iterable<StorageType> chosen) {
+    final List<StorageType> types = chooseStorageTypes(replication);
+    final List<StorageType> excess = new LinkedList<StorageType>();
+    diff(types, chosen, excess);
+    return excess;
+  }
+
+  /** @return the fallback {@link StorageType} for creation. */
+  public StorageType getCreationFallback(EnumSet<StorageType> unavailables) {
+    return getFallback(unavailables, creationFallbacks);
+  }
+
+  /** @return the fallback {@link StorageType} for replication. */
+  public StorageType getReplicationFallback(EnumSet<StorageType> unavailables) {
+    return getFallback(unavailables, replicationFallbacks);
+  }
+
+  @Override
+  public int hashCode() {
+    return Byte.valueOf(id).hashCode();
+  }
+
+  @Override
+  public boolean equals(Object obj) {
+    if (obj == this) {
+      return true;
+    } else if (obj == null || !(obj instanceof BlockStoragePolicy)) {
+      return false;
+    }
+    final BlockStoragePolicy that = (BlockStoragePolicy)obj;
+    return this.id == that.id;
+  }
+
+  @Override
+  public String toString() {
+    return getClass().getSimpleName() + "{" + name + ":" + id
+        + ", storageTypes=" + Arrays.asList(storageTypes)
+        + ", creationFallbacks=" + Arrays.asList(creationFallbacks)
+        + ", replicationFallbacks=" + Arrays.asList(replicationFallbacks) + "}";
+  }
+
+  public byte getId() {
+    return id;
+  }
+
+  public String getName() {
+    return name;
+  }
+
+  public StorageType[] getStorageTypes() {
+    return this.storageTypes;
+  }
+
+  public StorageType[] getCreationFallbacks() {
+    return this.creationFallbacks;
+  }
+
+  public StorageType[] getReplicationFallbacks() {
+    return this.replicationFallbacks;
+  }
+
+  private static StorageType getFallback(EnumSet<StorageType> unavailables,
+      StorageType[] fallbacks) {
+    for(StorageType fb : fallbacks) {
+      if (!unavailables.contains(fb)) {
+        return fb;
+      }
+    }
+    return null;
+  }
+
+  public boolean isCopyOnCreateFile() {
+    return copyOnCreateFile;
+  }
+}
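
To see the fallback logic in action, an illustrative sketch (the policy id and
name are made up, not IDs that HDFS assigns):

    import java.util.Collections;
    import java.util.EnumSet;
    import java.util.List;

    import org.apache.hadoop.fs.StorageType;
    import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;

    // First replica on SSD, the rest on DISK, falling back to DISK.
    static void demoPolicy() {
      BlockStoragePolicy policy = new BlockStoragePolicy(
          (byte) 10, "EXAMPLE_ONE_SSD",
          new StorageType[] {StorageType.SSD, StorageType.DISK},
          new StorageType[] {StorageType.DISK},   // creation fallbacks
          new StorageType[] {StorageType.DISK});  // replication fallbacks

      // [SSD, DISK, DISK] for a replication factor of 3.
      List<StorageType> wanted = policy.chooseStorageTypes((short) 3);

      // With SSD unavailable, its slot falls back: [DISK, DISK, DISK].
      List<StorageType> usable = policy.chooseStorageTypes((short) 3,
          Collections.<StorageType>emptyList(),
          EnumSet.of(StorageType.SSD), true);
    }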

http://git-wip-us.apache.org/repos/asf/hadoop/blob/12f4df04/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/CorruptFileBlocks.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/CorruptFileBlocks.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/CorruptFileBlocks.java
new file mode 100644
index 0000000..1b5dbd3
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/CorruptFileBlocks.java
@@ -0,0 +1,75 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.protocol;
+
+import java.util.Arrays;
+
+/**
+ * Contains a list of paths corresponding to corrupt files and a cookie
+ * used for iterative calls to NameNode.listCorruptFileBlocks.
+ *
+ */
+public class CorruptFileBlocks {
+  // used for hashCode
+  private static final int PRIME = 16777619;
+
+  private final String[] files;
+  private final String cookie;
+
+  public CorruptFileBlocks() {
+    this(new String[0], "");
+  }
+
+  public CorruptFileBlocks(String[] files, String cookie) {
+    this.files = files;
+    this.cookie = cookie;
+  }
+
+  public String[] getFiles() {
+    return files;
+  }
+
+  public String getCookie() {
+    return cookie;
+  }
+
+  @Override
+  public boolean equals(Object obj) {
+    if (this == obj) {
+      return true;
+    }
+    if (!(obj instanceof CorruptFileBlocks)) {
+      return false;
+    }
+    CorruptFileBlocks other = (CorruptFileBlocks) obj;
+    return cookie.equals(other.cookie) &&
+      Arrays.equals(files, other.files);
+  }
+
+
+  @Override
+  public int hashCode() {
+    int result = cookie.hashCode();
+
+    for (String file : files) {
+      result = PRIME * result + file.hashCode();
+    }
+
+    return result;
+  }
+}
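
The cookie supports paging: pass back the cookie from the previous response
until an empty file list is returned. A sketch; the
ClientProtocol.listCorruptFileBlocks(path, cookie) signature is assumed from
the protocol of this era, and the proxy and path are hypothetical.

    import org.apache.hadoop.hdfs.protocol.ClientProtocol;
    import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks;

    // Page through all corrupt file paths under a directory.
    static void listCorrupt(ClientProtocol namenode) throws java.io.IOException {
      String cookie = null;
      while (true) {
        CorruptFileBlocks page = namenode.listCorruptFileBlocks("/data", cookie);
        if (page.getFiles().length == 0) {
          break;                       // an empty page ends the iteration
        }
        for (String file : page.getFiles()) {
          System.out.println("corrupt: " + file);
        }
        cookie = page.getCookie();     // resume where this page ended
      }
    }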

http://git-wip-us.apache.org/repos/asf/hadoop/blob/12f4df04/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DSQuotaExceededException.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DSQuotaExceededException.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DSQuotaExceededException.java
new file mode 100644
index 0000000..481c130
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DSQuotaExceededException.java
@@ -0,0 +1,51 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdfs.protocol;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import static org.apache.hadoop.util.StringUtils.TraditionalBinaryPrefix.long2String;
+
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public class DSQuotaExceededException extends QuotaExceededException {
+  protected static final long serialVersionUID = 1L;
+
+  public DSQuotaExceededException() {}
+
+  public DSQuotaExceededException(String msg) {
+    super(msg);
+  }
+
+  public DSQuotaExceededException(long quota, long count) {
+    super(quota, count);
+  }
+
+  @Override
+  public String getMessage() {
+    String msg = super.getMessage();
+    if (msg == null) {
+      return "The DiskSpace quota" + (pathName==null?"": " of " + pathName)
+          + " is exceeded: quota = " + quota + " B = " + long2String(quota, "B", 2)
+          + " but diskspace consumed = " + count + " B = " + long2String(count, "B", 2);
+    } else {
+      return msg;
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/12f4df04/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeLocalInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeLocalInfo.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeLocalInfo.java
new file mode 100644
index 0000000..b7b2289
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeLocalInfo.java
@@ -0,0 +1,64 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.protocol;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+
+/**
+ * Locally available datanode information
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public class DatanodeLocalInfo {
+  private final String softwareVersion;
+  private final String configVersion;
+  private final long uptime; // datanode uptime in seconds.
+
+  public DatanodeLocalInfo(String softwareVersion,
+      String configVersion, long uptime) {
+    this.softwareVersion = softwareVersion;
+    this.configVersion = configVersion;
+    this.uptime = uptime;
+  }
+
+  /** get software version */
+  public String getSoftwareVersion() {
+    return this.softwareVersion;
+  }
+
+  /** get config version */
+  public String getConfigVersion() {
+    return this.configVersion;
+  }
+
+  /** get uptime */
+  public long getUptime() {
+    return this.uptime;
+  }
+
+  /** A formatted string for printing the status of the DataNode. */
+  public String getDatanodeLocalReport() {
+    StringBuilder buffer = new StringBuilder();
+    buffer.append("Uptime: " + getUptime());
+    buffer.append(", Software version: " + getSoftwareVersion());
+    buffer.append(", Config version: " + getConfigVersion());
+    return buffer.toString();
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/12f4df04/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DirectoryListing.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DirectoryListing.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DirectoryListing.java
new file mode 100644
index 0000000..23802ad
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DirectoryListing.java
@@ -0,0 +1,85 @@
+/* Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.protocol;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+/**
+ * This class defines a partial listing of a directory to support
+ * iterative directory listing.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public class DirectoryListing {
+  private HdfsFileStatus[] partialListing;
+  private int remainingEntries;
+
+  /**
+   * constructor
+   * @param partialListing a partial listing of a directory
+   * @param remainingEntries number of entries that are left to be listed
+   */
+  public DirectoryListing(HdfsFileStatus[] partialListing,
+      int remainingEntries) {
+    if (partialListing == null) {
+      throw new IllegalArgumentException("partial listing should not be null");
+    }
+    if (partialListing.length == 0 && remainingEntries != 0) {
+      throw new IllegalArgumentException("Partial listing is empty but " +
+          "the number of remaining entries is not zero");
+    }
+    this.partialListing = partialListing;
+    this.remainingEntries = remainingEntries;
+  }
+
+  /**
+   * Get the partial listing of file status
+   * @return the partial listing of file status
+   */
+  public HdfsFileStatus[] getPartialListing() {
+    return partialListing;
+  }
+
+  /**
+   * Get the number of remaining entries that are left to be listed
+   * @return the number of remaining entries that are left to be listed
+   */
+  public int getRemainingEntries() {
+    return remainingEntries;
+  }
+
+  /**
+   * Check if there are more entries that are left to be listed
+   * @return true if there are more entries that are left to be listed;
+   *         return false otherwise.
+   */
+  public boolean hasMore() {
+    return remainingEntries != 0;
+  }
+
+  /**
+   * Get the last name in this list
+   * @return the last name in the list if it is not empty; otherwise return null
+   */
+  public byte[] getLastName() {
+    if (partialListing.length == 0) {
+      return null;
+    }
+    return partialListing[partialListing.length-1].getLocalNameInBytes();
+  }
+}
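
getLastName() exists to drive the next round of an iterative listing: it
becomes the startAfter key of the following call. A sketch; the
ClientProtocol.getListing(src, startAfter, needLocation) signature and
HdfsFileStatus.EMPTY_NAME are assumed from the protocol of this era.

    import org.apache.hadoop.hdfs.protocol.ClientProtocol;
    import org.apache.hadoop.hdfs.protocol.DirectoryListing;
    import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;

    // Page through a large directory one partial listing at a time.
    static void listAll(ClientProtocol namenode) throws java.io.IOException {
      byte[] startAfter = HdfsFileStatus.EMPTY_NAME;  // assumed empty start key
      DirectoryListing page;
      do {
        page = namenode.getListing("/big-dir", startAfter, false);
        for (HdfsFileStatus stat : page.getPartialListing()) {
          System.out.println(stat.getLocalName());
        }
        startAfter = page.getLastName();              // key for the next page
      } while (page.hasMore());
    }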

http://git-wip-us.apache.org/repos/asf/hadoop/blob/12f4df04/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/EncryptionZone.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/EncryptionZone.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/EncryptionZone.java
new file mode 100644
index 0000000..f1441b5
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/EncryptionZone.java
@@ -0,0 +1,110 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.protocol;
+
+import org.apache.commons.lang.builder.EqualsBuilder;
+import org.apache.commons.lang.builder.HashCodeBuilder;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.crypto.CipherSuite;
+import org.apache.hadoop.crypto.CryptoProtocolVersion;
+
+/**
+ * A simple class for representing an encryption zone. An encryption zone has a
+ * path (the root of the zone), a cipher suite, a crypto protocol version, a
+ * key name, and a unique id used to implement batched listing of zones.
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public class EncryptionZone {
+
+  private final long id;
+  private final String path;
+  private final CipherSuite suite;
+  private final CryptoProtocolVersion version;
+  private final String keyName;
+
+  public EncryptionZone(long id, String path, CipherSuite suite,
+      CryptoProtocolVersion version, String keyName) {
+    this.id = id;
+    this.path = path;
+    this.suite = suite;
+    this.version = version;
+    this.keyName = keyName;
+  }
+
+  public long getId() {
+    return id;
+  }
+
+  public String getPath() {
+    return path;
+  }
+
+  public CipherSuite getSuite() {
+    return suite;
+  }
+
+  public CryptoProtocolVersion getVersion() { return version; }
+
+  public String getKeyName() {
+    return keyName;
+  }
+
+  @Override
+  public int hashCode() {
+    return new HashCodeBuilder(13, 31)
+        .append(id)
+        .append(path)
+        .append(suite)
+        .append(version)
+        .append(keyName)
+        .toHashCode();
+  }
+
+  @Override
+  public boolean equals(Object obj) {
+    if (obj == null) {
+      return false;
+    }
+    if (obj == this) {
+      return true;
+    }
+    if (obj.getClass() != getClass()) {
+      return false;
+    }
+
+    EncryptionZone rhs = (EncryptionZone) obj;
+    return new EqualsBuilder()
+        .append(id, rhs.id)
+        .append(path, rhs.path)
+        .append(suite, rhs.suite)
+        .append(version, rhs.version)
+        .append(keyName, rhs.keyName)
+        .isEquals();
+  }
+
+  @Override
+  public String toString() {
+    return "EncryptionZone [id=" + id +
+        ", path=" + path +
+        ", suite=" + suite +
+        ", version=" + version +
+        ", keyName=" + keyName + "]";
+  }
+}
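
A hedged sketch of how the unique id is used in practice (illustrative, not
part of this commit): HdfsAdmin#listEncryptionZones returns a RemoteIterator
that pages through zones batched by id. The Configuration instance "conf" is
assumed to exist.

    HdfsAdmin admin = new HdfsAdmin(FileSystem.getDefaultUri(conf), conf);
    RemoteIterator<EncryptionZone> it = admin.listEncryptionZones();
    while (it.hasNext()) {
      EncryptionZone zone = it.next();
      // the id is the paging cursor; path and key name identify the zone
      System.out.println(zone.getId() + ": " + zone.getPath()
          + " -> " + zone.getKeyName());
    }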

http://git-wip-us.apache.org/repos/asf/hadoop/blob/12f4df04/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/LastBlockWithStatus.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/LastBlockWithStatus.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/LastBlockWithStatus.java
new file mode 100644
index 0000000..1cd80f9
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/LastBlockWithStatus.java
@@ -0,0 +1,46 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.protocol;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+/**
+ * Class to contain the last block and the HdfsFileStatus for the append operation.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public class LastBlockWithStatus {
+
+  private final LocatedBlock lastBlock;
+
+  private final HdfsFileStatus fileStatus;
+
+  public LastBlockWithStatus(LocatedBlock lastBlock, HdfsFileStatus fileStatus) {
+    this.lastBlock = lastBlock;
+    this.fileStatus = fileStatus;
+  }
+
+  public LocatedBlock getLastBlock() {
+    return lastBlock;
+  }
+
+  public HdfsFileStatus getFileStatus() {
+    return fileStatus;
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/12f4df04/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/NSQuotaExceededException.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/NSQuotaExceededException.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/NSQuotaExceededException.java
new file mode 100644
index 0000000..eeedd5a
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/NSQuotaExceededException.java
@@ -0,0 +1,60 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdfs.protocol;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public final class NSQuotaExceededException extends QuotaExceededException {
+  protected static final long serialVersionUID = 1L;
+
+  private String prefix;
+
+  public NSQuotaExceededException() {}
+
+  public NSQuotaExceededException(String msg) {
+    super(msg);
+  }
+
+  public NSQuotaExceededException(long quota, long count) {
+    super(quota, count);
+  }
+
+  @Override
+  public String getMessage() {
+    String msg = super.getMessage();
+    if (msg == null) {
+      msg = "The NameSpace quota (directories and files)" +
+      (pathName==null?"":(" of directory " + pathName)) +
+          " is exceeded: quota=" + quota + " file count=" + count;
+
+      if (prefix != null) {
+        msg = prefix + ": " + msg;
+      }
+    }
+    return msg;
+  }
+
+  /** Set a prefix for the error message. */
+  public void setMessagePrefix(final String prefix) {
+    this.prefix = prefix;
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/12f4df04/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/QuotaExceededException.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/QuotaExceededException.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/QuotaExceededException.java
new file mode 100644
index 0000000..f4e7f34
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/QuotaExceededException.java
@@ -0,0 +1,65 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdfs.protocol;
+
+import java.io.IOException;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+/**
+ * This exception is thrown when a modification to HDFS results in a violation
+ * of a directory quota. A directory quota is either a namespace quota (a limit
+ * on the number of files and directories) or a diskspace quota (a limit on the
+ * space taken by all the files under the directory tree). <br> <br>
+ *
+ * The message for the exception specifies the directory where the quota
+ * was violated and the actual quotas. The specific message is generated in the
+ * corresponding exception class:
+ *  DSQuotaExceededException or
+ *  NSQuotaExceededException
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public class QuotaExceededException extends IOException {
+  protected static final long serialVersionUID = 1L;
+  protected String pathName = null;
+  protected long quota; // the quota limit
+  protected long count; // the actual value observed
+
+  protected QuotaExceededException() {}
+
+  protected QuotaExceededException(String msg) {
+    super(msg);
+  }
+
+  protected QuotaExceededException(long quota, long count) {
+    this.quota = quota;
+    this.count = count;
+  }
+
+  public void setPathName(String path) {
+    this.pathName = path;
+  }
+
+  @Override
+  public String getMessage() {
+    return super.getMessage();
+  }
+}
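
A short illustrative catch sketch (assumes an existing FileSystem "fs" and a
directory with quotas set; not part of this commit). Both subclasses extend
QuotaExceededException, so callers can handle the namespace and diskspace
cases separately or together:

    try {
      FSDataOutputStream out = fs.create(new Path("/quota-limited/file"));
      out.close();
    } catch (NSQuotaExceededException e) {
      // namespace quota: too many files and directories
      System.err.println(e.getMessage());
    } catch (QuotaExceededException e) {
      // any other quota violation, e.g. DSQuotaExceededException
      System.err.println(e.getMessage());
    }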

http://git-wip-us.apache.org/repos/asf/hadoop/blob/12f4df04/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/RollingUpgradeInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/RollingUpgradeInfo.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/RollingUpgradeInfo.java
new file mode 100644
index 0000000..b527797
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/RollingUpgradeInfo.java
@@ -0,0 +1,139 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.protocol;
+
+import java.util.Date;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+/**
+ * Rolling upgrade information
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public class RollingUpgradeInfo extends RollingUpgradeStatus {
+  private final long startTime;
+  private long finalizeTime;
+  private boolean createdRollbackImages;
+
+  public RollingUpgradeInfo(String blockPoolId, boolean createdRollbackImages,
+      long startTime, long finalizeTime) {
+    super(blockPoolId, finalizeTime != 0);
+    this.createdRollbackImages = createdRollbackImages;
+    this.startTime = startTime;
+    this.finalizeTime = finalizeTime;
+  }
+
+  public boolean createdRollbackImages() {
+    return createdRollbackImages;
+  }
+
+  public void setCreatedRollbackImages(boolean created) {
+    this.createdRollbackImages = created;
+  }
+
+  public boolean isStarted() {
+    return startTime != 0;
+  }
+
+  /** @return The rolling upgrade starting time. */
+  public long getStartTime() {
+    return startTime;
+  }
+
+  @Override
+  public boolean isFinalized() {
+    return finalizeTime != 0;
+  }
+
+  /**
+   * Finalize the upgrade if not already finalized.
+   * @param finalizeTime the finalization time in milliseconds; ignored if zero
+   */
+  public void finalize(long finalizeTime) {
+    if (finalizeTime != 0) {
+      this.finalizeTime = finalizeTime;
+      createdRollbackImages = false;
+    }
+  }
+
+  public long getFinalizeTime() {
+    return finalizeTime;
+  }
+
+  @Override
+  public int hashCode() {
+    // only use the lower 32 bits of each timestamp
+    return super.hashCode() ^ (int)startTime ^ (int)finalizeTime;
+  }
+
+  @Override
+  public boolean equals(Object obj) {
+    if (obj == this) {
+      return true;
+    } else if (obj == null || !(obj instanceof RollingUpgradeInfo)) {
+      return false;
+    }
+    final RollingUpgradeInfo that = (RollingUpgradeInfo)obj;
+    return super.equals(that)
+        && this.startTime == that.startTime
+        && this.finalizeTime == that.finalizeTime;
+  }
+
+  @Override
+  public String toString() {
+    return super.toString()
+        + "\n     Start Time: " + (startTime == 0 ? "<NOT STARTED>" : timestamp2String(startTime))
+        + "\n  Finalize Time: " + (finalizeTime == 0 ? "<NOT FINALIZED>" : timestamp2String(finalizeTime));
+  }
+
+  private static String timestamp2String(long timestamp) {
+    return new Date(timestamp) + " (=" + timestamp + ")";
+  }
+
+  public static class Bean {
+    private final String blockPoolId;
+    private final long startTime;
+    private final long finalizeTime;
+    private final boolean createdRollbackImages;
+
+    public Bean(RollingUpgradeInfo f) {
+      this.blockPoolId = f.getBlockPoolId();
+      this.startTime = f.startTime;
+      this.finalizeTime = f.finalizeTime;
+      this.createdRollbackImages = f.createdRollbackImages();
+    }
+
+    public String getBlockPoolId() {
+      return blockPoolId;
+    }
+
+    public long getStartTime() {
+      return startTime;
+    }
+
+    public long getFinalizeTime() {
+      return finalizeTime;
+    }
+
+    public boolean isCreatedRollbackImages() {
+      return createdRollbackImages;
+    }
+  }
+}
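
A small sketch of the timestamp conventions (illustrative only): zero means
"has not happened", and any non-zero value is the event time in milliseconds,
so isStarted() and isFinalized() are pure functions of the two timestamps.
The block pool id below is a made-up value.

    RollingUpgradeInfo info = new RollingUpgradeInfo("BP-1", true,
        System.currentTimeMillis(), 0);
    assert info.isStarted();       // startTime != 0
    assert !info.isFinalized();    // finalizeTime == 0
    info.finalize(System.currentTimeMillis());
    assert info.isFinalized();
    assert !info.createdRollbackImages(); // cleared by finalize()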

http://git-wip-us.apache.org/repos/asf/hadoop/blob/12f4df04/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/RollingUpgradeStatus.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/RollingUpgradeStatus.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/RollingUpgradeStatus.java
new file mode 100644
index 0000000..1f969fb
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/RollingUpgradeStatus.java
@@ -0,0 +1,66 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.protocol;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+/**
+ * Rolling upgrade status
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public class RollingUpgradeStatus {
+  private final String blockPoolId;
+  private final boolean finalized;
+
+  public RollingUpgradeStatus(String blockPoolId, boolean finalized) {
+    this.blockPoolId = blockPoolId;
+    this.finalized = finalized;
+  }
+
+  public String getBlockPoolId() {
+    return blockPoolId;
+  }
+
+  public boolean isFinalized() {
+    return finalized;
+  }
+
+  @Override
+  public int hashCode() {
+    return blockPoolId.hashCode();
+  }
+
+  @Override
+  public boolean equals(Object obj) {
+    if (obj == this) {
+      return true;
+    } else if (obj == null || !(obj instanceof RollingUpgradeStatus)) {
+      return false;
+    }
+    final RollingUpgradeStatus that = (RollingUpgradeStatus) obj;
+    return this.blockPoolId.equals(that.blockPoolId)
+        && this.isFinalized() == that.isFinalized();
+  }
+
+  @Override
+  public String toString() {
+    return "  Block Pool ID: " + blockPoolId;
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/12f4df04/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshotAccessControlException.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshotAccessControlException.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshotAccessControlException.java
new file mode 100644
index 0000000..2def72f
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshotAccessControlException.java
@@ -0,0 +1,33 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.protocol;
+
+import org.apache.hadoop.security.AccessControlException;
+
+/** Snapshot access-related exception. */
+public class SnapshotAccessControlException extends AccessControlException {
+  private static final long serialVersionUID = 1L;
+
+  public SnapshotAccessControlException(final String message) {
+    super(message);
+  }
+
+  public SnapshotAccessControlException(final Throwable cause) {
+    super(cause);
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/12f4df04/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/security/token/block/DataEncryptionKey.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/security/token/block/DataEncryptionKey.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/security/token/block/DataEncryptionKey.java
new file mode 100644
index 0000000..87411f2
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/security/token/block/DataEncryptionKey.java
@@ -0,0 +1,50 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.security.token.block;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+
+/**
+ * A simple struct-like class that contains all the fields required to encrypt
+ * traffic for the DataTransferProtocol.
+ */
+@InterfaceAudience.Private
+public class DataEncryptionKey {
+  public final int keyId;
+  public final String blockPoolId;
+  public final byte[] nonce;
+  public final byte[] encryptionKey;
+  public final long expiryDate;
+  public final String encryptionAlgorithm;
+
+  public DataEncryptionKey(int keyId, String blockPoolId, byte[] nonce,
+      byte[] encryptionKey, long expiryDate, String encryptionAlgorithm) {
+    this.keyId = keyId;
+    this.blockPoolId = blockPoolId;
+    this.nonce = nonce;
+    this.encryptionKey = encryptionKey;
+    this.expiryDate = expiryDate;
+    this.encryptionAlgorithm = encryptionAlgorithm;
+  }
+
+  @Override
+  public String toString() {
+    return keyId + "/" + blockPoolId + "/" + nonce.length + "/" +
+        encryptionKey.length;
+  }
+}
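
An illustrative construction (all values hypothetical): note that toString()
deliberately prints only the nonce and key lengths, never the secret bytes,
so the key material cannot leak into log output.

    DataEncryptionKey key = new DataEncryptionKey(42, "BP-1",
        new byte[8] /* nonce */, new byte[16] /* secret key */,
        System.currentTimeMillis() + 3600000L, "3des");
    System.out.println(key); // prints: 42/BP-1/8/16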

http://git-wip-us.apache.org/repos/asf/hadoop/blob/12f4df04/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 03c5228..521315b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -458,6 +458,9 @@ Release 2.8.0 - UNRELEASED
     HDFS-8185. Separate client related routines in HAUtil into a new class.
     (wheat9)
 
+    HDFS-8218. Move classes used by ClientProtocol into hdfs-client.
+    (wheat9)
+
   OPTIMIZATIONS
 
     HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/12f4df04/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/CacheFlag.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/CacheFlag.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/CacheFlag.java
deleted file mode 100644
index f76fcaa..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/CacheFlag.java
+++ /dev/null
@@ -1,44 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.fs;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-
-/**
- * Specifies semantics for CacheDirective operations. Multiple flags can
- * be combined in an EnumSet.
- */
-@InterfaceAudience.Public
-@InterfaceStability.Evolving
-public enum CacheFlag {
-
-  /**
-   * Ignore cache pool resource limits when performing this operation.
-   */
-  FORCE((short) 0x01);
-  private final short mode;
-
-  private CacheFlag(short mode) {
-    this.mode = mode;
-  }
-
-  short getMode() {
-    return mode;
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/12f4df04/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/XAttr.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/XAttr.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/XAttr.java
deleted file mode 100644
index 968ee00..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/XAttr.java
+++ /dev/null
@@ -1,163 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.fs;
-
-import java.util.Arrays;
-
-import org.apache.commons.lang.builder.EqualsBuilder;
-import org.apache.commons.lang.builder.HashCodeBuilder;
-import org.apache.hadoop.classification.InterfaceAudience;
-
-/**
- * XAttr is the POSIX Extended Attribute model similar to that found in
- * traditional Operating Systems.  Extended Attributes consist of one
- * or more name/value pairs associated with a file or directory. Five
- * namespaces are defined: user, trusted, security, system and raw.
- *   1) USER namespace attributes may be used by any user to store
- *   arbitrary information. Access permissions in this namespace are
- *   defined by a file directory's permission bits. For sticky directories,
- *   only the owner and privileged user can write attributes.
- * <br>
- *   2) TRUSTED namespace attributes are only visible and accessible to
- *   privileged users. This namespace is available from both user space
- *   (filesystem API) and fs kernel.
- * <br>
- *   3) SYSTEM namespace attributes are used by the fs kernel to store
- *   system objects.  This namespace is only available in the fs
- *   kernel. It is not visible to users.
- * <br>
- *   4) SECURITY namespace attributes are used by the fs kernel for
- *   security features. It is not visible to users.
- * <br>
- *   5) RAW namespace attributes are used for internal system attributes that
- *   sometimes need to be exposed. Like SYSTEM namespace attributes they are
- *   not visible to the user except when getXAttr/getXAttrs is called on a file
- *   or directory in the /.reserved/raw HDFS directory hierarchy.  These
- *   attributes can only be accessed by the superuser.
- * <p/>
- * @see <a href="http://en.wikipedia.org/wiki/Extended_file_attributes">
- * http://en.wikipedia.org/wiki/Extended_file_attributes</a>
- *
- */
-@InterfaceAudience.Private
-public class XAttr {
-
-  public static enum NameSpace {
-    USER,
-    TRUSTED,
-    SECURITY,
-    SYSTEM,
-    RAW;
-  }
-  
-  private final NameSpace ns;
-  private final String name;
-  private final byte[] value;
-  
-  public static class Builder {
-    private NameSpace ns = NameSpace.USER;
-    private String name;
-    private byte[] value;
-    
-    public Builder setNameSpace(NameSpace ns) {
-      this.ns = ns;
-      return this;
-    }
-    
-    public Builder setName(String name) {
-      this.name = name;
-      return this;
-    }
-    
-    public Builder setValue(byte[] value) {
-      this.value = value;
-      return this;
-    }
-    
-    public XAttr build() {
-      return new XAttr(ns, name, value);
-    }
-  }
-  
-  private XAttr(NameSpace ns, String name, byte[] value) {
-    this.ns = ns;
-    this.name = name;
-    this.value = value;
-  }
-  
-  public NameSpace getNameSpace() {
-    return ns;
-  }
-  
-  public String getName() {
-    return name;
-  }
-  
-  public byte[] getValue() {
-    return value;
-  }
-  
-  @Override
-  public int hashCode() {
-    return new HashCodeBuilder(811, 67)
-        .append(name)
-        .append(ns)
-        .append(value)
-        .toHashCode();
-  }
-
-  @Override
-  public boolean equals(Object obj) {
-    if (obj == null) { return false; }
-    if (obj == this) { return true; }
-    if (obj.getClass() != getClass()) {
-      return false;
-    }
-    XAttr rhs = (XAttr) obj;
-    return new EqualsBuilder()
-        .append(ns, rhs.ns)
-        .append(name, rhs.name)
-        .append(value, rhs.value)
-        .isEquals();
-  }
-
-  /**
-   * Similar to {@link #equals(Object)}, except ignores the XAttr value.
-   *
-   * @param obj to compare equality
-   * @return if the XAttrs are equal, ignoring the XAttr value
-   */
-  public boolean equalsIgnoreValue(Object obj) {
-    if (obj == null) { return false; }
-    if (obj == this) { return true; }
-    if (obj.getClass() != getClass()) {
-      return false;
-    }
-    XAttr rhs = (XAttr) obj;
-    return new EqualsBuilder()
-        .append(ns, rhs.ns)
-        .append(name, rhs.name)
-        .isEquals();
-  }
-
-  @Override
-  public String toString() {
-    return "XAttr [ns=" + ns + ", name=" + name + ", value="
-        + Arrays.toString(value) + "]";
-  }
-}

