hadoop-common-commits mailing list archives

From aengin...@apache.org
Subject [12/51] [partial] hadoop git commit: HDFS-13258. Ozone: restructure Hdsl/Ozone code to separated maven subprojects. Contributed by Elek Marton, Mukul Kumar Singh, Xiaoyu Yao, Ajay Kumar, Anu Engineer, Lokesh Jain, Nanda Kumar.
Date Thu, 15 Mar 2018 16:32:27 GMT
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce23d9ad/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/request/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/request/package-info.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/request/package-info.java
deleted file mode 100644
index 4fbc18f..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/request/package-info.java
+++ /dev/null
@@ -1,23 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-/**
- * Request classes used by the Ozone REST interface.
- */
-package org.apache.hadoop.ozone.web.request;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce23d9ad/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/response/BucketInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/response/BucketInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/response/BucketInfo.java
deleted file mode 100644
index e66cd20..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/response/BucketInfo.java
+++ /dev/null
@@ -1,325 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.hadoop.ozone.web.response;
-
-import java.io.IOException;
-import java.util.LinkedList;
-import java.util.List;
-
-import org.apache.hadoop.fs.StorageType;
-import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.ozone.OzoneAcl;
-import org.apache.hadoop.ozone.web.utils.JsonUtils;
-
-import com.fasterxml.jackson.annotation.JsonAutoDetect;
-import com.fasterxml.jackson.annotation.JsonFilter;
-import com.fasterxml.jackson.annotation.PropertyAccessor;
-import com.fasterxml.jackson.databind.ObjectMapper;
-import com.fasterxml.jackson.databind.ObjectReader;
-import com.fasterxml.jackson.databind.ObjectWriter;
-import com.fasterxml.jackson.databind.ser.FilterProvider;
-import com.fasterxml.jackson.databind.ser.impl.SimpleBeanPropertyFilter;
-import com.fasterxml.jackson.databind.ser.impl.SimpleFilterProvider;
-import com.google.common.base.Preconditions;
-
-/**
- * BucketInfo is the response class used to send JSON info
- * about a bucket back to a client.
- */
-public class BucketInfo implements Comparable<BucketInfo> {
-  static final String BUCKET_INFO = "BUCKET_INFO_FILTER";
-  private static final ObjectReader READER =
-      new ObjectMapper().readerFor(BucketInfo.class);
-  private static final ObjectWriter WRITER;
-
-  static {
-    ObjectMapper mapper = new ObjectMapper();
-    String[] ignorableFieldNames = {"bytesUsed", "keyCount"};
-
-    FilterProvider filters = new SimpleFilterProvider().addFilter(BUCKET_INFO,
-        SimpleBeanPropertyFilter.serializeAllExcept(ignorableFieldNames));
-    mapper.setVisibility(PropertyAccessor.FIELD, JsonAutoDetect.Visibility.ANY);
-    mapper.addMixIn(Object.class, MixIn.class);
-
-    mapper.setFilterProvider(filters);
-    WRITER = mapper.writerWithDefaultPrettyPrinter();
-  }
-
-  private String volumeName;
-  private String bucketName;
-  private String createdOn;
-  private List<OzoneAcl> acls;
-  private OzoneConsts.Versioning versioning;
-  private StorageType storageType;
-  private long bytesUsed;
-  private long keyCount;
-
-  /**
-   * Constructor for BucketInfo.
-   *
-   * @param volumeName
-   * @param bucketName
-   */
-  public BucketInfo(String volumeName, String bucketName) {
-    this.volumeName = volumeName;
-    this.bucketName = bucketName;
-  }
-
-
-  /**
-   * Default constructor for BucketInfo.
-   */
-  public BucketInfo() {
-    acls = new LinkedList<OzoneAcl>();
-  }
-
-  /**
-   * Parse a JSON string into BucketInfo Object.
-   *
-   * @param jsonString - Json String
-   *
-   * @return - BucketInfo
-   *
-   * @throws IOException
-   */
-  public static BucketInfo parse(String jsonString) throws IOException {
-    return READER.readValue(jsonString);
-  }
-
-  /**
-   * Returns a List of ACL on the Bucket.
-   *
-   * @return List of Acls
-   */
-  public List<OzoneAcl> getAcls() {
-    return acls;
-  }
-
-  /**
-   * Sets ACls.
-   *
-   * @param acls - Acls list
-   */
-  public void setAcls(List<OzoneAcl> acls) {
-    this.acls = acls;
-  }
-
-  /**
-   * Returns Storage Type info.
-   *
-   * @return Storage Type of the bucket
-   */
-  public StorageType getStorageType() {
-    return storageType;
-  }
-
-  /**
-   * Sets the Storage Type.
-   *
-   * @param storageType - Storage Type
-   */
-  public void setStorageType(StorageType storageType) {
-    this.storageType = storageType;
-  }
-
-  /**
-   * Returns versioning.
-   *
-   * @return versioning Enum
-   */
-  public OzoneConsts.Versioning getVersioning() {
-    return versioning;
-  }
-
-  /**
-   * Sets Versioning.
-   *
-   * @param versioning
-   */
-  public void setVersioning(OzoneConsts.Versioning versioning) {
-    this.versioning = versioning;
-  }
-
-
-  /**
-   * Gets bucket Name.
-   *
-   * @return String
-   */
-  public String getBucketName() {
-    return bucketName;
-  }
-
-  /**
-   * Sets bucket Name.
-   *
-   * @param bucketName - Name of the bucket
-   */
-  public void setBucketName(String bucketName) {
-    this.bucketName = bucketName;
-  }
-
-  /**
-   * Sets creation time of the bucket.
-   *
-   * @param creationTime - Date String
-   */
-  public void setCreatedOn(String creationTime) {
-    this.createdOn = creationTime;
-  }
-
-  /**
-   * Returns creation time.
-   *
-   * @return creation time of bucket.
-   */
-  public String getCreatedOn() {
-    return createdOn;
-  }
-
-  /**
-   * Returns a JSON string of this object.
-   * After stripping out bytesUsed and keyCount
-   *
-   * @return String
-   */
-  public String toJsonString() throws IOException {
-    return WRITER.writeValueAsString(this);
-  }
-
-  /**
-   * Returns the Object as a Json String.
-   *
-   * Both toJsonString and toDBString exist because toJsonString
-   * supports an external-facing contract with REST clients, while the
-   * server internally may want to add more fields to this class. The
-   * distinction helps in serializing all fields vs. only the fields
-   * that are part of the REST protocol.
-   */
-  public String toDBString() throws IOException {
-    return JsonUtils.toJsonString(this);
-  }
-
-  /**
-   * Returns Volume Name.
-   *
-   * @return String volume name
-   */
-  public String getVolumeName() {
-    return volumeName;
-  }
-
-  /**
-   * Sets the Volume Name of the bucket.
-   *
-   * @param volumeName - volumeName
-   */
-  public void setVolumeName(String volumeName) {
-    this.volumeName = volumeName;
-  }
-
-  /**
-   * Compares this object with the specified object for order.  Returns a
-   * negative integer, zero, or a positive integer as this object is less
-   * than, equal to, or greater than the specified object.
-   *
-   * Please note : BucketInfo compare functions are used only within the
-   * context of a volume, hence volume name is purposefully ignored in
-   * compareTo, equal and hashcode functions of this class.
-   */
-  @Override
-  public int compareTo(BucketInfo o) {
-    Preconditions.checkState(o.getVolumeName().equals(this.getVolumeName()));
-    return this.bucketName.compareTo(o.getBucketName());
-  }
-
-  /**
-   * Checks if two bucketInfo's are equal.
-   * @param o Object BucketInfo
-   * @return  True or False
-   */
-  @Override
-  public boolean equals(Object o) {
-    if (this == o) {
-      return true;
-    }
-    if (!(o instanceof BucketInfo)) {
-      return false;
-    }
-
-    BucketInfo that = (BucketInfo) o;
-    Preconditions.checkState(that.getVolumeName().equals(this.getVolumeName()));
-    return bucketName.equals(that.bucketName);
-
-  }
-
-  /**
-   * Hash Code for this object.
-   * @return int
-   */
-  @Override
-  public int hashCode() {
-    return bucketName.hashCode();
-  }
-
-  /**
-   * Get the number of bytes used by this bucket.
-   *
-   * @return long
-   */
-  public long getBytesUsed() {
-    return bytesUsed;
-  }
-
-  /**
-   * Set bytes Used.
-   *
-   * @param bytesUsed - bytesUsed
-   */
-  public void setBytesUsed(long bytesUsed) {
-    this.bytesUsed = bytesUsed;
-  }
-
-  /**
-   * Get Key Count  inside this bucket.
-   *
-   * @return - KeyCount
-   */
-  public long getKeyCount() {
-    return keyCount;
-  }
-
-  /**
-   * Set Key Count inside this bucket.
-   *
-   * @param keyCount - Sets the Key Count
-   */
-  public void setKeyCount(long keyCount) {
-    this.keyCount = keyCount;
-  }
-
-  /**
-   * This class allows us to create custom filters
-   * for the Json serialization.
-   */
-  @JsonFilter(BUCKET_INFO)
-  class MixIn {
-
-  }
-
-}
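
The BucketInfo class deleted above uses a Jackson mix-in plus a SimpleFilterProvider so that server-only fields (bytesUsed, keyCount) are stripped from the REST-facing JSON produced by toJsonString, while toDBString still serializes every field. Below is a minimal, self-contained sketch of that pattern, assuming Jackson 2.6+ on the classpath; the Item class, its fields, and the DEMO_FILTER id are hypothetical stand-ins, not names from this patch.

import com.fasterxml.jackson.annotation.JsonAutoDetect;
import com.fasterxml.jackson.annotation.JsonFilter;
import com.fasterxml.jackson.annotation.PropertyAccessor;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.ObjectWriter;
import com.fasterxml.jackson.databind.ser.impl.SimpleBeanPropertyFilter;
import com.fasterxml.jackson.databind.ser.impl.SimpleFilterProvider;

public class FilterDemo {

  // Hypothetical POJO standing in for BucketInfo.
  static class Item {
    private String name = "bucket-1";
    private long bytesUsed = 4096;   // server-only field, hidden from REST output
  }

  // Mix-in that attaches the named filter without annotating Item itself.
  @JsonFilter("DEMO_FILTER")
  static class MixIn { }

  public static void main(String[] args) throws Exception {
    // REST-facing writer: serialize every field except bytesUsed.
    ObjectMapper restMapper = new ObjectMapper();
    restMapper.setVisibility(PropertyAccessor.FIELD, JsonAutoDetect.Visibility.ANY);
    restMapper.addMixIn(Object.class, MixIn.class);
    restMapper.setFilterProvider(new SimpleFilterProvider()
        .addFilter("DEMO_FILTER",
            SimpleBeanPropertyFilter.serializeAllExcept("bytesUsed")));
    ObjectWriter restWriter = restMapper.writerWithDefaultPrettyPrinter();

    // Internal ("DB") writer: no filter registered, so all fields are kept.
    ObjectMapper dbMapper = new ObjectMapper();
    dbMapper.setVisibility(PropertyAccessor.FIELD, JsonAutoDetect.Visibility.ANY);

    System.out.println(restWriter.writeValueAsString(new Item())); // only "name"
    System.out.println(dbMapper.writeValueAsString(new Item()));   // name + bytesUsed
  }
}

The real class builds the READER/WRITER pair in a static initializer, so the mapper configuration is constructed once per class rather than on every call.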

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce23d9ad/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/response/KeyInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/response/KeyInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/response/KeyInfo.java
deleted file mode 100644
index 34885f6..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/response/KeyInfo.java
+++ /dev/null
@@ -1,290 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.hadoop.ozone.web.response;
-
-import java.io.IOException;
-
-import org.apache.commons.lang.builder.EqualsBuilder;
-import org.apache.commons.lang.builder.HashCodeBuilder;
-import org.apache.hadoop.ozone.web.utils.JsonUtils;
-
-import com.fasterxml.jackson.annotation.JsonAutoDetect;
-import com.fasterxml.jackson.annotation.JsonFilter;
-import com.fasterxml.jackson.annotation.PropertyAccessor;
-import com.fasterxml.jackson.databind.ObjectMapper;
-import com.fasterxml.jackson.databind.ObjectReader;
-import com.fasterxml.jackson.databind.ObjectWriter;
-import com.fasterxml.jackson.databind.ser.FilterProvider;
-import com.fasterxml.jackson.databind.ser.impl.SimpleBeanPropertyFilter;
-import com.fasterxml.jackson.databind.ser.impl.SimpleFilterProvider;
-
-/**
- * Represents an Ozone key Object.
- */
-public class KeyInfo implements Comparable<KeyInfo> {
-  static final String OBJECT_INFO = "OBJECT_INFO_FILTER";
-
-  private static final ObjectReader READER =
-      new ObjectMapper().readerFor(KeyInfo.class);
-  private static final ObjectWriter WRITER;
-
-  static {
-    ObjectMapper mapper = new ObjectMapper();
-    String[] ignorableFieldNames = {"dataFileName"};
-
-    FilterProvider filters = new SimpleFilterProvider()
-        .addFilter(OBJECT_INFO, SimpleBeanPropertyFilter
-            .serializeAllExcept(ignorableFieldNames));
-    mapper.setVisibility(PropertyAccessor.FIELD,
-        JsonAutoDetect.Visibility.ANY);
-    mapper.addMixIn(Object.class, MixIn.class);
-
-    mapper.setFilterProvider(filters);
-    WRITER = mapper.writerWithDefaultPrettyPrinter();
-  }
-
-  /**
-   * This class allows us to create custom filters
-   * for the Json serialization.
-   */
-  @JsonFilter(OBJECT_INFO)
-  class MixIn {
-
-  }
-  private long version;
-  private String md5hash;
-  private String createdOn;
-  private String modifiedOn;
-  private long size;
-  private String keyName;
-
-  private String dataFileName;
-
-  /**
-   * When this key was created.
-   *
-   * @return Date String
-   */
-  public String getCreatedOn() {
-    return createdOn;
-  }
-
-  /**
-   * When this key was modified.
-   *
-   * @return Date String
-   */
-  public String getModifiedOn() {
-    return modifiedOn;
-  }
-
-  /**
-   * When this key was created.
-   *
-   * @param createdOn - Date String
-   */
-  public void setCreatedOn(String createdOn) {
-    this.createdOn = createdOn;
-  }
-
-  /**
-   * When this key was modified.
-   *
-   * @param modifiedOn - Date String
-   */
-  public void setModifiedOn(String modifiedOn) {
-    this.modifiedOn = modifiedOn;
-  }
-
-  /**
-   * Full path to where the actual data for this key is stored.
-   *
-   * @return String
-   */
-  public String getDataFileName() {
-    return dataFileName;
-  }
-
-  /**
-   * Sets up where the file path is stored.
-   *
-   * @param dataFileName - Data File Name
-   */
-  public void setDataFileName(String dataFileName) {
-    this.dataFileName = dataFileName;
-  }
-
-  /**
-   * Gets the Keyname of this object.
-   *
-   * @return String
-   */
-  public String getKeyName() {
-    return keyName;
-  }
-
-  /**
-   * Sets the Key name of this object.
-   *
-   * @param keyName - String
-   */
-  public void setKeyName(String keyName) {
-    this.keyName = keyName;
-  }
-
-  /**
-   * Returns the MD5 Hash for the data of this key.
-   *
-   * @return String MD5
-   */
-  public String getMd5hash() {
-    return md5hash;
-  }
-
-  /**
-   * Sets the MD5 of this file.
-   *
-   * @param md5hash - Md5 of this file
-   */
-  public void setMd5hash(String md5hash) {
-    this.md5hash = md5hash;
-  }
-
-  /**
-   * Number of bytes stored in the data part of this key.
-   *
-   * @return long size of the data file
-   */
-  public long getSize() {
-    return size;
-  }
-
-  /**
-   * Sets the size of the Data part of this key.
-   *
-   * @param size - Size in long
-   */
-  public void setSize(long size) {
-    this.size = size;
-  }
-
-  /**
-   * Version of this key.
-   *
-   * @return - returns the version of this key.
-   */
-  public long getVersion() {
-    return version;
-  }
-
-  /**
-   * Sets the version of this key.
-   *
-   * @param version - Version String
-   */
-  public void setVersion(long version) {
-    this.version = version;
-  }
-
-  /**
-   * Compares this object with the specified object for order.  Returns a
-   * negative integer, zero, or a positive integer as this object is less
-   * than, equal to, or greater than the specified object.
-   *
-   * @param o the object to be compared.
-   *
-   * @return a negative integer, zero, or a positive integer as this object
-   * is less than, equal to, or greater than the specified object.
-   *
-   * @throws NullPointerException if the specified object is null
-   * @throws ClassCastException if the specified object's type prevents it
-   * from being compared to this object.
-   */
-  @Override
-  public int compareTo(KeyInfo o) {
-    if (this.keyName.compareTo(o.getKeyName()) != 0) {
-      return this.keyName.compareTo(o.getKeyName());
-    }
-
-    if (this.getVersion() == o.getVersion()) {
-      return 0;
-    }
-    if (this.getVersion() < o.getVersion()) {
-      return -1;
-    }
-    return 1;
-  }
-
-  @Override
-  public boolean equals(Object o) {
-    if (this == o) {
-      return true;
-    }
-
-    if (o == null || getClass() != o.getClass()) {
-      return false;
-    }
-
-    KeyInfo keyInfo = (KeyInfo) o;
-
-    return new EqualsBuilder()
-        .append(version, keyInfo.version)
-        .append(keyName, keyInfo.keyName)
-        .isEquals();
-  }
-
-  @Override
-  public int hashCode() {
-    return new HashCodeBuilder(17, 37)
-        .append(version)
-        .append(keyName)
-        .toHashCode();
-  }
-
-  /**
-
-   * Parse a JSON string into a KeyInfo object.
-   *
-   * @param jsonString - Json String
-   *
-   * @return - KeyInfo
-   *
-   * @throws IOException
-   */
-  public static KeyInfo parse(String jsonString) throws IOException {
-    return READER.readValue(jsonString);
-  }
-
-
-  /**
-   * Returns a JSON string of this object.
-   * After stripping out bytesUsed and keyCount
-   *
-   * @return String
-   */
-  public String toJsonString() throws IOException {
-    return WRITER.writeValueAsString(this);
-  }
-
-  /**
-   * Returns the Object as a Json String.
-   */
-  public String toDBString() throws IOException {
-    return JsonUtils.toJsonString(this);
-  }
-}
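
KeyInfo.compareTo above orders keys by key name first and falls back to the numeric version only when the names match. A minimal sketch of the same ordering with a java.util.Comparator, using a hypothetical Key holder class that is not part of this patch:

import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;

public class KeyOrderingDemo {

  // Hypothetical stand-in for KeyInfo's sort key: name first, then version.
  static class Key {
    final String name;
    final long version;
    Key(String name, long version) { this.name = name; this.version = version; }
    @Override public String toString() { return name + "@" + version; }
  }

  public static void main(String[] args) {
    List<Key> keys = new ArrayList<>();
    keys.add(new Key("b", 2));
    keys.add(new Key("a", 3));
    keys.add(new Key("a", 1));

    // Same ordering contract as KeyInfo.compareTo: negative, zero, or positive.
    keys.sort(Comparator.comparing((Key k) -> k.name)
        .thenComparingLong(k -> k.version));

    System.out.println(keys);  // [a@1, a@3, b@2]
  }
}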

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce23d9ad/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/response/ListBuckets.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/response/ListBuckets.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/response/ListBuckets.java
deleted file mode 100644
index bc4e65b..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/response/ListBuckets.java
+++ /dev/null
@@ -1,154 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.hadoop.ozone.web.response;
-
-
-import java.io.IOException;
-import java.util.Collections;
-import java.util.LinkedList;
-import java.util.List;
-
-import org.apache.hadoop.ozone.web.utils.JsonUtils;
-
-import com.fasterxml.jackson.annotation.JsonAutoDetect;
-import com.fasterxml.jackson.annotation.JsonFilter;
-import com.fasterxml.jackson.annotation.PropertyAccessor;
-import com.fasterxml.jackson.databind.ObjectMapper;
-import com.fasterxml.jackson.databind.ObjectReader;
-import com.fasterxml.jackson.databind.ObjectWriter;
-import com.fasterxml.jackson.databind.ser.FilterProvider;
-import com.fasterxml.jackson.databind.ser.impl.SimpleBeanPropertyFilter;
-import com.fasterxml.jackson.databind.ser.impl.SimpleFilterProvider;
-
-/**
- * ListBuckets is the response for the ListBuckets query.
- */
-public class ListBuckets {
-  static final String BUCKET_LIST = "BUCKET_LIST_FILTER";
-  private static final ObjectReader READER =
-      new ObjectMapper().readerFor(ListBuckets.class);
-  private static final ObjectWriter WRITER;
-
-  static {
-    ObjectMapper mapper = new ObjectMapper();
-    String[] ignorableFieldNames = {"dataFileName"};
-
-    FilterProvider filters = new SimpleFilterProvider()
-        .addFilter(BUCKET_LIST, SimpleBeanPropertyFilter
-            .serializeAllExcept(ignorableFieldNames));
-    mapper.setVisibility(PropertyAccessor.FIELD,
-        JsonAutoDetect.Visibility.ANY);
-    mapper.addMixIn(Object.class, MixIn.class);
-
-    mapper.setFilterProvider(filters);
-    WRITER = mapper.writerWithDefaultPrettyPrinter();
-  }
-
-  private List<BucketInfo> buckets;
-
-  /**
-   * Constructor for ListBuckets.
-   * @param buckets - List of buckets owned by this user
-   */
-  public ListBuckets(List<BucketInfo> buckets) {
-    this.buckets = buckets;
-
-  }
-
-  /**
-   * Constructor for ListBuckets.
-  */
-  public ListBuckets() {
-    this.buckets = new LinkedList<BucketInfo>();
-  }
-
-  /**
-   * Parses a String to return ListBuckets object.
-   *
-   * @param data - Json String
-   *
-   * @return - ListBuckets
-   *
-   * @throws IOException
-   */
-  public static ListBuckets parse(String data) throws IOException {
-    return READER.readValue(data);
-  }
-
-  /**
-   * Returns a list of Buckets.
-   *
-   * @return Bucket list
-   */
-  public List<BucketInfo> getBuckets() {
-    return buckets;
-  }
-
-  /**
-   * Sets the list of buckets owned by this user.
-   *
-   * @param buckets - List of Buckets
-   */
-  public void setBuckets(List<BucketInfo> buckets) {
-    this.buckets = buckets;
-  }
-
-
-  /**
-   * Returns a JSON string of this object.
-   * After stripping out bytesUsed and keyCount
-   *
-   * @return String
-   */
-  public String toJsonString() throws IOException {
-    return WRITER.writeValueAsString(this);
-  }
-
-  /**
-   * Returns the Object as a Json String.
-   */
-  public String toDBString() throws IOException {
-    return JsonUtils.toJsonString(this);
-  }
-
-  /**
-   * Sorts the buckets based on bucketName.
-   * This is useful when we return the list of buckets
-   */
-  public void sort() {
-    Collections.sort(buckets);
-  }
-
-  /**
-   * Add a new bucket to the list of buckets.
-   * @param bucketInfo - bucket Info
-   */
-  public void addBucket(BucketInfo bucketInfo){
-    this.buckets.add(bucketInfo);
-  }
-
-  /**
-   * This class allows us to create custom filters
-   * for the Json serialization.
-   */
-  @JsonFilter(BUCKET_LIST)
-  class MixIn {
-
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce23d9ad/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/response/ListKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/response/ListKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/response/ListKeys.java
deleted file mode 100644
index 9dc77d2..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/response/ListKeys.java
+++ /dev/null
@@ -1,209 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.hadoop.ozone.web.response;
-
-import java.io.IOException;
-import java.util.Collections;
-import java.util.LinkedList;
-import java.util.List;
-
-import org.apache.hadoop.ozone.web.handlers.BucketArgs;
-import org.apache.hadoop.ozone.web.handlers.ListArgs;
-import org.apache.hadoop.ozone.web.utils.JsonUtils;
-
-import com.fasterxml.jackson.annotation.JsonAutoDetect;
-import com.fasterxml.jackson.annotation.JsonFilter;
-import com.fasterxml.jackson.annotation.PropertyAccessor;
-import com.fasterxml.jackson.databind.ObjectMapper;
-import com.fasterxml.jackson.databind.ObjectReader;
-import com.fasterxml.jackson.databind.ObjectWriter;
-import com.fasterxml.jackson.databind.ser.FilterProvider;
-import com.fasterxml.jackson.databind.ser.impl.SimpleBeanPropertyFilter;
-import com.fasterxml.jackson.databind.ser.impl.SimpleFilterProvider;
-import com.google.common.base.Preconditions;
-
-/**
- * This class represents the list of keys (objects) in a bucket.
- */
-public class ListKeys {
-  static final String OBJECT_LIST = "OBJECT_LIST_FILTER";
-
-  private static final ObjectReader READER =
-      new ObjectMapper().readerFor(ListKeys.class);
-  private static final ObjectWriter WRITER;
-
-  static {
-    ObjectMapper mapper = new ObjectMapper();
-    String[] ignorableFieldNames = {"dataFileName"};
-
-    FilterProvider filters = new SimpleFilterProvider()
-        .addFilter(OBJECT_LIST, SimpleBeanPropertyFilter
-            .serializeAllExcept(ignorableFieldNames));
-    mapper.setVisibility(PropertyAccessor.FIELD,
-        JsonAutoDetect.Visibility.ANY);
-    mapper.addMixIn(Object.class, MixIn.class);
-
-    mapper.setFilterProvider(filters);
-    WRITER = mapper.writerWithDefaultPrettyPrinter();
-  }
-
-  private String name;
-  private String prefix;
-  private long maxKeys;
-  private boolean truncated;
-  private List<KeyInfo> keyList;
-
-  /**
-   * Default constructor needed for json serialization.
-   */
-  public ListKeys() {
-    this.keyList = new LinkedList<>();
-  }
-
-  /**
-   * Constructor for ListKeys.
-   *
-   * @param args      ListArgs
-   * @param truncated is truncated
-   */
-  public ListKeys(ListArgs args, boolean truncated) {
-    Preconditions.checkState(args.getArgs() instanceof  BucketArgs);
-    this.name = ((BucketArgs) args.getArgs()).getBucketName();
-    this.prefix = args.getPrefix();
-    this.maxKeys = args.getMaxKeys();
-    this.truncated = truncated;
-  }
-
-  /**
-   * Converts a Json string to POJO.
-   * @param jsonString - json string.
-   * @return ListObject
-   * @throws IOException - Json conversion error.
-   */
-  public static ListKeys parse(String jsonString) throws IOException {
-    return READER.readValue(jsonString);
-  }
-
-  /**
-   * Returns a list of Objects.
-   *
-   * @return List of KeyInfo Objects.
-   */
-  public List<KeyInfo> getKeyList() {
-    return keyList;
-  }
-
-  /**
-   * Sets the list of Objects.
-   *
-   * @param objectList - List of Keys
-   */
-  public void setKeyList(List<KeyInfo> objectList) {
-    this.keyList = objectList;
-  }
-
-  /**
-   * Gets the Max Key Count.
-   *
-   * @return long
-   */
-  public long getMaxKeys() {
-    return maxKeys;
-  }
-
-  /**
-   * Gets bucket Name.
-   *
-   * @return String
-   */
-  public String getName() {
-    return name;
-  }
-
-  /**
-   * Gets Prefix.
-   *
-   * @return String
-   */
-  public String getPrefix() {
-    return prefix;
-  }
-
-  /**
-   * Gets truncated Status.
-   *
-   * @return Boolean
-   */
-  public boolean isTruncated() {
-    return truncated;
-  }
-
-  /**
-   * Sets the value of truncated.
-   *
-   * @param value - Boolean
-   */
-  public void setTruncated(boolean value) {
-    this.truncated = value;
-  }
-
-  /**
-   * Returns a JSON string of this object. After stripping out bytesUsed and
-   * keyCount.
-   *
-   * @return String
-   * @throws  IOException - On json Errors.
-   */
-  public String toJsonString() throws IOException {
-    return WRITER.writeValueAsString(this);
-  }
-
-  /**
-   * Returns the Object as a Json String.
-   *
-   * @return String
-   * @throws IOException - on json errors.
-   */
-  public String toDBString() throws IOException {
-    return JsonUtils.toJsonString(this);
-  }
-
-  /**
-   * Sorts the keys based on name and version. This is useful when we return the
-   * list of keys.
-   */
-  public void sort() {
-    Collections.sort(keyList);
-  }
-
-  /**
-   * Add a new key to the list of keys.
-   * @param keyInfo - key Info
-   */
-  public void addKey(KeyInfo keyInfo){
-    this.keyList.add(keyInfo);
-  }
-
-  /**
-   * This class allows us to create custom filters for the Json serialization.
-   */
-  @JsonFilter(OBJECT_LIST)
-  class MixIn {
-
-  }
-}
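
Like the other response classes in this patch, ListKeys keeps a static ObjectReader/ObjectWriter pair so the JSON round trip (parse in, toJsonString out) reuses one configured mapper. A minimal sketch of that round trip, with a hypothetical Page bean standing in for the response class:

import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.ObjectReader;
import com.fasterxml.jackson.databind.ObjectWriter;

public class RoundTripDemo {

  // Hypothetical response bean; public fields keep the sketch short.
  static class Page {
    public String name;
    public long maxKeys;
  }

  private static final ObjectReader READER =
      new ObjectMapper().readerFor(Page.class);
  private static final ObjectWriter WRITER =
      new ObjectMapper().writerWithDefaultPrettyPrinter();

  public static void main(String[] args) throws Exception {
    Page page = READER.readValue("{\"name\":\"bucket-1\",\"maxKeys\":100}");
    System.out.println(WRITER.writeValueAsString(page));  // pretty-printed JSON
  }
}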

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce23d9ad/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/response/ListVolumes.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/response/ListVolumes.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/response/ListVolumes.java
deleted file mode 100644
index b918349..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/response/ListVolumes.java
+++ /dev/null
@@ -1,152 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.hadoop.ozone.web.response;
-
-import java.io.IOException;
-import java.util.Collections;
-import java.util.LinkedList;
-import java.util.List;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.ozone.web.utils.JsonUtils;
-
-import com.fasterxml.jackson.annotation.JsonAutoDetect;
-import com.fasterxml.jackson.annotation.JsonFilter;
-import com.fasterxml.jackson.annotation.PropertyAccessor;
-import com.fasterxml.jackson.databind.ObjectMapper;
-import com.fasterxml.jackson.databind.ObjectReader;
-import com.fasterxml.jackson.databind.ObjectWriter;
-import com.fasterxml.jackson.databind.ser.FilterProvider;
-import com.fasterxml.jackson.databind.ser.impl.SimpleBeanPropertyFilter;
-import com.fasterxml.jackson.databind.ser.impl.SimpleFilterProvider;
-
-/**
- * ListVolumes is the class that is returned in JSON format to
- * users when they call ListVolumes.
- */
-@InterfaceAudience.Private
-public class ListVolumes {
-  private List<VolumeInfo> volumes;
-
-  static final String VOLUME_LIST = "VOLUME_LIST_FILTER";
-  private static final ObjectReader READER =
-      new ObjectMapper().readerFor(ListVolumes.class);
-  private static final ObjectWriter WRITER;
-
-  static {
-    ObjectMapper mapper = new ObjectMapper();
-    String[] ignorableFieldNames = {"bytesUsed", "bucketCount"};
-
-    FilterProvider filters = new SimpleFilterProvider()
-        .addFilter(VOLUME_LIST, SimpleBeanPropertyFilter
-            .serializeAllExcept(ignorableFieldNames));
-    mapper.setVisibility(PropertyAccessor.FIELD,
-        JsonAutoDetect.Visibility.ANY);
-    mapper.addMixIn(Object.class, MixIn.class);
-
-    mapper.setFilterProvider(filters);
-    WRITER = mapper.writerWithDefaultPrettyPrinter();
-  }
-
-  /**
-   * Used for json filtering.
-   */
-  @JsonFilter(VOLUME_LIST)
-  class MixIn {
-  }
-
-  /**
-   * Constructs ListVolume objects.
-   */
-  public ListVolumes() {
-    this.volumes = new LinkedList<VolumeInfo>();
-  }
-
-  /**
-   * Gets the list of volumes.
-   *
-   * @return List of VolumeInfo Objects
-   */
-  public List<VolumeInfo> getVolumes() {
-    return volumes;
-  }
-
-
-  /**
-   * Sets volume info.
-   *
-   * @param volumes - List of Volumes
-   */
-  public void setVolumes(List<VolumeInfo> volumes) {
-    this.volumes = volumes;
-  }
-
-  /**
-   * Returns a JSON string of this object.
-   * After stripping out bytesUsed and bucketCount
-   *
-   * @return String
-   */
-  public String toJsonString() throws IOException {
-    return WRITER.writeValueAsString(this);
-  }
-
-  /**
-   * When we serialize a volumeInfo to our database
-   * we will use all fields. However the toJsonString
-   * will strip out bytesUsed and bucketCount from the
-   * volume Info
-   *
-   * @return Json String
-   *
-   * @throws IOException
-   */
-  public String toDBString() throws IOException {
-    return JsonUtils.toJsonString(this);
-  }
-
-  /**
-   * Parses a String to return ListVolumes object.
-   *
-   * @param data - Json String
-   *
-   * @return - ListVolumes
-   *
-   * @throws IOException
-   */
-  public static ListVolumes parse(String data) throws IOException {
-    return READER.readValue(data);
-  }
-
-  /**
-   * Adds a new volume info to the List.
-   *
-   * @param info - VolumeInfo
-   */
-  public void addVolume(VolumeInfo info) {
-    this.volumes.add(info);
-  }
-
-  /**
-   * Sorts the volume names based on volume name.
-   * This is useful when we return the list of volume names
-   */
-  public void sort() {
-    Collections.sort(volumes);
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce23d9ad/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/response/VolumeInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/response/VolumeInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/response/VolumeInfo.java
deleted file mode 100644
index 112b27e..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/response/VolumeInfo.java
+++ /dev/null
@@ -1,308 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.ozone.web.response;
-
-
-import java.io.IOException;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.ozone.web.request.OzoneQuota;
-import org.apache.hadoop.ozone.web.utils.JsonUtils;
-
-import com.fasterxml.jackson.annotation.JsonAutoDetect;
-import com.fasterxml.jackson.annotation.JsonFilter;
-import com.fasterxml.jackson.annotation.PropertyAccessor;
-import com.fasterxml.jackson.databind.ObjectMapper;
-import com.fasterxml.jackson.databind.ObjectReader;
-import com.fasterxml.jackson.databind.ObjectWriter;
-import com.fasterxml.jackson.databind.ser.FilterProvider;
-import com.fasterxml.jackson.databind.ser.impl.SimpleBeanPropertyFilter;
-import com.fasterxml.jackson.databind.ser.impl.SimpleFilterProvider;
-
-/**
- * VolumeInfo is the Java class that represents the JSON
- * returned when a VolumeInfo call is made.
- */
-@InterfaceAudience.Private
-public class VolumeInfo implements Comparable<VolumeInfo> {
-
-  static final String VOLUME_INFO = "VOLUME_INFO_FILTER";
-  private static final ObjectReader READER =
-      new ObjectMapper().readerFor(VolumeInfo.class);
-  private static final ObjectWriter WRITER;
-
-  static {
-    ObjectMapper mapper = new ObjectMapper();
-    String[] ignorableFieldNames = {"bytesUsed", "bucketCount"};
-
-    FilterProvider filters = new SimpleFilterProvider()
-        .addFilter(VOLUME_INFO, SimpleBeanPropertyFilter
-            .serializeAllExcept(ignorableFieldNames));
-    mapper.setVisibility(PropertyAccessor.FIELD,
-        JsonAutoDetect.Visibility.ANY);
-    mapper.addMixIn(Object.class, MixIn.class);
-
-    mapper.setFilterProvider(filters);
-    WRITER = mapper.writerWithDefaultPrettyPrinter();
-  }
-
-  /**
-   * Custom Json Filter Class.
-   */
-  @JsonFilter(VOLUME_INFO)
-  class MixIn {
-  }
-  private VolumeOwner owner;
-  private OzoneQuota quota;
-  private String volumeName;
-  private String createdOn;
-  private String createdBy;
-
-  private long bytesUsed;
-  private long bucketCount;
-
-
-  /**
-   * Constructor for VolumeInfo.
-   *
-   * @param volumeName - Name of the Volume
-   * @param createdOn - Date String
-   * @param createdBy - Person who created it
-   */
-  public VolumeInfo(String volumeName, String createdOn, String createdBy) {
-    this.createdOn = createdOn;
-    this.volumeName = volumeName;
-    this.createdBy = createdBy;
-  }
-
-  /**
-   * Constructor for VolumeInfo.
-   */
-  public VolumeInfo() {
-  }
-
-  /**
-   * Returns the name of the person who created this volume.
-   *
-   * @return Name of Admin who created this
-   */
-  public String getCreatedBy() {
-    return createdBy;
-  }
-
-  /**
-   * Sets the user name of the person who created this volume.
-   *
-   * @param createdBy - UserName
-   */
-  public void setCreatedBy(String createdBy) {
-    this.createdBy = createdBy;
-  }
-
-  /**
-   * Gets the date on which this volume was created.
-   *
-   * @return - Date String
-   */
-  public String getCreatedOn() {
-    return createdOn;
-  }
-
-  /**
-   * Sets the date string.
-   *
-   * @param createdOn - Date String
-   */
-  public void setCreatedOn(String createdOn) {
-    this.createdOn = createdOn;
-  }
-
-  /**
-   * Returns the owner info.
-   *
-   * @return - OwnerInfo
-   */
-  public VolumeOwner getOwner() {
-    return owner;
-  }
-
-  /**
-   * Sets the owner.
-   *
-   * @param owner - OwnerInfo
-   */
-  public void setOwner(VolumeOwner owner) {
-    this.owner = owner;
-  }
-
-  /**
-   * Returns the quota information on a volume.
-   *
-   * @return Quota
-   */
-  public OzoneQuota getQuota() {
-    return quota;
-  }
-
-  /**
-   * Sets the quota info.
-   *
-   * @param quota - Quota Info
-   */
-  public void setQuota(OzoneQuota quota) {
-    this.quota = quota;
-  }
-
-  /**
-   * gets the volume name.
-   *
-   * @return - Volume Name
-   */
-  public String getVolumeName() {
-    return volumeName;
-  }
-
-  /**
-   * Sets the volume name.
-   *
-   * @param volumeName - Volume Name
-   */
-  public void setVolumeName(String volumeName) {
-    this.volumeName = volumeName;
-  }
-
-  /**
-   * Returns a JSON string of this object.
-   * After stripping out bytesUsed and bucketCount
-   *
-   * @return String - json string
-   * @throws IOException
-   */
-  public String toJsonString() throws IOException {
-    return WRITER.writeValueAsString(this);
-  }
-
-  /**
-   * When we serialize a volumeInfo to our database
-   * we will use all fields. However the toJsonString
-   * will strip out bytesUsed and bucketCount from the
-   * volume Info
-   *
-   * @return Json String
-   *
-   * @throws IOException
-   */
-  public String toDBString() throws IOException {
-    return JsonUtils.toJsonString(this);
-  }
-
-
-  /**
-   * Comparable Interface.
-   * @param o VolumeInfo Object.
-   * @return Result of comparison
-   */
-  @Override
-  public int compareTo(VolumeInfo o) {
-    return this.volumeName.compareTo(o.getVolumeName());
-  }
-
-  /**
-   * Gets the number of bytesUsed by this volume.
-   *
-   * @return long - Bytes used
-   */
-  public long getBytesUsed() {
-    return bytesUsed;
-  }
-
-  /**
-   * Sets number of bytesUsed by this volume.
-   *
-   * @param bytesUsed - Number of bytesUsed
-   */
-  public void setBytesUsed(long bytesUsed) {
-    this.bytesUsed = bytesUsed;
-  }
-
-  /**
-   * Returns VolumeInfo class from json string.
-   *
-   * @param data - Json String
-   *
-   * @return VolumeInfo
-   *
-   * @throws IOException
-   */
-  public static VolumeInfo parse(String data) throws IOException {
-    return READER.readValue(data);
-  }
-
-  /**
-   * Indicates whether some other object is "equal to" this one.
-   *
-   * @param obj the reference object with which to compare.
-   *
-   * @return {@code true} if this object is the same as the obj
-   * argument; {@code false} otherwise.
-   */
-  @Override
-  public boolean equals(Object obj) {
-    if (obj == null) {
-      return false;
-    }
-    if (getClass() != obj.getClass()) {
-      return false;
-    }
-    VolumeInfo otherInfo = (VolumeInfo) obj;
-    return otherInfo.getVolumeName().equals(this.getVolumeName());
-  }
-
-  /**
-   * Returns a hash code value for the object. This method is
-   * supported for the benefit of hash tables such as those provided by
-   * HashMap.
-   * @return a hash code value for this object.
-   *
-   * @see Object#equals(Object)
-   * @see System#identityHashCode
-   */
-  @Override
-  public int hashCode() {
-    return getVolumeName().hashCode();
-  }
-
-  /**
-   * Total number of buckets under this volume.
-   *
-   * @return - bucketCount
-   */
-  public long getBucketCount() {
-    return bucketCount;
-  }
-
-  /**
-   * Sets the buckets count.
-   *
-   * @param bucketCount - Bucket Count
-   */
-  public void setBucketCount(long bucketCount) {
-    this.bucketCount = bucketCount;
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce23d9ad/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/response/VolumeOwner.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/response/VolumeOwner.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/response/VolumeOwner.java
deleted file mode 100644
index afb0460..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/response/VolumeOwner.java
+++ /dev/null
@@ -1,60 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.ozone.web.response;
-
-import com.fasterxml.jackson.annotation.JsonInclude;
-import org.apache.hadoop.classification.InterfaceAudience;
-
-/**
- * Volume Owner represents the owner of a volume.
- *
- * This is a class instead of a string since we might need to extend this class
- * to support other forms of authentication.
- */
-@InterfaceAudience.Private
-public class VolumeOwner {
-  @JsonInclude(JsonInclude.Include.NON_NULL)
-  private String name;
-
-  /**
-   * Constructor for VolumeOwner.
-   *
-   * @param name - name of the User
-   */
-  public VolumeOwner(String name) {
-    this.name = name;
-  }
-
-  /**
-   * Constructs Volume Owner.
-   */
-  public VolumeOwner() {
-    name = null;
-  }
-
-  /**
-   * Returns the user name.
-   *
-   * @return Name
-   */
-  public String getName() {
-    return name;
-  }
-
-}
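
VolumeOwner above annotates its name field with @JsonInclude(NON_NULL), so a volume with no owner simply omits the field from the JSON instead of emitting "name": null. A minimal sketch of that behavior with a hypothetical Owner bean:

import com.fasterxml.jackson.annotation.JsonInclude;
import com.fasterxml.jackson.databind.ObjectMapper;

public class NonNullDemo {

  // Hypothetical owner-like bean; the field is dropped from JSON while null.
  static class Owner {
    @JsonInclude(JsonInclude.Include.NON_NULL)
    public String name;
  }

  public static void main(String[] args) throws Exception {
    ObjectMapper mapper = new ObjectMapper();

    Owner anonymous = new Owner();   // name stays null
    Owner named = new Owner();
    named.name = "hadoop";

    System.out.println(mapper.writeValueAsString(anonymous)); // {}
    System.out.println(mapper.writeValueAsString(named));     // {"name":"hadoop"}
  }
}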

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce23d9ad/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/response/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/response/package-info.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/response/package-info.java
deleted file mode 100644
index 3bf66c8..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/response/package-info.java
+++ /dev/null
@@ -1,23 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-/**
- * Response classes returned by the Ozone REST interface.
- */
-package org.apache.hadoop.ozone.web.response;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce23d9ad/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/storage/DistributedStorageHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/storage/DistributedStorageHandler.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/storage/DistributedStorageHandler.java
deleted file mode 100644
index 1830c71..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/storage/DistributedStorageHandler.java
+++ /dev/null
@@ -1,539 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.ozone.web.storage;
-
-import com.google.common.base.Strings;
-import org.apache.hadoop.io.IOUtils;
-import org.apache.hadoop.ozone.client.io.LengthInputStream;
-import org.apache.hadoop.ozone.ksm.helpers.KsmBucketArgs;
-import org.apache.hadoop.ozone.ksm.helpers.KsmBucketInfo;
-import org.apache.hadoop.ozone.ksm.helpers.KsmKeyArgs;
-import org.apache.hadoop.ozone.ksm.helpers.KsmKeyInfo;
-import org.apache.hadoop.ozone.ksm.helpers.KsmVolumeArgs;
-import org.apache.hadoop.ozone.ksm.helpers.OpenKeySession;
-import org.apache.hadoop.ozone.ksm.protocolPB
-    .KeySpaceManagerProtocolClientSideTranslatorPB;
-import org.apache.hadoop.conf.OzoneConfiguration;
-import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.ozone.OzoneConsts.Versioning;
-import org.apache.hadoop.ozone.client.io.ChunkGroupInputStream;
-import org.apache.hadoop.ozone.client.io.ChunkGroupOutputStream;
-import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
-import org.apache.hadoop.ozone.protocol.proto.KeySpaceManagerProtocolProtos;
-import org.apache.hadoop.ozone.protocol.proto.OzoneProtos;
-import org.apache.hadoop.ozone.protocolPB.KSMPBHelper;
-import org.apache.hadoop.ozone.ksm.KSMConfigKeys;
-import org.apache.hadoop.ozone.OzoneAcl;
-import org.apache.hadoop.ozone.web.request.OzoneQuota;
-import org.apache.hadoop.scm.ScmConfigKeys;
-import org.apache.hadoop.scm.XceiverClientManager;
-import org.apache.hadoop.scm.protocolPB
-    .StorageContainerLocationProtocolClientSideTranslatorPB;
-import org.apache.hadoop.ozone.client.rest.OzoneException;
-import org.apache.hadoop.ozone.web.handlers.BucketArgs;
-import org.apache.hadoop.ozone.web.handlers.KeyArgs;
-import org.apache.hadoop.ozone.web.handlers.ListArgs;
-import org.apache.hadoop.ozone.web.handlers.VolumeArgs;
-import org.apache.hadoop.ozone.web.handlers.UserArgs;
-import org.apache.hadoop.ozone.web.interfaces.StorageHandler;
-import org.apache.hadoop.ozone.web.response.ListVolumes;
-import org.apache.hadoop.ozone.web.response.VolumeInfo;
-import org.apache.hadoop.ozone.web.response.VolumeOwner;
-import org.apache.hadoop.ozone.web.utils.OzoneUtils;
-import org.apache.hadoop.ozone.web.response.ListBuckets;
-import org.apache.hadoop.ozone.web.response.BucketInfo;
-import org.apache.hadoop.ozone.web.response.KeyInfo;
-import org.apache.hadoop.ozone.web.response.ListKeys;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-import java.io.OutputStream;
-import java.util.List;
-
-/**
- * A {@link StorageHandler} implementation that distributes object storage
- * across the nodes of an HDFS cluster.
- */
-public final class DistributedStorageHandler implements StorageHandler {
-  private static final Logger LOG =
-      LoggerFactory.getLogger(DistributedStorageHandler.class);
-
-  private final StorageContainerLocationProtocolClientSideTranslatorPB
-      storageContainerLocationClient;
-  private final KeySpaceManagerProtocolClientSideTranslatorPB
-      keySpaceManagerClient;
-  private final XceiverClientManager xceiverClientManager;
-  private final OzoneAcl.OzoneACLRights userRights;
-  private final OzoneAcl.OzoneACLRights groupRights;
-  private int chunkSize;
-  private final boolean useRatis;
-  private final OzoneProtos.ReplicationType type;
-  private final OzoneProtos.ReplicationFactor factor;
-
-  /**
-   * Creates a new DistributedStorageHandler.
-   *
-   * @param conf configuration
-   * @param storageContainerLocation StorageContainerLocationProtocol proxy
-   * @param keySpaceManagerClient KeySpaceManager proxy
-   */
-  public DistributedStorageHandler(OzoneConfiguration conf,
-      StorageContainerLocationProtocolClientSideTranslatorPB
-          storageContainerLocation,
-      KeySpaceManagerProtocolClientSideTranslatorPB
-          keySpaceManagerClient) {
-    this.keySpaceManagerClient = keySpaceManagerClient;
-    this.storageContainerLocationClient = storageContainerLocation;
-    this.xceiverClientManager = new XceiverClientManager(conf);
-    this.useRatis = conf.getBoolean(
-        ScmConfigKeys.DFS_CONTAINER_RATIS_ENABLED_KEY,
-        ScmConfigKeys.DFS_CONTAINER_RATIS_ENABLED_DEFAULT);
-
-    if(useRatis) {
-      type = OzoneProtos.ReplicationType.RATIS;
-      factor = OzoneProtos.ReplicationFactor.THREE;
-    } else {
-      type = OzoneProtos.ReplicationType.STAND_ALONE;
-      factor = OzoneProtos.ReplicationFactor.ONE;
-    }
-
-    chunkSize = conf.getInt(ScmConfigKeys.OZONE_SCM_CHUNK_SIZE_KEY,
-        ScmConfigKeys.OZONE_SCM_CHUNK_SIZE_DEFAULT);
-    userRights = conf.getEnum(KSMConfigKeys.OZONE_KSM_USER_RIGHTS,
-        KSMConfigKeys.OZONE_KSM_USER_RIGHTS_DEFAULT);
-    groupRights = conf.getEnum(KSMConfigKeys.OZONE_KSM_GROUP_RIGHTS,
-        KSMConfigKeys.OZONE_KSM_GROUP_RIGHTS_DEFAULT);
-    if(chunkSize > ScmConfigKeys.OZONE_SCM_CHUNK_MAX_SIZE) {
-      LOG.warn("The chunk size ({}) is not allowed to be more than"
-              + " the maximum size ({}),"
-              + " resetting to the maximum size.",
-          chunkSize, ScmConfigKeys.OZONE_SCM_CHUNK_MAX_SIZE);
-      chunkSize = ScmConfigKeys.OZONE_SCM_CHUNK_MAX_SIZE;
-    }
-  }
-
-  @Override
-  public void createVolume(VolumeArgs args) throws IOException, OzoneException {
-    long quota = args.getQuota() == null ?
-        OzoneConsts.MAX_QUOTA_IN_BYTES : args.getQuota().sizeInBytes();
-    OzoneAcl userAcl =
-        new OzoneAcl(OzoneAcl.OzoneACLType.USER,
-            args.getUserName(), userRights);
-    KsmVolumeArgs.Builder builder = KsmVolumeArgs.newBuilder();
-    builder.setAdminName(args.getAdminName())
-        .setOwnerName(args.getUserName())
-        .setVolume(args.getVolumeName())
-        .setQuotaInBytes(quota)
-        .addOzoneAcls(KSMPBHelper.convertOzoneAcl(userAcl));
-    if (args.getGroups() != null) {
-      for (String group : args.getGroups()) {
-        OzoneAcl groupAcl =
-            new OzoneAcl(OzoneAcl.OzoneACLType.GROUP, group, groupRights);
-        builder.addOzoneAcls(KSMPBHelper.convertOzoneAcl(groupAcl));
-      }
-    }
-    keySpaceManagerClient.createVolume(builder.build());
-  }
-
-  @Override
-  public void setVolumeOwner(VolumeArgs args) throws
-      IOException, OzoneException {
-    keySpaceManagerClient.setOwner(args.getVolumeName(), args.getUserName());
-  }
-
-  @Override
-  public void setVolumeQuota(VolumeArgs args, boolean remove)
-      throws IOException, OzoneException {
-    long quota = remove ? OzoneConsts.MAX_QUOTA_IN_BYTES :
-        args.getQuota().sizeInBytes();
-    keySpaceManagerClient.setQuota(args.getVolumeName(), quota);
-  }
-
-  @Override
-  public boolean checkVolumeAccess(String volume, OzoneAcl acl)
-      throws IOException, OzoneException {
-    return keySpaceManagerClient
-        .checkVolumeAccess(volume, KSMPBHelper.convertOzoneAcl(acl));
-  }
-
-  @Override
-  public ListVolumes listVolumes(ListArgs args)
-      throws IOException, OzoneException {
-    int maxNumOfKeys = args.getMaxKeys();
-    if (maxNumOfKeys <= 0 ||
-        maxNumOfKeys > OzoneConsts.MAX_LISTVOLUMES_SIZE) {
-      throw new IllegalArgumentException(
-          String.format("Illegal max number of keys specified,"
-                  + " the value must be in range (0, %d], actual : %d.",
-              OzoneConsts.MAX_LISTVOLUMES_SIZE, maxNumOfKeys));
-    }
-
-    List<KsmVolumeArgs> listResult;
-    if (args.isRootScan()) {
-      listResult = keySpaceManagerClient.listAllVolumes(args.getPrefix(),
-          args.getPrevKey(), args.getMaxKeys());
-    } else {
-      UserArgs userArgs = args.getArgs();
-      if (userArgs == null || userArgs.getUserName() == null) {
-        throw new IllegalArgumentException("Illegal argument,"
-            + " missing user argument.");
-      }
-      listResult = keySpaceManagerClient.listVolumeByUser(
-          args.getArgs().getUserName(), args.getPrefix(), args.getPrevKey(),
-          args.getMaxKeys());
-    }
-
-    // TODO Add missing fields createdBy, bucketCount and bytesUsed
-    ListVolumes result = new ListVolumes();
-    for (KsmVolumeArgs volumeArgs : listResult) {
-      VolumeInfo info = new VolumeInfo();
-      KeySpaceManagerProtocolProtos.VolumeInfo
-          infoProto = volumeArgs.getProtobuf();
-      info.setOwner(new VolumeOwner(infoProto.getOwnerName()));
-      info.setQuota(OzoneQuota.getOzoneQuota(infoProto.getQuotaInBytes()));
-      info.setVolumeName(infoProto.getVolume());
-      info.setCreatedOn(OzoneUtils.formatTime(infoProto.getCreationTime()));
-      result.addVolume(info);
-    }
-
-    return result;
-  }
-
-  @Override
-  public void deleteVolume(VolumeArgs args)
-      throws IOException, OzoneException {
-    keySpaceManagerClient.deleteVolume(args.getVolumeName());
-  }
-
-  @Override
-  public VolumeInfo getVolumeInfo(VolumeArgs args)
-      throws IOException, OzoneException {
-    KsmVolumeArgs volumeArgs =
-        keySpaceManagerClient.getVolumeInfo(args.getVolumeName());
-    //TODO: add support for createdOn and other fields in getVolumeInfo
-    VolumeInfo volInfo =
-        new VolumeInfo(volumeArgs.getVolume(), null,
-            volumeArgs.getAdminName());
-    volInfo.setOwner(new VolumeOwner(volumeArgs.getOwnerName()));
-    volInfo.setQuota(OzoneQuota.getOzoneQuota(volumeArgs.getQuotaInBytes()));
-    volInfo.setCreatedOn(OzoneUtils.formatTime(volumeArgs.getCreationTime()));
-    return volInfo;
-  }
-
-  @Override
-  public void createBucket(final BucketArgs args)
-      throws IOException, OzoneException {
-    KsmBucketInfo.Builder builder = KsmBucketInfo.newBuilder();
-    builder.setVolumeName(args.getVolumeName())
-        .setBucketName(args.getBucketName());
-    if(args.getAddAcls() != null) {
-      builder.setAcls(args.getAddAcls());
-    }
-    if(args.getStorageType() != null) {
-      builder.setStorageType(args.getStorageType());
-    }
-    if(args.getVersioning() != null) {
-      builder.setIsVersionEnabled(getBucketVersioningProtobuf(
-          args.getVersioning()));
-    }
-    keySpaceManagerClient.createBucket(builder.build());
-  }
-
-  /**
-   * Converts an OzoneConsts.Versioning enum value to a boolean.
-   *
-   * @param version the versioning setting, may be null
-   * @return the corresponding boolean value
-   */
-  private boolean getBucketVersioningProtobuf(
-      Versioning version) {
-    if(version != null) {
-      switch(version) {
-      case ENABLED:
-        return true;
-      case NOT_DEFINED:
-      case DISABLED:
-      default:
-        return false;
-      }
-    }
-    return false;
-  }
-
-  @Override
-  public void setBucketAcls(BucketArgs args)
-      throws IOException, OzoneException {
-    List<OzoneAcl> removeAcls = args.getRemoveAcls();
-    List<OzoneAcl> addAcls = args.getAddAcls();
-    if(removeAcls != null || addAcls != null) {
-      KsmBucketArgs.Builder builder = KsmBucketArgs.newBuilder();
-      builder.setVolumeName(args.getVolumeName())
-          .setBucketName(args.getBucketName());
-      if(removeAcls != null && !removeAcls.isEmpty()) {
-        builder.setRemoveAcls(args.getRemoveAcls());
-      }
-      if(addAcls != null && !addAcls.isEmpty()) {
-        builder.setAddAcls(args.getAddAcls());
-      }
-      keySpaceManagerClient.setBucketProperty(builder.build());
-    }
-  }
-
-  @Override
-  public void setBucketVersioning(BucketArgs args)
-      throws IOException, OzoneException {
-    KsmBucketArgs.Builder builder = KsmBucketArgs.newBuilder();
-    builder.setVolumeName(args.getVolumeName())
-        .setBucketName(args.getBucketName())
-        .setIsVersionEnabled(getBucketVersioningProtobuf(
-            args.getVersioning()));
-    keySpaceManagerClient.setBucketProperty(builder.build());
-  }
-
-  @Override
-  public void setBucketStorageClass(BucketArgs args)
-      throws IOException, OzoneException {
-    KsmBucketArgs.Builder builder = KsmBucketArgs.newBuilder();
-    builder.setVolumeName(args.getVolumeName())
-        .setBucketName(args.getBucketName())
-        .setStorageType(args.getStorageType());
-    keySpaceManagerClient.setBucketProperty(builder.build());
-  }
-
-  @Override
-  public void deleteBucket(BucketArgs args)
-      throws IOException, OzoneException {
-    keySpaceManagerClient.deleteBucket(args.getVolumeName(),
-        args.getBucketName());
-  }
-
-  @Override
-  public void checkBucketAccess(BucketArgs args)
-      throws IOException, OzoneException {
-    throw new UnsupportedOperationException(
-        "checkBucketAccess not implemented");
-  }
-
-  @Override
-  public ListBuckets listBuckets(ListArgs args)
-      throws IOException, OzoneException {
-    ListBuckets result = new ListBuckets();
-    UserArgs userArgs = args.getArgs();
-    if (userArgs instanceof VolumeArgs) {
-      VolumeArgs va = (VolumeArgs) userArgs;
-      if (Strings.isNullOrEmpty(va.getVolumeName())) {
-        throw new IllegalArgumentException("Illegal argument,"
-            + " volume name cannot be null or empty.");
-      }
-
-      int maxNumOfKeys = args.getMaxKeys();
-      if (maxNumOfKeys <= 0 ||
-          maxNumOfKeys > OzoneConsts.MAX_LISTBUCKETS_SIZE) {
-        throw new IllegalArgumentException(
-            String.format("Illegal max number of keys specified,"
-                + " the value must be in range (0, %d], actual : %d.",
-                OzoneConsts.MAX_LISTBUCKETS_SIZE, maxNumOfKeys));
-      }
-
-      List<KsmBucketInfo> buckets =
-          keySpaceManagerClient.listBuckets(va.getVolumeName(),
-              args.getPrevKey(), args.getPrefix(), args.getMaxKeys());
-
-      // Convert the result for the web layer.
-      for (KsmBucketInfo bucketInfo : buckets) {
-        BucketInfo bk = new BucketInfo();
-        bk.setVolumeName(bucketInfo.getVolumeName());
-        bk.setBucketName(bucketInfo.getBucketName());
-        bk.setStorageType(bucketInfo.getStorageType());
-        bk.setAcls(bucketInfo.getAcls());
-        bk.setCreatedOn(OzoneUtils.formatTime(bucketInfo.getCreationTime()));
-        result.addBucket(bk);
-      }
-      return result;
-    } else {
-      throw new IllegalArgumentException("Illegal argument provided,"
-          + " expecting VolumeArgs type but met "
-          + userArgs.getClass().getSimpleName());
-    }
-  }
-
-  @Override
-  public BucketInfo getBucketInfo(BucketArgs args)
-      throws IOException {
-    String volumeName = args.getVolumeName();
-    String bucketName = args.getBucketName();
-    KsmBucketInfo ksmBucketInfo = keySpaceManagerClient.getBucketInfo(
-        volumeName, bucketName);
-    BucketInfo bucketInfo = new BucketInfo(ksmBucketInfo.getVolumeName(),
-        ksmBucketInfo.getBucketName());
-    if(ksmBucketInfo.getIsVersionEnabled()) {
-      bucketInfo.setVersioning(Versioning.ENABLED);
-    } else {
-      bucketInfo.setVersioning(Versioning.DISABLED);
-    }
-    bucketInfo.setStorageType(ksmBucketInfo.getStorageType());
-    bucketInfo.setAcls(ksmBucketInfo.getAcls());
-    bucketInfo.setCreatedOn(
-        OzoneUtils.formatTime(ksmBucketInfo.getCreationTime()));
-    return bucketInfo;
-  }
-
-  @Override
-  public OutputStream newKeyWriter(KeyArgs args) throws IOException,
-      OzoneException {
-    KsmKeyArgs keyArgs = new KsmKeyArgs.Builder()
-        .setVolumeName(args.getVolumeName())
-        .setBucketName(args.getBucketName())
-        .setKeyName(args.getKeyName())
-        .setDataSize(args.getSize())
-        .setType(xceiverClientManager.getType())
-        .setFactor(xceiverClientManager.getFactor())
-        .build();
-    // contact KSM to allocate a block for key.
-    OpenKeySession openKey = keySpaceManagerClient.openKey(keyArgs);
-    ChunkGroupOutputStream groupOutputStream =
-        new ChunkGroupOutputStream.Builder()
-            .setHandler(openKey)
-            .setXceiverClientManager(xceiverClientManager)
-            .setScmClient(storageContainerLocationClient)
-            .setKsmClient(keySpaceManagerClient)
-            .setChunkSize(chunkSize)
-            .setRequestID(args.getRequestID())
-            .setType(xceiverClientManager.getType())
-            .setFactor(xceiverClientManager.getFactor())
-            .build();
-    groupOutputStream.addPreallocateBlocks(
-        openKey.getKeyInfo().getLatestVersionLocations(),
-        openKey.getOpenVersion());
-    return new OzoneOutputStream(groupOutputStream);
-  }
-
-  @Override
-  public void commitKey(KeyArgs args, OutputStream stream) throws
-      IOException, OzoneException {
-    stream.close();
-  }
-
-  @Override
-  public LengthInputStream newKeyReader(KeyArgs args) throws IOException,
-      OzoneException {
-    KsmKeyArgs keyArgs = new KsmKeyArgs.Builder()
-        .setVolumeName(args.getVolumeName())
-        .setBucketName(args.getBucketName())
-        .setKeyName(args.getKeyName())
-        .setDataSize(args.getSize())
-        .build();
-    KsmKeyInfo keyInfo = keySpaceManagerClient.lookupKey(keyArgs);
-    return ChunkGroupInputStream.getFromKsmKeyInfo(
-        keyInfo, xceiverClientManager, storageContainerLocationClient,
-        args.getRequestID());
-  }
-
-  @Override
-  public void deleteKey(KeyArgs args) throws IOException, OzoneException {
-    KsmKeyArgs keyArgs = new KsmKeyArgs.Builder()
-        .setVolumeName(args.getVolumeName())
-        .setBucketName(args.getBucketName())
-        .setKeyName(args.getKeyName())
-        .build();
-    keySpaceManagerClient.deleteKey(keyArgs);
-  }
-
-  @Override
-  public KeyInfo getKeyInfo(KeyArgs args) throws IOException, OzoneException {
-    KsmKeyArgs keyArgs = new KsmKeyArgs.Builder()
-        .setVolumeName(args.getVolumeName())
-        .setBucketName(args.getBucketName())
-        .setKeyName(args.getKeyName())
-        .build();
-
-    KsmKeyInfo ksmKeyInfo = keySpaceManagerClient.lookupKey(keyArgs);
-    KeyInfo keyInfo = new KeyInfo();
-    keyInfo.setVersion(0);
-    keyInfo.setKeyName(ksmKeyInfo.getKeyName());
-    keyInfo.setSize(ksmKeyInfo.getDataSize());
-    keyInfo.setCreatedOn(
-        OzoneUtils.formatTime(ksmKeyInfo.getCreationTime()));
-    keyInfo.setModifiedOn(
-        OzoneUtils.formatTime(ksmKeyInfo.getModificationTime()));
-    return keyInfo;
-  }
-
-  @Override
-  public ListKeys listKeys(ListArgs args) throws IOException, OzoneException {
-    ListKeys result = new ListKeys();
-    UserArgs userArgs = args.getArgs();
-    if (userArgs instanceof BucketArgs) {
-      BucketArgs bucketArgs = (BucketArgs) userArgs;
-      if (Strings.isNullOrEmpty(bucketArgs.getVolumeName())) {
-        throw new IllegalArgumentException("Illegal argument,"
-            + " volume name cannot be null or empty.");
-      }
-
-      if (Strings.isNullOrEmpty(bucketArgs.getBucketName())) {
-        throw new IllegalArgumentException("Illegal argument,"
-            + " bucket name cannot be null or empty.");
-      }
-
-      int maxNumOfKeys = args.getMaxKeys();
-      if (maxNumOfKeys <= 0 ||
-          maxNumOfKeys > OzoneConsts.MAX_LISTKEYS_SIZE) {
-        throw new IllegalArgumentException(
-            String.format("Illegal max number of keys specified,"
-                + " the value must be in range (0, %d], actual : %d.",
-                OzoneConsts.MAX_LISTKEYS_SIZE, maxNumOfKeys));
-      }
-
-      List<KsmKeyInfo> keys =
-          keySpaceManagerClient.listKeys(bucketArgs.getVolumeName(),
-              bucketArgs.getBucketName(),
-              args.getPrevKey(), args.getPrefix(), args.getMaxKeys());
-
-      // Convert the result for the web layer.
-      for (KsmKeyInfo info : keys) {
-        KeyInfo tempInfo = new KeyInfo();
-        tempInfo.setVersion(0);
-        tempInfo.setKeyName(info.getKeyName());
-        tempInfo.setSize(info.getDataSize());
-        tempInfo.setCreatedOn(
-            OzoneUtils.formatTime(info.getCreationTime()));
-        tempInfo.setModifiedOn(
-            OzoneUtils.formatTime(info.getModificationTime()));
-
-        result.addKey(tempInfo);
-      }
-      return result;
-    } else {
-      throw new IllegalArgumentException("Illegal argument provided,"
-          + " expecting BucketArgs type but met "
-          + userArgs.getClass().getSimpleName());
-    }
-  }
-
-  /**
-   * Closes DistributedStorageHandler.
-   */
-  @Override
-  public void close() {
-    IOUtils.cleanupWithLogger(LOG, xceiverClientManager);
-    IOUtils.cleanupWithLogger(LOG, keySpaceManagerClient);
-    IOUtils.cleanupWithLogger(LOG, storageContainerLocationClient);
-  }
-}
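
For reference, the key-write path removed above works as follows: newKeyWriter() asks the KeySpaceManager to open the key (pre-allocating block locations), wraps the open session in a ChunkGroupOutputStream, and commitKey() simply closes that stream, which flushes the buffered chunks and finalizes the key. Below is a minimal standalone Java sketch of that open/write/commit contract; StorageHandlerSketch and InMemoryHandler are illustrative stand-ins, not the real Ozone API.

import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.OutputStream;
import java.nio.charset.StandardCharsets;
import java.util.HashMap;
import java.util.Map;

/** Illustrative stand-in for the open/write/commit key contract. */
interface StorageHandlerSketch {
  OutputStream newKeyWriter(String volume, String bucket, String key)
      throws IOException;

  void commitKey(OutputStream stream) throws IOException;
}

/** Toy in-memory handler; the real handler streams chunks to containers. */
class InMemoryHandler implements StorageHandlerSketch {
  private final Map<String, ByteArrayOutputStream> openKeys = new HashMap<>();

  @Override
  public OutputStream newKeyWriter(String volume, String bucket, String key) {
    ByteArrayOutputStream out = new ByteArrayOutputStream();
    openKeys.put(volume + "/" + bucket + "/" + key, out);
    return out;
  }

  @Override
  public void commitKey(OutputStream stream) throws IOException {
    // Mirrors the removed implementation: committing a key is just closing
    // the stream, which flushes buffered data and finalizes the key.
    stream.close();
  }
}

public class KeyWriteSketch {
  public static void main(String[] args) throws IOException {
    StorageHandlerSketch handler = new InMemoryHandler();
    OutputStream out = handler.newKeyWriter("vol1", "bucket1", "key1");
    out.write("hello ozone".getBytes(StandardCharsets.UTF_8));
    handler.commitKey(out);
  }
}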

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce23d9ad/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/storage/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/storage/package-info.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/storage/package-info.java
deleted file mode 100644
index f5499f5..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/storage/package-info.java
+++ /dev/null
@@ -1,27 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-/**
- * Ozone storage handler implementation integrating REST interface front-end
- * with container data pipeline back-end.
- */
-@InterfaceAudience.Private
-package org.apache.hadoop.ozone.web.storage;
-
-import org.apache.hadoop.classification.InterfaceAudience;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce23d9ad/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/userauth/Simple.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/userauth/Simple.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/userauth/Simple.java
deleted file mode 100644
index 397c80f..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/userauth/Simple.java
+++ /dev/null
@@ -1,169 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.hadoop.ozone.web.userauth;
-
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.ozone.web.exceptions.ErrorTable;
-import org.apache.hadoop.ozone.client.rest.OzoneException;
-import org.apache.hadoop.ozone.web.handlers.UserArgs;
-import org.apache.hadoop.ozone.client.rest.headers.Header;
-import org.apache.hadoop.ozone.web.interfaces.UserAuth;
-import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.security.UserGroupInformation;
-
-import javax.ws.rs.core.HttpHeaders;
-import java.io.IOException;
-import java.util.List;
-
-/**
- * Simple is a UserAuth implementation that is used in the insecure
- * mode of Ozone. It maps more or less to the simple user scheme in
- * HDFS.
- */
-@InterfaceAudience.Private
-public class Simple implements UserAuth {
-  /**
-   * Returns the x-ozone-user header value, i.e. the user on whose
-   * behalf the request is made. This is used in the volume creation path.
-   *
-   * @param userArgs - UserArgs
-   *
-   * @throws OzoneException
-   */
-  @Override
-  public String getOzoneUser(UserArgs userArgs) throws OzoneException {
-    assert userArgs != null : "userArgs cannot be null";
-
-    HttpHeaders headers = userArgs.getHeaders();
-    List<String> users = headers.getRequestHeader(Header.OZONE_USER);
-
-    if ((users == null) || (users.size() == 0)) {
-      return null;
-    }
-    if (users.size() > 1) {
-      throw ErrorTable.newError(ErrorTable.BAD_AUTHORIZATION, userArgs);
-    }
-    return users.get(0).toLowerCase().trim();
-  }
-
-  /**
-   * Returns the user name as a string from the URI and HTTP headers.
-   *
-   * @param userArgs - user args
-   *
-   * @throws OzoneException -- Allows the underlying system
-   * to throw; that error will get propagated to clients
-   */
-  @Override
-  public String getUser(UserArgs userArgs) throws OzoneException {
-    assert userArgs != null : "userArgs cannot be null";
-
-    HttpHeaders headers = userArgs.getHeaders();
-    List<String> users = headers.getRequestHeader(HttpHeaders.AUTHORIZATION);
-    if (users == null || users.size() > 1) {
-      throw ErrorTable.newError(ErrorTable.BAD_AUTHORIZATION, userArgs);
-    }
-
-    if (users.size() == 0) {
-      return null;
-    }
-
-    String user = users.get(0).trim();
-    if (user.startsWith(Header.OZONE_SIMPLE_AUTHENTICATION_SCHEME)) {
-      user = user.replace(Header.OZONE_SIMPLE_AUTHENTICATION_SCHEME, "");
-      return user.toLowerCase().trim();
-    } else {
-      throw ErrorTable.newError(ErrorTable.BAD_AUTHORIZATION, userArgs);
-    }
-  }
-
-
-  /**
-   * Returns true if a user is an Admin - {root and hdfs are treated as admins}.
-   *
-   * @param userArgs - User Args
-   *
-   * @throws OzoneException -- Allows the underlying system
-   * to throw; that error will get propagated to clients
-   */
-  @Override
-  public boolean isAdmin(UserArgs userArgs) throws OzoneException {
-    assert userArgs != null : "userArgs cannot be null";
-
-    String user;
-    String currentUser;
-    try {
-      user = getUser(userArgs);
-      currentUser = UserGroupInformation.getCurrentUser().getShortUserName();
-    } catch (IOException e) {
-      throw ErrorTable.newError(ErrorTable.BAD_AUTHORIZATION, userArgs);
-    }
-    return
-        (user.compareToIgnoreCase(OzoneConsts.OZONE_SIMPLE_ROOT_USER) == 0) ||
-            (user.compareToIgnoreCase(OzoneConsts.OZONE_SIMPLE_HDFS_USER) == 0)
-            || (user.compareToIgnoreCase(currentUser) == 0);
-  }
-
-  /**
-   * Returns true if the request is Anonymous.
-   *
-   * @param userArgs - user Args
-   *
-   * @throws OzoneException -- Allows the underlying system
-   * to throw; that error will get propagated to clients
-   */
-  @Override
-  public boolean isAnonymous(UserArgs userArgs) throws OzoneException {
-    assert userArgs != null : "userArgs cannot be null";
-
-    return getUser(userArgs) == null;
-  }
-
-  /**
-   * Returns true if the name is a recognizable user in the system.
-   *
-   * @param userName - Name of the user
-   * @param userArgs - user Args
-   *
-   * @throws OzoneException -- Allows the underlying system
-   * to throw; that error will get propagated to clients
-   */
-  @Override
-  public boolean isUser(String userName, UserArgs userArgs)
-      throws OzoneException {
-    // In the simple case, all non-null user names are users :)
-    return userName != null;
-  }
-
-  /**
-   * Returns all the Groups that user is a member of.
-   *
-   * @param userArgs - User Args
-   *
-   * @return String Array which contains 0 or more group names
-   *
-   * @throws OzoneException
-   */
-  @Override
-  public String[] getGroups(UserArgs userArgs) throws OzoneException {
-    // Not implemented
-    return null;
-  }
-
-}
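
For reference, the getUser() logic removed above reads the HTTP Authorization header, requires at most one value, strips the simple-authentication scheme prefix, and lowercases what is left. Below is a small standalone sketch of that parsing, assuming the scheme prefix is "OZONE " (the real value comes from Header.OZONE_SIMPLE_AUTHENTICATION_SCHEME, which is not shown in this diff):

import java.util.Collections;
import java.util.List;

/** Standalone sketch of the header parsing done by Simple#getUser. */
public class SimpleAuthSketch {
  // Assumed value of Header.OZONE_SIMPLE_AUTHENTICATION_SCHEME; verify
  // against org.apache.hadoop.ozone.client.rest.headers.Header.
  private static final String SIMPLE_SCHEME = "OZONE ";

  /** Returns the lowercased user name, or null for an anonymous request. */
  static String parseUser(List<String> authorizationValues) {
    if (authorizationValues == null || authorizationValues.size() > 1) {
      throw new IllegalArgumentException("Bad authorization header");
    }
    if (authorizationValues.isEmpty()) {
      return null;                          // anonymous request
    }
    String value = authorizationValues.get(0).trim();
    if (!value.startsWith(SIMPLE_SCHEME)) {
      throw new IllegalArgumentException("Unsupported authentication scheme");
    }
    // Strip the scheme prefix and normalize, mirroring the removed code.
    return value.substring(SIMPLE_SCHEME.length()).toLowerCase().trim();
  }

  public static void main(String[] args) {
    System.out.println(parseUser(Collections.singletonList("OZONE Bilbo")));
    System.out.println(parseUser(Collections.<String>emptyList()));
  }
}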

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce23d9ad/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/userauth/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/userauth/package-info.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/userauth/package-info.java
deleted file mode 100644
index d498fc8..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/userauth/package-info.java
+++ /dev/null
@@ -1,23 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-/**
- * User authentication implementations for the Ozone REST interface.
- */
-package org.apache.hadoop.ozone.web.userauth;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce23d9ad/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/utils/JsonUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/utils/JsonUtils.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/utils/JsonUtils.java
deleted file mode 100644
index 909873f..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/utils/JsonUtils.java
+++ /dev/null
@@ -1,71 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.ozone.web.utils;
-
-import java.io.IOException;
-import java.util.List;
-
-import com.fasterxml.jackson.databind.ObjectMapper;
-import com.fasterxml.jackson.databind.ObjectReader;
-import com.fasterxml.jackson.databind.ObjectWriter;
-import com.fasterxml.jackson.databind.type.CollectionType;
-
-/**
- * JSON Utility functions used in ozone.
- */
-public final class JsonUtils {
-
-  // Reuse the ObjectMapper instance to improve performance.
-  // ObjectMapper is thread safe as long as the instance is fully
-  // configured before use.
-  private static final ObjectMapper MAPPER = new ObjectMapper();
-  private static final ObjectReader READER = MAPPER.readerFor(Object.class);
-  private static final ObjectWriter WRITTER =
-      MAPPER.writerWithDefaultPrettyPrinter();
-
-  private JsonUtils() {
-    // Never constructed
-  }
-
-  public static String toJsonStringWithDefaultPrettyPrinter(String jsonString)
-      throws IOException {
-    Object json = READER.readValue(jsonString);
-    return WRITTER.writeValueAsString(json);
-  }
-
-  public static String toJsonString(Object obj) throws IOException {
-    return MAPPER.writeValueAsString(obj);
-  }
-
-  /**
-   * Deserializes a list of elements from the given JSON string;
-   * each element in the list is of the given type.
-   *
-   * @param str json string.
-   * @param elementType element type.
-   * @return List of elements of type elementType
-   * @throws IOException
-   */
-  public static List<?> toJsonList(String str, Class<?> elementType)
-      throws IOException {
-    CollectionType type = MAPPER.getTypeFactory()
-        .constructCollectionType(List.class, elementType);
-    return MAPPER.readValue(str, type);
-  }
-}
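
For reference, the JsonUtils class removed above is a thin wrapper over a shared Jackson ObjectMapper that offers pretty-printing and typed-list deserialization. The following standalone sketch shows the same Jackson calls in use; the Volume element type here is illustrative, not an Ozone class:

import java.io.IOException;
import java.util.List;

import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.type.CollectionType;

/** Usage sketch for the typed-list deserialization pattern in JsonUtils. */
public class JsonListSketch {

  /** Illustrative element type; not part of the Ozone code base. */
  public static class Volume {
    public String name;
    public long quotaInBytes;
  }

  public static void main(String[] args) throws IOException {
    ObjectMapper mapper = new ObjectMapper();

    // Build a List<Volume> type token, as JsonUtils#toJsonList does.
    CollectionType listType = mapper.getTypeFactory()
        .constructCollectionType(List.class, Volume.class);

    String json = "[{\"name\":\"vol1\",\"quotaInBytes\":1024}]";
    List<Volume> volumes = mapper.readValue(json, listType);
    System.out.println(volumes.get(0).name);        // prints "vol1"

    // Pretty-print an object, as toJsonStringWithDefaultPrettyPrinter does.
    System.out.println(
        mapper.writerWithDefaultPrettyPrinter().writeValueAsString(volumes));
  }
}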


