hadoop-common-commits mailing list archives

From j..@apache.org
Subject [3/4] hadoop git commit: HDFS-10785: libhdfs++: Implement the rest of the tools. Contributed by Anatoli Schein
Date Tue, 13 Jun 2017 15:28:18 GMT
http://git-wip-us.apache.org/repos/asf/hadoop/blob/40e3290b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/content_summary.cc
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/content_summary.cc b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/content_summary.cc
new file mode 100644
index 0000000..0dca36a
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/content_summary.cc
@@ -0,0 +1,55 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <hdfspp/content_summary.h>
+#include <sstream>
+#include <iomanip>
+
+namespace hdfs {
+
+ContentSummary::ContentSummary()
+: length(0),
+  filecount(0),
+  directorycount(0),
+  quota(0),
+  spaceconsumed(0),
+  spacequota(0) {
+}
+
+std::string ContentSummary::str(bool include_quota) const {
+  std::stringstream ss;
+  if(include_quota){
+    ss  << this->quota << " "
+        << spacequota << " "
+        << spaceconsumed << " ";
+  }
+  ss  << directorycount << " "
+      << filecount << " "
+      << length << " "
+      << path;
+  return ss.str();
+}
+
+std::string ContentSummary::str_du() const {
+  std::stringstream ss;
+  ss  << std::left << std::setw(10) << length
+      << path;
+  return ss.str();
+}
+
+}
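
As a quick illustration of the two formatters (a minimal sketch, not part of this patch; it assumes the fields used above are public members of ContentSummary, as the direct assignments in namenode_operations.cc below suggest):

  #include <hdfspp/content_summary.h>
  #include <iostream>

  int main() {
    hdfs::ContentSummary cs;
    cs.directorycount = 2;
    cs.filecount = 5;
    cs.length = 1024;
    cs.path = "/dir";
    std::cout << cs.str(false) << std::endl;  // "2 5 1024 /dir" (hdfs_count-style row)
    std::cout << cs.str_du() << std::endl;    // length left-aligned in a 10-char field, then path (hdfs_du-style)
    return 0;
  }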

http://git-wip-us.apache.org/repos/asf/hadoop/blob/40e3290b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/fsinfo.cc
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/fsinfo.cc b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/fsinfo.cc
new file mode 100644
index 0000000..9f350a8
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/fsinfo.cc
@@ -0,0 +1,61 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <hdfspp/fsinfo.h>
+#include <sstream>
+#include <iomanip>
+
+namespace hdfs {
+
+FsInfo::FsInfo()
+  : capacity(0),
+    used(0),
+    remaining(0),
+    under_replicated(0),
+    corrupt_blocks(0),
+    missing_blocks(0),
+    missing_repl_one_blocks(0),
+    blocks_in_future(0) {
+}
+
+std::string FsInfo::str(const std::string fs_name) const {
+  std::string fs_name_label = "Filesystem";
+  std::string size = std::to_string(capacity);
+  std::string size_label = "Size";
+  std::string used = std::to_string(this->used);
+  std::string used_label = "Used";
+  std::string available = std::to_string(remaining);
+  std::string available_label = "Available";
+  std::string use_percentage = std::to_string(this->used * 100 / capacity) + "%";
+  std::string use_percentage_label = "Use%";
+  std::stringstream ss;
+  ss  << std::left << std::setw(std::max(fs_name.size(), fs_name_label.size())) << fs_name_label
+      << std::right << std::setw(std::max(size.size(), size_label.size()) + 2) << size_label
+      << std::right << std::setw(std::max(used.size(), used_label.size()) + 2) << used_label
+      << std::right << std::setw(std::max(available.size(), available_label.size()) + 2) << available_label
+      << std::right << std::setw(std::max(use_percentage.size(), use_percentage_label.size()) + 2) << use_percentage_label
+      << std::endl
+      << std::left << std::setw(std::max(fs_name.size(), fs_name_label.size())) << fs_name
+      << std::right << std::setw(std::max(size.size(), size_label.size()) + 2) << size
+      << std::right << std::setw(std::max(used.size(), used_label.size()) + 2) << used
+      << std::right << std::setw(std::max(available.size(), available_label.size()) + 2) << available
+      << std::right << std::setw(std::max(use_percentage.size(), use_percentage_label.size()) + 2) << use_percentage;
+  return ss.str();
+}
+
+}
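
For reference, a minimal sketch of what FsInfo::str produces (again assuming public fields; note the Use% computation divides by capacity, so callers should only format a filesystem that reports a non-zero capacity):

  #include <hdfspp/fsinfo.h>
  #include <iostream>

  int main() {
    hdfs::FsInfo info;
    info.capacity = 100;
    info.used = 25;
    info.remaining = 75;
    // Prints a two-row df-style table: a header line (Filesystem, Size,
    // Used, Available, Use%) over the values, each column sized to the
    // wider of label and value; here Use% comes out as 25%.
    std::cout << info.str("hdfs://localhost:8020") << std::endl;
    return 0;
  }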

http://git-wip-us.apache.org/repos/asf/hadoop/blob/40e3290b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/statinfo.cc
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/statinfo.cc b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/statinfo.cc
new file mode 100644
index 0000000..2fb744f
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/statinfo.cc
@@ -0,0 +1,74 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <hdfspp/statinfo.h>
+#include <sys/stat.h>
+#include <sstream>
+#include <iomanip>
+
+namespace hdfs {
+
+StatInfo::StatInfo()
+  : file_type(0),
+    length(0),
+    permissions(0),
+    modification_time(0),
+    access_time(0),
+    block_replication(0),
+    blocksize(0),
+    fileid(0),
+    children_num(0) {
+}
+
+std::string StatInfo::str() const {
+  char perms[11];
+  perms[0] = file_type == StatInfo::IS_DIR ? 'd' : '-';
+  perms[1] = permissions & S_IRUSR ? 'r' : '-';
+  perms[2] = permissions & S_IWUSR ? 'w' : '-';
+  perms[3] = permissions & S_IXUSR ? 'x' : '-';
+  perms[4] = permissions & S_IRGRP ? 'r' : '-';
+  perms[5] = permissions & S_IWGRP ? 'w' : '-';
+  perms[6] = permissions & S_IXGRP ? 'x' : '-';
+  perms[7] = permissions & S_IROTH ? 'r' : '-';
+  perms[8] = permissions & S_IWOTH ? 'w' : '-';
+  perms[9] = permissions & S_IXOTH ? 'x' : '-';
+  perms[10] = 0;
+
+  //Convert to seconds from milliseconds
+  const int time_field_length = 17;
+  time_t rawtime = modification_time/1000;
+  struct tm * timeinfo;
+  char buffer[time_field_length];
+  timeinfo = localtime(&rawtime);
+
+  strftime(buffer,time_field_length,"%Y-%m-%d %H:%M",timeinfo);
+  buffer[time_field_length-1] = 0;  //null terminator
+  std::string time(buffer);
+
+  std::stringstream ss;
+  ss  << std::left << std::setw(12) << perms
+      << std::left << std::setw(3) << (!block_replication ? "-" : std::to_string(block_replication))
+      << std::left << std::setw(15) << owner
+      << std::left << std::setw(15) << group
+      << std::right << std::setw(5) << length
+      << std::right << std::setw(time_field_length + 2) << time  // modification_time
+      << "  " << full_path;
+  return ss.str();
+}
+
+}
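
A sketch of the ls-style line this produces (assumes public fields and the IS_DIR enumerator referenced above; with modification_time left at 0 the timestamp column shows the epoch):

  #include <hdfspp/statinfo.h>
  #include <iostream>

  int main() {
    hdfs::StatInfo si;
    si.file_type = hdfs::StatInfo::IS_DIR;
    si.permissions = 0755;              // octal, renders as "drwxr-xr-x"
    si.owner = "hdfs";
    si.group = "hadoop";
    si.full_path = "/dir/";
    // Replication prints as "-" because block_replication is 0.
    std::cout << si.str() << std::endl;
    return 0;
  }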

http://git-wip-us.apache.org/repos/asf/hadoop/blob/40e3290b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/fs/filesystem.cc
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/fs/filesystem.cc b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/fs/filesystem.cc
index b46102a..9ef3aa0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/fs/filesystem.cc
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/fs/filesystem.cc
@@ -479,6 +479,15 @@ void FileSystemImpl::GetFileInfo(
   nn_.GetFileInfo(path, handler);
 }
 
+void FileSystemImpl::GetContentSummary(
+    const std::string &path,
+    const std::function<void(const Status &, const ContentSummary &)> &handler) {
+  LOG_DEBUG(kFileSystem, << "FileSystemImpl::GetContentSummary("
+                                 << FMT_THIS_ADDR << ", path="
+                                 << path << ") called");
+
+  nn_.GetContentSummary(path, handler);
+}
 
 void FileSystemImpl::GetFsStats(
     const std::function<void(const Status &, const FsInfo &)> &handler) {
@@ -515,13 +524,16 @@ void FileSystemImpl::GetListing(
   LOG_DEBUG(kFileSystem, << "FileSystemImpl::GetListing("
                                  << FMT_THIS_ADDR << ", path="
                                  << path << ") called");
-
+  std::string path_fixed = path;
+  if(path.empty() || path.back() != '/'){
+    path_fixed += "/";
+  }
  // Capture the state and push it into the shim
-  auto callback = [this, path, handler](const Status &stat, const std::vector<StatInfo> & stat_infos, bool has_more) {
-    GetListingShim(stat, stat_infos, has_more, path, handler);
+  auto callback = [this, path_fixed, handler](const Status &stat, const std::vector<StatInfo> & stat_infos, bool has_more) {
+    GetListingShim(stat, stat_infos, has_more, path_fixed, handler);
   };
 
-  nn_.GetListing(path, callback);
+  nn_.GetListing(path_fixed, callback);
 }
 
 
@@ -772,6 +784,28 @@ void FileSystemImpl::DeleteSnapshot(const std::string &path,
   nn_.DeleteSnapshot(path, name, handler);
 }
 
+void FileSystemImpl::RenameSnapshot(const std::string &path,
+    const std::string &old_name, const std::string &new_name,
+    const std::function<void(const Status &)> &handler) {
+  LOG_DEBUG(kFileSystem,
+    << "FileSystemImpl::RenameSnapshot(" << FMT_THIS_ADDR << ", path=" << path <<
+    ", old_name=" << old_name << ", new_name=" << new_name << ") called");
+
+  if (path.empty()) {
+    handler(Status::InvalidArgument("RenameSnapshot: argument 'path' cannot be empty"));
+    return;
+  }
+  if (old_name.empty()) {
+    handler(Status::InvalidArgument("RenameSnapshot: argument 'old_name' cannot be empty"));
+    return;
+  }
+  if (new_name.empty()) {
+    handler(Status::InvalidArgument("RenameSnapshot: argument 'new_name' cannot be empty"));
+    return;
+  }
+
+  nn_.RenameSnapshot(path, old_name, new_name, handler);
+}
 
 void FileSystemImpl::AllowSnapshot(const std::string &path,
     const std::function<void(const Status &)> &handler) {
@@ -817,4 +851,8 @@ Options FileSystemImpl::get_options() {
   return options_;
 }
 
+std::string FileSystemImpl::get_cluster_name() {
+  return cluster_name_;
+}
+
 }
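
Driving the new async entry point might look like the following sketch (it leans on hdfs::doConnect from tools_common.h in this patch; a real caller must keep the FileSystem alive until the handler fires, e.g. by blocking on a promise as filesystem_sync.cc does):

  #include <iostream>
  #include "tools_common.h"

  int main() {
    hdfs::optional<hdfs::URI> uri = hdfs::URI::parse_from_string("hdfs://localhost:8020/dir");
    if (!uri) return 1;
    std::shared_ptr<hdfs::FileSystem> fs = hdfs::doConnect(uri.value(), false);
    if (!fs) return 1;
    fs->GetContentSummary(uri->get_path(),
        [](const hdfs::Status &status, const hdfs::ContentSummary &summary) {
          if (status.ok())
            std::cout << summary.str(true) << std::endl;
          else
            std::cerr << status.ToString() << std::endl;
        });
    // ... block here until the handler has run before letting fs go out of scope.
    return 0;
  }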

http://git-wip-us.apache.org/repos/asf/hadoop/blob/40e3290b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/fs/filesystem.h
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/fs/filesystem.h b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/fs/filesystem.h
index fbc3967..d97eb25 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/fs/filesystem.h
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/fs/filesystem.h
@@ -87,6 +87,10 @@ public:
 
   Status GetFileInfo(const std::string &path, StatInfo & stat_info) override;
 
+  void GetContentSummary(const std::string &path,
+        const std::function<void(const Status &, const ContentSummary &)> &handler) override;
+  Status GetContentSummary(const std::string &path, ContentSummary & content_summary) override;
+
   /**
    * Retrieves the file system information such as the total raw size of all files in the filesystem
    * and the raw capacity of the filesystem
@@ -160,6 +164,18 @@ public:
   Status DeleteSnapshot(const std::string &path, const std::string &name) override;
 
   /**
+   * Renames the directory snapshot specified by path from old_name to new_name
+   *
+   *  @param path       Path to the snapshotted directory (must be non-blank)
+   *  @param old_name   Current name of the snapshot (must be non-blank)
+   *  @param new_name   New name of the snapshot (must be non-blank)
+   **/
+  void RenameSnapshot(const std::string &path, const std::string &old_name,
+      const std::string &new_name, const std::function<void(const Status &)> &handler) override;
+  Status RenameSnapshot(const std::string &path, const std::string &old_name,
+      const std::string &new_name) override;
+
+  /**
    * Allows snapshots to be made on the specified directory
    *
    *  @param path    Path to the directory to be made snapshottable (must be non-empty)
@@ -189,6 +205,8 @@ public:
 
   Options get_options();
 
+  std::string get_cluster_name();
+
 private:
   /**
    *  The IoService must be the first member variable to ensure that it gets

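For illustration, the async overload declared here can be driven with a plain lambda (a sketch; it assumes a connected hdfs::FileSystem obtained via the public hdfspp.h header):

  #include <hdfspp/hdfspp.h>
  #include <iostream>

  void RenameSnapshotExample(hdfs::FileSystem &fs) {
    fs.RenameSnapshot("/dir", "old_snap", "new_snap",
        [](const hdfs::Status &s) {
          if (!s.ok())
            std::cerr << "RenameSnapshot failed: " << s.ToString() << std::endl;
        });
  }
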
http://git-wip-us.apache.org/repos/asf/hadoop/blob/40e3290b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/fs/filesystem_sync.cc
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/fs/filesystem_sync.cc b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/fs/filesystem_sync.cc
index 73be538..53c9e26 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/fs/filesystem_sync.cc
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/fs/filesystem_sync.cc
@@ -252,6 +252,35 @@ Status FileSystemImpl::GetFileInfo(const std::string &path,
   return stat;
 }
 
+Status FileSystemImpl::GetContentSummary(const std::string &path,
+                                         ContentSummary & content_summary) {
+  LOG_DEBUG(kFileSystem, << "FileSystemImpl::[sync]GetContentSummary("
+                                 << FMT_THIS_ADDR << ", path="
+                                 << path << ") called");
+
+  auto callstate = std::make_shared<std::promise<std::tuple<Status, ContentSummary>>>();
+  std::future<std::tuple<Status, ContentSummary>> future(callstate->get_future());
+
+  /* wrap async FileSystem::GetContentSummary with promise to make it a blocking call */
+  auto h = [callstate](const Status &s, const ContentSummary &si) {
+    callstate->set_value(std::make_tuple(s, si));
+  };
+
+  GetContentSummary(path, h);
+
+  /* block until promise is set */
+  auto returnstate = future.get();
+  Status stat = std::get<0>(returnstate);
+  ContentSummary cs = std::get<1>(returnstate);
+
+  if (!stat.ok()) {
+    return stat;
+  }
+
+  content_summary = cs;
+  return stat;
+}
+
 Status FileSystemImpl::GetFsStats(FsInfo & fs_info) {
   LOG_DEBUG(kFileSystem,
       << "FileSystemImpl::[sync]GetFsStats(" << FMT_THIS_ADDR << ") called");
@@ -510,6 +539,29 @@ Status FileSystemImpl::DeleteSnapshot(const std::string &path,
   return stat;
 }
 
+Status FileSystemImpl::RenameSnapshot(const std::string &path,
+    const std::string &old_name, const std::string &new_name) {
+  LOG_DEBUG(kFileSystem,
+    << "FileSystemImpl::[sync]RenameSnapshot(" << FMT_THIS_ADDR << ", path=" << path <<
+    ", old_name=" << old_name << ", new_name=" << new_name << ") called");
+
+  auto callstate = std::make_shared<std::promise<Status>>();
+  std::future<Status> future(callstate->get_future());
+
+  /* wrap async FileSystem::RenameSnapshot with promise to make it a blocking call */
+  auto h = [callstate](const Status &s) {
+    callstate->set_value(s);
+  };
+
+  RenameSnapshot(path, old_name, new_name, h);
+
+  /* block until promise is set */
+  auto returnstate = future.get();
+  Status stat = returnstate;
+
+  return stat;
+}
+
 Status FileSystemImpl::AllowSnapshot(const std::string &path) {
   LOG_DEBUG(kFileSystem,
       << "FileSystemImpl::[sync]AllowSnapshot(" << FMT_THIS_ADDR << ", path=" << path << ") called");

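All of these blocking wrappers share the same shape; condensed into a generic helper (hypothetical names, not part of the patch) the shim looks like this:

  #include <functional>
  #include <future>
  #include <memory>
  #include <tuple>
  #include <hdfspp/hdfspp.h>

  // Hand the async operation a handler that fulfills a shared promise,
  // then block on the matching future until that handler runs.
  template <typename Result>
  std::tuple<hdfs::Status, Result> BlockOn(
      const std::function<void(std::function<void(const hdfs::Status &, const Result &)>)> &async_op) {
    auto callstate = std::make_shared<std::promise<std::tuple<hdfs::Status, Result>>>();
    auto future = callstate->get_future();
    async_op([callstate](const hdfs::Status &s, const Result &r) {
      callstate->set_value(std::make_tuple(s, r));
    });
    return future.get();
  }
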
http://git-wip-us.apache.org/repos/asf/hadoop/blob/40e3290b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/fs/namenode_operations.cc
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/fs/namenode_operations.cc b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/fs/namenode_operations.cc
index 9e2d90a..b5a6564 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/fs/namenode_operations.cc
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/fs/namenode_operations.cc
@@ -247,6 +247,45 @@ void NameNodeOperations::GetFileInfo(const std::string & path,
   });
 }
 
+void NameNodeOperations::GetContentSummary(const std::string & path,
+  std::function<void(const Status &, const ContentSummary &)> handler)
+{
+  using ::hadoop::hdfs::GetContentSummaryRequestProto;
+  using ::hadoop::hdfs::GetContentSummaryResponseProto;
+
+  LOG_TRACE(kFileSystem, << "NameNodeOperations::GetContentSummary("
+                           << FMT_THIS_ADDR << ", path=" << path << ") called");
+
+  if (path.empty()) {
+    handler(Status::InvalidArgument("GetContentSummary: argument 'path' cannot be empty"), ContentSummary());
+    return;
+  }
+
+  GetContentSummaryRequestProto req;
+  req.set_path(path);
+
+  auto resp = std::make_shared<GetContentSummaryResponseProto>();
+
+  namenode_.GetContentSummary(&req, resp, [resp, handler, path](const Status &stat) {
+    if (stat.ok()) {
+      // For non-existent files, the server will respond with an OK message but
+      //   no summary in the protobuf.
+      if (resp->has_summary()) {
+        ContentSummary content_summary;
+        content_summary.path = path;
+        ContentSummaryProtoToContentSummary(content_summary, resp->summary());
+        handler(stat, content_summary);
+      } else {
+        std::string errormsg = "No such file or directory: " + path;
+        Status statNew = Status::PathNotFound(errormsg.c_str());
+        handler(statNew, ContentSummary());
+      }
+    } else {
+      handler(stat, ContentSummary());
+    }
+  });
+}
+
 void NameNodeOperations::GetFsStats(
     std::function<void(const Status &, const FsInfo &)> handler) {
   using ::hadoop::hdfs::GetFsStatusRequestProto;
@@ -300,7 +339,10 @@ void NameNodeOperations::GetListing(
         for (::hadoop::hdfs::HdfsFileStatusProto const& fs : resp->dirlist().partiallisting()) {
           StatInfo si;
           si.path = fs.path();
-          si.full_path = path + fs.path() + "/";
+          si.full_path = path + fs.path();
+          if(si.full_path.back() != '/'){
+            si.full_path += "/";
+          }
           HdfsFileStatusProtoToStatInfo(si, fs);
           stat_infos.push_back(si);
         }
@@ -554,6 +596,41 @@ void NameNodeOperations::DeleteSnapshot(const std::string & path,
       });
 }
 
+void NameNodeOperations::RenameSnapshot(const std::string & path, const std::string & old_name,
+    const std::string & new_name, std::function<void(const Status &)> handler) {
+  using ::hadoop::hdfs::RenameSnapshotRequestProto;
+  using ::hadoop::hdfs::RenameSnapshotResponseProto;
+
+  LOG_TRACE(kFileSystem,
+      << "NameNodeOperations::RenameSnapshot(" << FMT_THIS_ADDR << ", path=" << path <<
+      ", old_name=" << old_name << ", new_name=" << new_name << ") called");
+
+  if (path.empty()) {
+    handler(Status::InvalidArgument("RenameSnapshot: argument 'path' cannot be empty"));
+    return;
+  }
+  if (old_name.empty()) {
+    handler(Status::InvalidArgument("RenameSnapshot: argument 'old_name' cannot be empty"));
+    return;
+  }
+  if (new_name.empty()) {
+    handler(Status::InvalidArgument("RenameSnapshot: argument 'new_name' cannot be empty"));
+    return;
+  }
+
+  RenameSnapshotRequestProto req;
+  req.set_snapshotroot(path);
+  req.set_snapshotoldname(old_name);
+  req.set_snapshotnewname(new_name);
+
+  auto resp = std::make_shared<RenameSnapshotResponseProto>();
+
+  namenode_.RenameSnapshot(&req, resp,
+      [handler](const Status &stat) {
+        handler(stat);
+      });
+}
+
 void NameNodeOperations::AllowSnapshot(const std::string & path, std::function<void(const Status &)> handler) {
   using ::hadoop::hdfs::AllowSnapshotRequestProto;
   using ::hadoop::hdfs::AllowSnapshotResponseProto;
@@ -621,6 +698,17 @@ void NameNodeOperations::HdfsFileStatusProtoToStatInfo(
   stat_info.children_num = fs.childrennum();
 }
 
+void NameNodeOperations::ContentSummaryProtoToContentSummary(
+    hdfs::ContentSummary & content_summary,
+    const ::hadoop::hdfs::ContentSummaryProto & csp) {
+  content_summary.length = csp.length();
+  content_summary.filecount = csp.filecount();
+  content_summary.directorycount = csp.directorycount();
+  content_summary.quota = csp.quota();
+  content_summary.spaceconsumed = csp.spaceconsumed();
+  content_summary.spacequota = csp.spacequota();
+}
+
 void NameNodeOperations::GetFsStatsResponseProtoToFsInfo(
     hdfs::FsInfo & fs_info,
     const std::shared_ptr<::hadoop::hdfs::GetFsStatsResponseProto> & fs) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/40e3290b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/fs/namenode_operations.h
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/fs/namenode_operations.h b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/fs/namenode_operations.h
index 59b3512..e289d5e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/fs/namenode_operations.h
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/fs/namenode_operations.h
@@ -21,6 +21,7 @@
 #include "rpc/rpc_engine.h"
 #include "hdfspp/statinfo.h"
 #include "hdfspp/fsinfo.h"
+#include "hdfspp/content_summary.h"
 #include "common/namenode_info.h"
 #include "ClientNamenodeProtocol.pb.h"
 #include "ClientNamenodeProtocol.hrpc.inl"
@@ -67,7 +68,10 @@ public:
     std::function<void(const Status &)> handler);
 
   void GetFileInfo(const std::string & path,
-      std::function<void(const Status &, const StatInfo &)> handler);
+    std::function<void(const Status &, const StatInfo &)> handler);
+
+  void GetContentSummary(const std::string & path,
+    std::function<void(const Status &, const ContentSummary &)> handler);
 
   void GetFsStats(std::function<void(const Status &, const FsInfo &)> handler);
 
@@ -97,6 +101,9 @@ public:
   void DeleteSnapshot(const std::string & path, const std::string & name,
       std::function<void(const Status &)> handler);
 
+  void RenameSnapshot(const std::string & path, const std::string & old_name, const std::string & new_name,
+      std::function<void(const Status &)> handler);
+
   void AllowSnapshot(const std::string & path,
       std::function<void(const Status &)> handler);
 
@@ -107,6 +114,7 @@ public:
 
 private:
   static void HdfsFileStatusProtoToStatInfo(hdfs::StatInfo & si, const ::hadoop::hdfs::HdfsFileStatusProto & fs);
+  static void ContentSummaryProtoToContentSummary(hdfs::ContentSummary & content_summary, const ::hadoop::hdfs::ContentSummaryProto & csp);
   static void DirectoryListingProtoToStatInfo(std::shared_ptr<std::vector<StatInfo>> stat_infos, const ::hadoop::hdfs::DirectoryListingProto & dl);
   static void GetFsStatsResponseProtoToFsInfo(hdfs::FsInfo & fs_info, const std::shared_ptr<::hadoop::hdfs::GetFsStatsResponseProto> & fs);
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/40e3290b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/hdfs_ext_test.cc
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/hdfs_ext_test.cc b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/hdfs_ext_test.cc
index 7d94944..19d95b4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/hdfs_ext_test.cc
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/hdfs_ext_test.cc
@@ -121,6 +121,8 @@ TEST_F(HdfsExtTest, TestSnapshotOperations) {
   EXPECT_EQ((int) std::errc::invalid_argument, errno);
   EXPECT_EQ(-1, hdfsDeleteSnapshot(fs, nullptr, "Bad"));
   EXPECT_EQ((int) std::errc::invalid_argument, errno);
+  EXPECT_EQ(-1, hdfsRenameSnapshot(fs, nullptr, "Bad", "Bad"));
+  EXPECT_EQ((int) std::errc::invalid_argument, errno);
   EXPECT_EQ(-1, hdfsDisallowSnapshot(fs, nullptr));
   EXPECT_EQ((int) std::errc::invalid_argument, errno);
 
@@ -136,6 +138,8 @@ TEST_F(HdfsExtTest, TestSnapshotOperations) {
   EXPECT_EQ((int) std::errc::no_such_file_or_directory, errno);
   EXPECT_EQ(-1, hdfsDeleteSnapshot(fs, path.c_str(), "Bad"));
   EXPECT_EQ((int) std::errc::no_such_file_or_directory, errno);
+  EXPECT_EQ(-1, hdfsRenameSnapshot(fs, path.c_str(), "Bad", "Bad"));
+  EXPECT_EQ((int) std::errc::no_such_file_or_directory, errno);
   EXPECT_EQ(-1, hdfsDisallowSnapshot(fs, path.c_str()));
   EXPECT_EQ((int) std::errc::no_such_file_or_directory, errno);
 
@@ -147,6 +151,8 @@ TEST_F(HdfsExtTest, TestSnapshotOperations) {
   EXPECT_EQ((int) std::errc::not_a_directory, errno);
   EXPECT_EQ(-1, hdfsDeleteSnapshot(fs, path.c_str(), "Bad"));
   EXPECT_EQ((int) std::errc::not_a_directory, errno);
+  EXPECT_EQ(-1, hdfsRenameSnapshot(fs, path.c_str(), "Bad", "Bad"));
+  EXPECT_EQ((int) std::errc::not_a_directory, errno);
   EXPECT_EQ(-1, hdfsDisallowSnapshot(fs, path.c_str()));
   EXPECT_EQ((int) std::errc::not_a_directory, errno);
 
@@ -167,8 +173,11 @@ TEST_F(HdfsExtTest, TestSnapshotOperations) {
   EXPECT_STREQ("Good", file_infos[0].mName);
   hdfsFreeFileInfo(file_infos, 1);
 
+  //Verify snapshot renamed
+  EXPECT_EQ(0, hdfsRenameSnapshot(fs, dirName.c_str(), "Good", "Best"));
+
   //Verify snapshot deleted
-  EXPECT_EQ(0, hdfsDeleteSnapshot(fs, dirName.c_str(), "Good"));
+  EXPECT_EQ(0, hdfsDeleteSnapshot(fs, dirName.c_str(), "Best"));
   EXPECT_EQ(nullptr, file_infos = hdfsListDirectory(fs, snapDir.c_str(), &size));
   EXPECT_EQ(0, size);
   hdfsFreeFileInfo(file_infos, 0);
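
From the C API side, the new call wired up in hdfs_shim.c below is used the same way as the other snapshot functions; a hedged sketch (hdfsConnect/hdfsDisconnect are the standard libhdfs calls, and the header path may vary by install):

  #include <hdfs/hdfs.h>
  #include <cstdio>

  int main() {
    hdfsFS fs = hdfsConnect("default", 0);
    if (!fs) return 1;
    // Returns 0 on success, or -1 with errno set (EINVAL, ENOENT,
    // ENOTDIR, ...) exactly as the EXPECT_EQ checks above exercise.
    if (hdfsRenameSnapshot(fs, "/dir", "Good", "Best") != 0)
      perror("hdfsRenameSnapshot");
    hdfsDisconnect(fs);
    return 0;
  }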

http://git-wip-us.apache.org/repos/asf/hadoop/blob/40e3290b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/hdfs_shim.c
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/hdfs_shim.c b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/hdfs_shim.c
index ddba67f..1edfedc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/hdfs_shim.c
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/hdfs_shim.c
@@ -504,6 +504,10 @@ int hdfsDeleteSnapshot(hdfsFS fs, const char* path, const char* name) {
   return libhdfspp_hdfsDeleteSnapshot(fs->libhdfsppRep, path, name);
 }
 
+int hdfsRenameSnapshot(hdfsFS fs, const char* path, const char* old_name, const char* new_name) {
+  return libhdfspp_hdfsRenameSnapshot(fs->libhdfsppRep, path, old_name, new_name);
+}
+
 int hdfsAllowSnapshot(hdfsFS fs, const char* path) {
   return libhdfspp_hdfsAllowSnapshot(fs->libhdfsppRep, path);
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/40e3290b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/libhdfs_wrapper_undefs.h
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/libhdfs_wrapper_undefs.h b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/libhdfs_wrapper_undefs.h
index 644ff13..5a67fce 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/libhdfs_wrapper_undefs.h
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/libhdfs_wrapper_undefs.h
@@ -97,5 +97,6 @@
 #undef hdfsFind
 #undef hdfsCreateSnapshot
 #undef hdfsDeleteSnapshot
+#undef hdfsRenameSnapshot
 #undef hdfsAllowSnapshot
 #undef hdfsDisallowSnapshot

http://git-wip-us.apache.org/repos/asf/hadoop/blob/40e3290b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/libhdfspp_wrapper_defines.h
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/libhdfspp_wrapper_defines.h b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/libhdfspp_wrapper_defines.h
index c186d63..c9bed90 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/libhdfspp_wrapper_defines.h
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/libhdfspp_wrapper_defines.h
@@ -97,5 +97,6 @@
 #define hdfsFind libhdfspp_hdfsFind
 #define hdfsCreateSnapshot libhdfspp_hdfsCreateSnapshot
 #define hdfsDeleteSnapshot libhdfspp_hdfsDeleteSnapshot
+#define hdfsRenameSnapshot libhdfspp_hdfsRenameSnapshot
 #define hdfsAllowSnapshot libhdfspp_hdfsAllowSnapshot
 #define hdfsDisallowSnapshot libhdfspp_hdfsDisallowSnapshot

http://git-wip-us.apache.org/repos/asf/hadoop/blob/40e3290b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/CMakeLists.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/CMakeLists.txt b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/CMakeLists.txt
index f0817eb..4a46c7a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/CMakeLists.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/CMakeLists.txt
@@ -23,20 +23,71 @@ set(LIBHDFSPP_DIR CACHE STRING ${CMAKE_INSTALL_PREFIX})
 include_directories( ${LIBHDFSPP_DIR}/include )
 link_directories( ${LIBHDFSPP_DIR}/lib )
 
-add_library(tools_common_obj OBJECT tools_common.cpp)
+add_library(tools_common_obj OBJECT tools_common.cc)
 add_library(tools_common $<TARGET_OBJECTS:tools_common_obj>)
 
-add_executable(hdfs_cat hdfs_cat.cpp)
+add_executable(hdfs_cat hdfs_cat.cc)
 target_link_libraries(hdfs_cat tools_common hdfspp)
 
-add_executable(hdfs_chgrp hdfs_chgrp.cpp)
+add_executable(hdfs_chgrp hdfs_chgrp.cc)
 target_link_libraries(hdfs_chgrp tools_common hdfspp)
 
-add_executable(hdfs_chown hdfs_chown.cpp)
+add_executable(hdfs_chown hdfs_chown.cc)
 target_link_libraries(hdfs_chown tools_common hdfspp)
 
-add_executable(hdfs_chmod hdfs_chmod.cpp)
+add_executable(hdfs_chmod hdfs_chmod.cc)
 target_link_libraries(hdfs_chmod tools_common hdfspp)
 
-add_executable(hdfs_find hdfs_find.cpp)
-target_link_libraries(hdfs_find tools_common hdfspp)
\ No newline at end of file
+add_executable(hdfs_find hdfs_find.cc)
+target_link_libraries(hdfs_find tools_common hdfspp)
+
+add_executable(hdfs_mkdir hdfs_mkdir.cc)
+target_link_libraries(hdfs_mkdir tools_common hdfspp)
+
+add_executable(hdfs_rm hdfs_rm.cc)
+target_link_libraries(hdfs_rm tools_common hdfspp)
+
+add_executable(hdfs_ls hdfs_ls.cc)
+target_link_libraries(hdfs_ls tools_common hdfspp)
+
+add_executable(hdfs_stat hdfs_stat.cc)
+target_link_libraries(hdfs_stat tools_common hdfspp)
+
+add_executable(hdfs_count hdfs_count.cc)
+target_link_libraries(hdfs_count tools_common hdfspp)
+
+add_executable(hdfs_df hdfs_df.cc)
+target_link_libraries(hdfs_df tools_common hdfspp)
+
+add_executable(hdfs_du hdfs_du.cc)
+target_link_libraries(hdfs_du tools_common hdfspp)
+
+add_executable(hdfs_get hdfs_get.cc)
+target_link_libraries(hdfs_get tools_common hdfspp)
+
+add_executable(hdfs_copyToLocal hdfs_copyToLocal.cc)
+target_link_libraries(hdfs_copyToLocal tools_common hdfspp)
+
+add_executable(hdfs_moveToLocal hdfs_moveToLocal.cc)
+target_link_libraries(hdfs_moveToLocal tools_common hdfspp)
+
+add_executable(hdfs_setrep hdfs_setrep.cc)
+target_link_libraries(hdfs_setrep tools_common hdfspp)
+
+add_executable(hdfs_allowSnapshot hdfs_allowSnapshot.cc)
+target_link_libraries(hdfs_allowSnapshot tools_common hdfspp)
+
+add_executable(hdfs_disallowSnapshot hdfs_disallowSnapshot.cc)
+target_link_libraries(hdfs_disallowSnapshot tools_common hdfspp)
+
+add_executable(hdfs_createSnapshot hdfs_createSnapshot.cc)
+target_link_libraries(hdfs_createSnapshot tools_common hdfspp)
+
+add_executable(hdfs_renameSnapshot hdfs_renameSnapshot.cc)
+target_link_libraries(hdfs_renameSnapshot tools_common hdfspp)
+
+add_executable(hdfs_deleteSnapshot hdfs_deleteSnapshot.cc)
+target_link_libraries(hdfs_deleteSnapshot tools_common hdfspp)
+
+add_executable(hdfs_tail hdfs_tail.cc)
+target_link_libraries(hdfs_tail tools_common hdfspp)
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/40e3290b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs_allowSnapshot.cc
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs_allowSnapshot.cc b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs_allowSnapshot.cc
new file mode 100644
index 0000000..00709e4
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs_allowSnapshot.cc
@@ -0,0 +1,90 @@
+/*
+  Licensed to the Apache Software Foundation (ASF) under one
+  or more contributor license agreements.  See the NOTICE file
+  distributed with this work for additional information
+  regarding copyright ownership.  The ASF licenses this file
+  to you under the Apache License, Version 2.0 (the
+  "License"); you may not use this file except in compliance
+  with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing,
+  software distributed under the License is distributed on an
+  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+  KIND, either express or implied.  See the License for the
+  specific language governing permissions and limitations
+  under the License.
+*/
+
+#include <google/protobuf/stubs/common.h>
+#include <unistd.h>
+#include "tools_common.h"
+
+void usage(){
+  std::cout << "Usage: hdfs_allowSnapshot [OPTION] PATH"
+      << std::endl
+      << std::endl << "Allowing snapshots of a directory at PATH to be created."
+      << std::endl << "If the operation completes successfully, the directory becomes snapshottable."
+      << std::endl
+      << std::endl << "  -h        display this help and exit"
+      << std::endl
+      << std::endl << "Examples:"
+      << std::endl << "hdfs_allowSnapshot hdfs://localhost.localdomain:8020/dir"
+      << std::endl << "hdfs_allowSnapshot /dir1/dir2"
+      << std::endl;
+}
+
+int main(int argc, char *argv[]) {
+  //We should have at least 2 arguments
+  if (argc < 2) {
+    usage();
+    exit(EXIT_FAILURE);
+  }
+
+  int input;
+
+  //Using GetOpt to read in the values
+  opterr = 0;
+  while ((input = getopt(argc, argv, "h")) != -1) {
+    switch (input)
+    {
+    case 'h':
+      usage();
+      exit(EXIT_SUCCESS);
+    case '?':
+      if (isprint(optopt))
+        std::cerr << "Unknown option `-" << (char) optopt << "'." << std::endl;
+      else
+        std::cerr << "Unknown option character `" << (char) optopt << "'." << std::endl;
+      usage();
+      exit(EXIT_FAILURE);
+    default:
+      exit(EXIT_FAILURE);
+    }
+  }
+  std::string uri_path = argv[optind];
+
+  //Building a URI object from the given uri_path
+  hdfs::optional<hdfs::URI> uri = hdfs::URI::parse_from_string(uri_path);
+  if (!uri) {
+    std::cerr << "Malformed URI: " << uri_path << std::endl;
+    exit(EXIT_FAILURE);
+  }
+
+  std::shared_ptr<hdfs::FileSystem> fs = hdfs::doConnect(uri.value(), false);
+  if (!fs) {
+    std::cerr << "Could not connect the file system. " << std::endl;
+    exit(EXIT_FAILURE);
+  }
+
+  hdfs::Status status = fs->AllowSnapshot(uri->get_path());
+  if (!status.ok()) {
+    std::cerr << "Error: " << status.ToString() << std::endl;
+    exit(EXIT_FAILURE);
+  }
+
+  // Clean up static data and prevent valgrind memory leaks
+  google::protobuf::ShutdownProtobufLibrary();
+  return 0;
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/40e3290b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs_cat.cc
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs_cat.cc b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs_cat.cc
new file mode 100644
index 0000000..ec347cb
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs_cat.cc
@@ -0,0 +1,87 @@
+/*
+  Licensed to the Apache Software Foundation (ASF) under one
+  or more contributor license agreements.  See the NOTICE file
+  distributed with this work for additional information
+  regarding copyright ownership.  The ASF licenses this file
+  to you under the Apache License, Version 2.0 (the
+  "License"); you may not use this file except in compliance
+  with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing,
+  software distributed under the License is distributed on an
+  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+  KIND, either express or implied.  See the License for the
+  specific language governing permissions and limitations
+  under the License.
+*/
+
+#include <google/protobuf/stubs/common.h>
+#include <unistd.h>
+#include "tools_common.h"
+
+void usage(){
+  std::cout << "Usage: hdfs_cat [OPTION] FILE"
+      << std::endl
+      << std::endl << "Concatenate FILE to standard output."
+      << std::endl
+      << std::endl << "  -h  display this help and exit"
+      << std::endl
+      << std::endl << "Examples:"
+      << std::endl << "hdfs_cat hdfs://localhost.localdomain:8020/dir/file"
+      << std::endl << "hdfs_cat /dir/file"
+      << std::endl;
+}
+
+int main(int argc, char *argv[]) {
+  if (argc != 2) {
+    usage();
+    exit(EXIT_FAILURE);
+  }
+
+  int input;
+
+  //Using GetOpt to read in the values
+  opterr = 0;
+  while ((input = getopt(argc, argv, "h")) != -1) {
+    switch (input)
+    {
+    case 'h':
+      usage();
+      exit(EXIT_SUCCESS);
+    case '?':
+      if (isprint(optopt))
+        std::cerr << "Unknown option `-" << (char) optopt << "'." << std::endl;
+      else
+        std::cerr << "Unknown option character `" << (char) optopt << "'." << std::endl;
+      usage();
+      exit(EXIT_FAILURE);
+    default:
+      exit(EXIT_FAILURE);
+    }
+  }
+
+  std::string uri_path = argv[optind];
+
+  //Building a URI object from the given uri_path
+  hdfs::optional<hdfs::URI> uri = hdfs::URI::parse_from_string(uri_path);
+  if (!uri) {
+    std::cerr << "Malformed URI: " << uri_path << std::endl;
+    exit(EXIT_FAILURE);
+  }
+
+  std::shared_ptr<hdfs::FileSystem> fs = hdfs::doConnect(uri.value(), false);
+  if (!fs) {
+    std::cerr << "Could not connect the file system. " << std::endl;
+    exit(EXIT_FAILURE);
+  }
+
+  readFile(fs, uri->get_path(), 0, stdout, false);
+
+  // Clean up static data and prevent valgrind memory leaks
+  google::protobuf::ShutdownProtobufLibrary();
+  return 0;
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/40e3290b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs_cat.cpp
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs_cat.cpp b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs_cat.cpp
deleted file mode 100644
index 166a7bf..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs_cat.cpp
+++ /dev/null
@@ -1,120 +0,0 @@
-/*
-  Licensed to the Apache Software Foundation (ASF) under one
-  or more contributor license agreements.  See the NOTICE file
-  distributed with this work for additional information
-  regarding copyright ownership.  The ASF licenses this file
-  to you under the Apache License, Version 2.0 (the
-  "License"); you may not use this file except in compliance
-  with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing,
-  software distributed under the License is distributed on an
-  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-  KIND, either express or implied.  See the License for the
-  specific language governing permissions and limitations
-  under the License.
-*/
-
-#include <google/protobuf/stubs/common.h>
-#include <unistd.h>
-#include "tools_common.h"
-
-void usage(){
-  std::cout << "Usage: hdfs_cat [OPTION] FILE"
-      << std::endl
-      << std::endl << "Concatenate FILE to standard output."
-      << std::endl
-      << std::endl << "  -h  display this help and exit"
-      << std::endl
-      << std::endl << "Examples:"
-      << std::endl << "hdfs_cat hdfs://localhost.localdomain:9433/dir/file"
-      << std::endl << "hdfs_cat /dir/file"
-      << std::endl;
-}
-
-#define BUF_SIZE 4096
-
-int main(int argc, char *argv[]) {
-  if (argc != 2) {
-    usage();
-    exit(EXIT_FAILURE);
-  }
-
-  int input;
-
-  //Using GetOpt to read in the values
-  opterr = 0;
-  while ((input = getopt(argc, argv, "h")) != -1) {
-    switch (input)
-    {
-    case 'h':
-      usage();
-      exit(EXIT_SUCCESS);
-      break;
-    case '?':
-      if (isprint(optopt))
-        std::cerr << "Unknown option `-" << (char) optopt << "'." << std::endl;
-      else
-        std::cerr << "Unknown option character `" << (char) optopt << "'." << std::endl;
-      usage();
-      exit(EXIT_FAILURE);
-    default:
-      exit(EXIT_FAILURE);
-    }
-  }
-
-  std::string uri_path = argv[optind];
-
-  //Building a URI object from the given uri_path
-  hdfs::optional<hdfs::URI> uri = hdfs::URI::parse_from_string(uri_path);
-  if (!uri) {
-    std::cerr << "Malformed URI: " << uri_path << std::endl;
-    exit(EXIT_FAILURE);
-  }
-
-  //TODO: HDFS-9539 Currently options can be returned empty
-  hdfs::Options options = *hdfs::getOptions();
-
-  std::shared_ptr<hdfs::FileSystem> fs = hdfs::doConnect(uri.value(), options);
-  if (!fs) {
-    std::cerr << "Could not connect the file system. " << std::endl;
-    exit(EXIT_FAILURE);
-  }
-
-  hdfs::FileHandle *file_raw = nullptr;
-  hdfs::Status status = fs->Open(uri->get_path(), &file_raw);
-  if (!status.ok()) {
-    std::cerr << "Could not open file " << uri->get_path() << ". " << status.ToString() << std::endl;
-    exit(EXIT_FAILURE);
-  }
-  //wrapping file_raw into a unique pointer to guarantee deletion
-  std::unique_ptr<hdfs::FileHandle> file(file_raw);
-
-  char input_buffer[BUF_SIZE];
-  ssize_t total_bytes_read = 0;
-  size_t last_bytes_read = 0;
-
-  do{
-    //Reading file chunks
-    status = file->Read(input_buffer, sizeof(input_buffer), &last_bytes_read);
-    if(status.ok()) {
-      //Writing file chunks to stdout
-      fwrite(input_buffer, last_bytes_read, 1, stdout);
-      total_bytes_read += last_bytes_read;
-    } else {
-      if(status.is_invalid_offset()){
-        //Reached the end of the file
-        break;
-      } else {
-        std::cerr << "Error reading the file: " << status.ToString() << std::endl;
-        exit(EXIT_FAILURE);
-      }
-    }
-  } while (last_bytes_read > 0);
-
-  // Clean up static data and prevent valgrind memory leaks
-  google::protobuf::ShutdownProtobufLibrary();
-  return 0;
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/40e3290b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs_chgrp.cc
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs_chgrp.cc b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs_chgrp.cc
new file mode 100644
index 0000000..3b3cd50
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs_chgrp.cc
@@ -0,0 +1,189 @@
+/*
+  Licensed to the Apache Software Foundation (ASF) under one
+  or more contributor license agreements.  See the NOTICE file
+  distributed with this work for additional information
+  regarding copyright ownership.  The ASF licenses this file
+  to you under the Apache License, Version 2.0 (the
+  "License"); you may not use this file except in compliance
+  with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing,
+  software distributed under the License is distributed on an
+  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+  KIND, either express or implied.  See the License for the
+  specific language governing permissions and limitations
+  under the License.
+*/
+
+#include <google/protobuf/stubs/common.h>
+#include <unistd.h>
+#include <future>
+#include "tools_common.h"
+
+void usage(){
+  std::cout << "Usage: hdfs_chgrp [OPTION] GROUP FILE"
+      << std::endl
+      << std::endl << "Change the group association of each FILE to GROUP."
+      << std::endl << "The user must be the owner of files. Additional information is in the Permissions Guide:"
+      << std::endl << "https://hadoop.apache.org/docs/r2.7.1/hadoop-project-dist/hadoop-hdfs/HdfsPermissionsGuide.html"
+      << std::endl
+      << std::endl << "  -R  operate on files and directories recursively"
+      << std::endl << "  -h  display this help and exit"
+      << std::endl
+      << std::endl << "Examples:"
+      << std::endl << "hdfs_chgrp -R new_group hdfs://localhost.localdomain:8020/dir/file"
+      << std::endl << "hdfs_chgrp new_group /dir/file"
+      << std::endl;
+}
+
+struct SetOwnerState {
+  const std::string username;
+  const std::string groupname;
+  const std::function<void(const hdfs::Status &)> handler;
+  //The request counter is incremented once every time an async SetOwner call is made
+  uint64_t request_counter;
+  //This boolean will be set when find returns the last result
+  bool find_is_done;
+  //Final status to be returned
+  hdfs::Status status;
+  //Shared variables will need protection with a lock
+  std::mutex lock;
+  SetOwnerState(const std::string & username_, const std::string & groupname_,
+                const std::function<void(const hdfs::Status &)> & handler_,
+              uint64_t request_counter_, bool find_is_done_)
+      : username(username_),
+        groupname(groupname_),
+        handler(handler_),
+        request_counter(request_counter_),
+        find_is_done(find_is_done_),
+        status(),
+        lock() {
+  }
+};
+
+int main(int argc, char *argv[]) {
+  //We should have 3 or 4 parameters
+  if (argc != 3 && argc != 4) {
+    usage();
+    exit(EXIT_FAILURE);
+  }
+
+  bool recursive = false;
+  int input;
+
+  //Using GetOpt to read in the values
+  opterr = 0;
+  while ((input = getopt(argc, argv, "Rh")) != -1) {
+    switch (input)
+    {
+    case 'R':
+      recursive = true;
+      break;
+    case 'h':
+      usage();
+      exit(EXIT_SUCCESS);
+    case '?':
+      if (isprint(optopt))
+        std::cerr << "Unknown option `-" << (char) optopt << "'." << std::endl;
+      else
+        std::cerr << "Unknown option character `" << (char) optopt << "'." << std::endl;
+      usage();
+      exit(EXIT_FAILURE);
+    default:
+      exit(EXIT_FAILURE);
+    }
+  }
+  std::string group = argv[optind];
+  //Owner stays the same, just group association changes.
+  std::string owner = "";
+  std::string uri_path = argv[optind + 1];
+
+  //Building a URI object from the given uri_path
+  hdfs::optional<hdfs::URI> uri = hdfs::URI::parse_from_string(uri_path);
+  if (!uri) {
+    std::cerr << "Malformed URI: " << uri_path << std::endl;
+    exit(EXIT_FAILURE);
+  }
+
+  std::shared_ptr<hdfs::FileSystem> fs = hdfs::doConnect(uri.value(), true);
+  if (!fs) {
+    std::cerr << "Could not connect the file system. " << std::endl;
+    exit(EXIT_FAILURE);
+  }
+
+  /* wrap async FileSystem::SetOwner with promise to make it a blocking call */
+  std::shared_ptr<std::promise<hdfs::Status>> promise = std::make_shared<std::promise<hdfs::Status>>();
+  std::future<hdfs::Status> future(promise->get_future());
+  auto handler = [promise](const hdfs::Status &s) {
+    promise->set_value(s);
+  };
+
+  if(!recursive){
+    fs->SetOwner(uri->get_path(), owner, group, handler);
+  }
+  else {
+    //Allocating shared state, which includes:
+    //username and groupname to be set, handler to be called, request counter, and a boolean to keep track if find is done
+    std::shared_ptr<SetOwnerState> state = std::make_shared<SetOwnerState>(owner, group, handler, 0, false);
+
+    // Keep requesting more from Find until we process the entire listing. Call handler when Find is done and request counter is 0.
+    // Find guarantees that the handler will only be called once at a time so we do not need locking in handlerFind.
+    auto handlerFind = [fs, state](const hdfs::Status &status_find, const std::vector<hdfs::StatInfo> & stat_infos, bool has_more_results) -> bool {
+
+      //For each result returned by Find we call async SetOwner with the handler below.
+      //SetOwner DOES NOT guarantee that the handler will only be called once at a time, so we DO need locking in handlerSetOwner.
+      auto handlerSetOwner = [state](const hdfs::Status &status_set_owner) {
+        std::lock_guard<std::mutex> guard(state->lock);
+
+        //Decrement the counter once since we are done with this async call
+        if (!status_set_owner.ok() && state->status.ok()){
+          //We make sure we set state->status only on the first error.
+          state->status = status_set_owner;
+        }
+        state->request_counter--;
+        if(state->request_counter == 0 && state->find_is_done){
+          state->handler(state->status); //exit
+        }
+      };
+      if(!stat_infos.empty() && state->status.ok()) {
+        for (hdfs::StatInfo const& s : stat_infos) {
+          //Launch an asynchronous call to SetOwner for every returned result
+          state->request_counter++;
+          fs->SetOwner(s.full_path, state->username, state->groupname, handlerSetOwner);
+        }
+      }
+
+      //Lock this section because handlerSetOwner might be accessing the same
+      //shared variables simultaneously
+      std::lock_guard<std::mutex> guard(state->lock);
+      if (!status_find.ok() && state->status.ok()){
+        //We make sure we set state->status only on the first error.
+        state->status = status_find;
+      }
+      if(!has_more_results){
+        state->find_is_done = true;
+        if(state->request_counter == 0){
+          state->handler(state->status); //exit
+        }
+        return false;
+      }
+      return true;
+    };
+
+    //Asynchronous call to Find
+    fs->Find(uri->get_path(), "*", hdfs::FileSystem::GetDefaultFindMaxDepth(), handlerFind);
+  }
+
+  /* block until promise is set */
+  hdfs::Status status = future.get();
+  if (!status.ok()) {
+    std::cerr << "Error: " << status.ToString() << std::endl;
+    exit(EXIT_FAILURE);
+  }
+
+  // Clean up static data and prevent valgrind memory leaks
+  google::protobuf::ShutdownProtobufLibrary();
+  return 0;
+}
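
The Find/SetOwner coordination above reduces to a small counting idiom; a hypothetical distillation (names invented here, not in the patch):

  #include <cstdint>
  #include <functional>
  #include <mutex>

  // Each async sub-request increments a counter, each completion decrements
  // it, and the final handler fires exactly once: after the producer (Find)
  // is done and the last sub-request (SetOwner) has completed.
  struct FanOutState {
    std::mutex lock;
    uint64_t in_flight = 0;
    bool producer_done = false;
    std::function<void()> on_complete;

    void begin() { std::lock_guard<std::mutex> g(lock); ++in_flight; }
    void end() {
      std::lock_guard<std::mutex> g(lock);
      if (--in_flight == 0 && producer_done) on_complete();
    }
    void producer_finished() {
      std::lock_guard<std::mutex> g(lock);
      producer_done = true;
      if (in_flight == 0) on_complete();
    }
  };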

http://git-wip-us.apache.org/repos/asf/hadoop/blob/40e3290b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs_chgrp.cpp
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs_chgrp.cpp b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs_chgrp.cpp
deleted file mode 100644
index 2bb6843..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs_chgrp.cpp
+++ /dev/null
@@ -1,196 +0,0 @@
-/*
-  Licensed to the Apache Software Foundation (ASF) under one
-  or more contributor license agreements.  See the NOTICE file
-  distributed with this work for additional information
-  regarding copyright ownership.  The ASF licenses this file
-  to you under the Apache License, Version 2.0 (the
-  "License"); you may not use this file except in compliance
-  with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing,
-  software distributed under the License is distributed on an
-  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-  KIND, either express or implied.  See the License for the
-  specific language governing permissions and limitations
-  under the License.
-*/
-
-#include <google/protobuf/stubs/common.h>
-#include <unistd.h>
-#include <future>
-#include "tools_common.h"
-
-void usage(){
-  std::cout << "Usage: hdfs_chgrp [OPTION] GROUP FILE"
-      << std::endl
-      << std::endl << "Change the group association of each FILE to GROUP."
-      << std::endl << "The user must be the owner of files. Additional information is in the Permissions Guide:"
-      << std::endl << "https://hadoop.apache.org/docs/r2.7.1/hadoop-project-dist/hadoop-hdfs/HdfsPermissionsGuide.html"
-      << std::endl
-      << std::endl << "  -R  operate on files and directories recursively"
-      << std::endl << "  -h  display this help and exit"
-      << std::endl
-      << std::endl << "Examples:"
-      << std::endl << "hdfs_chgrp -R new_group hdfs://localhost.localdomain:9433/dir/file"
-      << std::endl << "hdfs_chgrp new_group /dir/file"
-      << std::endl;
-}
-
-struct SetOwnerState {
-  const std::string username;
-  const std::string groupname;
-  const std::function<void(const hdfs::Status &)> handler;
-  //The request counter is incremented once every time SetOwner async call is made
-  uint64_t request_counter;
-  //This boolean will be set when find returns the last result
-  bool find_is_done;
-  //Final status to be returned
-  hdfs::Status status;
-  //Shared variables will need protection with a lock
-  std::mutex lock;
-  SetOwnerState(const std::string & username_, const std::string & groupname_,
-                const std::function<void(const hdfs::Status &)> & handler_,
-              uint64_t request_counter_, bool find_is_done_)
-      : username(username_),
-        groupname(groupname_),
-        handler(handler_),
-        request_counter(request_counter_),
-        find_is_done(find_is_done_),
-        status(),
-        lock() {
-  }
-};
-
-int main(int argc, char *argv[]) {
-  //We should have 3 or 4 parameters
-  if (argc != 3 && argc != 4) {
-    usage();
-    exit(EXIT_FAILURE);
-  }
-
-  bool recursive = false;
-  int input;
-
-  //Using GetOpt to read in the values
-  opterr = 0;
-  while ((input = getopt(argc, argv, "Rh")) != -1) {
-    switch (input)
-    {
-    case 'R':
-      recursive = 1;
-      break;
-    case 'h':
-      usage();
-      exit(EXIT_SUCCESS);
-      break;
-    case '?':
-      if (isprint(optopt))
-        std::cerr << "Unknown option `-" << (char) optopt << "'." << std::endl;
-      else
-        std::cerr << "Unknown option character `" << (char) optopt << "'." << std::endl;
-      usage();
-      exit(EXIT_FAILURE);
-    default:
-      exit(EXIT_FAILURE);
-    }
-  }
-  std::string group = argv[optind];
-  //Owner stays the same, just group association changes.
-  std::string owner = "";
-  std::string uri_path = argv[optind + 1];
-
-  //Building a URI object from the given uri_path
-  hdfs::optional<hdfs::URI> uri = hdfs::URI::parse_from_string(uri_path);
-  if (!uri) {
-    std::cerr << "Malformed URI: " << uri_path << std::endl;
-    exit(EXIT_FAILURE);
-  }
-
-  //TODO: HDFS-9539 Currently options can be returned empty
-  hdfs::Options options = *hdfs::getOptions();
-
-  //TODO: HDFS-9539 - until then we increase the time-out to allow all recursive async calls to finish
-  options.rpc_timeout = std::numeric_limits<int>::max();
-
-  std::shared_ptr<hdfs::FileSystem> fs = hdfs::doConnect(uri.value(), options);
-  if (!fs) {
-    std::cerr << "Could not connect the file system. " << std::endl;
-    exit(EXIT_FAILURE);
-  }
-
-  /* wrap async FileSystem::SetOwner with promise to make it a blocking call */
-  std::shared_ptr<std::promise<hdfs::Status>> promise = std::make_shared<std::promise<hdfs::Status>>();
-  std::future<hdfs::Status> future(promise->get_future());
-  auto handler = [promise](const hdfs::Status &s) {
-    promise->set_value(s);
-  };
-
-  if(!recursive){
-    fs->SetOwner(uri->get_path(), owner, group, handler);
-  }
-  else {
-    //Allocating shared state, which includes:
-    //username and groupname to be set, handler to be called, request counter, and a boolean to keep track if find is done
-    std::shared_ptr<SetOwnerState> state = std::make_shared<SetOwnerState>(owner, group, handler, 0, false);
-
-    // Keep requesting more from Find until we process the entire listing. Call handler when Find is done and reques counter is 0.
-    // Find guarantees that the handler will only be called once at a time so we do not need locking in handlerFind.
-    auto handlerFind = [fs, state](const hdfs::Status &status_find, const std::vector<hdfs::StatInfo> & stat_infos, bool has_more_results) -> bool {
-
-      //For each result returned by Find we call async SetOwner with the handler below.
-      //SetOwner DOES NOT guarantee that the handler will only be called once at a time, so we DO need locking in handlerSetOwner.
-      auto handlerSetOwner = [state](const hdfs::Status &status_set_owner) {
-        std::lock_guard<std::mutex> guard(state->lock);
-
-        //Decrement the counter once since we are done with this async call
-        if (!status_set_owner.ok() && state->status.ok()){
-          //We make sure we set state->status only on the first error.
-          state->status = status_set_owner;
-        }
-        state->request_counter--;
-        if(state->request_counter == 0 && state->find_is_done){
-          state->handler(state->status); //exit
-        }
-      };
-      if(!stat_infos.empty() && state->status.ok()) {
-        for (hdfs::StatInfo const& s : stat_infos) {
-          //Launch an asynchronous call to SetOwner for every returned result
-          state->request_counter++;
-          fs->SetOwner(s.full_path, state->username, state->groupname, handlerSetOwner);
-        }
-      }
-
-      //Lock this section because handlerSetOwner might be accessing the same
-      //shared variables simultaneously
-      std::lock_guard<std::mutex> guard(state->lock);
-      if (!status_find.ok() && state->status.ok()){
-        //We make sure we set state->status only on the first error.
-        state->status = status_find;
-      }
-      if(!has_more_results){
-        state->find_is_done = true;
-        if(state->request_counter == 0){
-          state->handler(state->status); //exit
-        }
-        return false;
-      }
-      return true;
-    };
-
-    //Asynchronous call to Find
-    fs->Find(uri->get_path(), "*", hdfs::FileSystem::GetDefaultFindMaxDepth(), handlerFind);
-  }
-
-  /* block until promise is set */
-  hdfs::Status status = future.get();
-  if (!status.ok()) {
-    std::cerr << "Error: " << status.ToString() << std::endl;
-    exit(EXIT_FAILURE);
-  }
-
-  // Clean up static data and prevent valgrind memory leaks
-  google::protobuf::ShutdownProtobufLibrary();
-  return 0;
-}
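
The recursive (-R) paths of chgrp, chmod, and chown all share the bookkeeping visible above: a request counter incremented per async call, a flag set when Find delivers its last batch, and a mutex guarding both, with the final handler fired only when the flag is set and the counter has drained to zero. A condensed sketch of that rendezvous, using hypothetical names (int again stands in for hdfs::Status):

    #include <cstdint>
    #include <functional>
    #include <memory>
    #include <mutex>

    // Sketch of the completion-counting pattern: count outstanding async
    // requests, remember only the first error, and fire the final handler
    // once the producer (Find) is done AND the counter reaches zero.
    struct BatchState {
      std::function<void(int)> done;  // final handler
      uint64_t outstanding = 0;       // ++ per async call issued, -- per callback
      bool producer_done = false;     // set when the last batch of results arrives
      int first_error = 0;            // 0 == ok
      std::mutex lock;
    };

    // Called from each per-item completion callback. The lock is required:
    // unlike the serialized Find handler, these may run concurrently.
    void on_item_finished(const std::shared_ptr<BatchState> &s, int status) {
      std::lock_guard<std::mutex> g(s->lock);
      if (status != 0 && s->first_error == 0) s->first_error = status;
      if (--s->outstanding == 0 && s->producer_done) s->done(s->first_error);
    }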

http://git-wip-us.apache.org/repos/asf/hadoop/blob/40e3290b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs_chmod.cc
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs_chmod.cc b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs_chmod.cc
new file mode 100644
index 0000000..69d61a5
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs_chmod.cc
@@ -0,0 +1,187 @@
+/*
+  Licensed to the Apache Software Foundation (ASF) under one
+  or more contributor license agreements.  See the NOTICE file
+  distributed with this work for additional information
+  regarding copyright ownership.  The ASF licenses this file
+  to you under the Apache License, Version 2.0 (the
+  "License"); you may not use this file except in compliance
+  with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing,
+  software distributed under the License is distributed on an
+  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+  KIND, either express or implied.  See the License for the
+  specific language governing permissions and limitations
+  under the License.
+*/
+
+#include <google/protobuf/stubs/common.h>
+#include <unistd.h>
+#include <cctype>
+#include <cstdint>
+#include <cstdlib>
+#include <functional>
+#include <future>
+#include <mutex>
+#include <string>
+#include "tools_common.h"
+
+void usage(){
+  std::cout << "Usage: hdfs_chmod [OPTION] <MODE[,MODE]... | OCTALMODE> FILE"
+      << std::endl
+      << std::endl << "Change the permissions of each FILE to MODE."
+      << std::endl << "The user must be the owner of the file, or else a super-user."
+      << std::endl << "Additional information is in the Permissions Guide:"
+      << std::endl << "https://hadoop.apache.org/docs/r2.7.1/hadoop-project-dist/hadoop-hdfs/HdfsPermissionsGuide.html"
+      << std::endl
+      << std::endl << "  -R  operate on files and directories recursively"
+      << std::endl << "  -h  display this help and exit"
+      << std::endl
+      << std::endl << "Examples:"
+      << std::endl << "hdfs_chmod -R 755 hdfs://localhost.localdomain:8020/dir/file"
+      << std::endl << "hdfs_chmod 777 /dir/file"
+      << std::endl;
+}
+
+struct SetPermissionState {
+  const uint16_t permissions;
+  const std::function<void(const hdfs::Status &)> handler;
+  //The request counter is incremented once every time the SetPermission async call is made
+  uint64_t request_counter;
+  //This boolean will be set when find returns the last result
+  bool find_is_done;
+  //Final status to be returned
+  hdfs::Status status;
+  //Shared variables will need protection with a lock
+  std::mutex lock;
+  SetPermissionState(const uint16_t permissions_, const std::function<void(const hdfs::Status &)> & handler_,
+              uint64_t request_counter_, bool find_is_done_)
+      : permissions(permissions_),
+        handler(handler_),
+        request_counter(request_counter_),
+        find_is_done(find_is_done_),
+        status(),
+        lock() {
+  }
+};
+
+int main(int argc, char *argv[]) {
+  //We should have 3 or 4 parameters
+  if (argc != 3 && argc != 4) {
+    usage();
+    exit(EXIT_FAILURE);
+  }
+
+  bool recursive = false;
+  int input;
+
+  //Using GetOpt to read in the values
+  opterr = 0;
+  while ((input = getopt(argc, argv, "Rh")) != -1) {
+    switch (input)
+    {
+    case 'R':
+      recursive = true;
+      break;
+    case 'h':
+      usage();
+      exit(EXIT_SUCCESS);
+    case '?':
+      if (isprint(optopt))
+        std::cerr << "Unknown option `-" << (char) optopt << "'." << std::endl;
+      else
+        std::cerr << "Unknown option character `" << (char) optopt << "'." << std::endl;
+      usage();
+      exit(EXIT_FAILURE);
+    default:
+      exit(EXIT_FAILURE);
+    }
+  }
+  //Make sure both positional arguments are present (e.g. "-R 755" alone passes the argc check above)
+  if (optind + 1 >= argc) {
+    usage();
+    exit(EXIT_FAILURE);
+  }
+  std::string permissions = argv[optind];
+  std::string uri_path = argv[optind + 1];
+
+  //Building a URI object from the given uri_path
+  hdfs::optional<hdfs::URI> uri = hdfs::URI::parse_from_string(uri_path);
+  if (!uri) {
+    std::cerr << "Malformed URI: " << uri_path << std::endl;
+    exit(EXIT_FAILURE);
+  }
+
+  std::shared_ptr<hdfs::FileSystem> fs = hdfs::doConnect(uri.value(), true);
+  if (!fs) {
+    std::cerr << "Could not connect the file system. " << std::endl;
+    exit(EXIT_FAILURE);
+  }
+
+  /* wrap async FileSystem::SetPermission with promise to make it a blocking call */
+  std::shared_ptr<std::promise<hdfs::Status>> promise = std::make_shared<std::promise<hdfs::Status>>();
+  std::future<hdfs::Status> future(promise->get_future());
+  auto handler = [promise](const hdfs::Status &s) {
+    promise->set_value(s);
+  };
+
+  //Parse the octal mode string with strtol() in base 8; the NULL endptr means
+  //we do not need to know where parsing stopped.
+  uint16_t perm = strtol(permissions.c_str(), NULL, 8);
+  if(!recursive){
+    fs->SetPermission(uri->get_path(), perm, handler);
+  }
+  else {
+    //Allocating shared state, which includes:
+    //permissions to be set, handler to be called, request counter, and a boolean to track whether Find is done
+    std::shared_ptr<SetPermissionState> state = std::make_shared<SetPermissionState>(perm, handler, 0, false);
+
+    // Keep requesting more from Find until we process the entire listing. Call handler when Find is done and the request counter is 0.
+    // Find guarantees that the handler will only be called once at a time so we do not need locking in handlerFind.
+    auto handlerFind = [fs, state](const hdfs::Status &status_find, const std::vector<hdfs::StatInfo> & stat_infos, bool has_more_results) -> bool {
+
+      //For each result returned by Find we call async SetPermission with the handler below.
+      //SetPermission DOES NOT guarantee that the handler will only be called once at a time, so we DO need locking in handlerSetPermission.
+      auto handlerSetPermission = [state](const hdfs::Status &status_set_permission) {
+        std::lock_guard<std::mutex> guard(state->lock);
+
+        //Decrement the counter once since we are done with this async call
+        if (!status_set_permission.ok() && state->status.ok()){
+          //We make sure we set state->status only on the first error.
+          state->status = status_set_permission;
+        }
+        state->request_counter--;
+        if(state->request_counter == 0 && state->find_is_done){
+          state->handler(state->status); //exit
+        }
+      };
+      if(!stat_infos.empty() && state->status.ok()) {
+        for (hdfs::StatInfo const& s : stat_infos) {
+          //Launch an asynchronous call to SetPermission for every returned result
+          state->request_counter++;
+          fs->SetPermission(s.full_path, state->permissions, handlerSetPermission);
+        }
+      }
+
+      //Lock this section because handlerSetPermission might be accessing the same
+      //shared variables simultaneously
+      std::lock_guard<std::mutex> guard(state->lock);
+      if (!status_find.ok() && state->status.ok()){
+        //We make sure we set state->status only on the first error.
+        state->status = status_find;
+      }
+      if(!has_more_results){
+        state->find_is_done = true;
+        if(state->request_counter == 0){
+          state->handler(state->status); //exit
+        }
+        return false;
+      }
+      return true;
+    };
+
+    //Asynchronous call to Find
+    fs->Find(uri->get_path(), "*", hdfs::FileSystem::GetDefaultFindMaxDepth(), handlerFind);
+  }
+
+  /* block until promise is set */
+  hdfs::Status status = future.get();
+  if (!status.ok()) {
+    std::cerr << "Error: " << status.ToString() << std::endl;
+    exit(EXIT_FAILURE);
+  }
+
+  // Clean up static data and prevent valgrind memory leaks
+  google::protobuf::ShutdownProtobufLibrary();
+  return 0;
+}
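
One caveat with the strtol() call in hdfs_chmod.cc above: it silently yields 0 for non-octal input, so a bad MODE would be applied as mode 0000 rather than rejected. A sketch of a stricter parser, assuming HDFS modes fit in the 12 octal permission bits (07777); parse_octal_mode is a hypothetical helper, not part of the patch:

    #include <cerrno>
    #include <cstdint>
    #include <cstdlib>

    // Stricter octal-mode parsing: use the endptr to reject trailing junk,
    // check errno for overflow, and range-check against 07777.
    bool parse_octal_mode(const char *text, uint16_t *out) {
      char *end = nullptr;
      errno = 0;
      long value = strtol(text, &end, 8);
      if (errno != 0 || end == text || *end != '\0') return false;  // not a clean octal number
      if (value < 0 || value > 07777) return false;                 // outside permission range
      *out = static_cast<uint16_t>(value);
      return true;
    }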

http://git-wip-us.apache.org/repos/asf/hadoop/blob/40e3290b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs_chmod.cpp
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs_chmod.cpp b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs_chmod.cpp
deleted file mode 100644
index 0a001d6..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs_chmod.cpp
+++ /dev/null
@@ -1,194 +0,0 @@
-/*
-  Licensed to the Apache Software Foundation (ASF) under one
-  or more contributor license agreements.  See the NOTICE file
-  distributed with this work for additional information
-  regarding copyright ownership.  The ASF licenses this file
-  to you under the Apache License, Version 2.0 (the
-  "License"); you may not use this file except in compliance
-  with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing,
-  software distributed under the License is distributed on an
-  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-  KIND, either express or implied.  See the License for the
-  specific language governing permissions and limitations
-  under the License.
-*/
-
-#include <google/protobuf/stubs/common.h>
-#include <unistd.h>
-#include <future>
-#include "tools_common.h"
-
-void usage(){
-  std::cout << "Usage: hdfs_chmod [OPTION] <MODE[,MODE]... | OCTALMODE> FILE"
-      << std::endl
-      << std::endl << "Change the permissions of each FILE to MODE."
-      << std::endl << "The user must be the owner of the file, or else a super-user."
-      << std::endl << "Additional information is in the Permissions Guide:"
-      << std::endl << "https://hadoop.apache.org/docs/r2.7.1/hadoop-project-dist/hadoop-hdfs/HdfsPermissionsGuide.html"
-      << std::endl
-      << std::endl << "  -R  operate on files and directories recursively"
-      << std::endl << "  -h  display this help and exit"
-      << std::endl
-      << std::endl << "Examples:"
-      << std::endl << "hdfs_chmod -R 755 hdfs://localhost.localdomain:9433/dir/file"
-      << std::endl << "hdfs_chmod 777 /dir/file"
-      << std::endl;
-}
-
-struct SetPermissionState {
-  const uint16_t permissions;
-  const std::function<void(const hdfs::Status &)> handler;
-  //The request counter is incremented once every time SetOwner async call is made
-  uint64_t request_counter;
-  //This boolean will be set when find returns the last result
-  bool find_is_done;
-  //Final status to be returned
-  hdfs::Status status;
-  //Shared variables will need protection with a lock
-  std::mutex lock;
-  SetPermissionState(const uint16_t permissions_, const std::function<void(const hdfs::Status &)> & handler_,
-              uint64_t request_counter_, bool find_is_done_)
-      : permissions(permissions_),
-        handler(handler_),
-        request_counter(request_counter_),
-        find_is_done(find_is_done_),
-        status(),
-        lock() {
-  }
-};
-
-int main(int argc, char *argv[]) {
-  //We should have 3 or 4 parameters
-  if (argc != 3 && argc != 4) {
-    usage();
-    exit(EXIT_FAILURE);
-  }
-
-  bool recursive = false;
-  int input;
-
-  //Using GetOpt to read in the values
-  opterr = 0;
-  while ((input = getopt(argc, argv, "Rh")) != -1) {
-    switch (input)
-    {
-    case 'R':
-      recursive = 1;
-      break;
-    case 'h':
-      usage();
-      exit(EXIT_SUCCESS);
-      break;
-    case '?':
-      if (isprint(optopt))
-        std::cerr << "Unknown option `-" << (char) optopt << "'." << std::endl;
-      else
-        std::cerr << "Unknown option character `" << (char) optopt << "'." << std::endl;
-      usage();
-      exit(EXIT_FAILURE);
-    default:
-      exit(EXIT_FAILURE);
-    }
-  }
-  std::string permissions = argv[optind];
-  std::string uri_path = argv[optind + 1];
-
-  //Building a URI object from the given uri_path
-  hdfs::optional<hdfs::URI> uri = hdfs::URI::parse_from_string(uri_path);
-  if (!uri) {
-    std::cerr << "Malformed URI: " << uri_path << std::endl;
-    exit(EXIT_FAILURE);
-  }
-
-  //TODO: HDFS-9539 Currently options can be returned empty
-  hdfs::Options options = *hdfs::getOptions();
-
-  //TODO: HDFS-9539 - until then we increase the time-out to allow all recursive async calls to finish
-  options.rpc_timeout = std::numeric_limits<int>::max();
-
-  std::shared_ptr<hdfs::FileSystem> fs = hdfs::doConnect(uri.value(), options);
-  if (!fs) {
-    std::cerr << "Could not connect the file system. " << std::endl;
-    exit(EXIT_FAILURE);
-  }
-
-  /* wrap async FileSystem::SetPermission with promise to make it a blocking call */
-  std::shared_ptr<std::promise<hdfs::Status>> promise = std::make_shared<std::promise<hdfs::Status>>();
-  std::future<hdfs::Status> future(promise->get_future());
-  auto handler = [promise](const hdfs::Status &s) {
-    promise->set_value(s);
-  };
-
-  //strtol() is reading the value with base 8, NULL because we are reading in just one value.
-  uint16_t perm = strtol(permissions.c_str(), NULL, 8);
-  if(!recursive){
-    fs->SetPermission(uri->get_path(), perm, handler);
-  }
-  else {
-    //Allocating shared state, which includes:
-    //username and groupname to be set, handler to be called, request counter, and a boolean to keep track if find is done
-    std::shared_ptr<SetPermissionState> state = std::make_shared<SetPermissionState>(perm, handler, 0, false);
-
-    // Keep requesting more from Find until we process the entire listing. Call handler when Find is done and reques counter is 0.
-    // Find guarantees that the handler will only be called once at a time so we do not need locking in handlerFind.
-    auto handlerFind = [fs, state](const hdfs::Status &status_find, const std::vector<hdfs::StatInfo> & stat_infos, bool has_more_results) -> bool {
-
-      //For each result returned by Find we call async SetOwner with the handler below.
-      //SetOwner DOES NOT guarantee that the handler will only be called once at a time, so we DO need locking in handlerSetOwner.
-      auto handlerSetOwner = [state](const hdfs::Status &status_set_owner) {
-        std::lock_guard<std::mutex> guard(state->lock);
-
-        //Decrement the counter once since we are done with this async call
-        if (!status_set_owner.ok() && state->status.ok()){
-          //We make sure we set state->status only on the first error.
-          state->status = status_set_owner;
-        }
-        state->request_counter--;
-        if(state->request_counter == 0 && state->find_is_done){
-          state->handler(state->status); //exit
-        }
-      };
-      if(!stat_infos.empty() && state->status.ok()) {
-        for (hdfs::StatInfo const& s : stat_infos) {
-          //Launch an asynchronous call to SetOwner for every returned result
-          state->request_counter++;
-          fs->SetPermission(s.full_path, state->permissions, handlerSetOwner);
-        }
-      }
-
-      //Lock this section because handlerSetOwner might be accessing the same
-      //shared variables simultaneously
-      std::lock_guard<std::mutex> guard(state->lock);
-      if (!status_find.ok() && state->status.ok()){
-        //We make sure we set state->status only on the first error.
-        state->status = status_find;
-      }
-      if(!has_more_results){
-        state->find_is_done = true;
-        if(state->request_counter == 0){
-          state->handler(state->status); //exit
-        }
-        return false;
-      }
-      return true;
-    };
-
-    //Asynchronous call to Find
-    fs->Find(uri->get_path(), "*", hdfs::FileSystem::GetDefaultFindMaxDepth(), handlerFind);
-  }
-
-  /* block until promise is set */
-  hdfs::Status status = future.get();
-  if (!status.ok()) {
-    std::cerr << "Error: " << status.ToString() << std::endl;
-    exit(EXIT_FAILURE);
-  }
-
-  // Clean up static data and prevent valgrind memory leaks
-  google::protobuf::ShutdownProtobufLibrary();
-  return 0;
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/40e3290b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs_chown.cc
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs_chown.cc b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs_chown.cc
new file mode 100644
index 0000000..59ff9c3
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs_chown.cc
@@ -0,0 +1,199 @@
+/*
+  Licensed to the Apache Software Foundation (ASF) under one
+  or more contributor license agreements.  See the NOTICE file
+  distributed with this work for additional information
+  regarding copyright ownership.  The ASF licenses this file
+  to you under the Apache License, Version 2.0 (the
+  "License"); you may not use this file except in compliance
+  with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing,
+  software distributed under the License is distributed on an
+  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+  KIND, either express or implied.  See the License for the
+  specific language governing permissions and limitations
+  under the License.
+*/
+
+#include <google/protobuf/stubs/common.h>
+#include <unistd.h>
+#include <cctype>
+#include <cstdint>
+#include <functional>
+#include <future>
+#include <mutex>
+#include <string>
+#include "tools_common.h"
+
+void usage(){
+  std::cout << "Usage: hdfs_chown [OPTION] [OWNER][:[GROUP]] FILE"
+      << std::endl
+      << std::endl << "Change the owner and/or group of each FILE to OWNER and/or GROUP."
+      << std::endl << "The user must be a super-user. Additional information is in the Permissions Guide:"
+      << std::endl << "https://hadoop.apache.org/docs/r2.7.1/hadoop-project-dist/hadoop-hdfs/HdfsPermissionsGuide.html"
+      << std::endl
+      << std::endl << "  -R  operate on files and directories recursively"
+      << std::endl << "  -h  display this help and exit"
+      << std::endl
+      << std::endl << "Owner is unchanged if missing.  Group is unchanged if missing."
+      << std::endl << "OWNER and GROUP may be numeric as well as symbolic."
+      << std::endl
+      << std::endl << "Examples:"
+      << std::endl << "hdfs_chown -R new_owner:new_group hdfs://localhost.localdomain:8020/dir/file"
+      << std::endl << "hdfs_chown new_owner /dir/file"
+      << std::endl;
+}
+
+struct SetOwnerState {
+  const std::string username;
+  const std::string groupname;
+  const std::function<void(const hdfs::Status &)> handler;
+  //The request counter is incremented once every time SetOwner async call is made
+  uint64_t request_counter;
+  //This boolean will be set when find returns the last result
+  bool find_is_done;
+  //Final status to be returned
+  hdfs::Status status;
+  //Shared variables will need protection with a lock
+  std::mutex lock;
+  SetOwnerState(const std::string & username_, const std::string & groupname_,
+                const std::function<void(const hdfs::Status &)> & handler_,
+              uint64_t request_counter_, bool find_is_done_)
+      : username(username_),
+        groupname(groupname_),
+        handler(handler_),
+        request_counter(request_counter_),
+        find_is_done(find_is_done_),
+        status(),
+        lock() {
+  }
+};
+
+int main(int argc, char *argv[]) {
+  //We should have 3 or 4 parameters
+  if (argc != 3 && argc != 4) {
+    usage();
+    exit(EXIT_FAILURE);
+  }
+
+  bool recursive = false;
+  int input;
+
+  //Using GetOpt to read in the values
+  opterr = 0;
+  while ((input = getopt(argc, argv, "Rh")) != -1) {
+    switch (input)
+    {
+    case 'R':
+      recursive = true;
+      break;
+    case 'h':
+      usage();
+      exit(EXIT_SUCCESS);
+    case '?':
+      if (isprint(optopt))
+        std::cerr << "Unknown option `-" << (char) optopt << "'." << std::endl;
+      else
+        std::cerr << "Unknown option character `" << (char) optopt << "'." << std::endl;
+      usage();
+      exit(EXIT_FAILURE);
+    default:
+      exit(EXIT_FAILURE);
+    }
+  }
+  //Make sure both positional arguments are present (e.g. "-R new_owner" alone passes the argc check above)
+  if (optind + 1 >= argc) {
+    usage();
+    exit(EXIT_FAILURE);
+  }
+  std::string owner_and_group = argv[optind];
+  std::string uri_path = argv[optind + 1];
+
+  std::string owner, group;
+  size_t owner_end = owner_and_group.find(':');
+  if(owner_end == std::string::npos) {
+    owner = owner_and_group;
+  } else {
+    owner = owner_and_group.substr(0, owner_end);
+    group = owner_and_group.substr(owner_end + 1);
+  }
+
+  //Building a URI object from the given uri_path
+  hdfs::optional<hdfs::URI> uri = hdfs::URI::parse_from_string(uri_path);
+  if (!uri) {
+    std::cerr << "Malformed URI: " << uri_path << std::endl;
+    exit(EXIT_FAILURE);
+  }
+
+  std::shared_ptr<hdfs::FileSystem> fs = hdfs::doConnect(uri.value(), true);
+  if (!fs) {
+    std::cerr << "Could not connect the file system. " << std::endl;
+    exit(EXIT_FAILURE);
+  }
+
+  /* wrap async FileSystem::SetOwner with promise to make it a blocking call */
+  std::shared_ptr<std::promise<hdfs::Status>> promise = std::make_shared<std::promise<hdfs::Status>>();
+  std::future<hdfs::Status> future(promise->get_future());
+  auto handler = [promise](const hdfs::Status &s) {
+    promise->set_value(s);
+  };
+
+  if(!recursive){
+    fs->SetOwner(uri->get_path(), owner, group, handler);
+  }
+  else {
+    //Allocating shared state, which includes:
+    //username and groupname to be set, handler to be called, request counter, and a boolean to track whether Find is done
+    std::shared_ptr<SetOwnerState> state = std::make_shared<SetOwnerState>(owner, group, handler, 0, false);
+
+    // Keep requesting more from Find until we process the entire listing. Call handler when Find is done and the request counter is 0.
+    // Find guarantees that the handler will only be called once at a time so we do not need locking in handlerFind.
+    auto handlerFind = [fs, state](const hdfs::Status &status_find, const std::vector<hdfs::StatInfo> & stat_infos, bool has_more_results) -> bool {
+
+      //For each result returned by Find we call async SetOwner with the handler below.
+      //SetOwner DOES NOT guarantee that the handler will only be called once at a time, so we DO need locking in handlerSetOwner.
+      auto handlerSetOwner = [state](const hdfs::Status &status_set_owner) {
+        std::lock_guard<std::mutex> guard(state->lock);
+
+        //Decrement the counter once since we are done with this async call
+        if (!status_set_owner.ok() && state->status.ok()){
+          //We make sure we set state->status only on the first error.
+          state->status = status_set_owner;
+        }
+        state->request_counter--;
+        if(state->request_counter == 0 && state->find_is_done){
+          state->handler(state->status); //exit
+        }
+      };
+      if(!stat_infos.empty() && state->status.ok()) {
+        for (hdfs::StatInfo const& s : stat_infos) {
+          //Launch an asynchronous call to SetOwner for every returned result
+          state->request_counter++;
+          fs->SetOwner(s.full_path, state->username, state->groupname, handlerSetOwner);
+        }
+      }
+
+      //Lock this section because handlerSetOwner might be accessing the same
+      //shared variables simultaneously
+      std::lock_guard<std::mutex> guard(state->lock);
+      if (!status_find.ok() && state->status.ok()){
+        //We make sure we set state->status only on the first error.
+        state->status = status_find;
+      }
+      if(!has_more_results){
+        state->find_is_done = true;
+        if(state->request_counter == 0){
+          state->handler(state->status); //exit
+        }
+        return false;
+      }
+      return true;
+    };
+
+    //Asynchronous call to Find
+    fs->Find(uri->get_path(), "*", hdfs::FileSystem::GetDefaultFindMaxDepth(), handlerFind);
+  }
+
+  /* block until promise is set */
+  hdfs::Status status = future.get();
+  if (!status.ok()) {
+    std::cerr << "Error: " << status.ToString() << std::endl;
+    exit(EXIT_FAILURE);
+  }
+
+  // Clean up static data and prevent valgrind memory leaks
+  google::protobuf::ShutdownProtobufLibrary();
+  return 0;
+}
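
hdfs_chown splits its first operand on the first ':' so that "owner", "owner:group", and ":group" all behave per the usage text, with an empty field meaning "leave unchanged". A standalone sketch of that split with a few checks (split_owner_group is a hypothetical helper mirroring the inline logic above, not part of the patch):

    #include <cassert>
    #include <string>
    #include <utility>

    // Split "OWNER[:GROUP]" on the first ':'; an empty field means
    // "leave unchanged", matching hdfs_chown.cc above.
    std::pair<std::string, std::string> split_owner_group(const std::string &arg) {
      size_t pos = arg.find(':');
      if (pos == std::string::npos) return {arg, ""};
      return {arg.substr(0, pos), arg.substr(pos + 1)};
    }

    int main() {
      assert(split_owner_group("alice").first == "alice");
      assert(split_owner_group("alice:staff").second == "staff");
      assert(split_owner_group(":staff").first.empty());  // owner unchanged
      return 0;
    }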



