hadoop-common-commits mailing list archives

From hanishakon...@apache.org
Subject [25/50] [abbrv] hadoop git commit: HDFS-10785: libhdfs++: Implement the rest of the tools. Contributed by Anatoli Schein
Date Mon, 26 Mar 2018 18:13:44 GMT
http://git-wip-us.apache.org/repos/asf/hadoop/blob/55b3fdfe/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs_chown.cpp
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs_chown.cpp b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs_chown.cpp
deleted file mode 100644
index 08724c6..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs_chown.cpp
+++ /dev/null
@@ -1,206 +0,0 @@
-/*
-  Licensed to the Apache Software Foundation (ASF) under one
-  or more contributor license agreements.  See the NOTICE file
-  distributed with this work for additional information
-  regarding copyright ownership.  The ASF licenses this file
-  to you under the Apache License, Version 2.0 (the
-  "License"); you may not use this file except in compliance
-  with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing,
-  software distributed under the License is distributed on an
-  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-  KIND, either express or implied.  See the License for the
-  specific language governing permissions and limitations
-  under the License.
-*/
-
-#include <google/protobuf/stubs/common.h>
-#include <unistd.h>
-#include <future>
-#include "tools_common.h"
-
-void usage(){
-  std::cout << "Usage: hdfs_chown [OPTION] [OWNER][:[GROUP]] FILE"
-      << std::endl
-      << std::endl << "Change the owner and/or group of each FILE to OWNER and/or GROUP."
-      << std::endl << "The user must be a super-user. Additional information is in the Permissions Guide:"
-      << std::endl << "https://hadoop.apache.org/docs/r2.7.1/hadoop-project-dist/hadoop-hdfs/HdfsPermissionsGuide.html"
-      << std::endl
-      << std::endl << "  -R  operate on files and directories recursively"
-      << std::endl << "  -h  display this help and exit"
-      << std::endl
-      << std::endl << "Owner is unchanged if missing.  Group is unchanged if missing."
-      << std::endl << "OWNER and GROUP may be numeric as well as symbolic."
-      << std::endl
-      << std::endl << "Examples:"
-      << std::endl << "hdfs_chown -R new_owner:new_group hdfs://localhost.localdomain:9433/dir/file"
-      << std::endl << "hdfs_chown new_owner /dir/file"
-      << std::endl;
-}
-
-struct SetOwnerState {
-  const std::string username;
-  const std::string groupname;
-  const std::function<void(const hdfs::Status &)> handler;
-  //The request counter is incremented once every time SetOwner async call is made
-  uint64_t request_counter;
-  //This boolean will be set when find returns the last result
-  bool find_is_done;
-  //Final status to be returned
-  hdfs::Status status;
-  //Shared variables will need protection with a lock
-  std::mutex lock;
-  SetOwnerState(const std::string & username_, const std::string & groupname_,
-                const std::function<void(const hdfs::Status &)> & handler_,
-              uint64_t request_counter_, bool find_is_done_)
-      : username(username_),
-        groupname(groupname_),
-        handler(handler_),
-        request_counter(request_counter_),
-        find_is_done(find_is_done_),
-        status(),
-        lock() {
-  }
-};
-
-int main(int argc, char *argv[]) {
-  //We should have 3 or 4 parameters
-  if (argc != 3 && argc != 4) {
-    usage();
-    exit(EXIT_FAILURE);
-  }
-
-  bool recursive = false;
-  int input;
-
-  //Using GetOpt to read in the values
-  opterr = 0;
-  while ((input = getopt(argc, argv, "Rh")) != -1) {
-    switch (input)
-    {
-    case 'R':
-      recursive = true;
-      break;
-    case 'h':
-      usage();
-      exit(EXIT_SUCCESS);
-      break;
-    case '?':
-      if (isprint(optopt))
-        std::cerr << "Unknown option `-" << (char) optopt << "'." << std::endl;
-      else
-        std::cerr << "Unknown option character `" << (char) optopt << "'." << std::endl;
-      usage();
-      exit(EXIT_FAILURE);
-    default:
-      exit(EXIT_FAILURE);
-    }
-  }
-  std::string owner_and_group = argv[optind];
-  std::string uri_path = argv[optind + 1];
-
-  std::string owner, group;
-  size_t owner_end = owner_and_group.find(":");
-  if(owner_end == std::string::npos) {
-    owner = owner_and_group;
-  } else {
-    owner = owner_and_group.substr(0, owner_end);
-    group = owner_and_group.substr(owner_end + 1);
-  }
-
-  //Building a URI object from the given uri_path
-  hdfs::optional<hdfs::URI> uri = hdfs::URI::parse_from_string(uri_path);
-  if (!uri) {
-    std::cerr << "Malformed URI: " << uri_path << std::endl;
-    exit(EXIT_FAILURE);
-  }
-
-  //TODO: HDFS-9539 Currently options can be returned empty
-  hdfs::Options options = *hdfs::getOptions();
-
-  //TODO: HDFS-9539 - until then we increase the time-out to allow all recursive async calls to finish
-  options.rpc_timeout = std::numeric_limits<int>::max();
-
-  std::shared_ptr<hdfs::FileSystem> fs = hdfs::doConnect(uri.value(), options);
-  if (!fs) {
-    std::cerr << "Could not connect the file system. " << std::endl;
-    exit(EXIT_FAILURE);
-  }
-
-  /* wrap async FileSystem::SetOwner with promise to make it a blocking call */
-  std::shared_ptr<std::promise<hdfs::Status>> promise = std::make_shared<std::promise<hdfs::Status>>();
-  std::future<hdfs::Status> future(promise->get_future());
-  auto handler = [promise](const hdfs::Status &s) {
-    promise->set_value(s);
-  };
-
-  if(!recursive){
-    fs->SetOwner(uri->get_path(), owner, group, handler);
-  }
-  else {
-    //Allocating shared state, which includes:
-    //username and groupname to be set, handler to be called, request counter, and a boolean to keep track if find is done
-    std::shared_ptr<SetOwnerState> state = std::make_shared<SetOwnerState>(owner, group, handler, 0, false);
-
-    // Keep requesting more from Find until we process the entire listing. Call handler when Find is done and the request counter is 0.
-    // Find guarantees that the handler will only be called once at a time so we do not need locking in handlerFind.
-    auto handlerFind = [fs, state](const hdfs::Status &status_find, const std::vector<hdfs::StatInfo> & stat_infos, bool has_more_results) -> bool {
-
-      //For each result returned by Find we call async SetOwner with the handler below.
-      //SetOwner DOES NOT guarantee that the handler will only be called once at a time, so we DO need locking in handlerSetOwner.
-      auto handlerSetOwner = [state](const hdfs::Status &status_set_owner) {
-        std::lock_guard<std::mutex> guard(state->lock);
-
-        //Decrement the counter once since we are done with this async call
-        if (!status_set_owner.ok() && state->status.ok()){
-          //We make sure we set state->status only on the first error.
-          state->status = status_set_owner;
-        }
-        state->request_counter--;
-        if(state->request_counter == 0 && state->find_is_done){
-          state->handler(state->status); //exit
-        }
-      };
-      if(!stat_infos.empty() && state->status.ok()) {
-        for (hdfs::StatInfo const& s : stat_infos) {
-          //Launch an asynchronous call to SetOwner for every returned result
-          state->request_counter++;
-          fs->SetOwner(s.full_path, state->username, state->groupname, handlerSetOwner);
-        }
-      }
-
-      //Lock this section because handlerSetOwner might be accessing the same
-      //shared variables simultaneously
-      std::lock_guard<std::mutex> guard(state->lock);
-      if (!status_find.ok() && state->status.ok()){
-        //We make sure we set state->status only on the first error.
-        state->status = status_find;
-      }
-      if(!has_more_results){
-        state->find_is_done = true;
-        if(state->request_counter == 0){
-          state->handler(state->status); //exit
-        }
-        return false;
-      }
-      return true;
-    };
-
-    //Asynchronous call to Find
-    fs->Find(uri->get_path(), "*", hdfs::FileSystem::GetDefaultFindMaxDepth(), handlerFind);
-  }
-
-  /* block until promise is set */
-  hdfs::Status status = future.get();
-  if (!status.ok()) {
-    std::cerr << "Error: " << status.ToString() << std::endl;
-    exit(EXIT_FAILURE);
-  }
-
-  // Clean up static data and prevent valgrind memory leaks
-  google::protobuf::ShutdownProtobufLibrary();
-  return 0;
-}
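
Every tool in this patch turns libhdfs++'s callback-based API into a blocking call by wrapping it in a std::promise/std::future pair, as hdfs_chown does above with hdfs::Status. Below is a minimal, self-contained sketch of that idiom; async_set_owner is a stand-in for an asynchronous call such as FileSystem::SetOwner, not part of libhdfs++.

#include <functional>
#include <future>
#include <iostream>
#include <memory>
#include <string>
#include <thread>

// Stand-in for an asynchronous API: returns immediately and invokes the
// handler later from another thread.
void async_set_owner(const std::string &path,
                     std::function<void(bool)> handler) {
  std::thread([path, handler]() { handler(!path.empty()); }).detach();
}

int main() {
  // Wrap the async call with a promise to make it a blocking call.
  auto promise = std::make_shared<std::promise<bool>>();
  std::future<bool> future = promise->get_future();
  async_set_owner("/dir/file",
                  [promise](bool ok) { promise->set_value(ok); });

  // Block until the handler fires, exactly like future.get() in the tools.
  std::cout << (future.get() ? "ok" : "error") << std::endl;
  return 0;
}

The non-recursive path of hdfs_chown, fs->SetOwner(path, owner, group, handler), follows exactly this shape with hdfs::Status in place of bool.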

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55b3fdfe/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs_copyToLocal.cc
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs_copyToLocal.cc b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs_copyToLocal.cc
new file mode 100644
index 0000000..493ff62
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs_copyToLocal.cc
@@ -0,0 +1,92 @@
+/*
+  Licensed to the Apache Software Foundation (ASF) under one
+  or more contributor license agreements.  See the NOTICE file
+  distributed with this work for additional information
+  regarding copyright ownership.  The ASF licenses this file
+  to you under the Apache License, Version 2.0 (the
+  "License"); you may not use this file except in compliance
+  with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing,
+  software distributed under the License is distributed on an
+  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+  KIND, either express or implied.  See the License for the
+  specific language governing permissions and limitations
+  under the License.
+*/
+
+#include <google/protobuf/stubs/common.h>
+#include <unistd.h>
+#include "tools_common.h"
+
+void usage(){
+  std::cout << "Usage: hdfs_copyToLocal [OPTION] SRC_FILE DST_FILE"
+      << std::endl
+      << std::endl << "Copy SRC_FILE from hdfs to DST_FILE on the local file system."
+      << std::endl
+      << std::endl << "  -h  display this help and exit"
+      << std::endl
+      << std::endl << "Examples:"
+      << std::endl << "hdfs_copyToLocal hdfs://localhost.localdomain:8020/dir/file /home/usr/myfile"
+      << std::endl << "hdfs_copyToLocal /dir/file /home/usr/dir/file"
+      << std::endl;
+}
+
+int main(int argc, char *argv[]) {
+  if (argc != 3 && argc != 4) {
+    usage();
+    exit(EXIT_FAILURE);
+  }
+
+  int input;
+
+  //Using GetOpt to read in the values
+  opterr = 0;
+  while ((input = getopt(argc, argv, "h")) != -1) {
+    switch (input)
+    {
+    case 'h':
+      usage();
+      exit(EXIT_SUCCESS);
+    case '?':
+      if (isprint(optopt))
+        std::cerr << "Unknown option `-" << (char) optopt << "'." << std::endl;
+      else
+        std::cerr << "Unknown option character `" << (char) optopt << "'." << std::endl;
+      usage();
+      exit(EXIT_FAILURE);
+    default:
+      exit(EXIT_FAILURE);
+    }
+  }
+
+  std::string uri_path = argv[optind];
+  std::string dest = argv[optind+1];
+
+  //Building a URI object from the given uri_path
+  hdfs::optional<hdfs::URI> uri = hdfs::URI::parse_from_string(uri_path);
+  if (!uri) {
+    std::cerr << "Malformed URI: " << uri_path << std::endl;
+    exit(EXIT_FAILURE);
+  }
+
+  std::shared_ptr<hdfs::FileSystem> fs = hdfs::doConnect(uri.value(), false);
+  if (!fs) {
+    std::cerr << "Could not connect the file system. " << std::endl;
+    exit(EXIT_FAILURE);
+  }
+
+  std::FILE* dst_file = std::fopen(dest.c_str(), "wb");
+  if(!dst_file){
+    std::cerr << "Unable to open the destination file: " << dest << std::endl;
+    exit(EXIT_FAILURE);
+  }
+  readFile(fs, uri->get_path(), 0, dst_file, false);
+  std::fclose(dst_file);
+
+  // Clean up static data and prevent valgrind memory leaks
+  google::protobuf::ShutdownProtobufLibrary();
+  return 0;
+}
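
The readFile helper comes from tools_common.h and its body is not part of this diff; as an assumption about its general shape, it streams the remote file into the destination FILE* in fixed-size chunks. A local FILE*-to-FILE* analogue of that loop:

#include <cstdio>
#include <vector>

// Hypothetical local analogue of the readFile helper: copy src into dst in
// fixed-size chunks until EOF. The real helper reads from an HDFS file
// handle instead of a local FILE*.
bool copy_stream(std::FILE *src, std::FILE *dst) {
  std::vector<char> buffer(1 << 16);
  size_t n;
  while ((n = std::fread(buffer.data(), 1, buffer.size(), src)) > 0) {
    if (std::fwrite(buffer.data(), 1, n, dst) != n)
      return false;  // short write: destination error
  }
  return std::ferror(src) == 0;  // true only if we stopped at EOF
}

int main(int argc, char *argv[]) {
  if (argc != 3) return 1;
  std::FILE *src = std::fopen(argv[1], "rb");
  std::FILE *dst = std::fopen(argv[2], "wb");
  bool ok = src && dst && copy_stream(src, dst);
  if (src) std::fclose(src);
  if (dst) std::fclose(dst);
  return ok ? 0 : 1;
}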

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55b3fdfe/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs_count.cc
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs_count.cc b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs_count.cc
new file mode 100644
index 0000000..e43596c
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs_count.cc
@@ -0,0 +1,97 @@
+/*
+  Licensed to the Apache Software Foundation (ASF) under one
+  or more contributor license agreements.  See the NOTICE file
+  distributed with this work for additional information
+  regarding copyright ownership.  The ASF licenses this file
+  to you under the Apache License, Version 2.0 (the
+  "License"); you may not use this file except in compliance
+  with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing,
+  software distributed under the License is distributed on an
+  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+  KIND, either express or implied.  See the License for the
+  specific language governing permissions and limitations
+  under the License.
+*/
+
+#include <google/protobuf/stubs/common.h>
+#include <unistd.h>
+#include "tools_common.h"
+
+void usage(){
+  std::cout << "Usage: hdfs_count [OPTION] FILE"
+      << std::endl
+      << std::endl << "Count the number of directories, files and bytes under the path that match the specified FILE pattern."
+      << std::endl << "The output columns with -count are: DIR_COUNT, FILE_COUNT, CONTENT_SIZE, PATHNAME"
+      << std::endl
+      << std::endl << "  -q    output additional columns before the rest: QUOTA, SPACE_QUOTA, SPACE_CONSUMED"
+      << std::endl << "  -h    display this help and exit"
+      << std::endl
+      << std::endl << "Examples:"
+      << std::endl << "hdfs_count hdfs://localhost.localdomain:8020/dir"
+      << std::endl << "hdfs_count -q /dir1/dir2"
+      << std::endl;
+}
+
+int main(int argc, char *argv[]) {
+  //We should have at least 2 arguments
+  if (argc < 2) {
+    usage();
+    exit(EXIT_FAILURE);
+  }
+
+  bool quota = false;
+  int input;
+
+  //Using GetOpt to read in the values
+  opterr = 0;
+  while ((input = getopt(argc, argv, "qh")) != -1) {
+    switch (input)
+    {
+    case 'q':
+      quota = true;
+      break;
+    case 'h':
+      usage();
+      exit(EXIT_SUCCESS);
+    case '?':
+      if (isprint(optopt))
+        std::cerr << "Unknown option `-" << (char) optopt << "'." << std::endl;
+      else
+        std::cerr << "Unknown option character `" << (char) optopt << "'." << std::endl;
+      usage();
+      exit(EXIT_FAILURE);
+    default:
+      exit(EXIT_FAILURE);
+    }
+  }
+  std::string uri_path = argv[optind];
+
+  //Building a URI object from the given uri_path
+  hdfs::optional<hdfs::URI> uri = hdfs::URI::parse_from_string(uri_path);
+  if (!uri) {
+    std::cerr << "Malformed URI: " << uri_path << std::endl;
+    exit(EXIT_FAILURE);
+  }
+
+  std::shared_ptr<hdfs::FileSystem> fs = hdfs::doConnect(uri.value(), false);
+  if (!fs) {
+    std::cerr << "Could not connect the file system. " << std::endl;
+    exit(EXIT_FAILURE);
+  }
+
+  hdfs::ContentSummary content_summary;
+  hdfs::Status status = fs->GetContentSummary(uri->get_path(), content_summary);
+  if (!status.ok()) {
+    std::cerr << "Error: " << status.ToString() << std::endl;
+    exit(EXIT_FAILURE);
+  }
+  std::cout << content_summary.str(quota) << std::endl;
+
+  // Clean up static data and prevent valgrind memory leaks
+  google::protobuf::ShutdownProtobufLibrary();
+  return 0;
+}
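
The column layout the usage text describes can be illustrated with a small stand-alone formatter. The Summary struct below is hypothetical, chosen only to mirror the printed columns; the real hdfs::ContentSummary::str(bool) is defined elsewhere in libhdfs++.

#include <iostream>
#include <sstream>
#include <string>

// Hypothetical mirror of the columns hdfs_count prints.
struct Summary {
  unsigned long long quota = 0, space_quota = 0, space_consumed = 0;
  unsigned long long dir_count = 0, file_count = 0, content_size = 0;
  std::string path;

  std::string str(bool with_quota) const {
    std::ostringstream out;
    if (with_quota)  // -q prepends QUOTA, SPACE_QUOTA, SPACE_CONSUMED
      out << quota << " " << space_quota << " " << space_consumed << " ";
    out << dir_count << " " << file_count << " " << content_size << " " << path;
    return out.str();
  }
};

int main() {
  Summary s;
  s.dir_count = 3; s.file_count = 12; s.content_size = 4096;
  s.path = "/dir1/dir2";
  std::cout << s.str(false) << std::endl;  // DIR_COUNT FILE_COUNT CONTENT_SIZE PATHNAME
  std::cout << s.str(true) << std::endl;   // QUOTA SPACE_QUOTA SPACE_CONSUMED first
  return 0;
}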

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55b3fdfe/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs_createSnapshot.cc
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs_createSnapshot.cc b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs_createSnapshot.cc
new file mode 100644
index 0000000..fd079a2
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs_createSnapshot.cc
@@ -0,0 +1,99 @@
+/*
+  Licensed to the Apache Software Foundation (ASF) under one
+  or more contributor license agreements.  See the NOTICE file
+  distributed with this work for additional information
+  regarding copyright ownership.  The ASF licenses this file
+  to you under the Apache License, Version 2.0 (the
+  "License"); you may not use this file except in compliance
+  with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing,
+  software distributed under the License is distributed on an
+  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+  KIND, either express or implied.  See the License for the
+  specific language governing permissions and limitations
+  under the License.
+*/
+
+#include <google/protobuf/stubs/common.h>
+#include <unistd.h>
+#include "tools_common.h"
+
+void usage(){
+  std::cout << "Usage: hdfs_createSnapshot [OPTION] PATH"
+      << std::endl
+      << std::endl << "Create a snapshot of a snapshottable directory."
+      << std::endl << "This operation requires owner privilege of the snapshottable directory."
+      << std::endl
+      << std::endl << "  -n NAME   The snapshot name. When it is omitted, a default name is generated"
+      << std::endl << "             using a timestamp with the format:"
+      << std::endl << "             \"'s'yyyyMMdd-HHmmss.SSS\", e.g. s20130412-151029.033"
+      << std::endl << "  -h        display this help and exit"
+      << std::endl
+      << std::endl << "Examples:"
+      << std::endl << "hdfs_createSnapshot hdfs://localhost.localdomain:8020/dir"
+      << std::endl << "hdfs_createSnapshot -n MySnapshot /dir1/dir2"
+      << std::endl;
+}
+
+int main(int argc, char *argv[]) {
+  //We should have at least 2 arguments
+  if (argc < 2) {
+    usage();
+    exit(EXIT_FAILURE);
+  }
+
+  int input;
+  std::string name;
+
+  //Using GetOpt to read in the values
+  opterr = 0;
+  while ((input = getopt(argc, argv, "hn:")) != -1) {
+    switch (input)
+    {
+    case 'h':
+      usage();
+      exit(EXIT_SUCCESS);
+    case 'n':
+      name = optarg;
+      break;
+    case '?':
+      if (optopt == 'n')
+        std::cerr << "Option -" << (char) optopt << " requires an argument." << std::endl;
+      else if (isprint(optopt))
+        std::cerr << "Unknown option `-" << (char) optopt << "'." << std::endl;
+      else
+        std::cerr << "Unknown option character `" << (char) optopt << "'." << std::endl;
+      usage();
+      exit(EXIT_FAILURE);
+    default:
+      exit(EXIT_FAILURE);
+    }
+  }
+  std::string uri_path = argv[optind];
+
+  //Building a URI object from the given uri_path
+  hdfs::optional<hdfs::URI> uri = hdfs::URI::parse_from_string(uri_path);
+  if (!uri) {
+    std::cerr << "Malformed URI: " << uri_path << std::endl;
+    exit(EXIT_FAILURE);
+  }
+
+  std::shared_ptr<hdfs::FileSystem> fs = hdfs::doConnect(uri.value(), false);
+  if (!fs) {
+    std::cerr << "Could not connect the file system. " << std::endl;
+    exit(EXIT_FAILURE);
+  }
+
+  hdfs::Status status = fs->CreateSnapshot(uri->get_path(), name);
+  if (!status.ok()) {
+    std::cerr << "Error: " << status.ToString() << std::endl;
+    exit(EXIT_FAILURE);
+  }
+
+  // Clean up static data and prevent valgrind memory leaks
+  google::protobuf::ShutdownProtobufLibrary();
+  return 0;
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55b3fdfe/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs_deleteSnapshot.cc
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs_deleteSnapshot.cc b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs_deleteSnapshot.cc
new file mode 100644
index 0000000..2082878
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs_deleteSnapshot.cc
@@ -0,0 +1,91 @@
+/*
+  Licensed to the Apache Software Foundation (ASF) under one
+  or more contributor license agreements.  See the NOTICE file
+  distributed with this work for additional information
+  regarding copyright ownership.  The ASF licenses this file
+  to you under the Apache License, Version 2.0 (the
+  "License"); you may not use this file except in compliance
+  with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing,
+  software distributed under the License is distributed on an
+  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+  KIND, either express or implied.  See the License for the
+  specific language governing permissions and limitations
+  under the License.
+*/
+
+#include <google/protobuf/stubs/common.h>
+#include <unistd.h>
+#include "tools_common.h"
+
+void usage(){
+  std::cout << "Usage: hdfs_deleteSnapshot [OPTION] PATH NAME"
+      << std::endl
+      << std::endl << "Delete a snapshot NAME from a snapshottable directory."
+      << std::endl << "This operation requires owner privilege of the snapshottable directory."
+      << std::endl
+      << std::endl << "  -h        display this help and exit"
+      << std::endl
+      << std::endl << "Examples:"
+      << std::endl << "hdfs_deleteSnapshot hdfs://localhost.localdomain:8020/dir mySnapshot"
+      << std::endl << "hdfs_deleteSnapshot /dir1/dir2 mySnapshot"
+      << std::endl;
+}
+
+int main(int argc, char *argv[]) {
+  //We should have at least 3 arguments: PATH and NAME are both required
+  if (argc < 3) {
+    usage();
+    exit(EXIT_FAILURE);
+  }
+
+  int input;
+
+  //Using GetOpt to read in the values
+  opterr = 0;
+  while ((input = getopt(argc, argv, "h")) != -1) {
+    switch (input)
+    {
+    case 'h':
+      usage();
+      exit(EXIT_SUCCESS);
+    case '?':
+      if (isprint(optopt))
+        std::cerr << "Unknown option `-" << (char) optopt << "'." << std::endl;
+      else
+        std::cerr << "Unknown option character `" << (char) optopt << "'." << std::endl;
+      usage();
+      exit(EXIT_FAILURE);
+    default:
+      exit(EXIT_FAILURE);
+    }
+  }
+  std::string uri_path = argv[optind];
+  std::string name = argv[optind+1];
+
+  //Building a URI object from the given uri_path
+  hdfs::optional<hdfs::URI> uri = hdfs::URI::parse_from_string(uri_path);
+  if (!uri) {
+    std::cerr << "Malformed URI: " << uri_path << std::endl;
+    exit(EXIT_FAILURE);
+  }
+
+  std::shared_ptr<hdfs::FileSystem> fs = hdfs::doConnect(uri.value(), false);
+  if (!fs) {
+    std::cerr << "Could not connect the file system. " << std::endl;
+    exit(EXIT_FAILURE);
+  }
+
+  hdfs::Status status = fs->DeleteSnapshot(uri->get_path(), name);
+  if (!status.ok()) {
+    std::cerr << "Error: " << status.ToString() << std::endl;
+    exit(EXIT_FAILURE);
+  }
+
+  // Clean up static data and prevent valgrind memory leaks
+  google::protobuf::ShutdownProtobufLibrary();
+  return 0;
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55b3fdfe/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs_df.cc
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs_df.cc b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs_df.cc
new file mode 100644
index 0000000..6170e93
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs_df.cc
@@ -0,0 +1,93 @@
+/*
+  Licensed to the Apache Software Foundation (ASF) under one
+  or more contributor license agreements.  See the NOTICE file
+  distributed with this work for additional information
+  regarding copyright ownership.  The ASF licenses this file
+  to you under the Apache License, Version 2.0 (the
+  "License"); you may not use this file except in compliance
+  with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing,
+  software distributed under the License is distributed on an
+  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+  KIND, either express or implied.  See the License for the
+  specific language governing permissions and limitations
+  under the License.
+*/
+
+#include <google/protobuf/stubs/common.h>
+#include <unistd.h>
+#include "tools_common.h"
+
+void usage(){
+  std::cout << "Usage: hdfs_df [OPTION] PATH"
+      << std::endl
+      << std::endl << "Displays size, used space, and available space of"
+      << std::endl << "the entire filesystem where PATH is located"
+      << std::endl
+      << std::endl << "  -h        display this help and exit"
+      << std::endl
+      << std::endl << "Examples:"
+      << std::endl << "hdfs_df hdfs://localhost.localdomain:8020/"
+      << std::endl << "hdfs_df /"
+      << std::endl;
+}
+
+int main(int argc, char *argv[]) {
+  //We should have at least 2 arguments
+  if (argc < 2) {
+    usage();
+    exit(EXIT_FAILURE);
+  }
+
+  int input;
+
+  //Using GetOpt to read in the values
+  opterr = 0;
+  while ((input = getopt(argc, argv, "h")) != -1) {
+    switch (input)
+    {
+    case 'h':
+      usage();
+      exit(EXIT_SUCCESS);
+    case '?':
+      if (isprint(optopt))
+        std::cerr << "Unknown option `-" << (char) optopt << "'." << std::endl;
+      else
+        std::cerr << "Unknown option character `" << (char) optopt << "'." << std::endl;
+      usage();
+      exit(EXIT_FAILURE);
+    default:
+      exit(EXIT_FAILURE);
+    }
+  }
+  std::string uri_path = argv[optind];
+
+  //Building a URI object from the given uri_path
+  hdfs::optional<hdfs::URI> uri = hdfs::URI::parse_from_string(uri_path);
+  if (!uri) {
+    std::cerr << "Malformed URI: " << uri_path << std::endl;
+    exit(EXIT_FAILURE);
+  }
+
+  std::shared_ptr<hdfs::FileSystem> fs = hdfs::doConnect(uri.value(), false);
+  if (!fs) {
+    std::cerr << "Could not connect the file system. " << std::endl;
+    exit(EXIT_FAILURE);
+  }
+
+  hdfs::FsInfo fs_info;
+
+  hdfs::Status status = fs->GetFsStats(fs_info);
+  if (!status.ok()) {
+    std::cerr << "Error: " << status.ToString() << std::endl;
+    exit(EXIT_FAILURE);
+  }
+  std::cout << fs_info.str("hdfs://" + fs->get_cluster_name()) << std::endl;
+
+  // Clean up static data and prevent valgrind memory leaks
+  google::protobuf::ShutdownProtobufLibrary();
+  return 0;
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55b3fdfe/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs_disallowSnapshot.cc
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs_disallowSnapshot.cc b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs_disallowSnapshot.cc
new file mode 100644
index 0000000..c181719
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs_disallowSnapshot.cc
@@ -0,0 +1,90 @@
+/*
+  Licensed to the Apache Software Foundation (ASF) under one
+  or more contributor license agreements.  See the NOTICE file
+  distributed with this work for additional information
+  regarding copyright ownership.  The ASF licenses this file
+  to you under the Apache License, Version 2.0 (the
+  "License"); you may not use this file except in compliance
+  with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing,
+  software distributed under the License is distributed on an
+  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+  KIND, either express or implied.  See the License for the
+  specific language governing permissions and limitations
+  under the License.
+*/
+
+#include <google/protobuf/stubs/common.h>
+#include <unistd.h>
+#include "tools_common.h"
+
+void usage(){
+  std::cout << "Usage: hdfs_disallowSnapshot [OPTION] PATH"
+      << std::endl
+      << std::endl << "Disallowing snapshots of a directory at PATH to be created."
+      << std::endl << "All snapshots of the directory must be deleted before disallowing snapshots."
+      << std::endl
+      << std::endl << "  -h        display this help and exit"
+      << std::endl
+      << std::endl << "Examples:"
+      << std::endl << "hdfs_disallowSnapshot hdfs://localhost.localdomain:8020/dir"
+      << std::endl << "hdfs_disallowSnapshot /dir1/dir2"
+      << std::endl;
+}
+
+int main(int argc, char *argv[]) {
+  //We should have at least 2 arguments
+  if (argc < 2) {
+    usage();
+    exit(EXIT_FAILURE);
+  }
+
+  int input;
+
+  //Using GetOpt to read in the values
+  opterr = 0;
+  while ((input = getopt(argc, argv, "h")) != -1) {
+    switch (input)
+    {
+    case 'h':
+      usage();
+      exit(EXIT_SUCCESS);
+    case '?':
+      if (isprint(optopt))
+        std::cerr << "Unknown option `-" << (char) optopt << "'." << std::endl;
+      else
+        std::cerr << "Unknown option character `" << (char) optopt << "'." << std::endl;
+      usage();
+      exit(EXIT_FAILURE);
+    default:
+      exit(EXIT_FAILURE);
+    }
+  }
+  std::string uri_path = argv[optind];
+
+  //Building a URI object from the given uri_path
+  hdfs::optional<hdfs::URI> uri = hdfs::URI::parse_from_string(uri_path);
+  if (!uri) {
+    std::cerr << "Malformed URI: " << uri_path << std::endl;
+    exit(EXIT_FAILURE);
+  }
+
+  std::shared_ptr<hdfs::FileSystem> fs = hdfs::doConnect(uri.value(), false);
+  if (!fs) {
+    std::cerr << "Could not connect the file system. " << std::endl;
+    exit(EXIT_FAILURE);
+  }
+
+  hdfs::Status status = fs->DisallowSnapshot(uri->get_path());
+  if (!status.ok()) {
+    std::cerr << "Error: " << status.ToString() << std::endl;
+    exit(EXIT_FAILURE);
+  }
+
+  // Clean up static data and prevent valgrind memory leaks
+  google::protobuf::ShutdownProtobufLibrary();
+  return 0;
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55b3fdfe/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs_du.cc
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs_du.cc b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs_du.cc
new file mode 100644
index 0000000..30269e3
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs_du.cc
@@ -0,0 +1,180 @@
+/*
+  Licensed to the Apache Software Foundation (ASF) under one
+  or more contributor license agreements.  See the NOTICE file
+  distributed with this work for additional information
+  regarding copyright ownership.  The ASF licenses this file
+  to you under the Apache License, Version 2.0 (the
+  "License"); you may not use this file except in compliance
+  with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing,
+  software distributed under the License is distributed on an
+  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+  KIND, either express or implied.  See the License for the
+  specific language governing permissions and limitations
+  under the License.
+*/
+
+#include <google/protobuf/stubs/common.h>
+#include <unistd.h>
+#include <future>
+#include "tools_common.h"
+
+void usage(){
+  std::cout << "Usage: hdfs_du [OPTION] PATH"
+      << std::endl
+      << std::endl << "Displays sizes of files and directories contained in the given PATH"
+      << std::endl << "or the length of a file in case PATH is just a file"
+      << std::endl
+      << std::endl << "  -R        operate on files and directories recursively"
+      << std::endl << "  -h        display this help and exit"
+      << std::endl
+      << std::endl << "Examples:"
+      << std::endl << "hdfs_du hdfs://localhost.localdomain:8020/dir/file"
+      << std::endl << "hdfs_du -R /dir1/dir2"
+      << std::endl;
+}
+
+struct GetContentSummaryState {
+  const std::function<void(const hdfs::Status &)> handler;
+  //The request counter is incremented once every time GetContentSummary async call is made
+  uint64_t request_counter;
+  //This boolean will be set when find returns the last result
+  bool find_is_done;
+  //Final status to be returned
+  hdfs::Status status;
+  //Shared variables will need protection with a lock
+  std::mutex lock;
+  GetContentSummaryState(const std::function<void(const hdfs::Status &)> & handler_,
+              uint64_t request_counter_, bool find_is_done_)
+      : handler(handler_),
+        request_counter(request_counter_),
+        find_is_done(find_is_done_),
+        status(),
+        lock() {
+  }
+};
+
+int main(int argc, char *argv[]) {
+  //We should have at least 2 arguments
+  if (argc < 2) {
+    usage();
+    exit(EXIT_FAILURE);
+  }
+
+  bool recursive = false;
+  int input;
+
+  //Using GetOpt to read in the values
+  opterr = 0;
+  while ((input = getopt(argc, argv, "Rh")) != -1) {
+    switch (input)
+    {
+    case 'R':
+      recursive = true;
+      break;
+    case 'h':
+      usage();
+      exit(EXIT_SUCCESS);
+    case '?':
+      if (isprint(optopt))
+        std::cerr << "Unknown option `-" << (char) optopt << "'." << std::endl;
+      else
+        std::cerr << "Unknown option character `" << (char) optopt << "'." << std::endl;
+      usage();
+      exit(EXIT_FAILURE);
+    default:
+      exit(EXIT_FAILURE);
+    }
+  }
+  std::string uri_path = argv[optind];
+
+  //Building a URI object from the given uri_path
+  hdfs::optional<hdfs::URI> uri = hdfs::URI::parse_from_string(uri_path);
+  if (!uri) {
+    std::cerr << "Malformed URI: " << uri_path << std::endl;
+    exit(EXIT_FAILURE);
+  }
+
+  std::shared_ptr<hdfs::FileSystem> fs = hdfs::doConnect(uri.value(), true);
+  if (!fs) {
+    std::cerr << "Could not connect the file system. " << std::endl;
+    exit(EXIT_FAILURE);
+  }
+
+  /* wrap async FileSystem::GetContentSummary with promise to make it a blocking call */
+  std::shared_ptr<std::promise<hdfs::Status>> promise = std::make_shared<std::promise<hdfs::Status>>();
+  std::future<hdfs::Status> future(promise->get_future());
+  auto handler = [promise](const hdfs::Status &s) {
+    promise->set_value(s);
+  };
+
+  //Allocating shared state, which includes:
+  //handler to be called, request counter, and a boolean to keep track if find is done
+  std::shared_ptr<GetContentSummaryState> state = std::make_shared<GetContentSummaryState>(handler, 0, false);
+
+  // Keep requesting more from Find until we process the entire listing. Call handler when Find is done and the request counter is 0.
+  // Find guarantees that the handler will only be called once at a time so we do not need locking in handlerFind.
+  auto handlerFind = [fs, state](const hdfs::Status &status_find, const std::vector<hdfs::StatInfo> & stat_infos, bool has_more_results) -> bool {
+
+    //For each result returned by Find we call async GetContentSummary with the handler below.
+    //GetContentSummary DOES NOT guarantee that the handler will only be called once at a time, so we DO need locking in handlerGetContentSummary.
+    auto handlerGetContentSummary = [state](const hdfs::Status &status_get_summary, const hdfs::ContentSummary &si) {
+      std::lock_guard<std::mutex> guard(state->lock);
+      std::cout << si.str_du() << std::endl;
+      //Decrement the counter once since we are done with this async call
+      if (!status_get_summary.ok() && state->status.ok()){
+        //We make sure we set state->status only on the first error.
+        state->status = status_get_summary;
+      }
+      state->request_counter--;
+      if(state->request_counter == 0 && state->find_is_done){
+        state->handler(state->status); //exit
+      }
+    };
+    if(!stat_infos.empty() && state->status.ok()) {
+      for (hdfs::StatInfo const& s : stat_infos) {
+        //Launch an asynchronous call to GetContentSummary for every returned result
+        state->request_counter++;
+        fs->GetContentSummary(s.full_path, handlerGetContentSummary);
+      }
+    }
+
+    //Lock this section because handlerGetContentSummary might be accessing the same
+    //shared variables simultaneously
+    std::lock_guard<std::mutex> guard(state->lock);
+    if (!status_find.ok() && state->status.ok()){
+      //We make sure we set state->status only on the first error.
+      state->status = status_find;
+    }
+    if(!has_more_results){
+      state->find_is_done = true;
+      if(state->request_counter == 0){
+        state->handler(state->status); //exit
+      }
+      return false;
+    }
+    return true;
+  };
+
+  if(!recursive){
+    //Asynchronous call to GetListing
+    fs->GetListing(uri->get_path(), handlerFind);
+  } else {
+    //Asynchronous call to Find
+    fs->Find(uri->get_path(), "*", hdfs::FileSystem::GetDefaultFindMaxDepth(), handlerFind);
+  }
+
+  /* block until promise is set */
+  hdfs::Status status = future.get();
+  if (!status.ok()) {
+    std::cerr << "Error: " << status.ToString() << std::endl;
+    exit(EXIT_FAILURE);
+  }
+
+  // Clean up static data and prevent valgrind memory leaks
+  google::protobuf::ShutdownProtobufLibrary();
+  return 0;
+}
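
The counter-plus-flag completion protocol shared by hdfs_du and hdfs_chown is subtle: completions can race each other and the producer, so the counter and the find_is_done flag must both be read and written under one lock. Below is a self-contained sketch with simulated async completions, assuming nothing about libhdfs++ internals.

#include <cstdint>
#include <functional>
#include <iostream>
#include <mutex>
#include <thread>
#include <vector>

// Mirrors GetContentSummaryState: a count of in-flight async calls, a flag
// set when the producer is done, and a lock protecting both.
struct State {
  std::mutex lock;
  uint64_t request_counter = 0;
  bool find_is_done = false;
  std::function<void()> on_complete;
};

int main() {
  State state;
  state.on_complete = [] { std::cout << "all requests finished" << std::endl; };

  std::vector<std::thread> workers;
  for (int i = 0; i < 8; ++i) {
    {
      std::lock_guard<std::mutex> g(state.lock);
      ++state.request_counter;  // one increment per async call launched
    }
    // Simulated async completion handler (like handlerGetContentSummary).
    workers.emplace_back([&state] {
      std::lock_guard<std::mutex> g(state.lock);
      if (--state.request_counter == 0 && state.find_is_done)
        state.on_complete();  // last completion after the producer finished
    });
  }

  {
    // Producer finished (has_more_results == false in handlerFind).
    std::lock_guard<std::mutex> g(state.lock);
    state.find_is_done = true;
    if (state.request_counter == 0)
      state.on_complete();  // every completion already ran
  }

  for (auto &t : workers) t.join();
  return 0;
}

Exactly one of the two on_complete call sites can fire, because both test the counter and the flag under the same mutex.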

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55b3fdfe/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs_find.cc
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs_find.cc b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs_find.cc
new file mode 100644
index 0000000..ef2ad7b
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs_find.cc
@@ -0,0 +1,150 @@
+/*
+  Licensed to the Apache Software Foundation (ASF) under one
+  or more contributor license agreements.  See the NOTICE file
+  distributed with this work for additional information
+  regarding copyright ownership.  The ASF licenses this file
+  to you under the Apache License, Version 2.0 (the
+  "License"); you may not use this file except in compliance
+  with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing,
+  software distributed under the License is distributed on an
+  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+  KIND, either express or implied.  See the License for the
+  specific language governing permissions and limitations
+  under the License.
+*/
+
+#include <google/protobuf/stubs/common.h>
+#include <unistd.h>
+#include <future>
+#include "tools_common.h"
+
+void usage(){
+  std::cout << "Usage: hdfs_find [OPTION] PATH"
+      << std::endl
+      << std::endl << "Finds all files recursively starting from the"
+      << std::endl << "specified PATH and prints their file paths."
+      << std::endl << "This hdfs_find tool mimics the POSIX find."
+      << std::endl
+      << std::endl << "Both PATH and NAME can have wild-cards."
+      << std::endl
+      << std::endl << "  -n NAME       if provided all results will be matching the NAME pattern"
+      << std::endl << "                otherwise, the implicit '*' will be used"
+      << std::endl << "                NAME allows wild-cards"
+      << std::endl
+      << std::endl << "  -m MAX_DEPTH  if provided the maximum depth to recurse after the end of"
+      << std::endl << "                the path is reached will be limited by MAX_DEPTH"
+      << std::endl << "                otherwise, the maximum depth to recurse is unbound"
+      << std::endl << "                MAX_DEPTH can be set to 0 for pure globbing and ignoring"
+      << std::endl << "                the NAME option (no recursion after the end of the path)"
+      << std::endl
+      << std::endl << "  -h            display this help and exit"
+      << std::endl
+      << std::endl << "Examples:"
+      << std::endl << "hdfs_find hdfs://localhost.localdomain:8020/dir?/tree* -n some?file*name"
+      << std::endl << "hdfs_find / -n file_name -m 3"
+      << std::endl;
+}
+
+int main(int argc, char *argv[]) {
+  //We should have at least 2 arguments
+  if (argc < 2) {
+    usage();
+    exit(EXIT_FAILURE);
+  }
+
+  int input;
+  //If NAME is not specified we use implicit "*"
+  std::string name = "*";
+  //If MAX_DEPTH is not specified we use the max value of uint32_t
+  uint32_t max_depth = hdfs::FileSystem::GetDefaultFindMaxDepth();
+
+  //Using GetOpt to read in the values
+  opterr = 0;
+  while ((input = getopt(argc, argv, "hn:m:")) != -1) {
+    switch (input)
+    {
+    case 'h':
+      usage();
+      exit(EXIT_SUCCESS);
+    case 'n':
+      name = optarg;
+      break;
+    case 'm':
+      max_depth = std::stoi(optarg);
+      break;
+    case '?':
+      if (optopt == 'n' || optopt == 'm')
+        std::cerr << "Option -" << (char) optopt << " requires an argument." << std::endl;
+      else if (isprint(optopt))
+        std::cerr << "Unknown option `-" << (char) optopt << "'." << std::endl;
+      else
+        std::cerr << "Unknown option character `" << (char) optopt << "'." << std::endl;
+      usage();
+      exit(EXIT_FAILURE);
+    default:
+      exit(EXIT_FAILURE);
+    }
+  }
+  std::string uri_path = argv[optind];
+
+  //Building a URI object from the given uri_path
+  hdfs::optional<hdfs::URI> uri = hdfs::URI::parse_from_string(uri_path);
+  if (!uri) {
+    std::cerr << "Malformed URI: " << uri_path << std::endl;
+    exit(EXIT_FAILURE);
+  }
+
+  std::shared_ptr<hdfs::FileSystem> fs = hdfs::doConnect(uri.value(), true);
+  if (!fs) {
+    std::cerr << "Could not connect the file system. " << std::endl;
+    exit(EXIT_FAILURE);
+  }
+
+  std::shared_ptr<std::promise<void>> promise = std::make_shared<std::promise<void>>();
+  std::future<void> future(promise->get_future());
+  hdfs::Status status = hdfs::Status::OK();
+
+  /**
+    * Keep requesting more until we get the entire listing. Set the promise
+    * when we have the entire listing to stop.
+    *
+    * Find guarantees that the handler will only be called once at a time,
+    * so we do not need any locking here. It also guarantees that the handler will be
+    * only called once with has_more_results set to false.
+    */
+  auto handler = [promise, &status]
+                  (const hdfs::Status &s, const std::vector<hdfs::StatInfo> & si, bool has_more_results) -> bool {
+    //Print result chunks as they arrive
+    if(!si.empty()) {
+      for (hdfs::StatInfo const& s : si) {
+        std::cout << s.str() << std::endl;
+      }
+    }
+    if(!s.ok() && status.ok()){
+      //We make sure we set 'status' only on the first error.
+      status = s;
+    }
+    if (!has_more_results) {
+      promise->set_value();  //set promise
+      return false;         //request stop sending results
+    }
+    return true;  //request more results
+  };
+
+  //Asynchronous call to Find
+  fs->Find(uri->get_path(), name, max_depth, handler);
+
+  //block until promise is set
+  future.get();
+  if(!status.ok()) {
+    std::cerr << "Error: " << status.ToString() << std::endl;
+  }
+
+  // Clean up static data and prevent valgrind memory leaks
+  google::protobuf::ShutdownProtobufLibrary();
+  return 0;
+}
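
The bool returned by the Find/GetListing handler implements a simple pull protocol: true asks for more batches, false stops the stream, and the final batch arrives with has_more_results == false. A local sketch of that contract against a fake paged source (no libhdfs++ types involved):

#include <algorithm>
#include <functional>
#include <iostream>
#include <string>
#include <vector>

// Deliver results in batches; keep calling the handler while it returns
// true and data remains, and call it exactly once with has_more == false.
void paged_find(
    const std::vector<std::string> &all, size_t batch,
    const std::function<bool(const std::vector<std::string> &, bool)> &handler) {
  if (all.empty()) {
    handler({}, false);
    return;
  }
  for (size_t i = 0; i < all.size(); i += batch) {
    std::vector<std::string> chunk(
        all.begin() + i, all.begin() + std::min(i + batch, all.size()));
    bool has_more = i + batch < all.size();
    if (!handler(chunk, has_more) || !has_more)
      return;
  }
}

int main() {
  std::vector<std::string> files = {"/a", "/b", "/c", "/d", "/e"};
  paged_find(files, 2, [](const std::vector<std::string> &chunk, bool more) {
    for (const auto &f : chunk) std::cout << f << std::endl;
    return more;  // keep requesting until the final batch arrives
  });
  return 0;
}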

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55b3fdfe/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs_find.cpp
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs_find.cpp b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs_find.cpp
deleted file mode 100644
index eca79c6..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs_find.cpp
+++ /dev/null
@@ -1,156 +0,0 @@
-/*
-  Licensed to the Apache Software Foundation (ASF) under one
-  or more contributor license agreements.  See the NOTICE file
-  distributed with this work for additional information
-  regarding copyright ownership.  The ASF licenses this file
-  to you under the Apache License, Version 2.0 (the
-  "License"); you may not use this file except in compliance
-  with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing,
-  software distributed under the License is distributed on an
-  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-  KIND, either express or implied.  See the License for the
-  specific language governing permissions and limitations
-  under the License.
-*/
-
-#include <google/protobuf/stubs/common.h>
-#include <unistd.h>
-#include <future>
-#include "tools_common.h"
-
-void usage(){
-  std::cout << "Usage: hdfs_find [OPTION] PATH"
-      << std::endl
-      << std::endl << "Finds all files recursively starting from the"
-      << std::endl << "specified PATH and prints their file paths."
-      << std::endl << "This hdfs_find tool mimics the POSIX find."
-      << std::endl
-      << std::endl << "Both PATH and NAME can have wild-cards."
-      << std::endl
-      << std::endl << "  -n NAME       if provided all results will be matching the NAME pattern"
-      << std::endl << "                otherwise, the implicit '*' will be used"
-      << std::endl << "                NAME allows wild-cards"
-      << std::endl
-      << std::endl << "  -m MAX_DEPTH  if provided the maximum depth to recurse after the end of"
-      << std::endl << "                the path is reached will be limited by MAX_DEPTH"
-      << std::endl << "                otherwise, the maximum depth to recurse is unbound"
-      << std::endl << "                MAX_DEPTH can be set to 0 for pure globbing and ignoring"
-      << std::endl << "                the NAME option (no recursion after the end of the path)"
-      << std::endl
-      << std::endl << "  -h            display this help and exit"
-      << std::endl
-      << std::endl << "Examples:"
-      << std::endl << "hdfs_find hdfs://localhost.localdomain:9433/dir?/tree* -n some?file*name"
-      << std::endl << "hdfs_find / -n file_name -m 3"
-      << std::endl;
-}
-
-int main(int argc, char *argv[]) {
-  //We should have at least 2 arguments
-  if (argc < 2) {
-    usage();
-    exit(EXIT_FAILURE);
-  }
-
-  int input;
-  //If NAME is not specified we use implicit "*"
-  std::string name = "*";
-  //If MAX_DEPTH is not specified we use the max value of uint32_t
-  uint32_t max_depth = hdfs::FileSystem::GetDefaultFindMaxDepth();
-
-  //Using GetOpt to read in the values
-  opterr = 0;
-  while ((input = getopt(argc, argv, "hn:m:")) != -1) {
-    switch (input)
-    {
-    case 'h':
-      usage();
-      exit(EXIT_SUCCESS);
-      break;
-    case 'n':
-      name = optarg;
-      break;
-    case 'm':
-      max_depth = std::stoi(optarg);
-      break;
-    case '?':
-      if (optopt == 'n' || optopt == 'm')
-        std::cerr << "Option -" << (char) optopt << " requires an argument." << std::endl;
-      else if (isprint(optopt))
-        std::cerr << "Unknown option `-" << (char) optopt << "'." << std::endl;
-      else
-        std::cerr << "Unknown option character `" << (char) optopt << "'." << std::endl;
-      usage();
-      exit(EXIT_FAILURE);
-    default:
-      exit(EXIT_FAILURE);
-    }
-  }
-  std::string uri_path = argv[optind];
-
-  //Building a URI object from the given uri_path
-  hdfs::optional<hdfs::URI> uri = hdfs::URI::parse_from_string(uri_path);
-  if (!uri) {
-    std::cerr << "Malformed URI: " << uri_path << std::endl;
-    exit(EXIT_FAILURE);
-  }
-
-  //TODO: HDFS-9539 Currently options can be returned empty
-  hdfs::Options options = *hdfs::getOptions();
-
-  //TODO: HDFS-9539 - until then we increase the time-out to allow all recursive async calls to finish
-  options.rpc_timeout = std::numeric_limits<int>::max();
-
-  std::shared_ptr<hdfs::FileSystem> fs = hdfs::doConnect(uri.value(), options);
-  if (!fs) {
-    std::cerr << "Could not connect the file system. " << std::endl;
-    exit(EXIT_FAILURE);
-  }
-
-  std::promise<void> promise;
-  std::future<void> future(promise.get_future());
-  hdfs::Status status = hdfs::Status::OK();
-
-  /**
-    * Keep requesting more until we get the entire listing. Set the promise
-    * when we have the entire listing to stop.
-    *
-    * Find guarantees that the handler will only be called once at a time,
-    * so we do not need any locking here
-    */
-  auto handler = [&promise, &status]
-                  (const hdfs::Status &s, const std::vector<hdfs::StatInfo> & si, bool has_more_results) -> bool {
-    //Print result chunks as they arrive
-    if(!si.empty()) {
-      for (hdfs::StatInfo const& s : si) {
-        std::cout << s.full_path << std::endl;
-      }
-    }
-    if(!s.ok() && status.ok()){
-      //We make sure we set 'status' only on the first error.
-      status = s;
-    }
-    if (!has_more_results) {
-      promise.set_value();  //set promise
-      return false;         //request stop sending results
-    }
-    return true;  //request more results
-  };
-
-  //Asynchronous call to Find
-  fs->Find(uri->get_path(), name, max_depth, handler);
-
-  //block until promise is set
-  future.get();
-  if(!status.ok()) {
-    std::cerr << "Error: " << status.ToString() << std::endl;
-  }
-
-  // Clean up static data and prevent valgrind memory leaks
-  google::protobuf::ShutdownProtobufLibrary();
-  return 0;
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55b3fdfe/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs_get.cc
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs_get.cc b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs_get.cc
new file mode 100644
index 0000000..f1ff1c8
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs_get.cc
@@ -0,0 +1,92 @@
+/*
+  Licensed to the Apache Software Foundation (ASF) under one
+  or more contributor license agreements.  See the NOTICE file
+  distributed with this work for additional information
+  regarding copyright ownership.  The ASF licenses this file
+  to you under the Apache License, Version 2.0 (the
+  "License"); you may not use this file except in compliance
+  with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing,
+  software distributed under the License is distributed on an
+  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+  KIND, either express or implied.  See the License for the
+  specific language governing permissions and limitations
+  under the License.
+*/
+
+#include <google/protobuf/stubs/common.h>
+#include <unistd.h>
+#include "tools_common.h"
+
+void usage(){
+  std::cout << "Usage: hdfs_get [OPTION] SRC_FILE DST_FILE"
+      << std::endl
+      << std::endl << "Copy SRC_FILE from hdfs to DST_FILE on the local file system."
+      << std::endl
+      << std::endl << "  -h  display this help and exit"
+      << std::endl
+      << std::endl << "Examples:"
+      << std::endl << "hdfs_get hdfs://localhost.localdomain:8020/dir/file /home/usr/myfile"
+      << std::endl << "hdfs_get /dir/file /home/usr/dir/file"
+      << std::endl;
+}
+
+int main(int argc, char *argv[]) {
+  if (argc != 3 && argc != 4) {
+    usage();
+    exit(EXIT_FAILURE);
+  }
+
+  int input;
+
+  //Using GetOpt to read in the values
+  opterr = 0;
+  while ((input = getopt(argc, argv, "h")) != -1) {
+    switch (input)
+    {
+    case 'h':
+      usage();
+      exit(EXIT_SUCCESS);
+    case '?':
+      if (isprint(optopt))
+        std::cerr << "Unknown option `-" << (char) optopt << "'." << std::endl;
+      else
+        std::cerr << "Unknown option character `" << (char) optopt << "'." << std::endl;
+      usage();
+      exit(EXIT_FAILURE);
+    default:
+      exit(EXIT_FAILURE);
+    }
+  }
+
+  std::string uri_path = argv[optind];
+  std::string dest = argv[optind+1];
+
+  //Building a URI object from the given uri_path
+  hdfs::optional<hdfs::URI> uri = hdfs::URI::parse_from_string(uri_path);
+  if (!uri) {
+    std::cerr << "Malformed URI: " << uri_path << std::endl;
+    exit(EXIT_FAILURE);
+  }
+
+  std::shared_ptr<hdfs::FileSystem> fs = hdfs::doConnect(uri.value(), false);
+  if (!fs) {
+    std::cerr << "Could not connect the file system. " << std::endl;
+    exit(EXIT_FAILURE);
+  }
+
+  std::FILE* dst_file = std::fopen(dest.c_str(), "wb");
+  if(!dst_file){
+    std::cerr << "Unable to open the destination file: " << dest << std::endl;
+    exit(EXIT_FAILURE);
+  }
+  readFile(fs, uri->get_path(), 0, dst_file, false);
+  std::fclose(dst_file);
+
+  // Clean up static data and prevent valgrind memory leaks
+  google::protobuf::ShutdownProtobufLibrary();
+  return 0;
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55b3fdfe/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs_ls.cc
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs_ls.cc b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs_ls.cc
new file mode 100644
index 0000000..710dbf7
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs_ls.cc
@@ -0,0 +1,134 @@
+/*
+  Licensed to the Apache Software Foundation (ASF) under one
+  or more contributor license agreements.  See the NOTICE file
+  distributed with this work for additional information
+  regarding copyright ownership.  The ASF licenses this file
+  to you under the Apache License, Version 2.0 (the
+  "License"); you may not use this file except in compliance
+  with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing,
+  software distributed under the License is distributed on an
+  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+  KIND, either express or implied.  See the License for the
+  specific language governing permissions and limitations
+  under the License.
+*/
+
+#include <google/protobuf/stubs/common.h>
+#include <unistd.h>
+#include <future>
+#include "tools_common.h"
+
+void usage(){
+  std::cout << "Usage: hdfs_ls [OPTION] FILE"
+      << std::endl
+      << std::endl << "List information about the FILEs."
+      << std::endl
+      << std::endl << "  -R        list subdirectories recursively"
+      << std::endl << "  -h        display this help and exit"
+      << std::endl
+      << std::endl << "Examples:"
+      << std::endl << "hdfs_ls hdfs://localhost.localdomain:8020/dir"
+      << std::endl << "hdfs_ls -R /dir1/dir2"
+      << std::endl;
+}
+
+int main(int argc, char *argv[]) {
+  //We should have at least 2 arguments
+  if (argc < 2) {
+    usage();
+    exit(EXIT_FAILURE);
+  }
+
+  bool recursive = false;
+  int input;
+
+  //Using GetOpt to read in the values
+  opterr = 0;
+  while ((input = getopt(argc, argv, "Rh")) != -1) {
+    switch (input)
+    {
+    case 'R':
+      recursive = true;
+      break;
+    case 'h':
+      usage();
+      exit(EXIT_SUCCESS);
+    case '?':
+      if (isprint(optopt))
+        std::cerr << "Unknown option `-" << (char) optopt << "'." << std::endl;
+      else
+        std::cerr << "Unknown option character `" << (char) optopt << "'." << std::endl;
+      usage();
+      exit(EXIT_FAILURE);
+    default:
+      exit(EXIT_FAILURE);
+    }
+  }
+  std::string uri_path = argv[optind];
+
+  //Building a URI object from the given uri_path
+  hdfs::optional<hdfs::URI> uri = hdfs::URI::parse_from_string(uri_path);
+  if (!uri) {
+    std::cerr << "Malformed URI: " << uri_path << std::endl;
+    exit(EXIT_FAILURE);
+  }
+
+  std::shared_ptr<hdfs::FileSystem> fs = hdfs::doConnect(uri.value(), true);
+  if (!fs) {
+    std::cerr << "Could not connect the file system. " << std::endl;
+    exit(EXIT_FAILURE);
+  }
+
+  std::shared_ptr<std::promise<void>> promise = std::make_shared<std::promise<void>>();
+  std::future<void> future(promise->get_future());
+  hdfs::Status status = hdfs::Status::OK();
+
+  /**
+   * Keep requesting more results until we have the entire listing, and set
+   * the promise when we are done.
+   *
+   * Find and GetListing guarantee that the handler will only be called once at a time,
+   * so we do not need any locking here. They also guarantee that the handler will
+   * only be called once with has_more_results set to false.
+   */
+  auto handler = [promise, &status]
+                  (const hdfs::Status &s, const std::vector<hdfs::StatInfo> & si, bool has_more_results) -> bool {
+    //Print result chunks as they arrive
+    if(!si.empty()) {
+      //Use a distinct name so we do not shadow the Status parameter 's'
+      for (hdfs::StatInfo const& stat_info : si) {
+        std::cout << stat_info.str() << std::endl;
+      }
+    }
+    if(!s.ok() && status.ok()){
+      //We make sure we set 'status' only on the first error.
+      status = s;
+    }
+    if (!has_more_results) {
+      promise->set_value();  //set promise
+      return false;         //request stop sending results
+    }
+    return true;  //request more results
+  };
+
+  if(!recursive){
+    //Asynchronous call to GetListing
+    fs->GetListing(uri->get_path(), handler);
+  } else {
+    //Asynchronous call to Find
+    fs->Find(uri->get_path(), "*", hdfs::FileSystem::GetDefaultFindMaxDepth(), handler);
+  }
+
+  //block until promise is set
+  future.get();
+  if(!status.ok()) {
+    std::cerr << "Error: " << status.ToString() << std::endl;
+  }
+
+  // Clean up static data and prevent valgrind memory leaks
+  google::protobuf::ShutdownProtobufLibrary();
+  return 0;
+}
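
The promise/future wrapper in hdfs_ls is the synchronization idiom all of
these tools use to turn an asynchronous callback API into a blocking call.
Stripped of the HDFS types, it reduces to the following sketch (standard
C++11 only; run_blocking and async_op are illustrative names):

    #include <functional>
    #include <future>
    #include <memory>

    // Block the caller until an asynchronous operation invokes its
    // completion handler.
    void run_blocking(std::function<void(std::function<void()>)> async_op) {
      auto promise = std::make_shared<std::promise<void>>();
      std::future<void> future = promise->get_future();
      async_op([promise]() { promise->set_value(); });  // completion handler
      future.get();  // waits here until the handler fires
    }

The shared_ptr is what keeps the promise alive: the handler may run after the
enclosing scope has moved on, so ownership travels with the lambda capture
rather than with the stack frame.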

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55b3fdfe/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs_mkdir.cc
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs_mkdir.cc b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs_mkdir.cc
new file mode 100644
index 0000000..c4f08e3
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs_mkdir.cc
@@ -0,0 +1,102 @@
+/*
+  Licensed to the Apache Software Foundation (ASF) under one
+  or more contributor license agreements.  See the NOTICE file
+  distributed with this work for additional information
+  regarding copyright ownership.  The ASF licenses this file
+  to you under the Apache License, Version 2.0 (the
+  "License"); you may not use this file except in compliance
+  with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing,
+  software distributed under the License is distributed on an
+  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+  KIND, either express or implied.  See the License for the
+  specific language governing permissions and limitations
+  under the License.
+*/
+
+#include <google/protobuf/stubs/common.h>
+#include <unistd.h>
+#include "tools_common.h"
+
+void usage(){
+  std::cout << "Usage: hdfs_mkdir [OPTION] DIRECTORY"
+      << std::endl
+      << std::endl << "Create the DIRECTORY(ies), if they do not already exist."
+      << std::endl
+      << std::endl << "  -p        make parent directories as needed"
+      << std::endl << "  -m  MODE  set file mode (octal permissions) for the new DIRECTORY(ies)"
+      << std::endl << "  -h        display this help and exit"
+      << std::endl
+      << std::endl << "Examples:"
+      << std::endl << "hdfs_mkdir hdfs://localhost.localdomain:8020/dir1/dir2"
+      << std::endl << "hdfs_mkdir -p /extant_dir/non_extant_dir/non_extant_dir/new_dir"
+      << std::endl;
+}
+
+int main(int argc, char *argv[]) {
+  //We should have at least 2 arguments
+  if (argc < 2) {
+    usage();
+    exit(EXIT_FAILURE);
+  }
+
+  bool create_parents = false;
+  uint16_t permissions = hdfs::FileSystem::GetDefaultPermissionMask();
+  int input;
+
+  //Using GetOpt to read in the values
+  opterr = 0;
+  while ((input = getopt(argc, argv, "pm:h")) != -1) {
+    switch (input)
+    {
+    case 'p':
+      create_parents = true;
+      break;
+    case 'h':
+      usage();
+      exit(EXIT_SUCCESS);
+    case 'm':
+      //Get octal permissions for the new DIRECTORY(ies)
+      permissions = strtol(optarg, NULL, 8);
+      break;
+    case '?':
+      if (optopt == 'm')
+        std::cerr << "Option -" << (char) optopt << " requires an argument." << std::endl;
+      else if (isprint(optopt))
+        std::cerr << "Unknown option `-" << (char) optopt << "'." << std::endl;
+      else
+        std::cerr << "Unknown option character `" << (char) optopt << "'." << std::endl;
+      usage();
+      exit(EXIT_FAILURE);
+    default:
+      exit(EXIT_FAILURE);
+    }
+  }
+  std::string uri_path = argv[optind];
+
+  //Building a URI object from the given uri_path
+  hdfs::optional<hdfs::URI> uri = hdfs::URI::parse_from_string(uri_path);
+  if (!uri) {
+    std::cerr << "Malformed URI: " << uri_path << std::endl;
+    exit(EXIT_FAILURE);
+  }
+
+  std::shared_ptr<hdfs::FileSystem> fs = hdfs::doConnect(uri.value(), false);
+  if (!fs) {
+    std::cerr << "Could not connect the file system. " << std::endl;
+    exit(EXIT_FAILURE);
+  }
+
+  hdfs::Status status = fs->Mkdirs(uri->get_path(), permissions, create_parents);
+  if (!status.ok()) {
+    std::cerr << "Error: " << status.ToString() << std::endl;
+    exit(EXIT_FAILURE);
+  }
+
+  // Clean up static data and prevent valgrind memory leaks
+  google::protobuf::ShutdownProtobufLibrary();
+  return 0;
+}
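
One thing to watch in the '-m' handling: strtol(optarg, NULL, 8) parses the
mode as octal but silently yields 0 on non-numeric input, which would create
the directory with mode 0000. A slightly more defensive variant (plain C
standard library; requires <cstdlib> and <cerrno>) might be:

    //Parse an octal mode string and reject anything malformed or out of range
    char *end = NULL;
    errno = 0;
    long mode = strtol(optarg, &end, 8);
    if (errno != 0 || end == optarg || *end != '\0' || mode < 0 || mode > 07777) {
      std::cerr << "Invalid octal mode: " << optarg << std::endl;
      exit(EXIT_FAILURE);
    }
    permissions = (uint16_t) mode;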

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55b3fdfe/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs_moveToLocal.cc
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs_moveToLocal.cc b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs_moveToLocal.cc
new file mode 100644
index 0000000..5bb0754
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs_moveToLocal.cc
@@ -0,0 +1,94 @@
+/*
+  Licensed to the Apache Software Foundation (ASF) under one
+  or more contributor license agreements.  See the NOTICE file
+  distributed with this work for additional information
+  regarding copyright ownership.  The ASF licenses this file
+  to you under the Apache License, Version 2.0 (the
+  "License"); you may not use this file except in compliance
+  with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing,
+  software distributed under the License is distributed on an
+  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+  KIND, either express or implied.  See the License for the
+  specific language governing permissions and limitations
+  under the License.
+*/
+
+#include <google/protobuf/stubs/common.h>
+#include <unistd.h>
+#include "tools_common.h"
+
+void usage(){
+  std::cout << "Usage: hdfs_moveToLocal [OPTION] SRC_FILE DST_FILE"
+      << std::endl
+      << std::endl << "Move SRC_FILE from hdfs to DST_FILE on the local file system."
+      << std::endl << "Moving is done by copying SRC_FILE to DST_FILE, and then"
+      << std::endl << "deleting DST_FILE if copy succeeded."
+      << std::endl
+      << std::endl << "  -h  display this help and exit"
+      << std::endl
+      << std::endl << "Examples:"
+      << std::endl << "hdfs_moveToLocal hdfs://localhost.localdomain:8020/dir/file /home/usr/myfile"
+      << std::endl << "hdfs_moveToLocal /dir/file /home/usr/dir/file"
+      << std::endl;
+}
+
+int main(int argc, char *argv[]) {
+  //We should have 3 or 4 arguments (an optional -h plus SRC_FILE and DST_FILE)
+  if (argc < 3 || argc > 4) {
+    usage();
+    exit(EXIT_FAILURE);
+  }
+
+  int input;
+
+  //Using GetOpt to read in the values
+  opterr = 0;
+  while ((input = getopt(argc, argv, "h")) != -1) {
+    switch (input)
+    {
+    case 'h':
+      usage();
+      exit(EXIT_SUCCESS);
+    case '?':
+      if (isprint(optopt))
+        std::cerr << "Unknown option `-" << (char) optopt << "'." << std::endl;
+      else
+        std::cerr << "Unknown option character `" << (char) optopt << "'." << std::endl;
+      usage();
+      exit(EXIT_FAILURE);
+    default:
+      exit(EXIT_FAILURE);
+    }
+  }
+
+  std::string uri_path = argv[optind];
+  std::string dest = argv[optind+1];
+
+  //Building a URI object from the given uri_path
+  hdfs::optional<hdfs::URI> uri = hdfs::URI::parse_from_string(uri_path);
+  if (!uri) {
+    std::cerr << "Malformed URI: " << uri_path << std::endl;
+    exit(EXIT_FAILURE);
+  }
+
+  std::shared_ptr<hdfs::FileSystem> fs = hdfs::doConnect(uri.value(), false);
+  if (!fs) {
+    std::cerr << "Could not connect the file system. " << std::endl;
+    exit(EXIT_FAILURE);
+  }
+
+  std::FILE* dst_file = std::fopen(dest.c_str(), "wb");
+  if(!dst_file){
+    std::cerr << "Unable to open the destination file: " << dest << std::endl;
+    exit(EXIT_FAILURE);
+  }
+  readFile(fs, uri->get_path(), 0, dst_file, true);
+  std::fclose(dst_file);
+
+  // Clean up static data and prevent valgrind memory leaks
+  google::protobuf::ShutdownProtobufLibrary();
+  return 0;
+}
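
Note that as written the tool performs only the copy half of the move: the
usage text promises that SRC_FILE is deleted after a successful copy, but no
delete is ever issued. Assuming readFile signals success (the call here
discards any result), the missing step would slot in after the fclose and
would look roughly like the Delete call used by hdfs_rm.cc below:

    //Sketch only: remove the HDFS source once the local copy has succeeded
    hdfs::Status status = fs->Delete(uri->get_path(), /*recursive=*/false);
    if (!status.ok()) {
      std::cerr << "Error: " << status.ToString() << std::endl;
      exit(EXIT_FAILURE);
    }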

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55b3fdfe/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs_renameSnapshot.cc
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs_renameSnapshot.cc b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs_renameSnapshot.cc
new file mode 100644
index 0000000..3262b34
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs_renameSnapshot.cc
@@ -0,0 +1,92 @@
+/*
+  Licensed to the Apache Software Foundation (ASF) under one
+  or more contributor license agreements.  See the NOTICE file
+  distributed with this work for additional information
+  regarding copyright ownership.  The ASF licenses this file
+  to you under the Apache License, Version 2.0 (the
+  "License"); you may not use this file except in compliance
+  with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing,
+  software distributed under the License is distributed on an
+  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+  KIND, either express or implied.  See the License for the
+  specific language governing permissions and limitations
+  under the License.
+*/
+
+#include <google/protobuf/stubs/common.h>
+#include <unistd.h>
+#include "tools_common.h"
+
+void usage(){
+  std::cout << "Usage: hdfs_renameSnapshot [OPTION] PATH OLD_NAME NEW_NAME"
+      << std::endl
+      << std::endl << "Rename a snapshot from OLD_NAME to NEW_NAME."
+      << std::endl << "This operation requires owner privilege of the snapshottable directory."
+      << std::endl
+      << std::endl << "  -h        display this help and exit"
+      << std::endl
+      << std::endl << "Examples:"
+      << std::endl << "hdfs_renameSnapshot hdfs://localhost.localdomain:8020/dir oldDir newDir"
+      << std::endl << "hdfs_renameSnapshot /dir1/dir2 oldSnap newSnap"
+      << std::endl;
+}
+
+int main(int argc, char *argv[]) {
+  //We should have at least 4 arguments (PATH, OLD_NAME, and NEW_NAME)
+  if (argc < 4) {
+    usage();
+    exit(EXIT_FAILURE);
+  }
+
+  int input;
+
+  //Using GetOpt to read in the values
+  opterr = 0;
+  while ((input = getopt(argc, argv, "h")) != -1) {
+    switch (input)
+    {
+    case 'h':
+      usage();
+      exit(EXIT_SUCCESS);
+    case '?':
+      if (isprint(optopt))
+        std::cerr << "Unknown option `-" << (char) optopt << "'." << std::endl;
+      else
+        std::cerr << "Unknown option character `" << (char) optopt << "'." << std::endl;
+      usage();
+      exit(EXIT_FAILURE);
+    default:
+      exit(EXIT_FAILURE);
+    }
+  }
+  std::string uri_path = argv[optind];
+  std::string old_name = argv[optind+1];
+  std::string new_name = argv[optind+2];
+
+  //Building a URI object from the given uri_path
+  hdfs::optional<hdfs::URI> uri = hdfs::URI::parse_from_string(uri_path);
+  if (!uri) {
+    std::cerr << "Malformed URI: " << uri_path << std::endl;
+    exit(EXIT_FAILURE);
+  }
+
+  std::shared_ptr<hdfs::FileSystem> fs = hdfs::doConnect(uri.value(), false);
+  if (!fs) {
+    std::cerr << "Could not connect the file system. " << std::endl;
+    exit(EXIT_FAILURE);
+  }
+
+  hdfs::Status status = fs->RenameSnapshot(uri->get_path(), old_name, new_name);
+  if (!status.ok()) {
+    std::cerr << "Error: " << status.ToString() << std::endl;
+    exit(EXIT_FAILURE);
+  }
+
+  // Clean up static data and prevent valgrind memory leaks
+  google::protobuf::ShutdownProtobufLibrary();
+  return 0;
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55b3fdfe/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs_rm.cc
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs_rm.cc b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs_rm.cc
new file mode 100644
index 0000000..ddea730
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs_rm.cc
@@ -0,0 +1,94 @@
+/*
+  Licensed to the Apache Software Foundation (ASF) under one
+  or more contributor license agreements.  See the NOTICE file
+  distributed with this work for additional information
+  regarding copyright ownership.  The ASF licenses this file
+  to you under the Apache License, Version 2.0 (the
+  "License"); you may not use this file except in compliance
+  with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing,
+  software distributed under the License is distributed on an
+  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+  KIND, either express or implied.  See the License for the
+  specific language governing permissions and limitations
+  under the License.
+*/
+
+#include <google/protobuf/stubs/common.h>
+#include <unistd.h>
+#include "tools_common.h"
+
+void usage(){
+  std::cout << "Usage: hdfs_rm [OPTION] FILE"
+      << std::endl
+      << std::endl << "Remove (unlink) the FILE(s) or directory(ies)."
+      << std::endl
+      << std::endl << "  -R        remove directories and their contents recursively"
+      << std::endl << "  -h        display this help and exit"
+      << std::endl
+      << std::endl << "Examples:"
+      << std::endl << "hdfs_rm hdfs://localhost.localdomain:8020/dir/file"
+      << std::endl << "hdfs_rm -R /dir1/dir2"
+      << std::endl;
+}
+
+int main(int argc, char *argv[]) {
+  //We should have at least 2 arguments
+  if (argc < 2) {
+    usage();
+    exit(EXIT_FAILURE);
+  }
+
+  bool recursive = false;
+  int input;
+
+  //Using GetOpt to read in the values
+  opterr = 0;
+  while ((input = getopt(argc, argv, "Rh")) != -1) {
+    switch (input)
+    {
+    case 'R':
+      recursive = true;
+      break;
+    case 'h':
+      usage();
+      exit(EXIT_SUCCESS);
+    case '?':
+      if (isprint(optopt))
+        std::cerr << "Unknown option `-" << (char) optopt << "'." << std::endl;
+      else
+        std::cerr << "Unknown option character `" << (char) optopt << "'." << std::endl;
+      usage();
+      exit(EXIT_FAILURE);
+    default:
+      exit(EXIT_FAILURE);
+    }
+  }
+  std::string uri_path = argv[optind];
+
+  //Building a URI object from the given uri_path
+  hdfs::optional<hdfs::URI> uri = hdfs::URI::parse_from_string(uri_path);
+  if (!uri) {
+    std::cerr << "Malformed URI: " << uri_path << std::endl;
+    exit(EXIT_FAILURE);
+  }
+
+  std::shared_ptr<hdfs::FileSystem> fs = hdfs::doConnect(uri.value(), true);
+  if (!fs) {
+    std::cerr << "Could not connect the file system. " << std::endl;
+    exit(EXIT_FAILURE);
+  }
+
+  hdfs::Status status = fs->Delete(uri->get_path(), recursive);
+  if (!status.ok()) {
+    std::cerr << "Error: " << status.ToString() << std::endl;
+    exit(EXIT_FAILURE);
+  }
+
+  // Clean up static data and prevent valgrind memory leaks
+  google::protobuf::ShutdownProtobufLibrary();
+  return 0;
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55b3fdfe/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs_setrep.cc
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs_setrep.cc b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs_setrep.cc
new file mode 100644
index 0000000..97aaee2
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs_setrep.cc
@@ -0,0 +1,176 @@
+/*
+  Licensed to the Apache Software Foundation (ASF) under one
+  or more contributor license agreements.  See the NOTICE file
+  distributed with this work for additional information
+  regarding copyright ownership.  The ASF licenses this file
+  to you under the Apache License, Version 2.0 (the
+  "License"); you may not use this file except in compliance
+  with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing,
+  software distributed under the License is distributed on an
+  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+  KIND, either express or implied.  See the License for the
+  specific language governing permissions and limitations
+  under the License.
+*/
+
+#include <google/protobuf/stubs/common.h>
+#include <unistd.h>
+#include <future>
+#include "tools_common.h"
+
+void usage(){
+  std::cout << "Usage: hdfs_setrep [OPTION] NUM_REPLICAS PATH"
+      << std::endl
+      << std::endl << "Changes the replication factor of a file at PATH. If PATH is a directory then the command"
+      << std::endl << "recursively changes the replication factor of all files under the directory tree rooted at PATH."
+      << std::endl
+      << std::endl << "  -h  display this help and exit"
+      << std::endl
+      << std::endl << "Examples:"
+      << std::endl << "hdfs_setrep 5 hdfs://localhost.localdomain:8020/dir/file"
+      << std::endl << "hdfs_setrep 3 /dir1/dir2"
+      << std::endl;
+}
+
+struct SetReplicationState {
+  const uint16_t replication;
+  const std::function<void(const hdfs::Status &)> handler;
+  //The request counter is incremented once every time SetReplication async call is made
+  uint64_t request_counter;
+  //This boolean will be set when find returns the last result
+  bool find_is_done;
+  //Final status to be returned
+  hdfs::Status status;
+  //Shared variables will need protection with a lock
+  std::mutex lock;
+  SetReplicationState(const uint16_t replication_, const std::function<void(const hdfs::Status &)> & handler_,
+              uint64_t request_counter_, bool find_is_done_)
+      : replication(replication_),
+        handler(handler_),
+        request_counter(request_counter_),
+        find_is_done(find_is_done_),
+        status(),
+        lock() {
+  }
+};
+
+int main(int argc, char *argv[]) {
+  //We should have at least 3 arguments (NUM_REPLICAS and PATH)
+  if (argc < 3) {
+    usage();
+    exit(EXIT_FAILURE);
+  }
+
+  int input;
+
+  //Using GetOpt to read in the values
+  opterr = 0;
+  while ((input = getopt(argc, argv, "h")) != -1) {
+    switch (input)
+    {
+    case 'h':
+      usage();
+      exit(EXIT_SUCCESS);
+    case '?':
+      if (isprint(optopt))
+        std::cerr << "Unknown option `-" << (char) optopt << "'." << std::endl;
+      else
+        std::cerr << "Unknown option character `" << (char) optopt << "'." << std::endl;
+      usage();
+      exit(EXIT_FAILURE);
+    default:
+      exit(EXIT_FAILURE);
+    }
+  }
+  std::string repl = argv[optind];
+  std::string uri_path = argv[optind + 1];
+
+  //Building a URI object from the given uri_path
+  hdfs::optional<hdfs::URI> uri = hdfs::URI::parse_from_string(uri_path);
+  if (!uri) {
+    std::cerr << "Malformed URI: " << uri_path << std::endl;
+    exit(EXIT_FAILURE);
+  }
+
+  std::shared_ptr<hdfs::FileSystem> fs = hdfs::doConnect(uri.value(), true);
+  if (!fs) {
+    std::cerr << "Could not connect the file system. " << std::endl;
+    exit(EXIT_FAILURE);
+  }
+
+  /* wrap async FileSystem::SetReplication with promise to make it a blocking call */
+  std::shared_ptr<std::promise<hdfs::Status>> promise = std::make_shared<std::promise<hdfs::Status>>();
+  std::future<hdfs::Status> future(promise->get_future());
+  auto handler = [promise](const hdfs::Status &s) {
+    promise->set_value(s);
+  };
+
+  //NUM_REPLICAS is a decimal count, so parse it in base 10
+  uint16_t replication = std::stoi(repl, nullptr, 10);
+  //Allocating shared state, which includes:
+  //replication to be set, handler to be called, request counter, and a boolean to keep track if find is done
+  std::shared_ptr<SetReplicationState> state = std::make_shared<SetReplicationState>(replication, handler, 0, false);
+
+  // Keep requesting more results from Find until we have processed the entire listing. Call the handler when Find is done and the request counter is 0.
+  // Find guarantees that the handler will only be called once at a time, so we do not need locking in handlerFind.
+  auto handlerFind = [fs, state](const hdfs::Status &status_find, const std::vector<hdfs::StatInfo> & stat_infos, bool has_more_results) -> bool {
+
+    //For each result returned by Find we call async SetReplication with the handler below.
+    //SetReplication DOES NOT guarantee that the handler will only be called once at a time, so we DO need locking in handlerSetReplication.
+    auto handlerSetReplication = [state](const hdfs::Status &status_set_replication) {
+      std::lock_guard<std::mutex> guard(state->lock);
+
+      //Decrement the counter once since we are done with this async call
+      if (!status_set_replication.ok() && state->status.ok()){
+        //We make sure we set state->status only on the first error.
+        state->status = status_set_replication;
+      }
+      state->request_counter--;
+      if(state->request_counter == 0 && state->find_is_done){
+        state->handler(state->status); //exit
+      }
+    };
+    if(!stat_infos.empty() && state->status.ok()) {
+      for (hdfs::StatInfo const& s : stat_infos) {
+        //Launch an asynchronous call to SetReplication for every returned file
+        if(s.file_type == hdfs::StatInfo::IS_FILE){
+          state->request_counter++;
+          fs->SetReplication(s.full_path, state->replication, handlerSetReplication);
+        }
+      }
+    }
+
+    //Lock this section because handlerSetReplication might be accessing the same
+    //shared variables simultaneously
+    std::lock_guard<std::mutex> guard(state->lock);
+    if (!status_find.ok() && state->status.ok()){
+      //We make sure we set state->status only on the first error.
+      state->status = status_find;
+    }
+    if(!has_more_results){
+      state->find_is_done = true;
+      if(state->request_counter == 0){
+        state->handler(state->status); //exit
+      }
+      return false;
+    }
+    return true;
+  };
+
+  //Asynchronous call to Find
+  fs->Find(uri->get_path(), "*", hdfs::FileSystem::GetDefaultFindMaxDepth(), handlerFind);
+
+  /* block until promise is set */
+  hdfs::Status status = future.get();
+  if (!status.ok()) {
+    std::cerr << "Error: " << status.ToString() << std::endl;
+    exit(EXIT_FAILURE);
+  }
+
+  // Clean up static data and prevent valgrind memory leaks
+  google::protobuf::ShutdownProtobufLibrary();
+  return 0;
+}
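
The request_counter/find_is_done bookkeeping in hdfs_setrep is a generic
scatter-gather termination pattern: the operation is complete only when the
producer (Find) has delivered its last batch AND every outstanding consumer
call (SetReplication) has called back. Reduced to standard C++ with
illustrative names and no HDFS types, the invariant looks like this:

    #include <cstdint>
    #include <mutex>

    struct ScatterGather {
      std::mutex lock;
      uint64_t outstanding = 0;    // async calls still in flight
      bool producer_done = false;  // no more work will be added

      // Called by each consumer callback; returns true exactly once,
      // when the final piece of work completes after the producer is done.
      bool complete_one() {
        std::lock_guard<std::mutex> g(lock);
        return --outstanding == 0 && producer_done;
      }

      // Called when the producer delivers its last batch; returns true if
      // everything has already drained and the caller should finish now.
      bool finish_producing() {
        std::lock_guard<std::mutex> g(lock);
        producer_done = true;
        return outstanding == 0;
      }
    };

Both transitions must be evaluated under the same lock, exactly as the tool
does, or a consumer completing concurrently with the final Find callback can
cause the finished state to be observed twice or not at all.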

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55b3fdfe/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs_stat.cc
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs_stat.cc b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs_stat.cc
new file mode 100644
index 0000000..c910571
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs_stat.cc
@@ -0,0 +1,91 @@
+/*
+  Licensed to the Apache Software Foundation (ASF) under one
+  or more contributor license agreements.  See the NOTICE file
+  distributed with this work for additional information
+  regarding copyright ownership.  The ASF licenses this file
+  to you under the Apache License, Version 2.0 (the
+  "License"); you may not use this file except in compliance
+  with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing,
+  software distributed under the License is distributed on an
+  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+  KIND, either express or implied.  See the License for the
+  specific language governing permissions and limitations
+  under the License.
+*/
+
+#include <google/protobuf/stubs/common.h>
+#include <unistd.h>
+#include "tools_common.h"
+
+void usage(){
+  std::cout << "Usage: hdfs_rm [OPTION] FILE"
+      << std::endl
+      << std::endl << "Display FILE status."
+      << std::endl
+      << std::endl << "  -h        display this help and exit"
+      << std::endl
+      << std::endl << "Examples:"
+      << std::endl << "hdfs_rm hdfs://localhost.localdomain:8020/dir/file"
+      << std::endl << "hdfs_rm -R /dir1/dir2"
+      << std::endl;
+}
+
+int main(int argc, char *argv[]) {
+  //We should have at least 2 arguments
+  if (argc < 2) {
+    usage();
+    exit(EXIT_FAILURE);
+  }
+
+  int input;
+
+  //Using GetOpt to read in the values
+  opterr = 0;
+  while ((input = getopt(argc, argv, "h")) != -1) {
+    switch (input)
+    {
+    case 'h':
+      usage();
+      exit(EXIT_SUCCESS);
+    case '?':
+      if (isprint(optopt))
+        std::cerr << "Unknown option `-" << (char) optopt << "'." << std::endl;
+      else
+        std::cerr << "Unknown option character `" << (char) optopt << "'." << std::endl;
+      usage();
+      exit(EXIT_FAILURE);
+    default:
+      exit(EXIT_FAILURE);
+    }
+  }
+  std::string uri_path = argv[optind];
+
+  //Building a URI object from the given uri_path
+  hdfs::optional<hdfs::URI> uri = hdfs::URI::parse_from_string(uri_path);
+  if (!uri) {
+    std::cerr << "Malformed URI: " << uri_path << std::endl;
+    exit(EXIT_FAILURE);
+  }
+
+  std::shared_ptr<hdfs::FileSystem> fs = hdfs::doConnect(uri.value(), false);
+  if (!fs) {
+    std::cerr << "Could not connect the file system. " << std::endl;
+    exit(EXIT_FAILURE);
+  }
+
+  hdfs::StatInfo stat_info;
+  hdfs::Status status = fs->GetFileInfo(uri->get_path(), stat_info);
+  if (!status.ok()) {
+    std::cerr << "Error: " << status.ToString() << std::endl;
+    exit(EXIT_FAILURE);
+  }
+  std::cout << stat_info.str() << std::endl;
+
+  // Clean up static data and prevent valgrind memory leaks
+  google::protobuf::ShutdownProtobufLibrary();
+  return 0;
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55b3fdfe/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs_tail.cc
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs_tail.cc b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs_tail.cc
new file mode 100644
index 0000000..2fc1969
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs_tail.cc
@@ -0,0 +1,128 @@
+/*
+  Licensed to the Apache Software Foundation (ASF) under one
+  or more contributor license agreements.  See the NOTICE file
+  distributed with this work for additional information
+  regarding copyright ownership.  The ASF licenses this file
+  to you under the Apache License, Version 2.0 (the
+  "License"); you may not use this file except in compliance
+  with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing,
+  software distributed under the License is distributed on an
+  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+  KIND, either express or implied.  See the License for the
+  specific language governing permissions and limitations
+  under the License.
+*/
+
+#include <google/protobuf/stubs/common.h>
+#include <unistd.h>
+#include "tools_common.h"
+
+void usage(){
+  std::cout << "Usage: hdfs_tail [OPTION] FILE"
+      << std::endl
+      << std::endl << "Displays last kilobyte of the file to stdout."
+      << std::endl
+      << std::endl << "  -f  output appended data as the file grows, as in Unix"
+      << std::endl << "  -h  display this help and exit"
+      << std::endl
+      << std::endl << "Examples:"
+      << std::endl << "hdfs_tail hdfs://localhost.localdomain:8020/dir/file"
+      << std::endl << "hdfs_tail /dir/file"
+      << std::endl;
+}
+
+#define TAIL_SIZE 1024
+#define REFRESH_RATE 1 //seconds
+
+int main(int argc, char *argv[]) {
+  if (argc < 2) {
+    usage();
+    exit(EXIT_FAILURE);
+  }
+
+  bool follow = false;
+  int input;
+
+  //Using GetOpt to read in the values
+  opterr = 0;
+  while ((input = getopt(argc, argv, "hf")) != -1) {
+    switch (input)
+    {
+    case 'h':
+      usage();
+      exit(EXIT_SUCCESS);
+    case 'f':
+      follow = true;
+      break;
+    case '?':
+      if (isprint(optopt))
+        std::cerr << "Unknown option `-" << (char) optopt << "'." << std::endl;
+      else
+        std::cerr << "Unknown option character `" << (char) optopt << "'." << std::endl;
+      usage();
+      exit(EXIT_FAILURE);
+    default:
+      exit(EXIT_FAILURE);
+    }
+  }
+
+  std::string uri_path = argv[optind];
+
+  //Building a URI object from the given uri_path
+  hdfs::optional<hdfs::URI> uri = hdfs::URI::parse_from_string(uri_path);
+  if (!uri) {
+    std::cerr << "Malformed URI: " << uri_path << std::endl;
+    exit(EXIT_FAILURE);
+  }
+
+  std::shared_ptr<hdfs::FileSystem> fs = hdfs::doConnect(uri.value(), false);
+  if (!fs) {
+    std::cerr << "Could not connect the file system. " << std::endl;
+    exit(EXIT_FAILURE);
+  }
+
+  //We need to get the size of the file using stat
+  hdfs::StatInfo stat_info;
+  hdfs::Status status = fs->GetFileInfo(uri->get_path(), stat_info);
+  if (!status.ok()) {
+    std::cerr << "Error: " << status.ToString() << std::endl;
+    exit(EXIT_FAILURE);
+  }
+
+  //Determine where to start reading
+  off_t offset = 0;
+  if(stat_info.length > TAIL_SIZE){
+    offset = stat_info.length - TAIL_SIZE;
+  }
+
+  do {
+    off_t current_length = (off_t) stat_info.length;
+    readFile(fs, uri->get_path(), offset, stdout, false);
+
+    //Exit if -f flag was not set
+    if(!follow){
+      break;
+    }
+
+    do{
+      //Sleep for the REFRESH_RATE
+      sleep(REFRESH_RATE);
+      //Use stat to check the new filesize.
+      status = fs->GetFileInfo(uri->get_path(), stat_info);
+      if (!status.ok()) {
+        std::cerr << "Error: " << status.ToString() << std::endl;
+        exit(EXIT_FAILURE);
+      }
+      //If the file became longer, loop back and print the difference
+    }
+    while((off_t) stat_info.length <= current_length);
+
+    //Advance the offset to the previous snapshot so the next pass prints
+    //only the newly appended data
+    offset = current_length;
+  } while (true);
+
+  // Clean up static data and prevent valgrind memory leaks
+  google::protobuf::ShutdownProtobufLibrary();
+  return 0;
+}
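
Follow mode in hdfs_tail is a plain polling loop: stat the file once per
REFRESH_RATE and print whatever lies beyond the last offset already emitted.
The skeleton, independent of HDFS (get_length and print_from are illustrative
stand-ins for GetFileInfo and readFile), is:

    //Poll until interrupted; emit only bytes appended since the last pass
    off_t offset = initial_offset;
    for (;;) {
      off_t snapshot = get_length();  // length observed before reading
      print_from(offset);             // prints offset..EOF as of the read
      offset = snapshot;              // next pass starts at the snapshot
      do {
        sleep(REFRESH_RATE);
      } while (get_length() <= offset);
    }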


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org

