hadoop-common-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From whe...@apache.org
Subject [35/50] [abbrv] hadoop git commit: HDFS-8724. Import third_party libraries into the repository.
Date Tue, 07 Jul 2015 21:32:22 GMT
http://git-wip-us.apache.org/repos/asf/hadoop/blob/5b1aba70/hadoop-hdfs-project/hadoop-hdfs-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/impl/socket_select_interrupter.ipp
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/impl/socket_select_interrupter.ipp b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/impl/socket_select_interrupter.ipp
new file mode 100644
index 0000000..bd6f363
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/impl/socket_select_interrupter.ipp
@@ -0,0 +1,175 @@
+//
+// detail/impl/socket_select_interrupter.ipp
+// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+//
+// Copyright (c) 2003-2014 Christopher M. Kohlhoff (chris at kohlhoff dot com)
+//
+// Distributed under the Boost Software License, Version 1.0. (See accompanying
+// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
+//
+
+#ifndef ASIO_DETAIL_IMPL_SOCKET_SELECT_INTERRUPTER_IPP
+#define ASIO_DETAIL_IMPL_SOCKET_SELECT_INTERRUPTER_IPP
+
+#if defined(_MSC_VER) && (_MSC_VER >= 1200)
+# pragma once
+#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
+
+#include "asio/detail/config.hpp"
+
+#if !defined(ASIO_WINDOWS_RUNTIME)
+
+#if defined(ASIO_WINDOWS) \
+  || defined(__CYGWIN__) \
+  || defined(__SYMBIAN32__)
+
+#include <cstdlib>
+#include "asio/detail/socket_holder.hpp"
+#include "asio/detail/socket_ops.hpp"
+#include "asio/detail/socket_select_interrupter.hpp"
+#include "asio/detail/throw_error.hpp"
+#include "asio/error.hpp"
+
+#include "asio/detail/push_options.hpp"
+
+namespace asio {
+namespace detail {
+
+// Construct the interrupter. Immediately builds the connected loopback
+// socket pair (see open_descriptors()) used to wake a blocked select() call.
+// Throws asio::system_error on failure.
+socket_select_interrupter::socket_select_interrupter()
+{
+  open_descriptors();
+}
+
+// Create the socket pair: a temporary listening socket on 127.0.0.1 accepts
+// a connection from a freshly created client socket. The accepted (server)
+// end becomes read_descriptor_ and the client end becomes write_descriptor_.
+// Both ends are made non-blocking and TCP_NODELAY so that interrupt() never
+// blocks and its one-byte writes are delivered without Nagle delay.
+// Throws asio::system_error (via throw_error) on any failure.
+void socket_select_interrupter::open_descriptors()
+{
+  asio::error_code ec;
+  socket_holder acceptor(socket_ops::socket(
+        AF_INET, SOCK_STREAM, IPPROTO_TCP, ec));
+  if (acceptor.get() == invalid_socket)
+    asio::detail::throw_error(ec, "socket_select_interrupter");
+
+  // Best effort only: failure to set SO_REUSEADDR is deliberately ignored.
+  int opt = 1;
+  socket_ops::state_type acceptor_state = 0;
+  socket_ops::setsockopt(acceptor.get(), acceptor_state,
+      SOL_SOCKET, SO_REUSEADDR, &opt, sizeof(opt), ec);
+
+  // Bind the acceptor to an ephemeral port (sin_port = 0) on the IPv4
+  // loopback interface, then recover the chosen port via getsockname.
+  using namespace std; // For memset.
+  sockaddr_in4_type addr;
+  std::size_t addr_len = sizeof(addr);
+  memset(&addr, 0, sizeof(addr));
+  addr.sin_family = AF_INET;
+  addr.sin_addr.s_addr = inet_addr("127.0.0.1");
+  addr.sin_port = 0;
+  if (socket_ops::bind(acceptor.get(), (const socket_addr_type*)&addr,
+        addr_len, ec) == socket_error_retval)
+    asio::detail::throw_error(ec, "socket_select_interrupter");
+
+  if (socket_ops::getsockname(acceptor.get(), (socket_addr_type*)&addr,
+        &addr_len, ec) == socket_error_retval)
+    asio::detail::throw_error(ec, "socket_select_interrupter");
+
+  // Some broken firewalls on Windows will intermittently cause getsockname to
+  // return 0.0.0.0 when the socket is actually bound to 127.0.0.1. We
+  // explicitly specify the target address here to work around this problem.
+  addr.sin_addr.s_addr = inet_addr("127.0.0.1");
+
+  if (socket_ops::listen(acceptor.get(),
+        SOMAXCONN, ec) == socket_error_retval)
+    asio::detail::throw_error(ec, "socket_select_interrupter");
+
+  socket_holder client(socket_ops::socket(
+        AF_INET, SOCK_STREAM, IPPROTO_TCP, ec));
+  if (client.get() == invalid_socket)
+    asio::detail::throw_error(ec, "socket_select_interrupter");
+
+  // Blocking connect to the acceptor's port recovered above.
+  if (socket_ops::connect(client.get(), (const socket_addr_type*)&addr,
+        addr_len, ec) == socket_error_retval)
+    asio::detail::throw_error(ec, "socket_select_interrupter");
+
+  socket_holder server(socket_ops::accept(acceptor.get(), 0, 0, ec));
+  if (server.get() == invalid_socket)
+    asio::detail::throw_error(ec, "socket_select_interrupter");
+  
+  // Make the client (write) end non-blocking; ioctl returning non-zero
+  // indicates failure here.
+  ioctl_arg_type non_blocking = 1;
+  socket_ops::state_type client_state = 0;
+  if (socket_ops::ioctl(client.get(), client_state,
+        FIONBIO, &non_blocking, ec))
+    asio::detail::throw_error(ec, "socket_select_interrupter");
+
+  // TCP_NODELAY is best effort; errors are ignored.
+  opt = 1;
+  socket_ops::setsockopt(client.get(), client_state,
+      IPPROTO_TCP, TCP_NODELAY, &opt, sizeof(opt), ec);
+
+  // Same treatment for the server (read) end.
+  non_blocking = 1;
+  socket_ops::state_type server_state = 0;
+  if (socket_ops::ioctl(server.get(), server_state,
+        FIONBIO, &non_blocking, ec))
+    asio::detail::throw_error(ec, "socket_select_interrupter");
+
+  opt = 1;
+  socket_ops::setsockopt(server.get(), server_state,
+      IPPROTO_TCP, TCP_NODELAY, &opt, sizeof(opt), ec);
+
+  // Transfer ownership out of the RAII holders; the acceptor holder closes
+  // the now-unneeded listening socket when it goes out of scope.
+  read_descriptor_ = server.release();
+  write_descriptor_ = client.release();
+}
+
+// Destroy the interrupter, closing both ends of the socket pair.
+socket_select_interrupter::~socket_select_interrupter()
+{
+  close_descriptors();
+}
+
+// Close both descriptors if they are open. Close errors are ignored (ec is
+// discarded); does not reset the members to invalid_socket -- recreate()
+// does that itself.
+void socket_select_interrupter::close_descriptors()
+{
+  asio::error_code ec;
+  socket_ops::state_type state = socket_ops::internal_non_blocking;
+  if (read_descriptor_ != invalid_socket)
+    socket_ops::close(read_descriptor_, state, true, ec);
+  if (write_descriptor_ != invalid_socket)
+    socket_ops::close(write_descriptor_, state, true, ec);
+}
+
+// Tear down and rebuild the socket pair. The members are reset to
+// invalid_socket before open_descriptors() so that, if reopening throws,
+// the destructor will not attempt to close stale descriptors again.
+void socket_select_interrupter::recreate()
+{
+  close_descriptors();
+
+  write_descriptor_ = invalid_socket;
+  read_descriptor_ = invalid_socket;
+
+  open_descriptors();
+}
+
+// Wake any select() that is monitoring read_descriptor_ by sending a single
+// byte to the write end. The socket is non-blocking and any send error is
+// deliberately ignored -- a failed wakeup is harmless here.
+void socket_select_interrupter::interrupt()
+{
+  char byte = 0;
+  socket_ops::buf b;
+  socket_ops::init_buf(b, &byte, 1);
+  asio::error_code ec;
+  socket_ops::send(write_descriptor_, &b, 1, 0, ec);
+}
+
+// Drain all bytes written by interrupt() so the next select() will block
+// again. Returns true if at least one byte was read, i.e. the interrupter
+// had actually been signalled.
+bool socket_select_interrupter::reset()
+{
+  char data[1024];
+  socket_ops::buf b;
+  socket_ops::init_buf(b, data, sizeof(data));
+  asio::error_code ec;
+  int bytes_read = socket_ops::recv(read_descriptor_, &b, 1, 0, ec);
+  bool was_interrupted = (bytes_read > 0);
+  // Keep reading while the buffer was filled completely, in case more data
+  // than fits in one buffer is pending on the non-blocking socket.
+  while (bytes_read == sizeof(data))
+    bytes_read = socket_ops::recv(read_descriptor_, &b, 1, 0, ec);
+  return was_interrupted;
+}
+
+} // namespace detail
+} // namespace asio
+
+#include "asio/detail/pop_options.hpp"
+
+#endif // defined(ASIO_WINDOWS)
+       // || defined(__CYGWIN__)
+       // || defined(__SYMBIAN32__)
+
+#endif // !defined(ASIO_WINDOWS_RUNTIME)
+
+#endif // ASIO_DETAIL_IMPL_SOCKET_SELECT_INTERRUPTER_IPP

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5b1aba70/hadoop-hdfs-project/hadoop-hdfs-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/impl/strand_service.hpp
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/impl/strand_service.hpp b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/impl/strand_service.hpp
new file mode 100644
index 0000000..bed0eca
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/impl/strand_service.hpp
@@ -0,0 +1,118 @@
+//
+// detail/impl/strand_service.hpp
+// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+//
+// Copyright (c) 2003-2014 Christopher M. Kohlhoff (chris at kohlhoff dot com)
+//
+// Distributed under the Boost Software License, Version 1.0. (See accompanying
+// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
+//
+
+#ifndef ASIO_DETAIL_IMPL_STRAND_SERVICE_HPP
+#define ASIO_DETAIL_IMPL_STRAND_SERVICE_HPP
+
+#if defined(_MSC_VER) && (_MSC_VER >= 1200)
+# pragma once
+#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
+
+#include "asio/detail/addressof.hpp"
+#include "asio/detail/call_stack.hpp"
+#include "asio/detail/completion_handler.hpp"
+#include "asio/detail/fenced_block.hpp"
+#include "asio/detail/handler_alloc_helpers.hpp"
+#include "asio/detail/handler_invoke_helpers.hpp"
+
+#include "asio/detail/push_options.hpp"
+
+namespace asio {
+namespace detail {
+
+// A strand implementation starts unlocked. The base operation's completion
+// function is strand_service::do_complete, which the io_service invokes to
+// run the strand's ready handlers.
+inline strand_service::strand_impl::strand_impl()
+  : operation(&strand_service::do_complete),
+    locked_(false)
+{
+}
+
+// Scope guard used by dispatch(). On destruction it moves any handlers that
+// were queued while the strand was executing onto the ready queue, releases
+// the strand lock if nothing is pending, and otherwise keeps the lock held
+// and reschedules the strand on the io_service.
+struct strand_service::on_dispatch_exit
+{
+  io_service_impl* io_service_;
+  strand_impl* impl_;
+
+  ~on_dispatch_exit()
+  {
+    impl_->mutex_.lock();
+    impl_->ready_queue_.push(impl_->waiting_queue_);
+    bool more_handlers = impl_->locked_ = !impl_->ready_queue_.empty();
+    impl_->mutex_.unlock();
+
+    if (more_handlers)
+      io_service_->post_immediate_completion(impl_, false);
+  }
+};
+
+// Request that the handler run subject to the strand's non-concurrency
+// guarantee. If the caller is already inside this strand the handler is
+// invoked inline; otherwise the handler is wrapped in an operation and
+// either executed immediately (if the strand lock could be acquired on an
+// io_service thread) or queued for later.
+template <typename Handler>
+void strand_service::dispatch(strand_service::implementation_type& impl,
+    Handler& handler)
+{
+  // If we are already in the strand then the handler can run immediately.
+  if (call_stack<strand_impl>::contains(impl))
+  {
+    fenced_block b(fenced_block::full);
+    asio_handler_invoke_helpers::invoke(handler, handler);
+    return;
+  }
+
+  // Allocate and construct an operation to wrap the handler.
+  typedef completion_handler<Handler> op;
+  typename op::ptr p = { asio::detail::addressof(handler),
+    asio_handler_alloc_helpers::allocate(
+      sizeof(op), handler), 0 };
+  p.p = new (p.v) op(handler);
+
+  ASIO_HANDLER_CREATION((p.p, "strand", impl, "dispatch"));
+
+  bool dispatch_immediately = do_dispatch(impl, p.p);
+  operation* o = p.p;
+  // Release the smart-ptr struct's ownership: the operation is now owned
+  // either by the strand's queue or by the immediate-dispatch path below.
+  p.v = p.p = 0;
+
+  if (dispatch_immediately)
+  {
+    // Indicate that this strand is executing on the current thread.
+    call_stack<strand_impl>::context ctx(impl);
+
+    // Ensure the next handler, if any, is scheduled on block exit.
+    on_dispatch_exit on_exit = { &io_service_, impl };
+    (void)on_exit;
+
+    completion_handler<Handler>::do_complete(
+        &io_service_, o, asio::error_code(), 0);
+  }
+}
+
+// Request the io_service to invoke the given handler and return immediately.
+// The handler is never run inline, even if the caller is inside the strand;
+// it is wrapped in an operation and enqueued via do_post().
+template <typename Handler>
+void strand_service::post(strand_service::implementation_type& impl,
+    Handler& handler)
+{
+  bool is_continuation =
+    asio_handler_cont_helpers::is_continuation(handler);
+
+  // Allocate and construct an operation to wrap the handler.
+  typedef completion_handler<Handler> op;
+  typename op::ptr p = { asio::detail::addressof(handler),
+    asio_handler_alloc_helpers::allocate(
+      sizeof(op), handler), 0 };
+  p.p = new (p.v) op(handler);
+
+  ASIO_HANDLER_CREATION((p.p, "strand", impl, "post"));
+
+  do_post(impl, p.p, is_continuation);
+  // Ownership of the operation has passed to the strand's queues.
+  p.v = p.p = 0;
+}
+
+} // namespace detail
+} // namespace asio
+
+#include "asio/detail/pop_options.hpp"
+
+#endif // ASIO_DETAIL_IMPL_STRAND_SERVICE_HPP

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5b1aba70/hadoop-hdfs-project/hadoop-hdfs-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/impl/strand_service.ipp
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/impl/strand_service.ipp b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/impl/strand_service.ipp
new file mode 100644
index 0000000..0083e86
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/impl/strand_service.ipp
@@ -0,0 +1,176 @@
+//
+// detail/impl/strand_service.ipp
+// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+//
+// Copyright (c) 2003-2014 Christopher M. Kohlhoff (chris at kohlhoff dot com)
+//
+// Distributed under the Boost Software License, Version 1.0. (See accompanying
+// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
+//
+
+#ifndef ASIO_DETAIL_IMPL_STRAND_SERVICE_IPP
+#define ASIO_DETAIL_IMPL_STRAND_SERVICE_IPP
+
+#if defined(_MSC_VER) && (_MSC_VER >= 1200)
+# pragma once
+#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
+
+#include "asio/detail/config.hpp"
+#include "asio/detail/call_stack.hpp"
+#include "asio/detail/strand_service.hpp"
+
+#include "asio/detail/push_options.hpp"
+
+namespace asio {
+namespace detail {
+
+// Scope guard used by do_complete(). On destruction it promotes any newly
+// waiting handlers to the ready queue and, if there is more work, keeps the
+// strand locked and reposts it to the owning io_service (as a continuation);
+// otherwise the strand lock is released.
+struct strand_service::on_do_complete_exit
+{
+  io_service_impl* owner_;
+  strand_impl* impl_;
+
+  ~on_do_complete_exit()
+  {
+    impl_->mutex_.lock();
+    impl_->ready_queue_.push(impl_->waiting_queue_);
+    bool more_handlers = impl_->locked_ = !impl_->ready_queue_.empty();
+    impl_->mutex_.unlock();
+
+    if (more_handlers)
+      owner_->post_immediate_completion(impl_, true);
+  }
+};
+
+// Construct the strand service, caching a reference to the io_service's
+// implementation. salt_ seeds the index selection in construct().
+strand_service::strand_service(asio::io_service& io_service)
+  : asio::detail::service_base<strand_service>(io_service),
+    io_service_(asio::use_service<io_service_impl>(io_service)),
+    mutex_(),
+    salt_(0)
+{
+}
+
+// Service shutdown: gather every pending operation from all strand
+// implementations into a local queue. The operations are destroyed (not
+// executed) when `ops` goes out of scope at the end of this function.
+void strand_service::shutdown_service()
+{
+  op_queue<operation> ops;
+
+  asio::detail::mutex::scoped_lock lock(mutex_);
+
+  for (std::size_t i = 0; i < num_implementations; ++i)
+  {
+    if (strand_impl* impl = implementations_[i].get())
+    {
+      ops.push(impl->waiting_queue_);
+      ops.push(impl->ready_queue_);
+    }
+  }
+}
+
+// Assign a strand implementation to `impl`. Implementations are drawn from
+// a fixed-size pool (num_implementations) and lazily created; distinct
+// strands may therefore share an implementation. The pool index is derived
+// either sequentially from salt_ or by hashing the address of `impl` mixed
+// with the salt (0x9e3779b9 is the usual golden-ratio hash constant).
+void strand_service::construct(strand_service::implementation_type& impl)
+{
+  asio::detail::mutex::scoped_lock lock(mutex_);
+
+  std::size_t salt = salt_++;
+#if defined(ASIO_ENABLE_SEQUENTIAL_STRAND_ALLOCATION)
+  std::size_t index = salt;
+#else // defined(ASIO_ENABLE_SEQUENTIAL_STRAND_ALLOCATION)
+  std::size_t index = reinterpret_cast<std::size_t>(&impl);
+  index += (reinterpret_cast<std::size_t>(&impl) >> 3);
+  index ^= salt + 0x9e3779b9 + (index << 6) + (index >> 2);
+#endif // defined(ASIO_ENABLE_SEQUENTIAL_STRAND_ALLOCATION)
+  index = index % num_implementations;
+
+  if (!implementations_[index].get())
+    implementations_[index].reset(new strand_impl);
+  impl = implementations_[index].get();
+}
+
+// Returns true if the calling thread is currently executing a handler that
+// was dispatched through this strand (tracked via the thread-local
+// call_stack of strand_impl contexts).
+bool strand_service::running_in_this_thread(
+    const implementation_type& impl) const
+{
+  return call_stack<strand_impl>::contains(impl) != 0;
+}
+
+// Try to acquire the strand lock for immediate execution of `op`. Returns
+// true if the caller may invoke the handler right now (lock acquired and we
+// are on an io_service thread); otherwise the operation has been enqueued
+// and ownership transferred, and false is returned.
+bool strand_service::do_dispatch(implementation_type& impl, operation* op)
+{
+  // If we are running inside the io_service, and no other handler already
+  // holds the strand lock, then the handler can run immediately.
+  bool can_dispatch = io_service_.can_dispatch();
+  impl->mutex_.lock();
+  if (can_dispatch && !impl->locked_)
+  {
+    // Immediate invocation is allowed.
+    impl->locked_ = true;
+    impl->mutex_.unlock();
+    return true;
+  }
+
+  if (impl->locked_)
+  {
+    // Some other handler already holds the strand lock. Enqueue for later.
+    impl->waiting_queue_.push(op);
+    impl->mutex_.unlock();
+  }
+  else
+  {
+    // The handler is acquiring the strand lock and so is responsible for
+    // scheduling the strand.
+    impl->locked_ = true;
+    impl->mutex_.unlock();
+    // Safe to touch ready_queue_ outside the mutex: it is only accessed by
+    // whoever holds the strand lock, which we just acquired.
+    impl->ready_queue_.push(op);
+    io_service_.post_immediate_completion(impl, false);
+  }
+
+  return false;
+}
+
+// Enqueue `op` for deferred execution on the strand. If the strand lock is
+// free, this call acquires it and schedules the strand on the io_service;
+// otherwise the op simply joins the waiting queue. `is_continuation` is
+// forwarded to the io_service's scheduling heuristics.
+void strand_service::do_post(implementation_type& impl,
+    operation* op, bool is_continuation)
+{
+  impl->mutex_.lock();
+  if (impl->locked_)
+  {
+    // Some other handler already holds the strand lock. Enqueue for later.
+    impl->waiting_queue_.push(op);
+    impl->mutex_.unlock();
+  }
+  else
+  {
+    // The handler is acquiring the strand lock and so is responsible for
+    // scheduling the strand.
+    impl->locked_ = true;
+    impl->mutex_.unlock();
+    impl->ready_queue_.push(op);
+    io_service_.post_immediate_completion(impl, is_continuation);
+  }
+}
+
+// Completion function installed in every strand_impl (see the strand_impl
+// constructor). Invoked by the io_service to drain the strand's ready
+// queue; a null `owner` means the service is shutting down and nothing runs.
+void strand_service::do_complete(io_service_impl* owner, operation* base,
+    const asio::error_code& ec, std::size_t /*bytes_transferred*/)
+{
+  if (owner)
+  {
+    strand_impl* impl = static_cast<strand_impl*>(base);
+
+    // Indicate that this strand is executing on the current thread.
+    call_stack<strand_impl>::context ctx(impl);
+
+    // Ensure the next handler, if any, is scheduled on block exit.
+    on_do_complete_exit on_exit = { owner, impl };
+    (void)on_exit;
+
+    // Run all ready handlers. No lock is required since the ready queue is
+    // accessed only within the strand.
+    while (operation* o = impl->ready_queue_.front())
+    {
+      impl->ready_queue_.pop();
+      o->complete(*owner, ec, 0);
+    }
+  }
+}
+
+} // namespace detail
+} // namespace asio
+
+#include "asio/detail/pop_options.hpp"
+
+#endif // ASIO_DETAIL_IMPL_STRAND_SERVICE_IPP

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5b1aba70/hadoop-hdfs-project/hadoop-hdfs-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/impl/task_io_service.hpp
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/impl/task_io_service.hpp b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/impl/task_io_service.hpp
new file mode 100644
index 0000000..9e8bc8f
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/impl/task_io_service.hpp
@@ -0,0 +1,78 @@
+//
+// detail/impl/task_io_service.hpp
+// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+//
+// Copyright (c) 2003-2014 Christopher M. Kohlhoff (chris at kohlhoff dot com)
+//
+// Distributed under the Boost Software License, Version 1.0. (See accompanying
+// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
+//
+
+#ifndef ASIO_DETAIL_IMPL_TASK_IO_SERVICE_HPP
+#define ASIO_DETAIL_IMPL_TASK_IO_SERVICE_HPP
+
+#if defined(_MSC_VER) && (_MSC_VER >= 1200)
+# pragma once
+#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
+
+#include "asio/detail/addressof.hpp"
+#include "asio/detail/completion_handler.hpp"
+#include "asio/detail/fenced_block.hpp"
+#include "asio/detail/handler_alloc_helpers.hpp"
+#include "asio/detail/handler_cont_helpers.hpp"
+#include "asio/detail/handler_invoke_helpers.hpp"
+
+#include "asio/detail/push_options.hpp"
+
+namespace asio {
+namespace detail {
+
+// Run the handler immediately if the calling thread is already running this
+// io_service (inline invocation inside a memory fence); otherwise wrap it
+// in an operation and hand it to do_dispatch() for queued execution.
+template <typename Handler>
+void task_io_service::dispatch(Handler& handler)
+{
+  if (thread_call_stack::contains(this))
+  {
+    fenced_block b(fenced_block::full);
+    asio_handler_invoke_helpers::invoke(handler, handler);
+  }
+  else
+  {
+    // Allocate and construct an operation to wrap the handler.
+    typedef completion_handler<Handler> op;
+    typename op::ptr p = { asio::detail::addressof(handler),
+      asio_handler_alloc_helpers::allocate(
+        sizeof(op), handler), 0 };
+    p.p = new (p.v) op(handler);
+
+    ASIO_HANDLER_CREATION((p.p, "io_service", this, "dispatch"));
+
+    do_dispatch(p.p);
+    // Ownership has passed to the operation queue.
+    p.v = p.p = 0;
+  }
+}
+
+// Queue the handler for later execution and return immediately; the handler
+// is never invoked inline. The continuation hint lets the scheduler keep
+// the work on the current thread's private queue where possible.
+template <typename Handler>
+void task_io_service::post(Handler& handler)
+{
+  bool is_continuation =
+    asio_handler_cont_helpers::is_continuation(handler);
+
+  // Allocate and construct an operation to wrap the handler.
+  typedef completion_handler<Handler> op;
+  typename op::ptr p = { asio::detail::addressof(handler),
+    asio_handler_alloc_helpers::allocate(
+      sizeof(op), handler), 0 };
+  p.p = new (p.v) op(handler);
+
+  ASIO_HANDLER_CREATION((p.p, "io_service", this, "post"));
+
+  post_immediate_completion(p.p, is_continuation);
+  // Ownership has passed to the operation queue.
+  p.v = p.p = 0;
+}
+
+} // namespace detail
+} // namespace asio
+
+#include "asio/detail/pop_options.hpp"
+
+#endif // ASIO_DETAIL_IMPL_TASK_IO_SERVICE_HPP

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5b1aba70/hadoop-hdfs-project/hadoop-hdfs-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/impl/task_io_service.ipp
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/impl/task_io_service.ipp b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/impl/task_io_service.ipp
new file mode 100644
index 0000000..baf27c8
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/impl/task_io_service.ipp
@@ -0,0 +1,474 @@
+//
+// detail/impl/task_io_service.ipp
+// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+//
+// Copyright (c) 2003-2014 Christopher M. Kohlhoff (chris at kohlhoff dot com)
+//
+// Distributed under the Boost Software License, Version 1.0. (See accompanying
+// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
+//
+
+#ifndef ASIO_DETAIL_IMPL_TASK_IO_SERVICE_IPP
+#define ASIO_DETAIL_IMPL_TASK_IO_SERVICE_IPP
+
+#if defined(_MSC_VER) && (_MSC_VER >= 1200)
+# pragma once
+#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
+
+#include "asio/detail/config.hpp"
+
+#if !defined(ASIO_HAS_IOCP)
+
+#include "asio/detail/event.hpp"
+#include "asio/detail/limits.hpp"
+#include "asio/detail/reactor.hpp"
+#include "asio/detail/task_io_service.hpp"
+#include "asio/detail/task_io_service_thread_info.hpp"
+
+#include "asio/detail/push_options.hpp"
+
+namespace asio {
+namespace detail {
+
+// Scope guard that runs after the reactor task has executed (even if it
+// threw). It publishes the thread's privately counted work to the shared
+// counter, re-acquires the queue lock, and puts the task operation back at
+// the end of the queue so the reactor runs again.
+struct task_io_service::task_cleanup
+{
+  ~task_cleanup()
+  {
+    if (this_thread_->private_outstanding_work > 0)
+    {
+      asio::detail::increment(
+          task_io_service_->outstanding_work_,
+          this_thread_->private_outstanding_work);
+    }
+    this_thread_->private_outstanding_work = 0;
+
+    // Enqueue the completed operations and reinsert the task at the end of
+    // the operation queue.
+    lock_->lock();
+    task_io_service_->task_interrupted_ = true;
+    task_io_service_->op_queue_.push(this_thread_->private_op_queue);
+    task_io_service_->op_queue_.push(&task_io_service_->task_operation_);
+  }
+
+  task_io_service* task_io_service_;
+  mutex::scoped_lock* lock_;
+  thread_info* this_thread_;
+};
+
+// Scope guard that runs after a handler completes (even if it threw). It
+// reconciles the thread's private work count against the one unit consumed
+// by the handler just run: a surplus is added to the shared counter, a
+// deficit means net work finished. Any privately queued ops are then
+// returned to the shared queue.
+struct task_io_service::work_cleanup
+{
+  ~work_cleanup()
+  {
+    if (this_thread_->private_outstanding_work > 1)
+    {
+      asio::detail::increment(
+          task_io_service_->outstanding_work_,
+          this_thread_->private_outstanding_work - 1);
+    }
+    else if (this_thread_->private_outstanding_work < 1)
+    {
+      task_io_service_->work_finished();
+    }
+    this_thread_->private_outstanding_work = 0;
+
+#if defined(ASIO_HAS_THREADS)
+    if (!this_thread_->private_op_queue.empty())
+    {
+      lock_->lock();
+      task_io_service_->op_queue_.push(this_thread_->private_op_queue);
+    }
+#endif // defined(ASIO_HAS_THREADS)
+  }
+
+  task_io_service* task_io_service_;
+  mutex::scoped_lock* lock_;
+  thread_info* this_thread_;
+};
+
+// Construct the scheduler. A concurrency hint of exactly 1 enables the
+// single-threaded optimizations (private queues, no wakeups). The task
+// (reactor) is created lazily by init_task(); task_interrupted_ starts true
+// because there is no blocked reactor to interrupt yet.
+task_io_service::task_io_service(
+    asio::io_service& io_service, std::size_t concurrency_hint)
+  : asio::detail::service_base<task_io_service>(io_service),
+    one_thread_(concurrency_hint == 1),
+    mutex_(),
+    task_(0),
+    task_interrupted_(true),
+    outstanding_work_(0),
+    stopped_(false),
+    shutdown_(false)
+{
+  ASIO_HANDLER_TRACKING_INIT;
+}
+
+// Service shutdown: destroy (not run) every queued handler, skipping the
+// sentinel task_operation_ which is not heap-allocated, and drop the task
+// pointer so a later init_task() could recreate it.
+void task_io_service::shutdown_service()
+{
+  mutex::scoped_lock lock(mutex_);
+  shutdown_ = true;
+  lock.unlock();
+
+  // Destroy handler objects.
+  while (!op_queue_.empty())
+  {
+    operation* o = op_queue_.front();
+    op_queue_.pop();
+    if (o != &task_operation_)
+      o->destroy();
+  }
+
+  // Reset to initial state.
+  task_ = 0;
+}
+
+// Lazily create the reactor task and enqueue its sentinel operation so that
+// a run()ning thread will pick it up. No-op if already created or if the
+// service has been shut down.
+void task_io_service::init_task()
+{
+  mutex::scoped_lock lock(mutex_);
+  if (!shutdown_ && !task_)
+  {
+    task_ = &use_service<reactor>(this->get_io_service());
+    op_queue_.push(&task_operation_);
+    wake_one_thread_and_unlock(lock);
+  }
+}
+
+// Run the event loop until there is no more work. Returns the number of
+// handlers executed (saturating at size_t max). If there is no outstanding
+// work the io_service is stopped and 0 is returned immediately.
+std::size_t task_io_service::run(asio::error_code& ec)
+{
+  ec = asio::error_code();
+  if (outstanding_work_ == 0)
+  {
+    stop();
+    return 0;
+  }
+
+  // Register this thread on the call stack so dispatch() and the private
+  // queue optimizations can recognize it.
+  thread_info this_thread;
+  this_thread.private_outstanding_work = 0;
+  thread_call_stack::context ctx(this, this_thread);
+
+  mutex::scoped_lock lock(mutex_);
+
+  // do_run_one() returns with the lock released; re-acquire each iteration.
+  std::size_t n = 0;
+  for (; do_run_one(lock, this_thread, ec); lock.lock())
+    if (n != (std::numeric_limits<std::size_t>::max)())
+      ++n;
+  return n;
+}
+
+// Run at most one handler (blocking until one is available or the service
+// is stopped). Returns 1 if a handler ran, 0 otherwise.
+std::size_t task_io_service::run_one(asio::error_code& ec)
+{
+  ec = asio::error_code();
+  if (outstanding_work_ == 0)
+  {
+    stop();
+    return 0;
+  }
+
+  thread_info this_thread;
+  this_thread.private_outstanding_work = 0;
+  thread_call_stack::context ctx(this, this_thread);
+
+  mutex::scoped_lock lock(mutex_);
+
+  return do_run_one(lock, this_thread, ec);
+}
+
+// Run all handlers that are ready to run, without blocking. Returns the
+// number of handlers executed (saturating at size_t max).
+std::size_t task_io_service::poll(asio::error_code& ec)
+{
+  ec = asio::error_code();
+  if (outstanding_work_ == 0)
+  {
+    stop();
+    return 0;
+  }
+
+  thread_info this_thread;
+  this_thread.private_outstanding_work = 0;
+  thread_call_stack::context ctx(this, this_thread);
+
+  mutex::scoped_lock lock(mutex_);
+
+#if defined(ASIO_HAS_THREADS)
+  // We want to support nested calls to poll() and poll_one(), so any handlers
+  // that are already on a thread-private queue need to be put on to the main
+  // queue now.
+  if (one_thread_)
+    if (thread_info* outer_thread_info = ctx.next_by_key())
+      op_queue_.push(outer_thread_info->private_op_queue);
+#endif // defined(ASIO_HAS_THREADS)
+
+  std::size_t n = 0;
+  for (; do_poll_one(lock, this_thread, ec); lock.lock())
+    if (n != (std::numeric_limits<std::size_t>::max)())
+      ++n;
+  return n;
+}
+
+// Run at most one ready handler, without blocking. Returns 1 if a handler
+// ran, 0 otherwise.
+std::size_t task_io_service::poll_one(asio::error_code& ec)
+{
+  ec = asio::error_code();
+  if (outstanding_work_ == 0)
+  {
+    stop();
+    return 0;
+  }
+
+  thread_info this_thread;
+  this_thread.private_outstanding_work = 0;
+  thread_call_stack::context ctx(this, this_thread);
+
+  mutex::scoped_lock lock(mutex_);
+
+#if defined(ASIO_HAS_THREADS)
+  // We want to support nested calls to poll() and poll_one(), so any handlers
+  // that are already on a thread-private queue need to be put on to the main
+  // queue now.
+  if (one_thread_)
+    if (thread_info* outer_thread_info = ctx.next_by_key())
+      op_queue_.push(outer_thread_info->private_op_queue);
+#endif // defined(ASIO_HAS_THREADS)
+
+  return do_poll_one(lock, this_thread, ec);
+}
+
+// Stop the event loop: sets the stopped flag and wakes every thread blocked
+// in run()/run_one() so they can observe it and return.
+void task_io_service::stop()
+{
+  mutex::scoped_lock lock(mutex_);
+  stop_all_threads(lock);
+}
+
+// Returns whether the io_service is in the stopped state (read under the
+// queue mutex).
+bool task_io_service::stopped() const
+{
+  mutex::scoped_lock lock(mutex_);
+  return stopped_;
+}
+
+// Clear the stopped flag so subsequent run()/poll() calls may process work
+// again. Does not touch the operation queue.
+void task_io_service::reset()
+{
+  mutex::scoped_lock lock(mutex_);
+  stopped_ = false;
+}
+
+// Enqueue `op`, counting it as new work. Fast path (threads build): when
+// single-threaded or the op is a continuation, and the caller is a thread
+// already running this io_service, the op and its work count go on that
+// thread's private queue with no locking or wakeup.
+void task_io_service::post_immediate_completion(
+    task_io_service::operation* op, bool is_continuation)
+{
+#if defined(ASIO_HAS_THREADS)
+  if (one_thread_ || is_continuation)
+  {
+    if (thread_info* this_thread = thread_call_stack::contains(this))
+    {
+      ++this_thread->private_outstanding_work;
+      this_thread->private_op_queue.push(op);
+      return;
+    }
+  }
+#else // defined(ASIO_HAS_THREADS)
+  (void)is_continuation;
+#endif // defined(ASIO_HAS_THREADS)
+
+  work_started();
+  mutex::scoped_lock lock(mutex_);
+  op_queue_.push(op);
+  wake_one_thread_and_unlock(lock);
+}
+
+// Enqueue `op` whose work has already been counted (no work_started()).
+// Same private-queue fast path as post_immediate_completion, but only in
+// the single-threaded case and without bumping the private work count.
+void task_io_service::post_deferred_completion(task_io_service::operation* op)
+{
+#if defined(ASIO_HAS_THREADS)
+  if (one_thread_)
+  {
+    if (thread_info* this_thread = thread_call_stack::contains(this))
+    {
+      this_thread->private_op_queue.push(op);
+      return;
+    }
+  }
+#endif // defined(ASIO_HAS_THREADS)
+
+  mutex::scoped_lock lock(mutex_);
+  op_queue_.push(op);
+  wake_one_thread_and_unlock(lock);
+}
+
+// Batch form of post_deferred_completion: splice a whole queue of
+// already-counted operations onto the appropriate queue in one go.
+void task_io_service::post_deferred_completions(
+    op_queue<task_io_service::operation>& ops)
+{
+  if (!ops.empty())
+  {
+#if defined(ASIO_HAS_THREADS)
+    if (one_thread_)
+    {
+      if (thread_info* this_thread = thread_call_stack::contains(this))
+      {
+        this_thread->private_op_queue.push(ops);
+        return;
+      }
+    }
+#endif // defined(ASIO_HAS_THREADS)
+
+    mutex::scoped_lock lock(mutex_);
+    op_queue_.push(ops);
+    wake_one_thread_and_unlock(lock);
+  }
+}
+
+// Slow path of dispatch(): count the work, enqueue the op on the shared
+// queue and wake one thread to run it. No private-queue shortcut here.
+void task_io_service::do_dispatch(
+    task_io_service::operation* op)
+{
+  work_started();
+  mutex::scoped_lock lock(mutex_);
+  op_queue_.push(op);
+  wake_one_thread_and_unlock(lock);
+}
+
+// Discard a queue of operations without running them: splicing them into a
+// local op_queue destroys them when it goes out of scope.
+void task_io_service::abandon_operations(
+    op_queue<task_io_service::operation>& ops)
+{
+  op_queue<task_io_service::operation> ops2;
+  ops2.push(ops);
+}
+
+// Core blocking loop body. Called with `lock` held; returns with it
+// released. Pops one item from the queue: the task sentinel causes the
+// reactor to run (task_cleanup requeues it afterwards), a real operation is
+// completed and 1 returned. Blocks on the wakeup event when the queue is
+// empty. Returns 0 once stopped_ is observed.
+std::size_t task_io_service::do_run_one(mutex::scoped_lock& lock,
+    task_io_service::thread_info& this_thread,
+    const asio::error_code& ec)
+{
+  while (!stopped_)
+  {
+    if (!op_queue_.empty())
+    {
+      // Prepare to execute first handler from queue.
+      operation* o = op_queue_.front();
+      op_queue_.pop();
+      bool more_handlers = (!op_queue_.empty());
+
+      if (o == &task_operation_)
+      {
+        task_interrupted_ = more_handlers;
+
+        // If more handlers remain, hand them to another thread before we
+        // block inside the reactor.
+        if (more_handlers && !one_thread_)
+          wakeup_event_.unlock_and_signal_one(lock);
+        else
+          lock.unlock();
+
+        task_cleanup on_exit = { this, &lock, &this_thread };
+        (void)on_exit;
+
+        // Run the task. May throw an exception. Only block if the operation
+        // queue is empty and we're not polling, otherwise we want to return
+        // as soon as possible.
+        task_->run(!more_handlers, this_thread.private_op_queue);
+      }
+      else
+      {
+        std::size_t task_result = o->task_result_;
+
+        if (more_handlers && !one_thread_)
+          wake_one_thread_and_unlock(lock);
+        else
+          lock.unlock();
+
+        // Ensure the count of outstanding work is decremented on block exit.
+        work_cleanup on_exit = { this, &lock, &this_thread };
+        (void)on_exit;
+
+        // Complete the operation. May throw an exception. Deletes the object.
+        o->complete(*this, ec, task_result);
+
+        return 1;
+      }
+    }
+    else
+    {
+      wakeup_event_.clear(lock);
+      wakeup_event_.wait(lock);
+    }
+  }
+
+  return 0;
+}
+
+// Non-blocking counterpart of do_run_one. Called with `lock` held; returns
+// with it released. If the task sentinel is at the head, the reactor is run
+// in polling mode first; then at most one ready operation is completed.
+// Returns 1 if a handler ran, 0 if nothing was ready or stopped_ is set.
+std::size_t task_io_service::do_poll_one(mutex::scoped_lock& lock,
+    task_io_service::thread_info& this_thread,
+    const asio::error_code& ec)
+{
+  if (stopped_)
+    return 0;
+
+  operation* o = op_queue_.front();
+  if (o == &task_operation_)
+  {
+    op_queue_.pop();
+    lock.unlock();
+
+    // Inner scope so task_cleanup runs (re-locking and requeuing the task
+    // sentinel) before we look at the queue again.
+    {
+      task_cleanup c = { this, &lock, &this_thread };
+      (void)c;
+
+      // Run the task. May throw an exception. Only block if the operation
+      // queue is empty and we're not polling, otherwise we want to return
+      // as soon as possible.
+      task_->run(false, this_thread.private_op_queue);
+    }
+
+    o = op_queue_.front();
+    if (o == &task_operation_)
+    {
+      // The reactor produced nothing; let another thread have a turn.
+      wakeup_event_.maybe_unlock_and_signal_one(lock);
+      return 0;
+    }
+  }
+
+  if (o == 0)
+    return 0;
+
+  op_queue_.pop();
+  bool more_handlers = (!op_queue_.empty());
+
+  std::size_t task_result = o->task_result_;
+
+  if (more_handlers && !one_thread_)
+    wake_one_thread_and_unlock(lock);
+  else
+    lock.unlock();
+
+  // Ensure the count of outstanding work is decremented on block exit.
+  work_cleanup on_exit = { this, &lock, &this_thread };
+  (void)on_exit;
+
+  // Complete the operation. May throw an exception. Deletes the object.
+  o->complete(*this, ec, task_result);
+
+  return 1;
+}
+
+// Mark the service stopped and wake everything: signal all threads waiting
+// on the event, and interrupt the reactor if it may be blocked inside its
+// run call. Caller must hold the queue mutex via `lock`.
+void task_io_service::stop_all_threads(
+    mutex::scoped_lock& lock)
+{
+  stopped_ = true;
+  wakeup_event_.signal_all(lock);
+
+  if (!task_interrupted_ && task_)
+  {
+    task_interrupted_ = true;
+    task_->interrupt();
+  }
+}
+
+// Wake one idle thread to process new work, releasing `lock` in the
+// process. If no thread is waiting on the event, fall back to interrupting
+// the reactor (which some thread may be blocked inside) and unlock.
+void task_io_service::wake_one_thread_and_unlock(
+    mutex::scoped_lock& lock)
+{
+  if (!wakeup_event_.maybe_unlock_and_signal_one(lock))
+  {
+    if (!task_interrupted_ && task_)
+    {
+      task_interrupted_ = true;
+      task_->interrupt();
+    }
+    lock.unlock();
+  }
+}
+
+} // namespace detail
+} // namespace asio
+
+#include "asio/detail/pop_options.hpp"
+
+#endif // !defined(ASIO_HAS_IOCP)
+
+#endif // ASIO_DETAIL_IMPL_TASK_IO_SERVICE_IPP

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5b1aba70/hadoop-hdfs-project/hadoop-hdfs-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/impl/throw_error.ipp
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/impl/throw_error.ipp b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/impl/throw_error.ipp
new file mode 100644
index 0000000..94b5853
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/impl/throw_error.ipp
@@ -0,0 +1,60 @@
+//
+// detail/impl/throw_error.ipp
+// ~~~~~~~~~~~~~~~~~~~~~~~~~~~
+//
+// Copyright (c) 2003-2014 Christopher M. Kohlhoff (chris at kohlhoff dot com)
+//
+// Distributed under the Boost Software License, Version 1.0. (See accompanying
+// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
+//
+
+#ifndef ASIO_DETAIL_IMPL_THROW_ERROR_IPP
+#define ASIO_DETAIL_IMPL_THROW_ERROR_IPP
+
+#if defined(_MSC_VER) && (_MSC_VER >= 1200)
+# pragma once
+#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
+
+#include "asio/detail/config.hpp"
+#include "asio/detail/throw_error.hpp"
+#include "asio/detail/throw_exception.hpp"
+#include "asio/system_error.hpp"
+
+#include "asio/detail/push_options.hpp"
+
+namespace asio {
+namespace detail {
+
+// Wrap the error code in an asio::system_error and throw it via the
+// configurable throw_exception hook.
+void do_throw_error(const asio::error_code& err)
+{
+  asio::system_error e(err);
+  asio::detail::throw_exception(e);
+}
+
+// As above, but also records the location (e.g. the name of the failing
+// operation) in the exception's "what" string.
+void do_throw_error(const asio::error_code& err, const char* location)
+{
+  // boostify: non-boost code starts here
+#if defined(ASIO_MSVC) && defined(ASIO_HAS_STD_SYSTEM_ERROR)
+  // Microsoft's implementation of std::system_error is non-conformant in that
+  // it ignores the error code's message when a "what" string is supplied. We'll
+  // work around this by explicitly formatting the "what" string.
+  std::string what_msg = location;
+  what_msg += ": ";
+  what_msg += err.message();
+  asio::system_error e(err, what_msg);
+  asio::detail::throw_exception(e);
+#else // defined(ASIO_MSVC) && defined(ASIO_HAS_STD_SYSTEM_ERROR)
+  // boostify: non-boost code ends here
+  asio::system_error e(err, location);
+  asio::detail::throw_exception(e);
+  // boostify: non-boost code starts here
+#endif // defined(ASIO_MSVC) && defined(ASIO_HAS_STD_SYSTEM_ERROR)
+  // boostify: non-boost code ends here
+}
+
+} // namespace detail
+} // namespace asio
+
+#include "asio/detail/pop_options.hpp"
+
+#endif // ASIO_DETAIL_IMPL_THROW_ERROR_IPP

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5b1aba70/hadoop-hdfs-project/hadoop-hdfs-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/impl/timer_queue_ptime.ipp
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/impl/timer_queue_ptime.ipp b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/impl/timer_queue_ptime.ipp
new file mode 100644
index 0000000..97c87b5
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/impl/timer_queue_ptime.ipp
@@ -0,0 +1,84 @@
+//
+// detail/impl/timer_queue_ptime.ipp
+// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+//
+// Copyright (c) 2003-2014 Christopher M. Kohlhoff (chris at kohlhoff dot com)
+//
+// Distributed under the Boost Software License, Version 1.0. (See accompanying
+// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
+//
+
+#ifndef ASIO_DETAIL_IMPL_TIMER_QUEUE_PTIME_IPP
+#define ASIO_DETAIL_IMPL_TIMER_QUEUE_PTIME_IPP
+
+#if defined(_MSC_VER) && (_MSC_VER >= 1200)
+# pragma once
+#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
+
+#include "asio/detail/config.hpp"
+#include "asio/detail/timer_queue_ptime.hpp"
+
+#include "asio/detail/push_options.hpp"
+
+#if defined(ASIO_HAS_BOOST_DATE_TIME)
+
+namespace asio {
+namespace detail {
+
+// Out-of-line definitions for the boost::posix_time::ptime specialization of
+// timer_queue. Every member simply forwards to the generic implementation
+// held in impl_; defining them here (rather than in the header) keeps the
+// heavyweight Boost.Date_Time machinery out of translation units that do not
+// use deadline timers.
+
+timer_queue<time_traits<boost::posix_time::ptime> >::timer_queue()
+{
+}
+
+timer_queue<time_traits<boost::posix_time::ptime> >::~timer_queue()
+{
+}
+
+// Add a timer to the queue. The return value is forwarded from the
+// underlying implementation.
+bool timer_queue<time_traits<boost::posix_time::ptime> >::enqueue_timer(
+    const time_type& time, per_timer_data& timer, wait_op* op)
+{
+  return impl_.enqueue_timer(time, timer, op);
+}
+
+// Whether the queue currently contains no timers.
+bool timer_queue<time_traits<boost::posix_time::ptime> >::empty() const
+{
+  return impl_.empty();
+}
+
+// Time until the earliest timer expires, in milliseconds, capped at
+// max_duration.
+long timer_queue<time_traits<boost::posix_time::ptime> >::wait_duration_msec(
+    long max_duration) const
+{
+  return impl_.wait_duration_msec(max_duration);
+}
+
+// Time until the earliest timer expires, in microseconds, capped at
+// max_duration.
+long timer_queue<time_traits<boost::posix_time::ptime> >::wait_duration_usec(
+    long max_duration) const
+{
+  return impl_.wait_duration_usec(max_duration);
+}
+
+// Move the operations for all expired timers onto ops.
+void timer_queue<time_traits<boost::posix_time::ptime> >::get_ready_timers(
+    op_queue<operation>& ops)
+{
+  impl_.get_ready_timers(ops);
+}
+
+// Move the operations for all timers (expired or not) onto ops.
+void timer_queue<time_traits<boost::posix_time::ptime> >::get_all_timers(
+    op_queue<operation>& ops)
+{
+  impl_.get_all_timers(ops);
+}
+
+// Cancel up to max_cancelled operations waiting on the given timer, moving
+// them onto ops; returns the number actually cancelled.
+std::size_t timer_queue<time_traits<boost::posix_time::ptime> >::cancel_timer(
+    per_timer_data& timer, op_queue<operation>& ops, std::size_t max_cancelled)
+{
+  return impl_.cancel_timer(timer, ops, max_cancelled);
+}
+
+} // namespace detail
+} // namespace asio
+
+#endif // defined(ASIO_HAS_BOOST_DATE_TIME)
+
+#include "asio/detail/pop_options.hpp"
+
+#endif // ASIO_DETAIL_IMPL_TIMER_QUEUE_PTIME_IPP

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5b1aba70/hadoop-hdfs-project/hadoop-hdfs-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/impl/timer_queue_set.ipp
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/impl/timer_queue_set.ipp b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/impl/timer_queue_set.ipp
new file mode 100644
index 0000000..a8b59e5
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/impl/timer_queue_set.ipp
@@ -0,0 +1,101 @@
+//
+// detail/impl/timer_queue_set.ipp
+// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+//
+// Copyright (c) 2003-2014 Christopher M. Kohlhoff (chris at kohlhoff dot com)
+//
+// Distributed under the Boost Software License, Version 1.0. (See accompanying
+// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
+//
+
+#ifndef ASIO_DETAIL_IMPL_TIMER_QUEUE_SET_IPP
+#define ASIO_DETAIL_IMPL_TIMER_QUEUE_SET_IPP
+
+#if defined(_MSC_VER) && (_MSC_VER >= 1200)
+# pragma once
+#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
+
+#include "asio/detail/config.hpp"
+#include "asio/detail/timer_queue_set.hpp"
+
+#include "asio/detail/push_options.hpp"
+
+namespace asio {
+namespace detail {
+
+// timer_queue_set maintains an intrusive singly linked list of timer queues
+// (linked through timer_queue_base::next_, head in first_) and fans each
+// aggregate query out across every queue in the set.
+
+timer_queue_set::timer_queue_set()
+  : first_(0)
+{
+}
+
+// Push a queue onto the front of the list. The queue must not already be a
+// member of the set.
+void timer_queue_set::insert(timer_queue_base* q)
+{
+  q->next_ = first_;
+  first_ = q;
+}
+
+// Unlink a queue from the list, resetting its next_ pointer. A queue that is
+// not present is silently ignored.
+void timer_queue_set::erase(timer_queue_base* q)
+{
+  if (first_)
+  {
+    // Removing the head is a special case: just advance first_.
+    if (q == first_)
+    {
+      first_ = q->next_;
+      q->next_ = 0;
+      return;
+    }
+
+    // Otherwise walk the list looking for the predecessor of q.
+    for (timer_queue_base* p = first_; p->next_; p = p->next_)
+    {
+      if (p->next_ == q)
+      {
+        p->next_ = q->next_;
+        q->next_ = 0;
+        return;
+      }
+    }
+  }
+}
+
+// True only if every queue in the set is empty.
+bool timer_queue_set::all_empty() const
+{
+  for (timer_queue_base* p = first_; p; p = p->next_)
+    if (!p->empty())
+      return false;
+  return true;
+}
+
+// Smallest wait duration (in milliseconds) across all queues, starting from
+// the supplied cap.
+long timer_queue_set::wait_duration_msec(long max_duration) const
+{
+  long min_duration = max_duration;
+  for (timer_queue_base* p = first_; p; p = p->next_)
+    min_duration = p->wait_duration_msec(min_duration);
+  return min_duration;
+}
+
+// Smallest wait duration (in microseconds) across all queues, starting from
+// the supplied cap.
+long timer_queue_set::wait_duration_usec(long max_duration) const
+{
+  long min_duration = max_duration;
+  for (timer_queue_base* p = first_; p; p = p->next_)
+    min_duration = p->wait_duration_usec(min_duration);
+  return min_duration;
+}
+
+// Collect the operations for all expired timers from every queue.
+void timer_queue_set::get_ready_timers(op_queue<operation>& ops)
+{
+  for (timer_queue_base* p = first_; p; p = p->next_)
+    p->get_ready_timers(ops);
+}
+
+// Collect the operations for all timers, expired or not, from every queue.
+void timer_queue_set::get_all_timers(op_queue<operation>& ops)
+{
+  for (timer_queue_base* p = first_; p; p = p->next_)
+    p->get_all_timers(ops);
+}
+
+} // namespace detail
+} // namespace asio
+
+#include "asio/detail/pop_options.hpp"
+
+#endif // ASIO_DETAIL_IMPL_TIMER_QUEUE_SET_IPP

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5b1aba70/hadoop-hdfs-project/hadoop-hdfs-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/impl/win_event.ipp
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/impl/win_event.ipp b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/impl/win_event.ipp
new file mode 100644
index 0000000..29f9af9
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/impl/win_event.ipp
@@ -0,0 +1,67 @@
+//
+// detail/win_event.ipp
+// ~~~~~~~~~~~~~~~~~~~~
+//
+// Copyright (c) 2003-2014 Christopher M. Kohlhoff (chris at kohlhoff dot com)
+//
+// Distributed under the Boost Software License, Version 1.0. (See accompanying
+// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
+//
+
+#ifndef ASIO_DETAIL_IMPL_WIN_EVENT_IPP
+#define ASIO_DETAIL_IMPL_WIN_EVENT_IPP
+
+#if defined(_MSC_VER) && (_MSC_VER >= 1200)
+# pragma once
+#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
+
+#include "asio/detail/config.hpp"
+
+#if defined(ASIO_WINDOWS)
+
+#include "asio/detail/throw_error.hpp"
+#include "asio/detail/win_event.hpp"
+#include "asio/error.hpp"
+
+#include "asio/detail/push_options.hpp"
+
+namespace asio {
+namespace detail {
+
+// Create the pair of Win32 event objects backing this event: events_[0] is
+// manual-reset (second CreateEvent argument TRUE), events_[1] is auto-reset
+// (FALSE). Both start non-signalled. Any creation failure is reported by
+// throwing via throw_error.
+win_event::win_event()
+  : state_(0)
+{
+  events_[0] = ::CreateEvent(0, true, false, 0);
+  if (!events_[0])
+  {
+    DWORD last_error = ::GetLastError();
+    asio::error_code ec(last_error,
+        asio::error::get_system_category());
+    asio::detail::throw_error(ec, "event");
+  }
+
+  events_[1] = ::CreateEvent(0, false, false, 0);
+  if (!events_[1])
+  {
+    // Don't leak the first event if creating the second one fails.
+    DWORD last_error = ::GetLastError();
+    ::CloseHandle(events_[0]);
+    asio::error_code ec(last_error,
+        asio::error::get_system_category());
+    asio::detail::throw_error(ec, "event");
+  }
+}
+
+// Release both event handles.
+win_event::~win_event()
+{
+  ::CloseHandle(events_[0]);
+  ::CloseHandle(events_[1]);
+}
+
+} // namespace detail
+} // namespace asio
+
+#include "asio/detail/pop_options.hpp"
+
+#endif // defined(ASIO_WINDOWS)
+
+#endif // ASIO_DETAIL_IMPL_WIN_EVENT_IPP

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5b1aba70/hadoop-hdfs-project/hadoop-hdfs-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/impl/win_iocp_handle_service.ipp
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/impl/win_iocp_handle_service.ipp b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/impl/win_iocp_handle_service.ipp
new file mode 100644
index 0000000..1281050
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/impl/win_iocp_handle_service.ipp
@@ -0,0 +1,528 @@
+//
+// detail/impl/win_iocp_handle_service.ipp
+// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+//
+// Copyright (c) 2003-2014 Christopher M. Kohlhoff (chris at kohlhoff dot com)
+// Copyright (c) 2008 Rep Invariant Systems, Inc. (info@repinvariant.com)
+//
+// Distributed under the Boost Software License, Version 1.0. (See accompanying
+// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
+//
+
+#ifndef ASIO_DETAIL_IMPL_WIN_IOCP_HANDLE_SERVICE_IPP
+#define ASIO_DETAIL_IMPL_WIN_IOCP_HANDLE_SERVICE_IPP
+
+#if defined(_MSC_VER) && (_MSC_VER >= 1200)
+# pragma once
+#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
+
+#include "asio/detail/config.hpp"
+
+#if defined(ASIO_HAS_IOCP)
+
+#include "asio/detail/win_iocp_handle_service.hpp"
+
+#include "asio/detail/push_options.hpp"
+
+namespace asio {
+namespace detail {
+
+// RAII wrapper around an OVERLAPPED structure for use by the synchronous
+// do_read/do_write paths. It owns the event handle used with
+// GetOverlappedResult and closes it on destruction. Construction failures
+// are reported through the ec out-parameter rather than by throwing.
+class win_iocp_handle_service::overlapped_wrapper
+  : public OVERLAPPED
+{
+public:
+  explicit overlapped_wrapper(asio::error_code& ec)
+  {
+    Internal = 0;
+    InternalHigh = 0;
+    Offset = 0;
+    OffsetHigh = 0;
+
+    // Create a non-signalled manual-reset event, for GetOverlappedResult.
+    hEvent = ::CreateEvent(0, TRUE, FALSE, 0);
+    if (hEvent)
+    {
+      // As documented in GetQueuedCompletionStatus, setting the low order
+      // bit of this event prevents our synchronous writes from being treated
+      // as completion port events.
+      DWORD_PTR tmp = reinterpret_cast<DWORD_PTR>(hEvent);
+      hEvent = reinterpret_cast<HANDLE>(tmp | 1);
+    }
+    else
+    {
+      DWORD last_error = ::GetLastError();
+      ec = asio::error_code(last_error,
+          asio::error::get_system_category());
+    }
+  }
+
+  ~overlapped_wrapper()
+  {
+    // Closing a handle with the low bit set works because Windows handle
+    // values are aligned; the tagged bit is ignored here.
+    if (hEvent)
+    {
+      ::CloseHandle(hEvent);
+    }
+  }
+};
+
+// Construct the service, acquiring the I/O completion port service that all
+// handle operations are dispatched through. impl_list_ is the head of an
+// intrusive doubly linked list of all open implementations, guarded by
+// mutex_.
+win_iocp_handle_service::win_iocp_handle_service(
+    asio::io_service& io_service)
+  : iocp_service_(asio::use_service<win_iocp_io_service>(io_service)),
+    mutex_(),
+    impl_list_(0)
+{
+}
+
+void win_iocp_handle_service::shutdown_service()
+{
+  // Close all implementations, causing all operations to complete.
+  asio::detail::mutex::scoped_lock lock(mutex_);
+  implementation_type* impl = impl_list_;
+  while (impl)
+  {
+    close_for_destruction(*impl);
+    impl = impl->next_;
+  }
+}
+
+// Initialize a fresh implementation (no native handle yet) and register it
+// in the service's list of implementations.
+void win_iocp_handle_service::construct(
+    win_iocp_handle_service::implementation_type& impl)
+{
+  impl.handle_ = INVALID_HANDLE_VALUE;
+  impl.safe_cancellation_thread_id_ = 0;
+
+  // Insert implementation into linked list of all implementations.
+  asio::detail::mutex::scoped_lock lock(mutex_);
+  impl.next_ = impl_list_;
+  impl.prev_ = 0;
+  if (impl_list_)
+    impl_list_->prev_ = &impl;
+  impl_list_ = &impl;
+}
+
+// Move-construct an implementation, transferring ownership of the native
+// handle from other_impl (which is left closed) and registering the new
+// implementation in the list.
+void win_iocp_handle_service::move_construct(
+    win_iocp_handle_service::implementation_type& impl,
+    win_iocp_handle_service::implementation_type& other_impl)
+{
+  impl.handle_ = other_impl.handle_;
+  other_impl.handle_ = INVALID_HANDLE_VALUE;
+
+  impl.safe_cancellation_thread_id_ = other_impl.safe_cancellation_thread_id_;
+  other_impl.safe_cancellation_thread_id_ = 0;
+
+  // Insert implementation into linked list of all implementations.
+  asio::detail::mutex::scoped_lock lock(mutex_);
+  impl.next_ = impl_list_;
+  impl.prev_ = 0;
+  if (impl_list_)
+    impl_list_->prev_ = &impl;
+  impl_list_ = &impl;
+}
+
+// Move-assign: close whatever impl currently owns, then take over
+// other_impl's handle. When the source belongs to a different service
+// instance, impl is also migrated from this service's implementation list
+// to other_service's list (each list guarded by its own mutex).
+void win_iocp_handle_service::move_assign(
+    win_iocp_handle_service::implementation_type& impl,
+    win_iocp_handle_service& other_service,
+    win_iocp_handle_service::implementation_type& other_impl)
+{
+  close_for_destruction(impl);
+
+  if (this != &other_service)
+  {
+    // Remove implementation from linked list of all implementations.
+    asio::detail::mutex::scoped_lock lock(mutex_);
+    if (impl_list_ == &impl)
+      impl_list_ = impl.next_;
+    if (impl.prev_)
+      impl.prev_->next_ = impl.next_;
+    if (impl.next_)
+      impl.next_->prev_= impl.prev_;
+    impl.next_ = 0;
+    impl.prev_ = 0;
+  }
+
+  impl.handle_ = other_impl.handle_;
+  other_impl.handle_ = INVALID_HANDLE_VALUE;
+
+  impl.safe_cancellation_thread_id_ = other_impl.safe_cancellation_thread_id_;
+  other_impl.safe_cancellation_thread_id_ = 0;
+
+  if (this != &other_service)
+  {
+    // Insert implementation into linked list of all implementations.
+    asio::detail::mutex::scoped_lock lock(other_service.mutex_);
+    impl.next_ = other_service.impl_list_;
+    impl.prev_ = 0;
+    if (other_service.impl_list_)
+      other_service.impl_list_->prev_ = &impl;
+    other_service.impl_list_ = &impl;
+  }
+}
+
+// Destroy an implementation: close its handle and unlink it from the
+// service's implementation list.
+void win_iocp_handle_service::destroy(
+    win_iocp_handle_service::implementation_type& impl)
+{
+  close_for_destruction(impl);
+  
+  // Remove implementation from linked list of all implementations.
+  asio::detail::mutex::scoped_lock lock(mutex_);
+  if (impl_list_ == &impl)
+    impl_list_ = impl.next_;
+  if (impl.prev_)
+    impl.prev_->next_ = impl.next_;
+  if (impl.next_)
+    impl.next_->prev_= impl.prev_;
+  impl.next_ = 0;
+  impl.prev_ = 0;
+}
+
+// Adopt an existing native handle, registering it with the I/O completion
+// port. Fails with already_open if the implementation already owns a handle,
+// or with whatever error register_handle reports.
+asio::error_code win_iocp_handle_service::assign(
+    win_iocp_handle_service::implementation_type& impl,
+    const native_handle_type& handle, asio::error_code& ec)
+{
+  if (is_open(impl))
+  {
+    ec = asio::error::already_open;
+    return ec;
+  }
+
+  if (iocp_service_.register_handle(handle, ec))
+    return ec;
+
+  impl.handle_ = handle;
+  ec = asio::error_code();
+  return ec;
+}
+
+// Close the native handle if open. Closing an already-closed implementation
+// succeeds with a cleared error code. The implementation is reset to the
+// closed state even if CloseHandle reports an error.
+asio::error_code win_iocp_handle_service::close(
+    win_iocp_handle_service::implementation_type& impl,
+    asio::error_code& ec)
+{
+  if (is_open(impl))
+  {
+    ASIO_HANDLER_OPERATION(("handle", &impl, "close"));
+
+    if (!::CloseHandle(impl.handle_))
+    {
+      DWORD last_error = ::GetLastError();
+      ec = asio::error_code(last_error,
+          asio::error::get_system_category());
+    }
+    else
+    {
+      ec = asio::error_code();
+    }
+
+    impl.handle_ = INVALID_HANDLE_VALUE;
+    impl.safe_cancellation_thread_id_ = 0;
+  }
+  else
+  {
+    ec = asio::error_code();
+  }
+
+  return ec;
+}
+
+// Cancel all outstanding asynchronous operations on the handle. Prefers
+// CancelIoEx (looked up dynamically since it requires Vista or later), which
+// can cancel I/O issued from any thread. Without it, falls back to CancelIo,
+// which only works when all operations were started from the calling thread;
+// safe_cancellation_thread_id_ tracks whether that is the case.
+asio::error_code win_iocp_handle_service::cancel(
+    win_iocp_handle_service::implementation_type& impl,
+    asio::error_code& ec)
+{
+  if (!is_open(impl))
+  {
+    ec = asio::error::bad_descriptor;
+    return ec;
+  }
+
+  ASIO_HANDLER_OPERATION(("handle", &impl, "cancel"));
+
+  if (FARPROC cancel_io_ex_ptr = ::GetProcAddress(
+        ::GetModuleHandleA("KERNEL32"), "CancelIoEx"))
+  {
+    // This version of Windows supports cancellation from any thread.
+    typedef BOOL (WINAPI* cancel_io_ex_t)(HANDLE, LPOVERLAPPED);
+    cancel_io_ex_t cancel_io_ex = (cancel_io_ex_t)cancel_io_ex_ptr;
+    if (!cancel_io_ex(impl.handle_, 0))
+    {
+      DWORD last_error = ::GetLastError();
+      if (last_error == ERROR_NOT_FOUND)
+      {
+        // ERROR_NOT_FOUND means that there were no operations to be
+        // cancelled. We swallow this error to match the behaviour on other
+        // platforms.
+        ec = asio::error_code();
+      }
+      else
+      {
+        ec = asio::error_code(last_error,
+            asio::error::get_system_category());
+      }
+    }
+    else
+    {
+      ec = asio::error_code();
+    }
+  }
+  else if (impl.safe_cancellation_thread_id_ == 0)
+  {
+    // No operations have been started, so there's nothing to cancel.
+    ec = asio::error_code();
+  }
+  else if (impl.safe_cancellation_thread_id_ == ::GetCurrentThreadId())
+  {
+    // Asynchronous operations have been started from the current thread only,
+    // so it is safe to try to cancel them using CancelIo.
+    if (!::CancelIo(impl.handle_))
+    {
+      DWORD last_error = ::GetLastError();
+      ec = asio::error_code(last_error,
+          asio::error::get_system_category());
+    }
+    else
+    {
+      ec = asio::error_code();
+    }
+  }
+  else
+  {
+    // Asynchronous operations have been started from more than one thread,
+    // so cancellation is not safe.
+    ec = asio::error::operation_not_supported;
+  }
+
+  return ec;
+}
+
+// Synchronous write at the given 64-bit file offset. Issues an overlapped
+// WriteFile and then blocks in GetOverlappedResult until it completes.
+// Returns the number of bytes written, or 0 with ec set on failure.
+size_t win_iocp_handle_service::do_write(
+    win_iocp_handle_service::implementation_type& impl, uint64_t offset,
+    const asio::const_buffer& buffer, asio::error_code& ec)
+{
+  if (!is_open(impl))
+  {
+    ec = asio::error::bad_descriptor;
+    return 0;
+  }
+
+  // A request to write 0 bytes on a handle is a no-op.
+  if (asio::buffer_size(buffer) == 0)
+  {
+    ec = asio::error_code();
+    return 0;
+  }
+
+  // The wrapper's tagged event keeps this synchronous operation from being
+  // delivered to the completion port (see overlapped_wrapper).
+  overlapped_wrapper overlapped(ec);
+  if (ec)
+  {
+    return 0;
+  }
+
+  // Write the data. The 64-bit offset is split across Offset/OffsetHigh.
+  overlapped.Offset = offset & 0xFFFFFFFF;
+  overlapped.OffsetHigh = (offset >> 32) & 0xFFFFFFFF;
+  BOOL ok = ::WriteFile(impl.handle_,
+      asio::buffer_cast<LPCVOID>(buffer),
+      static_cast<DWORD>(asio::buffer_size(buffer)), 0, &overlapped);
+  if (!ok) 
+  {
+    // ERROR_IO_PENDING just means the write is in flight; anything else is a
+    // real failure.
+    DWORD last_error = ::GetLastError();
+    if (last_error != ERROR_IO_PENDING)
+    {
+      ec = asio::error_code(last_error,
+          asio::error::get_system_category());
+      return 0;
+    }
+  }
+
+  // Wait for the operation to complete.
+  DWORD bytes_transferred = 0;
+  ok = ::GetOverlappedResult(impl.handle_,
+      &overlapped, &bytes_transferred, TRUE);
+  if (!ok)
+  {
+    DWORD last_error = ::GetLastError();
+    ec = asio::error_code(last_error,
+        asio::error::get_system_category());
+    return 0;
+  }
+
+  ec = asio::error_code();
+  return bytes_transferred;
+}
+
+// Start an asynchronous write at the given offset. The operation's result is
+// always delivered through the completion port: failures detected here are
+// posted via on_completion rather than reported directly.
+void win_iocp_handle_service::start_write_op(
+    win_iocp_handle_service::implementation_type& impl, uint64_t offset,
+    const asio::const_buffer& buffer, operation* op)
+{
+  update_cancellation_thread_id(impl);
+  iocp_service_.work_started();
+
+  if (!is_open(impl))
+  {
+    iocp_service_.on_completion(op, asio::error::bad_descriptor);
+  }
+  else if (asio::buffer_size(buffer) == 0)
+  {
+    // A request to write 0 bytes on a handle is a no-op.
+    iocp_service_.on_completion(op);
+  }
+  else
+  {
+    // The operation object doubles as the OVERLAPPED structure passed to
+    // WriteFile; the 64-bit offset is split across Offset/OffsetHigh.
+    DWORD bytes_transferred = 0;
+    op->Offset = offset & 0xFFFFFFFF;
+    op->OffsetHigh = (offset >> 32) & 0xFFFFFFFF;
+    BOOL ok = ::WriteFile(impl.handle_,
+        asio::buffer_cast<LPCVOID>(buffer),
+        static_cast<DWORD>(asio::buffer_size(buffer)),
+        &bytes_transferred, op);
+    DWORD last_error = ::GetLastError();
+    if (!ok && last_error != ERROR_IO_PENDING
+        && last_error != ERROR_MORE_DATA)
+    {
+      // Immediate failure: complete the operation through the port.
+      iocp_service_.on_completion(op, last_error, bytes_transferred);
+    }
+    else
+    {
+      // Success or pending: the port will deliver the completion.
+      iocp_service_.on_pending(op);
+    }
+  }
+}
+
+// Synchronous read at the given 64-bit file offset. Issues an overlapped
+// ReadFile and blocks in GetOverlappedResult until completion. Returns the
+// number of bytes read, or 0 with ec set (ERROR_HANDLE_EOF is mapped to
+// asio::error::eof).
+size_t win_iocp_handle_service::do_read(
+    win_iocp_handle_service::implementation_type& impl, uint64_t offset,
+    const asio::mutable_buffer& buffer, asio::error_code& ec)
+{
+  if (!is_open(impl))
+  {
+    ec = asio::error::bad_descriptor;
+    return 0;
+  }
+  
+  // A request to read 0 bytes on a stream handle is a no-op.
+  if (asio::buffer_size(buffer) == 0)
+  {
+    ec = asio::error_code();
+    return 0;
+  }
+
+  // The wrapper's tagged event keeps this synchronous operation from being
+  // delivered to the completion port (see overlapped_wrapper).
+  overlapped_wrapper overlapped(ec);
+  if (ec)
+  {
+    return 0;
+  }
+
+  // Read some data. The 64-bit offset is split across Offset/OffsetHigh.
+  overlapped.Offset = offset & 0xFFFFFFFF;
+  overlapped.OffsetHigh = (offset >> 32) & 0xFFFFFFFF;
+  BOOL ok = ::ReadFile(impl.handle_,
+      asio::buffer_cast<LPVOID>(buffer),
+      static_cast<DWORD>(asio::buffer_size(buffer)), 0, &overlapped);
+  if (!ok) 
+  {
+    // ERROR_IO_PENDING means the read is in flight; ERROR_MORE_DATA means a
+    // partial message was read and is handled after the wait below.
+    DWORD last_error = ::GetLastError();
+    if (last_error != ERROR_IO_PENDING && last_error != ERROR_MORE_DATA)
+    {
+      if (last_error == ERROR_HANDLE_EOF)
+      {
+        ec = asio::error::eof;
+      }
+      else
+      {
+        ec = asio::error_code(last_error,
+            asio::error::get_system_category());
+      }
+      return 0;
+    }
+  }
+
+  // Wait for the operation to complete.
+  DWORD bytes_transferred = 0;
+  ok = ::GetOverlappedResult(impl.handle_,
+      &overlapped, &bytes_transferred, TRUE);
+  if (!ok)
+  {
+    DWORD last_error = ::GetLastError();
+    if (last_error != ERROR_MORE_DATA)
+    {
+      if (last_error == ERROR_HANDLE_EOF)
+      {
+        ec = asio::error::eof;
+      }
+      else
+      {
+        ec = asio::error_code(last_error,
+            asio::error::get_system_category());
+      }
+    }
+    return 0;
+  }
+
+  ec = asio::error_code();
+  return bytes_transferred;
+}
+
+// Start an asynchronous read at the given offset. As with start_write_op,
+// the result is always delivered through the completion port.
+void win_iocp_handle_service::start_read_op(
+    win_iocp_handle_service::implementation_type& impl, uint64_t offset,
+    const asio::mutable_buffer& buffer, operation* op)
+{
+  update_cancellation_thread_id(impl);
+  iocp_service_.work_started();
+
+  if (!is_open(impl))
+  {
+    iocp_service_.on_completion(op, asio::error::bad_descriptor);
+  }
+  else if (asio::buffer_size(buffer) == 0)
+  {
+    // A request to read 0 bytes on a handle is a no-op.
+    iocp_service_.on_completion(op);
+  }
+  else
+  {
+    // The operation object doubles as the OVERLAPPED structure passed to
+    // ReadFile; the 64-bit offset is split across Offset/OffsetHigh.
+    DWORD bytes_transferred = 0;
+    op->Offset = offset & 0xFFFFFFFF;
+    op->OffsetHigh = (offset >> 32) & 0xFFFFFFFF;
+    BOOL ok = ::ReadFile(impl.handle_,
+        asio::buffer_cast<LPVOID>(buffer),
+        static_cast<DWORD>(asio::buffer_size(buffer)),
+        &bytes_transferred, op);
+    DWORD last_error = ::GetLastError();
+    if (!ok && last_error != ERROR_IO_PENDING
+        && last_error != ERROR_MORE_DATA)
+    {
+      // Immediate failure: complete the operation through the port.
+      iocp_service_.on_completion(op, last_error, bytes_transferred);
+    }
+    else
+    {
+      // Success or pending: the port will deliver the completion.
+      iocp_service_.on_pending(op);
+    }
+  }
+}
+
+// Record which thread starts asynchronous operations on this handle:
+// 0 means none yet, a thread id means that single thread, and ~DWORD(0) is
+// the sentinel for "multiple threads" (making CancelIo-based cancellation
+// unsafe — see cancel()).
+void win_iocp_handle_service::update_cancellation_thread_id(
+    win_iocp_handle_service::implementation_type& impl)
+{
+  if (impl.safe_cancellation_thread_id_ == 0)
+    impl.safe_cancellation_thread_id_ = ::GetCurrentThreadId();
+  else if (impl.safe_cancellation_thread_id_ != ::GetCurrentThreadId())
+    impl.safe_cancellation_thread_id_ = ~DWORD(0);
+}
+
+// Close the handle without reporting errors; used by destroy(),
+// move_assign() and shutdown_service().
+void win_iocp_handle_service::close_for_destruction(implementation_type& impl)
+{
+  if (is_open(impl))
+  {
+    ASIO_HANDLER_OPERATION(("handle", &impl, "close"));
+
+    ::CloseHandle(impl.handle_);
+    impl.handle_ = INVALID_HANDLE_VALUE;
+    impl.safe_cancellation_thread_id_ = 0;
+  }
+}
+
+} // namespace detail
+} // namespace asio
+
+#include "asio/detail/pop_options.hpp"
+
+#endif // defined(ASIO_HAS_IOCP)
+
+#endif // ASIO_DETAIL_IMPL_WIN_IOCP_HANDLE_SERVICE_IPP

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5b1aba70/hadoop-hdfs-project/hadoop-hdfs-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/impl/win_iocp_io_service.hpp
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/impl/win_iocp_io_service.hpp b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/impl/win_iocp_io_service.hpp
new file mode 100644
index 0000000..02d346d
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/impl/win_iocp_io_service.hpp
@@ -0,0 +1,130 @@
+//
+// detail/impl/win_iocp_io_service.hpp
+// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+//
+// Copyright (c) 2003-2014 Christopher M. Kohlhoff (chris at kohlhoff dot com)
+//
+// Distributed under the Boost Software License, Version 1.0. (See accompanying
+// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
+//
+
+#ifndef ASIO_DETAIL_IMPL_WIN_IOCP_IO_SERVICE_HPP
+#define ASIO_DETAIL_IMPL_WIN_IOCP_IO_SERVICE_HPP
+
+#if defined(_MSC_VER) && (_MSC_VER >= 1200)
+# pragma once
+#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
+
+#include "asio/detail/config.hpp"
+
+#if defined(ASIO_HAS_IOCP)
+
+#include "asio/detail/addressof.hpp"
+#include "asio/detail/completion_handler.hpp"
+#include "asio/detail/fenced_block.hpp"
+#include "asio/detail/handler_alloc_helpers.hpp"
+#include "asio/detail/handler_invoke_helpers.hpp"
+
+#include "asio/detail/push_options.hpp"
+
+namespace asio {
+namespace detail {
+
+// Request invocation of the given handler. If the caller is already running
+// inside this io_service (detected via the thread call stack), the handler
+// is invoked immediately; otherwise it is wrapped in an operation and posted
+// to the completion port.
+template <typename Handler>
+void win_iocp_io_service::dispatch(Handler& handler)
+{
+  if (thread_call_stack::contains(this))
+  {
+    fenced_block b(fenced_block::full);
+    asio_handler_invoke_helpers::invoke(handler, handler);
+  }
+  else
+  {
+    // Allocate and construct an operation to wrap the handler.
+    typedef completion_handler<Handler> op;
+    typename op::ptr p = { asio::detail::addressof(handler),
+      asio_handler_alloc_helpers::allocate(
+        sizeof(op), handler), 0 };
+    p.p = new (p.v) op(handler);
+
+    ASIO_HANDLER_CREATION((p.p, "io_service", this, "dispatch"));
+
+    post_immediate_completion(p.p, false);
+    // Ownership has passed to the service; prevent the ptr's cleanup from
+    // destroying the operation.
+    p.v = p.p = 0;
+  }
+}
+
+// Request invocation of the given handler, never calling it from within this
+// function: the handler is always wrapped in an operation and posted to the
+// completion port.
+template <typename Handler>
+void win_iocp_io_service::post(Handler& handler)
+{
+  // Allocate and construct an operation to wrap the handler.
+  typedef completion_handler<Handler> op;
+  typename op::ptr p = { asio::detail::addressof(handler),
+    asio_handler_alloc_helpers::allocate(
+      sizeof(op), handler), 0 };
+  p.p = new (p.v) op(handler);
+
+  ASIO_HANDLER_CREATION((p.p, "io_service", this, "post"));
+
+  post_immediate_completion(p.p, false);
+  // Ownership has passed to the service; prevent the ptr's cleanup from
+  // destroying the operation.
+  p.v = p.p = 0;
+}
+
+// Register a timer queue with the service; forwards to the non-template
+// do_add_timer_queue, which works in terms of timer_queue_base.
+template <typename Time_Traits>
+void win_iocp_io_service::add_timer_queue(
+    timer_queue<Time_Traits>& queue)
+{
+  do_add_timer_queue(queue);
+}
+
+// Deregister a timer queue; forwards to the non-template
+// do_remove_timer_queue.
+template <typename Time_Traits>
+void win_iocp_io_service::remove_timer_queue(
+    timer_queue<Time_Traits>& queue)
+{
+  do_remove_timer_queue(queue);
+}
+
+// Schedule a timer in the given queue. If the new timer becomes the earliest
+// expiry, the service's timeout is refreshed so the timer thread wakes in
+// time.
+template <typename Time_Traits>
+void win_iocp_io_service::schedule_timer(timer_queue<Time_Traits>& queue,
+    const typename Time_Traits::time_type& time,
+    typename timer_queue<Time_Traits>::per_timer_data& timer, wait_op* op)
+{
+  // If the service has been shut down we silently discard the timer.
+  // (InterlockedExchangeAdd with 0 is an atomic read of shutdown_.)
+  if (::InterlockedExchangeAdd(&shutdown_, 0) != 0)
+  {
+    post_immediate_completion(op, false);
+    return;
+  }
+
+  mutex::scoped_lock lock(dispatch_mutex_);
+
+  bool earliest = queue.enqueue_timer(time, timer, op);
+  work_started();
+  if (earliest)
+    update_timeout();
+}
+
+// Cancel up to max_cancelled waiting operations on the given timer. The
+// cancelled operations are completed (with deferred completions) and the
+// number cancelled is returned.
+template <typename Time_Traits>
+std::size_t win_iocp_io_service::cancel_timer(timer_queue<Time_Traits>& queue,
+    typename timer_queue<Time_Traits>::per_timer_data& timer,
+    std::size_t max_cancelled)
+{
+  // If the service has been shut down we silently ignore the cancellation.
+  // (InterlockedExchangeAdd with 0 is an atomic read of shutdown_.)
+  if (::InterlockedExchangeAdd(&shutdown_, 0) != 0)
+    return 0;
+
+  mutex::scoped_lock lock(dispatch_mutex_);
+  op_queue<win_iocp_operation> ops;
+  std::size_t n = queue.cancel_timer(timer, ops, max_cancelled);
+  post_deferred_completions(ops);
+  return n;
+}
+
+} // namespace detail
+} // namespace asio
+
+#include "asio/detail/pop_options.hpp"
+
+#endif // defined(ASIO_HAS_IOCP)
+
+#endif // ASIO_DETAIL_IMPL_WIN_IOCP_IO_SERVICE_HPP

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5b1aba70/hadoop-hdfs-project/hadoop-hdfs-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/impl/win_iocp_io_service.ipp
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/impl/win_iocp_io_service.ipp b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/impl/win_iocp_io_service.ipp
new file mode 100644
index 0000000..a4f56dc
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/impl/win_iocp_io_service.ipp
@@ -0,0 +1,531 @@
+//
+// detail/impl/win_iocp_io_service.ipp
+// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+//
+// Copyright (c) 2003-2014 Christopher M. Kohlhoff (chris at kohlhoff dot com)
+//
+// Distributed under the Boost Software License, Version 1.0. (See accompanying
+// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
+//
+
+#ifndef ASIO_DETAIL_IMPL_WIN_IOCP_IO_SERVICE_IPP
+#define ASIO_DETAIL_IMPL_WIN_IOCP_IO_SERVICE_IPP
+
+#if defined(_MSC_VER) && (_MSC_VER >= 1200)
+# pragma once
+#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
+
+#include "asio/detail/config.hpp"
+
+#if defined(ASIO_HAS_IOCP)
+
+#include "asio/error.hpp"
+#include "asio/io_service.hpp"
+#include "asio/detail/handler_alloc_helpers.hpp"
+#include "asio/detail/handler_invoke_helpers.hpp"
+#include "asio/detail/limits.hpp"
+#include "asio/detail/throw_error.hpp"
+#include "asio/detail/win_iocp_io_service.hpp"
+
+#include "asio/detail/push_options.hpp"
+
+namespace asio {
+namespace detail {
+
+// Scope guard: calls work_finished() on the service when the enclosing
+// block exits, whether normally or via an exception thrown by a handler.
+struct win_iocp_io_service::work_finished_on_block_exit
+{
+  ~work_finished_on_block_exit()
+  {
+    io_service_->work_finished();
+  }
+
+  // Non-owning pointer to the service; set via aggregate initialisation.
+  win_iocp_io_service* io_service_;
+};
+
+// Body of the background timer thread. Each time the service's waitable
+// timer fires, it flags that timer dispatch is required and posts a
+// wake_for_dispatch packet to wake a thread blocked on the completion
+// port. The loop exits once shutdown_ becomes non-zero.
+struct win_iocp_io_service::timer_thread_function
+{
+  void operator()()
+  {
+    // InterlockedExchangeAdd of 0 is an atomic read of shutdown_.
+    while (::InterlockedExchangeAdd(&io_service_->shutdown_, 0) == 0)
+    {
+      if (::WaitForSingleObject(io_service_->waitable_timer_.handle,
+            INFINITE) == WAIT_OBJECT_0)
+      {
+        // Raise the flag first so the woken thread sees it, then post
+        // the wakeup packet.
+        ::InterlockedExchange(&io_service_->dispatch_required_, 1);
+        ::PostQueuedCompletionStatus(io_service_->iocp_.handle,
+            0, wake_for_dispatch, 0);
+      }
+    }
+  }
+
+  // Non-owning pointer to the service; set via aggregate initialisation.
+  win_iocp_io_service* io_service_;
+};
+
+// Construct the service and create its I/O completion port.
+// concurrency_hint is clamped to the largest DWORD value before being
+// passed to CreateIoCompletionPort. Failure to create the port is
+// reported by throwing a system_error-style exception via throw_error.
+win_iocp_io_service::win_iocp_io_service(
+    asio::io_service& io_service, size_t concurrency_hint)
+  : asio::detail::service_base<win_iocp_io_service>(io_service),
+    iocp_(),
+    outstanding_work_(0),
+    stopped_(0),
+    stop_event_posted_(0),
+    shutdown_(0),
+    gqcs_timeout_(get_gqcs_timeout()),
+    dispatch_required_(0)
+{
+  ASIO_HANDLER_TRACKING_INIT;
+
+  // Clamp the hint: size_t may be wider than DWORD on 64-bit Windows.
+  iocp_.handle = ::CreateIoCompletionPort(INVALID_HANDLE_VALUE, 0, 0,
+      static_cast<DWORD>(concurrency_hint < DWORD(~0)
+        ? concurrency_hint : DWORD(~0)));
+  if (!iocp_.handle)
+  {
+    DWORD last_error = ::GetLastError();
+    asio::error_code ec(last_error,
+        asio::error::get_system_category());
+    asio::detail::throw_error(ec, "iocp");
+  }
+}
+
+// Shut the service down, destroying (not running) every outstanding
+// operation: first signal the timer thread to exit, then drain timers,
+// the completed-ops queue and any packets still queued on the port
+// until the outstanding work count reaches zero, and finally join the
+// timer thread.
+void win_iocp_io_service::shutdown_service()
+{
+  ::InterlockedExchange(&shutdown_, 1);
+
+  if (timer_thread_.get())
+  {
+    // Fire the waitable timer almost immediately so the timer thread
+    // wakes up, observes shutdown_ == 1 and exits its loop.
+    LARGE_INTEGER timeout;
+    timeout.QuadPart = 1;
+    ::SetWaitableTimer(waitable_timer_.handle, &timeout, 1, 0, 0, FALSE);
+  }
+
+  // Atomic read of outstanding_work_; loop until all work is destroyed.
+  while (::InterlockedExchangeAdd(&outstanding_work_, 0) > 0)
+  {
+    op_queue<win_iocp_operation> ops;
+    timer_queues_.get_all_timers(ops);
+    ops.push(completed_ops_);
+    if (!ops.empty())
+    {
+      // Destroy queued operations without invoking their handlers.
+      while (win_iocp_operation* op = ops.front())
+      {
+        ops.pop();
+        ::InterlockedDecrement(&outstanding_work_);
+        op->destroy();
+      }
+    }
+    else
+    {
+      // Remaining work must still be in flight on the completion port:
+      // dequeue packets and destroy the associated operations.
+      DWORD bytes_transferred = 0;
+      dword_ptr_t completion_key = 0;
+      LPOVERLAPPED overlapped = 0;
+      ::GetQueuedCompletionStatus(iocp_.handle, &bytes_transferred,
+          &completion_key, &overlapped, gqcs_timeout_);
+      if (overlapped)
+      {
+        ::InterlockedDecrement(&outstanding_work_);
+        static_cast<win_iocp_operation*>(overlapped)->destroy();
+      }
+    }
+  }
+
+  if (timer_thread_.get())
+    timer_thread_->join();
+}
+
+// Associate a native handle with the service's I/O completion port so
+// that its overlapped operations are delivered here. On success ec is
+// cleared; on failure it carries the Windows error. The error code is
+// also returned for caller convenience.
+asio::error_code win_iocp_io_service::register_handle(
+    HANDLE handle, asio::error_code& ec)
+{
+  if (::CreateIoCompletionPort(handle, iocp_.handle, 0, 0) != 0)
+  {
+    // Association succeeded: report "no error".
+    ec = asio::error_code();
+    return ec;
+  }
+
+  // Capture the failure reason immediately, before any other API call
+  // can overwrite the thread's last-error value.
+  const DWORD last_error = ::GetLastError();
+  ec = asio::error_code(last_error,
+      asio::error::get_system_category());
+  return ec;
+}
+
+// Blocking event loop: repeatedly execute handlers until the service is
+// stopped or do_one() reports no more work. Returns the number of
+// handlers run, saturating at the maximum size_t value. If there is no
+// outstanding work on entry, the service is stopped and 0 is returned.
+size_t win_iocp_io_service::run(asio::error_code& ec)
+{
+  // InterlockedExchangeAdd of 0 is an atomic read of outstanding_work_.
+  if (::InterlockedExchangeAdd(&outstanding_work_, 0) == 0)
+  {
+    stop();
+    ec = asio::error_code();
+    return 0;
+  }
+
+  // Record this thread on the call stack for the duration of the run.
+  win_iocp_thread_info this_thread;
+  thread_call_stack::context ctx(this, this_thread);
+
+  size_t n = 0;
+  while (do_one(true, ec))
+    if (n != (std::numeric_limits<size_t>::max)())
+      ++n; // saturate rather than wrap on overflow
+  return n;
+}
+
+// Block until exactly one handler has been executed (or the service is
+// stopped / an error occurs). Returns 1 if a handler ran, 0 otherwise.
+// If there is no outstanding work on entry, stops the service and
+// returns 0 immediately.
+size_t win_iocp_io_service::run_one(asio::error_code& ec)
+{
+  // Atomic read of the outstanding work count.
+  if (::InterlockedExchangeAdd(&outstanding_work_, 0) == 0)
+  {
+    stop();
+    ec = asio::error_code();
+    return 0;
+  }
+
+  // Record this thread on the call stack while the handler runs.
+  win_iocp_thread_info this_thread;
+  thread_call_stack::context ctx(this, this_thread);
+
+  return do_one(true, ec);
+}
+
+// Run all handlers that are ready to run, without blocking. Returns
+// the number of handlers executed, saturating at the largest size_t.
+// If there is no outstanding work on entry, the service is stopped and
+// 0 is returned immediately.
+size_t win_iocp_io_service::poll(asio::error_code& ec)
+{
+  // Atomic read of the outstanding work count.
+  if (::InterlockedExchangeAdd(&outstanding_work_, 0) == 0)
+  {
+    stop();
+    ec = asio::error_code();
+    return 0;
+  }
+
+  // Record this thread on the call stack for the duration of the poll.
+  win_iocp_thread_info this_thread;
+  thread_call_stack::context ctx(this, this_thread);
+
+  size_t handlers_run = 0;
+  for (; do_one(false, ec); )
+  {
+    // Saturate rather than wrap on overflow.
+    if (handlers_run != (std::numeric_limits<size_t>::max)())
+      ++handlers_run;
+  }
+  return handlers_run;
+}
+
+// Run at most one ready handler without blocking. Returns 1 if a
+// handler ran, 0 otherwise. If there is no outstanding work on entry,
+// stops the service and returns 0 immediately.
+size_t win_iocp_io_service::poll_one(asio::error_code& ec)
+{
+  // Atomic read of the outstanding work count.
+  if (::InterlockedExchangeAdd(&outstanding_work_, 0) == 0)
+  {
+    stop();
+    ec = asio::error_code();
+    return 0;
+  }
+
+  // Record this thread on the call stack while the handler runs.
+  win_iocp_thread_info this_thread;
+  thread_call_stack::context ctx(this, this_thread);
+
+  return do_one(false, ec);
+}
+
+// Request that event processing stop. The first caller to flip the
+// stopped_ flag posts a single zero completion packet to wake one
+// thread blocked in GetQueuedCompletionStatus; the stop_event_posted_
+// flag ensures at most one such packet is in flight at a time (it is
+// cleared again by do_one when the packet is consumed).
+void win_iocp_io_service::stop()
+{
+  if (::InterlockedExchange(&stopped_, 1) == 0)
+  {
+    if (::InterlockedExchange(&stop_event_posted_, 1) == 0)
+    {
+      if (!::PostQueuedCompletionStatus(iocp_.handle, 0, 0, 0))
+      {
+        // Posting the wakeup packet failed; surface it as an exception.
+        DWORD last_error = ::GetLastError();
+        asio::error_code ec(last_error,
+            asio::error::get_system_category());
+        asio::detail::throw_error(ec, "pqcs");
+      }
+    }
+  }
+}
+
+// Mark a single operation as ready and queue it on the completion port
+// for execution by a thread running do_one(). If the port cannot accept
+// the packet, the operation is parked on completed_ops_ and
+// dispatch_required_ is raised so a dispatching thread picks it up.
+void win_iocp_io_service::post_deferred_completion(win_iocp_operation* op)
+{
+  // Flag the operation as ready.
+  op->ready_ = 1;
+
+  // Enqueue the operation on the I/O completion port.
+  if (!::PostQueuedCompletionStatus(iocp_.handle, 0, 0, op))
+  {
+    // Out of resources. Put on completed queue instead.
+    mutex::scoped_lock lock(dispatch_mutex_);
+    completed_ops_.push(op);
+    ::InterlockedExchange(&dispatch_required_, 1);
+  }
+}
+
+// Queue a whole batch of operations on the completion port. If posting
+// any one of them fails, that operation AND all the remaining ones are
+// moved to completed_ops_ in a single locked push, and
+// dispatch_required_ is raised so they are dispatched later.
+void win_iocp_io_service::post_deferred_completions(
+    op_queue<win_iocp_operation>& ops)
+{
+  while (win_iocp_operation* op = ops.front())
+  {
+    ops.pop();
+
+    // Flag the operation as ready.
+    op->ready_ = 1;
+
+    // Enqueue the operation on the I/O completion port.
+    if (!::PostQueuedCompletionStatus(iocp_.handle, 0, 0, op))
+    {
+      // Out of resources. Put on completed queue instead.
+      mutex::scoped_lock lock(dispatch_mutex_);
+      completed_ops_.push(op);
+      completed_ops_.push(ops); // also park everything not yet posted
+      ::InterlockedExchange(&dispatch_required_, 1);
+    }
+  }
+}
+
+// Destroy the given operations without invoking their handlers. Each
+// destroyed operation also releases one unit of outstanding work.
+void win_iocp_io_service::abandon_operations(
+    op_queue<win_iocp_operation>& ops)
+{
+  for (win_iocp_operation* op = ops.front(); op != 0; op = ops.front())
+  {
+    ops.pop();
+    ::InterlockedDecrement(&outstanding_work_);
+    op->destroy();
+  }
+}
+
+// Called once the initiating function (e.g. WSARecv) has returned and
+// the operation's OVERLAPPED structure may be touched. ready_ is a
+// two-phase handshake: the compare-exchange sets it to 1 if it was 0;
+// a returned value of 1 means the completion has already been recorded,
+// so the operation is posted to the port for execution now.
+void win_iocp_io_service::on_pending(win_iocp_operation* op)
+{
+  if (::InterlockedCompareExchange(&op->ready_, 1, 0) == 1)
+  {
+    // Enqueue the operation on the I/O completion port.
+    if (!::PostQueuedCompletionStatus(iocp_.handle,
+          0, overlapped_contains_result, op))
+    {
+      // Out of resources. Put on completed queue instead.
+      mutex::scoped_lock lock(dispatch_mutex_);
+      completed_ops_.push(op);
+      ::InterlockedExchange(&dispatch_required_, 1);
+    }
+  }
+}
+
+// Record a completion whose result is given as a raw Windows error and
+// byte count. The result is stashed in the OVERLAPPED fields (Internal
+// = category pointer, Offset = error value, OffsetHigh = bytes) and the
+// op is posted with the overlapped_contains_result key so do_one knows
+// to read the result from the structure rather than from GQCS.
+void win_iocp_io_service::on_completion(win_iocp_operation* op,
+    DWORD last_error, DWORD bytes_transferred)
+{
+  // Flag that the operation is ready for invocation.
+  op->ready_ = 1;
+
+  // Store results in the OVERLAPPED structure.
+  op->Internal = reinterpret_cast<ulong_ptr_t>(
+      &asio::error::get_system_category());
+  op->Offset = last_error;
+  op->OffsetHigh = bytes_transferred;
+
+  // Enqueue the operation on the I/O completion port.
+  if (!::PostQueuedCompletionStatus(iocp_.handle,
+        0, overlapped_contains_result, op))
+  {
+    // Out of resources. Put on completed queue instead.
+    mutex::scoped_lock lock(dispatch_mutex_);
+    completed_ops_.push(op);
+    ::InterlockedExchange(&dispatch_required_, 1);
+  }
+}
+
+// Record a completion whose result is already an asio::error_code.
+// Same scheme as the DWORD overload: the error's category pointer,
+// value and byte count are packed into the OVERLAPPED fields and the
+// op is posted with the overlapped_contains_result key.
+void win_iocp_io_service::on_completion(win_iocp_operation* op,
+    const asio::error_code& ec, DWORD bytes_transferred)
+{
+  // Flag that the operation is ready for invocation.
+  op->ready_ = 1;
+
+  // Store results in the OVERLAPPED structure.
+  op->Internal = reinterpret_cast<ulong_ptr_t>(&ec.category());
+  op->Offset = ec.value();
+  op->OffsetHigh = bytes_transferred;
+
+  // Enqueue the operation on the I/O completion port.
+  if (!::PostQueuedCompletionStatus(iocp_.handle,
+        0, overlapped_contains_result, op))
+  {
+    // Out of resources. Put on completed queue instead.
+    mutex::scoped_lock lock(dispatch_mutex_);
+    completed_ops_.push(op);
+    ::InterlockedExchange(&dispatch_required_, 1);
+  }
+}
+
+// Core dispatch loop: execute at most one handler. When block is true,
+// waits on the completion port (in slices of gqcs_timeout_) until a
+// handler runs, a real error occurs, or a stop event is consumed; when
+// false, returns as soon as nothing is immediately ready. Returns 1 if
+// a handler was executed, 0 otherwise; ec reports the outcome.
+size_t win_iocp_io_service::do_one(bool block, asio::error_code& ec)
+{
+  for (;;)
+  {
+    // Try to acquire responsibility for dispatching timers and completed ops.
+    if (::InterlockedCompareExchange(&dispatch_required_, 0, 1) == 1)
+    {
+      mutex::scoped_lock lock(dispatch_mutex_);
+
+      // Dispatch pending timers and operations.
+      op_queue<win_iocp_operation> ops;
+      ops.push(completed_ops_);
+      timer_queues_.get_ready_timers(ops);
+      post_deferred_completions(ops);
+      update_timeout();
+    }
+
+    // Get the next operation from the queue.
+    DWORD bytes_transferred = 0;
+    dword_ptr_t completion_key = 0;
+    LPOVERLAPPED overlapped = 0;
+    // Clear the last-error value first so that the GetLastError() call
+    // below reflects this GetQueuedCompletionStatus call only.
+    ::SetLastError(0);
+    BOOL ok = ::GetQueuedCompletionStatus(iocp_.handle, &bytes_transferred,
+        &completion_key, &overlapped, block ? gqcs_timeout_ : 0);
+    DWORD last_error = ::GetLastError();
+
+    if (overlapped)
+    {
+      win_iocp_operation* op = static_cast<win_iocp_operation*>(overlapped);
+      asio::error_code result_ec(last_error,
+          asio::error::get_system_category());
+
+      // We may have been passed the last_error and bytes_transferred in the
+      // OVERLAPPED structure itself.
+      if (completion_key == overlapped_contains_result)
+      {
+        result_ec = asio::error_code(static_cast<int>(op->Offset),
+            *reinterpret_cast<asio::error_category*>(op->Internal));
+        bytes_transferred = op->OffsetHigh;
+      }
+
+      // Otherwise ensure any result has been saved into the OVERLAPPED
+      // structure.
+      else
+      {
+        op->Internal = reinterpret_cast<ulong_ptr_t>(&result_ec.category());
+        op->Offset = result_ec.value();
+        op->OffsetHigh = bytes_transferred;
+      }
+
+      // Dispatch the operation only if ready. The operation may not be ready
+      // if the initiating function (e.g. a call to WSARecv) has not yet
+      // returned. This is because the initiating function still wants access
+      // to the operation's OVERLAPPED structure.
+      if (::InterlockedCompareExchange(&op->ready_, 1, 0) == 1)
+      {
+        // Ensure the count of outstanding work is decremented on block exit.
+        work_finished_on_block_exit on_exit = { this };
+        (void)on_exit;
+
+        op->complete(*this, result_ec, bytes_transferred);
+        ec = asio::error_code();
+        return 1;
+      }
+    }
+    else if (!ok)
+    {
+      if (last_error != WAIT_TIMEOUT)
+      {
+        ec = asio::error_code(last_error,
+            asio::error::get_system_category());
+        return 0;
+      }
+
+      // If we're not polling we need to keep going until we get a real handler.
+      if (block)
+        continue;
+
+      ec = asio::error_code();
+      return 0;
+    }
+    else if (completion_key == wake_for_dispatch)
+    {
+      // We have been woken up to try to acquire responsibility for dispatching
+      // timers and completed operations.
+    }
+    else
+    {
+      // A null overlapped with a non-wake key is a stop event posted by
+      // stop(). Indicate that there is no longer an in-flight stop event.
+      ::InterlockedExchange(&stop_event_posted_, 0);
+
+      // The stopped_ flag is always checked to ensure that any leftover
+      // stop events from a previous run invocation are ignored.
+      if (::InterlockedExchangeAdd(&stopped_, 0) != 0)
+      {
+        // Wake up next thread that is blocked on GetQueuedCompletionStatus.
+        if (::InterlockedExchange(&stop_event_posted_, 1) == 0)
+        {
+          if (!::PostQueuedCompletionStatus(iocp_.handle, 0, 0, 0))
+          {
+            last_error = ::GetLastError();
+            ec = asio::error_code(last_error,
+                asio::error::get_system_category());
+            return 0;
+          }
+        }
+
+        ec = asio::error_code();
+        return 0;
+      }
+    }
+  }
+}
+
+// Choose the timeout passed to GetQueuedCompletionStatus. On Windows
+// with major version >= 6 (Vista and later) an INFINITE wait is used;
+// otherwise the finite default_gqcs_timeout is returned.
+DWORD win_iocp_io_service::get_gqcs_timeout()
+{
+  OSVERSIONINFO version_info;
+  version_info.dwOSVersionInfoSize = sizeof(OSVERSIONINFO);
+  const BOOL have_version = ::GetVersionEx(&version_info);
+  if (have_version && version_info.dwMajorVersion >= 6)
+    return INFINITE;
+  return default_gqcs_timeout;
+}
+
+// Register a timer queue with the service. On first use this lazily
+// creates the waitable timer (armed with the maximum timeout period)
+// and starts the background timer thread that waits on it.
+void win_iocp_io_service::do_add_timer_queue(timer_queue_base& queue)
+{
+  mutex::scoped_lock lock(dispatch_mutex_);
+
+  timer_queues_.insert(&queue);
+
+  if (!waitable_timer_.handle)
+  {
+    waitable_timer_.handle = ::CreateWaitableTimer(0, FALSE, 0);
+    if (waitable_timer_.handle == 0)
+    {
+      DWORD last_error = ::GetLastError();
+      asio::error_code ec(last_error,
+          asio::error::get_system_category());
+      asio::detail::throw_error(ec, "timer");
+    }
+
+    // A negative due time passed to SetWaitableTimer means a relative
+    // interval, measured in 100-nanosecond units (hence the *10 from
+    // microseconds). The timer then fires periodically every
+    // max_timeout_msec milliseconds.
+    LARGE_INTEGER timeout;
+    timeout.QuadPart = -max_timeout_usec;
+    timeout.QuadPart *= 10;
+    ::SetWaitableTimer(waitable_timer_.handle,
+        &timeout, max_timeout_msec, 0, 0, FALSE);
+  }
+
+  if (!timer_thread_.get())
+  {
+    // Start the dedicated timer thread with a 64KB stack.
+    timer_thread_function thread_function = { this };
+    timer_thread_.reset(new thread(thread_function, 65536));
+  }
+}
+
+// Unregister a timer queue from the service. The waitable timer and
+// timer thread (if started) are left running; they are torn down in
+// shutdown_service().
+void win_iocp_io_service::do_remove_timer_queue(timer_queue_base& queue)
+{
+  mutex::scoped_lock lock(dispatch_mutex_);
+
+  timer_queues_.erase(&queue);
+}
+
+// Re-arm the waitable timer so it fires when the earliest queued timer
+// is due, but only if that is sooner than the maximum period the timer
+// is already programmed with.
+void win_iocp_io_service::update_timeout()
+{
+  if (timer_thread_.get())
+  {
+    // There's no point updating the waitable timer if the new timeout period
+    // exceeds the maximum timeout. In that case, we might as well wait for the
+    // existing period of the timer to expire.
+    long timeout_usec = timer_queues_.wait_duration_usec(max_timeout_usec);
+    if (timeout_usec < max_timeout_usec)
+    {
+      // Negative due time = relative interval in 100ns units.
+      LARGE_INTEGER timeout;
+      timeout.QuadPart = -timeout_usec;
+      timeout.QuadPart *= 10;
+      ::SetWaitableTimer(waitable_timer_.handle,
+          &timeout, max_timeout_msec, 0, 0, FALSE);
+    }
+  }
+}
+
+} // namespace detail
+} // namespace asio
+
+#include "asio/detail/pop_options.hpp"
+
+#endif // defined(ASIO_HAS_IOCP)
+
+#endif // ASIO_DETAIL_IMPL_WIN_IOCP_IO_SERVICE_IPP

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5b1aba70/hadoop-hdfs-project/hadoop-hdfs-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/impl/win_iocp_serial_port_service.ipp
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/impl/win_iocp_serial_port_service.ipp b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/impl/win_iocp_serial_port_service.ipp
new file mode 100644
index 0000000..4b2b671
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/native/libhdfspp/third_party/asio-1.10.2/include/asio/detail/impl/win_iocp_serial_port_service.ipp
@@ -0,0 +1,180 @@
+//
+// detail/impl/win_iocp_serial_port_service.ipp
+// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+//
+// Copyright (c) 2003-2014 Christopher M. Kohlhoff (chris at kohlhoff dot com)
+// Copyright (c) 2008 Rep Invariant Systems, Inc. (info@repinvariant.com)
+//
+// Distributed under the Boost Software License, Version 1.0. (See accompanying
+// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
+//
+
+#ifndef ASIO_DETAIL_IMPL_WIN_IOCP_SERIAL_PORT_SERVICE_IPP
+#define ASIO_DETAIL_IMPL_WIN_IOCP_SERIAL_PORT_SERVICE_IPP
+
+#if defined(_MSC_VER) && (_MSC_VER >= 1200)
+# pragma once
+#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
+
+#include "asio/detail/config.hpp"
+
+#if defined(ASIO_HAS_IOCP) && defined(ASIO_HAS_SERIAL_PORT)
+
+#include <cstring>
+#include "asio/detail/win_iocp_serial_port_service.hpp"
+
+#include "asio/detail/push_options.hpp"
+
+namespace asio {
+namespace detail {
+
+// Construct the serial port service; all I/O is delegated to the
+// underlying overlapped handle service.
+win_iocp_serial_port_service::win_iocp_serial_port_service(
+    asio::io_service& io_service)
+  : handle_service_(io_service)
+{
+}
+
+// No service-wide state to clean up here.
+void win_iocp_serial_port_service::shutdown_service()
+{
+}
+
+// Open the named serial device for overlapped read/write and configure
+// it with known defaults (binary mode, no NULL stripping, errors not
+// aborting) and socket-like timeouts. On success ownership of the
+// handle passes to handle_service_; on any failure the handle is closed
+// and ec carries the Windows error.
+asio::error_code win_iocp_serial_port_service::open(
+    win_iocp_serial_port_service::implementation_type& impl,
+    const std::string& device, asio::error_code& ec)
+{
+  if (is_open(impl))
+  {
+    ec = asio::error::already_open;
+    return ec;
+  }
+
+  // For convenience, add a leading \\.\ sequence if not already present.
+  std::string name = (device[0] == '\\') ? device : "\\\\.\\" + device;
+
+  // Open a handle to the serial port.
+  ::HANDLE handle = ::CreateFileA(name.c_str(),
+      GENERIC_READ | GENERIC_WRITE, 0, 0,
+      OPEN_EXISTING, FILE_FLAG_OVERLAPPED, 0);
+  if (handle == INVALID_HANDLE_VALUE)
+  {
+    DWORD last_error = ::GetLastError();
+    ec = asio::error_code(last_error,
+        asio::error::get_system_category());
+    return ec;
+  }
+
+  // Determine the initial serial port parameters.
+  using namespace std; // For memset.
+  ::DCB dcb;
+  memset(&dcb, 0, sizeof(DCB));
+  dcb.DCBlength = sizeof(DCB);
+  if (!::GetCommState(handle, &dcb))
+  {
+    DWORD last_error = ::GetLastError();
+    ::CloseHandle(handle);
+    ec = asio::error_code(last_error,
+        asio::error::get_system_category());
+    return ec;
+  }
+
+  // Set some default serial port parameters. This implementation does not
+  // support changing these, so they might as well be in a known state.
+  dcb.fBinary = TRUE; // Win32 only supports binary mode.
+  dcb.fDsrSensitivity = FALSE;
+  dcb.fNull = FALSE; // Do not ignore NULL characters.
+  dcb.fAbortOnError = FALSE; // Ignore serial framing errors.
+  if (!::SetCommState(handle, &dcb))
+  {
+    DWORD last_error = ::GetLastError();
+    ::CloseHandle(handle);
+    ec = asio::error_code(last_error,
+        asio::error::get_system_category());
+    return ec;
+  }
+
+  // Set up timeouts so that the serial port will behave similarly to a
+  // network socket. Reads wait for at least one byte, then return with
+  // whatever they have. Writes return once everything is out the door.
+  ::COMMTIMEOUTS timeouts;
+  timeouts.ReadIntervalTimeout = 1;
+  timeouts.ReadTotalTimeoutMultiplier = 0;
+  timeouts.ReadTotalTimeoutConstant = 0;
+  timeouts.WriteTotalTimeoutMultiplier = 0;
+  timeouts.WriteTotalTimeoutConstant = 0;
+  if (!::SetCommTimeouts(handle, &timeouts))
+  {
+    DWORD last_error = ::GetLastError();
+    ::CloseHandle(handle);
+    ec = asio::error_code(last_error,
+        asio::error::get_system_category());
+    return ec;
+  }
+
+  // We're done. Take ownership of the serial port handle.
+  // (assign() returning a non-zero ec indicates failure, in which case
+  // we must still close the handle ourselves.)
+  if (handle_service_.assign(impl, handle, ec))
+    ::CloseHandle(handle);
+  return ec;
+}
+
+// Read-modify-write of the port's DCB: fetch the current comm state,
+// let the option's store function update it, then write it back. A
+// failure in store() aborts without touching the port. The outcome is
+// reported through (and returned as) ec.
+asio::error_code win_iocp_serial_port_service::do_set_option(
+    win_iocp_serial_port_service::implementation_type& impl,
+    win_iocp_serial_port_service::store_function_type store,
+    const void* option, asio::error_code& ec)
+{
+  using namespace std; // For memcpy.
+
+  ::DCB dcb;
+  memset(&dcb, 0, sizeof(DCB));
+  dcb.DCBlength = sizeof(DCB);
+  if (!::GetCommState(handle_service_.native_handle(impl), &dcb))
+  {
+    DWORD last_error = ::GetLastError();
+    ec = asio::error_code(last_error,
+        asio::error::get_system_category());
+    return ec;
+  }
+
+  // store() encodes the option into the DCB; a non-zero ec means it
+  // rejected the option's value.
+  if (store(option, dcb, ec))
+    return ec;
+
+  if (!::SetCommState(handle_service_.native_handle(impl), &dcb))
+  {
+    DWORD last_error = ::GetLastError();
+    ec = asio::error_code(last_error,
+        asio::error::get_system_category());
+    return ec;
+  }
+
+  ec = asio::error_code();
+  return ec;
+}
+
+// Fetch the port's current DCB and let the option's load function
+// decode the requested setting out of it. The outcome is reported
+// through (and returned as) ec.
+asio::error_code win_iocp_serial_port_service::do_get_option(
+    const win_iocp_serial_port_service::implementation_type& impl,
+    win_iocp_serial_port_service::load_function_type load,
+    void* option, asio::error_code& ec) const
+{
+  using namespace std; // For memset.
+
+  ::DCB dcb;
+  memset(&dcb, 0, sizeof(DCB));
+  dcb.DCBlength = sizeof(DCB);
+  if (!::GetCommState(handle_service_.native_handle(impl), &dcb))
+  {
+    DWORD last_error = ::GetLastError();
+    ec = asio::error_code(last_error,
+        asio::error::get_system_category());
+    return ec;
+  }
+
+  return load(option, dcb, ec);
+}
+
+} // namespace detail
+} // namespace asio
+
+#include "asio/detail/pop_options.hpp"
+
+#endif // defined(ASIO_HAS_IOCP) && defined(ASIO_HAS_SERIAL_PORT)
+
+#endif // ASIO_DETAIL_IMPL_WIN_IOCP_SERIAL_PORT_SERVICE_IPP


Mime
View raw message