kudu-commits mailing list archives

From: danburk...@apache.org
Subject: [3/5] incubator-kudu git commit: Replace boost::{lock, unique_lock, mutex} with std lib equivalents
Date: Thu, 02 Jun 2016 00:52:55 GMT
Replace boost::{lock, unique_lock, mutex} with std lib equivalents

Change-Id: I0c27f72c726258793991006a728673af537414bb
Reviewed-on: http://gerrit.cloudera.org:8080/3262
Reviewed-by: Mike Percy <mpercy@apache.org>
Tested-by: Kudu Jenkins
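
The substitution is almost entirely mechanical: boost::lock_guard<M> becomes
std::lock_guard<M>, boost::unique_lock<M> becomes std::unique_lock<M> (with
boost::defer_lock mapping to std::defer_lock), boost::mutex becomes std::mutex,
and <boost/thread/locks.hpp> / <boost/thread/mutex.hpp> give way to <mutex>.
A minimal sketch of the before/after shape, using an illustrative class that is
not part of Kudu:

    #include <mutex>  // replaces <boost/thread/locks.hpp> and <boost/thread/mutex.hpp>

    class Widget {  // hypothetical example, not a Kudu class
     public:
      void Increment() {
        // was: boost::lock_guard<boost::mutex> lock(lock_);
        std::lock_guard<std::mutex> lock(lock_);
        counter_++;
      }

     private:
      mutable std::mutex lock_;  // was: boost::mutex lock_;
      int counter_ = 0;
    };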


Project: http://git-wip-us.apache.org/repos/asf/incubator-kudu/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-kudu/commit/cfa9a99f
Tree: http://git-wip-us.apache.org/repos/asf/incubator-kudu/tree/cfa9a99f
Diff: http://git-wip-us.apache.org/repos/asf/incubator-kudu/diff/cfa9a99f

Branch: refs/heads/master
Commit: cfa9a99f92d486c9d9afda82ce06f4f7f7efee4d
Parents: 0e9fd8c
Author: Dan Burkert <dan@cloudera.com>
Authored: Thu May 21 12:31:38 2015 -0700
Committer: Dan Burkert <dan@cloudera.com>
Committed: Thu Jun 2 00:51:53 2016 +0000

----------------------------------------------------------------------
 src/kudu/benchmarks/tpch/rpc_line_item_dao.cc   |  2 +-
 src/kudu/cfile/bloomfile.cc                     |  2 -
 src/kudu/consensus/consensus-test-util.h        | 71 ++++++++--------
 src/kudu/consensus/consensus_peers.cc           |  9 ++-
 src/kudu/consensus/consensus_queue.cc           | 42 +++++-----
 src/kudu/consensus/local_consensus.cc           | 14 ++--
 src/kudu/consensus/local_consensus.h            |  1 -
 src/kudu/consensus/log.cc                       | 22 ++---
 src/kudu/consensus/log_anchor_registry.cc       | 22 ++---
 src/kudu/consensus/log_reader.cc                | 32 ++++----
 src/kudu/consensus/mt-log-test.cc               |  6 +-
 src/kudu/consensus/peer_manager.cc              |  8 +-
 src/kudu/consensus/raft_consensus.cc            |  7 +-
 src/kudu/consensus/raft_consensus.h             |  5 +-
 src/kudu/experiments/rwlock-perf.cc             |  6 +-
 src/kudu/master/catalog_manager.cc              | 85 ++++++++++----------
 src/kudu/master/catalog_manager.h               |  1 -
 src/kudu/master/ts_descriptor.cc                | 41 +++++-----
 src/kudu/master/ts_manager.cc                   |  6 +-
 src/kudu/server/hybrid_clock.cc                 | 24 +++---
 src/kudu/server/webserver.cc                    | 20 ++---
 src/kudu/server/webserver.h                     |  3 +-
 src/kudu/tablet/compaction.h                    | 11 ++-
 src/kudu/tablet/diskrowset.cc                   |  7 +-
 src/kudu/tablet/diskrowset.h                    |  6 +-
 src/kudu/tablet/lock_manager-test.cc            |  9 +--
 src/kudu/tablet/lock_manager.cc                 | 17 ++--
 src/kudu/tablet/memrowset.h                     |  7 +-
 src/kudu/tablet/mock-rowsets.h                  |  3 +-
 src/kudu/tablet/mvcc-test.cc                    |  6 +-
 src/kudu/tablet/mvcc.cc                         | 38 ++++-----
 src/kudu/tablet/rowset.h                        |  8 +-
 src/kudu/tablet/tablet.cc                       | 68 ++++++++--------
 src/kudu/tablet/tablet.h                        |  6 +-
 src/kudu/tablet/tablet_bootstrap.cc             |  3 +-
 src/kudu/tablet/tablet_metadata.cc              | 38 ++++-----
 src/kudu/tablet/tablet_peer.cc                  | 24 +++---
 src/kudu/tablet/tablet_peer.h                   | 17 ++--
 src/kudu/tablet/tablet_peer_mm_ops.cc           | 15 ++--
 .../tablet/transactions/transaction_driver.cc   | 30 +++----
 .../tablet/transactions/write_transaction.cc    |  1 -
 src/kudu/tserver/scanners.cc                    | 13 +--
 src/kudu/tserver/scanners.h                     |  9 ++-
 src/kudu/tserver/ts_tablet_manager.cc           | 28 +++----
 src/kudu/tserver/ts_tablet_manager.h            |  2 +-
 src/kudu/twitter-demo/insert_consumer.cc        |  6 +-
 src/kudu/twitter-demo/twitter_streamer.cc       |  4 +-
 src/kudu/twitter-demo/twitter_streamer.h        |  4 +-
 src/kudu/util/boost_mutex_utils.h               |  8 +-
 src/kudu/util/locks.h                           |  2 +-
 src/kudu/util/mt-threadlocal-test.cc            | 14 ++--
 src/kudu/util/oid_generator.cc                  |  3 +-
 src/kudu/util/rw_semaphore-test.cc              |  7 +-
 src/kudu/util/rwc_lock-test.cc                  |  8 +-
 src/kudu/util/throttler.cc                      |  4 +-
 src/kudu/util/throttler.h                       |  2 -
 56 files changed, 431 insertions(+), 426 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-kudu/blob/cfa9a99f/src/kudu/benchmarks/tpch/rpc_line_item_dao.cc
----------------------------------------------------------------------
diff --git a/src/kudu/benchmarks/tpch/rpc_line_item_dao.cc b/src/kudu/benchmarks/tpch/rpc_line_item_dao.cc
index 71818aa..f76af5b 100644
--- a/src/kudu/benchmarks/tpch/rpc_line_item_dao.cc
+++ b/src/kudu/benchmarks/tpch/rpc_line_item_dao.cc
@@ -15,7 +15,7 @@
 // specific language governing permissions and limitations
 // under the License.
 
-#include <boost/thread/locks.hpp>
+#include <boost/function.hpp>
 #include <glog/logging.h>
 #include <vector>
 #include <utility>

http://git-wip-us.apache.org/repos/asf/incubator-kudu/blob/cfa9a99f/src/kudu/cfile/bloomfile.cc
----------------------------------------------------------------------
diff --git a/src/kudu/cfile/bloomfile.cc b/src/kudu/cfile/bloomfile.cc
index 9390de3..5e47f9d 100644
--- a/src/kudu/cfile/bloomfile.cc
+++ b/src/kudu/cfile/bloomfile.cc
@@ -15,8 +15,6 @@
 // specific language governing permissions and limitations
 // under the License.
 
-#include <boost/thread/locks.hpp>
-#include <boost/thread/mutex.hpp>
 #include <mutex>
 #include <sched.h>
 #include <string>

http://git-wip-us.apache.org/repos/asf/incubator-kudu/blob/cfa9a99f/src/kudu/consensus/consensus-test-util.h
----------------------------------------------------------------------
diff --git a/src/kudu/consensus/consensus-test-util.h b/src/kudu/consensus/consensus-test-util.h
index 4a45634..4d50bd9 100644
--- a/src/kudu/consensus/consensus-test-util.h
+++ b/src/kudu/consensus/consensus-test-util.h
@@ -15,10 +15,11 @@
 // specific language governing permissions and limitations
 // under the License.
 
-#include <boost/thread/locks.hpp>
+#include <boost/bind.hpp>
 #include <gmock/gmock.h>
 #include <map>
 #include <memory>
+#include <mutex>
 #include <string>
 #include <unordered_map>
 #include <utility>
@@ -131,7 +132,7 @@ class TestPeerProxy : public PeerProxy {
   // Register the RPC callback in order to call later.
   // We currently only support one request of each method being in flight at a time.
   virtual void RegisterCallback(Method method, const rpc::ResponseCallback& callback) {
-    boost::lock_guard<simple_spinlock> lock(lock_);
+    std::lock_guard<simple_spinlock> lock(lock_);
     InsertOrDie(&callbacks_, method, callback);
   }
 
@@ -139,7 +140,7 @@ class TestPeerProxy : public PeerProxy {
   virtual void Respond(Method method) {
     rpc::ResponseCallback callback;
     {
-      boost::lock_guard<simple_spinlock> lock(lock_);
+      std::lock_guard<simple_spinlock> lock(lock_);
       callback = FindOrDie(callbacks_, method);
       CHECK_EQ(1, callbacks_.erase(method));
       // Drop the lock before submitting to the pool, since the callback itself may
@@ -299,7 +300,7 @@ class NoOpTestPeerProxy : public TestPeerProxy {
 
     response->Clear();
     {
-      boost::lock_guard<simple_spinlock> lock(lock_);
+      std::lock_guard<simple_spinlock> lock(lock_);
       if (OpIdLessThan(last_received_, request->preceding_id())) {
         ConsensusErrorPB* error = response->mutable_status()->mutable_error();
         error->set_code(ConsensusErrorPB::PRECEDING_ENTRY_DIDNT_MATCH);
@@ -325,7 +326,7 @@ class NoOpTestPeerProxy : public TestPeerProxy {
                                          rpc::RpcController* controller,
                                          const rpc::ResponseCallback& callback) OVERRIDE {
     {
-      boost::lock_guard<simple_spinlock> lock(lock_);
+      std::lock_guard<simple_spinlock> lock(lock_);
       response->set_responder_uuid(peer_pb_.permanent_uuid());
       response->set_responder_term(request->candidate_term());
       response->set_vote_granted(true);
@@ -334,7 +335,7 @@ class NoOpTestPeerProxy : public TestPeerProxy {
   }
 
   const OpId& last_received() {
-    boost::lock_guard<simple_spinlock> lock(lock_);
+    std::lock_guard<simple_spinlock> lock(lock_);
     return last_received_;
   }
 
@@ -367,7 +368,7 @@ class TestPeerMapManager {
   explicit TestPeerMapManager(const RaftConfigPB& config) : config_(config) {}
 
   void AddPeer(const std::string& peer_uuid, const scoped_refptr<RaftConsensus>& peer) {
-    boost::lock_guard<simple_spinlock> lock(lock_);
+    std::lock_guard<simple_spinlock> lock(lock_);
     InsertOrDie(&peers_, peer_uuid, peer);
   }
 
@@ -378,7 +379,7 @@ class TestPeerMapManager {
 
   Status GetPeerByUuid(const std::string& peer_uuid,
                        scoped_refptr<RaftConsensus>* peer_out) const {
-    boost::lock_guard<simple_spinlock> lock(lock_);
+    std::lock_guard<simple_spinlock> lock(lock_);
     if (!FindCopy(peers_, peer_uuid, peer_out)) {
       return Status::NotFound("Other consensus instance was destroyed");
     }
@@ -386,12 +387,12 @@ class TestPeerMapManager {
   }
 
   void RemovePeer(const std::string& peer_uuid) {
-    boost::lock_guard<simple_spinlock> lock(lock_);
+    std::lock_guard<simple_spinlock> lock(lock_);
     peers_.erase(peer_uuid);
   }
 
   TestPeerMap GetPeerMapCopy() const {
-    boost::lock_guard<simple_spinlock> lock(lock_);
+    std::lock_guard<simple_spinlock> lock(lock_);
     return peers_;
   }
 
@@ -402,7 +403,7 @@ class TestPeerMapManager {
     // destroys the test proxies which in turn reach into this class.
     TestPeerMap copy = peers_;
     {
-      boost::lock_guard<simple_spinlock> lock(lock_);
+      std::lock_guard<simple_spinlock> lock(lock_);
       peers_.clear();
     }
 
@@ -460,7 +461,7 @@ class LocalTestPeerProxy : public TestPeerProxy {
 
     bool miss_comm_copy;
     {
-      boost::lock_guard<simple_spinlock> lock(lock_);
+      std::lock_guard<simple_spinlock> lock(lock_);
       miss_comm_copy = miss_comm_;
       miss_comm_ = false;
     }
@@ -535,7 +536,7 @@ class LocalTestPeerProxy : public TestPeerProxy {
 
   void InjectCommFaultLeaderSide() {
     VLOG(2) << this << ": injecting fault next time";
-    boost::lock_guard<simple_spinlock> lock(lock_);
+    std::lock_guard<simple_spinlock> lock(lock_);
     miss_comm_ = true;
   }
 
@@ -706,121 +707,121 @@ class CounterHooks : public Consensus::ConsensusFaultHooks {
 
   virtual Status PreStart() OVERRIDE {
     if (current_hook_.get()) RETURN_NOT_OK(current_hook_->PreStart());
-    boost::lock_guard<simple_spinlock> lock(lock_);
+    std::lock_guard<simple_spinlock> lock(lock_);
     pre_start_calls_++;
     return Status::OK();
   }
 
   virtual Status PostStart() OVERRIDE {
     if (current_hook_.get()) RETURN_NOT_OK(current_hook_->PostStart());
-    boost::lock_guard<simple_spinlock> lock(lock_);
+    std::lock_guard<simple_spinlock> lock(lock_);
     post_start_calls_++;
     return Status::OK();
   }
 
   virtual Status PreConfigChange() OVERRIDE {
     if (current_hook_.get()) RETURN_NOT_OK(current_hook_->PreConfigChange());
-    boost::lock_guard<simple_spinlock> lock(lock_);
+    std::lock_guard<simple_spinlock> lock(lock_);
     pre_config_change_calls_++;
     return Status::OK();
   }
 
   virtual Status PostConfigChange() OVERRIDE {
     if (current_hook_.get()) RETURN_NOT_OK(current_hook_->PostConfigChange());
-    boost::lock_guard<simple_spinlock> lock(lock_);
+    std::lock_guard<simple_spinlock> lock(lock_);
     post_config_change_calls_++;
     return Status::OK();
   }
 
   virtual Status PreReplicate() OVERRIDE {
     if (current_hook_.get()) RETURN_NOT_OK(current_hook_->PreReplicate());
-    boost::lock_guard<simple_spinlock> lock(lock_);
+    std::lock_guard<simple_spinlock> lock(lock_);
     pre_replicate_calls_++;
     return Status::OK();
   }
 
   virtual Status PostReplicate() OVERRIDE {
     if (current_hook_.get()) RETURN_NOT_OK(current_hook_->PostReplicate());
-    boost::lock_guard<simple_spinlock> lock(lock_);
+    std::lock_guard<simple_spinlock> lock(lock_);
     post_replicate_calls_++;
     return Status::OK();
   }
 
   virtual Status PreUpdate() OVERRIDE {
     if (current_hook_.get()) RETURN_NOT_OK(current_hook_->PreUpdate());
-    boost::lock_guard<simple_spinlock> lock(lock_);
+    std::lock_guard<simple_spinlock> lock(lock_);
     pre_update_calls_++;
     return Status::OK();
   }
 
   virtual Status PostUpdate() OVERRIDE {
     if (current_hook_.get()) RETURN_NOT_OK(current_hook_->PostUpdate());
-    boost::lock_guard<simple_spinlock> lock(lock_);
+    std::lock_guard<simple_spinlock> lock(lock_);
     post_update_calls_++;
     return Status::OK();
   }
 
   virtual Status PreShutdown() OVERRIDE {
     if (current_hook_.get()) RETURN_NOT_OK(current_hook_->PreShutdown());
-    boost::lock_guard<simple_spinlock> lock(lock_);
+    std::lock_guard<simple_spinlock> lock(lock_);
     pre_shutdown_calls_++;
     return Status::OK();
   }
 
   virtual Status PostShutdown() OVERRIDE {
     if (current_hook_.get()) RETURN_NOT_OK(current_hook_->PostShutdown());
-    boost::lock_guard<simple_spinlock> lock(lock_);
+    std::lock_guard<simple_spinlock> lock(lock_);
     post_shutdown_calls_++;
     return Status::OK();
   }
 
   int num_pre_start_calls() {
-    boost::lock_guard<simple_spinlock> lock(lock_);
+    std::lock_guard<simple_spinlock> lock(lock_);
     return pre_start_calls_;
   }
 
   int num_post_start_calls() {
-    boost::lock_guard<simple_spinlock> lock(lock_);
+    std::lock_guard<simple_spinlock> lock(lock_);
     return post_start_calls_;
   }
 
   int num_pre_config_change_calls() {
-    boost::lock_guard<simple_spinlock> lock(lock_);
+    std::lock_guard<simple_spinlock> lock(lock_);
     return pre_config_change_calls_;
   }
 
   int num_post_config_change_calls() {
-    boost::lock_guard<simple_spinlock> lock(lock_);
+    std::lock_guard<simple_spinlock> lock(lock_);
     return post_config_change_calls_;
   }
 
   int num_pre_replicate_calls() {
-    boost::lock_guard<simple_spinlock> lock(lock_);
+    std::lock_guard<simple_spinlock> lock(lock_);
     return pre_replicate_calls_;
   }
 
   int num_post_replicate_calls() {
-    boost::lock_guard<simple_spinlock> lock(lock_);
+    std::lock_guard<simple_spinlock> lock(lock_);
     return post_replicate_calls_;
   }
 
   int num_pre_update_calls() {
-    boost::lock_guard<simple_spinlock> lock(lock_);
+    std::lock_guard<simple_spinlock> lock(lock_);
     return pre_update_calls_;
   }
 
   int num_post_update_calls() {
-    boost::lock_guard<simple_spinlock> lock(lock_);
+    std::lock_guard<simple_spinlock> lock(lock_);
     return post_update_calls_;
   }
 
   int num_pre_shutdown_calls() {
-    boost::lock_guard<simple_spinlock> lock(lock_);
+    std::lock_guard<simple_spinlock> lock(lock_);
     return pre_shutdown_calls_;
   }
 
   int num_post_shutdown_calls() {
-    boost::lock_guard<simple_spinlock> lock(lock_);
+    std::lock_guard<simple_spinlock> lock(lock_);
     return post_shutdown_calls_;
   }
 
@@ -844,14 +845,14 @@ class CounterHooks : public Consensus::ConsensusFaultHooks {
 class TestRaftConsensusQueueIface : public PeerMessageQueueObserver {
  public:
   bool IsMajorityReplicated(int64_t index) {
-    boost::lock_guard<simple_spinlock> lock(lock_);
+    std::lock_guard<simple_spinlock> lock(lock_);
     return index <= majority_replicated_index_;
   }
 
  protected:
   virtual void UpdateMajorityReplicated(const OpId& majority_replicated,
                                         OpId* committed_index) OVERRIDE {
-    boost::lock_guard<simple_spinlock> lock(lock_);
+    std::lock_guard<simple_spinlock> lock(lock_);
     majority_replicated_index_ = majority_replicated.index();
     committed_index->CopyFrom(majority_replicated);
   }
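
std::lock_guard requires only that its template argument be BasicLockable (a
lock()/unlock() pair), which is why Kudu's own simple_spinlock slots into the
std guard unchanged throughout these hunks. A hypothetical stand-in spinlock
showing the shape (this is not Kudu's actual implementation):

    #include <atomic>
    #include <mutex>

    class TinySpinLock {  // illustrative stand-in for kudu::simple_spinlock
     public:
      void lock() {
        while (flag_.test_and_set(std::memory_order_acquire)) {
          // spin until the current holder clears the flag
        }
      }
      void unlock() { flag_.clear(std::memory_order_release); }

     private:
      std::atomic_flag flag_ = ATOMIC_FLAG_INIT;
    };

    void Example() {
      static TinySpinLock lock;
      std::lock_guard<TinySpinLock> guard(lock);  // same shape as the hunks above
      // ... critical section ...
    }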

http://git-wip-us.apache.org/repos/asf/incubator-kudu/blob/cfa9a99f/src/kudu/consensus/consensus_peers.cc
----------------------------------------------------------------------
diff --git a/src/kudu/consensus/consensus_peers.cc b/src/kudu/consensus/consensus_peers.cc
index 76f82da..5220e53 100644
--- a/src/kudu/consensus/consensus_peers.cc
+++ b/src/kudu/consensus/consensus_peers.cc
@@ -21,6 +21,7 @@
 #include <boost/bind.hpp>
 #include <gflags/gflags.h>
 #include <glog/logging.h>
+#include <mutex>
 #include <string>
 #include <utility>
 #include <vector>
@@ -115,7 +116,7 @@ void Peer::SetTermForTest(int term) {
 }
 
 Status Peer::Init() {
-  boost::lock_guard<simple_spinlock> lock(peer_lock_);
+  std::lock_guard<simple_spinlock> lock(peer_lock_);
   queue_->TrackPeer(peer_pb_.permanent_uuid());
   RETURN_NOT_OK(heartbeater_.Start());
   state_ = kPeerStarted;
@@ -129,7 +130,7 @@ Status Peer::SignalRequest(bool even_if_queue_empty) {
     return Status::OK();
   }
   {
-    boost::lock_guard<simple_spinlock> l(peer_lock_);
+    std::lock_guard<simple_spinlock> l(peer_lock_);
 
     if (PREDICT_FALSE(state_ == kPeerClosed)) {
       sem_.Release();
@@ -343,7 +344,7 @@ void Peer::Close() {
 
   // If the peer is already closed return.
   {
-    boost::lock_guard<simple_spinlock> lock(peer_lock_);
+    std::lock_guard<simple_spinlock> lock(peer_lock_);
     if (state_ == kPeerClosed) return;
     DCHECK(state_ == kPeerRunning || state_ == kPeerStarted) << "Unexpected state: " << state_;
     state_ = kPeerClosed;
@@ -353,7 +354,7 @@ void Peer::Close() {
   // Acquire the semaphore to wait for any concurrent request to finish.
   // They will see the state_ == kPeerClosed and not start any new requests,
   // but we can't currently cancel the already-sent ones. (see KUDU-699)
-  boost::lock_guard<Semaphore> l(sem_);
+  std::lock_guard<Semaphore> l(sem_);
   queue_->UntrackPeer(peer_pb_.permanent_uuid());
   // We don't own the ops (the queue does).
   request_.mutable_ops()->ExtractSubrange(0, request_.ops_size(), nullptr);
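
The std::lock_guard<Semaphore> above compiles for the same reason: Kudu's
Semaphore can be driven as a BasicLockable. A sketch of such a facade over a
POSIX counting semaphore (purely illustrative; assume the real class maps
lock()/unlock() onto its acquire/release operations):

    #include <mutex>
    #include <semaphore.h>

    class Semaphore {  // hypothetical BasicLockable facade
     public:
      explicit Semaphore(unsigned int count) { sem_init(&sem_, 0, count); }
      ~Semaphore() { sem_destroy(&sem_); }

      void lock() { sem_wait(&sem_); }    // acquire a permit
      void unlock() { sem_post(&sem_); }  // release it

     private:
      sem_t sem_;
    };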

http://git-wip-us.apache.org/repos/asf/incubator-kudu/blob/cfa9a99f/src/kudu/consensus/consensus_queue.cc
----------------------------------------------------------------------
diff --git a/src/kudu/consensus/consensus_queue.cc b/src/kudu/consensus/consensus_queue.cc
index 040a5c2..0a8f478 100644
--- a/src/kudu/consensus/consensus_queue.cc
+++ b/src/kudu/consensus/consensus_queue.cc
@@ -17,9 +17,9 @@
 #include "kudu/consensus/consensus_queue.h"
 
 #include <algorithm>
-#include <boost/thread/locks.hpp>
 #include <gflags/gflags.h>
 #include <iostream>
+#include <mutex>
 #include <string>
 #include <utility>
 
@@ -119,7 +119,7 @@ PeerMessageQueue::PeerMessageQueue(const scoped_refptr<MetricEntity>& metric_ent
 }
 
 void PeerMessageQueue::Init(const OpId& last_locally_replicated) {
-  boost::lock_guard<simple_spinlock> lock(queue_lock_);
+  std::lock_guard<simple_spinlock> lock(queue_lock_);
   CHECK_EQ(queue_state_.state, kQueueConstructed);
   log_cache_.Init(last_locally_replicated);
   queue_state_.last_appended = last_locally_replicated;
@@ -130,7 +130,7 @@ void PeerMessageQueue::Init(const OpId& last_locally_replicated) {
 void PeerMessageQueue::SetLeaderMode(const OpId& committed_index,
                                      int64_t current_term,
                                      const RaftConfigPB& active_config) {
-  boost::lock_guard<simple_spinlock> lock(queue_lock_);
+  std::lock_guard<simple_spinlock> lock(queue_lock_);
   CHECK(committed_index.IsInitialized());
   queue_state_.current_term = current_term;
   queue_state_.committed_index = committed_index;
@@ -155,7 +155,7 @@ void PeerMessageQueue::SetLeaderMode(const OpId& committed_index,
 }
 
 void PeerMessageQueue::SetNonLeaderMode() {
-  boost::lock_guard<simple_spinlock> lock(queue_lock_);
+  std::lock_guard<simple_spinlock> lock(queue_lock_);
   queue_state_.active_config.reset();
   queue_state_.mode = NON_LEADER;
   queue_state_.majority_size_ = -1;
@@ -164,7 +164,7 @@ void PeerMessageQueue::SetNonLeaderMode() {
 }
 
 void PeerMessageQueue::TrackPeer(const string& uuid) {
-  boost::lock_guard<simple_spinlock> lock(queue_lock_);
+  std::lock_guard<simple_spinlock> lock(queue_lock_);
   TrackPeerUnlocked(uuid);
 }
 
@@ -192,7 +192,7 @@ void PeerMessageQueue::TrackPeerUnlocked(const string& uuid) {
 }
 
 void PeerMessageQueue::UntrackPeer(const string& uuid) {
-  boost::lock_guard<simple_spinlock> lock(queue_lock_);
+  std::lock_guard<simple_spinlock> lock(queue_lock_);
   TrackedPeer* peer = EraseKeyReturnValuePtr(&peers_map_, uuid);
   if (peer != nullptr) {
     delete peer;
@@ -229,7 +229,7 @@ void PeerMessageQueue::LocalPeerAppendFinished(const OpId& id,
   *fake_response.mutable_status()->mutable_last_received() = id;
   *fake_response.mutable_status()->mutable_last_received_current_leader() = id;
   {
-    boost::unique_lock<simple_spinlock> lock(queue_lock_);
+    std::unique_lock<simple_spinlock> lock(queue_lock_);
     fake_response.mutable_status()->set_last_committed_idx(queue_state_.committed_index.index());
   }
   bool junk;
@@ -246,7 +246,7 @@ Status PeerMessageQueue::AppendOperations(const vector<ReplicateRefPtr>& msgs,
                                           const StatusCallback& log_append_callback) {
 
   DFAKE_SCOPED_LOCK(append_fake_lock_);
-  boost::unique_lock<simple_spinlock> lock(queue_lock_);
+  std::unique_lock<simple_spinlock> lock(queue_lock_);
 
   OpId last_id = msgs.back()->get()->id();
 
@@ -652,17 +652,17 @@ PeerMessageQueue::TrackedPeer PeerMessageQueue::GetTrackedPeerForTests(string uu
 }
 
 OpId PeerMessageQueue::GetAllReplicatedIndexForTests() const {
-  boost::lock_guard<simple_spinlock> lock(queue_lock_);
+  std::lock_guard<simple_spinlock> lock(queue_lock_);
   return queue_state_.all_replicated_opid;
 }
 
 OpId PeerMessageQueue::GetCommittedIndexForTests() const {
-  boost::lock_guard<simple_spinlock> lock(queue_lock_);
+  std::lock_guard<simple_spinlock> lock(queue_lock_);
   return queue_state_.committed_index;
 }
 
 OpId PeerMessageQueue::GetMajorityReplicatedOpIdForTests() const {
-  boost::lock_guard<simple_spinlock> lock(queue_lock_);
+  std::lock_guard<simple_spinlock> lock(queue_lock_);
   return queue_state_.majority_replicated_opid;
 }
 
@@ -679,7 +679,7 @@ void PeerMessageQueue::UpdateMetrics() {
 }
 
 void PeerMessageQueue::DumpToStrings(vector<string>* lines) const {
-  boost::lock_guard<simple_spinlock> lock(queue_lock_);
+  std::lock_guard<simple_spinlock> lock(queue_lock_);
   DumpToStringsUnlocked(lines);
 }
 
@@ -696,7 +696,7 @@ void PeerMessageQueue::DumpToStringsUnlocked(vector<string>* lines) const {
 void PeerMessageQueue::DumpToHtml(std::ostream& out) const {
   using std::endl;
 
-  boost::lock_guard<simple_spinlock> lock(queue_lock_);
+  std::lock_guard<simple_spinlock> lock(queue_lock_);
   out << "<h3>Watermarks</h3>" << endl;
   out << "<table>" << endl;;
   out << "  <tr><th>Peer</th><th>Watermark</th></tr>" << endl;
@@ -717,7 +717,7 @@ void PeerMessageQueue::ClearUnlocked() {
 
 void PeerMessageQueue::Close() {
   observers_pool_->Shutdown();
-  boost::lock_guard<simple_spinlock> lock(queue_lock_);
+  std::lock_guard<simple_spinlock> lock(queue_lock_);
   ClearUnlocked();
 }
 
@@ -728,7 +728,7 @@ int64_t PeerMessageQueue::GetQueuedOperationsSizeBytesForTests() const {
 string PeerMessageQueue::ToString() const {
   // Even though metrics are thread-safe obtain the lock so that we get
   // a "consistent" snapshot of the metrics.
-  boost::lock_guard<simple_spinlock> lock(queue_lock_);
+  std::lock_guard<simple_spinlock> lock(queue_lock_);
   return ToStringUnlocked();
 }
 
@@ -740,7 +740,7 @@ string PeerMessageQueue::ToStringUnlocked() const {
 }
 
 void PeerMessageQueue::RegisterObserver(PeerMessageQueueObserver* observer) {
-  boost::lock_guard<simple_spinlock> lock(queue_lock_);
+  std::lock_guard<simple_spinlock> lock(queue_lock_);
   auto iter = std::find(observers_.begin(), observers_.end(), observer);
   if (iter == observers_.end()) {
     observers_.push_back(observer);
@@ -748,7 +748,7 @@ void PeerMessageQueue::RegisterObserver(PeerMessageQueueObserver* observer) {
 }
 
 Status PeerMessageQueue::UnRegisterObserver(PeerMessageQueueObserver* observer) {
-  boost::lock_guard<simple_spinlock> lock(queue_lock_);
+  std::lock_guard<simple_spinlock> lock(queue_lock_);
   auto iter = std::find(observers_.begin(), observers_.end(), observer);
   if (iter == observers_.end()) {
     return Status::NotFound("Can't find observer.");
@@ -790,7 +790,7 @@ void PeerMessageQueue::NotifyObserversOfMajorityReplOpChangeTask(
     const OpId new_majority_replicated_op) {
   std::vector<PeerMessageQueueObserver*> copy;
   {
-    boost::lock_guard<simple_spinlock> lock(queue_lock_);
+    std::lock_guard<simple_spinlock> lock(queue_lock_);
     copy = observers_;
   }
 
@@ -802,7 +802,7 @@ void PeerMessageQueue::NotifyObserversOfMajorityReplOpChangeTask(
   }
 
   {
-    boost::lock_guard<simple_spinlock> lock(queue_lock_);
+    std::lock_guard<simple_spinlock> lock(queue_lock_);
     if (new_committed_index.IsInitialized() &&
         new_committed_index.index() > queue_state_.committed_index.index()) {
       queue_state_.committed_index.CopyFrom(new_committed_index);
@@ -814,7 +814,7 @@ void PeerMessageQueue::NotifyObserversOfTermChangeTask(int64_t term) {
   MAYBE_INJECT_RANDOM_LATENCY(FLAGS_consensus_inject_latency_ms_in_notifications);
   std::vector<PeerMessageQueueObserver*> copy;
   {
-    boost::lock_guard<simple_spinlock> lock(queue_lock_);
+    std::lock_guard<simple_spinlock> lock(queue_lock_);
     copy = observers_;
   }
   OpId new_committed_index;
@@ -838,7 +838,7 @@ void PeerMessageQueue::NotifyObserversOfFailedFollowerTask(const string& uuid,
   MAYBE_INJECT_RANDOM_LATENCY(FLAGS_consensus_inject_latency_ms_in_notifications);
   std::vector<PeerMessageQueueObserver*> observers_copy;
   {
-    boost::lock_guard<simple_spinlock> lock(queue_lock_);
+    std::lock_guard<simple_spinlock> lock(queue_lock_);
     observers_copy = observers_;
   }
   OpId new_committed_index;
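
A recurring idiom in the hunks above: snapshot the observer list under the
spinlock, release it, then invoke the callbacks, so that a re-entrant observer
cannot deadlock on queue_lock_. Sketched with placeholder types:

    #include <mutex>
    #include <vector>

    struct Observer { void Notify() {} };  // placeholder

    std::mutex queue_lock;                 // stands in for the simple_spinlock
    std::vector<Observer*> observers;

    void NotifyAll() {
      std::vector<Observer*> copy;
      {
        std::lock_guard<std::mutex> lock(queue_lock);
        copy = observers;  // copy under the lock...
      }
      for (Observer* o : copy) {
        o->Notify();       // ...notify outside it
      }
    }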

http://git-wip-us.apache.org/repos/asf/incubator-kudu/blob/cfa9a99f/src/kudu/consensus/local_consensus.cc
----------------------------------------------------------------------
diff --git a/src/kudu/consensus/local_consensus.cc b/src/kudu/consensus/local_consensus.cc
index 104ebcb..2aa321e 100644
--- a/src/kudu/consensus/local_consensus.cc
+++ b/src/kudu/consensus/local_consensus.cc
@@ -17,8 +17,8 @@
 
 #include "kudu/consensus/local_consensus.h"
 
-#include <boost/thread/locks.hpp>
 #include <iostream>
+#include <mutex>
 
 #include "kudu/consensus/log.h"
 #include "kudu/consensus/metadata.pb.h"
@@ -60,7 +60,7 @@ Status LocalConsensus::Start(const ConsensusBootstrapInfo& info) {
   LOG_WITH_PREFIX(INFO) << "Starting LocalConsensus...";
 
   {
-    boost::lock_guard<simple_spinlock> lock(lock_);
+    std::lock_guard<simple_spinlock> lock(lock_);
 
     const RaftConfigPB& config = cmeta_->committed_config();
     CHECK(config.local()) << "Local consensus must be passed a local config";
@@ -98,7 +98,7 @@ Status LocalConsensus::ResubmitOrphanedReplicates(const std::vector<ReplicateMsg
 }
 
 bool LocalConsensus::IsRunning() const {
-  boost::lock_guard<simple_spinlock> lock(lock_);
+  std::lock_guard<simple_spinlock> lock(lock_);
   return state_ == kRunning;
 }
 
@@ -117,7 +117,7 @@ Status LocalConsensus::Replicate(const scoped_refptr<ConsensusRound>& round) {
 
   LogEntryBatch* reserved_entry_batch;
   {
-    boost::lock_guard<simple_spinlock> lock(lock_);
+    std::lock_guard<simple_spinlock> lock(lock_);
 
     // create the new op id for the entry.
     cur_op_id->set_index(next_op_id_index_++);
@@ -164,12 +164,12 @@ Status LocalConsensus::RequestVote(const VoteRequestPB* request,
 }
 
 ConsensusStatePB LocalConsensus::ConsensusState(ConsensusConfigType type) const {
-  boost::lock_guard<simple_spinlock> lock(lock_);
+  std::lock_guard<simple_spinlock> lock(lock_);
   return cmeta_->ToConsensusStatePB(type);
 }
 
 RaftConfigPB LocalConsensus::CommittedConfig() const {
-  boost::lock_guard<simple_spinlock> lock(lock_);
+  std::lock_guard<simple_spinlock> lock(lock_);
   return cmeta_->committed_config();
 }
 
@@ -180,7 +180,7 @@ void LocalConsensus::Shutdown() {
 void LocalConsensus::DumpStatusHtml(std::ostream& out) const {
   out << "<h1>Local Consensus Status</h1>\n";
 
-  boost::lock_guard<simple_spinlock> lock(lock_);
+  std::lock_guard<simple_spinlock> lock(lock_);
   out << "next op: " << next_op_id_index_;
 }
 

http://git-wip-us.apache.org/repos/asf/incubator-kudu/blob/cfa9a99f/src/kudu/consensus/local_consensus.h
----------------------------------------------------------------------
diff --git a/src/kudu/consensus/local_consensus.h b/src/kudu/consensus/local_consensus.h
index 9e5f799..db9db04 100644
--- a/src/kudu/consensus/local_consensus.h
+++ b/src/kudu/consensus/local_consensus.h
@@ -17,7 +17,6 @@
 #ifndef KUDU_CONSENSUS_LOCAL_CONSENSUS_H_
 #define KUDU_CONSENSUS_LOCAL_CONSENSUS_H_
 
-#include <boost/thread/locks.hpp>
 #include <string>
 #include <vector>
 

http://git-wip-us.apache.org/repos/asf/incubator-kudu/blob/cfa9a99f/src/kudu/consensus/log.cc
----------------------------------------------------------------------
diff --git a/src/kudu/consensus/log.cc b/src/kudu/consensus/log.cc
index 7987304..62eca2b 100644
--- a/src/kudu/consensus/log.cc
+++ b/src/kudu/consensus/log.cc
@@ -18,6 +18,8 @@
 #include "kudu/consensus/log.h"
 
 #include <algorithm>
+#include <boost/thread/shared_mutex.hpp>
+#include <mutex>
 
 #include "kudu/common/wire_protocol.h"
 #include "kudu/consensus/log_index.h"
@@ -138,7 +140,7 @@ class Log::AppendThread {
   Log* const log_;
 
   // Lock to protect access to thread_ during shutdown.
-  mutable boost::mutex lock_;
+  mutable std::mutex lock_;
   scoped_refptr<Thread> thread_;
 };
 
@@ -233,7 +235,7 @@ void Log::AppendThread::RunThread() {
 
 void Log::AppendThread::Shutdown() {
   log_->entry_queue()->Shutdown();
-  boost::lock_guard<boost::mutex> lock_guard(lock_);
+  std::lock_guard<std::mutex> lock_guard(lock_);
   if (thread_) {
     VLOG(1) << "Shutting down log append thread for tablet " << log_->tablet_id();
     CHECK_OK(ThreadJoiner(thread_.get()).Join());
@@ -301,7 +303,7 @@ Log::Log(LogOptions options, FsManager* fs_manager, string log_path,
 }
 
 Status Log::Init() {
-  boost::lock_guard<percpu_rwlock> write_lock(state_lock_);
+  std::lock_guard<percpu_rwlock> write_lock(state_lock_);
   CHECK_EQ(kLogInitialized, log_state_);
 
   // Init the index
@@ -343,7 +345,7 @@ Status Log::Init() {
 }
 
 Status Log::AsyncAllocateSegment() {
-  boost::lock_guard<boost::shared_mutex> lock_guard(allocation_lock_);
+  std::lock_guard<boost::shared_mutex> lock_guard(allocation_lock_);
   CHECK_EQ(allocation_state_, kAllocationNotStarted);
   allocation_status_.Reset();
   allocation_state_ = kAllocationInProgress;
@@ -486,7 +488,7 @@ Status Log::DoAppend(LogEntryBatch* entry_batch, bool caller_owns_operation) {
     // is not the last durable operation. Either move this to tablet peer (since we're
     // using in flights anyway no need to scan for ids here) or actually delay doing this
     // until fsync() has been done. See KUDU-527.
-    boost::lock_guard<rw_spinlock> write_lock(last_entry_op_id_lock_);
+    std::lock_guard<rw_spinlock> write_lock(last_entry_op_id_lock_);
     last_entry_op_id_.CopyFrom(entry_batch->MaxReplicateOpId());
   }
 
@@ -706,7 +708,7 @@ Status Log::GC(int64_t min_op_idx, int32_t* num_gced) {
     SegmentSequence segments_to_delete;
 
     {
-      boost::lock_guard<percpu_rwlock> l(state_lock_);
+      std::lock_guard<percpu_rwlock> l(state_lock_);
       CHECK_EQ(kLogWriting, log_state_);
 
       GetSegmentsToGCUnlocked(min_op_idx, &segments_to_delete);
@@ -778,7 +780,7 @@ void Log::GetMaxIndexesToSegmentSizeMap(int64_t min_op_idx,
 
 void Log::SetSchemaForNextLogSegment(const Schema& schema,
                                      uint32_t version) {
-  boost::lock_guard<rw_spinlock> l(schema_lock_);
+  std::lock_guard<rw_spinlock> l(schema_lock_);
   schema_ = schema;
   schema_version_ = version;
 }
@@ -787,7 +789,7 @@ Status Log::Close() {
   allocation_pool_->Shutdown();
   append_thread_->Shutdown();
 
-  boost::lock_guard<percpu_rwlock> l(state_lock_);
+  std::lock_guard<percpu_rwlock> l(state_lock_);
   switch (log_state_) {
     case kLogWriting:
       if (log_hooks_) {
@@ -846,7 +848,7 @@ Status Log::PreAllocateNewSegment() {
   }
 
   {
-    boost::lock_guard<boost::shared_mutex> lock_guard(allocation_lock_);
+    std::lock_guard<boost::shared_mutex> lock_guard(allocation_lock_);
     allocation_state_ = kAllocationFinished;
   }
   return Status::OK();
@@ -895,7 +897,7 @@ Status Log::SwitchToAllocatedSegment() {
   // need to be able to replay the segments for other peers.
   {
     if (active_segment_.get() != nullptr) {
-      boost::lock_guard<percpu_rwlock> l(state_lock_);
+      std::lock_guard<percpu_rwlock> l(state_lock_);
       CHECK_OK(ReplaceSegmentInReaderUnlocked());
     }
   }
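
Note that boost::shared_mutex itself survives the commit: std::shared_timed_mutex
only arrived in C++14 and std::shared_mutex in C++17, so just the guards move to
the std lib here. Taking an exclusive lock through std::lock_guard works because
shared_mutex's plain lock()/unlock() pair is its writer path. A sketch:

    #include <boost/thread/shared_mutex.hpp>
    #include <mutex>

    boost::shared_mutex allocation_lock;  // illustrative, mirroring allocation_lock_

    void WriterPath() {
      // The std guard drives the exclusive lock()/unlock() pair; readers
      // would still take boost::shared_lock<boost::shared_mutex>.
      std::lock_guard<boost::shared_mutex> guard(allocation_lock);
      // ... mutate state that readers observe under the shared lock ...
    }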

http://git-wip-us.apache.org/repos/asf/incubator-kudu/blob/cfa9a99f/src/kudu/consensus/log_anchor_registry.cc
----------------------------------------------------------------------
diff --git a/src/kudu/consensus/log_anchor_registry.cc b/src/kudu/consensus/log_anchor_registry.cc
index 0ad9a7d..017ef79 100644
--- a/src/kudu/consensus/log_anchor_registry.cc
+++ b/src/kudu/consensus/log_anchor_registry.cc
@@ -18,7 +18,7 @@
 #include "kudu/consensus/log_anchor_registry.h"
 #include "kudu/consensus/opid_util.h"
 
-#include <boost/thread/locks.hpp>
+#include <mutex>
 #include <string>
 
 #include "kudu/gutil/strings/substitute.h"
@@ -42,14 +42,14 @@ LogAnchorRegistry::~LogAnchorRegistry() {
 void LogAnchorRegistry::Register(int64_t log_index,
                                  const string& owner,
                                  LogAnchor* anchor) {
-  boost::lock_guard<simple_spinlock> l(lock_);
+  std::lock_guard<simple_spinlock> l(lock_);
   RegisterUnlocked(log_index, owner, anchor);
 }
 
 Status LogAnchorRegistry::UpdateRegistration(int64_t log_index,
                                              const std::string& owner,
                                              LogAnchor* anchor) {
-  boost::lock_guard<simple_spinlock> l(lock_);
+  std::lock_guard<simple_spinlock> l(lock_);
   RETURN_NOT_OK_PREPEND(UnregisterUnlocked(anchor),
                         "Unable to swap registration, anchor not registered")
   RegisterUnlocked(log_index, owner, anchor);
@@ -57,18 +57,18 @@ Status LogAnchorRegistry::UpdateRegistration(int64_t log_index,
 }
 
 Status LogAnchorRegistry::Unregister(LogAnchor* anchor) {
-  boost::lock_guard<simple_spinlock> l(lock_);
+  std::lock_guard<simple_spinlock> l(lock_);
   return UnregisterUnlocked(anchor);
 }
 
 Status LogAnchorRegistry::UnregisterIfAnchored(LogAnchor* anchor) {
-  boost::lock_guard<simple_spinlock> l(lock_);
+  std::lock_guard<simple_spinlock> l(lock_);
   if (!anchor->is_registered) return Status::OK();
   return UnregisterUnlocked(anchor);
 }
 
 Status LogAnchorRegistry::GetEarliestRegisteredLogIndex(int64_t* log_index) {
-  boost::lock_guard<simple_spinlock> l(lock_);
+  std::lock_guard<simple_spinlock> l(lock_);
   auto iter = anchors_.begin();
   if (iter == anchors_.end()) {
     return Status::NotFound("No anchors in registry");
@@ -80,13 +80,13 @@ Status LogAnchorRegistry::GetEarliestRegisteredLogIndex(int64_t* log_index) {
 }
 
 size_t LogAnchorRegistry::GetAnchorCountForTests() const {
-  boost::lock_guard<simple_spinlock> l(lock_);
+  std::lock_guard<simple_spinlock> l(lock_);
   return anchors_.size();
 }
 
 std::string LogAnchorRegistry::DumpAnchorInfo() const {
   string buf;
-  boost::lock_guard<simple_spinlock> l(lock_);
+  std::lock_guard<simple_spinlock> l(lock_);
   MonoTime now = MonoTime::Now(MonoTime::FINE);
   for (const AnchorMultiMap::value_type& entry : anchors_) {
     const LogAnchor* anchor = entry.second;
@@ -153,7 +153,7 @@ MinLogIndexAnchorer::~MinLogIndexAnchorer() {
 }
 
 void MinLogIndexAnchorer::AnchorIfMinimum(int64_t log_index) {
-  boost::lock_guard<simple_spinlock> l(lock_);
+  std::lock_guard<simple_spinlock> l(lock_);
   if (PREDICT_FALSE(minimum_log_index_ == kInvalidOpIdIndex)) {
     minimum_log_index_ = log_index;
     registry_->Register(minimum_log_index_, owner_, &anchor_);
@@ -164,7 +164,7 @@ void MinLogIndexAnchorer::AnchorIfMinimum(int64_t log_index) {
 }
 
 Status MinLogIndexAnchorer::ReleaseAnchor() {
-  boost::lock_guard<simple_spinlock> l(lock_);
+  std::lock_guard<simple_spinlock> l(lock_);
   if (PREDICT_TRUE(minimum_log_index_ != kInvalidOpIdIndex)) {
     return registry_->Unregister(&anchor_);
   }
@@ -172,7 +172,7 @@ Status MinLogIndexAnchorer::ReleaseAnchor() {
 }
 
 int64_t MinLogIndexAnchorer::minimum_log_index() const {
-  boost::lock_guard<simple_spinlock> l(lock_);
+  std::lock_guard<simple_spinlock> l(lock_);
   return minimum_log_index_;
 }
 

http://git-wip-us.apache.org/repos/asf/incubator-kudu/blob/cfa9a99f/src/kudu/consensus/log_reader.cc
----------------------------------------------------------------------
diff --git a/src/kudu/consensus/log_reader.cc b/src/kudu/consensus/log_reader.cc
index c7d001d..c1ee1eb 100644
--- a/src/kudu/consensus/log_reader.cc
+++ b/src/kudu/consensus/log_reader.cc
@@ -17,8 +17,8 @@
 
 #include "kudu/consensus/log_reader.h"
 
-#include <boost/thread/locks.hpp>
 #include <algorithm>
+#include <mutex>
 
 #include "kudu/consensus/log_index.h"
 #include "kudu/consensus/opid_util.h"
@@ -118,7 +118,7 @@ LogReader::~LogReader() {
 
 Status LogReader::Init(const string& tablet_wal_path) {
   {
-    boost::lock_guard<simple_spinlock> lock(lock_);
+    std::lock_guard<simple_spinlock> lock(lock_);
     CHECK_EQ(state_, kLogReaderInitialized) << "bad state for Init(): " << state_;
   }
   VLOG(1) << "Reading wal from path:" << tablet_wal_path;
@@ -163,7 +163,7 @@ Status LogReader::Init(const string& tablet_wal_path) {
 
 
   {
-    boost::lock_guard<simple_spinlock> lock(lock_);
+    std::lock_guard<simple_spinlock> lock(lock_);
 
     string previous_seg_path;
     int64_t previous_seg_seqno = -1;
@@ -189,7 +189,7 @@ Status LogReader::Init(const string& tablet_wal_path) {
 }
 
 Status LogReader::InitEmptyReaderForTests() {
-  boost::lock_guard<simple_spinlock> lock(lock_);
+  std::lock_guard<simple_spinlock> lock(lock_);
   state_ = kLogReaderReading;
   return Status::OK();
 }
@@ -200,7 +200,7 @@ Status LogReader::GetSegmentPrefixNotIncluding(int64_t index,
   DCHECK(segments);
   segments->clear();
 
-  boost::lock_guard<simple_spinlock> lock(lock_);
+  std::lock_guard<simple_spinlock> lock(lock_);
   CHECK_EQ(state_, kLogReaderReading);
 
   for (const scoped_refptr<ReadableLogSegment>& segment : segments_) {
@@ -219,7 +219,7 @@ Status LogReader::GetSegmentPrefixNotIncluding(int64_t index,
 }
 
 int64_t LogReader::GetMinReplicateIndex() const {
-  boost::lock_guard<simple_spinlock> lock(lock_);
+  std::lock_guard<simple_spinlock> lock(lock_);
   int64_t min_remaining_op_idx = -1;
 
   for (const scoped_refptr<ReadableLogSegment>& segment : segments_) {
@@ -237,7 +237,7 @@ void LogReader::GetMaxIndexesToSegmentSizeMap(int64_t min_op_idx, int32_t segmen
                                               int64_t max_close_time_us,
                                               std::map<int64_t, int64_t>*
                                               max_idx_to_segment_size) const {
-  boost::lock_guard<simple_spinlock> lock(lock_);
+  std::lock_guard<simple_spinlock> lock(lock_);
   DCHECK_GE(segments_count, 0);
   for (const scoped_refptr<ReadableLogSegment>& segment : segments_) {
     if (max_idx_to_segment_size->size() == segments_count) {
@@ -261,7 +261,7 @@ void LogReader::GetMaxIndexesToSegmentSizeMap(int64_t min_op_idx, int32_t segmen
 }
 
 scoped_refptr<ReadableLogSegment> LogReader::GetSegmentBySequenceNumber(int64_t seq) const {
-  boost::lock_guard<simple_spinlock> lock(lock_);
+  std::lock_guard<simple_spinlock> lock(lock_);
   if (segments_.empty()) {
     return nullptr;
   }
@@ -395,14 +395,14 @@ Status LogReader::LookupOpId(int64_t op_index, OpId* op_id) const {
 }
 
 Status LogReader::GetSegmentsSnapshot(SegmentSequence* segments) const {
-  boost::lock_guard<simple_spinlock> lock(lock_);
+  std::lock_guard<simple_spinlock> lock(lock_);
   CHECK_EQ(state_, kLogReaderReading);
   segments->assign(segments_.begin(), segments_.end());
   return Status::OK();
 }
 
 Status LogReader::TrimSegmentsUpToAndIncluding(int64_t segment_sequence_number) {
-  boost::lock_guard<simple_spinlock> lock(lock_);
+  std::lock_guard<simple_spinlock> lock(lock_);
   CHECK_EQ(state_, kLogReaderReading);
   auto iter = segments_.begin();
   int num_deleted_segments = 0;
@@ -421,7 +421,7 @@ Status LogReader::TrimSegmentsUpToAndIncluding(int64_t segment_sequence_number)
 }
 
 void LogReader::UpdateLastSegmentOffset(int64_t readable_to_offset) {
-  boost::lock_guard<simple_spinlock> lock(lock_);
+  std::lock_guard<simple_spinlock> lock(lock_);
   CHECK_EQ(state_, kLogReaderReading);
   DCHECK(!segments_.empty());
   // Get the last segment
@@ -435,7 +435,7 @@ Status LogReader::ReplaceLastSegment(const scoped_refptr<ReadableLogSegment>& se
   // have a footer.
   DCHECK(segment->HasFooter());
 
-  boost::lock_guard<simple_spinlock> lock(lock_);
+  std::lock_guard<simple_spinlock> lock(lock_);
   CHECK_EQ(state_, kLogReaderReading);
   // Make sure the segment we're replacing has the same sequence number
   CHECK(!segments_.empty());
@@ -450,7 +450,7 @@ Status LogReader::AppendSegment(const scoped_refptr<ReadableLogSegment>& segment
   if (PREDICT_FALSE(!segment->HasFooter())) {
     RETURN_NOT_OK(segment->RebuildFooterByScanning());
   }
-  boost::lock_guard<simple_spinlock> lock(lock_);
+  std::lock_guard<simple_spinlock> lock(lock_);
   return AppendSegmentUnlocked(segment);
 }
 
@@ -468,7 +468,7 @@ Status LogReader::AppendSegmentUnlocked(const scoped_refptr<ReadableLogSegment>&
 
 Status LogReader::AppendEmptySegment(const scoped_refptr<ReadableLogSegment>& segment) {
   DCHECK(segment->IsInitialized());
-  boost::lock_guard<simple_spinlock> lock(lock_);
+  std::lock_guard<simple_spinlock> lock(lock_);
   CHECK_EQ(state_, kLogReaderReading);
   if (!segments_.empty()) {
     CHECK_EQ(segments_.back()->header().sequence_number() + 1,
@@ -479,12 +479,12 @@ Status LogReader::AppendEmptySegment(const scoped_refptr<ReadableLogSegment>& se
 }
 
 const int LogReader::num_segments() const {
-  boost::lock_guard<simple_spinlock> lock(lock_);
+  std::lock_guard<simple_spinlock> lock(lock_);
   return segments_.size();
 }
 
 string LogReader::ToString() const {
-  boost::lock_guard<simple_spinlock> lock(lock_);
+  std::lock_guard<simple_spinlock> lock(lock_);
   string ret = "Reader's SegmentSequence: \n";
   for (const SegmentSequence::value_type& entry : segments_) {
     ret.append(Substitute("Segment: $0 Footer: $1\n",

http://git-wip-us.apache.org/repos/asf/incubator-kudu/blob/cfa9a99f/src/kudu/consensus/mt-log-test.cc
----------------------------------------------------------------------
diff --git a/src/kudu/consensus/mt-log-test.cc b/src/kudu/consensus/mt-log-test.cc
index 94921e7..7f590e2 100644
--- a/src/kudu/consensus/mt-log-test.cc
+++ b/src/kudu/consensus/mt-log-test.cc
@@ -17,11 +17,9 @@
 
 #include "kudu/consensus/log-test-base.h"
 
-#include <boost/thread/locks.hpp>
-#include <boost/thread/mutex.hpp>
-
 #include <algorithm>
 #include <memory>
+#include <mutex>
 #include <vector>
 
 #include "kudu/consensus/log_index.h"
@@ -93,7 +91,7 @@ class MultiThreadedLogTest : public LogTestBase {
       DVLOG(1) << num_ops << " ops in this batch";
       num_ops =  std::max(num_ops, 1);
       {
-        boost::lock_guard<simple_spinlock> lock_guard(lock_);
+        std::lock_guard<simple_spinlock> lock_guard(lock_);
         for (int j = 0; j < num_ops; j++) {
           ReplicateRefPtr replicate = make_scoped_refptr_replicate(new ReplicateMsg);
           int32_t index = current_index_++;

http://git-wip-us.apache.org/repos/asf/incubator-kudu/blob/cfa9a99f/src/kudu/consensus/peer_manager.cc
----------------------------------------------------------------------
diff --git a/src/kudu/consensus/peer_manager.cc b/src/kudu/consensus/peer_manager.cc
index 4acbac7..cb7df2f 100644
--- a/src/kudu/consensus/peer_manager.cc
+++ b/src/kudu/consensus/peer_manager.cc
@@ -17,6 +17,8 @@
 
 #include "kudu/consensus/peer_manager.h"
 
+#include <mutex>
+
 #include "kudu/consensus/consensus_peers.h"
 #include "kudu/consensus/log.h"
 #include "kudu/gutil/map-util.h"
@@ -53,7 +55,7 @@ Status PeerManager::UpdateRaftConfig(const RaftConfigPB& config) {
 
   VLOG(1) << "Updating peers from new config: " << config.ShortDebugString();
 
-  boost::lock_guard<simple_spinlock> lock(lock_);
+  std::lock_guard<simple_spinlock> lock(lock_);
   // Create new peers
   for (const RaftPeerPB& peer_pb : config.peers()) {
     new_peers.insert(peer_pb.permanent_uuid());
@@ -85,7 +87,7 @@ Status PeerManager::UpdateRaftConfig(const RaftConfigPB& config) {
 }
 
 void PeerManager::SignalRequest(bool force_if_queue_empty) {
-  boost::lock_guard<simple_spinlock> lock(lock_);
+  std::lock_guard<simple_spinlock> lock(lock_);
   auto iter = peers_.begin();
   for (; iter != peers_.end(); iter++) {
     Status s = (*iter).second->SignalRequest(force_if_queue_empty);
@@ -100,7 +102,7 @@ void PeerManager::SignalRequest(bool force_if_queue_empty) {
 
 void PeerManager::Close() {
   {
-    boost::lock_guard<simple_spinlock> lock(lock_);
+    std::lock_guard<simple_spinlock> lock(lock_);
     for (const PeersMap::value_type& entry : peers_) {
       entry.second->Close();
     }

http://git-wip-us.apache.org/repos/asf/incubator-kudu/blob/cfa9a99f/src/kudu/consensus/raft_consensus.cc
----------------------------------------------------------------------
diff --git a/src/kudu/consensus/raft_consensus.cc b/src/kudu/consensus/raft_consensus.cc
index b23087a..710075a 100644
--- a/src/kudu/consensus/raft_consensus.cc
+++ b/src/kudu/consensus/raft_consensus.cc
@@ -21,6 +21,7 @@
 #include <boost/optional.hpp>
 #include <gflags/gflags.h>
 #include <iostream>
+#include <mutex>
 
 #include "kudu/common/wire_protocol.h"
 #include "kudu/consensus/consensus.pb.h"
@@ -499,7 +500,7 @@ Status RaftConsensus::Replicate(const scoped_refptr<ConsensusRound>& round) {
 
   RETURN_NOT_OK(ExecuteHook(PRE_REPLICATE));
 
-  boost::lock_guard<simple_spinlock> lock(update_lock_);
+  std::lock_guard<simple_spinlock> lock(update_lock_);
   {
     ReplicaState::UniqueLock lock;
     RETURN_NOT_OK(state_->LockForReplicate(&lock, *round->replicate_msg()));
@@ -669,7 +670,7 @@ Status RaftConsensus::Update(const ConsensusRequestPB* request,
   VLOG_WITH_PREFIX(2) << "Replica received request: " << request->ShortDebugString();
 
   // see var declaration
-  boost::lock_guard<simple_spinlock> lock(update_lock_);
+  std::lock_guard<simple_spinlock> lock(update_lock_);
   Status s = UpdateReplica(request, response);
   if (PREDICT_FALSE(VLOG_IS_ON(1))) {
     if (request->ops_size() == 0) {
@@ -1267,7 +1268,7 @@ Status RaftConsensus::RequestVote(const VoteRequestPB* request, VoteResponsePB*
   // We must acquire the update lock in order to ensure that this vote action
   // takes place between requests.
   // Lock ordering: The update lock must be acquired before the ReplicaState lock.
-  boost::unique_lock<simple_spinlock> update_guard(update_lock_, boost::defer_lock);
+  std::unique_lock<simple_spinlock> update_guard(update_lock_, std::defer_lock);
   if (FLAGS_enable_leader_failure_detection) {
     update_guard.try_lock();
   } else {
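
This is the one guard in the commit that is not a plain scoped lock: the
unique_lock is constructed deferred, and either try_lock() or (presumably, past
the end of the hunk) a blocking lock() is chosen at runtime. A sketch of the
pattern, with control flow simplified:

    #include <mutex>

    std::mutex update_lock;  // stands in for the simple_spinlock member

    void RequestVoteSketch(bool leader_failure_detection) {
      std::unique_lock<std::mutex> update_guard(update_lock, std::defer_lock);
      if (leader_failure_detection) {
        update_guard.try_lock();  // best effort: don't block if it's held
      } else {
        update_guard.lock();      // assumed blocking path for the else branch
      }
      if (update_guard.owns_lock()) {
        // ... decide the vote while holding the update lock ...
      }
    }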

http://git-wip-us.apache.org/repos/asf/incubator-kudu/blob/cfa9a99f/src/kudu/consensus/raft_consensus.h
----------------------------------------------------------------------
diff --git a/src/kudu/consensus/raft_consensus.h b/src/kudu/consensus/raft_consensus.h
index 6f5e377..2864ff3 100644
--- a/src/kudu/consensus/raft_consensus.h
+++ b/src/kudu/consensus/raft_consensus.h
@@ -18,8 +18,9 @@
 #ifndef KUDU_CONSENSUS_RAFT_CONSENSUS_H_
 #define KUDU_CONSENSUS_RAFT_CONSENSUS_H_
 
-#include <boost/thread/locks.hpp>
+#include <boost/optional/optional_fwd.hpp>
 #include <memory>
+#include <mutex>
 #include <string>
 #include <utility>
 #include <vector>
@@ -33,7 +34,7 @@
 
 namespace kudu {
 
-typedef boost::lock_guard<simple_spinlock> Lock;
+typedef std::lock_guard<simple_spinlock> Lock;
 typedef gscoped_ptr<Lock> ScopedLock;
 
 class Counter;
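
The ScopedLock typedef survives because std::lock_guard, like boost's, is
neither copyable nor movable; holding one behind a smart pointer is how a held
lock can be handed across scopes. A sketch with std::unique_ptr standing in
for gscoped_ptr:

    #include <memory>
    #include <mutex>

    typedef std::lock_guard<std::mutex> Lock;
    typedef std::unique_ptr<Lock> ScopedLock;

    std::mutex m;  // illustrative

    ScopedLock AcquireForCaller() {
      return ScopedLock(new Lock(m));  // held until the caller drops the pointer
    }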

http://git-wip-us.apache.org/repos/asf/incubator-kudu/blob/cfa9a99f/src/kudu/experiments/rwlock-perf.cc
----------------------------------------------------------------------
diff --git a/src/kudu/experiments/rwlock-perf.cc b/src/kudu/experiments/rwlock-perf.cc
index 175aaa3..a559c6b 100644
--- a/src/kudu/experiments/rwlock-perf.cc
+++ b/src/kudu/experiments/rwlock-perf.cc
@@ -16,10 +16,10 @@
 // under the License.
 
 #include <boost/smart_ptr/detail/spinlock.hpp>
-#include <boost/thread/mutex.hpp>
 #include <boost/thread/shared_mutex.hpp>
 #include <gflags/gflags.h>
 #include <glog/logging.h>
+#include <mutex>
 #include <stdio.h>
 #include <thread>
 #include <unistd.h>
@@ -82,7 +82,7 @@ struct shared_data {
 
   kudu::rw_spinlock rw_spinlock;
   boost::shared_mutex rwlock;
-  boost::mutex lock;
+  std::mutex lock;
   kudu::percpu_rwlock per_cpu;
 };
 
@@ -191,7 +191,7 @@ void test_shared_lock(int num_threads, TestMethod method, const char *name) {
         threads.emplace_back(shared_mutex_entry, &shared);
         break;
       case OWN_MUTEX:
-        threads.emplace_back(own_mutex_entry<boost::mutex>);
+        threads.emplace_back(own_mutex_entry<std::mutex>);
         break;
       case OWN_SPINLOCK:
         threads.emplace_back(own_mutex_entry<my_spinlock>);
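
Because the benchmark's thread body is templated on the mutex type, the
boost-to-std swap is a one-token change at the call site above. A simplified
sketch (the loop body is illustrative, not the benchmark's actual workload):

    #include <mutex>
    #include <thread>
    #include <vector>

    template <class MutexType>
    void own_mutex_entry() {
      MutexType m;  // private to this thread, so the lock is uncontended
      for (int i = 0; i < 1000000; i++) {
        std::lock_guard<MutexType> guard(m);
      }
    }

    void RunBench(int num_threads) {
      std::vector<std::thread> threads;
      for (int i = 0; i < num_threads; i++) {
        threads.emplace_back(own_mutex_entry<std::mutex>);  // was boost::mutex
      }
      for (auto& t : threads) {
        t.join();
      }
    }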

http://git-wip-us.apache.org/repos/asf/incubator-kudu/blob/cfa9a99f/src/kudu/master/catalog_manager.cc
----------------------------------------------------------------------
diff --git a/src/kudu/master/catalog_manager.cc b/src/kudu/master/catalog_manager.cc
index aac64d8..30f0cc2 100644
--- a/src/kudu/master/catalog_manager.cc
+++ b/src/kudu/master/catalog_manager.cc
@@ -41,14 +41,13 @@
 
 #include "kudu/master/catalog_manager.h"
 
+#include <algorithm>
 #include <boost/optional.hpp>
-#include <boost/thread/condition_variable.hpp>
-#include <boost/thread/locks.hpp>
-#include <boost/thread/mutex.hpp>
+#include <boost/thread/shared_mutex.hpp>
+#include <condition_variable>
 #include <glog/logging.h>
-
-#include <algorithm>
 #include <memory>
+#include <mutex>
 #include <set>
 #include <string>
 #include <utility>
@@ -548,7 +547,7 @@ CatalogManager::~CatalogManager() {
 
 Status CatalogManager::Init(bool is_first_run) {
   {
-    boost::lock_guard<simple_spinlock> l(state_lock_);
+    std::lock_guard<simple_spinlock> l(state_lock_);
     CHECK_EQ(kConstructed, state_);
     state_ = kStarting;
   }
@@ -564,13 +563,13 @@ Status CatalogManager::Init(bool is_first_run) {
   RETURN_NOT_OK_PREPEND(sys_catalog_->WaitUntilRunning(),
                         "Failed waiting for the catalog tablet to run");
 
-  boost::lock_guard<LockType> l(lock_);
+  std::lock_guard<LockType> l(lock_);
   background_tasks_.reset(new CatalogManagerBgTasks(this));
   RETURN_NOT_OK_PREPEND(background_tasks_->Init(),
                         "Failed to initialize catalog manager background tasks");
 
   {
-    boost::lock_guard<simple_spinlock> l(state_lock_);
+    std::lock_guard<simple_spinlock> l(state_lock_);
     CHECK_EQ(kStarting, state_);
     state_ = kRunning;
   }
@@ -579,7 +578,7 @@ Status CatalogManager::Init(bool is_first_run) {
 }
 
 Status CatalogManager::ElectedAsLeaderCb() {
-  boost::lock_guard<simple_spinlock> l(state_lock_);
+  std::lock_guard<simple_spinlock> l(state_lock_);
   return worker_pool_->SubmitClosure(
       Bind(&CatalogManager::VisitTablesAndTabletsTask, Unretained(this)));
 }
@@ -616,7 +615,7 @@ void CatalogManager::VisitTablesAndTabletsTask() {
   }
 
   {
-    boost::lock_guard<LockType> lock(lock_);
+    std::lock_guard<LockType> lock(lock_);
     int64_t term_after_wait = consensus->ConsensusState(CONSENSUS_CONFIG_COMMITTED).current_term();
     if (term_after_wait != term) {
       // If we got elected leader again while waiting to catch up then we will
@@ -631,7 +630,7 @@ void CatalogManager::VisitTablesAndTabletsTask() {
       CHECK_OK(VisitTablesAndTabletsUnlocked());
     }
   }
-  boost::lock_guard<simple_spinlock> l(state_lock_);
+  std::lock_guard<simple_spinlock> l(state_lock_);
   leader_ready_term_ = term;
 }
 
@@ -654,7 +653,7 @@ Status CatalogManager::VisitTablesAndTabletsUnlocked() {
 }
 
 Status CatalogManager::InitSysCatalogAsync(bool is_first_run) {
-  boost::lock_guard<LockType> l(lock_);
+  std::lock_guard<LockType> l(lock_);
   sys_catalog_.reset(new SysCatalogTable(master_,
                                          master_->metric_registry(),
                                          Bind(&CatalogManager::ElectedAsLeaderCb,
@@ -668,12 +667,12 @@ Status CatalogManager::InitSysCatalogAsync(bool is_first_run) {
 }
 
 bool CatalogManager::IsInitialized() const {
-  boost::lock_guard<simple_spinlock> l(state_lock_);
+  std::lock_guard<simple_spinlock> l(state_lock_);
   return state_ == kRunning;
 }
 
 Status CatalogManager::CheckIsLeaderAndReady() const {
-  boost::lock_guard<simple_spinlock> l(state_lock_);
+  std::lock_guard<simple_spinlock> l(state_lock_);
   if (PREDICT_FALSE(state_ != kRunning)) {
     return Status::ServiceUnavailable(
         Substitute("Catalog manager is shutting down. State: $0", state_));
@@ -699,7 +698,7 @@ RaftPeerPB::Role CatalogManager::Role() const {
 
 void CatalogManager::Shutdown() {
   {
-    boost::lock_guard<simple_spinlock> l(state_lock_);
+    std::lock_guard<simple_spinlock> l(state_lock_);
     if (state_ == kClosing) {
       VLOG(2) << "CatalogManager already shut down";
       return;
@@ -720,7 +719,7 @@ void CatalogManager::Shutdown() {
   // any new tasks for those entries.
   TableInfoMap copy;
   {
-    boost::lock_guard<simple_spinlock> l(state_lock_);
+    std::lock_guard<simple_spinlock> l(state_lock_);
     copy = table_ids_map_;
   }
   for (const TableInfoMap::value_type &e : copy) {
@@ -866,7 +865,7 @@ Status CatalogManager::CreateTable(const CreateTableRequestPB* orig_req,
 
   scoped_refptr<TableInfo> table;
   {
-    boost::lock_guard<LockType> l(lock_);
+    std::lock_guard<LockType> l(lock_);
     TRACE("Acquired catalog manager lock");
 
     // b. Verify that the table does not exist.
@@ -887,7 +886,7 @@ Status CatalogManager::CreateTable(const CreateTableRequestPB* orig_req,
 
   // Ensure that if we return, we mark this table as no longer being created.
   auto cleanup = MakeScopedCleanup([&] () {
-    boost::lock_guard<LockType> l(lock_);
+    std::lock_guard<LockType> l(lock_);
     CHECK_EQ(1, tables_being_created_.erase(req.name()));
   });
 
@@ -939,7 +938,7 @@ Status CatalogManager::CreateTable(const CreateTableRequestPB* orig_req,
 
   // g. Make the new table and tablets visible in the catalog.
   {
-    boost::lock_guard<LockType> l(lock_);
+    std::lock_guard<LockType> l(lock_);
 
     table_ids_map_[table->id()] = table;
     table_names_map_[req.name()] = table;
@@ -1091,7 +1090,7 @@ Status CatalogManager::DeleteTable(const DeleteTableRequestPB* req,
     // 4. Remove the table from the by-name map.
     {
       TRACE("Removing table from by-name map");
-      boost::lock_guard<LockType> l_map(lock_);
+      std::lock_guard<LockType> l_map(lock_);
       if (table_names_map_.erase(l.data().name()) != 1) {
         PANIC_RPC(rpc, "Could not remove table from map, name=" + l.data().name());
       }
@@ -1248,7 +1247,7 @@ Status CatalogManager::AlterTable(const AlterTableRequestPB* req,
 
   // 3. Try to acquire the new table name
   if (req->has_new_table_name()) {
-    boost::lock_guard<LockType> catalog_lock(lock_);
+    std::lock_guard<LockType> catalog_lock(lock_);
 
     TRACE("Acquired catalog manager lock");
 
@@ -1297,7 +1296,7 @@ Status CatalogManager::AlterTable(const AlterTableRequestPB* req,
                    s.ToString()));
     LOG(WARNING) << s.ToString();
     if (req->has_new_table_name()) {
-      boost::lock_guard<LockType> catalog_lock(lock_);
+      std::lock_guard<LockType> catalog_lock(lock_);
       CHECK_EQ(table_names_map_.erase(req->new_table_name()), 1);
     }
     CheckIfNoLongerLeaderAndSetupError(s, resp);
@@ -1307,7 +1306,7 @@ Status CatalogManager::AlterTable(const AlterTableRequestPB* req,
   // 6. Remove the old name
   if (req->has_new_table_name()) {
     TRACE("Removing old-name $0 from by-name map", table_name);
-    boost::lock_guard<LockType> l_map(lock_);
+    std::lock_guard<LockType> l_map(lock_);
     if (table_names_map_.erase(table_name) != 1) {
       PANIC_RPC(rpc, "Could not remove table from map, name=" + l.data().name());
     }
@@ -2814,7 +2813,7 @@ Status CatalogManager::ProcessPendingAssignments(
   unlocker_out.Commit();
   unlocker_in.Commit();
   {
-    boost::lock_guard<LockType> l(lock_);
+    std::lock_guard<LockType> l(lock_);
     for (const auto& new_tablet : unlocker_out) {
       new_tablet->table()->AddTablet(new_tablet.get());
       tablet_map_[new_tablet->tablet_id()] = new_tablet;
@@ -3204,33 +3203,33 @@ TabletInfo::~TabletInfo() {
 }
 
 void TabletInfo::SetReplicaLocations(const ReplicaMap& replica_locations) {
-  boost::lock_guard<simple_spinlock> l(lock_);
+  std::lock_guard<simple_spinlock> l(lock_);
   last_update_time_ = MonoTime::Now(MonoTime::FINE);
   replica_locations_ = replica_locations;
 }
 
 void TabletInfo::GetReplicaLocations(ReplicaMap* replica_locations) const {
-  boost::lock_guard<simple_spinlock> l(lock_);
+  std::lock_guard<simple_spinlock> l(lock_);
   *replica_locations = replica_locations_;
 }
 
 bool TabletInfo::AddToReplicaLocations(const TabletReplica& replica) {
-  boost::lock_guard<simple_spinlock> l(lock_);
+  std::lock_guard<simple_spinlock> l(lock_);
   return InsertIfNotPresent(&replica_locations_, replica.ts_desc->permanent_uuid(), replica);
 }
 
 void TabletInfo::set_last_update_time(const MonoTime& ts) {
-  boost::lock_guard<simple_spinlock> l(lock_);
+  std::lock_guard<simple_spinlock> l(lock_);
   last_update_time_ = ts;
 }
 
 MonoTime TabletInfo::last_update_time() const {
-  boost::lock_guard<simple_spinlock> l(lock_);
+  std::lock_guard<simple_spinlock> l(lock_);
   return last_update_time_;
 }
 
 bool TabletInfo::set_reported_schema_version(uint32_t version) {
-  boost::lock_guard<simple_spinlock> l(lock_);
+  std::lock_guard<simple_spinlock> l(lock_);
   if (version > reported_schema_version_) {
     reported_schema_version_ = version;
     return true;
@@ -3239,7 +3238,7 @@ bool TabletInfo::set_reported_schema_version(uint32_t version) {
 }
 
 uint32_t TabletInfo::reported_schema_version() const {
-  boost::lock_guard<simple_spinlock> l(lock_);
+  std::lock_guard<simple_spinlock> l(lock_);
   return reported_schema_version_;
 }
 
@@ -3268,17 +3267,17 @@ std::string TableInfo::ToString() const {
 }
 
 bool TableInfo::RemoveTablet(const std::string& partition_key_start) {
-  boost::lock_guard<simple_spinlock> l(lock_);
+  std::lock_guard<simple_spinlock> l(lock_);
   return EraseKeyReturnValuePtr(&tablet_map_, partition_key_start) != NULL;
 }
 
 void TableInfo::AddTablet(TabletInfo *tablet) {
-  boost::lock_guard<simple_spinlock> l(lock_);
+  std::lock_guard<simple_spinlock> l(lock_);
   AddTabletUnlocked(tablet);
 }
 
 void TableInfo::AddTablets(const vector<TabletInfo*>& tablets) {
-  boost::lock_guard<simple_spinlock> l(lock_);
+  std::lock_guard<simple_spinlock> l(lock_);
   for (TabletInfo *tablet : tablets) {
     AddTabletUnlocked(tablet);
   }
@@ -3298,7 +3297,7 @@ void TableInfo::AddTabletUnlocked(TabletInfo* tablet) {
 
 void TableInfo::GetTabletsInRange(const GetTableLocationsRequestPB* req,
                                   vector<scoped_refptr<TabletInfo> > *ret) const {
-  boost::lock_guard<simple_spinlock> l(lock_);
+  std::lock_guard<simple_spinlock> l(lock_);
   int max_returned_locations = req->max_returned_locations();
 
   TableInfo::TabletInfoMap::const_iterator it, it_end;
@@ -3325,7 +3324,7 @@ void TableInfo::GetTabletsInRange(const GetTableLocationsRequestPB* req,
 }
 
 bool TableInfo::IsAlterInProgress(uint32_t version) const {
-  boost::lock_guard<simple_spinlock> l(lock_);
+  std::lock_guard<simple_spinlock> l(lock_);
   for (const TableInfo::TabletInfoMap::value_type& e : tablet_map_) {
     if (e.second->reported_schema_version() < version) {
       VLOG(3) << "Table " << table_id_ << " ALTER in progress due to tablet "
@@ -3338,7 +3337,7 @@ bool TableInfo::IsAlterInProgress(uint32_t version) const {
 }
 
 bool TableInfo::IsCreateInProgress() const {
-  boost::lock_guard<simple_spinlock> l(lock_);
+  std::lock_guard<simple_spinlock> l(lock_);
   for (const TableInfo::TabletInfoMap::value_type& e : tablet_map_) {
     TabletMetadataLock tablet_lock(e.second, TabletMetadataLock::READ);
     if (!tablet_lock.data().is_running()) {
@@ -3349,19 +3348,19 @@ bool TableInfo::IsCreateInProgress() const {
 }
 
 void TableInfo::AddTask(MonitoredTask* task) {
-  boost::lock_guard<simple_spinlock> l(lock_);
+  std::lock_guard<simple_spinlock> l(lock_);
   task->AddRef();
   pending_tasks_.insert(task);
 }
 
 void TableInfo::RemoveTask(MonitoredTask* task) {
-  boost::lock_guard<simple_spinlock> l(lock_);
+  std::lock_guard<simple_spinlock> l(lock_);
   pending_tasks_.erase(task);
   task->Release();
 }
 
 void TableInfo::AbortTasks() {
-  boost::lock_guard<simple_spinlock> l(lock_);
+  std::lock_guard<simple_spinlock> l(lock_);
   for (MonitoredTask* task : pending_tasks_) {
     task->Abort();
   }
@@ -3371,7 +3370,7 @@ void TableInfo::WaitTasksCompletion() {
   int wait_time = 5;
   while (1) {
     {
-      boost::lock_guard<simple_spinlock> l(lock_);
+      std::lock_guard<simple_spinlock> l(lock_);
       if (pending_tasks_.empty()) {
         break;
       }
@@ -3382,7 +3381,7 @@ void TableInfo::WaitTasksCompletion() {
 }
 
 void TableInfo::GetTaskList(std::vector<scoped_refptr<MonitoredTask> > *ret) {
-  boost::lock_guard<simple_spinlock> l(lock_);
+  std::lock_guard<simple_spinlock> l(lock_);
   for (MonitoredTask* task : pending_tasks_) {
     ret->push_back(make_scoped_refptr(task));
   }
@@ -3390,7 +3389,7 @@ void TableInfo::GetTaskList(std::vector<scoped_refptr<MonitoredTask> > *ret) {
 
 void TableInfo::GetAllTablets(vector<scoped_refptr<TabletInfo> > *ret) const {
   ret->clear();
-  boost::lock_guard<simple_spinlock> l(lock_);
+  std::lock_guard<simple_spinlock> l(lock_);
   for (const TableInfo::TabletInfoMap::value_type& e : tablet_map_) {
     ret->push_back(make_scoped_refptr(e.second));
   }
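
The pattern throughout catalog_manager.cc, std::lock_guard over Kudu's own simple_spinlock, works because std::lock_guard is templated on any type meeting the standard's BasicLockable requirements (member lock() and unlock()), not just std::mutex. A minimal sketch with a toy spinlock (not Kudu's actual simple_spinlock):

    #include <atomic>
    #include <mutex>

    // Any type with lock()/unlock() members is BasicLockable, so the
    // standard guard can manage it exactly as boost::lock_guard did.
    class ToySpinlock {
     public:
      void lock() {
        // Spin until the flag atomically flips from clear to set.
        while (locked_.test_and_set(std::memory_order_acquire)) {}
      }
      void unlock() {
        locked_.clear(std::memory_order_release);
      }
     private:
      std::atomic_flag locked_ = ATOMIC_FLAG_INIT;
    };

    ToySpinlock lock_;
    int state_ = 0;

    void Touch() {
      std::lock_guard<ToySpinlock> l(lock_);  // released at scope exit
      ++state_;
    }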

http://git-wip-us.apache.org/repos/asf/incubator-kudu/blob/cfa9a99f/src/kudu/master/catalog_manager.h
----------------------------------------------------------------------
diff --git a/src/kudu/master/catalog_manager.h b/src/kudu/master/catalog_manager.h
index a4e07a3..daf1feb 100644
--- a/src/kudu/master/catalog_manager.h
+++ b/src/kudu/master/catalog_manager.h
@@ -18,7 +18,6 @@
 #define KUDU_MASTER_CATALOG_MANAGER_H
 
 #include <boost/optional/optional_fwd.hpp>
-#include <boost/thread/mutex.hpp>
 #include <map>
 #include <set>
 #include <string>

http://git-wip-us.apache.org/repos/asf/incubator-kudu/blob/cfa9a99f/src/kudu/master/ts_descriptor.cc
----------------------------------------------------------------------
diff --git a/src/kudu/master/ts_descriptor.cc b/src/kudu/master/ts_descriptor.cc
index 7dbf5cc..1e5434b 100644
--- a/src/kudu/master/ts_descriptor.cc
+++ b/src/kudu/master/ts_descriptor.cc
@@ -15,20 +15,19 @@
 // specific language governing permissions and limitations
 // under the License.
 
+#include "kudu/master/ts_descriptor.h"
+
+#include <math.h>
+#include <mutex>
+#include <vector>
+
 #include "kudu/common/wire_protocol.h"
 #include "kudu/consensus/consensus.proxy.h"
 #include "kudu/gutil/strings/substitute.h"
-#include "kudu/master/ts_descriptor.h"
 #include "kudu/master/master.pb.h"
 #include "kudu/tserver/tserver_admin.proxy.h"
 #include "kudu/util/net/net_util.h"
 
-#include <boost/thread/locks.hpp>
-#include <boost/thread/mutex.hpp>
-
-#include <math.h>
-#include <vector>
-
 using std::shared_ptr;
 
 namespace kudu {
@@ -58,7 +57,7 @@ TSDescriptor::~TSDescriptor() {
 
 Status TSDescriptor::Register(const NodeInstancePB& instance,
                               const TSRegistrationPB& registration) {
-  boost::lock_guard<simple_spinlock> l(lock_);
+  std::lock_guard<simple_spinlock> l(lock_);
   CHECK_EQ(instance.permanent_uuid(), permanent_uuid_);
 
   // TODO(KUDU-418): we don't currently support changing IPs or hosts since the
@@ -99,28 +98,28 @@ Status TSDescriptor::Register(const NodeInstancePB& instance,
 }
 
 void TSDescriptor::UpdateHeartbeatTime() {
-  boost::lock_guard<simple_spinlock> l(lock_);
+  std::lock_guard<simple_spinlock> l(lock_);
   last_heartbeat_ = MonoTime::Now(MonoTime::FINE);
 }
 
 MonoDelta TSDescriptor::TimeSinceHeartbeat() const {
   MonoTime now(MonoTime::Now(MonoTime::FINE));
-  boost::lock_guard<simple_spinlock> l(lock_);
+  std::lock_guard<simple_spinlock> l(lock_);
   return now.GetDeltaSince(last_heartbeat_);
 }
 
 int64_t TSDescriptor::latest_seqno() const {
-  boost::lock_guard<simple_spinlock> l(lock_);
+  std::lock_guard<simple_spinlock> l(lock_);
   return latest_seqno_;
 }
 
 bool TSDescriptor::has_tablet_report() const {
-  boost::lock_guard<simple_spinlock> l(lock_);
+  std::lock_guard<simple_spinlock> l(lock_);
   return has_tablet_report_;
 }
 
 void TSDescriptor::set_has_tablet_report(bool has_report) {
-  boost::lock_guard<simple_spinlock> l(lock_);
+  std::lock_guard<simple_spinlock> l(lock_);
   has_tablet_report_ = has_report;
 }
 
@@ -148,19 +147,19 @@ void TSDescriptor::IncrementRecentReplicaCreations() {
 }
 
 double TSDescriptor::RecentReplicaCreations() {
-  boost::lock_guard<simple_spinlock> l(lock_);
+  std::lock_guard<simple_spinlock> l(lock_);
   DecayRecentReplicaCreationsUnlocked();
   return recent_replica_creations_;
 }
 
 void TSDescriptor::GetRegistration(TSRegistrationPB* reg) const {
-  boost::lock_guard<simple_spinlock> l(lock_);
+  std::lock_guard<simple_spinlock> l(lock_);
   CHECK(registration_) << "No registration";
   CHECK_NOTNULL(reg)->CopyFrom(*registration_);
 }
 
 void TSDescriptor::GetNodeInstancePB(NodeInstancePB* instance_pb) const {
-  boost::lock_guard<simple_spinlock> l(lock_);
+  std::lock_guard<simple_spinlock> l(lock_);
   instance_pb->set_permanent_uuid(permanent_uuid_);
   instance_pb->set_instance_seqno(latest_seqno_);
 }
@@ -168,7 +167,7 @@ void TSDescriptor::GetNodeInstancePB(NodeInstancePB* instance_pb) const {
 Status TSDescriptor::ResolveSockaddr(Sockaddr* addr) const {
   vector<HostPort> hostports;
   {
-    boost::lock_guard<simple_spinlock> l(lock_);
+    std::lock_guard<simple_spinlock> l(lock_);
     for (const HostPortPB& addr : registration_->rpc_addresses()) {
       hostports.push_back(HostPort(addr.host(), addr.port()));
     }
@@ -201,7 +200,7 @@ Status TSDescriptor::ResolveSockaddr(Sockaddr* addr) const {
 Status TSDescriptor::GetTSAdminProxy(const shared_ptr<rpc::Messenger>& messenger,
                                      shared_ptr<tserver::TabletServerAdminServiceProxy>* proxy) {
   {
-    boost::lock_guard<simple_spinlock> l(lock_);
+    std::lock_guard<simple_spinlock> l(lock_);
     if (ts_admin_proxy_) {
       *proxy = ts_admin_proxy_;
       return Status::OK();
@@ -211,7 +210,7 @@ Status TSDescriptor::GetTSAdminProxy(const shared_ptr<rpc::Messenger>& messenger
   Sockaddr addr;
   RETURN_NOT_OK(ResolveSockaddr(&addr));
 
-  boost::lock_guard<simple_spinlock> l(lock_);
+  std::lock_guard<simple_spinlock> l(lock_);
   if (!ts_admin_proxy_) {
     ts_admin_proxy_.reset(new tserver::TabletServerAdminServiceProxy(messenger, addr));
   }
@@ -222,7 +221,7 @@ Status TSDescriptor::GetTSAdminProxy(const shared_ptr<rpc::Messenger>& messenger
 Status TSDescriptor::GetConsensusProxy(const shared_ptr<rpc::Messenger>& messenger,
                                        shared_ptr<consensus::ConsensusServiceProxy>* proxy) {
   {
-    boost::lock_guard<simple_spinlock> l(lock_);
+    std::lock_guard<simple_spinlock> l(lock_);
     if (consensus_proxy_) {
       *proxy = consensus_proxy_;
       return Status::OK();
@@ -232,7 +231,7 @@ Status TSDescriptor::GetConsensusProxy(const shared_ptr<rpc::Messenger>& messeng
   Sockaddr addr;
   RETURN_NOT_OK(ResolveSockaddr(&addr));
 
-  boost::lock_guard<simple_spinlock> l(lock_);
+  std::lock_guard<simple_spinlock> l(lock_);
   if (!consensus_proxy_) {
     consensus_proxy_.reset(new consensus::ConsensusServiceProxy(messenger, addr));
   }
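
GetTSAdminProxy() and GetConsensusProxy() above share a check/unlock/recheck shape: the spinlock is dropped while the slow address resolution runs, then reacquired with the cached field re-checked before publishing. A sketch of that shape under hypothetical names (LazyProxy, SlowBuild):

    #include <memory>
    #include <mutex>

    struct Proxy {};
    std::shared_ptr<Proxy> SlowBuild() {  // stands in for resolve + construct
      return std::make_shared<Proxy>();
    }

    class LazyProxy {
     public:
      std::shared_ptr<Proxy> Get() {
        {
          std::lock_guard<std::mutex> l(lock_);
          if (proxy_) return proxy_;          // fast path: already built
        }
        std::shared_ptr<Proxy> fresh = SlowBuild();  // lock NOT held here
        std::lock_guard<std::mutex> l(lock_);
        if (!proxy_) proxy_ = fresh;          // a racing caller may have won
        return proxy_;
      }
     private:
      std::mutex lock_;
      std::shared_ptr<Proxy> proxy_;
    };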

http://git-wip-us.apache.org/repos/asf/incubator-kudu/blob/cfa9a99f/src/kudu/master/ts_manager.cc
----------------------------------------------------------------------
diff --git a/src/kudu/master/ts_manager.cc b/src/kudu/master/ts_manager.cc
index 1b5e140..ee8761f 100644
--- a/src/kudu/master/ts_manager.cc
+++ b/src/kudu/master/ts_manager.cc
@@ -17,8 +17,8 @@
 
 #include "kudu/master/ts_manager.h"
 
-#include <boost/thread/locks.hpp>
-#include <boost/thread/mutex.hpp>
+#include <boost/thread/shared_mutex.hpp>
+#include <mutex>
 #include <vector>
 
 #include "kudu/gutil/map-util.h"
@@ -72,7 +72,7 @@ bool TSManager::LookupTSByUUID(const string& uuid,
 Status TSManager::RegisterTS(const NodeInstancePB& instance,
                              const TSRegistrationPB& registration,
                              std::shared_ptr<TSDescriptor>* desc) {
-  boost::lock_guard<rw_spinlock> l(lock_);
+  std::lock_guard<rw_spinlock> l(lock_);
   const string& uuid = instance.permanent_uuid();
 
   if (!ContainsKey(servers_by_id_, uuid)) {

http://git-wip-us.apache.org/repos/asf/incubator-kudu/blob/cfa9a99f/src/kudu/server/hybrid_clock.cc
----------------------------------------------------------------------
diff --git a/src/kudu/server/hybrid_clock.cc b/src/kudu/server/hybrid_clock.cc
index 937964f..cc3b611 100644
--- a/src/kudu/server/hybrid_clock.cc
+++ b/src/kudu/server/hybrid_clock.cc
@@ -15,11 +15,11 @@
 // specific language governing permissions and limitations
 // under the License.
 
+#include "kudu/server/hybrid_clock.h"
+
 #include <algorithm>
-#include <boost/thread/locks.hpp>
 #include <glog/logging.h>
-
-#include "kudu/server/hybrid_clock.h"
+#include <mutex>
 
 #include "kudu/gutil/bind.h"
 #include "kudu/gutil/strings/substitute.h"
@@ -189,7 +189,7 @@ Timestamp HybridClock::Now() {
   Timestamp now;
   uint64_t error;
 
-  boost::lock_guard<simple_spinlock> lock(lock_);
+  std::lock_guard<simple_spinlock> lock(lock_);
   NowWithError(&now, &error);
   return now;
 }
@@ -199,7 +199,7 @@ Timestamp HybridClock::NowLatest() {
   uint64_t error;
 
   {
-    boost::lock_guard<simple_spinlock> lock(lock_);
+    std::lock_guard<simple_spinlock> lock(lock_);
     NowWithError(&now, &error);
   }
 
@@ -271,7 +271,7 @@ void HybridClock::NowWithError(Timestamp* timestamp, uint64_t* max_error_usec) {
 }
 
 Status HybridClock::Update(const Timestamp& to_update) {
-  boost::lock_guard<simple_spinlock> lock(lock_);
+  std::lock_guard<simple_spinlock> lock(lock_);
   Timestamp now;
   uint64_t error_ignored;
   NowWithError(&now, &error_ignored);
@@ -306,7 +306,7 @@ Status HybridClock::WaitUntilAfter(const Timestamp& then_latest,
   Timestamp now;
   uint64_t error;
   {
-    boost::lock_guard<simple_spinlock> lock(lock_);
+    std::lock_guard<simple_spinlock> lock(lock_);
     NowWithError(&now, &error);
   }
 
@@ -350,7 +350,7 @@ Status HybridClock::WaitUntilAfter(const Timestamp& then_latest,
     Timestamp now;
     uint64_t error;
     {
-      boost::lock_guard<simple_spinlock> lock(lock_);
+      std::lock_guard<simple_spinlock> lock(lock_);
       NowWithError(&now, &error);
     }
     if (now.CompareTo(then) > 0) {
@@ -372,7 +372,7 @@ bool HybridClock::IsAfter(Timestamp t) {
 
   Timestamp now;
   {
-    boost::lock_guard<simple_spinlock> lock(lock_);
+    std::lock_guard<simple_spinlock> lock(lock_);
     now = Timestamp(std::max(next_timestamp_, now_usec << kBitsToShift));
   }
   return t.value() < now.value();
@@ -410,14 +410,14 @@ kudu::Status HybridClock::WalltimeWithError(uint64_t* now_usec, uint64_t* error_
 
 void HybridClock::SetMockClockWallTimeForTests(uint64_t now_usec) {
   CHECK(FLAGS_use_mock_wall_clock);
-  boost::lock_guard<simple_spinlock> lock(lock_);
+  std::lock_guard<simple_spinlock> lock(lock_);
   CHECK_GE(now_usec, mock_clock_time_usec_);
   mock_clock_time_usec_ = now_usec;
 }
 
 void HybridClock::SetMockMaxClockErrorForTests(uint64_t max_error_usec) {
   CHECK(FLAGS_use_mock_wall_clock);
-  boost::lock_guard<simple_spinlock> lock(lock_);
+  std::lock_guard<simple_spinlock> lock(lock_);
   mock_clock_max_error_usec_ = max_error_usec;
 }
 
@@ -431,7 +431,7 @@ uint64_t HybridClock::ErrorForMetrics() {
   Timestamp now;
   uint64_t error;
 
-  boost::lock_guard<simple_spinlock> lock(lock_);
+  std::lock_guard<simple_spinlock> lock(lock_);
   NowWithError(&now, &error);
   return error;
 }
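
Several call sites above (NowLatest(), WaitUntilAfter()) wrap the NowWithError() call in its own braces so the spinlock's lifetime ends before any waiting begins. A minimal sketch of the scoping idiom, with illustrative names:

    #include <chrono>
    #include <cstdint>
    #include <mutex>
    #include <thread>

    std::mutex clock_lock;
    uint64_t next_timestamp = 0;  // shared clock state

    uint64_t ReadThenWait() {
      uint64_t now;
      {
        std::lock_guard<std::mutex> l(clock_lock);  // held only for the read
        now = next_timestamp;
      }
      // Any sleeping happens with the lock released, so concurrent
      // readers of the clock are never blocked behind a waiter.
      std::this_thread::sleep_for(std::chrono::microseconds(1));
      return now;
    }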

http://git-wip-us.apache.org/repos/asf/incubator-kudu/blob/cfa9a99f/src/kudu/server/webserver.cc
----------------------------------------------------------------------
diff --git a/src/kudu/server/webserver.cc b/src/kudu/server/webserver.cc
index 539bed5..d685bc6 100644
--- a/src/kudu/server/webserver.cc
+++ b/src/kudu/server/webserver.cc
@@ -17,18 +17,20 @@
 #include "kudu/server/webserver.h"
 
 #include <algorithm>
-#include <stdio.h>
-#include <signal.h>
-#include <string>
-#include <map>
-#include <vector>
-#include <boost/lexical_cast.hpp>
+#include <boost/algorithm/string.hpp>
 #include <boost/bind.hpp>
+#include <boost/lexical_cast.hpp>
 #include <boost/mem_fn.hpp>
-#include <boost/algorithm/string.hpp>
+#include <boost/thread/shared_mutex.hpp>
 #include <gflags/gflags.h>
 #include <glog/logging.h>
+#include <map>
+#include <mutex>
+#include <signal.h>
 #include <squeasel.h>
+#include <stdio.h>
+#include <string>
+#include <vector>
 
 #include "kudu/gutil/map-util.h"
 #include "kudu/gutil/stl_util.h"
@@ -365,7 +367,7 @@ int Webserver::RunPathHandler(const PathHandler& handler,
 
 void Webserver::RegisterPathHandler(const string& path, const string& alias,
     const PathHandlerCallback& callback, bool is_styled, bool is_on_nav_bar) {
-  boost::lock_guard<boost::shared_mutex> lock(lock_);
+  std::lock_guard<boost::shared_mutex> lock(lock_);
   auto it = path_handlers_.find(path);
   if (it == path_handlers_.end()) {
     it = path_handlers_.insert(
@@ -423,7 +425,7 @@ bool Webserver::static_pages_available() const {
 }
 
 void Webserver::set_footer_html(const std::string& html) {
-  boost::lock_guard<boost::shared_mutex> l(lock_);
+  std::lock_guard<boost::shared_mutex> l(lock_);
   footer_html_ = html;
 }
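
Note that the webserver keeps its boost::shared_mutex but now guards writers with std::lock_guard; boost::shared_mutex satisfies the standard Lockable requirements, so the two libraries compose. A sketch of the resulting reader/writer split, with illustrative names:

    #include <boost/thread/shared_mutex.hpp>
    #include <mutex>
    #include <string>

    boost::shared_mutex rw_lock;
    std::string footer;

    void SetFooter(const std::string& html) {
      std::lock_guard<boost::shared_mutex> l(rw_lock);    // exclusive
      footer = html;
    }

    std::string GetFooter() {
      boost::shared_lock<boost::shared_mutex> l(rw_lock); // shared
      return footer;
    }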
 

http://git-wip-us.apache.org/repos/asf/incubator-kudu/blob/cfa9a99f/src/kudu/server/webserver.h
----------------------------------------------------------------------
diff --git a/src/kudu/server/webserver.h b/src/kudu/server/webserver.h
index 72ce092..6f19f1a 100644
--- a/src/kudu/server/webserver.h
+++ b/src/kudu/server/webserver.h
@@ -17,11 +17,10 @@
 #ifndef KUDU_UTIL_WEBSERVER_H
 #define KUDU_UTIL_WEBSERVER_H
 
+#include <boost/thread/shared_mutex.hpp>
 #include <map>
 #include <string>
 #include <vector>
-#include <boost/function.hpp>
-#include <boost/thread/shared_mutex.hpp>
 
 #include "kudu/server/webserver_options.h"
 #include "kudu/util/net/sockaddr.h"

http://git-wip-us.apache.org/repos/asf/incubator-kudu/blob/cfa9a99f/src/kudu/tablet/compaction.h
----------------------------------------------------------------------
diff --git a/src/kudu/tablet/compaction.h b/src/kudu/tablet/compaction.h
index 04b55b0..9258ea0 100644
--- a/src/kudu/tablet/compaction.h
+++ b/src/kudu/tablet/compaction.h
@@ -18,6 +18,7 @@
 #define KUDU_TABLET_COMPACTION_H
 
 #include <memory>
+#include <mutex>
 #include <string>
 #include <vector>
 
@@ -78,10 +79,10 @@ class CompactionInput {
 class RowSetsInCompaction {
  public:
   void AddRowSet(const std::shared_ptr<RowSet> &rowset,
-                 const std::shared_ptr<boost::mutex::scoped_try_lock> &lock) {
-    CHECK(lock->owns_lock());
+                 std::unique_lock<std::mutex> lock) {
+    CHECK(lock.owns_lock());
 
-    locks_.push_back(lock);
+    locks_.push_back(std::move(lock));
     rowsets_.push_back(rowset);
   }
 
@@ -104,10 +105,8 @@ class RowSetsInCompaction {
   }
 
  private:
-  typedef vector<std::shared_ptr<boost::mutex::scoped_try_lock> > LockVector;
-
   RowSetVector rowsets_;
-  LockVector locks_;
+  vector<std::unique_lock<std::mutex>> locks_;
 };
 
 // One row yielded by CompactionInput::PrepareBlock.
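
The AddRowSet() change above is the one spot where the migration is more than mechanical: boost::mutex::scoped_try_lock was neither copyable nor movable, hence the shared_ptr wrapper, while std::unique_lock is movable and can be passed by value and stored directly in a vector. A sketch of the move-through pattern:

    #include <mutex>
    #include <utility>
    #include <vector>

    std::vector<std::unique_lock<std::mutex>> held_locks;

    // Taking the unique_lock by value transfers ownership of the held
    // mutex into the container; it stays locked until held_locks drops it.
    void TakeOwnership(std::unique_lock<std::mutex> lock) {
      held_locks.push_back(std::move(lock));
    }

    void Example(std::mutex& m) {
      std::unique_lock<std::mutex> l(m);
      TakeOwnership(std::move(l));  // l no longer owns the mutex
    }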

http://git-wip-us.apache.org/repos/asf/incubator-kudu/blob/cfa9a99f/src/kudu/tablet/diskrowset.cc
----------------------------------------------------------------------
diff --git a/src/kudu/tablet/diskrowset.cc b/src/kudu/tablet/diskrowset.cc
index 549cfab..d00f601 100644
--- a/src/kudu/tablet/diskrowset.cc
+++ b/src/kudu/tablet/diskrowset.cc
@@ -16,8 +16,9 @@
 // under the License.
 
 #include <algorithm>
-#include <boost/thread/locks.hpp>
+#include <boost/thread/shared_mutex.hpp>
 #include <glog/logging.h>
+#include <mutex>
 #include <vector>
 
 #include "kudu/common/generic_iterators.h"
@@ -520,7 +521,7 @@ Status DiskRowSet::MajorCompactDeltaStores() {
 
 Status DiskRowSet::MajorCompactDeltaStoresWithColumnIds(const vector<ColumnId>& col_ids) {
   TRACE_EVENT0("tablet", "DiskRowSet::MajorCompactDeltaStores");
-  boost::lock_guard<Mutex> l(*delta_tracker()->compact_flush_lock());
+  std::lock_guard<Mutex> l(*delta_tracker()->compact_flush_lock());
 
   // TODO: do we need to lock schema or anything here?
   gscoped_ptr<MajorDeltaCompaction> compaction;
@@ -539,7 +540,7 @@ Status DiskRowSet::MajorCompactDeltaStoresWithColumnIds(const vector<ColumnId>&
   gscoped_ptr<CFileSet> new_base(new CFileSet(rowset_metadata_));
   RETURN_NOT_OK(new_base->Open());
   {
-    boost::lock_guard<percpu_rwlock> lock(component_lock_);
+    std::lock_guard<percpu_rwlock> lock(component_lock_);
     RETURN_NOT_OK(compaction->UpdateDeltaTracker(delta_tracker_.get()));
     base_data_.reset(new_base.release());
   }

http://git-wip-us.apache.org/repos/asf/incubator-kudu/blob/cfa9a99f/src/kudu/tablet/diskrowset.h
----------------------------------------------------------------------
diff --git a/src/kudu/tablet/diskrowset.h b/src/kudu/tablet/diskrowset.h
index f82eb11..d638336 100644
--- a/src/kudu/tablet/diskrowset.h
+++ b/src/kudu/tablet/diskrowset.h
@@ -22,9 +22,9 @@
 #ifndef KUDU_TABLET_DISKROWSET_H_
 #define KUDU_TABLET_DISKROWSET_H_
 
-#include <boost/thread/mutex.hpp>
 #include <gtest/gtest_prod.h>
 #include <memory>
+#include <mutex>
 #include <string>
 #include <vector>
 
@@ -348,7 +348,7 @@ class DiskRowSet : public RowSet {
   // Major compacts all the delta files for all the columns.
   Status MajorCompactDeltaStores();
 
-  boost::mutex *compact_flush_lock() OVERRIDE {
+  std::mutex *compact_flush_lock() OVERRIDE {
     return &compact_flush_lock_;
   }
 
@@ -402,7 +402,7 @@ class DiskRowSet : public RowSet {
 
   // Lock governing this rowset's inclusion in a compact/flush. If locked,
   // no other compactor will attempt to include this rowset.
-  boost::mutex compact_flush_lock_;
+  std::mutex compact_flush_lock_;
 
   DISALLOW_COPY_AND_ASSIGN(DiskRowSet);
 };
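
compact_flush_lock() returning a plain std::mutex* pairs naturally with the new AddRowSet() signature in compaction.h: a compaction picker can attempt a non-blocking claim and, on success, move the held lock into the compaction input. A hypothetical sketch (ToyRowSet and TryClaimForCompaction are illustrative, not Kudu code):

    #include <mutex>

    struct ToyRowSet {
      std::mutex lock;
      std::mutex* compact_flush_lock() { return &lock; }
    };

    bool TryClaimForCompaction(ToyRowSet* rs) {
      std::unique_lock<std::mutex> l(*rs->compact_flush_lock(),
                                     std::try_to_lock);
      if (!l.owns_lock()) {
        return false;  // another compactor already claimed this rowset
      }
      // In real use the lock would be moved into
      // RowSetsInCompaction::AddRowSet() so it stays held;
      // here it releases at scope exit.
      return true;
    }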

http://git-wip-us.apache.org/repos/asf/incubator-kudu/blob/cfa9a99f/src/kudu/tablet/lock_manager-test.cc
----------------------------------------------------------------------
diff --git a/src/kudu/tablet/lock_manager-test.cc b/src/kudu/tablet/lock_manager-test.cc
index ea0edf7..eccb67b 100644
--- a/src/kudu/tablet/lock_manager-test.cc
+++ b/src/kudu/tablet/lock_manager-test.cc
@@ -16,11 +16,10 @@
 // under the License.
 
 #include <algorithm>
-#include <boost/thread/locks.hpp>
-#include <boost/thread/mutex.hpp>
 #include <glog/logging.h>
 #include <gtest/gtest.h>
 #include <memory>
+#include <mutex>
 #include <vector>
 
 #include "kudu/gutil/gscoped_ptr.h"
@@ -118,7 +117,7 @@ class LmTestResource {
   }
 
   void acquire(uint64_t tid) {
-    boost::unique_lock<boost::mutex> lock(lock_);
+    std::unique_lock<std::mutex> lock(lock_);
     CHECK(!is_owned_);
     CHECK_EQ(0, owner_);
     owner_ = tid;
@@ -126,7 +125,7 @@ class LmTestResource {
   }
 
   void release(uint64_t tid) {
-    boost::unique_lock<boost::mutex> lock(lock_);
+    std::unique_lock<std::mutex> lock(lock_);
     CHECK(is_owned_);
     CHECK_EQ(tid, owner_);
     owner_ = 0;
@@ -137,7 +136,7 @@ class LmTestResource {
   DISALLOW_COPY_AND_ASSIGN(LmTestResource);
 
   const Slice* id_;
-  boost::mutex lock_;
+  std::mutex lock_;
   uint64_t owner_;
   bool is_owned_;
 };
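
One stylistic note on the test above: std::unique_lock is the drop-in replacement for boost::unique_lock, but where the lock is simply held for the whole scope, std::lock_guard expresses the same thing without unique_lock's deferred/timed/movable machinery:

    #include <mutex>

    std::mutex m;
    int counter = 0;

    void Increment() {
      // Equivalent to std::unique_lock here, but cannot be unlocked
      // early, moved, or deferred -- exactly what this scope needs.
      std::lock_guard<std::mutex> l(m);
      ++counter;
    }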

http://git-wip-us.apache.org/repos/asf/incubator-kudu/blob/cfa9a99f/src/kudu/tablet/lock_manager.cc
----------------------------------------------------------------------
diff --git a/src/kudu/tablet/lock_manager.cc b/src/kudu/tablet/lock_manager.cc
index b0d657e..af48540 100644
--- a/src/kudu/tablet/lock_manager.cc
+++ b/src/kudu/tablet/lock_manager.cc
@@ -15,17 +15,18 @@
 // specific language governing permissions and limitations
 // under the License.
 
-#include <boost/thread/locks.hpp>
-#include <boost/thread/mutex.hpp>
+#include "kudu/tablet/lock_manager.h"
+
+#include <boost/thread/shared_mutex.hpp>
 #include <glog/logging.h>
-#include <string>
+#include <mutex>
 #include <semaphore.h>
+#include <string>
 
 #include "kudu/gutil/dynamic_annotations.h"
 #include "kudu/gutil/gscoped_ptr.h"
 #include "kudu/gutil/hash/city.h"
 #include "kudu/gutil/walltime.h"
-#include "kudu/tablet/lock_manager.h"
 #include "kudu/util/locks.h"
 #include "kudu/util/semaphore.h"
 #include "kudu/util/trace.h"
@@ -169,7 +170,7 @@ LockEntry *LockTable::GetLockEntry(const Slice& key) {
     boost::shared_lock<rw_spinlock> table_rdlock(lock_.get_lock());
     Bucket *bucket = FindBucket(new_entry->key_hash_);
     {
-      boost::lock_guard<simple_spinlock> bucket_lock(bucket->lock);
+      std::lock_guard<simple_spinlock> bucket_lock(bucket->lock);
       LockEntry **node = FindSlot(bucket, new_entry->key_, new_entry->key_hash_);
       old_entry = *node;
       if (old_entry != nullptr) {
@@ -188,7 +189,7 @@ LockEntry *LockTable::GetLockEntry(const Slice& key) {
   }
 
   if (base::subtle::NoBarrier_AtomicIncrement(&item_count_, 1) > size_) {
-    boost::unique_lock<percpu_rwlock> table_wrlock(lock_, boost::try_to_lock);
+    std::unique_lock<percpu_rwlock> table_wrlock(lock_, std::try_to_lock);
     // if we can't take the lock, means that someone else is resizing.
     // (The percpu_rwlock try_lock waits for readers to complete)
     if (table_wrlock.owns_lock()) {
@@ -202,10 +203,10 @@ LockEntry *LockTable::GetLockEntry(const Slice& key) {
 void LockTable::ReleaseLockEntry(LockEntry *entry) {
   bool removed = false;
   {
-    boost::lock_guard<rw_spinlock> table_rdlock(lock_.get_lock());
+    std::lock_guard<rw_spinlock> table_rdlock(lock_.get_lock());
     Bucket *bucket = FindBucket(entry->key_hash_);
     {
-      boost::lock_guard<simple_spinlock> bucket_lock(bucket->lock);
+      std::lock_guard<simple_spinlock> bucket_lock(bucket->lock);
       LockEntry **node = FindEntry(bucket, entry);
       if (node != nullptr) {
         // ASSUMPTION: There are few updates, so locking the same row at the same time is rare
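
GetLockEntry() and ReleaseLockEntry() above show the lock table's two-level scheme: a table-wide percpu_rwlock taken shared (or exclusively for resizing, via std::try_to_lock) above per-bucket spinlocks. A sketch of the shape using std::shared_mutex (C++17) as a stand-in for percpu_rwlock:

    #include <cstddef>
    #include <mutex>
    #include <shared_mutex>
    #include <vector>

    struct Bucket { std::mutex lock; /* chained entries... */ };

    class ToyLockTable {
     public:
      explicit ToyLockTable(size_t n) : buckets_(n) {}

      void TouchBucket(size_t hash) {
        // Shared table lock: excludes resizers, not other bucket users.
        std::shared_lock<std::shared_mutex> table_rdlock(table_lock_);
        Bucket& b = buckets_[hash % buckets_.size()];
        std::lock_guard<std::mutex> bucket_lock(b.lock);
        // ... insert or remove an entry in this bucket ...
      }

      void MaybeResize() {
        // Non-blocking, as in GetLockEntry(): if another thread already
        // holds the table lock exclusively, it is resizing -- skip.
        std::unique_lock<std::shared_mutex> wrlock(table_lock_,
                                                   std::try_to_lock);
        if (!wrlock.owns_lock()) return;
        // ... rehash into a larger bucket array while fully exclusive ...
      }

     private:
      std::shared_mutex table_lock_;
      std::vector<Bucket> buckets_;
    };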

http://git-wip-us.apache.org/repos/asf/incubator-kudu/blob/cfa9a99f/src/kudu/tablet/memrowset.h
----------------------------------------------------------------------
diff --git a/src/kudu/tablet/memrowset.h b/src/kudu/tablet/memrowset.h
index 962a0e1..364e3df 100644
--- a/src/kudu/tablet/memrowset.h
+++ b/src/kudu/tablet/memrowset.h
@@ -17,8 +17,9 @@
 #ifndef KUDU_TABLET_MEMROWSET_H
 #define KUDU_TABLET_MEMROWSET_H
 
-#include <boost/optional.hpp>
+#include <boost/optional/optional_fwd.hpp>
 #include <memory>
+#include <mutex>
 #include <string>
 #include <vector>
 
@@ -227,7 +228,7 @@ class MemRowSet : public RowSet,
     return 0;
   }
 
-  boost::mutex *compact_flush_lock() OVERRIDE {
+  std::mutex *compact_flush_lock() OVERRIDE {
     return &compact_flush_lock_;
   }
 
@@ -360,7 +361,7 @@ class MemRowSet : public RowSet,
   volatile uint64_t debug_insert_count_;
   volatile uint64_t debug_update_count_;
 
-  boost::mutex compact_flush_lock_;
+  std::mutex compact_flush_lock_;
 
   Atomic32 has_logged_throttling_;
 

http://git-wip-us.apache.org/repos/asf/incubator-kudu/blob/cfa9a99f/src/kudu/tablet/mock-rowsets.h
----------------------------------------------------------------------
diff --git a/src/kudu/tablet/mock-rowsets.h b/src/kudu/tablet/mock-rowsets.h
index bccdf67..cc7f0f4 100644
--- a/src/kudu/tablet/mock-rowsets.h
+++ b/src/kudu/tablet/mock-rowsets.h
@@ -18,6 +18,7 @@
 #define KUDU_TABLET_MOCK_ROWSETS_H
 
 #include <memory>
+#include <mutex>
 #include <string>
 #include <vector>
 
@@ -77,7 +78,7 @@ class MockRowSet : public RowSet {
     LOG(FATAL) << "Unimplemented";
     return 0;
   }
-  virtual boost::mutex *compact_flush_lock() OVERRIDE {
+  virtual std::mutex *compact_flush_lock() OVERRIDE {
     LOG(FATAL) << "Unimplemented";
     return NULL;
   }

http://git-wip-us.apache.org/repos/asf/incubator-kudu/blob/cfa9a99f/src/kudu/tablet/mvcc-test.cc
----------------------------------------------------------------------
diff --git a/src/kudu/tablet/mvcc-test.cc b/src/kudu/tablet/mvcc-test.cc
index d54a10a..7e166c5 100644
--- a/src/kudu/tablet/mvcc-test.cc
+++ b/src/kudu/tablet/mvcc-test.cc
@@ -15,9 +15,9 @@
 // specific language governing permissions and limitations
 // under the License.
 
-#include <boost/thread/locks.hpp>
 #include <glog/logging.h>
 #include <gtest/gtest.h>
+#include <mutex>
 #include <thread>
 
 #include "kudu/server/hybrid_clock.h"
@@ -45,12 +45,12 @@ class MvccTest : public KuduTest {
     MvccSnapshot s;
     CHECK_OK(mgr->WaitForCleanSnapshotAtTimestamp(ts, &s, MonoTime::Max()));
     CHECK(s.is_clean()) << "verifying postcondition";
-    boost::lock_guard<simple_spinlock> lock(lock_);
+    std::lock_guard<simple_spinlock> lock(lock_);
     result_snapshot_.reset(new MvccSnapshot(s));
   }
 
   bool HasResultSnapshot() {
-    boost::lock_guard<simple_spinlock> lock(lock_);
+    std::lock_guard<simple_spinlock> lock(lock_);
     return result_snapshot_ != nullptr;
   }
 

