kudu-commits mailing list archives

From danburk...@apache.org
Subject [4/6] incubator-kudu git commit: Replace BOOST_FOREACH with c++11 range syntax
Date Fri, 15 Jan 2016 20:00:09 GMT
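
The substitution in the hunks below is purely mechanical: every BOOST_FOREACH(elem, container) loop becomes a C++11 range-based for loop with the same element declaration and body, and the now-unneeded <boost/foreach.hpp> include is dropped. As a minimal standalone sketch of the pattern (the container and names here are illustrative only, not taken from the Kudu sources):

    // Before (requires <boost/foreach.hpp>):
    //   BOOST_FOREACH(const std::string& ts_uuid, ts_uuids) {
    //     std::cout << ts_uuid << std::endl;
    //   }
    //
    // After (C++11 range-based for, no Boost dependency):
    #include <iostream>
    #include <string>
    #include <vector>

    int main() {
      std::vector<std::string> ts_uuids = {"ts-0", "ts-1", "ts-2"};
      for (const std::string& ts_uuid : ts_uuids) {  // same iteration semantics, standard syntax
        std::cout << ts_uuid << std::endl;
      }
      return 0;
    }

Note that whether the loop variable is taken by value or by const reference carries over unchanged in each hunk.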
http://git-wip-us.apache.org/repos/asf/incubator-kudu/blob/9daafa5e/src/kudu/integration-tests/cluster_itest_util.cc
----------------------------------------------------------------------
diff --git a/src/kudu/integration-tests/cluster_itest_util.cc b/src/kudu/integration-tests/cluster_itest_util.cc
index 48f88b4..2cbfa4f 100644
--- a/src/kudu/integration-tests/cluster_itest_util.cc
+++ b/src/kudu/integration-tests/cluster_itest_util.cc
@@ -17,7 +17,6 @@
 
 #include <algorithm>
 #include <boost/optional.hpp>
-#include <boost/foreach.hpp>
 #include <glog/stl_logging.h>
 #include <limits>
 
@@ -114,7 +113,7 @@ Status GetLastOpIdForEachReplica(const string& tablet_id,
   RpcController controller;
 
   op_ids->clear();
-  BOOST_FOREACH(TServerDetails* ts, replicas) {
+  for (TServerDetails* ts : replicas) {
     controller.Reset();
     controller.set_timeout(MonoDelta::FromSeconds(3));
     opid_resp.Clear();
@@ -159,7 +158,7 @@ Status WaitForServersToAgree(const MonoDelta& timeout,
       bool any_behind = false;
       bool any_disagree = false;
       int64_t cur_index = kInvalidOpIdIndex;
-      BOOST_FOREACH(const OpId& id, ids) {
+      for (const OpId& id : ids) {
         if (cur_index == kInvalidOpIdIndex) {
           cur_index = id.index();
         }
@@ -200,7 +199,7 @@ Status WaitUntilAllReplicasHaveOp(const int64_t log_index,
     Status s = GetLastOpIdForEachReplica(tablet_id, replicas, &op_ids);
     if (s.ok()) {
       bool any_behind = false;
-      BOOST_FOREACH(const OpId& op_id, op_ids) {
+      for (const OpId& op_id : op_ids) {
         if (op_id.index() < log_index) {
           any_behind = true;
           break;
@@ -217,7 +216,7 @@ Status WaitUntilAllReplicasHaveOp(const int64_t log_index,
     SleepFor(MonoDelta::FromMilliseconds(50));
   }
   string replicas_str;
-  BOOST_FOREACH(const TServerDetails* replica, replicas) {
+  for (const TServerDetails* replica : replicas) {
     if (!replicas_str.empty()) replicas_str += ", ";
     replicas_str += "{ " + replica->ToString() + " }";
   }
@@ -240,7 +239,7 @@ Status CreateTabletServerMap(MasterServiceProxy* master_proxy,
   }
 
   ts_map->clear();
-  BOOST_FOREACH(const ListTabletServersResponsePB::Entry& entry, resp.servers()) {
+  for (const ListTabletServersResponsePB::Entry& entry : resp.servers()) {
     HostPort host_port;
     RETURN_NOT_OK(HostPortFromPB(entry.registration().rpc_addresses(0), &host_port));
     vector<Sockaddr> addresses;
@@ -568,7 +567,7 @@ Status ListRunningTabletIds(const TServerDetails* ts,
   vector<ListTabletsResponsePB::StatusAndSchemaPB> tablets;
   RETURN_NOT_OK(ListTablets(ts, timeout, &tablets));
   tablet_ids->clear();
-  BOOST_FOREACH(const ListTabletsResponsePB::StatusAndSchemaPB& t, tablets) {
+  for (const ListTabletsResponsePB::StatusAndSchemaPB& t : tablets) {
     if (t.tablet_status().state() == tablet::RUNNING) {
       tablet_ids->push_back(t.tablet_status().tablet_id());
     }
@@ -628,7 +627,7 @@ Status WaitForNumVotersInConfigOnMaster(const shared_ptr<MasterServiceProxy>& ma
     s = GetTabletLocations(master_proxy, tablet_id, time_remaining, &tablet_locations);
     if (s.ok()) {
       num_voters_found = 0;
-      BOOST_FOREACH(const TabletLocationsPB::ReplicaPB& r, tablet_locations.replicas()) {
+      for (const TabletLocationsPB::ReplicaPB& r : tablet_locations.replicas()) {
         if (r.role() == RaftPeerPB::LEADER || r.role() == RaftPeerPB::FOLLOWER) num_voters_found++;
       }
       if (num_voters_found == num_voters) break;
@@ -681,7 +680,7 @@ Status WaitUntilTabletInState(TServerDetails* ts,
     s = ListTablets(ts, MonoDelta::FromSeconds(10), &tablets);
     if (s.ok()) {
       bool seen = false;
-      BOOST_FOREACH(const ListTabletsResponsePB::StatusAndSchemaPB& t, tablets) {
+      for (const ListTabletsResponsePB::StatusAndSchemaPB& t : tablets) {
         if (t.tablet_status().tablet_id() == tablet_id) {
           seen = true;
           last_state = t.tablet_status().state();

http://git-wip-us.apache.org/repos/asf/incubator-kudu/blob/9daafa5e/src/kudu/integration-tests/create-table-stress-test.cc
----------------------------------------------------------------------
diff --git a/src/kudu/integration-tests/create-table-stress-test.cc b/src/kudu/integration-tests/create-table-stress-test.cc
index 0a2ddf3..622aa0c 100644
--- a/src/kudu/integration-tests/create-table-stress-test.cc
+++ b/src/kudu/integration-tests/create-table-stress-test.cc
@@ -275,11 +275,11 @@ TEST_F(CreateTableStressTest, TestGetTableLocationsOptions) {
   LOG(INFO) << "========================================================";
   std::vector<scoped_refptr<master::TableInfo> > tables;
   cluster_->mini_master()->master()->catalog_manager()->GetAllTables(&tables);
-  BOOST_FOREACH(const scoped_refptr<master::TableInfo>& table_info, tables) {
+  for (const scoped_refptr<master::TableInfo>& table_info : tables) {
     LOG(INFO) << "Table: " << table_info->ToString();
     std::vector<scoped_refptr<master::TabletInfo> > tablets;
     table_info->GetAllTablets(&tablets);
-    BOOST_FOREACH(const scoped_refptr<master::TabletInfo>& tablet_info, tablets) {
+    for (const scoped_refptr<master::TabletInfo>& tablet_info : tablets) {
       master::TabletMetadataLock l_tablet(tablet_info.get(), master::TabletMetadataLock::READ);
       const master::SysTabletsEntryPB& metadata = tablet_info->metadata().state().pb;
       LOG(INFO) << "  Tablet: " << tablet_info->ToString()

http://git-wip-us.apache.org/repos/asf/incubator-kudu/blob/9daafa5e/src/kudu/integration-tests/delete_table-test.cc
----------------------------------------------------------------------
diff --git a/src/kudu/integration-tests/delete_table-test.cc b/src/kudu/integration-tests/delete_table-test.cc
index 5c11d55..4edbba8 100644
--- a/src/kudu/integration-tests/delete_table-test.cc
+++ b/src/kudu/integration-tests/delete_table-test.cc
@@ -15,7 +15,6 @@
 // specific language governing permissions and limitations
 // under the License.
 
-#include <boost/foreach.hpp>
 #include <boost/optional.hpp>
 #include <glog/stl_logging.h>
 #include <gtest/gtest.h>
@@ -952,7 +951,7 @@ TEST_P(DeleteTableTombstonedParamTest, TestTabletTombstone) {
   NO_FATALS(WaitForTabletTombstonedOnTS(kTsIndex, tablet_id, CMETA_EXPECTED));
 
   ASSERT_OK(itest::WaitForNumTabletsOnTS(ts, 2, timeout, &tablets));
-  BOOST_FOREACH(const ListTabletsResponsePB::StatusAndSchemaPB& t, tablets) {
+  for (const ListTabletsResponsePB::StatusAndSchemaPB& t : tablets) {
     if (t.tablet_status().tablet_id() == tablet_id) {
       ASSERT_EQ(tablet::SHUTDOWN, t.tablet_status().state());
       ASSERT_EQ(TABLET_DATA_TOMBSTONED, t.tablet_status().tablet_data_state())
@@ -978,7 +977,7 @@ TEST_P(DeleteTableTombstonedParamTest, TestTabletTombstone) {
   // just with their data state set as TOMBSTONED. They should also be listed
   // as NOT_STARTED because we restarted the server.
   ASSERT_OK(itest::WaitForNumTabletsOnTS(ts, 2, timeout, &tablets));
-  BOOST_FOREACH(const ListTabletsResponsePB::StatusAndSchemaPB& t, tablets) {
+  for (const ListTabletsResponsePB::StatusAndSchemaPB& t : tablets) {
     ASSERT_EQ(tablet::NOT_STARTED, t.tablet_status().state());
     ASSERT_EQ(TABLET_DATA_TOMBSTONED, t.tablet_status().tablet_data_state())
         << t.tablet_status().tablet_id() << " not tombstoned";
@@ -986,7 +985,7 @@ TEST_P(DeleteTableTombstonedParamTest, TestTabletTombstone) {
 
   // Finally, delete all tablets on the TS, and wait for all data to be gone.
   LOG(INFO) << "Deleting all tablets...";
-  BOOST_FOREACH(const ListTabletsResponsePB::StatusAndSchemaPB& tablet, tablets) {
+  for (const ListTabletsResponsePB::StatusAndSchemaPB& tablet : tablets) {
     string tablet_id = tablet.tablet_status().tablet_id();
     // We need retries here, since some of the tablets may still be
     // bootstrapping after being restarted above.

http://git-wip-us.apache.org/repos/asf/incubator-kudu/blob/9daafa5e/src/kudu/integration-tests/external_mini_cluster-test.cc
----------------------------------------------------------------------
diff --git a/src/kudu/integration-tests/external_mini_cluster-test.cc b/src/kudu/integration-tests/external_mini_cluster-test.cc
index ddc8230..cb3d37b 100644
--- a/src/kudu/integration-tests/external_mini_cluster-test.cc
+++ b/src/kudu/integration-tests/external_mini_cluster-test.cc
@@ -15,7 +15,6 @@
 // specific language governing permissions and limitations
 // under the License.
 
-#include <boost/foreach.hpp>
 #include <glog/logging.h>
 #include <gtest/gtest.h>
 #include <sys/types.h>

http://git-wip-us.apache.org/repos/asf/incubator-kudu/blob/9daafa5e/src/kudu/integration-tests/external_mini_cluster.cc
----------------------------------------------------------------------
diff --git a/src/kudu/integration-tests/external_mini_cluster.cc b/src/kudu/integration-tests/external_mini_cluster.cc
index 4ca4934..0af21c7 100644
--- a/src/kudu/integration-tests/external_mini_cluster.cc
+++ b/src/kudu/integration-tests/external_mini_cluster.cc
@@ -17,7 +17,6 @@
 
 #include "kudu/integration-tests/external_mini_cluster.h"
 
-#include <boost/foreach.hpp>
 #include <gtest/gtest.h>
 #include <memory>
 #include <rapidjson/document.h>
@@ -151,27 +150,27 @@ Status ExternalMiniCluster::Start() {
 
 void ExternalMiniCluster::Shutdown(NodeSelectionMode mode) {
   if (mode == ALL) {
-    BOOST_FOREACH(const scoped_refptr<ExternalMaster>& master, masters_) {
+    for (const scoped_refptr<ExternalMaster>& master : masters_) {
       if (master) {
         master->Shutdown();
       }
     }
   }
 
-  BOOST_FOREACH(const scoped_refptr<ExternalTabletServer>& ts, tablet_servers_) {
+  for (const scoped_refptr<ExternalTabletServer>& ts : tablet_servers_) {
     ts->Shutdown();
   }
 }
 
 Status ExternalMiniCluster::Restart() {
-  BOOST_FOREACH(const scoped_refptr<ExternalMaster>& master, masters_) {
+  for (const scoped_refptr<ExternalMaster>& master : masters_) {
     if (master && master->IsShutdown()) {
       RETURN_NOT_OK_PREPEND(master->Restart(), "Cannot restart master bound at: " +
                                                master->bound_rpc_hostport().ToString());
     }
   }
 
-  BOOST_FOREACH(const scoped_refptr<ExternalTabletServer>& ts, tablet_servers_) {
+  for (const scoped_refptr<ExternalTabletServer>& ts : tablet_servers_) {
     if (ts->IsShutdown()) {
       RETURN_NOT_OK_PREPEND(ts->Restart(), "Cannot restart tablet server bound at: " +
                                            ts->bound_rpc_hostport().ToString());
@@ -200,7 +199,7 @@ vector<string> SubstituteInFlags(const vector<string>& orig_flags,
                                  int index) {
   string str_index = strings::Substitute("$0", index);
   vector<string> ret;
-  BOOST_FOREACH(const string& orig, orig_flags) {
+  for (const string& orig : orig_flags) {
     ret.push_back(StringReplace(orig, "${index}", str_index, true));
   }
   return ret;
@@ -306,8 +305,8 @@ Status ExternalMiniCluster::WaitForTabletServerCount(int count, const MonoDelta&
       // Do a second step of verification to verify that the descs that we got
       // are aligned (same uuid/seqno) with the TSs that we have in the cluster.
       int match_count = 0;
-      BOOST_FOREACH(const master::ListTabletServersResponsePB_Entry& e, resp.servers()) {
-        BOOST_FOREACH(const scoped_refptr<ExternalTabletServer>& ets, tablet_servers_) {
+      for (const master::ListTabletServersResponsePB_Entry& e : resp.servers()) {
+        for (const scoped_refptr<ExternalTabletServer>& ets : tablet_servers_) {
           if (ets->instance_id().permanent_uuid() == e.instance_id().permanent_uuid() &&
               ets->instance_id().instance_seqno() == e.instance_id().instance_seqno()) {
             match_count++;
@@ -326,7 +325,7 @@ Status ExternalMiniCluster::WaitForTabletServerCount(int count, const MonoDelta&
 
 void ExternalMiniCluster::AssertNoCrashes() {
   vector<ExternalDaemon*> daemons = this->daemons();
-  BOOST_FOREACH(ExternalDaemon* d, daemons) {
+  for (ExternalDaemon* d : daemons) {
     if (d->IsShutdown()) continue;
     EXPECT_TRUE(d->IsProcessAlive()) << "At least one process crashed";
   }
@@ -349,7 +348,7 @@ Status ExternalMiniCluster::WaitForTabletsRunning(ExternalTabletServer* ts,
     }
 
     int num_not_running = 0;
-    BOOST_FOREACH(const StatusAndSchemaPB& status, resp.status_and_schema()) {
+    for (const StatusAndSchemaPB& status : resp.status_and_schema()) {
       if (status.tablet_status().state() != tablet::RUNNING) {
         num_not_running++;
       }
@@ -385,7 +384,7 @@ Status ExternalMiniCluster::GetLeaderMasterIndex(int* idx) {
   MonoTime deadline = MonoTime::Now(MonoTime::FINE);
   deadline.AddDelta(MonoDelta::FromSeconds(5));
 
-  BOOST_FOREACH(const scoped_refptr<ExternalMaster>& master, masters_) {
+  for (const scoped_refptr<ExternalMaster>& master : masters_) {
     addrs.push_back(master->bound_rpc_addr());
   }
   rpc.reset(new GetLeaderMasterRpc(Bind(&LeaderMasterCallback,
@@ -414,7 +413,7 @@ Status ExternalMiniCluster::GetLeaderMasterIndex(int* idx) {
 }
 
 ExternalTabletServer* ExternalMiniCluster::tablet_server_by_uuid(const std::string& uuid) const {
-  BOOST_FOREACH(const scoped_refptr<ExternalTabletServer>& ts, tablet_servers_) {
+  for (const scoped_refptr<ExternalTabletServer>& ts : tablet_servers_) {
     if (ts->instance_id().permanent_uuid() == uuid) {
       return ts.get();
     }
@@ -433,10 +432,10 @@ int ExternalMiniCluster::tablet_server_index_by_uuid(const std::string& uuid) co
 
 vector<ExternalDaemon*> ExternalMiniCluster::daemons() const {
   vector<ExternalDaemon*> results;
-  BOOST_FOREACH(const scoped_refptr<ExternalTabletServer>& ts, tablet_servers_) {
+  for (const scoped_refptr<ExternalTabletServer>& ts : tablet_servers_) {
     results.push_back(ts.get());
   }
-  BOOST_FOREACH(const scoped_refptr<ExternalMaster>& master, masters_) {
+  for (const scoped_refptr<ExternalMaster>& master : masters_) {
     results.push_back(master.get());
   }
   return results;
@@ -461,7 +460,7 @@ Status ExternalMiniCluster::CreateClient(client::KuduClientBuilder& builder,
                                          client::sp::shared_ptr<client::KuduClient>* client) {
   CHECK(!masters_.empty());
   builder.clear_master_server_addrs();
-  BOOST_FOREACH(const scoped_refptr<ExternalMaster>& master, masters_) {
+  for (const scoped_refptr<ExternalMaster>& master : masters_) {
     builder.add_master_server_addr(master->bound_rpc_hostport().ToString());
   }
   return builder.Build(client);
@@ -718,7 +717,7 @@ Status ExternalDaemon::GetInt64Metric(const MetricEntityPrototype* entity_proto,
   RETURN_NOT_OK(r.Init());
   vector<const Value*> entities;
   RETURN_NOT_OK(r.ExtractObjectArray(r.root(), NULL, &entities));
-  BOOST_FOREACH(const Value* entity, entities) {
+  for (const Value* entity : entities) {
     // Find the desired entity.
     string type;
     RETURN_NOT_OK(r.ExtractString(entity, "type", &type));
@@ -736,7 +735,7 @@ Status ExternalDaemon::GetInt64Metric(const MetricEntityPrototype* entity_proto,
     // Find the desired metric within the entity.
     vector<const Value*> metrics;
     RETURN_NOT_OK(r.ExtractObjectArray(entity, "metrics", &metrics));
-    BOOST_FOREACH(const Value* metric, metrics) {
+    for (const Value* metric : metrics) {
       string name;
       RETURN_NOT_OK(r.ExtractString(metric, "name", &name));
       if (name != metric_proto->name()) {

http://git-wip-us.apache.org/repos/asf/incubator-kudu/blob/9daafa5e/src/kudu/integration-tests/external_mini_cluster_fs_inspector.cc
----------------------------------------------------------------------
diff --git a/src/kudu/integration-tests/external_mini_cluster_fs_inspector.cc b/src/kudu/integration-tests/external_mini_cluster_fs_inspector.cc
index ae227a0..5d5b2d2 100644
--- a/src/kudu/integration-tests/external_mini_cluster_fs_inspector.cc
+++ b/src/kudu/integration-tests/external_mini_cluster_fs_inspector.cc
@@ -18,7 +18,6 @@
 #include "kudu/integration-tests/external_mini_cluster_fs_inspector.h"
 
 #include <algorithm>
-#include <boost/foreach.hpp>
 
 #include "kudu/consensus/metadata.pb.h"
 #include "kudu/gutil/strings/join.h"
@@ -75,7 +74,7 @@ int ExternalMiniClusterFsInspector::CountWALSegmentsOnTS(int index) {
   vector<string> tablets;
   CHECK_OK(ListFilesInDir(ts_wal_dir, &tablets));
   int total_segments = 0;
-  BOOST_FOREACH(const string& tablet, tablets) {
+  for (const string& tablet : tablets) {
     string tablet_wal_dir = JoinPathSegments(ts_wal_dir, tablet);
     total_segments += CountFilesInDir(tablet_wal_dir);
   }
@@ -284,9 +283,9 @@ Status ExternalMiniClusterFsInspector::WaitForFilePatternInTabletWalDirOnTs(
 
     error_msg = "";
     bool any_missing_required = false;
-    BOOST_FOREACH(const string& required_filter, substrings_required) {
+    for (const string& required_filter : substrings_required) {
       bool filter_matched = false;
-      BOOST_FOREACH(const string& entry, entries) {
+      for (const string& entry : entries) {
         if (entry.find(required_filter) != string::npos) {
           filter_matched = true;
           break;
@@ -300,9 +299,9 @@ Status ExternalMiniClusterFsInspector::WaitForFilePatternInTabletWalDirOnTs(
     }
 
     bool any_present_disallowed = false;
-    BOOST_FOREACH(const string& entry, entries) {
+    for (const string& entry : entries) {
       if (any_present_disallowed) break;
-      BOOST_FOREACH(const string& disallowed_filter, substrings_disallowed) {
+      for (const string& disallowed_filter : substrings_disallowed) {
         if (entry.find(disallowed_filter) != string::npos) {
           any_present_disallowed = true;
           error_msg += "present from substrings_disallowed: " + entry +

http://git-wip-us.apache.org/repos/asf/incubator-kudu/blob/9daafa5e/src/kudu/integration-tests/flex_partitioning-itest.cc
----------------------------------------------------------------------
diff --git a/src/kudu/integration-tests/flex_partitioning-itest.cc b/src/kudu/integration-tests/flex_partitioning-itest.cc
index e481876..5c2fea9 100644
--- a/src/kudu/integration-tests/flex_partitioning-itest.cc
+++ b/src/kudu/integration-tests/flex_partitioning-itest.cc
@@ -19,7 +19,6 @@
 // of PK subsets, etc).
 
 #include <algorithm>
-#include <boost/foreach.hpp>
 #include <glog/stl_logging.h>
 #include <map>
 #include <memory>
@@ -222,7 +221,7 @@ void FlexPartitioningITest::CheckScanWithColumnPredicate(Slice col_name, int low
 
   // Manually evaluate the predicate against the data we think we inserted.
   vector<string> expected_rows;
-  BOOST_FOREACH(const KuduPartialRow* row, inserted_rows_) {
+  for (const KuduPartialRow* row : inserted_rows_) {
     int32_t val;
     CHECK_OK(row->GetInt32(col_name, &val));
     if (val >= lower && val <= upper) {
@@ -263,7 +262,7 @@ void FlexPartitioningITest::CheckPartitionKeyRangeScan() {
 
   vector<string> rows;
 
-  BOOST_FOREACH(const master::TabletLocationsPB& tablet_locations,
+  for (const master::TabletLocationsPB& tablet_locations :
                 table_locations.tablet_locations()) {
 
     string partition_key_start = tablet_locations.partition().partition_key_start();
@@ -278,7 +277,7 @@ void FlexPartitioningITest::CheckPartitionKeyRangeScan() {
   std::sort(rows.begin(), rows.end());
 
   vector<string> expected_rows;
-  BOOST_FOREACH(KuduPartialRow* row, inserted_rows_) {
+  for (KuduPartialRow* row : inserted_rows_) {
     expected_rows.push_back("(" + row->ToString() + ")");
   }
   std::sort(expected_rows.begin(), expected_rows.end());
@@ -296,7 +295,7 @@ void FlexPartitioningITest::CheckPartitionKeyRangeScanWithPKRange(int lower, int
 
   vector<string> rows;
 
-  BOOST_FOREACH(const master::TabletLocationsPB& tablet_locations,
+  for (const master::TabletLocationsPB& tablet_locations :
                 table_locations.tablet_locations()) {
 
     string partition_key_start = tablet_locations.partition().partition_key_start();

http://git-wip-us.apache.org/repos/asf/incubator-kudu/blob/9daafa5e/src/kudu/integration-tests/full_stack-insert-scan-test.cc
----------------------------------------------------------------------
diff --git a/src/kudu/integration-tests/full_stack-insert-scan-test.cc b/src/kudu/integration-tests/full_stack-insert-scan-test.cc
index c42707f..a2abf77 100644
--- a/src/kudu/integration-tests/full_stack-insert-scan-test.cc
+++ b/src/kudu/integration-tests/full_stack-insert-scan-test.cc
@@ -15,7 +15,6 @@
 // specific language governing permissions and limitations
 // under the License.
 
-#include <boost/foreach.hpp>
 #include <cmath>
 #include <cstdlib>
 #include <gflags/gflags.h>
@@ -299,7 +298,7 @@ void FullStackInsertScanTest::DoConcurrentClientInserts() {
              strings::Substitute("concurrent inserts ($0 rows, $1 threads)",
                                  kNumRows, kNumInsertClients)) {
     start_latch.CountDown();
-    BOOST_FOREACH(const scoped_refptr<Thread>& thread, threads) {
+    for (const scoped_refptr<Thread>& thread : threads) {
       ASSERT_OK(ThreadJoiner(thread.get())
                 .warn_every_ms(15000)
                 .Join());
@@ -333,7 +332,7 @@ void FullStackInsertScanTest::FlushToDisk() {
     tserver::TSTabletManager* tm = ts->tablet_manager();
     vector<scoped_refptr<TabletPeer> > peers;
     tm->GetTabletPeers(&peers);
-    BOOST_FOREACH(const scoped_refptr<TabletPeer>& peer, peers) {
+    for (const scoped_refptr<TabletPeer>& peer : peers) {
       Tablet* tablet = peer->tablet();
       if (!tablet->MemRowSetEmpty()) {
         ASSERT_OK(tablet->Flush());

http://git-wip-us.apache.org/repos/asf/incubator-kudu/blob/9daafa5e/src/kudu/integration-tests/linked_list-test-util.h
----------------------------------------------------------------------
diff --git a/src/kudu/integration-tests/linked_list-test-util.h b/src/kudu/integration-tests/linked_list-test-util.h
index 42a0891..0536e6f 100644
--- a/src/kudu/integration-tests/linked_list-test-util.h
+++ b/src/kudu/integration-tests/linked_list-test-util.h
@@ -16,7 +16,6 @@
 // under the License.
 
 #include <algorithm>
-#include <boost/foreach.hpp>
 #include <glog/logging.h>
 #include <iostream>
 #include <list>
@@ -303,7 +302,7 @@ class PeriodicWebUIChecker {
 
     // Generate list of urls for each master and tablet server
     for (int i = 0; i < cluster.num_masters(); i++) {
-      BOOST_FOREACH(std::string page, master_pages) {
+      for (std::string page : master_pages) {
         urls_.push_back(strings::Substitute(
             "http://$0$1",
             cluster.master(i)->bound_http_hostport().ToString(),
@@ -311,7 +310,7 @@ class PeriodicWebUIChecker {
       }
     }
     for (int i = 0; i < cluster.num_tablet_servers(); i++) {
-      BOOST_FOREACH(std::string page, ts_pages) {
+      for (std::string page : ts_pages) {
         urls_.push_back(strings::Substitute(
             "http://$0$1",
             cluster.tablet_server(i)->bound_http_hostport().ToString(),
@@ -336,7 +335,7 @@ class PeriodicWebUIChecker {
     faststring dst;
     LOG(INFO) << "Curl thread will poll the following URLs every " << period_.ToMilliseconds()
         << " ms: ";
-    BOOST_FOREACH(std::string url, urls_) {
+    for (std::string url : urls_) {
       LOG(INFO) << url;
     }
     for (int count = 0; is_running_.Load(); count++) {
@@ -402,7 +401,7 @@ class LinkedListVerifier {
 std::vector<const KuduPartialRow*> LinkedListTester::GenerateSplitRows(
     const client::KuduSchema& schema) {
   std::vector<const KuduPartialRow*> split_keys;
-  BOOST_FOREACH(int64_t val, GenerateSplitInts()) {
+  for (int64_t val : GenerateSplitInts()) {
     KuduPartialRow* row = schema.NewRow();
     CHECK_OK(row->SetInt64(kKeyColumnName, val));
     split_keys.push_back(row);
@@ -496,7 +495,7 @@ Status LinkedListTester::LoadLinkedList(
       }
       return Status::OK();
     }
-    BOOST_FOREACH(LinkedListChainGenerator* chain, chains) {
+    for (LinkedListChainGenerator* chain : chains) {
       RETURN_NOT_OK_PREPEND(chain->GenerateNextInsert(table.get(), session.get()),
                             "Unable to generate next insert into linked list chain");
     }
@@ -510,7 +509,7 @@ Status LinkedListTester::LoadLinkedList(
 
     if (enable_mutation_) {
       // Rows have been inserted; they're now safe to update.
-      BOOST_FOREACH(LinkedListChainGenerator* chain, chains) {
+      for (LinkedListChainGenerator* chain : chains) {
         updater.to_update()->Put(chain->prev_key());
       }
     }
@@ -611,7 +610,7 @@ Status LinkedListTester::VerifyLinkedListRemote(
       cb_called = true;
     }
     RETURN_NOT_OK_PREPEND(scanner.NextBatch(&rows), "Couldn't fetch next row batch");
-    BOOST_FOREACH(const client::KuduRowResult& row, rows) {
+    for (const client::KuduRowResult& row : rows) {
       int64_t key;
       int64_t link;
       bool updated;
@@ -802,7 +801,7 @@ void LinkedListVerifier::SummarizeBrokenLinks(const std::vector<int64_t>& broken
   int n_logged = 0;
   const int kMaxToLog = 100;
 
-  BOOST_FOREACH(int64_t broken, broken_links) {
+  for (int64_t broken : broken_links) {
     int tablet = std::upper_bound(split_key_ints_.begin(),
                                   split_key_ints_.end(),
                                   broken) - split_key_ints_.begin();

http://git-wip-us.apache.org/repos/asf/incubator-kudu/blob/9daafa5e/src/kudu/integration-tests/linked_list-test.cc
----------------------------------------------------------------------
diff --git a/src/kudu/integration-tests/linked_list-test.cc b/src/kudu/integration-tests/linked_list-test.cc
index f1dc222..ed5574a 100644
--- a/src/kudu/integration-tests/linked_list-test.cc
+++ b/src/kudu/integration-tests/linked_list-test.cc
@@ -135,7 +135,7 @@ class LinkedListTest : public tserver::TabletServerIntegrationTestBase {
       return;
     }
     vector<string> split_flags = strings::Split(flags_str, " ");
-    BOOST_FOREACH(const string& flag, split_flags) {
+    for (const string& flag : split_flags) {
       flags->push_back(flag);
     }
   }

http://git-wip-us.apache.org/repos/asf/incubator-kudu/blob/9daafa5e/src/kudu/integration-tests/mini_cluster.cc
----------------------------------------------------------------------
diff --git a/src/kudu/integration-tests/mini_cluster.cc b/src/kudu/integration-tests/mini_cluster.cc
index ccc1377..3af4192 100644
--- a/src/kudu/integration-tests/mini_cluster.cc
+++ b/src/kudu/integration-tests/mini_cluster.cc
@@ -17,7 +17,6 @@
 
 #include "kudu/integration-tests/mini_cluster.h"
 
-#include <boost/foreach.hpp>
 
 #include "kudu/client/client.h"
 #include "kudu/gutil/strings/join.h"
@@ -118,7 +117,7 @@ Status MiniCluster::StartDistributedMasters() {
     mini_masters_[i] = shared_ptr<MiniMaster>(mini_master.release());
   }
   int i = 0;
-  BOOST_FOREACH(const shared_ptr<MiniMaster>& master, mini_masters_) {
+  for (const shared_ptr<MiniMaster>& master : mini_masters_) {
     LOG(INFO) << "Waiting to initialize catalog manager on master " << i++;
     RETURN_NOT_OK_PREPEND(master->WaitForCatalogManagerInit(),
                           Substitute("Could not initialize catalog manager on master $0", i));
@@ -129,7 +128,7 @@ Status MiniCluster::StartDistributedMasters() {
 Status MiniCluster::StartSync() {
   RETURN_NOT_OK(Start());
   int count = 0;
-  BOOST_FOREACH(const shared_ptr<MiniTabletServer>& tablet_server, mini_tablet_servers_) {
+  for (const shared_ptr<MiniTabletServer>& tablet_server : mini_tablet_servers_) {
     RETURN_NOT_OK_PREPEND(tablet_server->WaitStarted(),
                           Substitute("TabletServer $0 failed to start.", count));
     count++;
@@ -171,7 +170,7 @@ Status MiniCluster::AddTabletServer() {
 
   // set the master addresses
   tablet_server->options()->master_addresses.clear();
-  BOOST_FOREACH(const shared_ptr<MiniMaster>& master, mini_masters_) {
+  for (const shared_ptr<MiniMaster>& master : mini_masters_) {
     tablet_server->options()->master_addresses.push_back(HostPort(master->bound_rpc_addr()));
   }
   RETURN_NOT_OK(tablet_server->Start())
@@ -201,11 +200,11 @@ MiniMaster* MiniCluster::leader_mini_master() {
 }
 
 void MiniCluster::Shutdown() {
-  BOOST_FOREACH(const shared_ptr<MiniTabletServer>& tablet_server, mini_tablet_servers_) {
+  for (const shared_ptr<MiniTabletServer>& tablet_server : mini_tablet_servers_) {
     tablet_server->Shutdown();
   }
   mini_tablet_servers_.clear();
-  BOOST_FOREACH(shared_ptr<MiniMaster>& master_server, mini_masters_) {
+  for (shared_ptr<MiniMaster>& master_server : mini_masters_) {
     master_server->Shutdown();
     master_server.reset();
   }
@@ -213,7 +212,7 @@ void MiniCluster::Shutdown() {
 }
 
 void MiniCluster::ShutdownMasters() {
-  BOOST_FOREACH(shared_ptr<MiniMaster>& master_server, mini_masters_) {
+  for (shared_ptr<MiniMaster>& master_server : mini_masters_) {
     master_server->Shutdown();
     master_server.reset();
   }
@@ -280,7 +279,7 @@ Status MiniCluster::WaitForTabletServerCount(int count,
       // Do a second step of verification to verify that the descs that we got
       // are aligned (same uuid/seqno) with the TSs that we have in the cluster.
       int match_count = 0;
-      BOOST_FOREACH(const shared_ptr<TSDescriptor>& desc, *descs) {
+      for (const shared_ptr<TSDescriptor>& desc : *descs) {
         for (int i = 0; i < mini_tablet_servers_.size(); ++i) {
           TabletServer *ts = mini_tablet_servers_[i]->server();
           if (ts->instance_pb().permanent_uuid() == desc->permanent_uuid() &&
@@ -309,7 +308,7 @@ Status MiniCluster::CreateClient(KuduClientBuilder* builder,
     builder = &default_builder;
   }
   builder->clear_master_server_addrs();
-  BOOST_FOREACH(const shared_ptr<MiniMaster>& master, mini_masters_) {
+  for (const shared_ptr<MiniMaster>& master : mini_masters_) {
     CHECK(master);
     builder->add_master_server_addr(master->bound_rpc_addr_str());
   }

http://git-wip-us.apache.org/repos/asf/incubator-kudu/blob/9daafa5e/src/kudu/integration-tests/raft_consensus-itest.cc
----------------------------------------------------------------------
diff --git a/src/kudu/integration-tests/raft_consensus-itest.cc b/src/kudu/integration-tests/raft_consensus-itest.cc
index b8351c0..8496ddb 100644
--- a/src/kudu/integration-tests/raft_consensus-itest.cc
+++ b/src/kudu/integration-tests/raft_consensus-itest.cc
@@ -15,7 +15,6 @@
 // specific language governing permissions and limitations
 // under the License.
 
-#include <boost/foreach.hpp>
 #include <boost/optional.hpp>
 #include <gflags/gflags.h>
 #include <glog/logging.h>
@@ -187,12 +186,12 @@ class RaftConsensusITest : public TabletServerIntegrationTestBase {
                                      replica_results.size());
 
     StrAppend(&ret, "Leader Results: \n");
-    BOOST_FOREACH(const string& result, leader_results) {
+    for (const string& result : leader_results) {
       StrAppend(&ret, result, "\n");
     }
 
     StrAppend(&ret, "Replica Results: \n");
-    BOOST_FOREACH(const string& result, replica_results) {
+    for (const string& result : replica_results) {
       StrAppend(&ret, result, "\n");
     }
 
@@ -237,13 +236,13 @@ class RaftConsensusITest : public TabletServerIntegrationTestBase {
         bool overflow;
         session->GetPendingErrors(&errors, &overflow);
         CHECK(!overflow);
-        BOOST_FOREACH(const client::KuduError* e, errors) {
+        for (const client::KuduError* e : errors) {
           CHECK(e->status().IsAlreadyPresent()) << "Unexpected error: " << e->status().ToString();
         }
         inserted -= errors.size();
       }
 
-      BOOST_FOREACH(CountDownLatch* latch, latches) {
+      for (CountDownLatch* latch : latches) {
         latch->CountDown(inserted);
       }
     }
@@ -297,7 +296,7 @@ class RaftConsensusITest : public TabletServerIntegrationTestBase {
     vector<TServerDetails*> followers;
     GetOnlyLiveFollowerReplicas(tablet_id_, &followers);
 
-    BOOST_FOREACH(TServerDetails* ts, followers) {
+    for (TServerDetails* ts : followers) {
       ExternalTabletServer* ets = cluster_->tablet_server_by_uuid(ts->uuid());
       CHECK_OK(ets->Pause());
       SleepFor(MonoDelta::FromMilliseconds(100));
@@ -312,7 +311,7 @@ class RaftConsensusITest : public TabletServerIntegrationTestBase {
     }
 
     // Resume the replicas.
-    BOOST_FOREACH(TServerDetails* ts, followers) {
+    for (TServerDetails* ts : followers) {
       ExternalTabletServer* ets = cluster_->tablet_server_by_uuid(ts->uuid());
       CHECK_OK(ets->Resume());
     }
@@ -524,7 +523,7 @@ TEST_F(RaftConsensusITest, MultiThreadedMutateAndInsertThroughConsensus) {
                                   &new_thread));
     threads_.push_back(new_thread);
   }
-  BOOST_FOREACH(scoped_refptr<kudu::Thread> thr, threads_) {
+  for (scoped_refptr<kudu::Thread> thr : threads_) {
    CHECK_OK(ThreadJoiner(thr.get()).Join());
   }
 
@@ -1004,12 +1003,12 @@ TEST_F(RaftConsensusITest, MultiThreadedInsertWithFailovers) {
     threads_.push_back(new_thread);
   }
 
-  BOOST_FOREACH(CountDownLatch* latch, latches) {
+  for (CountDownLatch* latch : latches) {
     latch->Wait();
     StopOrKillLeaderAndElectNewOne();
   }
 
-  BOOST_FOREACH(scoped_refptr<kudu::Thread> thr, threads_) {
+  for (scoped_refptr<kudu::Thread> thr : threads_) {
    CHECK_OK(ThreadJoiner(thr.get()).Join());
   }
 
@@ -1055,7 +1054,7 @@ TEST_F(RaftConsensusITest, TestAutomaticLeaderElection) {
   }
 
   // Restart every node that was killed, and wait for the nodes to converge
-  BOOST_FOREACH(TServerDetails* killed_node, killed_leaders) {
+  for (TServerDetails* killed_node : killed_leaders) {
     CHECK_OK(cluster_->tablet_server_by_uuid(killed_node->uuid())->Restart());
   }
   // Verify the data on the remaining replicas.
@@ -1140,7 +1139,7 @@ TEST_F(RaftConsensusITest, TestKUDU_597) {
   }
 
   finish.Store(true);
-  BOOST_FOREACH(scoped_refptr<kudu::Thread> thr, threads_) {
+  for (scoped_refptr<kudu::Thread> thr : threads_) {
     CHECK_OK(ThreadJoiner(thr.get()).Join());
   }
 }
@@ -1418,7 +1417,7 @@ void RaftConsensusITest::AssertMajorityRequiredForElectionsAndWrites(
     int num_to_pause = config_size - minority_to_retain;
     LOG(INFO) << "Pausing " << num_to_pause << " tablet servers in config of size " << config_size;
     vector<string> paused_uuids;
-    BOOST_FOREACH(const TabletServerMap::value_type& entry, tablet_servers) {
+    for (const TabletServerMap::value_type& entry : tablet_servers) {
       if (paused_uuids.size() == num_to_pause) {
         continue;
       }
@@ -1450,7 +1449,7 @@ void RaftConsensusITest::AssertMajorityRequiredForElectionsAndWrites(
 
     // Resume the paused servers.
     LOG(INFO) << "Resuming " << num_to_pause << " tablet servers in config of size " << config_size;
-    BOOST_FOREACH(const string& replica_uuid, paused_uuids) {
+    for (const string& replica_uuid : paused_uuids) {
       ExternalTabletServer* replica_ts = cluster_->tablet_server_by_uuid(replica_uuid);
       ASSERT_OK(replica_ts->Resume());
     }
@@ -1503,7 +1502,7 @@ void RaftConsensusITest::WaitForReplicasReportedToMaster(
     ASSERT_OK(GetTabletLocations(tablet_id, timeout, tablet_locations));
     *has_leader = false;
     if (tablet_locations->replicas_size() == num_replicas) {
-      BOOST_FOREACH(const master::TabletLocationsPB_ReplicaPB& replica,
+      for (const master::TabletLocationsPB_ReplicaPB& replica :
                     tablet_locations->replicas()) {
         if (replica.role() == RaftPeerPB::LEADER) {
           *has_leader = true;
@@ -1566,7 +1565,7 @@ TEST_F(RaftConsensusITest, TestAddRemoveServer) {
 
   // Go from 3 tablet servers down to 1 in the configuration.
   vector<int> remove_list = { 2, 1 };
-  BOOST_FOREACH(int to_remove_idx, remove_list) {
+  for (int to_remove_idx : remove_list) {
     int num_servers = active_tablet_servers.size();
     LOG(INFO) << "Remove: Going from " << num_servers << " to " << num_servers - 1 << " replicas";
 
@@ -1586,7 +1585,7 @@ TEST_F(RaftConsensusITest, TestAddRemoveServer) {
 
   // Add the tablet servers back, in reverse order, going from 1 to 3 servers in the configuration.
   vector<int> add_list = { 1, 2 };
-  BOOST_FOREACH(int to_add_idx, add_list) {
+  for (int to_add_idx : add_list) {
     int num_servers = active_tablet_servers.size();
     LOG(INFO) << "Add: Going from " << num_servers << " to " << num_servers + 1 << " replicas";
 
@@ -1896,7 +1895,7 @@ TEST_F(RaftConsensusITest, TestConfigChangeUnderLoad) {
   LOG(INFO) << "Removing servers...";
   // Go from 3 tablet servers down to 1 in the configuration.
   vector<int> remove_list = { 2, 1 };
-  BOOST_FOREACH(int to_remove_idx, remove_list) {
+  for (int to_remove_idx : remove_list) {
     int num_servers = active_tablet_servers.size();
     LOG(INFO) << "Remove: Going from " << num_servers << " to " << num_servers - 1 << " replicas";
 
@@ -1913,7 +1912,7 @@ TEST_F(RaftConsensusITest, TestConfigChangeUnderLoad) {
   LOG(INFO) << "Adding servers...";
   // Add the tablet servers back, in reverse order, going from 1 to 3 servers in the configuration.
   vector<int> add_list = { 1, 2 };
-  BOOST_FOREACH(int to_add_idx, add_list) {
+  for (int to_add_idx : add_list) {
     int num_servers = active_tablet_servers.size();
     LOG(INFO) << "Add: Going from " << num_servers << " to " << num_servers + 1 << " replicas";
 
@@ -1929,7 +1928,7 @@ TEST_F(RaftConsensusITest, TestConfigChangeUnderLoad) {
 
   LOG(INFO) << "Joining writer threads...";
   finish.Store(true);
-  BOOST_FOREACH(const scoped_refptr<Thread>& thread, threads) {
+  for (const scoped_refptr<Thread>& thread : threads) {
     ASSERT_OK(ThreadJoiner(thread.get()).Join());
   }
 
@@ -1967,7 +1966,7 @@ TEST_F(RaftConsensusITest, TestMasterNotifiedOnConfigChange) {
 
   // Determine the server to add to the config.
   string uuid_to_add;
-  BOOST_FOREACH(const TabletServerMap::value_type& entry, tablet_servers_) {
+  for (const TabletServerMap::value_type& entry : tablet_servers_) {
     if (!ContainsKey(active_tablet_servers, entry.second->uuid())) {
       uuid_to_add = entry.second->uuid();
     }
@@ -2130,7 +2129,7 @@ TEST_F(RaftConsensusITest, TestAutoCreateReplica) {
   InsertOrDie(&active_tablet_servers, follower->uuid(), follower);
 
   TServerDetails* new_node = NULL;
-  BOOST_FOREACH(TServerDetails* ts, tservers) {
+  for (TServerDetails* ts : tservers) {
     if (!ContainsKey(active_tablet_servers, ts->uuid())) {
       new_node = ts;
       break;
@@ -2251,7 +2250,7 @@ static void EnableLogLatency(server::GenericServiceProxy* proxy) {
   FlagMap flags;
   InsertOrDie(&flags, "log_inject_latency", "true");
   InsertOrDie(&flags, "log_inject_latency_ms_mean", "1000");
-  BOOST_FOREACH(const FlagMap::value_type& e, flags) {
+  for (const FlagMap::value_type& e : flags) {
     SetFlagRequestPB req;
     SetFlagResponsePB resp;
     RpcController rpc;

http://git-wip-us.apache.org/repos/asf/incubator-kudu/blob/9daafa5e/src/kudu/integration-tests/remote_bootstrap-itest.cc
----------------------------------------------------------------------
diff --git a/src/kudu/integration-tests/remote_bootstrap-itest.cc b/src/kudu/integration-tests/remote_bootstrap-itest.cc
index 3a72353..828f03a 100644
--- a/src/kudu/integration-tests/remote_bootstrap-itest.cc
+++ b/src/kudu/integration-tests/remote_bootstrap-itest.cc
@@ -433,13 +433,13 @@ TEST_F(RemoteBootstrapITest, TestConcurrentRemoteBootstraps) {
   ASSERT_OK(WaitForNumTabletsOnTS(target_ts, kNumTablets, timeout, &tablets));
 
   vector<string> tablet_ids;
-  BOOST_FOREACH(const ListTabletsResponsePB::StatusAndSchemaPB& t, tablets) {
+  for (const ListTabletsResponsePB::StatusAndSchemaPB& t : tablets) {
     tablet_ids.push_back(t.tablet_status().tablet_id());
   }
 
   // Wait until all replicas are up and running.
   for (int i = 0; i < cluster_->num_tablet_servers(); i++) {
-    BOOST_FOREACH(const string& tablet_id, tablet_ids) {
+    for (const string& tablet_id : tablet_ids) {
       ASSERT_OK(itest::WaitUntilTabletRunning(ts_map_[cluster_->tablet_server(i)->uuid()],
                                               tablet_id, timeout));
     }
@@ -448,7 +448,7 @@ TEST_F(RemoteBootstrapITest, TestConcurrentRemoteBootstraps) {
   // Elect leaders on each tablet for term 1. All leaders will be on TS 1.
   const int kLeaderIndex = 1;
   const string kLeaderUuid = cluster_->tablet_server(kLeaderIndex)->uuid();
-  BOOST_FOREACH(const string& tablet_id, tablet_ids) {
+  for (const string& tablet_id : tablet_ids) {
     ASSERT_OK(itest::StartElection(ts_map_[kLeaderUuid], tablet_id, timeout));
   }
 
@@ -464,14 +464,14 @@ TEST_F(RemoteBootstrapITest, TestConcurrentRemoteBootstraps) {
   }
   workload.StopAndJoin();
 
-  BOOST_FOREACH(const string& tablet_id, tablet_ids) {
+  for (const string& tablet_id : tablet_ids) {
     ASSERT_OK(WaitForServersToAgree(timeout, ts_map_, tablet_id, 1));
   }
 
   // Now pause the leader so we can tombstone the tablets.
   ASSERT_OK(cluster_->tablet_server(kLeaderIndex)->Pause());
 
-  BOOST_FOREACH(const string& tablet_id, tablet_ids) {
+  for (const string& tablet_id : tablet_ids) {
     LOG(INFO) << "Tombstoning tablet " << tablet_id << " on TS " << target_ts->uuid();
     ASSERT_OK(itest::DeleteTablet(target_ts, tablet_id, TABLET_DATA_TOMBSTONED, boost::none,
                                   MonoDelta::FromSeconds(10)));
@@ -480,7 +480,7 @@ TEST_F(RemoteBootstrapITest, TestConcurrentRemoteBootstraps) {
   // Unpause the leader TS and wait for it to remotely bootstrap the tombstoned
   // tablets, in parallel.
   ASSERT_OK(cluster_->tablet_server(kLeaderIndex)->Resume());
-  BOOST_FOREACH(const string& tablet_id, tablet_ids) {
+  for (const string& tablet_id : tablet_ids) {
     ASSERT_OK(itest::WaitUntilTabletRunning(target_ts, tablet_id, timeout));
   }
 

http://git-wip-us.apache.org/repos/asf/incubator-kudu/blob/9daafa5e/src/kudu/integration-tests/test_workload.cc
----------------------------------------------------------------------
diff --git a/src/kudu/integration-tests/test_workload.cc b/src/kudu/integration-tests/test_workload.cc
index aa6ab25..aa0588c 100644
--- a/src/kudu/integration-tests/test_workload.cc
+++ b/src/kudu/integration-tests/test_workload.cc
@@ -15,7 +15,6 @@
 // specific language governing permissions and limitations
 // under the License.
 
-#include <boost/foreach.hpp>
 
 #include "kudu/client/client.h"
 #include "kudu/client/client-test-util.h"
@@ -138,7 +137,7 @@ void TestWorkload::WriteThread() {
       bool overflow;
       session->GetPendingErrors(&errors, &overflow);
       CHECK(!overflow);
-      BOOST_FOREACH(const client::KuduError* e, errors) {
+      for (const client::KuduError* e : errors) {
         if (timeout_allowed_ && e->status().IsTimedOut()) {
           continue;
         }
@@ -237,7 +236,7 @@ void TestWorkload::Start() {
 void TestWorkload::StopAndJoin() {
   should_run_.Store(false);
   start_latch_.Reset(0);
-  BOOST_FOREACH(scoped_refptr<kudu::Thread> thr, threads_) {
+  for (scoped_refptr<kudu::Thread> thr : threads_) {
    CHECK_OK(ThreadJoiner(thr.get()).Join());
   }
   threads_.clear();

http://git-wip-us.apache.org/repos/asf/incubator-kudu/blob/9daafa5e/src/kudu/integration-tests/ts_itest-base.h
----------------------------------------------------------------------
diff --git a/src/kudu/integration-tests/ts_itest-base.h b/src/kudu/integration-tests/ts_itest-base.h
index 17d29c0..5a77969 100644
--- a/src/kudu/integration-tests/ts_itest-base.h
+++ b/src/kudu/integration-tests/ts_itest-base.h
@@ -17,7 +17,6 @@
 #ifndef KUDU_INTEGRATION_TESTS_ITEST_UTIL_H_
 #define KUDU_INTEGRATION_TESTS_ITEST_UTIL_H_
 
-#include <boost/foreach.hpp>
 #include <glog/stl_logging.h>
 #include <string>
 #include <utility>
@@ -79,7 +78,7 @@ class TabletServerIntegrationTestBase : public TabletServerTestBase {
       return;
     }
     std::vector<std::string> split_flags = strings::Split(flags_str, " ");
-    BOOST_FOREACH(const std::string& flag, split_flags) {
+    for (const std::string& flag : split_flags) {
       flags->push_back(flag);
     }
   }
@@ -105,11 +104,11 @@ class TabletServerIntegrationTestBase : public TabletServerTestBase {
       opts.extra_tserver_flags.push_back(strings::Substitute("--consensus_rpc_timeout_ms=$0",
                                                              FLAGS_consensus_rpc_timeout_ms));
     } else {
-      BOOST_FOREACH(const std::string& flag, non_default_ts_flags) {
+      for (const std::string& flag : non_default_ts_flags) {
         opts.extra_tserver_flags.push_back(flag);
       }
     }
-    BOOST_FOREACH(const std::string& flag, non_default_master_flags) {
+    for (const std::string& flag : non_default_master_flags) {
       opts.extra_master_flags.push_back(flag);
     }
 
@@ -148,8 +147,8 @@ class TabletServerIntegrationTestBase : public TabletServerTestBase {
       CHECK_OK(controller.status());
       CHECK(!resp.has_error()) << "Response had an error: " << resp.error().ShortDebugString();
 
-      BOOST_FOREACH(const master::TabletLocationsPB& location, resp.tablet_locations()) {
-        BOOST_FOREACH(const master::TabletLocationsPB_ReplicaPB& replica, location.replicas()) {
+      for (const master::TabletLocationsPB& location : resp.tablet_locations()) {
+        for (const master::TabletLocationsPB_ReplicaPB& replica : location.replicas()) {
           TServerDetails* server = FindOrDie(tablet_servers_, replica.ts_info().permanent_uuid());
           tablet_replicas.insert(pair<std::string, TServerDetails*>(location.tablet_id(), server));
         }
@@ -199,7 +198,7 @@ class TabletServerIntegrationTestBase : public TabletServerTestBase {
     }
 
     std::random_shuffle(replicas_copy.begin(), replicas_copy.end());
-    BOOST_FOREACH(TServerDetails* replica, replicas_copy) {
+    for (TServerDetails* replica : replicas_copy) {
       if (GetReplicaStatusAndCheckIfLeader(replica, tablet_id,
                                            MonoDelta::FromMilliseconds(100)).ok()) {
         return replica;
@@ -231,9 +230,9 @@ class TabletServerIntegrationTestBase : public TabletServerTestBase {
     req.mutable_table()->set_table_name(kTableId);
 
     RETURN_NOT_OK(cluster_->master_proxy()->GetTableLocations(req, &resp, &controller));
-    BOOST_FOREACH(const TabletLocationsPB& loc, resp.tablet_locations()) {
+    for (const TabletLocationsPB& loc : resp.tablet_locations()) {
       if (loc.tablet_id() == tablet_id) {
-        BOOST_FOREACH(const TabletLocationsPB::ReplicaPB& replica, loc.replicas()) {
+        for (const TabletLocationsPB::ReplicaPB& replica : loc.replicas()) {
           if (replica.role() == RaftPeerPB::LEADER) {
             *leader_uuid = replica.ts_info().permanent_uuid();
             return Status::OK();
@@ -290,7 +289,7 @@ class TabletServerIntegrationTestBase : public TabletServerTestBase {
       ++iter;
     }
 
-    BOOST_FOREACH(const std::string& uuid, uuids) {
+    for (const std::string& uuid : uuids) {
       delete EraseKeyReturnValuePtr(&tablet_servers_, uuid);
     }
   }
@@ -308,7 +307,7 @@ class TabletServerIntegrationTestBase : public TabletServerTestBase {
       replicas.push_back((*range.first).second);
     }
 
-    BOOST_FOREACH(TServerDetails* replica, replicas) {
+    for (TServerDetails* replica : replicas) {
       if (leader != NULL &&
           replica->instance_id.permanent_uuid() == leader->instance_id.permanent_uuid()) {
         continue;
@@ -374,7 +373,7 @@ class TabletServerIntegrationTestBase : public TabletServerTestBase {
     std::string error = strings::Substitute("Fewer than $0 TabletServers were alive. Dead TSs: ",
                                             num_tablet_servers);
     RpcController controller;
-    BOOST_FOREACH(const TabletServerMap::value_type& entry, tablet_servers_) {
+    for (const TabletServerMap::value_type& entry : tablet_servers_) {
       controller.Reset();
       controller.set_timeout(MonoDelta::FromSeconds(10));
       PingRequestPB req;

http://git-wip-us.apache.org/repos/asf/incubator-kudu/blob/9daafa5e/src/kudu/integration-tests/update_scan_delta_compact-test.cc
----------------------------------------------------------------------
diff --git a/src/kudu/integration-tests/update_scan_delta_compact-test.cc b/src/kudu/integration-tests/update_scan_delta_compact-test.cc
index aa51e3f..82b3edd 100644
--- a/src/kudu/integration-tests/update_scan_delta_compact-test.cc
+++ b/src/kudu/integration-tests/update_scan_delta_compact-test.cc
@@ -228,7 +228,7 @@ void UpdateScanDeltaCompactionTest::RunThreads() {
   SleepFor(MonoDelta::FromSeconds(FLAGS_seconds_to_run * 1.0));
   stop_latch.CountDown();
 
-  BOOST_FOREACH(const scoped_refptr<Thread>& thread, threads) {
+  for (const scoped_refptr<Thread>& thread : threads) {
     ASSERT_OK(ThreadJoiner(thread.get())
               .warn_every_ms(500)
               .Join());
@@ -278,7 +278,7 @@ void UpdateScanDeltaCompactionTest::CurlWebPages(CountDownLatch* stop_latch) con
   EasyCurl curl;
   faststring dst;
   while (stop_latch->count() > 0) {
-    BOOST_FOREACH(const string& url, urls) {
+    for (const string& url : urls) {
       VLOG(1) << "Curling URL " << url;
       Status status = curl.FetchURL(url, &dst);
       if (status.ok()) {

http://git-wip-us.apache.org/repos/asf/incubator-kudu/blob/9daafa5e/src/kudu/master/catalog_manager.cc
----------------------------------------------------------------------
diff --git a/src/kudu/master/catalog_manager.cc b/src/kudu/master/catalog_manager.cc
index 342580f..8a3b77b 100644
--- a/src/kudu/master/catalog_manager.cc
+++ b/src/kudu/master/catalog_manager.cc
@@ -36,7 +36,6 @@
 
 #include "kudu/master/catalog_manager.h"
 
-#include <boost/foreach.hpp>
 #include <boost/optional.hpp>
 #include <boost/thread/condition_variable.hpp>
 #include <boost/thread/locks.hpp>
@@ -601,7 +600,7 @@ void CatalogManager::Shutdown() {
   }
 
   // Abort and Wait tables task completion
-  BOOST_FOREACH(const TableInfoMap::value_type& e, table_ids_map_) {
+  for (const TableInfoMap::value_type& e : table_ids_map_) {
     e.second->AbortTasks();
     e.second->WaitTasksCompletion();
   }
@@ -631,7 +630,7 @@ void CatalogManager::AbortTableCreation(TableInfo* table,
   string table_id = table->id();
   string table_name = table->mutable_metadata()->mutable_dirty()->pb.name();
   vector<string> tablet_ids_to_erase;
-  BOOST_FOREACH(TabletInfo* tablet, tablets) {
+  for (TabletInfo* tablet : tablets) {
     tablet_ids_to_erase.push_back(tablet->tablet_id());
   }
 
@@ -648,11 +647,11 @@ void CatalogManager::AbortTableCreation(TableInfo* table,
 
   // Call AbortMutation() manually, as otherwise the lock won't be
   // released.
-  BOOST_FOREACH(TabletInfo* tablet, tablets) {
+  for (TabletInfo* tablet : tablets) {
     tablet->mutable_metadata()->AbortMutation();
   }
   table->mutable_metadata()->AbortMutation();
-  BOOST_FOREACH(const string& tablet_id_to_erase, tablet_ids_to_erase) {
+  for (const string& tablet_id_to_erase : tablet_ids_to_erase) {
     CHECK_EQ(tablet_map_.erase(tablet_id_to_erase), 1)
         << "Unable to erase tablet " << tablet_id_to_erase << " from tablet map.";
   }
@@ -715,7 +714,7 @@ Status CatalogManager::CreateTable(const CreateTableRequestPB* orig_req,
   vector<DecodedRowOperation> ops;
   RETURN_NOT_OK(decoder.DecodeOperations(&ops));
 
-  BOOST_FOREACH(const DecodedRowOperation& op, ops) {
+  for (const DecodedRowOperation& op : ops) {
     if (op.type != RowOperationsPB::SPLIT_ROW) {
       Status s = Status::InvalidArgument(
           "Split rows must be specified as RowOperationsPB::SPLIT_ROW");
@@ -779,7 +778,7 @@ Status CatalogManager::CreateTable(const CreateTableRequestPB* orig_req,
     table_names_map_[req.name()] = table;
 
     // d. Create the TabletInfo objects in state PREPARING.
-    BOOST_FOREACH(const Partition& partition, partitions) {
+    for (const Partition& partition : partitions) {
       PartitionPB partition_pb;
       partition.ToPB(&partition_pb);
       tablets.push_back(CreateTabletInfo(table.get(), partition_pb));
@@ -788,7 +787,7 @@ Status CatalogManager::CreateTable(const CreateTableRequestPB* orig_req,
     // Add the table/tablets to the in-memory map for the assignment.
     resp->set_table_id(table->id());
     table->AddTablets(tablets);
-    BOOST_FOREACH(TabletInfo* tablet, tablets) {
+    for (TabletInfo* tablet : tablets) {
       InsertOrDie(&tablet_map_, tablet->tablet_id(), tablet);
     }
   }
@@ -799,7 +798,7 @@ Status CatalogManager::CreateTable(const CreateTableRequestPB* orig_req,
   // They will get committed at the end of this function.
   // Sanity check: the tables and tablets should all be in "preparing" state.
   CHECK_EQ(SysTablesEntryPB::PREPARING, table->metadata().dirty().pb.state());
-  BOOST_FOREACH(const TabletInfo *tablet, tablets) {
+  for (const TabletInfo *tablet : tablets) {
     CHECK_EQ(SysTabletsEntryPB::PREPARING, tablet->metadata().dirty().pb.state());
   }
 
@@ -831,7 +830,7 @@ Status CatalogManager::CreateTable(const CreateTableRequestPB* orig_req,
   // g. Commit the in-memory state.
   table->mutable_metadata()->CommitMutation();
 
-  BOOST_FOREACH(TabletInfo *tablet, tablets) {
+  for (TabletInfo *tablet : tablets) {
     tablet->mutable_metadata()->CommitMutation();
   }
 
@@ -1000,7 +999,7 @@ static Status ApplyAlterSteps(const SysTablesEntryPB& current_pb,
     builder.set_next_column_id(ColumnId(current_pb.next_column_id()));
   }
 
-  BOOST_FOREACH(const AlterTableRequestPB::Step& step, req->alter_schema_steps()) {
+  for (const AlterTableRequestPB::Step& step : req->alter_schema_steps()) {
     switch (step.type()) {
       case AlterTableRequestPB::ADD_COLUMN: {
         if (!step.has_add_column()) {
@@ -1271,7 +1270,7 @@ Status CatalogManager::ListTables(const ListTablesRequestPB* req,
 
   boost::shared_lock<LockType> l(lock_);
 
-  BOOST_FOREACH(const TableInfoMap::value_type& entry, table_names_map_) {
+  for (const TableInfoMap::value_type& entry : table_names_map_) {
     TableMetadataLock ltm(entry.second.get(), TableMetadataLock::READ);
     if (!ltm.data().is_running()) continue;
 
@@ -1299,7 +1298,7 @@ bool CatalogManager::GetTableInfo(const string& table_id, scoped_refptr<TableInf
 void CatalogManager::GetAllTables(std::vector<scoped_refptr<TableInfo> > *tables) {
   tables->clear();
   boost::shared_lock<LockType> l(lock_);
-  BOOST_FOREACH(const TableInfoMap::value_type& e, table_ids_map_) {
+  for (const TableInfoMap::value_type& e : table_ids_map_) {
     tables->push_back(e.second);
   }
 }
@@ -1339,7 +1338,7 @@ Status CatalogManager::ProcessTabletReport(TSDescriptor* ts_desc,
   // the server should have, compare vs the ones being reported, and somehow mark
   // any that have been "lost" (eg somehow the tablet metadata got corrupted or something).
 
-  BOOST_FOREACH(const ReportedTabletPB& reported, report.updated_tablets()) {
+  for (const ReportedTabletPB& reported : report.updated_tablets()) {
     ReportedTabletUpdatesPB *tablet_report = report_update->add_tablets();
     tablet_report->set_tablet_id(reported.tablet_id());
     RETURN_NOT_OK_PREPEND(HandleReportedTablet(ts_desc, reported, tablet_report),
@@ -1615,7 +1614,7 @@ Status CatalogManager::ResetTabletReplicasFromReportedConfig(
   *tablet_lock->mutable_data()->pb.mutable_committed_consensus_state() = cstate;
 
   TabletInfo::ReplicaMap replica_locations;
-  BOOST_FOREACH(const consensus::RaftPeerPB& peer, cstate.config().peers()) {
+  for (const consensus::RaftPeerPB& peer : cstate.config().peers()) {
     shared_ptr<TSDescriptor> ts_desc;
     if (!peer.has_permanent_uuid()) {
       return Status::InvalidArgument("Missing UUID for peer", peer.ShortDebugString());
@@ -1635,11 +1634,11 @@ Status CatalogManager::ResetTabletReplicasFromReportedConfig(
 
   if (FLAGS_master_tombstone_evicted_tablet_replicas) {
     unordered_set<string> current_member_uuids;
-    BOOST_FOREACH(const consensus::RaftPeerPB& peer, cstate.config().peers()) {
+    for (const consensus::RaftPeerPB& peer : cstate.config().peers()) {
       InsertOrDie(&current_member_uuids, peer.permanent_uuid());
     }
     // Send a DeleteTablet() request to peers that are not in the new config.
-    BOOST_FOREACH(const consensus::RaftPeerPB& prev_peer, prev_cstate.config().peers()) {
+    for (const consensus::RaftPeerPB& prev_peer : prev_cstate.config().peers()) {
       const string& peer_uuid = prev_peer.permanent_uuid();
       if (!ContainsKey(current_member_uuids, peer_uuid)) {
         shared_ptr<TSDescriptor> ts_desc;
@@ -1755,7 +1754,7 @@ class PickLeaderReplica : public TSPicker {
   virtual Status PickReplica(TSDescriptor** ts_desc) OVERRIDE {
     TabletInfo::ReplicaMap replica_locations;
     tablet_->GetReplicaLocations(&replica_locations);
-    BOOST_FOREACH(const TabletInfo::ReplicaMap::value_type& r, replica_locations) {
+    for (const TabletInfo::ReplicaMap::value_type& r : replica_locations) {
       if (r.second.role == consensus::RaftPeerPB::LEADER) {
         *ts_desc = r.second.ts_desc;
         return Status::OK();
@@ -2264,7 +2263,7 @@ bool SelectRandomTSForReplica(const TSDescriptorVector& ts_descs,
                               const unordered_set<string>& exclude_uuids,
                               shared_ptr<TSDescriptor>* selection) {
   TSDescriptorVector tablet_servers;
-  BOOST_FOREACH(const shared_ptr<TSDescriptor>& ts, ts_descs) {
+  for (const shared_ptr<TSDescriptor>& ts : ts_descs) {
     if (!ContainsKey(exclude_uuids, ts->permanent_uuid())) {
       tablet_servers.push_back(ts);
     }
@@ -2336,7 +2335,7 @@ bool AsyncAddServerTask::SendRequest(int attempt) {
   // Select the replica we wish to add to the config.
   // Do not include current members of the config.
   unordered_set<string> replica_uuids;
-  BOOST_FOREACH(const RaftPeerPB& peer, cstate_.config().peers()) {
+  for (const RaftPeerPB& peer : cstate_.config().peers()) {
     InsertOrDie(&replica_uuids, peer.permanent_uuid());
   }
   TSDescriptorVector ts_descs;
@@ -2403,7 +2402,7 @@ void CatalogManager::SendAlterTableRequest(const scoped_refptr<TableInfo>& table
   vector<scoped_refptr<TabletInfo> > tablets;
   table->GetAllTablets(&tablets);
 
-  BOOST_FOREACH(const scoped_refptr<TabletInfo>& tablet, tablets) {
+  for (const scoped_refptr<TabletInfo>& tablet : tablets) {
     SendAlterTabletRequest(tablet);
   }
 }
@@ -2420,12 +2419,12 @@ void CatalogManager::DeleteTabletsAndSendRequests(const scoped_refptr<TableInfo>
 
   string deletion_msg = "Table deleted at " + LocalTimeAsString();
 
-  BOOST_FOREACH(const scoped_refptr<TabletInfo>& tablet, tablets) {
+  for (const scoped_refptr<TabletInfo>& tablet : tablets) {
     TabletInfo::ReplicaMap locations;
     tablet->GetReplicaLocations(&locations);
     LOG(INFO) << "Sending DeleteTablet for " << locations.size()
               << " replicas of tablet " << tablet->tablet_id();
-    BOOST_FOREACH(const TabletInfo::ReplicaMap::value_type& r, locations) {
+    for (const TabletInfo::ReplicaMap::value_type& r : locations) {
       SendDeleteTabletRequest(tablet->tablet_id(), TABLET_DATA_DELETED,
                               boost::none, table, r.second.ts_desc, deletion_msg);
     }
@@ -2488,7 +2487,7 @@ void CatalogManager::ExtractTabletsToProcess(
   //       or just a counter to avoid to take the lock and loop through the tablets
   //       if everything is "stable".
 
-  BOOST_FOREACH(const TabletInfoMap::value_type& entry, tablet_map_) {
+  for (const TabletInfoMap::value_type& entry : tablet_map_) {
     scoped_refptr<TabletInfo> tablet = entry.second;
     TabletMetadataLock tablet_lock(tablet.get(), TabletMetadataLock::READ);
 
@@ -2633,7 +2632,7 @@ class ScopedTabletInfoCommitter {
   // This method is not thread safe. Must be called by the same thread
   // that would destroy this instance.
   void Abort() {
-    BOOST_FOREACH(const scoped_refptr<TabletInfo>& tablet, *tablets_) {
+    for (const scoped_refptr<TabletInfo>& tablet : *tablets_) {
       tablet->mutable_metadata()->AbortMutation();
     }
     aborted_ = true;
@@ -2642,7 +2641,7 @@ class ScopedTabletInfoCommitter {
   // Commit the transactions.
   ~ScopedTabletInfoCommitter() {
     if (PREDICT_TRUE(!aborted_)) {
-      BOOST_FOREACH(const scoped_refptr<TabletInfo>& tablet, *tablets_) {
+      for (const scoped_refptr<TabletInfo>& tablet : *tablets_) {
         tablet->mutable_metadata()->CommitMutation();
       }
     }
@@ -2660,7 +2659,7 @@ Status CatalogManager::ProcessPendingAssignments(
 
   // Take write locks on all tablets to be processed, and ensure that they are
   // unlocked at the end of this scope.
-  BOOST_FOREACH(const scoped_refptr<TabletInfo>& tablet, tablets) {
+  for (const scoped_refptr<TabletInfo>& tablet : tablets) {
     tablet->mutable_metadata()->StartMutation();
   }
   ScopedTabletInfoCommitter unlocker_in(&tablets);
@@ -2676,7 +2675,7 @@ Status CatalogManager::ProcessPendingAssignments(
   // Iterate over each of the tablets and handle it, whatever state
   // it may be in. The actions required for the tablet are collected
   // into 'deferred'.
-  BOOST_FOREACH(const scoped_refptr<TabletInfo>& tablet, tablets) {
+  for (const scoped_refptr<TabletInfo>& tablet : tablets) {
     SysTabletsEntryPB::State t_state = tablet->metadata().state().pb.state();
 
     switch (t_state) {
@@ -2707,7 +2706,7 @@ Status CatalogManager::ProcessPendingAssignments(
   master_->ts_manager()->GetAllLiveDescriptors(&ts_descs);
 
   Status s;
-  BOOST_FOREACH(TabletInfo *tablet, deferred.needs_create_rpc) {
+  for (TabletInfo *tablet : deferred.needs_create_rpc) {
     // NOTE: if we fail to select replicas on the first pass (due to
     // insufficient Tablet Servers being online), we will still try
     // again unless the tablet/table creation is cancelled.
@@ -2734,7 +2733,7 @@ Status CatalogManager::ProcessPendingAssignments(
     // If there was an error, abort any mutations started by the
     // current task.
     vector<string> tablet_ids_to_remove;
-    BOOST_FOREACH(scoped_refptr<TabletInfo>& new_tablet, new_tablets) {
+    for (scoped_refptr<TabletInfo>& new_tablet : new_tablets) {
       TableInfo* table = new_tablet->table().get();
       TableMetadataLock l_table(table, TableMetadataLock::WRITE);
       if (table->RemoveTablet(new_tablet->tablet_id())) {
@@ -2746,7 +2745,7 @@ Status CatalogManager::ProcessPendingAssignments(
     boost::lock_guard<LockType> l(lock_);
     unlocker_out.Abort();
     unlocker_in.Abort();
-    BOOST_FOREACH(const string& tablet_id_to_remove, tablet_ids_to_remove) {
+    for (const string& tablet_id_to_remove : tablet_ids_to_remove) {
       CHECK_EQ(tablet_map_.erase(tablet_id_to_remove), 1)
           << "Unable to erase " << tablet_id_to_remove << " from tablet map.";
     }
@@ -2794,11 +2793,11 @@ Status CatalogManager::SelectReplicasForTablet(const TSDescriptorVector& ts_desc
 }
 
 void CatalogManager::SendCreateTabletRequests(const vector<TabletInfo*>& tablets) {
-  BOOST_FOREACH(TabletInfo *tablet, tablets) {
+  for (TabletInfo *tablet : tablets) {
     const consensus::RaftConfigPB& config =
         tablet->metadata().dirty().pb.committed_consensus_state().config();
     tablet->set_last_update_time(MonoTime::Now(MonoTime::FINE));
-    BOOST_FOREACH(const RaftPeerPB& peer, config.peers()) {
+    for (const RaftPeerPB& peer : config.peers()) {
       AsyncCreateReplica* task = new AsyncCreateReplica(master_, worker_pool_.get(),
                                                         peer.permanent_uuid(), tablet);
       tablet->table()->AddTask(task);
@@ -2832,7 +2831,7 @@ void CatalogManager::SelectReplicas(const TSDescriptorVector& ts_descs,
     peer->set_permanent_uuid(ts->permanent_uuid());
 
     // TODO: This is temporary, we will use only UUIDs
-    BOOST_FOREACH(const HostPortPB& addr, reg.rpc_addresses()) {
+    for (const HostPortPB& addr : reg.rpc_addresses()) {
       peer->mutable_last_known_addr()->CopyFrom(addr);
     }
   }
@@ -2867,7 +2866,7 @@ Status CatalogManager::BuildLocationsForTablet(const scoped_refptr<TabletInfo>&
 
   // If the locations are cached.
   if (!locs.empty()) {
-    BOOST_FOREACH(const TabletInfo::ReplicaMap::value_type& replica, locs) {
+    for (const TabletInfo::ReplicaMap::value_type& replica : locs) {
       TabletLocationsPB_ReplicaPB* replica_pb = locs_pb->add_replicas();
       replica_pb->set_role(replica.second.role);
 
@@ -2883,7 +2882,7 @@ Status CatalogManager::BuildLocationsForTablet(const scoped_refptr<TabletInfo>&
   // If the locations were not cached.
   // TODO: Why would this ever happen? See KUDU-759.
   if (cstate.IsInitialized()) {
-    BOOST_FOREACH(const consensus::RaftPeerPB& peer, cstate.config().peers()) {
+    for (const consensus::RaftPeerPB& peer : cstate.config().peers()) {
       TabletLocationsPB_ReplicaPB* replica_pb = locs_pb->add_replicas();
       CHECK(peer.has_permanent_uuid()) << "Missing UUID: " << peer.ShortDebugString();
       replica_pb->set_role(GetConsensusRole(peer.permanent_uuid(), cstate));
@@ -2956,7 +2955,7 @@ Status CatalogManager::GetTableLocations(const GetTableLocationsRequestPB* req,
 
   TSRegistrationPB reg;
   vector<TabletReplica> locs;
-  BOOST_FOREACH(const scoped_refptr<TabletInfo>& tablet, tablets_in_range) {
+  for (const scoped_refptr<TabletInfo>& tablet : tablets_in_range) {
     if (!BuildLocationsForTablet(tablet, resp->add_tablet_locations()).ok()) {
       // Not running.
       resp->mutable_tablet_locations()->RemoveLast();
@@ -2979,7 +2978,7 @@ void CatalogManager::DumpState(std::ostream* out) const {
   }
 
   *out << "Tables:\n";
-  BOOST_FOREACH(const TableInfoMap::value_type& e, ids_copy) {
+  for (const TableInfoMap::value_type& e : ids_copy) {
     TableInfo* t = e.second.get();
     TableMetadataLock l(t, TableMetadataLock::READ);
     const string& name = l.data().name();
@@ -2998,7 +2997,7 @@ void CatalogManager::DumpState(std::ostream* out) const {
 
     vector<scoped_refptr<TabletInfo> > table_tablets;
     t->GetAllTablets(&table_tablets);
-    BOOST_FOREACH(const scoped_refptr<TabletInfo>& tablet, table_tablets) {
+    for (const scoped_refptr<TabletInfo>& tablet : table_tablets) {
       TabletMetadataLock l_tablet(tablet.get(), TabletMetadataLock::READ);
       *out << "    " << tablet->tablet_id() << ": "
            << l_tablet.data().pb.ShortDebugString() << "\n";
@@ -3011,7 +3010,7 @@ void CatalogManager::DumpState(std::ostream* out) const {
 
   if (!tablets_copy.empty()) {
     *out << "Orphaned tablets (not referenced by any table):\n";
-    BOOST_FOREACH(const TabletInfoMap::value_type& entry, tablets_copy) {
+    for (const TabletInfoMap::value_type& entry : tablets_copy) {
       const scoped_refptr<TabletInfo>& tablet = entry.second;
       TabletMetadataLock l_tablet(tablet.get(), TabletMetadataLock::READ);
       *out << "    " << tablet->tablet_id() << ": "
@@ -3021,7 +3020,7 @@ void CatalogManager::DumpState(std::ostream* out) const {
 
   if (!names_copy.empty()) {
     *out << "Orphaned tables (in by-name map, but not id map):\n";
-    BOOST_FOREACH(const TableInfoMap::value_type& e, names_copy) {
+    for (const TableInfoMap::value_type& e : names_copy) {
       *out << e.second->id() << ":\n";
       *out << "  name: \"" << CHexEscape(e.first) << "\"\n";
     }
@@ -3131,7 +3130,7 @@ void TableInfo::AddTablet(TabletInfo *tablet) {
 
 void TableInfo::AddTablets(const vector<TabletInfo*>& tablets) {
   boost::lock_guard<simple_spinlock> l(lock_);
-  BOOST_FOREACH(TabletInfo *tablet, tablets) {
+  for (TabletInfo *tablet : tablets) {
     AddTabletUnlocked(tablet);
   }
 }
@@ -3176,7 +3175,7 @@ void TableInfo::GetTabletsInRange(const GetTableLocationsRequestPB* req,
 
 bool TableInfo::IsAlterInProgress(uint32_t version) const {
   boost::lock_guard<simple_spinlock> l(lock_);
-  BOOST_FOREACH(const TableInfo::TabletInfoMap::value_type& e, tablet_map_) {
+  for (const TableInfo::TabletInfoMap::value_type& e : tablet_map_) {
     if (e.second->reported_schema_version() < version) {
       VLOG(3) << "Table " << table_id_ << " ALTER in progress due to tablet "
               << e.second->ToString() << " because reported schema "
@@ -3189,7 +3188,7 @@ bool TableInfo::IsAlterInProgress(uint32_t version) const {
 
 bool TableInfo::IsCreateInProgress() const {
   boost::lock_guard<simple_spinlock> l(lock_);
-  BOOST_FOREACH(const TableInfo::TabletInfoMap::value_type& e, tablet_map_) {
+  for (const TableInfo::TabletInfoMap::value_type& e : tablet_map_) {
     TabletMetadataLock tablet_lock(e.second, TabletMetadataLock::READ);
     if (!tablet_lock.data().is_running()) {
       return true;
@@ -3212,7 +3211,7 @@ void TableInfo::RemoveTask(MonitoredTask* task) {
 
 void TableInfo::AbortTasks() {
   boost::lock_guard<simple_spinlock> l(lock_);
-  BOOST_FOREACH(MonitoredTask* task, pending_tasks_) {
+  for (MonitoredTask* task : pending_tasks_) {
     task->Abort();
   }
 }
@@ -3233,7 +3232,7 @@ void TableInfo::WaitTasksCompletion() {
 
 void TableInfo::GetTaskList(std::vector<scoped_refptr<MonitoredTask> > *ret) {
   boost::lock_guard<simple_spinlock> l(lock_);
-  BOOST_FOREACH(MonitoredTask* task, pending_tasks_) {
+  for (MonitoredTask* task : pending_tasks_) {
     ret->push_back(make_scoped_refptr(task));
   }
 }
@@ -3241,7 +3240,7 @@ void TableInfo::GetTaskList(std::vector<scoped_refptr<MonitoredTask> > *ret) {
 void TableInfo::GetAllTablets(vector<scoped_refptr<TabletInfo> > *ret) const {
   ret->clear();
   boost::lock_guard<simple_spinlock> l(lock_);
-  BOOST_FOREACH(const TableInfo::TabletInfoMap::value_type& e, tablet_map_) {
+  for (const TableInfo::TabletInfoMap::value_type& e : tablet_map_) {
     ret->push_back(make_scoped_refptr(e.second));
   }
 }

http://git-wip-us.apache.org/repos/asf/incubator-kudu/blob/9daafa5e/src/kudu/master/master-path-handlers.cc
----------------------------------------------------------------------
diff --git a/src/kudu/master/master-path-handlers.cc b/src/kudu/master/master-path-handlers.cc
index e44f89d..e71c5ce 100644
--- a/src/kudu/master/master-path-handlers.cc
+++ b/src/kudu/master/master-path-handlers.cc
@@ -19,7 +19,6 @@
 
 #include <algorithm>
 #include <boost/bind.hpp>
-#include <boost/foreach.hpp>
 #include <map>
 #include <string>
 #include <vector>
@@ -64,7 +63,7 @@ void MasterPathHandlers::HandleTabletServers(const Webserver::WebRequest& req,
 
   *output << "<table class='table table-striped'>\n";
   *output << "  <tr><th>UUID</th><th>Time since heartbeat</th><th>Registration</th></tr>\n";
-  BOOST_FOREACH(const std::shared_ptr<TSDescriptor>& desc, descs) {
+  for (const std::shared_ptr<TSDescriptor>& desc : descs) {
     const string time_since_hb = StringPrintf("%.1fs", desc->TimeSinceHeartbeat().ToSeconds());
     TSRegistrationPB reg;
     desc->GetRegistration(&reg);
@@ -87,7 +86,7 @@ void MasterPathHandlers::HandleCatalogManager(const Webserver::WebRequest& req,
   *output << "  <tr><th>Table Name</th><th>Table Id</th><th>State</th></tr>\n";
   typedef std::map<string, string> StringMap;
   StringMap ordered_tables;
-  BOOST_FOREACH(const scoped_refptr<TableInfo>& table, tables) {
+  for (const scoped_refptr<TableInfo>& table : tables) {
     TableMetadataLock l(table.get(), TableMetadataLock::READ);
     if (!l.data().is_running()) {
       continue;
@@ -101,7 +100,7 @@ void MasterPathHandlers::HandleCatalogManager(const Webserver::WebRequest& req,
         state,
         EscapeForHtmlToString(l.data().pb.state_msg()));
   }
-  BOOST_FOREACH(const StringMap::value_type& table, ordered_tables) {
+  for (const StringMap::value_type& table : ordered_tables) {
     *output << table.second;
   }
   *output << "</table>\n";
@@ -167,7 +166,7 @@ void MasterPathHandlers::HandleTablePage(const Webserver::WebRequest& req,
   *output << "<table class='table table-striped'>\n";
   *output << "  <tr><th>Tablet ID</th><th>Partition</th><th>State</th>"
       "<th>Message</th><th>RaftConfig</th></tr>\n";
-  BOOST_FOREACH(const scoped_refptr<TabletInfo>& tablet, tablets) {
+  for (const scoped_refptr<TabletInfo>& tablet : tablets) {
     TabletInfo::ReplicaMap locations;
     tablet->GetReplicaLocations(&locations);
     vector<TabletReplica> sorted_locations;
@@ -196,7 +195,7 @@ void MasterPathHandlers::HandleTablePage(const Webserver::WebRequest& req,
   string master_addresses;
   if (master_->opts().IsDistributed()) {
     vector<string> all_addresses;
-    BOOST_FOREACH(const HostPort& hp, master_->opts().master_addresses) {
+    for (const HostPort& hp : master_->opts().master_addresses) {
       master_addresses.append(hp.ToString());
     }
     master_addresses = JoinElements(all_addresses, ",");
@@ -232,7 +231,7 @@ void MasterPathHandlers::HandleMasters(const Webserver::WebRequest& req,
   *output <<  "<table class='table table-striped'>\n";
   *output <<  "  <tr><th>Registration</th><th>Role</th></tr>\n";
 
-  BOOST_FOREACH(const ServerEntryPB& master, masters) {
+  for (const ServerEntryPB& master : masters) {
     if (master.has_error()) {
       Status error = StatusFromPB(master.error());
       *output << Substitute("  <tr><td colspan=2><font color='red'><b>$0</b></font></td></tr>\n",
@@ -316,7 +315,7 @@ class JsonDumper : public TableVisitor, public TabletVisitor {
       const consensus::ConsensusStatePB& cs = metadata.committed_consensus_state();
       jw_->String("replicas");
       jw_->StartArray();
-      BOOST_FOREACH(const RaftPeerPB& peer, cs.config().peers()) {
+      for (const RaftPeerPB& peer : cs.config().peers()) {
         jw_->StartObject();
         jw_->String("type");
         jw_->String(RaftPeerPB::MemberType_Name(peer.member_type()));
@@ -410,7 +409,7 @@ string MasterPathHandlers::RaftConfigToHtml(const std::vector<TabletReplica>& lo
   stringstream html;
 
   html << "<ul>\n";
-  BOOST_FOREACH(const TabletReplica& location, locations) {
+  for (const TabletReplica& location : locations) {
     string location_html = TSDescriptorToHtml(*location.ts_desc, tablet_id);
     if (location.role == RaftPeerPB::LEADER) {
       html << Substitute("  <li><b>LEADER: $0</b></li>\n", location_html);

http://git-wip-us.apache.org/repos/asf/incubator-kudu/blob/9daafa5e/src/kudu/master/master-test-util.h
----------------------------------------------------------------------
diff --git a/src/kudu/master/master-test-util.h b/src/kudu/master/master-test-util.h
index 044df60..d74bbb9 100644
--- a/src/kudu/master/master-test-util.h
+++ b/src/kudu/master/master-test-util.h
@@ -49,7 +49,7 @@ Status WaitForRunningTabletCount(MiniMaster* mini_master,
     RETURN_NOT_OK(mini_master->master()->catalog_manager()->GetTableLocations(&req, resp));
     if (resp->tablet_locations_size() >= expected_count) {
       bool is_stale = false;
-      BOOST_FOREACH(const TabletLocationsPB& loc, resp->tablet_locations()) {
+      for (const TabletLocationsPB& loc : resp->tablet_locations()) {
         is_stale |= loc.stale();
       }
 

http://git-wip-us.apache.org/repos/asf/incubator-kudu/blob/9daafa5e/src/kudu/master/master-test.cc
----------------------------------------------------------------------
diff --git a/src/kudu/master/master-test.cc b/src/kudu/master/master-test.cc
index 334e4fd..9abf32f 100644
--- a/src/kudu/master/master-test.cc
+++ b/src/kudu/master/master-test.cc
@@ -15,7 +15,6 @@
 // specific language governing permissions and limitations
 // under the License.
 
-#include <boost/foreach.hpp>
 #include <gtest/gtest.h>
 
 #include <algorithm>
@@ -234,7 +233,7 @@ Status MasterTest::CreateTable(const string& table_name,
   req.set_name(table_name);
   RETURN_NOT_OK(SchemaToPB(schema, req.mutable_schema()));
   RowOperationsPBEncoder encoder(req.mutable_split_rows());
-  BOOST_FOREACH(const KuduPartialRow& row, split_rows) {
+  for (const KuduPartialRow& row : split_rows) {
     encoder.Add(RowOperationsPB::SPLIT_ROW, row);
   }
 

http://git-wip-us.apache.org/repos/asf/incubator-kudu/blob/9daafa5e/src/kudu/master/master.cc
----------------------------------------------------------------------
diff --git a/src/kudu/master/master.cc b/src/kudu/master/master.cc
index 45e553f..6efa1ff 100644
--- a/src/kudu/master/master.cc
+++ b/src/kudu/master/master.cc
@@ -19,7 +19,6 @@
 
 #include <algorithm>
 #include <boost/bind.hpp>
-#include <boost/foreach.hpp>
 #include <glog/logging.h>
 #include <list>
 #include <memory>
@@ -226,7 +225,7 @@ Status Master::ListMasters(std::vector<ServerEntryPB>* masters) const {
     return Status::OK();
   }
 
-  BOOST_FOREACH(const HostPort& peer_addr, opts_.master_addresses) {
+  for (const HostPort& peer_addr : opts_.master_addresses) {
     ServerEntryPB peer_entry;
     Status s = GetMasterEntryForHost(messenger_, peer_addr, &peer_entry);
     if (!s.ok()) {

http://git-wip-us.apache.org/repos/asf/incubator-kudu/blob/9daafa5e/src/kudu/master/master_rpc.cc
----------------------------------------------------------------------
diff --git a/src/kudu/master/master_rpc.cc b/src/kudu/master/master_rpc.cc
index 0b41dd2..8e71608 100644
--- a/src/kudu/master/master_rpc.cc
+++ b/src/kudu/master/master_rpc.cc
@@ -20,7 +20,6 @@
 #include "kudu/master/master_rpc.h"
 
 #include <boost/bind.hpp>
-#include <boost/foreach.hpp>
 
 #include "kudu/common/wire_protocol.h"
 #include "kudu/common/wire_protocol.pb.h"
@@ -128,7 +127,7 @@ GetLeaderMasterRpc::~GetLeaderMasterRpc() {
 
 string GetLeaderMasterRpc::ToString() const {
   vector<string> sockaddr_str;
-  BOOST_FOREACH(const Sockaddr& addr, addrs_) {
+  for (const Sockaddr& addr : addrs_) {
     sockaddr_str.push_back(addr.ToString());
   }
   return strings::Substitute("GetLeaderMasterRpc(addrs: $0, num_attempts: $1)",

http://git-wip-us.apache.org/repos/asf/incubator-kudu/blob/9daafa5e/src/kudu/master/master_service.cc
----------------------------------------------------------------------
diff --git a/src/kudu/master/master_service.cc b/src/kudu/master/master_service.cc
index 127641c..329a786 100644
--- a/src/kudu/master/master_service.cc
+++ b/src/kudu/master/master_service.cc
@@ -17,7 +17,6 @@
 
 #include "kudu/master/master_service.h"
 
-#include <boost/foreach.hpp>
 #include <gflags/gflags.h>
 #include <memory>
 #include <string>
@@ -217,7 +216,7 @@ void MasterServiceImpl::GetTabletLocations(const GetTabletLocationsRequestPB* re
 
   TSRegistrationPB reg;
   vector<TSDescriptor*> locs;
-  BOOST_FOREACH(const string& tablet_id, req->tablet_ids()) {
+  for (const string& tablet_id : req->tablet_ids()) {
     // TODO: once we have catalog data. ACL checks would also go here, probably.
     TabletLocationsPB* locs_pb = resp->add_tablet_locations();
     Status s = server_->catalog_manager()->GetTabletLocations(tablet_id, locs_pb);
@@ -340,7 +339,7 @@ void MasterServiceImpl::ListTabletServers(const ListTabletServersRequestPB* req,
 
   vector<std::shared_ptr<TSDescriptor> > descs;
   server_->ts_manager()->GetAllDescriptors(&descs);
-  BOOST_FOREACH(const std::shared_ptr<TSDescriptor>& desc, descs) {
+  for (const std::shared_ptr<TSDescriptor>& desc : descs) {
     ListTabletServersResponsePB::Entry* entry = resp->add_servers();
     desc->GetNodeInstancePB(entry->mutable_instance_id());
     desc->GetRegistration(entry->mutable_registration());
@@ -358,7 +357,7 @@ void MasterServiceImpl::ListMasters(const ListMastersRequestPB* req,
     StatusToPB(s, resp->mutable_error());
     resp->mutable_error()->set_code(AppStatusPB::UNKNOWN_ERROR);
   } else {
-    BOOST_FOREACH(const ServerEntryPB& master, masters) {
+    for (const ServerEntryPB& master : masters) {
       resp->add_masters()->CopyFrom(master);
     }
   }

http://git-wip-us.apache.org/repos/asf/incubator-kudu/blob/9daafa5e/src/kudu/master/mini_master.cc
----------------------------------------------------------------------
diff --git a/src/kudu/master/mini_master.cc b/src/kudu/master/mini_master.cc
index f8f5630..7881135 100644
--- a/src/kudu/master/mini_master.cc
+++ b/src/kudu/master/mini_master.cc
@@ -101,7 +101,7 @@ Status MiniMaster::StartDistributedMasterOnPorts(uint16_t rpc_port, uint16_t web
   MasterOptions opts;
 
   vector<HostPort> peer_addresses;
-  BOOST_FOREACH(uint16_t peer_port, peer_ports) {
+  for (uint16_t peer_port : peer_ports) {
     HostPort peer_address("127.0.0.1", peer_port);
     peer_addresses.push_back(peer_address);
   }

http://git-wip-us.apache.org/repos/asf/incubator-kudu/blob/9daafa5e/src/kudu/master/sys_catalog-test.cc
----------------------------------------------------------------------
diff --git a/src/kudu/master/sys_catalog-test.cc b/src/kudu/master/sys_catalog-test.cc
index 51eb737..165a328 100644
--- a/src/kudu/master/sys_catalog-test.cc
+++ b/src/kudu/master/sys_catalog-test.cc
@@ -15,7 +15,6 @@
 // specific language governing permissions and limitations
 // under the License.
 
-#include <boost/foreach.hpp>
 #include <gtest/gtest.h>
 
 #include <algorithm>
@@ -78,7 +77,7 @@ class TableLoader : public TableVisitor {
   ~TableLoader() { Reset(); }
 
   void Reset() {
-    BOOST_FOREACH(TableInfo* ti, tables) {
+    for (TableInfo* ti : tables) {
       ti->Release();
     }
     tables.clear();
@@ -201,7 +200,7 @@ class TabletLoader : public TabletVisitor {
   ~TabletLoader() { Reset(); }
 
   void Reset() {
-    BOOST_FOREACH(TabletInfo* ti, tablets) {
+    for (TabletInfo* ti : tablets) {
       ti->Release();
     }
     tablets.clear();

http://git-wip-us.apache.org/repos/asf/incubator-kudu/blob/9daafa5e/src/kudu/master/sys_catalog.cc
----------------------------------------------------------------------
diff --git a/src/kudu/master/sys_catalog.cc b/src/kudu/master/sys_catalog.cc
index 6db9c67..c8b6d55 100644
--- a/src/kudu/master/sys_catalog.cc
+++ b/src/kudu/master/sys_catalog.cc
@@ -175,7 +175,7 @@ Status SysCatalogTable::SetupDistributedConfig(const MasterOptions& options,
   new_config.set_opid_index(consensus::kInvalidOpIdIndex);
 
   // Build the set of followers from our server options.
-  BOOST_FOREACH(const HostPort& host_port, options.master_addresses) {
+  for (const HostPort& host_port : options.master_addresses) {
     RaftPeerPB peer;
     HostPortPB peer_host_port_pb;
     RETURN_NOT_OK(HostPortToPB(host_port, &peer_host_port_pb));
@@ -190,7 +190,7 @@ Status SysCatalogTable::SetupDistributedConfig(const MasterOptions& options,
   DCHECK(master_->messenger());
   RaftConfigPB resolved_config = new_config;
   resolved_config.clear_peers();
-  BOOST_FOREACH(const RaftPeerPB& peer, new_config.peers()) {
+  for (const RaftPeerPB& peer : new_config.peers()) {
     if (peer.has_permanent_uuid()) {
       resolved_config.add_peers()->CopyFrom(peer);
     } else {
@@ -326,7 +326,7 @@ Status SysCatalogTable::SyncWrite(const WriteRequestPB *req, WriteResponsePB *re
     return StatusFromPB(resp->error().status());
   }
   if (resp->per_row_errors_size() > 0) {
-    BOOST_FOREACH(const WriteResponsePB::PerRowErrorPB& error, resp->per_row_errors()) {
+    for (const WriteResponsePB::PerRowErrorPB& error : resp->per_row_errors()) {
       LOG(WARNING) << "row " << error.row_index() << ": " << StatusFromPB(error.error()).ToString();
     }
     return Status::Corruption("One or more rows failed to write");
@@ -484,7 +484,7 @@ Status SysCatalogTable::AddTabletsToPB(const vector<TabletInfo*>& tablets,
   faststring metadata_buf;
   KuduPartialRow row(&schema_);
   RowOperationsPBEncoder enc(ops);
-  BOOST_FOREACH(const TabletInfo *tablet, tablets) {
+  for (const TabletInfo *tablet : tablets) {
     if (!pb_util::SerializeToString(tablet->metadata().dirty().pb, &metadata_buf)) {
       return Status::Corruption("Unable to serialize SysCatalogTabletsEntryPB for tablet",
                                 tablet->tablet_id());
@@ -545,7 +545,7 @@ Status SysCatalogTable::DeleteTablets(const vector<TabletInfo*>& tablets) {
 
   RowOperationsPBEncoder enc(req.mutable_row_operations());
   KuduPartialRow row(&schema_);
-  BOOST_FOREACH(const TabletInfo* tablet, tablets) {
+  for (const TabletInfo* tablet : tablets) {
     CHECK_OK(row.SetInt8(kSysCatalogTableColType, TABLETS_ENTRY));
     CHECK_OK(row.SetString(kSysCatalogTableColId, tablet->tablet_id()));
     enc.Add(RowOperationsPB::DELETE, row);

http://git-wip-us.apache.org/repos/asf/incubator-kudu/blob/9daafa5e/src/kudu/master/ts_descriptor.cc
----------------------------------------------------------------------
diff --git a/src/kudu/master/ts_descriptor.cc b/src/kudu/master/ts_descriptor.cc
index ba8a274..a4d315e 100644
--- a/src/kudu/master/ts_descriptor.cc
+++ b/src/kudu/master/ts_descriptor.cc
@@ -23,7 +23,6 @@
 #include "kudu/tserver/tserver_admin.proxy.h"
 #include "kudu/util/net/net_util.h"
 
-#include <boost/foreach.hpp>
 #include <boost/thread/locks.hpp>
 #include <boost/thread/mutex.hpp>
 
@@ -124,7 +123,7 @@ Status TSDescriptor::ResolveSockaddr(Sockaddr* addr) const {
   vector<HostPort> hostports;
   {
     boost::lock_guard<simple_spinlock> l(lock_);
-    BOOST_FOREACH(const HostPortPB& addr, registration_->rpc_addresses()) {
+    for (const HostPortPB& addr : registration_->rpc_addresses()) {
       hostports.push_back(HostPort(addr.host(), addr.port()));
     }
   }
@@ -132,7 +131,7 @@ Status TSDescriptor::ResolveSockaddr(Sockaddr* addr) const {
   // Resolve DNS outside the lock.
   HostPort last_hostport;
   vector<Sockaddr> addrs;
-  BOOST_FOREACH(const HostPort& hostport, hostports) {
+  for (const HostPort& hostport : hostports) {
     RETURN_NOT_OK(hostport.ResolveAddresses(&addrs));
     if (!addrs.empty()) {
       last_hostport = hostport;

http://git-wip-us.apache.org/repos/asf/incubator-kudu/blob/9daafa5e/src/kudu/master/ts_manager.cc
----------------------------------------------------------------------
diff --git a/src/kudu/master/ts_manager.cc b/src/kudu/master/ts_manager.cc
index 150cae2..1b5e140 100644
--- a/src/kudu/master/ts_manager.cc
+++ b/src/kudu/master/ts_manager.cc
@@ -17,7 +17,6 @@
 
 #include "kudu/master/ts_manager.h"
 
-#include <boost/foreach.hpp>
 #include <boost/thread/locks.hpp>
 #include <boost/thread/mutex.hpp>
 #include <vector>
@@ -103,7 +102,7 @@ void TSManager::GetAllLiveDescriptors(vector<shared_ptr<TSDescriptor> > *descs)
 
   boost::shared_lock<rw_spinlock> l(lock_);
   descs->reserve(servers_by_id_.size());
-  BOOST_FOREACH(const TSDescriptorMap::value_type& entry, servers_by_id_) {
+  for (const TSDescriptorMap::value_type& entry : servers_by_id_) {
     const shared_ptr<TSDescriptor>& ts = entry.second;
     if (ts->TimeSinceHeartbeat().ToMilliseconds() < FLAGS_tserver_unresponsive_timeout_ms) {
       descs->push_back(ts);

http://git-wip-us.apache.org/repos/asf/incubator-kudu/blob/9daafa5e/src/kudu/rpc/acceptor_pool.cc
----------------------------------------------------------------------
diff --git a/src/kudu/rpc/acceptor_pool.cc b/src/kudu/rpc/acceptor_pool.cc
index a144152..6ceb7fe 100644
--- a/src/kudu/rpc/acceptor_pool.cc
+++ b/src/kudu/rpc/acceptor_pool.cc
@@ -17,7 +17,6 @@
 
 #include "kudu/rpc/acceptor_pool.h"
 
-#include <boost/foreach.hpp>
 #include <gflags/gflags.h>
 #include <glog/logging.h>
 #include <inttypes.h>
@@ -104,12 +103,12 @@ void AcceptorPool::Shutdown() {
   // Calling shutdown on an accepting (non-connected) socket is illegal on most
   // platforms (but not Linux). Instead, the accepting threads are interrupted
   // forcefully.
-  BOOST_FOREACH(const scoped_refptr<kudu::Thread>& thread, threads_) {
+  for (const scoped_refptr<kudu::Thread>& thread : threads_) {
     pthread_cancel(thread.get()->pthread_id());
   }
 #endif
 
-  BOOST_FOREACH(const scoped_refptr<kudu::Thread>& thread, threads_) {
+  for (const scoped_refptr<kudu::Thread>& thread : threads_) {
     CHECK_OK(ThreadJoiner(thread.get()).Join());
   }
   threads_.clear();

http://git-wip-us.apache.org/repos/asf/incubator-kudu/blob/9daafa5e/src/kudu/rpc/connection.cc
----------------------------------------------------------------------
diff --git a/src/kudu/rpc/connection.cc b/src/kudu/rpc/connection.cc
index 738f501..6478029 100644
--- a/src/kudu/rpc/connection.cc
+++ b/src/kudu/rpc/connection.cc
@@ -17,7 +17,6 @@
 
 #include "kudu/rpc/connection.h"
 
-#include <boost/foreach.hpp>
 #include <boost/intrusive/list.hpp>
 #include <gflags/gflags.h>
 #include <glog/logging.h>
@@ -142,7 +141,7 @@ void Connection::Shutdown(const Status &status) {
   }
 
   // Clear any calls which have been sent and were awaiting a response.
-  BOOST_FOREACH(const car_map_t::value_type &v, awaiting_response_) {
+  for (const car_map_t::value_type &v : awaiting_response_) {
     CallAwaitingResponse *c = v.second;
     if (c->call) {
       c->call->SetFailed(status);
@@ -602,14 +601,14 @@ Status Connection::DumpPB(const DumpRunningRpcsRequestPB& req,
   }
 
   if (direction_ == CLIENT) {
-    BOOST_FOREACH(const car_map_t::value_type& entry, awaiting_response_) {
+    for (const car_map_t::value_type& entry : awaiting_response_) {
       CallAwaitingResponse *c = entry.second;
       if (c->call) {
         c->call->DumpPB(req, resp->add_calls_in_flight());
       }
     }
   } else if (direction_ == SERVER) {
-    BOOST_FOREACH(const inbound_call_map_t::value_type& entry, calls_being_handled_) {
+    for (const inbound_call_map_t::value_type& entry : calls_being_handled_) {
       InboundCall* c = entry.second;
       c->DumpPB(req, resp->add_calls_in_flight());
     }
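
The conversion applied throughout this patch is mechanical: each BOOST_FOREACH(decl, container) becomes a C++11 range-based for over the same container, with the element declaration (const reference, pointer, or map value_type) carried over unchanged. A minimal standalone sketch of the equivalence follows; the vector and its contents are illustrative only and are not taken from the Kudu sources.

    // Pre-C++11 form used by the old code:
    //   BOOST_FOREACH(const std::string& id, tablet_ids) { ... }
    // C++11 replacement used by this patch:
    //   for (const std::string& id : tablet_ids) { ... }
    #include <iostream>
    #include <string>
    #include <vector>

    int main() {
      // Illustrative data, not from the Kudu sources.
      std::vector<std::string> tablet_ids = {"tablet-0001", "tablet-0002"};
      // Equivalent to BOOST_FOREACH(const std::string& id, tablet_ids).
      for (const std::string& id : tablet_ids) {
        std::cout << id << "\n";
      }
      return 0;
    }

The range-based form only requires that the container expose begin()/end() (or matching free overloads), which the STL containers and protobuf repeated fields iterated over in this patch all provide, so no loop bodies needed to change.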


