Repository: hbase
Updated Branches:
refs/heads/master f08880573 -> ba5fb53d1
HBASE-20065 Revisit the timestamp usage in MetaTableAccessor
Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/ba5fb53d
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/ba5fb53d
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/ba5fb53d
Branch: refs/heads/master
Commit: ba5fb53d147ef63415027f776928c478af37f515
Parents: f088805
Author: zhangduo <zhangduo@apache.org>
Authored: Sun Feb 25 09:13:34 2018 +0800
Committer: zhangduo <zhangduo@apache.org>
Committed: Sun Feb 25 09:13:56 2018 +0800
----------------------------------------------------------------------
.../apache/hadoop/hbase/MetaTableAccessor.java | 453 ++++++++-----------
.../favored/FavoredNodeAssignmentHelper.java | 18 +-
.../master/assignment/AssignmentManager.java | 4 -
.../master/assignment/RegionStateStore.java | 54 +--
.../hadoop/hbase/regionserver/HRegion.java | 6 +-
.../org/apache/hadoop/hbase/util/HBaseFsck.java | 13 +-
.../hadoop/hbase/util/HBaseFsckRepair.java | 4 +-
.../hadoop/hbase/TestMetaTableAccessor.java | 54 +--
.../master/assignment/MockMasterServices.java | 8 -
.../regionserver/TestGetClosestAtOrBefore.java | 109 ++---
10 files changed, 301 insertions(+), 422 deletions(-)
----------------------------------------------------------------------
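Reader's note (not part of the commit): the diff below removes the makePutFromRegionInfo overload that picked a timestamp internally, so call sites now supply the timestamp explicitly, as the changed callers do with EnvironmentEdgeManager.currentTime(). A minimal sketch of the resulting call pattern; the class and method names in the sketch are illustrative only.

    import java.io.IOException;

    import org.apache.hadoop.hbase.MetaTableAccessor;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.RegionInfo;
    import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;

    public class CatalogPutSketch {
      // Build a catalog Put for a region; after this change the caller chooses the timestamp.
      static Put buildCatalogPut(RegionInfo regionInfo) throws IOException {
        long ts = EnvironmentEdgeManager.currentTime();
        return MetaTableAccessor.makePutFromRegionInfo(regionInfo, ts);
      }
    }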
http://git-wip-us.apache.org/repos/asf/hbase/blob/ba5fb53d/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java
index 7d00f92..803f183 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java
@@ -75,58 +75,65 @@ import org.slf4j.LoggerFactory;
import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
/**
- * Read/write operations on region and assignment information store in
- * <code>hbase:meta</code>.
+ * <p>
+ * Read/write operations on region and assignment information stored in <code>hbase:meta</code>.
+ * </p>
+ * <p>
+ * Some of the methods of this class take ZooKeeperWatcher as a param. The only reason for this is
+ * that when used on the client side (like from HBaseAdmin), we want to use a short-lived
+ * connection (opened before each operation, closed right after), while when used on HM or HRS
+ * (like in AssignmentManager) we want a permanent connection.
+ * </p>
+ * <p>
+ * HBASE-10070 adds a replicaId to HRI, meaning more than one HRI can be defined for the same table
+ * range (table, startKey, endKey). For every range, there will be at least one HRI defined which is
+ * called the default replica.
+ * </p>
+ * <p>
+ * Meta layout (as of 0.98 + HBASE-10070) is like:
*
- * Some of the methods of this class take ZooKeeperWatcher as a param. The only reason
- * for this is because when used on client-side (like from HBaseAdmin), we want to use
- * short-living connection (opened before each operation, closed right after), while
- * when used on HM or HRS (like in AssignmentManager) we want permanent connection.
+ * <pre>
+ * For each table there is a single row in column family 'table' formatted:
+ * <tableName> including namespace, and the columns are:
+ * table: state => contains table state
+ *
+ * For each table range, there is a single row, formatted like:
+ * <tableName>,<startKey>,<regionId>,<encodedRegionName>.
+ * This row corresponds to the regionName of the default region replica.
+ * Columns are:
+ * info:regioninfo => contains serialized HRI for the default region replica
+ * info:server => contains hostname:port (in string form) for the server hosting
+ * the default regionInfo replica
+ * info:server_<replicaId> => contains hostname:port (in string form) for the server hosting
+ * the regionInfo replica with replicaId
+ * info:serverstartcode => contains server start code (in binary long form) for the server
+ * hosting the default regionInfo replica
+ * info:serverstartcode_<replicaId> => contains server start code (in binary long form) for
+ * the server hosting the regionInfo replica with
+ * replicaId
+ * info:seqnumDuringOpen => contains seqNum (in binary long form) for the region at the time
+ * the server opened the region with default replicaId
+ * info:seqnumDuringOpen_<replicaId> => contains seqNum (in binary long form) for the region
+ * at the time the server opened the region with
+ * replicaId
+ * info:splitA => contains a serialized HRI for the first daughter region if the
+ * region is split
+ * info:splitB => contains a serialized HRI for the second daughter region if the
+ * region is split
+ * info:mergeA => contains a serialized HRI for the first parent region if the
+ * region is the result of a merge
+ * info:mergeB => contains a serialized HRI for the second parent region if the
+ * region is the result of a merge
+ * </pre>
+ * </p>
+ * <p>
+ * The actual layout of meta should be encapsulated inside MetaTableAccessor methods, and should not
+ * leak out of it (through Result objects, etc)
+ * </p>
*/
@InterfaceAudience.Private
public class MetaTableAccessor {
- /*
- * HBASE-10070 adds a replicaId to HRI, meaning more than one HRI can be defined for the
- * same table range (table, startKey, endKey). For every range, there will be at least one
- * HRI defined which is called default replica.
- *
- * Meta layout (as of 0.98 + HBASE-10070) is like:
- *
- * For each table there is single row in column family 'table' formatted:
- * <tableName> including namespace and columns are:
- * table: state => contains table state
- *
- * For each table range, there is a single row, formatted like:
- * <tableName>,<startKey>,<regionId>,<encodedRegionName>. This row corresponds to the regionName
- * of the default region replica.
- * Columns are:
- * info:regioninfo => contains serialized HRI for the default region replica
- * info:server => contains hostname:port (in string form) for the server hosting
- * the default regionInfo replica
- * info:server_<replicaId> => contains hostname:port (in string form) for the server hosting the
- * regionInfo replica with replicaId
- * info:serverstartcode => contains server start code (in binary long form) for the server
- * hosting the default regionInfo replica
- * info:serverstartcode_<replicaId> => contains server start code (in binary long form) for the
- * server hosting the regionInfo replica with replicaId
- * info:seqnumDuringOpen => contains seqNum (in binary long form) for the region at the time
- * the server opened the region with default replicaId
- * info:seqnumDuringOpen_<replicaId> => contains seqNum (in binary long form) for the region at
- * the time the server opened the region with replicaId
- * info:splitA => contains a serialized HRI for the first daughter region if the
- * region is split
- * info:splitB => contains a serialized HRI for the second daughter region if the
- * region is split
- * info:mergeA => contains a serialized HRI for the first parent region if the
- * region is the result of a merge
- * info:mergeB => contains a serialized HRI for the second parent region if the
- * region is the result of a merge
- *
- * The actual layout of meta should be encapsulated inside MetaTableAccessor methods,
- * and should not leak out of it (through Result objects, etc)
- */
-
private static final Logger LOG = LoggerFactory.getLogger(MetaTableAccessor.class);
private static final Logger METALOG = LoggerFactory.getLogger("org.apache.hadoop.hbase.META");
@@ -146,7 +153,6 @@ public class MetaTableAccessor {
* @param connection what we will use
* @param tableName table to list
* @return Map of all user-space regions to servers
- * @throws java.io.IOException
* @deprecated use {@link #getTableRegionsAndLocations}, region can have multiple locations
*/
@Deprecated
@@ -203,7 +209,6 @@ public class MetaTableAccessor {
* Performs a full scan of <code>hbase:meta</code> for regions.
* @param connection connection we're using
* @param visitor Visitor invoked against each row in regions family.
- * @throws IOException
*/
public static void fullScanRegions(Connection connection,
final Visitor visitor)
@@ -214,7 +219,6 @@ public class MetaTableAccessor {
/**
* Performs a full scan of <code>hbase:meta</code> for regions.
* @param connection connection we're using
- * @throws IOException
*/
public static List<Result> fullScanRegions(Connection connection)
throws IOException {
@@ -225,7 +229,6 @@ public class MetaTableAccessor {
* Performs a full scan of <code>hbase:meta</code> for tables.
* @param connection connection we're using
* @param visitor Visitor invoked against each row in tables family.
- * @throws IOException
*/
public static void fullScanTables(Connection connection,
final Visitor visitor)
@@ -238,7 +241,6 @@ public class MetaTableAccessor {
* @param connection connection we're using
* @param type scanned part of meta
* @return List of {@link Result}
- * @throws IOException
*/
public static List<Result> fullScan(Connection connection, QueryType type)
throws IOException {
@@ -251,7 +253,6 @@ public class MetaTableAccessor {
* Callers should call close on the returned {@link Table} instance.
* @param connection connection we're using to access Meta
* @return An {@link Table} for <code>hbase:meta</code>
- * @throws IOException
*/
public static Table getMetaHTable(final Connection connection)
throws IOException {
@@ -267,7 +268,6 @@ public class MetaTableAccessor {
/**
* @param t Table to use (will be closed when done).
* @param g Get to run
- * @throws IOException
*/
private static Result get(final Table t, final Get g) throws IOException {
if (t == null) return null;
@@ -283,7 +283,6 @@ public class MetaTableAccessor {
* @param connection connection we're using
* @param regionName Region to lookup.
* @return Location and RegionInfo for <code>regionName</code>
- * @throws IOException
* @deprecated use {@link #getRegionLocation(Connection, byte[])} instead
*/
@Deprecated
@@ -300,10 +299,9 @@ public class MetaTableAccessor {
* @param connection connection we're using
* @param regionName region we're looking for
* @return HRegionLocation for the given region
- * @throws IOException
*/
- public static HRegionLocation getRegionLocation(Connection connection,
- byte[] regionName) throws IOException {
+ public static HRegionLocation getRegionLocation(Connection connection, byte[] regionName)
+ throws IOException {
byte[] row = regionName;
RegionInfo parsedInfo = null;
try {
@@ -316,8 +314,7 @@ public class MetaTableAccessor {
get.addFamily(HConstants.CATALOG_FAMILY);
Result r = get(getMetaHTable(connection), get);
RegionLocations locations = getRegionLocations(r);
- return locations == null
- ? null
+ return locations == null ? null
: locations.getRegionLocation(parsedInfo == null ? 0 : parsedInfo.getReplicaId());
}
@@ -326,10 +323,9 @@ public class MetaTableAccessor {
* @param connection connection we're using
* @param regionInfo region information
* @return HRegionLocation for the given region
- * @throws IOException
*/
- public static HRegionLocation getRegionLocation(Connection connection,
- RegionInfo regionInfo) throws IOException {
+ public static HRegionLocation getRegionLocation(Connection connection, RegionInfo regionInfo)
+ throws IOException {
byte[] row = getMetaKeyForRegion(regionInfo);
Get get = new Get(row);
get.addFamily(HConstants.CATALOG_FAMILY);
@@ -364,7 +360,6 @@ public class MetaTableAccessor {
* @param connection connection we're using
* @param regionName region we're looking for
* @return result of the specified region
- * @throws IOException
*/
public static Result getRegionResult(Connection connection,
byte[] regionName) throws IOException {
@@ -376,7 +371,6 @@ public class MetaTableAccessor {
/**
* Get regions from the merge qualifier of the specified merged region
* @return null if it doesn't contain merge qualifier, else two merge regions
- * @throws IOException
*/
@Nullable
public static Pair<RegionInfo, RegionInfo> getRegionsFromMergeQualifier(
@@ -396,7 +390,6 @@ public class MetaTableAccessor {
* @param connection connection we're using
* @param tableName table to check
* @return true if the table exists in meta, false if not
- * @throws IOException
*/
public static boolean tableExists(Connection connection,
final TableName tableName)
@@ -413,7 +406,6 @@ public class MetaTableAccessor {
* @param excludeOfflinedSplitParents False if we are to include offlined/splitparents regions,
* true and we'll leave out offlined regions from returned list
* @return List of all user-space regions.
- * @throws IOException
*/
@VisibleForTesting
public static List<RegionInfo> getAllRegions(Connection connection,
@@ -434,7 +426,6 @@ public class MetaTableAccessor {
* @param connection connection we're using
* @param tableName table we're looking for
* @return Ordered list of {@link RegionInfo}.
- * @throws IOException
*/
public static List<RegionInfo> getTableRegions(Connection connection, TableName tableName)
throws IOException {
@@ -449,36 +440,27 @@ public class MetaTableAccessor {
* @param excludeOfflinedSplitParents If true, do not include offlined split
* parents in the return.
* @return Ordered list of {@link RegionInfo}.
- * @throws IOException
*/
- public static List<RegionInfo> getTableRegions(Connection connection,
- TableName tableName, final boolean excludeOfflinedSplitParents)
- throws IOException {
+ public static List<RegionInfo> getTableRegions(Connection connection, TableName tableName,
+ final boolean excludeOfflinedSplitParents) throws IOException {
List<Pair<RegionInfo, ServerName>> result =
- getTableRegionsAndLocations(connection, tableName, excludeOfflinedSplitParents);
+ getTableRegionsAndLocations(connection, tableName, excludeOfflinedSplitParents);
return getListOfRegionInfos(result);
}
- static List<RegionInfo> getListOfRegionInfos(final List<Pair<RegionInfo, ServerName>> pairs) {
- if (pairs == null || pairs.isEmpty()) return Collections.EMPTY_LIST;
+ private static List<RegionInfo> getListOfRegionInfos(
+ final List<Pair<RegionInfo, ServerName>> pairs) {
+ if (pairs == null || pairs.isEmpty()) {
+ return Collections.emptyList();
+ }
List<RegionInfo> result = new ArrayList<>(pairs.size());
- for (Pair<RegionInfo, ServerName> pair: pairs) {
+ for (Pair<RegionInfo, ServerName> pair : pairs) {
result.add(pair.getFirst());
}
return result;
}
/**
- * @param current region of current table we're working with
- * @param tableName table we're checking against
- * @return True if <code>current</code> tablename is equal to
- * <code>tableName</code>
- */
- static boolean isInsideTable(final RegionInfo current, final TableName tableName) {
- return tableName.equals(current.getTable());
- }
-
- /**
* @param tableName table we're working with
* @return start row for scanning META according to query type
*/
@@ -602,11 +584,11 @@ public class MetaTableAccessor {
@Override
public boolean visit(Result r) throws IOException {
current = getRegionLocations(r);
- if (current == null || current.getRegionLocation().getRegionInfo() == null) {
+ if (current == null || current.getRegionLocation().getRegion() == null) {
LOG.warn("No serialized RegionInfo in " + r);
return true;
}
- RegionInfo hri = current.getRegionLocation().getRegionInfo();
+ RegionInfo hri = current.getRegionLocation().getRegion();
if (excludeOfflinedSplitParents && hri.isSplitParent()) return true;
// Else call super and add this Result to the collection.
return super.visit(r);
@@ -619,7 +601,7 @@ public class MetaTableAccessor {
}
for (HRegionLocation loc : current.getRegionLocations()) {
if (loc != null) {
- this.results.add(new Pair<>(loc.getRegionInfo(), loc.getServerName()));
+ this.results.add(new Pair<>(loc.getRegion(), loc.getServerName()));
}
}
}
@@ -653,7 +635,7 @@ public class MetaTableAccessor {
for (HRegionLocation loc : locations.getRegionLocations()) {
if (loc != null) {
if (loc.getServerName() != null && loc.getServerName().equals(serverName)) {
- hris.put(loc.getRegionInfo(), r);
+ hris.put(loc.getRegion(), r);
}
}
}
@@ -678,7 +660,7 @@ public class MetaTableAccessor {
if (locations == null) return true;
for (HRegionLocation loc : locations.getRegionLocations()) {
if (loc != null) {
- LOG.info("fullScanMetaAndPrint.HRI Print= " + loc.getRegionInfo());
+ LOG.info("fullScanMetaAndPrint.HRI Print= " + loc.getRegion());
}
}
}
@@ -751,18 +733,21 @@ public class MetaTableAccessor {
* @param visitor Visitor invoked against each row.
* @throws IOException
*/
- public static void scanMeta(Connection connection,
- @Nullable final byte[] startRow, @Nullable final byte[] stopRow,
- QueryType type, int maxRows, final Visitor visitor)
- throws IOException {
+ public static void scanMeta(Connection connection, @Nullable final byte[] startRow,
+ @Nullable final byte[] stopRow, QueryType type, int maxRows, final Visitor visitor)
+ throws IOException {
int rowUpperLimit = maxRows > 0 ? maxRows : Integer.MAX_VALUE;
Scan scan = getMetaScan(connection, rowUpperLimit);
for (byte[] family : type.getFamilies()) {
scan.addFamily(family);
}
- if (startRow != null) scan.setStartRow(startRow);
- if (stopRow != null) scan.setStopRow(stopRow);
+ if (startRow != null) {
+ scan.withStartRow(startRow);
+ }
+ if (stopRow != null) {
+ scan.withStopRow(stopRow);
+ }
if (LOG.isTraceEnabled()) {
LOG.trace("Scanning META"
@@ -796,17 +781,14 @@ public class MetaTableAccessor {
/**
* @return Get closest metatable region row to passed <code>row</code>
- * @throws java.io.IOException
*/
@NonNull
- public static RegionInfo getClosestRegionInfo(Connection connection,
- @NonNull final TableName tableName,
- @NonNull final byte[] row)
- throws IOException {
+ private static RegionInfo getClosestRegionInfo(Connection connection,
+ @NonNull final TableName tableName, @NonNull final byte[] row) throws IOException {
byte[] searchRow = RegionInfo.createRegionName(tableName, row, HConstants.NINES, false);
Scan scan = getMetaScan(connection, 1);
scan.setReversed(true);
- scan.setStartRow(searchRow);
+ scan.withStartRow(searchRow);
try (ResultScanner resultScanner = getMetaHTable(connection).getScanner(scan)) {
Result result = resultScanner.next();
if (result == null) {
@@ -826,7 +808,7 @@ public class MetaTableAccessor {
* Returns the column family used for meta columns.
* @return HConstants.CATALOG_FAMILY.
*/
- protected static byte[] getCatalogFamily() {
+ private static byte[] getCatalogFamily() {
return HConstants.CATALOG_FAMILY;
}
@@ -834,7 +816,7 @@ public class MetaTableAccessor {
* Returns the column family used for table columns.
* @return HConstants.TABLE_FAMILY.
*/
- protected static byte[] getTableFamily() {
+ private static byte[] getTableFamily() {
return HConstants.TABLE_FAMILY;
}
@@ -842,25 +824,23 @@ public class MetaTableAccessor {
* Returns the column qualifier for serialized region info
* @return HConstants.REGIONINFO_QUALIFIER
*/
- protected static byte[] getRegionInfoColumn() {
+ private static byte[] getRegionInfoColumn() {
return HConstants.REGIONINFO_QUALIFIER;
}
/**
* Returns the column qualifier for serialized table state
- *
* @return HConstants.TABLE_STATE_QUALIFIER
*/
- protected static byte[] getTableStateColumn() {
+ private static byte[] getTableStateColumn() {
return HConstants.TABLE_STATE_QUALIFIER;
}
/**
* Returns the column qualifier for serialized region state
- *
   * @return HConstants.STATE_QUALIFIER
*/
- protected static byte[] getRegionStateColumn() {
+ private static byte[] getRegionStateColumn() {
return HConstants.STATE_QUALIFIER;
}
@@ -1138,21 +1118,20 @@ public class MetaTableAccessor {
* Should contain cell from HConstants.TABLE_FAMILY
* @param r result
* @return null if not found
- * @throws IOException
*/
@Nullable
- public static TableState getTableState(Result r)
- throws IOException {
+ public static TableState getTableState(Result r) throws IOException {
Cell cell = r.getColumnLatestCell(getTableFamily(), getTableStateColumn());
- if (cell == null) return null;
+ if (cell == null) {
+ return null;
+ }
try {
return TableState.parseFrom(TableName.valueOf(r.getRow()),
- Arrays.copyOfRange(cell.getValueArray(),
- cell.getValueOffset(), cell.getValueOffset() + cell.getValueLength()));
+ Arrays.copyOfRange(cell.getValueArray(), cell.getValueOffset(),
+ cell.getValueOffset() + cell.getValueLength()));
} catch (DeserializationException e) {
throw new IOException(e);
}
-
}
/**
@@ -1265,10 +1244,9 @@ public class MetaTableAccessor {
* @param c Configuration object
* @param tableName table name to count regions for
   * @return Count of regions in table <code>tableName</code>
- * @throws IOException
*/
public static int getRegionCount(final Configuration c, final TableName tableName)
- throws IOException {
+ throws IOException {
try (Connection connection = ConnectionFactory.createConnection(c)) {
return getRegionCount(connection, tableName);
}
@@ -1279,13 +1257,12 @@ public class MetaTableAccessor {
* @param connection Connection object
* @param tableName table name to count regions for
   * @return Count of regions in table <code>tableName</code>
- * @throws IOException
*/
public static int getRegionCount(final Connection connection, final TableName tableName)
- throws IOException {
+ throws IOException {
try (RegionLocator locator = connection.getRegionLocator(tableName)) {
List<HRegionLocation> locations = locator.getAllRegionLocations();
- return locations == null? 0: locations.size();
+ return locations == null ? 0 : locations.size();
}
}
@@ -1296,13 +1273,6 @@ public class MetaTableAccessor {
/**
   * Generates and returns a Put containing the region info for the catalog table
*/
- public static Put makePutFromRegionInfo(RegionInfo regionInfo) throws IOException {
- return makePutFromRegionInfo(regionInfo, EnvironmentEdgeManager.currentTime());
- }
-
- /**
- * Generates and returns a Put containing the region into for the catalog table
- */
public static Put makePutFromRegionInfo(RegionInfo regionInfo, long ts) throws IOException {
Put put = new Put(regionInfo.getRegionName(), ts);
addRegionInfo(put, regionInfo);
@@ -1313,16 +1283,7 @@ public class MetaTableAccessor {
* Generates and returns a Delete containing the region info for the catalog
* table
*/
- public static Delete makeDeleteFromRegionInfo(RegionInfo regionInfo) {
- long now = EnvironmentEdgeManager.currentTime();
- return makeDeleteFromRegionInfo(regionInfo, now);
- }
-
- /**
- * Generates and returns a Delete containing the region info for the catalog
- * table
- */
- public static Delete makeDeleteFromRegionInfo(RegionInfo regionInfo, long ts) {
+ private static Delete makeDeleteFromRegionInfo(RegionInfo regionInfo, long ts) {
if (regionInfo == null) {
throw new IllegalArgumentException("Can't make a delete for null region");
}
@@ -1363,41 +1324,32 @@ public class MetaTableAccessor {
* Put the passed <code>p</code> to the <code>hbase:meta</code> table.
* @param connection connection we're using
* @param p Put to add to hbase:meta
- * @throws IOException
*/
- static void putToMetaTable(final Connection connection, final Put p)
- throws IOException {
- put(getMetaHTable(connection), p);
+ private static void putToMetaTable(Connection connection, Put p) throws IOException {
+ try (Table table = getMetaHTable(connection)) {
+ put(table, p);
+ }
}
/**
- * @param t Table to use (will be closed when done).
+ * @param t Table to use
* @param p put to make
- * @throws IOException
*/
- private static void put(final Table t, final Put p) throws IOException {
- try {
- debugLogMutation(p);
- t.put(p);
- } finally {
- t.close();
- }
+ private static void put(Table t, Put p) throws IOException {
+ debugLogMutation(p);
+ t.put(p);
}
/**
* Put the passed <code>ps</code> to the <code>hbase:meta</code> table.
* @param connection connection we're using
* @param ps Put to add to hbase:meta
- * @throws IOException
*/
public static void putsToMetaTable(final Connection connection, final List<Put> ps)
- throws IOException {
- Table t = getMetaHTable(connection);
- try {
+ throws IOException {
+ try (Table t = getMetaHTable(connection)) {
debugLogMutations(ps);
t.put(ps);
- } finally {
- t.close();
}
}
@@ -1405,10 +1357,9 @@ public class MetaTableAccessor {
* Delete the passed <code>d</code> from the <code>hbase:meta</code> table.
* @param connection connection we're using
* @param d Delete to add to hbase:meta
- * @throws IOException
*/
- static void deleteFromMetaTable(final Connection connection, final Delete d)
- throws IOException {
+ private static void deleteFromMetaTable(final Connection connection, final Delete d)
+ throws IOException {
List<Delete> dels = new ArrayList<>(1);
dels.add(d);
deleteFromMetaTable(connection, dels);
@@ -1418,16 +1369,12 @@ public class MetaTableAccessor {
* Delete the passed <code>deletes</code> from the <code>hbase:meta</code> table.
* @param connection connection we're using
* @param deletes Deletes to add to hbase:meta This list should support #remove.
- * @throws IOException
*/
- public static void deleteFromMetaTable(final Connection connection, final List<Delete> deletes)
- throws IOException {
- Table t = getMetaHTable(connection);
- try {
+ private static void deleteFromMetaTable(final Connection connection, final List<Delete> deletes)
+ throws IOException {
+ try (Table t = getMetaHTable(connection)) {
debugLogMutations(deletes);
t.delete(deletes);
- } finally {
- t.close();
}
}
@@ -1437,7 +1384,6 @@ public class MetaTableAccessor {
* @param replicaIndexToDeleteFrom the replica ID we would start deleting from
* @param numReplicasToRemove how many replicas to remove
* @param connection connection we're using to access meta table
- * @throws IOException
*/
public static void removeRegionReplicasFromMeta(Set<byte[]> metaRows,
int replicaIndexToDeleteFrom, int numReplicasToRemove, Connection connection)
@@ -1480,7 +1426,7 @@ public class MetaTableAccessor {
}
}
- static void addRegionStateToPut(Put put, RegionState.State state) throws IOException {
+ private static void addRegionStateToPut(Put put, RegionState.State state) throws IOException {
put.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY)
.setRow(put.getRow())
.setFamily(HConstants.CATALOG_FAMILY)
@@ -1507,7 +1453,7 @@ public class MetaTableAccessor {
RegionInfo splitA, RegionInfo splitB) throws IOException {
Table meta = getMetaHTable(connection);
try {
- Put put = makePutFromRegionInfo(regionInfo);
+ Put put = makePutFromRegionInfo(regionInfo, EnvironmentEdgeManager.currentTime());
addDaughtersToPut(put, splitA, splitB);
meta.put(put);
debugLogMutation(put);
@@ -1542,8 +1488,10 @@ public class MetaTableAccessor {
*/
public static void addRegionsToMeta(Connection connection, List<RegionInfo> regionInfos,
int regionReplication) throws IOException {
- addRegionsToMeta(connection, regionInfos, regionReplication, HConstants.LATEST_TIMESTAMP);
+ addRegionsToMeta(connection, regionInfos, regionReplication,
+ EnvironmentEdgeManager.currentTime());
}
+
/**
* Adds a hbase:meta row for each of the specified new regions. Initial state for new regions
* is CLOSED.
@@ -1553,7 +1501,7 @@ public class MetaTableAccessor {
* @param ts desired timestamp
* @throws IOException if problem connecting or updating meta
*/
- public static void addRegionsToMeta(Connection connection, List<RegionInfo> regionInfos,
+ private static void addRegionsToMeta(Connection connection, List<RegionInfo> regionInfos,
int regionReplication, long ts) throws IOException {
List<Put> puts = new ArrayList<>();
for (RegionInfo regionInfo : regionInfos) {
@@ -1579,20 +1527,16 @@ public class MetaTableAccessor {
* two merging regions.
* @param connection connection we're using
* @param mergedRegion the merged region
- * @param regionA
- * @param regionB
+ * @param regionA merge parent region A
+ * @param regionB merge parent region B
* @param sn the location of the region
- * @param masterSystemTime
- * @throws IOException
*/
public static void mergeRegions(final Connection connection, RegionInfo mergedRegion,
- RegionInfo regionA, RegionInfo regionB, ServerName sn, int regionReplication,
- long masterSystemTime)
- throws IOException {
- Table meta = getMetaHTable(connection);
- try {
+ RegionInfo regionA, RegionInfo regionB, ServerName sn, int regionReplication)
+ throws IOException {
+ try (Table meta = getMetaHTable(connection)) {
-      // use the maximum of what master passed us vs local time.
+      // use the master's local time as the mutation timestamp.
- long time = Math.max(EnvironmentEdgeManager.currentTime(), masterSystemTime);
+ long time = EnvironmentEdgeManager.currentTime();
// Put for parent
Put putOfMerged = makePutFromRegionInfo(mergedRegion, time);
@@ -1628,7 +1572,9 @@ public class MetaTableAccessor {
// if crash after merge happened but before we got to here.. means in-memory
// locations of offlined merged, now-closed, regions is lost. Should be ok. We
// assign the merged region later.
- if (sn != null) addLocation(putOfMerged, sn, 1, -1, mergedRegion.getReplicaId());
+ if (sn != null) {
+ addLocation(putOfMerged, sn, 1, mergedRegion.getReplicaId());
+ }
// Add empty locations for region replicas of the merged region so that number of replicas can
// be cached whenever the primary region is looked up from meta
@@ -1639,8 +1585,6 @@ public class MetaTableAccessor {
byte[] tableRow = Bytes.toBytes(mergedRegion.getRegionNameAsString()
+ HConstants.DELIMITER);
multiMutate(connection, meta, tableRow, putOfMerged, deleteA, deleteB);
- } finally {
- meta.close();
}
}
@@ -1652,22 +1596,22 @@ public class MetaTableAccessor {
* @param connection connection we're using
* @param parent the parent region which is split
* @param splitA Split daughter region A
- * @param splitB Split daughter region A
+ * @param splitB Split daughter region B
* @param sn the location of the region
*/
public static void splitRegion(final Connection connection, RegionInfo parent, RegionInfo splitA,
RegionInfo splitB, ServerName sn, int regionReplication) throws IOException {
- Table meta = getMetaHTable(connection);
- try {
- //Put for parent
+ try (Table meta = getMetaHTable(connection)) {
+ long time = EnvironmentEdgeManager.currentTime();
+ // Put for parent
Put putParent = makePutFromRegionInfo(RegionInfoBuilder.newBuilder(parent)
.setOffline(true)
- .setSplit(true).build());
+ .setSplit(true).build(), time);
addDaughtersToPut(putParent, splitA, splitB);
- //Puts for daughters
- Put putA = makePutFromRegionInfo(splitA);
- Put putB = makePutFromRegionInfo(splitB);
+ // Puts for daughters
+ Put putA = makePutFromRegionInfo(splitA, time);
+ Put putB = makePutFromRegionInfo(splitB, time);
// Set initial state to CLOSED
// NOTE: If initial state is not set to CLOSED then daughter regions get added with the
@@ -1677,8 +1621,8 @@ public class MetaTableAccessor {
addRegionStateToPut(putA, RegionState.State.CLOSED);
addRegionStateToPut(putB, RegionState.State.CLOSED);
- addSequenceNum(putA, 1, -1, splitA.getReplicaId()); //new regions, openSeqNum = 1 is fine.
- addSequenceNum(putB, 1, -1, splitB.getReplicaId());
+ addSequenceNum(putA, 1, splitA.getReplicaId()); // new regions, openSeqNum = 1 is fine.
+ addSequenceNum(putB, 1, splitB.getReplicaId());
// Add empty locations for region replicas of daughters so that number of replicas can be
// cached whenever the primary region is looked up from meta
@@ -1689,8 +1633,6 @@ public class MetaTableAccessor {
byte[] tableRow = Bytes.toBytes(parent.getRegionNameAsString() + HConstants.DELIMITER);
multiMutate(connection, meta, tableRow, putParent, putA, putB);
- } finally {
- meta.close();
}
}
@@ -1698,23 +1640,19 @@ public class MetaTableAccessor {
* Update state of the table in meta.
* @param connection what we use for update
* @param state new state
- * @throws IOException
*/
- public static void updateTableState(Connection connection, TableState state)
- throws IOException {
- Put put = makePutFromTableState(state);
+ private static void updateTableState(Connection connection, TableState state) throws IOException {
+ Put put = makePutFromTableState(state, EnvironmentEdgeManager.currentTime());
putToMetaTable(connection, put);
- LOG.info(
- "Updated table " + state.getTableName() + " state to " + state.getState() + " in META");
+ LOG.info("Updated table {} state to {} in META", state.getTableName(), state.getState());
}
/**
* Construct PUT for given state
* @param state new state
*/
- public static Put makePutFromTableState(TableState state) {
- long time = EnvironmentEdgeManager.currentTime();
- Put put = new Put(state.getTableName().getName(), time);
+ public static Put makePutFromTableState(TableState state, long ts) {
+ Put put = new Put(state.getTableName().getName(), ts);
put.addColumn(getTableFamily(), getTableStateColumn(), state.convert().toByteArray());
return put;
}
@@ -1776,7 +1714,7 @@ public class MetaTableAccessor {
// The call to #prepare that ran before this invocation will have populated HRegionLocation.
HRegionLocation hrl = getLocation();
RegionSpecifier region = ProtobufUtil.buildRegionSpecifier(
- RegionSpecifierType.REGION_NAME, hrl.getRegionInfo().getRegionName());
+ RegionSpecifierType.REGION_NAME, hrl.getRegion().getRegionName());
builder.setRegion(region);
// The rpcController here is awkward. The Coprocessor Endpoint wants an instance of a
// com.google.protobuf but we are going over an rpc that is all shaded protobuf so it
@@ -1802,55 +1740,44 @@ public class MetaTableAccessor {
}
/**
- * Updates the location of the specified region in hbase:meta to be the specified
- * server hostname and startcode.
+ * Updates the location of the specified region in hbase:meta to be the specified server hostname
+ * and startcode.
* <p>
- * Uses passed catalog tracker to get a connection to the server hosting
- * hbase:meta and makes edits to that region.
- *
+ * Uses passed catalog tracker to get a connection to the server hosting hbase:meta and makes
+ * edits to that region.
* @param connection connection we're using
* @param regionInfo region to update location of
* @param openSeqNum the latest sequence number obtained when the region was open
* @param sn Server name
- * @param masterSystemTime wall clock time from master if passed in the open region RPC or -1
- * @throws IOException
+ * @param masterSystemTime wall clock time from master if passed in the open region RPC
*/
- public static void updateRegionLocation(Connection connection,
- RegionInfo regionInfo, ServerName sn, long openSeqNum,
- long masterSystemTime)
- throws IOException {
+ @VisibleForTesting
+ public static void updateRegionLocation(Connection connection, RegionInfo regionInfo,
+ ServerName sn, long openSeqNum, long masterSystemTime) throws IOException {
updateLocation(connection, regionInfo, sn, openSeqNum, masterSystemTime);
}
/**
* Updates the location of the specified region to be the specified server.
* <p>
- * Connects to the specified server which should be hosting the specified
- * catalog region name to perform the edit.
- *
+ * Connects to the specified server which should be hosting the specified catalog region name to
+ * perform the edit.
* @param connection connection we're using
* @param regionInfo region to update location of
* @param sn Server name
* @param openSeqNum the latest sequence number obtained when the region was open
- * @param masterSystemTime wall clock time from master if passed in the open region RPC or -1
- * @throws IOException In particular could throw {@link java.net.ConnectException}
- * if the server is down on other end.
+ * @param masterSystemTime wall clock time from master if passed in the open region RPC
+ * @throws IOException In particular could throw {@link java.net.ConnectException} if the server
+ * is down on other end.
*/
- private static void updateLocation(final Connection connection,
- RegionInfo regionInfo, ServerName sn, long openSeqNum,
- long masterSystemTime)
- throws IOException {
-
- // use the maximum of what master passed us vs local time.
- long time = Math.max(EnvironmentEdgeManager.currentTime(), masterSystemTime);
-
+ private static void updateLocation(Connection connection, RegionInfo regionInfo, ServerName sn,
+ long openSeqNum, long masterSystemTime) throws IOException {
// region replicas are kept in the primary region's row
- Put put = new Put(getMetaKeyForRegion(regionInfo), time);
+ Put put = new Put(getMetaKeyForRegion(regionInfo), masterSystemTime);
addRegionInfo(put, regionInfo);
- addLocation(put, sn, openSeqNum, time, regionInfo.getReplicaId());
+ addLocation(put, sn, openSeqNum, regionInfo.getReplicaId());
putToMetaTable(connection, put);
- LOG.info("Updated row " + regionInfo.getRegionNameAsString() +
- " with server=" + sn);
+ LOG.info("Updated row {} with server=", regionInfo.getRegionNameAsString(), sn);
}
/**
@@ -1873,28 +1800,26 @@ public class MetaTableAccessor {
* Deletes the specified regions from META.
* @param connection connection we're using
* @param regionsInfo list of regions to be deleted from META
- * @throws IOException
*/
- public static void deleteRegions(Connection connection,
- List<RegionInfo> regionsInfo) throws IOException {
+ public static void deleteRegions(Connection connection, List<RegionInfo> regionsInfo)
+ throws IOException {
deleteRegions(connection, regionsInfo, EnvironmentEdgeManager.currentTime());
}
/**
* Deletes the specified regions from META.
* @param connection connection we're using
* @param regionsInfo list of regions to be deleted from META
- * @throws IOException
*/
public static void deleteRegions(Connection connection, List<RegionInfo> regionsInfo, long ts)
- throws IOException {
+ throws IOException {
List<Delete> deletes = new ArrayList<>(regionsInfo.size());
- for (RegionInfo hri: regionsInfo) {
+ for (RegionInfo hri : regionsInfo) {
Delete e = new Delete(hri.getRegionName());
e.addFamily(getCatalogFamily(), ts);
deletes.add(e);
}
deleteFromMetaTable(connection, deletes);
- LOG.info("Deleted " + regionsInfo.size() + " regions from META");
+ LOG.info("Deleted {} regions from META", regionsInfo.size());
LOG.debug("Deleted regions: {}", regionsInfo);
}
@@ -1903,7 +1828,6 @@ public class MetaTableAccessor {
* adds new ones. Regions added back have state CLOSED.
* @param connection connection we're using
* @param regionInfos list of regions to be added to META
- * @throws IOException
*/
public static void overwriteRegions(Connection connection,
List<RegionInfo> regionInfos, int regionReplication) throws IOException {
@@ -1916,7 +1840,7 @@ public class MetaTableAccessor {
// or HBASE-9905 is fixed and meta uses seqIds, we do not need the sleep.
//
// HBASE-13875 uses master timestamp for the mutations. The 20ms sleep is not needed
- addRegionsToMeta(connection, regionInfos, regionReplication, now+1);
+ addRegionsToMeta(connection, regionInfos, regionReplication, now + 1);
LOG.info("Overwritten " + regionInfos.size() + " regions to Meta");
LOG.debug("Overwritten regions: {} ", regionInfos);
}
@@ -1924,11 +1848,10 @@ public class MetaTableAccessor {
/**
* Deletes merge qualifiers for the specified merged region.
* @param connection connection we're using
- * @param mergedRegion
- * @throws IOException
+ * @param mergedRegion the merged region
*/
- public static void deleteMergeQualifiers(Connection connection,
- final RegionInfo mergedRegion) throws IOException {
+ public static void deleteMergeQualifiers(Connection connection, final RegionInfo mergedRegion)
+ throws IOException {
long time = EnvironmentEdgeManager.currentTime();
Delete delete = new Delete(mergedRegion.getRegionName());
delete.addColumns(getCatalogFamily(), HConstants.MERGEA_QUALIFIER, time);
@@ -1953,17 +1876,14 @@ public class MetaTableAccessor {
return p;
}
- public static Put addLocation(final Put p, final ServerName sn, long openSeqNum,
- long time, int replicaId) throws IOException {
- if (time <= 0) {
- time = EnvironmentEdgeManager.currentTime();
- }
+ public static Put addLocation(Put p, ServerName sn, long openSeqNum, int replicaId)
+ throws IOException {
CellBuilder builder = CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY);
return p.add(builder.clear()
.setRow(p.getRow())
.setFamily(getCatalogFamily())
.setQualifier(getServerColumn(replicaId))
- .setTimestamp(time)
+ .setTimestamp(p.getTimeStamp())
.setType(Cell.Type.Put)
.setValue(Bytes.toBytes(sn.getAddress().toString()))
.build())
@@ -1971,7 +1891,7 @@ public class MetaTableAccessor {
.setRow(p.getRow())
.setFamily(getCatalogFamily())
.setQualifier(getStartCodeColumn(replicaId))
- .setTimestamp(time)
+ .setTimestamp(p.getTimeStamp())
.setType(Cell.Type.Put)
.setValue(Bytes.toBytes(sn.getStartcode()))
.build())
@@ -1979,34 +1899,33 @@ public class MetaTableAccessor {
.setRow(p.getRow())
.setFamily(getCatalogFamily())
.setQualifier(getSeqNumColumn(replicaId))
- .setTimestamp(time)
+ .setTimestamp(p.getTimeStamp())
.setType(Type.Put)
.setValue(Bytes.toBytes(openSeqNum))
.build());
}
- public static Put addEmptyLocation(final Put p, int replicaId) throws IOException {
- long now = EnvironmentEdgeManager.currentTime();
+ private static Put addEmptyLocation(Put p, int replicaId) throws IOException {
CellBuilder builder = CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY);
return p.add(builder.clear()
.setRow(p.getRow())
.setFamily(getCatalogFamily())
.setQualifier(getServerColumn(replicaId))
- .setTimestamp(now)
+ .setTimestamp(p.getTimeStamp())
.setType(Type.Put)
.build())
.add(builder.clear()
.setRow(p.getRow())
.setFamily(getCatalogFamily())
.setQualifier(getStartCodeColumn(replicaId))
- .setTimestamp(now)
+ .setTimestamp(p.getTimeStamp())
.setType(Cell.Type.Put)
.build())
.add(builder.clear()
.setRow(p.getRow())
.setFamily(getCatalogFamily())
.setQualifier(getSeqNumColumn(replicaId))
- .setTimestamp(now)
+ .setTimestamp(p.getTimeStamp())
.setType(Cell.Type.Put)
.build());
}
@@ -2026,16 +1945,12 @@ public class MetaTableAccessor {
METALOG.debug("{} {}", p.getClass().getSimpleName(), p.toJSON());
}
- public static Put addSequenceNum(final Put p, long openSeqNum, long time,
- int replicaId) throws IOException {
- if (time <= 0) {
- time = EnvironmentEdgeManager.currentTime();
- }
+ private static Put addSequenceNum(Put p, long openSeqNum, int replicaId) throws IOException {
return p.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY)
.setRow(p.getRow())
.setFamily(HConstants.CATALOG_FAMILY)
.setQualifier(getSeqNumColumn(replicaId))
- .setTimestamp(time)
+ .setTimestamp(p.getTimeStamp())
.setType(Type.Put)
.setValue(Bytes.toBytes(openSeqNum))
.build());
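Reader's note (not part of the commit): in the MetaTableAccessor changes above, addLocation, addEmptyLocation and addSequenceNum no longer take a time argument; they stamp their cells with the Put's own timestamp, so the single ts chosen when the Put is created reaches every catalog cell. A hedged sketch of the new call shape, using only the public methods shown in the diff; the sketch class and method names are illustrative.

    import java.io.IOException;

    import org.apache.hadoop.hbase.MetaTableAccessor;
    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.RegionInfo;
    import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;

    public class RegionLocationPutSketch {
      // regioninfo, server, startcode and seqnum cells all inherit the Put's timestamp.
      static Put locationPut(RegionInfo region, ServerName sn, long openSeqNum) throws IOException {
        long ts = EnvironmentEdgeManager.currentTime();
        Put put = MetaTableAccessor.makePutFromRegionInfo(region, ts);
        return MetaTableAccessor.addLocation(put, sn, openSeqNum, region.getReplicaId());
      }
    }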
http://git-wip-us.apache.org/repos/asf/hbase/blob/ba5fb53d/hbase-server/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodeAssignmentHelper.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodeAssignmentHelper.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodeAssignmentHelper.java
index 2a20b80..a1fde74 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodeAssignmentHelper.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodeAssignmentHelper.java
@@ -167,28 +167,26 @@ public class FavoredNodeAssignmentHelper {
}
/**
- * Generates and returns a Put containing the region info for the catalog table
- * and the servers
- * @param regionInfo
- * @param favoredNodeList
+ * Generates and returns a Put containing the region info for the catalog table and the servers
* @return Put object
*/
- static Put makePutFromRegionInfo(RegionInfo regionInfo, List<ServerName>favoredNodeList)
- throws IOException {
+ private static Put makePutFromRegionInfo(RegionInfo regionInfo, List<ServerName> favoredNodeList)
+ throws IOException {
Put put = null;
if (favoredNodeList != null) {
- put = MetaTableAccessor.makePutFromRegionInfo(regionInfo);
+ long time = EnvironmentEdgeManager.currentTime();
+ put = MetaTableAccessor.makePutFromRegionInfo(regionInfo, time);
byte[] favoredNodes = getFavoredNodes(favoredNodeList);
put.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY)
.setRow(put.getRow())
.setFamily(HConstants.CATALOG_FAMILY)
.setQualifier(FAVOREDNODES_QUALIFIER)
- .setTimestamp(EnvironmentEdgeManager.currentTime())
+ .setTimestamp(time)
.setType(Type.Put)
.setValue(favoredNodes)
.build());
- LOG.debug("Create the region " + regionInfo.getRegionNameAsString() +
- " with favored nodes " + favoredNodeList);
+ LOG.debug("Create the region {} with favored nodes {}", regionInfo.getRegionNameAsString(),
+ favoredNodeList);
}
return put;
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/ba5fb53d/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java
index ff65f46..0f26bfa 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java
@@ -222,9 +222,6 @@ public class AssignmentManager implements ServerListener {
// Register Server Listener
master.getServerManager().registerListener(this);
- // Start the RegionStateStore
- regionStateStore.start();
-
// Start the Assignment Thread
startAssignmentThread();
}
@@ -250,7 +247,6 @@ public class AssignmentManager implements ServerListener {
// Stop the RegionStateStore
regionStates.clear();
- regionStateStore.stop();
// Unregister Server Listener
master.getServerManager().unregisterListener(this);
http://git-wip-us.apache.org/repos/asf/hbase/blob/ba5fb53d/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStateStore.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStateStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStateStore.java
index ab5c442..9fc7641 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStateStore.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStateStore.java
@@ -20,10 +20,8 @@
package org.apache.hadoop.hbase.master.assignment;
import java.io.IOException;
-import java.util.Arrays;
import java.util.Collections;
import java.util.List;
-
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellBuilderFactory;
import org.apache.hadoop.hbase.CellBuilderType;
@@ -36,13 +34,13 @@ import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.master.MasterServices;
import org.apache.hadoop.hbase.master.RegionState.State;
import org.apache.hadoop.hbase.procedure2.util.StringUtils;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-import org.apache.hadoop.hbase.util.MultiHConnection;
import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
import org.apache.yetus.audience.InterfaceAudience;
import org.apache.zookeeper.KeeperException;
@@ -64,22 +62,10 @@ public class RegionStateStore {
private final MasterServices master;
- private MultiHConnection multiHConnection;
-
public RegionStateStore(final MasterServices master) {
this.master = master;
}
- public void start() throws IOException {
- }
-
- public void stop() {
- if (multiHConnection != null) {
- multiHConnection.close();
- multiHConnection = null;
- }
- }
-
public interface RegionStateVisitor {
void visitRegionState(RegionInfo regionInfo, State state,
ServerName regionLocation, ServerName lastHost, long openSeqNum);
@@ -121,7 +107,7 @@ public class RegionStateStore {
final HRegionLocation hrl = locations[i];
if (hrl == null) continue;
- final RegionInfo regionInfo = hrl.getRegionInfo();
+ final RegionInfo regionInfo = hrl.getRegion();
if (regionInfo == null) continue;
final int replicaId = regionInfo.getReplicaId();
@@ -152,7 +138,7 @@ public class RegionStateStore {
}
}
- protected void updateMetaLocation(final RegionInfo regionInfo, final ServerName serverName)
+ private void updateMetaLocation(final RegionInfo regionInfo, final ServerName serverName)
throws IOException {
try {
MetaTableLocator.setMetaLocation(master.getZooKeeper(), serverName,
@@ -162,19 +148,21 @@ public class RegionStateStore {
}
}
- protected void updateUserRegionLocation(final RegionInfo regionInfo, final State state,
+ private void updateUserRegionLocation(final RegionInfo regionInfo, final State state,
final ServerName regionLocation, final ServerName lastHost, final long openSeqNum,
final long pid)
throws IOException {
+ long time = EnvironmentEdgeManager.currentTime();
final int replicaId = regionInfo.getReplicaId();
- final Put put = new Put(MetaTableAccessor.getMetaKeyForRegion(regionInfo));
+ final Put put = new Put(MetaTableAccessor.getMetaKeyForRegion(regionInfo), time);
MetaTableAccessor.addRegionInfo(put, regionInfo);
- final StringBuilder info = new StringBuilder("pid=" + pid + " updating hbase:meta row=");
- info.append(regionInfo.getRegionNameAsString()).append(", regionState=").append(state);
+ final StringBuilder info =
+ new StringBuilder("pid=").append(pid).append(" updating hbase:meta row=")
+ .append(regionInfo.getRegionNameAsString()).append(", regionState=").append(state);
if (openSeqNum >= 0) {
Preconditions.checkArgument(state == State.OPEN && regionLocation != null,
"Open region should be on a server");
- MetaTableAccessor.addLocation(put, regionLocation, openSeqNum, -1, replicaId);
+ MetaTableAccessor.addLocation(put, regionLocation, openSeqNum, replicaId);
info.append(", openSeqNum=").append(openSeqNum);
info.append(", regionLocation=").append(regionLocation);
} else if (regionLocation != null && !regionLocation.equals(lastHost)) {
@@ -202,21 +190,15 @@ public class RegionStateStore {
updateRegionLocation(regionInfo, state, put);
}
- protected void updateRegionLocation(final RegionInfo regionInfo, final State state,
- final Put... put) throws IOException {
- synchronized (this) {
- if (multiHConnection == null) {
- multiHConnection = new MultiHConnection(master.getConfiguration(), 1);
- }
- }
-
- try {
- multiHConnection.processBatchCallback(Arrays.asList(put), TableName.META_TABLE_NAME, null, null);
+ private void updateRegionLocation(RegionInfo regionInfo, State state, Put put)
+ throws IOException {
+ try (Table table = master.getConnection().getTable(TableName.META_TABLE_NAME)) {
+ table.put(put);
} catch (IOException e) {
      // TODO: Revisit!!!! Means that if a server is loaded, then we will abort our host!
// In tests we abort the Master!
String msg = String.format("FAILED persisting region=%s state=%s",
- regionInfo.getShortNameToLog(), state);
+ regionInfo.getShortNameToLog(), state);
LOG.error(msg, e);
master.abort(msg, e);
throw e;
@@ -236,11 +218,11 @@ public class RegionStateStore {
// ============================================================================================
// Update Region Merging State helpers
// ============================================================================================
- public void mergeRegions(final RegionInfo parent, final RegionInfo hriA,
- final RegionInfo hriB, final ServerName serverName) throws IOException {
+ public void mergeRegions(final RegionInfo parent, final RegionInfo hriA, final RegionInfo hriB,
+ final ServerName serverName) throws IOException {
final TableDescriptor htd = getTableDescriptor(parent.getTable());
MetaTableAccessor.mergeRegions(master.getConnection(), parent, hriA, hriB, serverName,
- getRegionReplication(htd), EnvironmentEdgeManager.currentTime());
+ getRegionReplication(htd));
}
// ============================================================================================
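Reader's note (not part of the commit): RegionStateStore above drops MultiHConnection and writes to hbase:meta through the master's ordinary Connection. A minimal sketch of that write path, assuming the caller already holds the Connection and the Put; the sketch class name is illustrative.

    import java.io.IOException;

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;

    public class MetaWriteSketch {
      // Single Put against hbase:meta; the Table is short-lived, the Connection is shared.
      static void writeToMeta(Connection connection, Put put) throws IOException {
        try (Table table = connection.getTable(TableName.META_TABLE_NAME)) {
          table.put(put);
        }
      }
    }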
http://git-wip-us.apache.org/repos/asf/hbase/blob/ba5fb53d/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index 79ff3a7..6e1aceb 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -1,4 +1,4 @@
-/*
+/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
@@ -89,7 +89,6 @@ import org.apache.hadoop.hbase.ExtendedCellBuilderFactory;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HConstants.OperationStatusCode;
import org.apache.hadoop.hbase.HDFSBlocksDistribution;
-import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.KeyValueUtil;
import org.apache.hadoop.hbase.NamespaceDescriptor;
@@ -112,6 +111,7 @@ import org.apache.hadoop.hbase.client.Mutation;
import org.apache.hadoop.hbase.client.PackagePrivateFieldAccessor;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.client.RegionInfoBuilder;
import org.apache.hadoop.hbase.client.RegionReplicaUtil;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.RowMutations;
@@ -7053,7 +7053,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
LOG.debug("Opening region (readOnly filesystem): " + info);
}
if (info.getReplicaId() <= 0) {
- info = new HRegionInfo((HRegionInfo) info, 1);
+ info = RegionInfoBuilder.newBuilder(info).setReplicaId(1).build();
}
HRegion r = HRegion.newHRegion(tableDir, null, fs, conf, info, htd, null);
r.writestate.setReadOnly(true);
http://git-wip-us.apache.org/repos/asf/hbase/blob/ba5fb53d/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
index da25ecb..a2b3e45 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
@@ -1492,8 +1492,8 @@ public class HBaseFsck extends Configured implements Closeable {
*
* @return An array list of puts to do in bulk, null if tables have problems
*/
- private ArrayList<Put> generatePuts(
- SortedMap<TableName, TableInfo> tablesInfo) throws IOException {
+ private ArrayList<Put> generatePuts(SortedMap<TableName, TableInfo> tablesInfo)
+ throws IOException {
ArrayList<Put> puts = new ArrayList<>();
boolean hasProblems = false;
for (Entry<TableName, TableInfo> e : tablesInfo.entrySet()) {
@@ -1505,8 +1505,9 @@ public class HBaseFsck extends Configured implements Closeable {
}
TableInfo ti = e.getValue();
- puts.add(MetaTableAccessor
- .makePutFromTableState(new TableState(ti.tableName, TableState.State.ENABLED)));
+ puts.add(MetaTableAccessor.makePutFromTableState(
+ new TableState(ti.tableName, TableState.State.ENABLED),
+ EnvironmentEdgeManager.currentTime()));
for (Entry<byte[], Collection<HbckInfo>> spl : ti.sc.getStarts().asMap()
.entrySet()) {
Collection<HbckInfo> his = spl.getValue();
@@ -1522,7 +1523,7 @@ public class HBaseFsck extends Configured implements Closeable {
// add the row directly to meta.
HbckInfo hi = his.iterator().next();
RegionInfo hri = hi.getHdfsHRI(); // hi.metaEntry;
- Put p = MetaTableAccessor.makePutFromRegionInfo(hri);
+ Put p = MetaTableAccessor.makePutFromRegionInfo(hri, EnvironmentEdgeManager.currentTime());
puts.add(p);
}
}
@@ -2181,7 +2182,7 @@ public class HBaseFsck extends Configured implements Closeable {
.setOffline(false)
.setSplit(false)
.build();
- Put p = MetaTableAccessor.makePutFromRegionInfo(hri);
+ Put p = MetaTableAccessor.makePutFromRegionInfo(hri, EnvironmentEdgeManager.currentTime());
mutations.add(p);
meta.mutateRow(mutations);
http://git-wip-us.apache.org/repos/asf/hbase/blob/ba5fb53d/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsckRepair.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsckRepair.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsckRepair.java
index 86cb921..ec7f717 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsckRepair.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsckRepair.java
@@ -162,7 +162,7 @@ public class HBaseFsckRepair {
RegionInfo hri, Collection<ServerName> servers, int numReplicas) throws IOException {
Connection conn = ConnectionFactory.createConnection(conf);
Table meta = conn.getTable(TableName.META_TABLE_NAME);
- Put put = MetaTableAccessor.makePutFromRegionInfo(hri);
+ Put put = MetaTableAccessor.makePutFromRegionInfo(hri, EnvironmentEdgeManager.currentTime());
if (numReplicas > 1) {
Random r = new Random();
ServerName[] serversArr = servers.toArray(new ServerName[servers.size()]);
@@ -172,7 +172,7 @@ public class HBaseFsckRepair {
// see the additional replicas when it is asked to assign. The
// final value of these columns will be different and will be updated
// by the actual regionservers that start hosting the respective replicas
- MetaTableAccessor.addLocation(put, sn, sn.getStartcode(), -1, i);
+ MetaTableAccessor.addLocation(put, sn, sn.getStartcode(), i);
}
}
meta.put(put);
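
In HBaseFsckRepair the same idea removes an argument from addLocation: the dropped -1 appears to have been the per-call timestamp of the old signature, and after this patch the timestamp set on the Put built by makePutFromRegionInfo(hri, ts) is what every location column gets. A hedged sketch of that flow; the class name AddReplicaLocationsSketch and its parameters are illustrative, and passing the startcode as the sequence-number argument simply mirrors the call in the hunk above.

import java.io.IOException;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;

public class AddReplicaLocationsSketch {
  // Writes a meta row for "regionInfo" plus a location column set for each
  // additional replica. No per-call timestamp is passed to addLocation; the
  // timestamp carried by the Put below is what ends up on every column.
  static void addRegionWithReplicas(Table meta, RegionInfo regionInfo, ServerName sn,
      int numReplicas) throws IOException {
    Put put = MetaTableAccessor.makePutFromRegionInfo(regionInfo,
        EnvironmentEdgeManager.currentTime());
    for (int replicaId = 1; replicaId < numReplicas; replicaId++) {
      MetaTableAccessor.addLocation(put, sn, sn.getStartcode(), replicaId);
    }
    meta.put(put);
  }
}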
http://git-wip-us.apache.org/repos/asf/hbase/blob/ba5fb53d/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMetaTableAccessor.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMetaTableAccessor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMetaTableAccessor.java
index 609c54e..ec93207 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMetaTableAccessor.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMetaTableAccessor.java
@@ -53,6 +53,7 @@ import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.testclassification.MiscTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+import org.apache.hadoop.hbase.util.ManualEnvironmentEdge;
import org.apache.hadoop.hbase.util.Pair;
import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
import org.junit.AfterClass;
@@ -381,31 +382,30 @@ public class TestMetaTableAccessor {
long seqNum1 = random.nextLong();
long seqNum100 = random.nextLong();
-
- Table meta = MetaTableAccessor.getMetaHTable(connection);
- try {
- MetaTableAccessor.updateRegionLocation(connection, primary, serverName0, seqNum0, -1);
+ try (Table meta = MetaTableAccessor.getMetaHTable(connection)) {
+ MetaTableAccessor.updateRegionLocation(connection, primary, serverName0, seqNum0,
+ EnvironmentEdgeManager.currentTime());
// assert that the server, startcode and seqNum columns are there for the primary region
assertMetaLocation(meta, primary.getRegionName(), serverName0, seqNum0, 0, true);
// add replica = 1
- MetaTableAccessor.updateRegionLocation(connection, replica1, serverName1, seqNum1, -1);
+ MetaTableAccessor.updateRegionLocation(connection, replica1, serverName1, seqNum1,
+ EnvironmentEdgeManager.currentTime());
// check whether the primary is still there
assertMetaLocation(meta, primary.getRegionName(), serverName0, seqNum0, 0, true);
// now check for replica 1
assertMetaLocation(meta, primary.getRegionName(), serverName1, seqNum1, 1, true);
// add replica = 100
- MetaTableAccessor.updateRegionLocation(connection, replica100, serverName100, seqNum100, -1);
+ MetaTableAccessor.updateRegionLocation(connection, replica100, serverName100, seqNum100,
+ EnvironmentEdgeManager.currentTime());
// check whether the primary is still there
assertMetaLocation(meta, primary.getRegionName(), serverName0, seqNum0, 0, true);
// check whether the replica 1 is still there
assertMetaLocation(meta, primary.getRegionName(), serverName1, seqNum1, 1, true);
// now check for replica 100
assertMetaLocation(meta, primary.getRegionName(), serverName100, seqNum100, 100, true);
- } finally {
- meta.close();
}
}
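
The rewritten test shows the same pattern on the update path: the meta Table is managed with try-with-resources, and updateRegionLocation takes a real wall-clock timestamp where the old code passed -1. A minimal sketch follows; the class name UpdateLocationSketch and its parameters are illustrative, and getMetaHTable / updateRegionLocation are used here exactly as the test uses them (they are test-facing accessors, not necessarily general client API).

import java.io.IOException;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;

public class UpdateLocationSketch {
  static void recordLocation(Connection connection, RegionInfo region, ServerName sn,
      long openSeqNum) throws IOException {
    // try-with-resources replaces the old explicit meta.close() in a finally block.
    try (Table meta = MetaTableAccessor.getMetaHTable(connection)) {
      // The last argument is now a concrete timestamp, not the -1 sentinel.
      MetaTableAccessor.updateRegionLocation(connection, region, sn, openSeqNum,
          EnvironmentEdgeManager.currentTime());
      // ... in the test above, assertMetaLocation(...) checks against "meta" follow here ...
    }
  }
}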
@@ -490,9 +490,7 @@ public class TestMetaTableAccessor {
.setReplicaId(0)
.build();
-
- Table meta = MetaTableAccessor.getMetaHTable(connection);
- try {
+ try (Table meta = MetaTableAccessor.getMetaHTable(connection)) {
List<RegionInfo> regionInfos = Lists.newArrayList(parent);
MetaTableAccessor.addRegionsToMeta(connection, regionInfos, 3);
@@ -502,8 +500,6 @@ public class TestMetaTableAccessor {
assertEmptyMetaLocation(meta, splitA.getRegionName(), 2);
assertEmptyMetaLocation(meta, splitB.getRegionName(), 1);
assertEmptyMetaLocation(meta, splitB.getRegionName(), 2);
- } finally {
- meta.close();
}
}
@@ -535,18 +531,14 @@ public class TestMetaTableAccessor {
.setReplicaId(0)
.build();
- Table meta = MetaTableAccessor.getMetaHTable(connection);
- try {
+ try (Table meta = MetaTableAccessor.getMetaHTable(connection)) {
List<RegionInfo> regionInfos = Lists.newArrayList(parentA, parentB);
MetaTableAccessor.addRegionsToMeta(connection, regionInfos, 3);
- MetaTableAccessor.mergeRegions(connection, merged, parentA, parentB, serverName0, 3,
- HConstants.LATEST_TIMESTAMP);
+ MetaTableAccessor.mergeRegions(connection, merged, parentA, parentB, serverName0, 3);
assertEmptyMetaLocation(meta, merged.getRegionName(), 1);
assertEmptyMetaLocation(meta, merged.getRegionName(), 2);
- } finally {
- meta.close();
}
}
@@ -609,8 +601,7 @@ public class TestMetaTableAccessor {
.build();
ServerName sn = ServerName.valueOf("bar", 0, 0);
- Table meta = MetaTableAccessor.getMetaHTable(connection);
- try {
+ try (Table meta = MetaTableAccessor.getMetaHTable(connection)) {
List<RegionInfo> regionInfos = Lists.newArrayList(regionInfo);
MetaTableAccessor.addRegionsToMeta(connection, regionInfos, 1);
@@ -634,8 +625,6 @@ public class TestMetaTableAccessor {
assertEquals(masterSystemTime, serverCell.getTimestamp());
assertEquals(masterSystemTime, startCodeCell.getTimestamp());
assertEquals(masterSystemTime, seqNumCell.getTimestamp());
- } finally {
- meta.close();
}
}
@@ -667,8 +656,7 @@ public class TestMetaTableAccessor {
.build();
ServerName sn = ServerName.valueOf("bar", 0, 0);
- Table meta = MetaTableAccessor.getMetaHTable(connection);
- try {
+ try (Table meta = MetaTableAccessor.getMetaHTable(connection)) {
List<RegionInfo> regionInfos = Lists.newArrayList(regionInfoA, regionInfoB);
MetaTableAccessor.addRegionsToMeta(connection, regionInfos, 1);
@@ -689,9 +677,17 @@ public class TestMetaTableAccessor {
assertNotNull(serverCell);
assertEquals(serverNameTime, serverCell.getTimestamp());
- // now merge the regions, effectively deleting the rows for region a and b.
- MetaTableAccessor.mergeRegions(connection, mergedRegionInfo,
- regionInfoA, regionInfoB, sn, 1, masterSystemTime);
+ ManualEnvironmentEdge edge = new ManualEnvironmentEdge();
+ edge.setValue(masterSystemTime);
+ EnvironmentEdgeManager.injectEdge(edge);
+ try {
+ // now merge the regions, effectively deleting the rows for region a and b.
+ MetaTableAccessor.mergeRegions(connection, mergedRegionInfo, regionInfoA, regionInfoB, sn,
+ 1);
+ } finally {
+ EnvironmentEdgeManager.reset();
+ }
+
result = meta.get(get);
serverCell = result.getColumnLatestCell(HConstants.CATALOG_FAMILY,
@@ -703,8 +699,6 @@ public class TestMetaTableAccessor {
assertNull(serverCell);
assertNull(startCodeCell);
assertNull(seqNumCell);
- } finally {
- meta.close();
}
}
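
Because mergeRegions no longer accepts a masterSystemTime argument, the test pins the clock instead: it injects a ManualEnvironmentEdge so the meta mutations written by the merge carry a deterministic timestamp it can assert on. A minimal sketch of that technique follows; the class name FixedClockMergeSketch and its parameters are illustrative, and the trailing 1 is the region replication count as used in the test.

import java.io.IOException;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.ManualEnvironmentEdge;

public class FixedClockMergeSketch {
  // Pins the clock so the meta rows written by mergeRegions get "fixedTime"
  // as their timestamp, then restores the default clock.
  static void mergeAtFixedTime(Connection connection, RegionInfo merged, RegionInfo regionA,
      RegionInfo regionB, ServerName sn, long fixedTime) throws IOException {
    ManualEnvironmentEdge edge = new ManualEnvironmentEdge();
    edge.setValue(fixedTime);
    EnvironmentEdgeManager.injectEdge(edge);
    try {
      MetaTableAccessor.mergeRegions(connection, merged, regionA, regionB, sn, 1);
    } finally {
      EnvironmentEdgeManager.reset(); // always restore the real clock
    }
  }
}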
http://git-wip-us.apache.org/repos/asf/hbase/blob/ba5fb53d/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/MockMasterServices.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/MockMasterServices.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/MockMasterServices.java
index 9bd4443..91f123f 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/MockMasterServices.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/MockMasterServices.java
@@ -308,14 +308,6 @@ public class MockMasterServices extends MockNoopMasterServices {
}
@Override
- public void start() throws IOException {
- }
-
- @Override
- public void stop() {
- }
-
- @Override
public void updateRegionLocation(RegionStates.RegionStateNode regionNode) throws IOException {
}
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/ba5fb53d/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestGetClosestAtOrBefore.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestGetClosestAtOrBefore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestGetClosestAtOrBefore.java
index 0db73ba..3da0341 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestGetClosestAtOrBefore.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestGetClosestAtOrBefore.java
@@ -46,6 +46,7 @@ import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.testclassification.RegionServerTests;
import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.wal.WAL;
import org.junit.ClassRule;
import org.junit.Rule;
@@ -93,65 +94,65 @@ public class TestGetClosestAtOrBefore {
HRegion mr = HBaseTestingUtility.createRegionAndWAL(HRegionInfo.FIRST_META_REGIONINFO,
rootdir, this.conf, metaBuilder.build());
try {
- // Write rows for three tables 'A', 'B', and 'C'.
- for (char c = 'A'; c < 'D'; c++) {
- HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("" + c));
- final int last = 128;
- final int interval = 2;
- for (int i = 0; i <= last; i += interval) {
- HRegionInfo hri = new HRegionInfo(htd.getTableName(),
- i == 0? HConstants.EMPTY_BYTE_ARRAY: Bytes.toBytes((byte)i),
- i == last? HConstants.EMPTY_BYTE_ARRAY: Bytes.toBytes((byte)i + interval));
-
- Put put = MetaTableAccessor.makePutFromRegionInfo(hri);
- put.setDurability(Durability.SKIP_WAL);
- mr.put(put);
+ // Write rows for three tables 'A', 'B', and 'C'.
+ for (char c = 'A'; c < 'D'; c++) {
+ HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("" + c));
+ final int last = 128;
+ final int interval = 2;
+ for (int i = 0; i <= last; i += interval) {
+ HRegionInfo hri = new HRegionInfo(htd.getTableName(),
+ i == 0 ? HConstants.EMPTY_BYTE_ARRAY : Bytes.toBytes((byte) i),
+ i == last ? HConstants.EMPTY_BYTE_ARRAY : Bytes.toBytes((byte) i + interval));
+
+ Put put =
+ MetaTableAccessor.makePutFromRegionInfo(hri, EnvironmentEdgeManager.currentTime());
+ put.setDurability(Durability.SKIP_WAL);
+ mr.put(put);
+ }
}
- }
- InternalScanner s = mr.getScanner(new Scan());
- try {
- List<Cell> keys = new ArrayList<>();
- while (s.next(keys)) {
- LOG.info(Objects.toString(keys));
- keys.clear();
+ InternalScanner s = mr.getScanner(new Scan());
+ try {
+ List<Cell> keys = new ArrayList<>();
+ while (s.next(keys)) {
+ LOG.info(Objects.toString(keys));
+ keys.clear();
+ }
+ } finally {
+ s.close();
}
- } finally {
- s.close();
- }
- findRow(mr, 'C', 44, 44);
- findRow(mr, 'C', 45, 44);
- findRow(mr, 'C', 46, 46);
- findRow(mr, 'C', 43, 42);
- mr.flush(true);
- findRow(mr, 'C', 44, 44);
- findRow(mr, 'C', 45, 44);
- findRow(mr, 'C', 46, 46);
- findRow(mr, 'C', 43, 42);
- // Now delete 'C' and make sure I don't get entries from 'B'.
- byte [] firstRowInC = HRegionInfo.createRegionName(
- TableName.valueOf("" + 'C'),
+ findRow(mr, 'C', 44, 44);
+ findRow(mr, 'C', 45, 44);
+ findRow(mr, 'C', 46, 46);
+ findRow(mr, 'C', 43, 42);
+ mr.flush(true);
+ findRow(mr, 'C', 44, 44);
+ findRow(mr, 'C', 45, 44);
+ findRow(mr, 'C', 46, 46);
+ findRow(mr, 'C', 43, 42);
+ // Now delete 'C' and make sure I don't get entries from 'B'.
+ byte[] firstRowInC = HRegionInfo.createRegionName(TableName.valueOf("" + 'C'),
HConstants.EMPTY_BYTE_ARRAY, HConstants.ZEROES, false);
- Scan scan = new Scan(firstRowInC);
- s = mr.getScanner(scan);
- try {
- List<Cell> keys = new ArrayList<>();
+ Scan scan = new Scan(firstRowInC);
+ s = mr.getScanner(scan);
+ try {
+ List<Cell> keys = new ArrayList<>();
while (s.next(keys)) {
- mr.delete(new Delete(CellUtil.cloneRow(keys.get(0))));
- keys.clear();
+ mr.delete(new Delete(CellUtil.cloneRow(keys.get(0))));
+ keys.clear();
+ }
+ } finally {
+ s.close();
}
- } finally {
- s.close();
- }
- // Assert we get null back (pass -1).
- findRow(mr, 'C', 44, -1);
- findRow(mr, 'C', 45, -1);
- findRow(mr, 'C', 46, -1);
- findRow(mr, 'C', 43, -1);
- mr.flush(true);
- findRow(mr, 'C', 44, -1);
- findRow(mr, 'C', 45, -1);
- findRow(mr, 'C', 46, -1);
- findRow(mr, 'C', 43, -1);
+ // Assert we get null back (pass -1).
+ findRow(mr, 'C', 44, -1);
+ findRow(mr, 'C', 45, -1);
+ findRow(mr, 'C', 46, -1);
+ findRow(mr, 'C', 43, -1);
+ mr.flush(true);
+ findRow(mr, 'C', 44, -1);
+ findRow(mr, 'C', 45, -1);
+ findRow(mr, 'C', 46, -1);
+ findRow(mr, 'C', 43, -1);
} finally {
HBaseTestingUtility.closeRegionAndWAL(mr);
}