hbase-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From nspiegelb...@apache.org
Subject svn commit: r1212713 - in /hbase/branches/0.89-fb/src: main/java/org/apache/hadoop/hbase/io/hfile/ main/java/org/apache/hadoop/hbase/regionserver/ test/java/org/apache/hadoop/hbase/regionserver/
Date Sat, 10 Dec 2011 01:56:37 GMT
Author: nspiegelberg
Date: Sat Dec 10 01:56:36 2011
New Revision: 1212713

URL: http://svn.apache.org/viewvc?rev=1212713&view=rev
Log:
[master] Regions can have favored nodes which are passed to DFSClient

Summary:
HBase-based block placement depends on being able to place each block of
a single region on a specific set of hosts. This change adds a favored
node field to HRegions, and passes those nodes to the filesystem
whenever a store file is written.

Test Plan:
New test suite TestRegionFavoredNodes. It uses hostname and port taken
directly from the datanodes, whereas practical usage will likely use
only the hostname of the region server. However, the HDFS test case
TestFavoredNodes#testPartiallySpecifiedFavoredNodes covers the case
where port is unspecified.

Reviewers: kranganathan, kannan

Reviewed By: kranganathan

CC: hbase-eng@lists, kranganathan, cgist, nspiegelberg

Differential Revision: 368588

Task ID: 735408

Added:
    hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionFavoredNodes.java
Modified:
    hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/io/hfile/AbstractHFileWriter.java
    hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java
    hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterV1.java
    hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterV2.java
    hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
    hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java
    hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java

Modified: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/io/hfile/AbstractHFileWriter.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/io/hfile/AbstractHFileWriter.java?rev=1212713&r1=1212712&r2=1212713&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/io/hfile/AbstractHFileWriter.java
(original)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/io/hfile/AbstractHFileWriter.java
Sat Dec 10 01:56:36 2011
@@ -22,6 +22,8 @@ package org.apache.hadoop.hbase.io.hfile
 
 import java.io.DataOutput;
 import java.io.IOException;
+import java.lang.reflect.InvocationTargetException;
+import java.net.InetSocketAddress;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.List;
@@ -36,8 +38,10 @@ import org.apache.hadoop.hbase.io.hfile.
 import org.apache.hadoop.hbase.regionserver.StoreFile;
 import org.apache.hadoop.hbase.regionserver.metrics.SchemaConfigured;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.io.RawComparator;
 import org.apache.hadoop.io.Writable;
+import org.apache.hadoop.util.Progressable;
 
 /**
  * Common functionality needed by all versions of {@link HFile} writers.
@@ -279,6 +283,35 @@ public abstract class AbstractHFileWrite
         null);
   }
 
+  /** A helper method to create HFile output streams in constructors */
+  protected static FSDataOutputStream createOutputStream(Configuration conf,
+      FileSystem fs, Path path, int bytesPerChecksum,
+      InetSocketAddress[] favoredNodes) throws IOException {
+    if (fs instanceof DistributedFileSystem) {
+      // Try to use the favoredNodes version via reflection to allow backwards-
+      // compatibility.
+      try {
+        return (FSDataOutputStream) DistributedFileSystem.class
+            .getDeclaredMethod("create", Path.class, FsPermission.class,
+                boolean.class, int.class, short.class, long.class, int.class,
+                Progressable.class, InetSocketAddress[].class)
+            .invoke(fs, path, FsPermission.getDefault(), true,
+                fs.getConf().getInt("io.file.buffer.size", 4096),
+                fs.getDefaultReplication(), fs.getDefaultBlockSize(),
+                bytesPerChecksum, null, favoredNodes);
+      } catch (InvocationTargetException ite) {
+        // Function was properly called, but threw its own exception.
+        throw new IOException(ite.getCause());
+      } catch (Exception e) {
+        // Ignore all other exceptions related to reflection failure.
+      }
+    }
+    return fs.create(path, FsPermission.getDefault(), true,
+        fs.getConf().getInt("io.file.buffer.size", 4096),
+        fs.getDefaultReplication(), fs.getDefaultBlockSize(), bytesPerChecksum,
+        null);
+  }
+
   /** Initializes the block cache to use for cache-on-write */
   protected void initBlockCache() {
     if (blockCache == null) {

Modified: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java?rev=1212713&r1=1212712&r2=1212713&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java (original)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java Sat Dec
10 01:56:36 2011
@@ -22,6 +22,7 @@ package org.apache.hadoop.hbase.io.hfile
 import java.io.Closeable;
 import java.io.DataInput;
 import java.io.IOException;
+import java.net.InetSocketAddress;
 import java.nio.ByteBuffer;
 import java.util.ArrayList;
 import java.util.List;
@@ -254,6 +255,11 @@ public class HFile {
         final KeyComparator comparator) throws IOException;
 
     public abstract Writer createWriter(FileSystem fs, Path path,
+        int blockSize, int bytesPerChecksum, Compression.Algorithm compress,
+        final KeyComparator comparator, InetSocketAddress[] favoredNodes)
+        throws IOException;
+
+    public abstract Writer createWriter(FileSystem fs, Path path,
         int blockSize, int bytesPerChecksum, String compress,
         final KeyComparator comparator) throws IOException;
 

Modified: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterV1.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterV1.java?rev=1212713&r1=1212712&r2=1212713&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterV1.java
(original)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterV1.java
Sat Dec 10 01:56:36 2011
@@ -23,6 +23,7 @@ import java.io.ByteArrayOutputStream;
 import java.io.DataOutputStream;
 import java.io.IOException;
 import java.io.OutputStream;
+import java.net.InetSocketAddress;
 import java.nio.ByteBuffer;
 import java.util.ArrayList;
 import java.util.List;
@@ -97,6 +98,15 @@ public class HFileWriterV1 extends Abstr
 
     @Override
     public Writer createWriter(FileSystem fs, Path path, int blockSize,
+        int bytesPerChecksum, Compression.Algorithm compressAlgo,
+        final KeyComparator comparator, InetSocketAddress[] favoredNodes)
+        throws IOException {
+      return new HFileWriterV1(conf, fs, path, blockSize, bytesPerChecksum,
+          compressAlgo, comparator, favoredNodes);
+    }
+
+    @Override
+    public Writer createWriter(FileSystem fs, Path path, int blockSize,
         int bytesPerChecksum, String compressAlgoName,
         final KeyComparator comparator) throws IOException {
       return new HFileWriterV1(conf, fs, path, blockSize, bytesPerChecksum,
@@ -145,6 +155,15 @@ public class HFileWriterV1 extends Abstr
         blockSize, compress, comparator);
   }
 
+  /** Constructor that takes a path, creates and closes the output stream. */
+  public HFileWriterV1(Configuration conf, FileSystem fs, Path path,
+      int blockSize, int bytesPerChecksum, Compression.Algorithm compress,
+      final KeyComparator comparator, InetSocketAddress[] favoredNodes)
+      throws IOException {
+    super(conf, createOutputStream(conf, fs, path, bytesPerChecksum,
+        favoredNodes), path, blockSize, compress, comparator);
+  }
+
   /** Constructor that takes a stream. */
   public HFileWriterV1(Configuration conf,
       final FSDataOutputStream outputStream, final int blockSize,

Modified: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterV2.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterV2.java?rev=1212713&r1=1212712&r2=1212713&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterV2.java
(original)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterV2.java
Sat Dec 10 01:56:36 2011
@@ -23,6 +23,7 @@ package org.apache.hadoop.hbase.io.hfile
 import java.io.DataOutput;
 import java.io.DataOutputStream;
 import java.io.IOException;
+import java.net.InetSocketAddress;
 import java.util.ArrayList;
 import java.util.List;
 
@@ -95,6 +96,15 @@ public class HFileWriterV2 extends Abstr
 
     @Override
     public Writer createWriter(FileSystem fs, Path path, int blockSize,
+        int bytesPerChecksum, Compression.Algorithm compress,
+        final KeyComparator comparator, InetSocketAddress[] favoredNodes)
+        throws IOException {
+      return new HFileWriterV2(conf, fs, path, blockSize, bytesPerChecksum,
+          compress, comparator, favoredNodes);
+    }
+
+    @Override
+    public Writer createWriter(FileSystem fs, Path path, int blockSize,
         int bytesPerChecksum, String compress, final KeyComparator comparator)
         throws IOException {
       return new HFileWriterV2(conf, fs, path, blockSize, bytesPerChecksum,
@@ -144,6 +154,16 @@ public class HFileWriterV2 extends Abstr
     finishInit(conf);
   }
 
+  /** Constructor that takes a path, creates and closes the output stream. */
+  public HFileWriterV2(Configuration conf, FileSystem fs, Path path,
+      int blockSize, int bytesPerChecksum, Compression.Algorithm compressAlgo,
+      final KeyComparator comparator, InetSocketAddress[] favoredNodes)
+      throws IOException {
+    super(conf, createOutputStream(conf, fs, path, bytesPerChecksum,
+        favoredNodes), path, blockSize, compressAlgo, comparator);
+    finishInit(conf);
+  }
+
   /** Constructor that takes a stream. */
   public HFileWriterV2(final Configuration conf,
       final FSDataOutputStream outputStream, final int blockSize,

Modified: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java?rev=1212713&r1=1212712&r2=1212713&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
(original)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
Sat Dec 10 01:56:36 2011
@@ -24,6 +24,7 @@ import java.io.IOException;
 import java.io.InterruptedIOException;
 import java.io.UnsupportedEncodingException;
 import java.lang.reflect.Constructor;
+import java.net.InetSocketAddress;
 import java.text.ParseException;
 import java.util.AbstractList;
 import java.util.ArrayList;
@@ -206,6 +207,10 @@ public class HRegion implements HeapSize
     return minimumReadPoint;
   }
 
+  // When writing store files for this region, replicas will preferably be
+  // placed on these nodes, if non-null.
+  private InetSocketAddress[] favoredNodes = null;
+
   /*
    * Data structure of write state flags used coordinating flushes,
    * compactions and closes.
@@ -1525,9 +1530,33 @@ public class HRegion implements HeapSize
     }
   }
 
+  /**
+   * @return the nodes on which to place replicas of all store files, or null if
+   * there are no favored nodes.
+   */
+  public InetSocketAddress[] getFavoredNodes() {
+    return this.favoredNodes;
+  }
+
   //////////////////////////////////////////////////////////////////////////////
   // set() methods for client use.
   //////////////////////////////////////////////////////////////////////////////
+
+  /**
+   * Set the favored nodes on which to place replicas of all store files. The
+   * array can be null to set no preference for favored nodes, but elements of
+   * the array must not be null. Placement of replicas on favored nodes is best-
+   * effort only and the filesystem may choose different nodes.
+   * @param favoredNodes the favored nodes, or null
+   */
+  public void setFavoredNodes(InetSocketAddress[] favoredNodes) {
+    if (favoredNodes == null) {
+      this.favoredNodes = null;
+      return;
+    }
+    this.favoredNodes = Arrays.copyOf(favoredNodes, favoredNodes.length);
+  }
+
   /**
    * @param delete delete object
    * @param lockid existing lock id, or null for grab a lock

Modified: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java?rev=1212713&r1=1212712&r2=1212713&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java (original)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java Sat
Dec 10 01:56:36 2011
@@ -591,7 +591,8 @@ public class Store extends SchemaConfigu
   throws IOException {
     return StoreFile.createWriter(this.fs, region.getTmpDir(), this.blocksize,
         this.compression, this.comparator, this.conf,
-        this.family.getBloomFilterType(), this.family.getBloomFilterErrorRate(), maxKeyCount);
+        this.family.getBloomFilterType(), this.family.getBloomFilterErrorRate(),
+        maxKeyCount, region.getFavoredNodes());
   }
 
   /*

Modified: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java?rev=1212713&r1=1212712&r2=1212713&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java
(original)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java
Sat Dec 10 01:56:36 2011
@@ -24,6 +24,7 @@ import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.lang.management.ManagementFactory;
 import java.lang.management.MemoryUsage;
+import java.net.InetSocketAddress;
 import java.nio.ByteBuffer;
 import java.util.Arrays;
 import java.util.Collection;
@@ -631,7 +632,7 @@ public class StoreFile {
           long maxKeyCount)
   throws IOException {
       return createWriter(fs, dir, blocksize, algorithm, c, conf, bloomType,
-        BloomFilterFactory.getErrorRate(conf), maxKeyCount);
+        BloomFilterFactory.getErrorRate(conf), maxKeyCount, null);
   }
 
   /**
@@ -647,6 +648,7 @@ public class StoreFile {
    * @param bloomType column family setting for bloom filters
    * @param bloomErrorRate column family setting for bloom filter error rate
    * @param maxKeyCount estimated maximum number of keys we expect to add
+   * @param favoredNodes if using DFS, try to place replicas on these nodes
    * @return HFile.Writer
    * @throws IOException
    */
@@ -658,7 +660,8 @@ public class StoreFile {
                                               final Configuration conf,
                                               BloomType bloomType,
                                               float bloomErrorRate,
-                                              long maxKeyCount)
+                                              long maxKeyCount,
+                                              InetSocketAddress[] favoredNodes)
       throws IOException {
 
     if (!fs.exists(dir)) {
@@ -671,7 +674,8 @@ public class StoreFile {
 
     return new Writer(fs, path, blocksize,
         algorithm == null? HFile.DEFAULT_COMPRESSION_ALGORITHM: algorithm,
-        conf, c == null ? KeyValue.COMPARATOR: c, bloomType, bloomErrorRate, maxKeyCount);
+        conf, c == null ? KeyValue.COMPARATOR: c, bloomType, bloomErrorRate,
+        maxKeyCount, favoredNodes);
   }
 
   /**
@@ -780,8 +784,16 @@ public class StoreFile {
             Compression.Algorithm compress, final Configuration conf,
             final KVComparator comparator, BloomType bloomType,  long maxKeys)
             throws IOException {
+     this(fs, path, blocksize, compress, conf, comparator, bloomType,
+          BloomFilterFactory.getErrorRate(conf), maxKeys, null);
+    }
+
+    public Writer(FileSystem fs, Path path, int blocksize,
+        Compression.Algorithm compress, final Configuration conf,
+        final KVComparator comparator, BloomType bloomType,
+        float bloomErrorRate, long maxKeys) throws IOException {
       this(fs, path, blocksize, compress, conf, comparator, bloomType,
-          BloomFilterFactory.getErrorRate(conf), maxKeys);
+          bloomErrorRate, maxKeys, null);
     }
 
     /**
@@ -796,16 +808,18 @@ public class StoreFile {
      * @param bloomErrorRate error rate for bloom filter
      * @param maxKeys the expected maximum number of keys to be added. Was used
      *        for Bloom filter size in {@link HFile} format version 1.
+     * @param favoredNodes if using DFS, try to place replicas on these nodes
      * @throws IOException problem writing to FS
      */
     public Writer(FileSystem fs, Path path, int blocksize,
         Compression.Algorithm compress, final Configuration conf,
-        final KVComparator comparator, BloomType bloomType, float bloomErrorRate, long maxKeys)
+        final KVComparator comparator, BloomType bloomType,
+        float bloomErrorRate, long maxKeys, InetSocketAddress[] favoredNodes)
         throws IOException {
 
       writer = HFile.getWriterFactory(conf).createWriter(
           fs, path, blocksize, HFile.getBytesPerChecksum(conf, fs.getConf()),
-          compress, comparator.getRawComparator());
+          compress, comparator.getRawComparator(), favoredNodes);
 
       this.kvComparator = comparator;
 

Added: hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionFavoredNodes.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionFavoredNodes.java?rev=1212713&view=auto
==============================================================================
--- hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionFavoredNodes.java
(added)
+++ hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionFavoredNodes.java
Sat Dec 10 01:56:36 2011
@@ -0,0 +1,103 @@
+package org.apache.hadoop.hbase.regionserver;
+
+import java.net.InetSocketAddress;
+import java.net.URI;
+import java.util.List;
+
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
+import org.apache.hadoop.hdfs.server.datanode.DataNode;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import static org.junit.Assert.*;
+
+/**
+ * Tests the ability to specify favored nodes for a region.
+ */
+public class TestRegionFavoredNodes {
+
+  private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
+  private static HTable table;
+  private static final byte[] TABLE_NAME = Bytes.toBytes("table");
+  private static final byte[] COLUMN_FAMILY = Bytes.toBytes("family");
+  private static final int REPLICATION = 3;
+  private static final int REGION_SERVERS = 6;
+  private static final int FLUSHES = 3;
+
+  @BeforeClass
+  public static void setUpBeforeClass() throws Exception {
+    TEST_UTIL.startMiniCluster(REGION_SERVERS);
+    table = TEST_UTIL.createTable(TABLE_NAME, COLUMN_FAMILY);
+    int numRegions = TEST_UTIL.createMultiRegions(table, COLUMN_FAMILY);
+    TEST_UTIL.waitUntilAllRegionsAssigned(numRegions);
+  }
+
+  @AfterClass
+  public static void tearDownAfterClass() throws Exception {
+    TEST_UTIL.shutdownMiniCluster();
+  }
+
+  @Test
+  public void testFavoredNodes() throws Exception {
+    // Get the addresses of the datanodes in the cluster.
+    InetSocketAddress[] nodes = new InetSocketAddress[REGION_SERVERS];
+    List<DataNode> datanodes = TEST_UTIL.getDFSCluster().getDataNodes();
+    for (int i = 0; i < REGION_SERVERS; i++) {
+      nodes[i] = datanodes.get(i).getSelfAddr();
+    }
+
+    String[] nodeNames = new String[REGION_SERVERS];
+    for (int i = 0; i < REGION_SERVERS; i++) {
+      nodeNames[i] = nodes[i].getAddress().getHostAddress() + ":" +
+          nodes[i].getPort();
+    }
+
+    // For each region, choose some datanodes as the favored nodes then assign
+    // them as favored nodes through the HRegion.
+    InetSocketAddress[] favoredNodes = new InetSocketAddress[REPLICATION];
+    List<HRegion> regions = TEST_UTIL.getHBaseCluster().getRegions(TABLE_NAME);
+    for (int i = 0; i < regions.size(); i++) {
+      for (int j = 0; j < REPLICATION; j++) {
+        favoredNodes[j] = nodes[(i + j) % REGION_SERVERS];
+      }
+      regions.get(i).setFavoredNodes(favoredNodes);
+    }
+
+    // Write some data to each region and flush. Repeat some number of times to
+    // get multiple files for each region.
+    for (int i = 0; i < FLUSHES; i++) {
+      TEST_UTIL.loadTable(table, COLUMN_FAMILY);
+      TEST_UTIL.flush();
+    }
+
+    // For each region, check the block locations of each file and ensure that
+    // they are consistent with the favored nodes for that region.
+    for (int i = 0; i < regions.size(); i++) {
+      HRegion region = regions.get(i);
+      List<String> files = region.getStoreFileList(new byte[][]{COLUMN_FAMILY});
+      for (String file : files) {
+        LocatedBlocks lbks = TEST_UTIL.getDFSCluster().getNameNode()
+            .getBlockLocations(new URI(file).getPath(), 0, Long.MAX_VALUE);
+
+        for (LocatedBlock lbk : lbks.getLocatedBlocks()) {
+          locations:
+          for (DatanodeInfo info : lbk.getLocations()) {
+            for (int j = 0; j < REPLICATION; j++) {
+              if (info.getName().equals(nodeNames[(i + j) % REGION_SERVERS])) {
+                continue locations;
+              }
+            }
+            // This block was at a location that was not a favored location.
+            fail("Block location " + info.getName() + " not a favored node");
+          }
+        }
+      }
+    }
+  }
+}



Mime
View raw message