hbase-commits mailing list archives

From li...@apache.org
Subject svn commit: r1461253 - in /hbase/branches/0.89-fb/src: main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat.java test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat.java
Date Tue, 26 Mar 2013 18:18:38 GMT
Author: liyin
Date: Tue Mar 26 18:18:38 2013
New Revision: 1461253

URL: http://svn.apache.org/r1461253
Log:
[HBASE-8194] Fixing the non-deterministic failure of TestHFileOutputFormat.

Author: manukranthk

Summary:
The test creates regions in the meta table and waits for the master to assign them to regionservers.
When the favored nodes were set after that assignment, the timeout was not a reliable way to
guarantee that they had actually been applied.
Setting the favored nodes up front, while the regions are created, instead ensures that the master
creates the regions with the selected favored nodes already assigned.
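
The essence of the change is in HBaseTestingUtility: the favored nodes are written to .META.
in the same Put that inserts each region's row, rather than patched in after assignment. A
minimal sketch of that idea, condensed from the new createMultiRegionsWithFavoredNodes below
(meta, startKeys, favNodes, and table are locals of that method; this is illustrative, not
the verbatim committed code):

    // Write the favored nodes in the same Put that inserts the region row,
    // so they are in place from the moment the master creates the region.
    for (int i = 0; i < startKeys.length; i++) {
      int j = (i + 1) % startKeys.length;  // last region wraps to the empty end key
      HRegionInfo hri = new HRegionInfo(table.getTableDescriptor(),
          startKeys[i], startKeys[j]);
      Put put = new Put(hri.getRegionName());
      put.add(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER,
          Writables.getBytes(hri));
      if (favNodes != null) {
        put.add(HConstants.CATALOG_FAMILY, HConstants.FAVOREDNODES_QUALIFIER,
            favNodes[i]);
      }
      meta.put(put);
    }

With the favored nodes present at creation time, the test no longer needs to sleep and wait
for the master's meta rescan to pick up a later update to the assignment plan.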

Test Plan: Unit Tests

Reviewers: liyintang, rshroff, adela

Reviewed By: rshroff

CC: hbase-eng@

Differential Revision: https://phabricator.fb.com/D728493

Task ID: 2138207

Modified:
    hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat.java
    hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
    hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat.java

Modified: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat.java?rev=1461253&r1=1461252&r2=1461253&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat.java (original)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat.java Tue Mar 26 18:18:38 2013
@@ -88,7 +88,7 @@ public class HFileOutputFormat extends F
     "hbase.hfileoutputformat.families.bloomfilter.typePerCF";
   
   static final String ENCODING_TYPE_PER_CF_KEY =
-  	"hbase.hfileoutputformat.families.encoding.typePerCF";
+    "hbase.hfileoutputformat.families.encoding.typePerCF";
 
   static final String TABLE_NAME = "hbase.hfileoutputformat.tablename";
   static final String UTF8 = "UTF-8";
@@ -110,7 +110,7 @@ public class HFileOutputFormat extends F
         Compression.Algorithm.NONE.getName());
     HTable tempTable = null;
     if (conf.get(TABLE_NAME) != null) {
-    	tempTable = new HTable(conf, conf.get(TABLE_NAME));
+      tempTable = new HTable(conf, conf.get(TABLE_NAME));
     }
     final HTable table = tempTable;
 
@@ -120,10 +120,10 @@ public class HFileOutputFormat extends F
     final Map<byte[], BloomType> bloomTypeMap = createFamilyBloomTypeMap(conf);
     
     final Map<byte[], HFileDataBlockEncoder> encoderTypeMap = 
-    	createFamilyDeltaEncodingMap(conf);
+      createFamilyDeltaEncodingMap(conf);
     
     final Pair<byte[][], byte[][]> startKeysAndFavoredNodes = 
-    	(table == null ? null : table.getStartKeysAndFavoredNodes());
+      (table == null ? null : table.getStartKeysAndFavoredNodes());
 
     return new RecordWriter<ImmutableBytesWritable, KeyValue>() {
       // Map of families to writers and how much has been output on the writer.
@@ -180,21 +180,21 @@ public class HFileOutputFormat extends F
       }
 
       private byte[] getFavoredNodesForKey(byte[] rowKey) {
-      	if (startKeysAndFavoredNodes == null) {
-      		return HConstants.EMPTY_BYTE_ARRAY;
-      	}
-      	byte[][] startKeys = startKeysAndFavoredNodes.getFirst();
-      	byte[][] favoredNodes = startKeysAndFavoredNodes.getSecond();
-      	if (startKeys == null || favoredNodes == null)
-    			return HConstants.EMPTY_BYTE_ARRAY;
-      	ConcurrentSkipListMap<byte [], byte[]> startKeysToFavoredNodes =
-      		new ConcurrentSkipListMap<byte[], byte[]>(Bytes.BYTES_COMPARATOR);
-      	for (int i=0; i<startKeys.length; i++) {
-      		if (startKeys[i] == null || favoredNodes[i] == null)
-      			return HConstants.EMPTY_BYTE_ARRAY;
-      		startKeysToFavoredNodes.put(startKeys[i], favoredNodes[i]);
-      	}
-      	return startKeysToFavoredNodes.floorEntry(rowKey).getValue();
+        if (startKeysAndFavoredNodes == null) {
+          return HConstants.EMPTY_BYTE_ARRAY;
+        }
+        byte[][] startKeys = startKeysAndFavoredNodes.getFirst();
+        byte[][] favoredNodes = startKeysAndFavoredNodes.getSecond();
+        if (startKeys == null || favoredNodes == null)
+          return HConstants.EMPTY_BYTE_ARRAY;
+        ConcurrentSkipListMap<byte [], byte[]> startKeysToFavoredNodes =
+          new ConcurrentSkipListMap<byte[], byte[]>(Bytes.BYTES_COMPARATOR);
+        for (int i=0; i<startKeys.length; i++) {
+          if (startKeys[i] == null || favoredNodes[i] == null)
+            return HConstants.EMPTY_BYTE_ARRAY;
+          startKeysToFavoredNodes.put(startKeys[i], favoredNodes[i]);
+        }
+        return startKeysToFavoredNodes.floorEntry(rowKey).getValue();
       }
 
       private void rollWriters() throws IOException {
@@ -229,13 +229,13 @@ public class HFileOutputFormat extends F
           bloomType = BloomType.NONE;
         }
         if (encoder == null) {
-        	encoder = new HFileDataBlockEncoderImpl(DataBlockEncoding.NONE, 
-        		DataBlockEncoding.NONE);
+          encoder = new HFileDataBlockEncoderImpl(DataBlockEncoding.NONE,
+            DataBlockEncoding.NONE);
         }
 
         LOG.info("Using " + encoder.getEncodingInCache() + " in cache and " +
-        	encoder.getEncodingOnDisk() + " on disk for the column family " +
-        	Bytes.toString(family));
+          encoder.getEncodingOnDisk() + " on disk for the column family " +
+          Bytes.toString(family));
 
         /* new bloom filter does not require maxKeys. */
         int maxKeys = 0;
@@ -351,8 +351,8 @@ public class HFileOutputFormat extends F
    */
   public static void configAsMapOutputFormat(Job job, HTable table) throws IOException {
     LOG.warn("Set up the HFileOutputFormat as MapperOutputFormat." +
-    		"It is the mapper task's responsibility to make sure that each mapper emits values " +
-    		"for one region only and in a sorted order. !");
+        "It is the mapper task's responsibility to make sure that each mapper emits values " +
+        "for one region only and in a sorted order. !");
     Configuration conf = job.getConfiguration();
     if (!KeyValue.class.equals(job.getMapOutputValueClass())) {
       LOG.error("Only support the KeyValue.class as MapOutputValueClass so far!");
@@ -569,7 +569,7 @@ public class HFileOutputFormat extends F
 
   protected static void configureDeltaEncoding(HTable table, Configuration conf)
   throws IOException {
-  	HTableDescriptor tableDescriptor = table.getTableDescriptor();
+    HTableDescriptor tableDescriptor = table.getTableDescriptor();
     if (tableDescriptor == null){
       return;
     }
@@ -587,11 +587,11 @@ public class HFileOutputFormat extends F
       encodingTypePerCFConfigValue.append('=');
       encodingTypePerCFConfigValue.append(
           URLEncoder.encode(familyDescriptor.getDataBlockEncodingOnDisk().toString(),
-          	UTF8));
+            UTF8));
       encodingTypePerCFConfigValue.append(':');
       encodingTypePerCFConfigValue.append(
           URLEncoder.encode(familyDescriptor.getDataBlockEncoding().toString(),
-          	UTF8));
+            UTF8));
     }
 
     conf.set(ENCODING_TYPE_PER_CF_KEY, encodingTypePerCFConfigValue.toString());
@@ -613,13 +613,13 @@ public class HFileOutputFormat extends F
       }
 
       try {
-      	String[] encodingsForFamily =
-      		URLDecoder.decode(familySplit[1], UTF8).split(":");
-      	HFileDataBlockEncoder encoder = new HFileDataBlockEncoderImpl(
-      		DataBlockEncoding.valueOf(URLDecoder.decode(encodingsForFamily[0], UTF8)), 
-      		DataBlockEncoding.valueOf(URLDecoder.decode(encodingsForFamily[1], UTF8)));
-      	encodingTypeMap.put(
-      		URLDecoder.decode(familySplit[0], UTF8).getBytes(),
+        String[] encodingsForFamily =
+          URLDecoder.decode(familySplit[1], UTF8).split(":");
+        HFileDataBlockEncoder encoder = new HFileDataBlockEncoderImpl(
+          DataBlockEncoding.valueOf(URLDecoder.decode(encodingsForFamily[0], UTF8)),
+          DataBlockEncoding.valueOf(URLDecoder.decode(encodingsForFamily[1], UTF8)));
+        encodingTypeMap.put(
+          URLDecoder.decode(familySplit[0], UTF8).getBytes(),
           encoder);
       } catch (UnsupportedEncodingException e) {
         // will not happen with UTF-8 encoding
@@ -630,9 +630,9 @@ public class HFileOutputFormat extends F
   }
 
   protected static void configureFavoredNodes(HTable table, Configuration conf) {
-  	if (table.getTableName() != null) {
-  		conf.set(TABLE_NAME, Bytes.toString(table.getTableName()));
-  	}
-  	// The rest of the stuff will be taken care of by the RecordWriter
+    if (table.getTableName() != null) {
+      conf.set(TABLE_NAME, Bytes.toString(table.getTableName()));
+    }
+    // The rest of the stuff will be taken care of by the RecordWriter
   }
 }

Modified: hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java?rev=1461253&r1=1461252&r2=1461253&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java (original)
+++ hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java Tue Mar 26 18:18:38 2013
@@ -81,6 +81,7 @@ import org.apache.hadoop.hbase.regionser
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread;
+import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.hbase.util.RegionSplitter;
 import org.apache.hadoop.hbase.util.Threads;
 import org.apache.hadoop.hbase.util.Writables;
@@ -745,23 +746,36 @@ public class HBaseTestingUtility {
   public int createMultiRegions(final Configuration c, final HTable table,
       final byte[] columnFamily)
   throws IOException {
+    return createMultiRegions(c, table, columnFamily, getTmpKeys());
+  }
+
+  public byte[][] getTmpKeys() {
     byte[][] KEYS = {
-      HConstants.EMPTY_BYTE_ARRAY, Bytes.toBytes("bbb"),
-      Bytes.toBytes("ccc"), Bytes.toBytes("ddd"), Bytes.toBytes("eee"),
-      Bytes.toBytes("fff"), Bytes.toBytes("ggg"), Bytes.toBytes("hhh"),
-      Bytes.toBytes("iii"), Bytes.toBytes("jjj"), Bytes.toBytes("kkk"),
-      Bytes.toBytes("lll"), Bytes.toBytes("mmm"), Bytes.toBytes("nnn"),
-      Bytes.toBytes("ooo"), Bytes.toBytes("ppp"), Bytes.toBytes("qqq"),
-      Bytes.toBytes("rrr"), Bytes.toBytes("sss"), Bytes.toBytes("ttt"),
-      Bytes.toBytes("uuu"), Bytes.toBytes("vvv"), Bytes.toBytes("www"),
-      Bytes.toBytes("xxx"), Bytes.toBytes("yyy")
-    };
-    return createMultiRegions(c, table, columnFamily, KEYS);
+        HConstants.EMPTY_BYTE_ARRAY, Bytes.toBytes("bbb"),
+        Bytes.toBytes("ccc"), Bytes.toBytes("ddd"), Bytes.toBytes("eee"),
+        Bytes.toBytes("fff"), Bytes.toBytes("ggg"), Bytes.toBytes("hhh"),
+        Bytes.toBytes("iii"), Bytes.toBytes("jjj"), Bytes.toBytes("kkk"),
+        Bytes.toBytes("lll"), Bytes.toBytes("mmm"), Bytes.toBytes("nnn"),
+        Bytes.toBytes("ooo"), Bytes.toBytes("ppp"), Bytes.toBytes("qqq"),
+        Bytes.toBytes("rrr"), Bytes.toBytes("sss"), Bytes.toBytes("ttt"),
+        Bytes.toBytes("uuu"), Bytes.toBytes("vvv"), Bytes.toBytes("www"),
+        Bytes.toBytes("xxx"), Bytes.toBytes("yyy")
+      };
+    return KEYS;
   }
 
   public int createMultiRegions(final Configuration c, final HTable table,
       final byte[] columnFamily, byte [][] startKeys)
   throws IOException {
+    return createMultiRegionsWithFavoredNodes(c,table,columnFamily,
+        new Pair<byte[][], byte[][]>(startKeys, null));
+  }
+
+  public int createMultiRegionsWithFavoredNodes(final Configuration c, final HTable table,
+      final byte[] columnFamily, Pair<byte[][], byte[][]> startKeysAndFavNodes)
+  throws IOException {
+    byte[][] startKeys = startKeysAndFavNodes.getFirst();
+    byte[][] favNodes = startKeysAndFavNodes.getSecond();
     Arrays.sort(startKeys, Bytes.BYTES_COMPARATOR);
     HTable meta = new HTable(c, HConstants.META_TABLE_NAME);
     HTableDescriptor htd = table.getTableDescriptor();
@@ -783,6 +797,10 @@ public class HBaseTestingUtility {
       Put put = new Put(hri.getRegionName());
       put.add(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER,
         Writables.getBytes(hri));
+      if (favNodes != null) {
+        put.add(HConstants.CATALOG_FAMILY, HConstants.FAVOREDNODES_QUALIFIER,
+            favNodes[i]);
+      }
       meta.put(put);
       LOG.info("createMultiRegions: inserted " + hri.toString());
       count++;
@@ -1232,7 +1250,7 @@ TOP_LOOP:
         Threads.sleepWithoutInterrupt(2000);
         continue;
       } catch (PreemptiveFastFailException ex) {
-      	// Be more patient
+        // Be more patient
         Threads.sleepWithoutInterrupt(2000);
         continue;
       }

Modified: hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat.java?rev=1461253&r1=1461252&r2=1461253&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat.java (original)
+++ hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat.java Tue Mar 26 18:18:38 2013
@@ -76,6 +76,7 @@ import org.apache.hadoop.hbase.regionser
 import org.apache.hadoop.hbase.regionserver.StoreFile;
 import org.apache.hadoop.hbase.regionserver.StoreFile.BloomType;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
@@ -876,9 +877,9 @@ public class TestHFileOutputFormat  {
             Map<byte[], byte[]> metadataMap = reader.loadFileInfo();
 
             assertTrue("timeRange is not set",
-			metadataMap.get(StoreFile.TIMERANGE_KEY) != null);
+      metadataMap.get(StoreFile.TIMERANGE_KEY) != null);
             assertEquals("Incorrect bloom type used for column family " +
-			     familyStr + "(reader: " + reader + ")",
+           familyStr + "(reader: " + reader + ")",
                          configuredBloomFilter.get(familyStr),
                          reader.getBloomFilterType());
             break;
@@ -896,24 +897,24 @@ public class TestHFileOutputFormat  {
   }
 
   private void setupColumnFamiliesEncodingType(HTable table, 
-  		Map<String, DataBlockEncoding> familyToEncoding) throws IOException {
-  	HTableDescriptor mockTableDesc = new HTableDescriptor();
-  	for (Entry<String, DataBlockEncoding> entry : familyToEncoding.entrySet()) {
-  		mockTableDesc.addFamily(
-  			new HColumnDescriptor(entry.getKey().getBytes(), 
-  				1, 
-  				Compression.Algorithm.NONE.toString(), 
-  				true, 
-  				entry.getValue().toString(), 
-  				false, 
-  				false, 
-  				HColumnDescriptor.DEFAULT_BLOCKSIZE, 
-  				0,
-  				BloomType.NONE.toString(),
-  				HColumnDescriptor.DEFAULT_REPLICATION_SCOPE,
-  				HColumnDescriptor.DEFAULT_BLOOMFILTER_ERROR_RATE));
-  	}
-  	Mockito.doReturn(mockTableDesc).when(table).getTableDescriptor();
+      Map<String, DataBlockEncoding> familyToEncoding) throws IOException {
+    HTableDescriptor mockTableDesc = new HTableDescriptor();
+    for (Entry<String, DataBlockEncoding> entry : familyToEncoding.entrySet()) {
+      mockTableDesc.addFamily(
+        new HColumnDescriptor(entry.getKey().getBytes(),
+          1,
+          Compression.Algorithm.NONE.toString(),
+          true,
+          entry.getValue().toString(),
+          false,
+          false,
+          HColumnDescriptor.DEFAULT_BLOCKSIZE,
+          0,
+          BloomType.NONE.toString(),
+          HColumnDescriptor.DEFAULT_REPLICATION_SCOPE,
+          HColumnDescriptor.DEFAULT_BLOOMFILTER_ERROR_RATE));
+    }
+    Mockito.doReturn(mockTableDesc).when(table).getTableDescriptor();
   }
   
   /**
@@ -936,8 +937,8 @@ public class TestHFileOutputFormat  {
 
     int familyIndex = 0;
     for (byte[] family : FAMILIES) {
-    	configuredEncoding.put(Bytes.toString(family),
-    		EncodingTypeValues[familyIndex++ % EncodingTypeValues.length]);
+      configuredEncoding.put(Bytes.toString(family),
+        EncodingTypeValues[familyIndex++ % EncodingTypeValues.length]);
     }
 
     setupColumnFamiliesEncodingType(table, configuredEncoding);
@@ -984,7 +985,7 @@ public class TestHFileOutputFormat  {
 
             assertTrue("timeRange is not set", metadataMap.get(StoreFile.TIMERANGE_KEY) !=
null);
             assertEquals("Incorrect Encoding Type used for column family " 
-            	+ familyStr + "(reader: " + reader + ")",
+              + familyStr + "(reader: " + reader + ")",
               configuredEncoding.get(familyStr),
               reader.getHFileReader().getEncodingOnDisk());
             break;
@@ -1000,15 +1001,15 @@ public class TestHFileOutputFormat  {
       dir.getFileSystem(conf).delete(dir, true);
     }
   }
-  
+
   @Test
   public void testFavoredNodes() throws Exception {
-  	Random rand = new Random();
-  	for (int i=0; i<3; i++) {
-  		int tmp = (int)'b';
-  		byte c = (byte)(tmp + (Math.abs(rand.nextInt()))%24);
-  		testFavoredNodesPerChar(c);
-  	}
+    Random rand = new Random();
+    for (int i=0; i<3; i++) {
+      int tmp = (int)'b';
+      byte c = (byte)(tmp + (Math.abs(rand.nextInt()))%24);
+      testFavoredNodesPerChar(c);
+    }
   }
   private static final int FAVORED_NODES_NUM = 3;
   private static final int REGION_SERVERS = 10;
@@ -1016,65 +1017,51 @@ public class TestHFileOutputFormat  {
    * Testing FavoredNodes support for HFileOutputFormat
    */
   public void testFavoredNodesPerChar(byte c) throws Exception{
-  	util.startMiniCluster(REGION_SERVERS);
-  	Configuration conf = new Configuration(this.util.getConfiguration());
+    util.startMiniCluster(REGION_SERVERS);
+    Configuration conf = new Configuration(this.util.getConfiguration());
     RecordWriter<ImmutableBytesWritable, KeyValue> writer = null;
     TaskAttemptContext context = null;
     Path dir = util.getTestDir("TestFavoredNodes");
     byte[] familyName = Bytes.toBytes("family");
     byte[] tableName = Bytes.toBytes("TestFavoredNodes");
-    HTable table = util.createTable(tableName, familyName);
-    int countOfRegions = util.createMultiRegions(table, familyName);
-    util.waitUntilAllRegionsAssigned(countOfRegions);
-
-    InetSocketAddress[] nodes = new InetSocketAddress[REGION_SERVERS];
-    List<DataNode> datanodes = util.getDFSCluster().getDataNodes();
-    for (int i = 0; i < REGION_SERVERS; i++) {
-      nodes[i] = datanodes.get(i).getSelfAddr();
-    }
-
+    byte[][] startKeys = util.getTmpKeys();
     String[] nodeNames = new String[REGION_SERVERS];
+    List<DataNode> dataNodes = util.getDFSCluster().getDataNodes();
     for (int i = 0; i < REGION_SERVERS; i++) {
-      nodeNames[i] = nodes[i].getAddress().getHostAddress() + ":" +
-      	nodes[i].getPort();
+      DataNode node = dataNodes.get(i);
+      nodeNames[i] =
+          node.getSelfAddr().getAddress().getHostAddress() + ":" +
+              node.getSelfAddr().getPort();
     }
-
-    List<Put> puts = new ArrayList<Put>();
     int testIndex = 0;
-    List<HRegion> regions = util.getHBaseCluster().getRegions(tableName);
-    for (int i = 0; i < regions.size(); i++) {
-    	List<HServerAddress> favoredNodes = new ArrayList<HServerAddress>(FAVORED_NODES_NUM);
-    	HRegion region = regions.get(i);
-    	if (Bytes.BYTES_COMPARATOR.compare(region.getStartKey(), HConstants.EMPTY_BYTE_ARRAY) != 0) {
-    		if (region.getStartKey()[0] == c) {
-      		testIndex = i;
-      	}
-    	}
-    	for (int j = 0; j < FAVORED_NODES_NUM; j++) {
-        favoredNodes.add(new HServerAddress(nodeNames[(i + j) % REGION_SERVERS]));
+    byte[][] favNodes = new byte[startKeys.length][];
+    for (int i = 0; i < startKeys.length; i++) {
+      List<HServerAddress> favoredNodes =
+          new ArrayList<HServerAddress>(FAVORED_NODES_NUM);
+      if (i>0 && startKeys[i][0] == c) {
+        testIndex = i;
+      }
+      for (int j = 0; j < FAVORED_NODES_NUM; j++) {
+        favoredNodes.add(new HServerAddress(nodeNames[(i + j) %
+                                            REGION_SERVERS]));
       }
-      String favoredNodesString = RegionPlacement.getFavoredNodes(favoredNodes);
-      Put put = new Put(region.getRegionName());
-      put.add(HConstants.CATALOG_FAMILY, HConstants.FAVOREDNODES_QUALIFIER,
-          favoredNodesString.getBytes());
-      puts.add(put);
+      String favoredNodesString =
+          RegionPlacement.getFavoredNodes(favoredNodes);
+      byte[] favoredNodesBytes = Bytes.toBytes(favoredNodesString);
+      favNodes[i] = favoredNodesBytes;
     }
+    HTable table = util.createTable(tableName, familyName);
+    int countOfRegions = util.createMultiRegionsWithFavoredNodes(conf, table, familyName,
+        new Pair<byte[][],byte[][]>(startKeys, favNodes));
+    util.waitUntilAllRegionsAssigned(countOfRegions);
 
-    // Write the region assignments to the meta table.
-    HTable metaTable = new HTable(conf, HConstants.META_TABLE_NAME);
-    metaTable.put(puts);
-    LOG.info("Updated the META with the new assignment plan");
-
-    // Allowing the Master thread to rescan and clean the empty meta rows
-    int sleepTime = conf.getInt("hbase.master.meta.thread.rescanfrequency", 100*1000);
-    Thread.sleep(sleepTime);
     try {
       Job job = new Job(conf, "testLocalMRIncrementalLoad");
       setupRandomGeneratorMapper(job);
       HFileOutputFormat.configureIncrementalLoad(job, table);
       FileOutputFormat.setOutputPath(job, dir);
       context = new TaskAttemptContext(job.getConfiguration(),
-      	new TaskAttemptID());
+        new TaskAttemptID());
       HFileOutputFormat hof = new HFileOutputFormat();
       writer = hof.getRecordWriter(context);
 
@@ -1110,22 +1097,22 @@ public class TestHFileOutputFormat  {
         }
       }
     } finally {
-    	dir.getFileSystem(conf).delete(dir, true);
-    	util.shutdownMiniCluster();
+      dir.getFileSystem(conf).delete(dir, true);
+      util.shutdownMiniCluster();
     }
   }
 
   private void writeKVs(RecordWriter<ImmutableBytesWritable, KeyValue> writer,
-  	byte[] family, byte keyByte) throws IOException, InterruptedException{
-  	byte[] k = new byte[3];
-  	int b1 = (int)keyByte;
-  	Random rand = new Random();
-		int tmp = rand.nextInt();
-		int b2 = Math.min(b1 + Math.abs(tmp)%26, (int)'z');
-		tmp = rand.nextInt();
-		int b3 = Math.min(b1 + Math.abs(tmp)%26, (int)'z');
-		
-		for (byte byte2 = (byte)b2; byte2 <= 'z'; byte2++) {
+    byte[] family, byte keyByte) throws IOException, InterruptedException{
+    byte[] k = new byte[3];
+    int b1 = (int)keyByte;
+    Random rand = new Random();
+    int tmp = rand.nextInt();
+    int b2 = Math.min(b1 + Math.abs(tmp)%26, (int)'z');
+    tmp = rand.nextInt();
+    int b3 = Math.min(b1 + Math.abs(tmp)%26, (int)'z');
+
+    for (byte byte2 = (byte)b2; byte2 <= 'z'; byte2++) {
       for (byte byte3 = (byte)b3; byte3 <= 'z'; byte3++) {
         k[0] = (byte)b1;
         k[1] = byte2;


