Reply-To: dev@hbase.apache.org
Subject: svn commit: r1711891 [50/50] - in /hbase/hbase.apache.org/trunk: ./ apidocs/ apidocs/org/apache/hadoop/hbase/ apidocs/org/apache/hadoop/hbase/class-use/ apidocs/org/apache/hadoop/hbase/client/ apidocs/org/apache/hadoop/hbase/client/class-use/ apidocs/o...
Date: Mon, 02 Nov 2015 03:51:05 -0000
To: commits@hbase.apache.org
From: misty@apache.org
X-Mailer: svnmailer-1.0.9
Message-Id: <20151102035110.C2E873A019A@svn01-us-west.apache.org>

Modified: hbase/hbase.apache.org/trunk/xref/org/apache/hadoop/hbase/tmpl/regionserver/RSStatusTmpl.html
URL: http://svn.apache.org/viewvc/hbase/hbase.apache.org/trunk/xref/org/apache/hadoop/hbase/tmpl/regionserver/RSStatusTmpl.html?rev=1711891&r1=1711890&r2=1711891&view=diff
==============================================================================
--- hbase/hbase.apache.org/trunk/xref/org/apache/hadoop/hbase/tmpl/regionserver/RSStatusTmpl.html (original)
+++ hbase/hbase.apache.org/trunk/xref/org/apache/hadoop/hbase/tmpl/regionserver/RSStatusTmpl.html Mon Nov 2 03:51:02 2015
@@ -36,8 +36,8 @@
 26 @org.jamon.annotations.Argument(name = "regionServer", type = "HRegionServer")},
 27 optionalArguments = {
 28 @org.jamon.annotations.Argument(name = "bcn", type = "String"),
-29 @org.jamon.annotations.Argument(name = "bcv", type = "String"),
-30 @org.jamon.annotations.Argument(name = "filter", type = "String"),
+29 @org.jamon.annotations.Argument(name = "filter", type = "String"),
+30 @org.jamon.annotations.Argument(name = "bcv", type = "String"),
 31 @org.jamon.annotations.Argument(name = "format", type = "String")})
 32 public class RSStatusTmpl
 33 extends org.jamon.AbstractTemplateProxy
@@ -96,40 +96,40 @@
 86 return m_bcn__IsNotDefault;
 87 }
 88 private boolean m_bcn__IsNotDefault;
-89 // 24, 1
-90 public void setBcv(String bcv)
+89 // 21, 1
+90 public void setFilter(String filter)
 91 {
-92 // 24, 1
-93 m_bcv = bcv;
-94 m_bcv__IsNotDefault = true;
+92 // 21, 1
+93 m_filter = filter;
+94 m_filter__IsNotDefault = true;
 95 }
-96 public String getBcv()
+96 public String getFilter()
 97 {
-98 return m_bcv;
+98 return m_filter;
 99 }
-100 private String m_bcv;
-101 public boolean getBcv__IsNotDefault()
+100 private String m_filter;
+101 public boolean getFilter__IsNotDefault()
 102 {
-103 return m_bcv__IsNotDefault;
+103 return m_filter__IsNotDefault;
 104 }
-105 private boolean m_bcv__IsNotDefault;
-106 // 21, 1
-107 public void setFilter(String filter)
+105 private boolean m_filter__IsNotDefault;
+106 // 24, 1
+107 public void setBcv(String bcv)
 108 {
-109 // 21, 1
-110 m_filter = filter;
-111 m_filter__IsNotDefault = true;
+109 // 24, 1
+110 m_bcv = bcv;
+111 m_bcv__IsNotDefault = true;
 112 }
-113 public String getFilter()
+113 public String getBcv()
 114 {
-115 return m_filter;
+115 return m_bcv;
 116 }
-117 private String m_filter;
-118 public boolean getFilter__IsNotDefault()
+117 private String m_bcv;
+118 public boolean getBcv__IsNotDefault()
 119 {
-120 return m_filter__IsNotDefault;
+120 return m_bcv__IsNotDefault;
 121 }
-122 private boolean m_filter__IsNotDefault;
+122 private boolean m_bcv__IsNotDefault;
 123 // 22, 1
 124 public void setFormat(String format)
 125 {
@@ -165,17 +165,17 @@
 155 return this;
 156 }
 157 
-158 protected String bcv;
-159 public final org.apache.hadoop.hbase.tmpl.regionserver.RSStatusTmpl setBcv(String p_bcv)
+158 protected String filter;
+159 public final org.apache.hadoop.hbase.tmpl.regionserver.RSStatusTmpl setFilter(String p_filter)
 160 {
-161 (getImplData()).setBcv(p_bcv);
+161 (getImplData()).setFilter(p_filter);
 162 return this;
 163 }
 164 
-165 protected String filter;
-166 public final org.apache.hadoop.hbase.tmpl.regionserver.RSStatusTmpl setFilter(String p_filter)
+165 protected String bcv;
+166 public final org.apache.hadoop.hbase.tmpl.regionserver.RSStatusTmpl setBcv(String p_bcv)
 167 {
-168 (getImplData()).setFilter(p_filter);
+168 (getImplData()).setBcv(p_bcv);
 169 return this;
 170 }
 171 

Modified: hbase/hbase.apache.org/trunk/xref/org/apache/hadoop/hbase/tmpl/regionserver/RSStatusTmplImpl.html
URL: http://svn.apache.org/viewvc/hbase/hbase.apache.org/trunk/xref/org/apache/hadoop/hbase/tmpl/regionserver/RSStatusTmplImpl.html?rev=1711891&r1=1711890&r2=1711891&view=diff
==============================================================================
--- hbase/hbase.apache.org/trunk/xref/org/apache/hadoop/hbase/tmpl/regionserver/RSStatusTmplImpl.html (original)
+++ hbase/hbase.apache.org/trunk/xref/org/apache/hadoop/hbase/tmpl/regionserver/RSStatusTmplImpl.html Mon Nov 2 03:51:02 2015
@@ -37,8 +37,8 @@
 27 {
 28 private final HRegionServer regionServer;
 29 private final String bcn;
-30 private final String bcv;
-31 private final String filter;
+30 private final String filter;
+31 private final String bcv;
 32 private final String format;
 33 protected static org.apache.hadoop.hbase.tmpl.regionserver.RSStatusTmpl.ImplData __jamon_setOptionalArguments(org.apache.hadoop.hbase.tmpl.regionserver.RSStatusTmpl.ImplData p_implData)
 34 {
@@ -46,13 +46,13 @@
 36 {
 37 p_implData.setBcn("");
 38 }
-39 if(! p_implData.getBcv__IsNotDefault())
+39 if(! p_implData.getFilter__IsNotDefault())
 40 {
-41 p_implData.setBcv("");
+41 p_implData.setFilter("general");
 42 }
-43 if(! p_implData.getFilter__IsNotDefault())
+43 if(! p_implData.getBcv__IsNotDefault())
 44 {
-45 p_implData.setFilter("general");
+45 p_implData.setBcv("");
 46 }
 47 if(! p_implData.getFormat__IsNotDefault())
 48 {
@@ -65,8 +65,8 @@
 55 super(p_templateManager, __jamon_setOptionalArguments(p_implData));
 56 regionServer = p_implData.getRegionServer();
 57 bcn = p_implData.getBcn();
-58 bcv = p_implData.getBcv();
-59 filter = p_implData.getFilter();
+58 filter = p_implData.getFilter();
+59 bcv = p_implData.getBcv();
 60 format = p_implData.getFormat();
 61 }
 62 
@@ -96,8 +96,8 @@
 86 // 41, 3
 87 {
 88 org.apache.hadoop.hbase.tmpl.common.TaskMonitorTmpl __jamon__var_7 = new org.apache.hadoop.hbase.tmpl.common.TaskMonitorTmpl(this.getTemplateManager());
-89 __jamon__var_7.setFormat("json" );
-90 __jamon__var_7.setFilter(filter);
+89 __jamon__var_7.setFilter(filter);
+90 __jamon__var_7.setFormat("json" );
 91 __jamon__var_7.renderNoFlush(jamonWriter);
 92 }
 93 // 41, 68
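The two RSStatusTmpl hunks above only reorder the accessors that Jamon regenerated for the optional filter and bcv template arguments; the defaults ("general" and "") and the rendered output are unchanged, because callers address optional arguments by name through the fluent proxy rather than by position. A minimal sketch of such a caller, assuming the generated render(Writer, HRegionServer) entry point, which this diff does not show:

    import java.io.IOException;
    import java.io.Writer;
    import org.apache.hadoop.hbase.regionserver.HRegionServer;
    import org.apache.hadoop.hbase.tmpl.regionserver.RSStatusTmpl;

    public final class RSStatusRenderer {
      // Renders the region server status page. Setter order does not matter:
      // each optional argument is set by name on the template's ImplData.
      static void render(HRegionServer regionServer, Writer out) throws IOException {
        new RSStatusTmpl()
            .setFilter("general") // optional; defaults to "general"
            .setBcv("")           // optional; defaults to ""
            .render(out, regionServer);
      }
    }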
Modified: hbase/hbase.apache.org/trunk/xref/org/apache/hadoop/hbase/util/HBaseFsck.html
URL: http://svn.apache.org/viewvc/hbase/hbase.apache.org/trunk/xref/org/apache/hadoop/hbase/util/HBaseFsck.html?rev=1711891&r1=1711890&r2=1711891&view=diff
==============================================================================
--- hbase/hbase.apache.org/trunk/xref/org/apache/hadoop/hbase/util/HBaseFsck.html (original)
+++ hbase/hbase.apache.org/trunk/xref/org/apache/hadoop/hbase/util/HBaseFsck.html Mon Nov 2 03:51:02 2015
@@ -3427,7 +3427,7 @@
 3417 || hri.isMetaRegion())) {
 3418 return true;
 3419 }
-3420 PairOfSameType<HRegionInfo> daughters = HRegionInfo.getDaughterRegions(result);
+3420 PairOfSameType<HRegionInfo> daughters = MetaTableAccessor.getDaughterRegions(result);
 3421 for (HRegionLocation h : rl.getRegionLocations()) {
 3422 if (h == null || h.getRegionInfo() == null) {
 3423 continue;
@@ -3450,7 +3450,7 @@
 3440 throw new IOException("Two entries in hbase:meta are same " + previous);
 3441 }
 3442 }
-3443 PairOfSameType<HRegionInfo> mergeRegions = HRegionInfo.getMergeRegions(result);
+3443 PairOfSameType<HRegionInfo> mergeRegions = MetaTableAccessor.getMergeRegions(result);
 3444 for (HRegionInfo mergeRegion : new HRegionInfo[] {
 3445 mergeRegions.getFirst(), mergeRegions.getSecond() }) {
 3446 if (mergeRegion != null) {
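The two HBaseFsck hunks are call-site updates for an API move: the helpers that decode split daughters and merge parents out of an hbase:meta row now live on MetaTableAccessor instead of HRegionInfo. A minimal standalone sketch of the relocated calls; the metaRow variable is assumed to come from a scan or Get against hbase:meta:

    import org.apache.hadoop.hbase.HRegionInfo;
    import org.apache.hadoop.hbase.MetaTableAccessor;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.util.PairOfSameType;

    public final class MetaRowParsers {
      // Decodes the split and merge qualifiers of one hbase:meta row.
      // Either member of a pair may be null when the region was never
      // split or never produced by a merge.
      static void inspect(Result metaRow) {
        PairOfSameType<HRegionInfo> daughters = MetaTableAccessor.getDaughterRegions(metaRow);
        PairOfSameType<HRegionInfo> parents = MetaTableAccessor.getMergeRegions(metaRow);
        System.out.println("daughters: " + daughters.getFirst() + ", " + daughters.getSecond());
        System.out.println("merge parents: " + parents.getFirst() + ", " + parents.getSecond());
      }
    }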
Modified: hbase/hbase.apache.org/trunk/xref/org/apache/hadoop/hbase/util/HMerge.html
URL: http://svn.apache.org/viewvc/hbase/hbase.apache.org/trunk/xref/org/apache/hadoop/hbase/util/HMerge.html?rev=1711891&r1=1711890&r2=1711891&view=diff
==============================================================================
--- hbase/hbase.apache.org/trunk/xref/org/apache/hadoop/hbase/util/HMerge.html (original)
+++ hbase/hbase.apache.org/trunk/xref/org/apache/hadoop/hbase/util/HMerge.html Mon Nov 2 03:51:02 2015
@@ -269,7 +269,7 @@
 259 if (results == null) {
 260 return null;
 261 }
-262 HRegionInfo region = HRegionInfo.getHRegionInfo(results);
+262 HRegionInfo region = MetaTableAccessor.getHRegionInfo(results);
 263 if (region == null) {
 264 throw new NoSuchElementException("meta region entry missing " +
 265 Bytes.toString(HConstants.CATALOG_FAMILY) + ":" +
@@ -303,7 +303,7 @@
 293 currentRow = metaScanner.next();
 294 continue;
 295 }
-296 HRegionInfo region = HRegionInfo.getHRegionInfo(currentRow);
+296 HRegionInfo region = MetaTableAccessor.getHRegionInfo(currentRow);
 297 if (!region.getTable().equals(this.tableName)) {
 298 currentRow = metaScanner.next();
 299 continue;
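The HMerge hunks make the same migration for the plain region decoder, HRegionInfo.getHRegionInfo(Result) becoming MetaTableAccessor.getHRegionInfo(Result). A short sketch mirroring the scan loop above, which skips rows that carry no regioninfo cell or that belong to another table; the helper name regionForTable is illustrative, not part of the HBase API:

    import org.apache.hadoop.hbase.HRegionInfo;
    import org.apache.hadoop.hbase.MetaTableAccessor;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Result;

    public final class MetaRowReader {
      // Returns the region of the given table described by one hbase:meta
      // row, or null when the row is unparseable or belongs to another table.
      static HRegionInfo regionForTable(Result metaRow, TableName tableName) {
        HRegionInfo region = MetaTableAccessor.getHRegionInfo(metaRow);
        if (region == null || !region.getTable().equals(tableName)) {
          return null;
        }
        return region;
      }
    }

Modified: hbase/hbase.apache.org/trunk/xref/org/apache/hadoop/hbase/util/Merge.html
URL: http://svn.apache.org/viewvc/hbase/hbase.apache.org/trunk/xref/org/apache/hadoop/hbase/util/Merge.html?rev=1711891&r1=1711890&r2=1711891&view=diff
==============================================================================
--- hbase/hbase.apache.org/trunk/xref/org/apache/hadoop/hbase/util/Merge.html (original)
+++ hbase/hbase.apache.org/trunk/xref/org/apache/hadoop/hbase/util/Merge.html Mon Nov 2 03:51:02 2015
@@ -33,248 +33,249 @@
 23 
 24 import org.apache.commons.logging.Log;
 25 import org.apache.commons.logging.LogFactory;
-26 import org.apache.hadoop.hbase.classification.InterfaceAudience;
-27 import org.apache.hadoop.conf.Configured;
-28 import org.apache.hadoop.fs.FileSystem;
-29 import org.apache.hadoop.fs.Path;
-30 import org.apache.hadoop.hbase.HBaseInterfaceAudience;
-31 import org.apache.hadoop.hbase.TableDescriptor;
-32 import org.apache.hadoop.hbase.TableName;
-33 import org.apache.hadoop.hbase.HBaseConfiguration;
-34 import org.apache.hadoop.hbase.HConstants;
-35 import org.apache.hadoop.hbase.HRegionInfo;
-36 import org.apache.hadoop.hbase.HTableDescriptor;
-37 import org.apache.hadoop.hbase.MasterNotRunningException;
-38 import org.apache.hadoop.hbase.ZooKeeperConnectionException;
-39 import org.apache.hadoop.hbase.client.Delete;
-40 import org.apache.hadoop.hbase.client.Get;
-41 import org.apache.hadoop.hbase.client.HBaseAdmin;
-42 import org.apache.hadoop.hbase.client.Result;
-43 import org.apache.hadoop.hbase.regionserver.HRegion;
-44 import org.apache.hadoop.io.WritableComparator;
-45 import org.apache.hadoop.util.Tool;
-46 import org.apache.hadoop.util.ToolRunner;
-47 
-48 import com.google.common.base.Preconditions;
-49 
-50 /**
-51 * Utility that can merge any two regions in the same table: adjacent,
-52 * overlapping or disjoint.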
-53 */
-54 @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS)
-55 public class Merge extends Configured implements Tool {
-56 private static final Log LOG = LogFactory.getLog(Merge.class);
-57 private Path rootdir;
-58 private volatile MetaUtils utils;
-59 private TableName tableName; // Name of table
-60 private volatile byte [] region1; // Name of region 1
-61 private volatile byte [] region2; // Name of region 2
-62 private volatile HRegionInfo mergeInfo = null;
-63 
-64 @Override
-65 public int run(String[] args) throws Exception {
-66 if (parseArgs(args) != 0) {
-67 return -1;
-68 }
-69 
-70 // Verify file system is up.
-71 FileSystem fs = FileSystem.get(getConf()); // get DFS handle
-72 LOG.info("Verifying that file system is available...");
-73 try {
-74 FSUtils.checkFileSystemAvailable(fs);
-75 } catch (IOException e) {
-76 LOG.fatal("File system is not available", e);
-77 return -1;
-78 }
-79 
-80 // Verify HBase is down
-81 LOG.info("Verifying that HBase is not running...");
-82 try {
-83 HBaseAdmin.checkHBaseAvailable(getConf());
-84 LOG.fatal("HBase cluster must be off-line, and is not. Aborting.");
-85 return -1;
-86 } catch (ZooKeeperConnectionException zkce) {
-87 // If no zk, presume no master.
-88 } catch (MasterNotRunningException e) {
-89 // Expected. Ignore.
-90 }
-91 
-92 // Initialize MetaUtils and and get the root of the HBase installation
-93 
-94 this.utils = new MetaUtils(getConf());
-95 this.rootdir = FSUtils.getRootDir(getConf());
-96 try {
-97 mergeTwoRegions();
-98 return 0;
-99 } catch (IOException e) {
-100 LOG.fatal("Merge failed", e);
-101 return -1;
-102 
-103 } finally {
-104 if (this.utils != null) {
-105 this.utils.shutdown();
-106 }
-107 }
-108 }
-109 
-110 /** @return HRegionInfo for merge result */
-111 HRegionInfo getMergedHRegionInfo() {
-112 return this.mergeInfo;
-113 }
-114 
-115 /*
-116 * Merges two regions from a user table.
-117 */
-118 private void mergeTwoRegions() throws IOException {
-119 LOG.info("Merging regions " + Bytes.toStringBinary(this.region1) + " and " +
-120 Bytes.toStringBinary(this.region2) + " in table " + this.tableName);
-121 HRegion meta = this.utils.getMetaRegion();
-122 Get get = new Get(region1);
-123 get.addColumn(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER);
-124 Result result1 = meta.get(get);
-125 Preconditions.checkState(!result1.isEmpty(),
-126 "First region cells can not be null");
-127 HRegionInfo info1 = HRegionInfo.getHRegionInfo(result1);
-128 if (info1 == null) {
-129 throw new NullPointerException("info1 is null using key " +
-130 Bytes.toStringBinary(region1) + " in " + meta);
-131 }
-132 get = new Get(region2);
-133 get.addColumn(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER);
-134 Result result2 = meta.get(get);
-135 Preconditions.checkState(!result2.isEmpty(),
-136 "Second region cells can not be null");
-137 HRegionInfo info2 = HRegionInfo.getHRegionInfo(result2);
-138 if (info2 == null) {
-139 throw new NullPointerException("info2 is null using key " + meta);
-140 }
-141 TableDescriptor htd = FSTableDescriptors.getTableDescriptorFromFs(FileSystem.get(getConf()),
-142 this.rootdir, this.tableName);
-143 HRegion merged = merge(htd.getHTableDescriptor(), meta, info1, info2);
-144 
-145 LOG.info("Adding " + merged.getRegionInfo() + " to " +
-146 meta.getRegionInfo());
-147 
-148 HRegion.addRegionToMETA(meta, merged);
-149 merged.close();
-150 }
-151 
-152 /*
-153 * Actually merge two regions and update their info in the meta region(s)
-154 * Returns HRegion object for newly merged region
-155 */
-156 private HRegion merge(final HTableDescriptor htd, HRegion meta,
-157 HRegionInfo info1, HRegionInfo info2)
-158 throws IOException {
-159 if (info1 == null) {
-160 throw new IOException("Could not find " + Bytes.toStringBinary(region1) + " in " +
-161 Bytes.toStringBinary(meta.getRegionInfo().getRegionName()));
-162 }
-163 if (info2 == null) {
-164 throw new IOException("Could not find " + Bytes.toStringBinary(region2) + " in " +
-165 Bytes.toStringBinary(meta.getRegionInfo().getRegionName()));
-166 }
-167 HRegion merged = null;
-168 HRegion r1 = HRegion.openHRegion(info1, htd, utils.getLog(info1), getConf());
-169 try {
-170 HRegion r2 = HRegion.openHRegion(info2, htd, utils.getLog(info2), getConf());
-171 try {
-172 merged = HRegion.merge(r1, r2);
-173 } finally {
-174 if (!r2.isClosed()) {
-175 r2.close();
-176 }
-177 }
-178 } finally {
-179 if (!r1.isClosed()) {
-180 r1.close();
-181 }
-182 }
-183 
-184 // Remove the old regions from meta.
-185 // HRegion.merge has already deleted their files
-186 
-187 removeRegionFromMeta(meta, info1);
-188 removeRegionFromMeta(meta, info2);
-189 
-190 this.mergeInfo = merged.getRegionInfo();
-191 return merged;
-192 }
-193 
-194 /*
-195 * Removes a region's meta information from the passed <code>meta</code>
-196 * region.
-197 *
-198 * @param meta hbase:meta HRegion to be updated
-199 * @param regioninfo HRegionInfo of region to remove from <code>meta</code>
-200 *
-201 * @throws IOException
-202 */
-203 private void removeRegionFromMeta(HRegion meta, HRegionInfo regioninfo)
-204 throws IOException {
-205 if (LOG.isDebugEnabled()) {
-206 LOG.debug("Removing region: " + regioninfo + " from " + meta);
-207 }
-208 
-209 Delete delete = new Delete(regioninfo.getRegionName(),
-210 System.currentTimeMillis());
-211 meta.delete(delete);
-212 }
-213 
-214 /**
-215 * Parse given arguments and assign table name and regions names.
-216 * (generic args are handled by ToolRunner.)
-217 *
-218 * @param args the arguments to parse
-219 *
-220 * @throws IOException
-221 */
-222 private int parseArgs(String[] args) throws IOException {
-223 if (args.length != 3) {
-224 usage();
-225 return -1;
-226 }
-227 tableName = TableName.valueOf(args[0]);
-228 
-229 region1 = Bytes.toBytesBinary(args[1]);
-230 region2 = Bytes.toBytesBinary(args[2]);
-231 int status = 0;
-232 if (notInTable(tableName, region1) || notInTable(tableName, region2)) {
-233 status = -1;
-234 } else if (Bytes.equals(region1, region2)) {
-235 LOG.error("Can't merge a region with itself");
-236 status = -1;
-237 }
-238 return status;
-239 }
-240 
-241 private boolean notInTable(final TableName tn, final byte [] rn) {
-242 if (WritableComparator.compareBytes(tn.getName(), 0, tn.getName().length,
-243 rn, 0, tn.getName().length) != 0) {
-244 LOG.error("Region " + Bytes.toStringBinary(rn) + " does not belong to table " +
-245 tn);
-246 return true;
-247 }
-248 return false;
-249 }
-250 
-251 private void usage() {
-252 System.err
-253 .println("For hadoop 0.21+, Usage: bin/hbase org.apache.hadoop.hbase.util.Merge "
-254 + "[-Dfs.defaultFS=hdfs://nn:port] <table-name> <region-1> <region-2>\n");
-255 }
-256 
-257 public static void main(String[] args) {
-258 int status;
-259 try {
-260 status = ToolRunner.run(HBaseConfiguration.create(), new Merge(), args);
-261 } catch (Exception e) {
-262 LOG.error("exiting due to error", e);
-263 status = -1;
-264 }
-265 System.exit(status);
-266 }
-267 }
+26 import org.apache.hadoop.hbase.MetaTableAccessor;
+27 import org.apache.hadoop.hbase.classification.InterfaceAudience;
+28 import org.apache.hadoop.conf.Configured;
+29 import org.apache.hadoop.fs.FileSystem;
+30 import org.apache.hadoop.fs.Path;
+31 import org.apache.hadoop.hbase.HBaseInterfaceAudience;
+32 import org.apache.hadoop.hbase.TableDescriptor;
+33 import org.apache.hadoop.hbase.TableName;
+34 import org.apache.hadoop.hbase.HBaseConfiguration;
+35 import org.apache.hadoop.hbase.HConstants;
+36 import org.apache.hadoop.hbase.HRegionInfo;
+37 import org.apache.hadoop.hbase.HTableDescriptor;
+38 import org.apache.hadoop.hbase.MasterNotRunningException;
+39 import org.apache.hadoop.hbase.ZooKeeperConnectionException;
+40 import org.apache.hadoop.hbase.client.Delete;
+41 import org.apache.hadoop.hbase.client.Get;
+42 import org.apache.hadoop.hbase.client.HBaseAdmin;
+43 import org.apache.hadoop.hbase.client.Result;
+44 import org.apache.hadoop.hbase.regionserver.HRegion;
+45 import org.apache.hadoop.io.WritableComparator;
+46 import org.apache.hadoop.util.Tool;
+47 import org.apache.hadoop.util.ToolRunner;
+48 
+49 import com.google.common.base.Preconditions;
+50 
+51 /**
+52 * Utility that can merge any two regions in the same table: adjacent,
+53 * overlapping or disjoint.
+54 */
+55 @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS)
+56 public class Merge extends Configured implements Tool {
+57 private static final Log LOG = LogFactory.getLog(Merge.class);
+58 private Path rootdir;
+59 private volatile MetaUtils utils;
+60 private TableName tableName; // Name of table
+61 private volatile byte [] region1; // Name of region 1
+62 private volatile byte [] region2; // Name of region 2
+63 private volatile HRegionInfo mergeInfo = null;
+64 
+65 @Override
+66 public int run(String[] args) throws Exception {
+67 if (parseArgs(args) != 0) {
+68 return -1;
+69 }
+70 
+71 // Verify file system is up.
+72 FileSystem fs = FileSystem.get(getConf()); // get DFS handle
+73 LOG.info("Verifying that file system is available...");
+74 try {
+75 FSUtils.checkFileSystemAvailable(fs);
+76 } catch (IOException e) {
+77 LOG.fatal("File system is not available", e);
+78 return -1;
+79 }
+80 
+81 // Verify HBase is down
+82 LOG.info("Verifying that HBase is not running...");
+83 try {
+84 HBaseAdmin.checkHBaseAvailable(getConf());
+85 LOG.fatal("HBase cluster must be off-line, and is not. Aborting.");
+86 return -1;
+87 } catch (ZooKeeperConnectionException zkce) {
+88 // If no zk, presume no master.
+89 } catch (MasterNotRunningException e) {
+90 // Expected. Ignore.
+91 }
+92 
+93 // Initialize MetaUtils and and get the root of the HBase installation
+94 
+95 this.utils = new MetaUtils(getConf());
+96 this.rootdir = FSUtils.getRootDir(getConf());
+97 try {
+98 mergeTwoRegions();
+99 return 0;
+100 } catch (IOException e) {
+101 LOG.fatal("Merge failed", e);
+102 return -1;
+103 
+104 } finally {
+105 if (this.utils != null) {
+106 this.utils.shutdown();
+107 }
+108 }
+109 }
+110 
+111 /** @return HRegionInfo for merge result */
+112 HRegionInfo getMergedHRegionInfo() {
+113 return this.mergeInfo;
+114 }
+115 
+116 /*
+117 * Merges two regions from a user table.
+118 */
+119 private void mergeTwoRegions() throws IOException {
+120 LOG.info("Merging regions " + Bytes.toStringBinary(this.region1) + " and " +
+121 Bytes.toStringBinary(this.region2) + " in table " + this.tableName);
+122 HRegion meta = this.utils.getMetaRegion();
+123 Get get = new Get(region1);
+124 get.addColumn(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER);
+125 Result result1 = meta.get(get);
+126 Preconditions.checkState(!result1.isEmpty(),
+127 "First region cells can not be null");
+128 HRegionInfo info1 = MetaTableAccessor.getHRegionInfo(result1);
+129 if (info1 == null) {
+130 throw new NullPointerException("info1 is null using key " +
+131 Bytes.toStringBinary(region1) + " in " + meta);
+132 }
+133 get = new Get(region2);
+134 get.addColumn(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER);
+135 Result result2 = meta.get(get);
+136 Preconditions.checkState(!result2.isEmpty(),
+137 "Second region cells can not be null");
+138 HRegionInfo info2 = MetaTableAccessor.getHRegionInfo(result2);
+139 if (info2 == null) {
+140 throw new NullPointerException("info2 is null using key " + meta);
+141 }
+142 TableDescriptor htd = FSTableDescriptors.getTableDescriptorFromFs(FileSystem.get(getConf()),
+143 this.rootdir, this.tableName);
+144 HRegion merged = merge(htd.getHTableDescriptor(), meta, info1, info2);
+145 
+146 LOG.info("Adding " + merged.getRegionInfo() + " to " +
+147 meta.getRegionInfo());
+148 
+149 HRegion.addRegionToMETA(meta, merged);
+150 merged.close();
+151 }
+152 
+153 /*
+154 * Actually merge two regions and update their info in the meta region(s)
+155 * Returns HRegion object for newly merged region
+156 */
+157 private HRegion merge(final HTableDescriptor htd, HRegion meta,
+158 HRegionInfo info1, HRegionInfo info2)
+159 throws IOException {
+160 if (info1 == null) {
+161 throw new IOException("Could not find " + Bytes.toStringBinary(region1) + " in " +
+162 Bytes.toStringBinary(meta.getRegionInfo().getRegionName()));
+163 }
+164 if (info2 == null) {
+165 throw new IOException("Could not find " + Bytes.toStringBinary(region2) + " in " +
+166 Bytes.toStringBinary(meta.getRegionInfo().getRegionName()));
+167 }
+168 HRegion merged = null;
+169 HRegion r1 = HRegion.openHRegion(info1, htd, utils.getLog(info1), getConf());
+170 try {
+171 HRegion r2 = HRegion.openHRegion(info2, htd, utils.getLog(info2), getConf());
+172 try {
+173 merged = HRegion.merge(r1, r2);
+174 } finally {
+175 if (!r2.isClosed()) {
+176 r2.close();
+177 }
+178 }
+179 } finally {
+180 if (!r1.isClosed()) {
+181 r1.close();
+182 }
+183 }
+184 
+185 // Remove the old regions from meta.
+186 // HRegion.merge has already deleted their files
+187 
+188 removeRegionFromMeta(meta, info1);
+189 removeRegionFromMeta(meta, info2);
+190 
+191 this.mergeInfo = merged.getRegionInfo();
+192 return merged;
+193 }
+194 
+195 /*
+196 * Removes a region's meta information from the passed <code>meta</code>
+197 * region.
+198 *
+199 * @param meta hbase:meta HRegion to be updated
+200 * @param regioninfo HRegionInfo of region to remove from <code>meta</code>
+201 *
+202 * @throws IOException
+203 */
+204 private void removeRegionFromMeta(HRegion meta, HRegionInfo regioninfo)
+205 throws IOException {
+206 if (LOG.isDebugEnabled()) {
+207 LOG.debug("Removing region: " + regioninfo + " from " + meta);
+208 }
+209 
+210 Delete delete = new Delete(regioninfo.getRegionName(),
+211 System.currentTimeMillis());
+212 meta.delete(delete);
+213 }
+214 
+215 /**
+216 * Parse given arguments and assign table name and regions names.
+217 * (generic args are handled by ToolRunner.)
+218 *
+219 * @param args the arguments to parse
+220 *
+221 * @throws IOException
+222 */
+223 private int parseArgs(String[] args) throws IOException {
+224 if (args.length != 3) {
+225 usage();
+226 return -1;
+227 }
+228 tableName = TableName.valueOf(args[0]);
+229 
+230 region1 = Bytes.toBytesBinary(args[1]);
+231 region2 = Bytes.toBytesBinary(args[2]);
+232 int status = 0;
+233 if (notInTable(tableName, region1) || notInTable(tableName, region2)) {
+234 status = -1;
+235 } else if (Bytes.equals(region1, region2)) {
+236 LOG.error("Can't merge a region with itself");
+237 status = -1;
+238 }
+239 return status;
+240 }
+241 
+242 private boolean notInTable(final TableName tn, final byte [] rn) {
+243 if (WritableComparator.compareBytes(tn.getName(), 0, tn.getName().length,
+244 rn, 0, tn.getName().length) != 0) {
+245 LOG.error("Region " + Bytes.toStringBinary(rn) + " does not belong to table " +
+246 tn);
+247 return true;
+248 }
+249 return false;
+250 }
+251 
+252 private void usage() {
+253 System.err
+254 .println("For hadoop 0.21+, Usage: bin/hbase org.apache.hadoop.hbase.util.Merge "
+255 + "[-Dfs.defaultFS=hdfs://nn:port] <table-name> <region-1> <region-2>\n");
+256 }
+257 
+258 public static void main(String[] args) {
+259 int status;
+260 try {
+261 status = ToolRunner.run(HBaseConfiguration.create(), new Merge(), args);
+262 } catch (Exception e) {
+263 LOG.error("exiting due to error", e);
+264 status = -1;
+265 }
+266 System.exit(status);
+267 }
+268 }
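Apart from picking up the MetaTableAccessor import and the relocated getHRegionInfo calls, the Merge tool is unchanged; every line number simply shifts by one. For reference, a sketch of driving the tool programmatically, equivalent to the bin/hbase command printed by usage(). The table and region names are fabricated placeholders, and the cluster must be offline or run() aborts:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.util.Merge;
    import org.apache.hadoop.util.ToolRunner;

    public final class MergeDriver {
      public static void main(String[] args) throws Exception {
        // Arguments mirror parseArgs(): <table-name> <region-1> <region-2>.
        // Region names must start with the table name to pass notInTable();
        // the encoded names below are placeholders, not real region names.
        int status = ToolRunner.run(HBaseConfiguration.create(), new Merge(),
            new String[] {
                "testtable",
                "testtable,,1446434400000.aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa.",
                "testtable,rowkey,1446434400000.bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb." });
        System.exit(status);
      }
    }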