From: misty@apache.org
To: commits@hbase.apache.org
Date: Fri, 05 Feb 2016 04:32:30 -0000
Subject: [20/51] [partial] hbase-site git commit: Published site at 18eff3c1c337003b2a419490e621f931d16936fb.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/a8725a46/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.Cluster.SwapRegionsAction.html
----------------------------------------------------------------------
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.Cluster.SwapRegionsAction.html b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.Cluster.SwapRegionsAction.html
index 8fc3dbd..efdc708 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.Cluster.SwapRegionsAction.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.Cluster.SwapRegionsAction.html
@@ -824,743 +824,747 @@
816 i++;
817 lowestLocalityServerIndex = serverIndicesSortedByLocality[i];
818 }
-819 LOG.debug("Lowest locality region server with non zero regions is "
-820 + servers[lowestLocalityServerIndex].getHostname() + " with locality "
-821 + localityPerServer[lowestLocalityServerIndex]);
-822 return lowestLocalityServerIndex;
-823 }
-824 }
-825
-826 int getLowestLocalityRegionOnServer(int serverIndex) {
-827 if (regionFinder != null) {
-828 float lowestLocality = 1.0f;
-829 int lowestLocalityRegionIndex = 0;
-830 if (regionsPerServer[serverIndex].length == 0) {
-831 // No regions on that region server
-832 return -1;
-833 }
-834 for (int j = 0; j < regionsPerServer[serverIndex].length; j++) {
-835 int regionIndex = regionsPerServer[serverIndex][j];
-836 HDFSBlocksDistribution distribution = regionFinder
-837 .getBlockDistribution(regions[regionIndex]);
-838 float locality = distribution.getBlockLocalityIndex(servers[serverIndex].getHostname());
-839 if (locality < lowestLocality) {
-840 lowestLocality = locality;
-841 lowestLocalityRegionIndex = j;
-842 }
-843 }
-844 LOG.debug(" Lowest locality region index is " + lowestLocalityRegionIndex
-845 + " and its region server contains " + regionsPerServer[serverIndex].length
-846 + " regions");
-847 return regionsPerServer[serverIndex][lowestLocalityRegionIndex];
-848 } else {
-849 return -1;
-850 }
-851 }
-852
-853 float getLocalityOfRegion(int region, int server) {
-854 if (regionFinder != null) {
-855 HDFSBlocksDistribution distribution = regionFinder.getBlockDistribution(regions[region]);
-856 return distribution.getBlockLocalityIndex(servers[server].getHostname());
-857 } else {
-858 return 0f;
-859 }
-860 }
-861
-862 int getLeastLoadedTopServerForRegion(int region) {
-863 if (regionFinder != null) {
-864 List<ServerName> topLocalServers = regionFinder.getTopBlockLocations(regions[region]);
-865 int leastLoadedServerIndex = -1;
-866 int load = Integer.MAX_VALUE;
-867 for (ServerName sn : topLocalServers) {
-868 if (!serversToIndex.containsKey(sn.getHostAndPort())) {
-869 continue;
-870 }
-871 int index = serversToIndex.get(sn.getHostAndPort());
-872 if (regionsPerServer[index] == null) {
+819 if (LOG.isTraceEnabled()) {
+820 LOG.trace("Lowest locality region server with non zero regions is "
+821 + servers[lowestLocalityServerIndex].getHostname() + " with locality "
+822 + localityPerServer[lowestLocalityServerIndex]);
+823 }
+824 return lowestLocalityServerIndex;
+825 }
+826 }
+827
+828 int getLowestLocalityRegionOnServer(int serverIndex) {
+829 if (regionFinder != null) {
+830 float lowestLocality = 1.0f;
+831 int lowestLocalityRegionIndex = 0;
+832 if (regionsPerServer[serverIndex].length == 0) {
+833 // No regions on that region server
+834 return -1;
+835 }
+836 for (int j = 0; j < regionsPerServer[serverIndex].length; j++) {
+837 int regionIndex = regionsPerServer[serverIndex][j];
+838 HDFSBlocksDistribution distribution = regionFinder
+839 .getBlockDistribution(regions[regionIndex]);
+840 float locality = distribution.getBlockLocalityIndex(servers[serverIndex].getHostname());
+841 if (locality < lowestLocality) {
+842 lowestLocality = locality;
+843 lowestLocalityRegionIndex = j;
+844 }
+845 }
+846 if (LOG.isTraceEnabled()) {
+847 LOG.debug(" Lowest locality region index is " + lowestLocalityRegionIndex
+848 + " and its region server contains " + regionsPerServer[serverIndex].length
+849 + " regions");
+850 }
+851 return regionsPerServer[serverIndex][lowestLocalityRegionIndex];
+852 } else {
+853 return -1;
+854 }
+855 }
+856
+857 float getLocalityOfRegion(int region, int server) {
+858 if (regionFinder != null) {
+859 HDFSBlocksDistribution distribution = regionFinder.getBlockDistribution(regions[region]);
+860 return distribution.getBlockLocalityIndex(servers[server].getHostname());
+861 } else {
+862 return 0f;
+863 }
+864 }
+865
+866 int getLeastLoadedTopServerForRegion(int region) {
+867 if (regionFinder != null) {
+868 List<ServerName> topLocalServers = regionFinder.getTopBlockLocations(regions[region]);
+869 int leastLoadedServerIndex = -1;
+870 int load = Integer.MAX_VALUE;
+871 for (ServerName sn : topLocalServers) {
+872 if (!serversToIndex.containsKey(sn.getHostAndPort())) {
873 continue;
874 }
-875 int tempLoad = regionsPerServer[index].length;
-876 if (tempLoad <= load) {
-877 leastLoadedServerIndex = index;
-878 load = tempLoad;
-879 }
-880 }
-881 return leastLoadedServerIndex;
-882 } else {
-883 return -1;
-884 }
-885 }
-886
-887 void calculateRegionServerLocalities() {
-888 if (regionFinder == null) {
-889 LOG.warn("Region location finder found null, skipping locality calculations.");
-890 return;
-891 }
-892 for (int i = 0; i < regionsPerServer.length; i++) {
-893 HDFSBlocksDistribution distribution = new HDFSBlocksDistribution();
-894 if (regionsPerServer[i].length > 0) {
-895 for (int j = 0; j < regionsPerServer[i].length; j++) {
-896 int regionIndex = regionsPerServer[i][j];
-897 distribution.add(regionFinder.getBlockDistribution(regions[regionIndex]));
-898 }
-899 } else {
-900 LOG.debug("Server " + servers[i].getHostname() + " had 0 regions.");
-901 }
-902 localityPerServer[i] = distribution.getBlockLocalityIndex(servers[i].getHostname());
-903 }
-904 }
-905
-906 @VisibleForTesting
-907 protected void setNumRegions(int numRegions) {
-908 this.numRegions = numRegions;
-909 }
-910
-911 @VisibleForTesting
-912 protected void setNumMovedRegions(int numMovedRegions) {
-913 this.numMovedRegions = numMovedRegions;
-914 }
-915
-916 @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="SBSC_USE_STRINGBUFFER_CONCATENATION",
-917 justification="Not important but should be fixed")
-918 @Override
-919 public String toString() {
-920 String desc = "Cluster{" +
-921 "servers=[";
-922 for(ServerName sn:servers) {
-923 desc += sn.getHostAndPort() + ", ";
-924 }
-925 desc +=
-926 ", serverIndicesSortedByRegionCount="+
-927 Arrays.toString(serverIndicesSortedByRegionCount) +
-928 ", regionsPerServer=[";
-929
-930 for (int[]r:regionsPerServer) {
-931 desc += Arrays.toString(r);
-932 }
-933 desc += "]" +
-934 ", numMaxRegionsPerTable=" +
-935 Arrays.toString(numMaxRegionsPerTable) +
-936 ", numRegions=" +
-937 numRegions +
-938 ", numServers=" +
-939 numServers +
-940 ", numTables=" +
-941 numTables +
-942 ", numMovedRegions=" +
-943 numMovedRegions +
-944 '}';
-945 return desc;
-946 }
-947 }
-948
-949 // slop for regions
-950 protected float slop;
-951 protected Configuration config;
-952 protected RackManager rackManager;
-953 private static final Random RANDOM = new Random(System.currentTimeMillis());
-954 private static final Log LOG = LogFactory.getLog(BaseLoadBalancer.class);
-955
-956 // Regions of these tables are put on the master by default.
-957 private static final String[] DEFAULT_TABLES_ON_MASTER =
-958 new String[] {AccessControlLists.ACL_TABLE_NAME.getNameAsString(),
-959 TableName.NAMESPACE_TABLE_NAME.getNameAsString(),
-960 TableName.META_TABLE_NAME.getNameAsString()};
-961
-962 public static final String TABLES_ON_MASTER =
-963 "hbase.balancer.tablesOnMaster";
-964
-965 protected final Set<String> tablesOnMaster = new HashSet<String>();
-966 protected MetricsBalancer metricsBalancer = null;
-967 protected ClusterStatus clusterStatus = null;
-968 protected ServerName masterServerName;
-969 protected MasterServices services;
-970
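The locality helpers earlier in this hunk (getLowestLocalityRegionOnServer, calculateRegionServerLocalities) share one shape: walk an array of locality indexes, track the running minimum, and return -1 when there is nothing to scan. A minimal standalone sketch of that pattern; the class and method names here are illustrative, not part of BaseLoadBalancer:

    public class LocalityScanExample {
      // Return the index of the smallest locality value, or -1 for an empty
      // array, mirroring the -1 "nothing to inspect" convention used above.
      static int indexOfLowestLocality(float[] localityPerServer) {
        if (localityPerServer == null || localityPerServer.length == 0) {
          return -1;
        }
        int lowestIndex = 0;
        for (int i = 1; i < localityPerServer.length; i++) {
          if (localityPerServer[i] < localityPerServer[lowestIndex]) {
            lowestIndex = i; // track the running minimum
          }
        }
        return lowestIndex;
      }

      public static void main(String[] args) {
        // Server 1 has the worst locality (0.2f), so index 1 is printed.
        System.out.println(indexOfLowestLocality(new float[] {0.9f, 0.2f, 0.7f}));
      }
    }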
-971 /**
-972 * By default, regions of some small system tables such as meta,
-973 * namespace, and acl are assigned to the active master. If you don't
-974 * want to assign any region to the active master, you need to
-975 * configure "hbase.balancer.tablesOnMaster" to "none".
-976 */
-977 protected static String[] getTablesOnMaster(Configuration conf) {
-978 String valueString = conf.get(TABLES_ON_MASTER);
-979 if (valueString == null) {
-980 return DEFAULT_TABLES_ON_MASTER;
-981 }
-982 valueString = valueString.trim();
-983 if (valueString.equalsIgnoreCase("none")) {
-984 return null;
+875 int index = serversToIndex.get(sn.getHostAndPort());
+876 if (regionsPerServer[index] == null) {
+877 continue;
+878 }
+879 int tempLoad = regionsPerServer[index].length;
+880 if (tempLoad <= load) {
+881 leastLoadedServerIndex = index;
+882 load = tempLoad;
+883 }
+884 }
+885 return leastLoadedServerIndex;
+886 } else {
+887 return -1;
+888 }
+889 }
+890
+891 void calculateRegionServerLocalities() {
+892 if (regionFinder == null) {
+893 LOG.warn("Region location finder found null, skipping locality calculations.");
+894 return;
+895 }
+896 for (int i = 0; i < regionsPerServer.length; i++) {
+897 HDFSBlocksDistribution distribution = new HDFSBlocksDistribution();
+898 if (regionsPerServer[i].length > 0) {
+899 for (int j = 0; j < regionsPerServer[i].length; j++) {
+900 int regionIndex = regionsPerServer[i][j];
+901 distribution.add(regionFinder.getBlockDistribution(regions[regionIndex]));
+902 }
+903 } else {
+904 LOG.debug("Server " + servers[i].getHostname() + " had 0 regions.");
+905 }
+906 localityPerServer[i] = distribution.getBlockLocalityIndex(servers[i].getHostname());
+907 }
+908 }
+909
+910 @VisibleForTesting
+911 protected void setNumRegions(int numRegions) {
+912 this.numRegions = numRegions;
+913 }
+914
+915 @VisibleForTesting
+916 protected void setNumMovedRegions(int numMovedRegions) {
+917 this.numMovedRegions = numMovedRegions;
+918 }
+919
+920 @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="SBSC_USE_STRINGBUFFER_CONCATENATION",
+921 justification="Not important but should be fixed")
+922 @Override
+923 public String toString() {
+924 String desc = "Cluster{" +
+925 "servers=[";
+926 for(ServerName sn:servers) {
+927 desc += sn.getHostAndPort() + ", ";
+928 }
+929 desc +=
+930 ", serverIndicesSortedByRegionCount="+
+931 Arrays.toString(serverIndicesSortedByRegionCount) +
+932 ", regionsPerServer=[";
+933
+934 for (int[]r:regionsPerServer) {
+935 desc += Arrays.toString(r);
+936 }
+937 desc += "]" +
+938 ", numMaxRegionsPerTable=" +
+939 Arrays.toString(numMaxRegionsPerTable) +
+940 ", numRegions=" +
+941 numRegions +
+942 ", numServers=" +
+943 numServers +
+944 ", numTables=" +
+945 numTables +
+946 ", numMovedRegions=" +
+947 numMovedRegions +
+948 '}';
+949 return desc;
+950 }
+951 }
+952
+953 // slop for regions
+954 protected float slop;
+955 protected Configuration config;
+956 protected RackManager rackManager;
+957 private static final Random RANDOM = new Random(System.currentTimeMillis());
+958 private static final Log LOG = LogFactory.getLog(BaseLoadBalancer.class);
+959
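The substantive change in the new hunk above is wrapping the string-building log calls in level guards, so the concatenation only runs when that level is enabled (note that the second guarded block, +846 to +850, still calls LOG.debug inside an isTraceEnabled check). The commons-logging guard idiom in isolation; the class and message are illustrative:

    import org.apache.commons.logging.Log;
    import org.apache.commons.logging.LogFactory;

    public class GuardedLoggingExample {
      private static final Log LOG = LogFactory.getLog(GuardedLoggingExample.class);

      static void reportLowestLocality(String hostname, float locality) {
        // Without the guard, the argument string is concatenated on every
        // call even when trace output is discarded; with it, the cost is
        // only paid when trace logging is actually enabled.
        if (LOG.isTraceEnabled()) {
          LOG.trace("Lowest locality region server is " + hostname
              + " with locality " + locality);
        }
      }

      public static void main(String[] args) {
        reportLowestLocality("rs1.example.com", 0.42f);
      }
    }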
+960 // Regions of these tables are put on the master by default.
+961 private static final String[] DEFAULT_TABLES_ON_MASTER =
+962 new String[] {AccessControlLists.ACL_TABLE_NAME.getNameAsString(),
+963 TableName.NAMESPACE_TABLE_NAME.getNameAsString(),
+964 TableName.META_TABLE_NAME.getNameAsString()};
+965
+966 public static final String TABLES_ON_MASTER =
+967 "hbase.balancer.tablesOnMaster";
+968
+969 protected final Set<String> tablesOnMaster = new HashSet<String>();
+970 protected MetricsBalancer metricsBalancer = null;
+971 protected ClusterStatus clusterStatus = null;
+972 protected ServerName masterServerName;
+973 protected MasterServices services;
+974
+975 /**
+976 * By default, regions of some small system tables such as meta,
+977 * namespace, and acl are assigned to the active master. If you don't
+978 * want to assign any region to the active master, you need to
+979 * configure "hbase.balancer.tablesOnMaster" to "none".
+980 */
+981 protected static String[] getTablesOnMaster(Configuration conf) {
+982 String valueString = conf.get(TABLES_ON_MASTER);
+983 if (valueString == null) {
+984 return DEFAULT_TABLES_ON_MASTER;
985 }
-986 return StringUtils.getStrings(valueString);
-987 }
-988
-989 /**
-990 * Check if configured to put any tables on the active master
-991 */
-992 public static boolean tablesOnMaster(Configuration conf) {
-993 String[] tables = getTablesOnMaster(conf);
-994 return tables != null && tables.length > 0;
-995 }
-996
-997 @Override
-998 public void setConf(Configuration conf) {
-999 setSlop(conf);
-1000 if (slop < 0) slop = 0;
-1001 else if (slop > 1) slop = 1;
-1002
-1003 this.config = conf;
-1004 String[] tables = getTablesOnMaster(conf);
-1005 if (tables != null && tables.length > 0) {
-1006 Collections.addAll(tablesOnMaster, tables);
-1007 }
-1008 this.rackManager = new RackManager(getConf());
-1009 regionFinder.setConf(conf);
-1010 }
-1011
-1012 protected void setSlop(Configuration conf) {
-1013 this.slop = conf.getFloat("hbase.regions.slop", (float) 0.2);
+986 valueString = valueString.trim();
+987 if (valueString.equalsIgnoreCase("none")) {
+988 return null;
+989 }
+990 return StringUtils.getStrings(valueString);
+991 }
+992
+993 /**
+994 * Check if configured to put any tables on the active master
+995 */
+996 public static boolean tablesOnMaster(Configuration conf) {
+997 String[] tables = getTablesOnMaster(conf);
+998 return tables != null && tables.length > 0;
+999 }
+1000
+1001 @Override
+1002 public void setConf(Configuration conf) {
+1003 setSlop(conf);
+1004 if (slop < 0) slop = 0;
+1005 else if (slop > 1) slop = 1;
+1006
+1007 this.config = conf;
+1008 String[] tables = getTablesOnMaster(conf);
+1009 if (tables != null && tables.length > 0) {
+1010 Collections.addAll(tablesOnMaster, tables);
+1011 }
+1012 this.rackManager = new RackManager(getConf());
+1013 regionFinder.setConf(conf);
1014 }
1015
-1016 /**
-1017 * Check if a region belongs to some small system table.
-1018 * If so, the primary replica may be expected to be put on the master regionserver.
-1019 */
-1020 public boolean shouldBeOnMaster(HRegionInfo region) {
-1021 return tablesOnMaster.contains(region.getTable().getNameAsString())
-1022 && region.getReplicaId() == HRegionInfo.DEFAULT_REPLICA_ID;
-1023 }
-1024
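getTablesOnMaster, shown in both hunks above, maps the hbase.balancer.tablesOnMaster property onto three cases: unset falls back to DEFAULT_TABLES_ON_MASTER (acl, namespace, meta), the literal "none" disables pinning entirely, and anything else is parsed as a comma-separated table list. A hedged sketch of exercising those cases through a plain Hadoop Configuration; the table names in the custom list are illustrative:

    import org.apache.hadoop.conf.Configuration;

    public class TablesOnMasterExample {
      public static void main(String[] args) {
        Configuration conf = new Configuration();

        // Unset: the balancer falls back to DEFAULT_TABLES_ON_MASTER.
        System.out.println(conf.get("hbase.balancer.tablesOnMaster")); // null

        // Explicit opt-out: getTablesOnMaster(conf) returns null and no
        // region is pinned to the active master.
        conf.set("hbase.balancer.tablesOnMaster", "none");

        // Custom list, parsed by StringUtils.getStrings() into an array.
        conf.set("hbase.balancer.tablesOnMaster", "hbase:meta,hbase:namespace");
        System.out.println(conf.get("hbase.balancer.tablesOnMaster"));
      }
    }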
-1025 /**
-1026 * Balance the regions that should be on master regionserver.
-1027 */
-1028 protected List<RegionPlan> balanceMasterRegions(
-1029 Map<ServerName, List<HRegionInfo>> clusterMap) {
-1030 if (masterServerName == null
-1031 || clusterMap == null || clusterMap.size() <= 1) return null;
-1032 List<RegionPlan> plans = null;
-1033 List<HRegionInfo> regions = clusterMap.get(masterServerName);
-1034 if (regions != null) {
-1035 Iterator<ServerName> keyIt = null;
-1036 for (HRegionInfo region: regions) {
-1037 if (shouldBeOnMaster(region)) continue;
-1038
-1039 // Find a non-master regionserver to host the region
-1040 if (keyIt == null || !keyIt.hasNext()) {
-1041 keyIt = clusterMap.keySet().iterator();
-1042 }
-1043 ServerName dest = keyIt.next();
-1044 if (masterServerName.equals(dest)) {
-1045 if (!keyIt.hasNext()) {
-1046 keyIt = clusterMap.keySet().iterator();
-1047 }
-1048 dest = keyIt.next();
-1049 }
-1050
-1051 // Move this region away from the master regionserver
-1052 RegionPlan plan = new RegionPlan(region, masterServerName, dest);
-1053 if (plans == null) {
-1054 plans = new ArrayList<RegionPlan>();
-1055 }
-1056 plans.add(plan);
-1057 }
-1058 }
-1059 for (Map.Entry<ServerName, List<HRegionInfo>> server: clusterMap.entrySet()) {
-1060 if (masterServerName.equals(server.getKey())) continue;
-1061 for (HRegionInfo region: server.getValue()) {
-1062 if (!shouldBeOnMaster(region)) continue;
-1063
-1064 // Move this region to the master regionserver
-1065 RegionPlan plan = new RegionPlan(region, server.getKey(), masterServerName);
-1066 if (plans == null) {
-1067 plans = new ArrayList<RegionPlan>();
-1068 }
-1069 plans.add(plan);
-1070 }
-1071 }
-1072 return plans;
-1073 }
-1074
-1075 /**
-1076 * Assign the regions that should be on master regionserver.
-1077 */
-1078 protected Map<ServerName, List<HRegionInfo>> assignMasterRegions(
-1079 Collection<HRegionInfo> regions, List<ServerName> servers) {
-1080 if (servers == null || regions == null || regions.isEmpty()) {
-1081 return null;
-1082 }
-1083 Map<ServerName, List<HRegionInfo>> assignments
-1084 = new TreeMap<ServerName, List<HRegionInfo>>();
-1085 if (masterServerName != null && servers.contains(masterServerName)) {
-1086 assignments.put(masterServerName, new ArrayList<HRegionInfo>());
-1087 for (HRegionInfo region: regions) {
-1088 if (shouldBeOnMaster(region)) {
-1089 assignments.get(masterServerName).add(region);
-1090 }
-1091 }
-1092 }
-1093 return assignments;
-1094 }
-1095
-1096 @Override
-1097 public Configuration getConf() {
-1098 return this.config;
-1099 }
-1100
-1101 @Override
-1102 public synchronized void setClusterStatus(ClusterStatus st) {
-1103 this.clusterStatus = st;
-1104 regionFinder.setClusterStatus(st);
-1105 }
-1106
-1107 @Override
-1108 public void setMasterServices(MasterServices masterServices) {
-1109 masterServerName = masterServices.getServerName();
-1110 this.services = masterServices;
-1111 this.regionFinder.setServices(masterServices);
-1112 }
-1113
-1114 public void setRackManager(RackManager rackManager) {
-1115 this.rackManager = rackManager;
+1016 protected void setSlop(Configuration conf) {
+1017 this.slop = conf.getFloat("hbase.regions.slop", (float) 0.2);
+1018 }
+1019
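The slop read in setSlop above (default 0.2 from hbase.regions.slop, clamped to [0, 1] in setConf) defines the tolerated band around the mean region count that needsBalance checks further down in this diff. Worked example: 100 regions on 10 servers gives an average of 10, so the band is floor(10 * 0.8) = 8 to ceil(10 * 1.2) = 12, and balancing is skipped while every server holds between 8 and 12 regions. The same arithmetic as a tiny sketch:

    public class SlopBandExample {
      public static void main(String[] args) {
        float slop = 0.2f;              // default for "hbase.regions.slop"
        float average = 100 / 10f;      // 100 regions over 10 servers
        int floor = (int) Math.floor(average * (1 - slop));   // 8
        int ceiling = (int) Math.ceil(average * (1 + slop));  // 12
        int maxLoad = 12, minLoad = 8;  // example per-server extremes
        // Same test needsBalance uses: balanced iff both extremes are in band.
        boolean balanced = !(maxLoad > ceiling || minLoad < floor);
        System.out.println("floor=" + floor + " ceiling=" + ceiling
            + " balanced=" + balanced);
      }
    }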
+1020 /**
+1021 * Check if a region belongs to some small system table.
+1022 * If so, the primary replica may be expected to be put on the master regionserver.
+1023 */
+1024 public boolean shouldBeOnMaster(HRegionInfo region) {
+1025 return tablesOnMaster.contains(region.getTable().getNameAsString())
+1026 && region.getReplicaId() == HRegionInfo.DEFAULT_REPLICA_ID;
+1027 }
+1028
+1029 /**
+1030 * Balance the regions that should be on master regionserver.
+1031 */
+1032 protected List<RegionPlan> balanceMasterRegions(
+1033 Map<ServerName, List<HRegionInfo>> clusterMap) {
+1034 if (masterServerName == null
+1035 || clusterMap == null || clusterMap.size() <= 1) return null;
+1036 List<RegionPlan> plans = null;
+1037 List<HRegionInfo> regions = clusterMap.get(masterServerName);
+1038 if (regions != null) {
+1039 Iterator<ServerName> keyIt = null;
+1040 for (HRegionInfo region: regions) {
+1041 if (shouldBeOnMaster(region)) continue;
+1042
+1043 // Find a non-master regionserver to host the region
+1044 if (keyIt == null || !keyIt.hasNext()) {
+1045 keyIt = clusterMap.keySet().iterator();
+1046 }
+1047 ServerName dest = keyIt.next();
+1048 if (masterServerName.equals(dest)) {
+1049 if (!keyIt.hasNext()) {
+1050 keyIt = clusterMap.keySet().iterator();
+1051 }
+1052 dest = keyIt.next();
+1053 }
+1054
+1055 // Move this region away from the master regionserver
+1056 RegionPlan plan = new RegionPlan(region, masterServerName, dest);
+1057 if (plans == null) {
+1058 plans = new ArrayList<RegionPlan>();
+1059 }
+1060 plans.add(plan);
+1061 }
+1062 }
+1063 for (Map.Entry<ServerName, List<HRegionInfo>> server: clusterMap.entrySet()) {
+1064 if (masterServerName.equals(server.getKey())) continue;
+1065 for (HRegionInfo region: server.getValue()) {
+1066 if (!shouldBeOnMaster(region)) continue;
+1067
+1068 // Move this region to the master regionserver
+1069 RegionPlan plan = new RegionPlan(region, server.getKey(), masterServerName);
+1070 if (plans == null) {
+1071 plans = new ArrayList<RegionPlan>();
+1072 }
+1073 plans.add(plan);
+1074 }
+1075 }
+1076 return plans;
+1077 }
+1078
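balanceMasterRegions above picks a destination by cycling an iterator over the server set, restarting when it runs out and skipping the master, so evictions spread roughly evenly across the other servers (the size() <= 1 guard at the top ensures a non-master server exists). The selection step in isolation, simplified to a fresh iterator per call, with String standing in for ServerName and pickDestination as an illustrative name:

    import java.util.Arrays;
    import java.util.Iterator;
    import java.util.List;

    public class DestinationPickExample {
      static String pickDestination(List<String> servers, String master) {
        Iterator<String> keyIt = servers.iterator();
        String dest = keyIt.next();
        if (master.equals(dest)) {
          if (!keyIt.hasNext()) {
            keyIt = servers.iterator(); // wrap around, as the loop above does
          }
          dest = keyIt.next();
        }
        return dest;
      }

      public static void main(String[] args) {
        List<String> servers = Arrays.asList("master:16000", "rs1:16020", "rs2:16020");
        System.out.println(pickDestination(servers, "master:16000")); // rs1:16020
      }
    }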
+1079 /**
+1080 * Assign the regions that should be on master regionserver.
+1081 */
+1082 protected Map<ServerName, List<HRegionInfo>> assignMasterRegions(
+1083 Collection<HRegionInfo> regions, List<ServerName> servers) {
+1084 if (servers == null || regions == null || regions.isEmpty()) {
+1085 return null;
+1086 }
+1087 Map<ServerName, List<HRegionInfo>> assignments
+1088 = new TreeMap<ServerName, List<HRegionInfo>>();
+1089 if (masterServerName != null && servers.contains(masterServerName)) {
+1090 assignments.put(masterServerName, new ArrayList<HRegionInfo>());
+1091 for (HRegionInfo region: regions) {
+1092 if (shouldBeOnMaster(region)) {
+1093 assignments.get(masterServerName).add(region);
+1094 }
+1095 }
+1096 }
+1097 return assignments;
+1098 }
+1099
+1100 @Override
+1101 public Configuration getConf() {
+1102 return this.config;
+1103 }
+1104
+1105 @Override
+1106 public synchronized void setClusterStatus(ClusterStatus st) {
+1107 this.clusterStatus = st;
+1108 regionFinder.setClusterStatus(st);
+1109 }
+1110
+1111 @Override
+1112 public void setMasterServices(MasterServices masterServices) {
+1113 masterServerName = masterServices.getServerName();
+1114 this.services = masterServices;
+1115 this.regionFinder.setServices(masterServices);
1116 }
1117
-1118 protected boolean needsBalance(Cluster c) {
-1119 ClusterLoadState cs = new ClusterLoadState(c.clusterState);
-1120 if (cs.getNumServers() < MIN_SERVER_BALANCE) {
-1121 if (LOG.isDebugEnabled()) {
-1122 LOG.debug("Not running balancer because only " + cs.getNumServers()
-1123 + " active regionserver(s)");
-1124 }
-1125 return false;
-1126 }
-1127 if(areSomeRegionReplicasColocated(c)) return true;
-1128 // Check if we even need to do any load balancing
-1129 // HBASE-3681 check sloppiness first
-1130 float average = cs.getLoadAverage(); // for logging
-1131 int floor = (int) Math.floor(average * (1 - slop));
-1132 int ceiling = (int) Math.ceil(average * (1 + slop));
-1133 if (!(cs.getMaxLoad() > ceiling || cs.getMinLoad() < floor)) {
-1134 NavigableMap<ServerAndLoad, List<HRegionInfo>> serversByLoad = cs.getServersByLoad();
-1135 if (LOG.isTraceEnabled()) {
-1136 // If nothing to balance, then don't say anything unless trace-level logging.
-1137 LOG.trace("Skipping load balancing because balanced cluster; " +
-1138 "servers=" + cs.getNumServers() +
-1139 " regions=" + cs.getNumRegions() + " average=" + average +
-1140 " mostloaded=" + serversByLoad.lastKey().getLoad() +
-1141 " leastloaded=" + serversByLoad.firstKey().getLoad());
-1142 }
-1143 return false;
-1144 }
-1145 return true;
-1146 }
-1147
-1148 /**
-1149 * Subclasses should implement this to return true if the cluster has nodes that hosts
-1150 * multiple replicas for the same region, or, if there are multiple racks and the same
-1151 * rack hosts replicas of the same region
-1152 * @param c Cluster information
-1153 * @return whether region replicas are currently co-located
-1154 */
-1155 protected boolean areSomeRegionReplicasColocated(Cluster c) {
-1156 return false;
-1157 }
-1158
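areSomeRegionReplicasColocated above is a hook: the base class reports nothing co-located, and a replica-aware subclass overrides it so needsBalance returns true even when raw region counts look even. A sketch of the duplicate-host check such an override might run; the map layout and names are illustrative, not HBase API:

    import java.util.HashSet;
    import java.util.List;
    import java.util.Map;
    import java.util.Set;

    public class ColocationCheckSketch {
      // hostsPerRegion: for each region, the hosts carrying its replicas.
      // Co-location means some host appears twice for one region.
      static boolean someReplicasColocated(Map<String, List<String>> hostsPerRegion) {
        for (List<String> hosts : hostsPerRegion.values()) {
          Set<String> seen = new HashSet<String>();
          for (String host : hosts) {
            if (!seen.add(host)) {
              return true; // two replicas of this region share a host
            }
          }
        }
        return false;
      }
    }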
-1159 /**
-1160 * Generates a bulk assignment plan to be used on cluster startup using a
-1161 * simple round-robin assignment.
-1162 * <p>
-1163 * Takes a list of all the regions and all the servers in the cluster and
-1164 * returns a map of each server to the regions that it should be assigned.
-1165 * <p>
-1166 * Currently implemented as a round-robin assignment. Same invariant as load
-1167 * balancing, all servers holding floor(avg) or ceiling(avg).
-1168 *
-1169 * TODO: Use block locations from HDFS to place regions with their blocks
-1170 *
-1171 * @param regions all regions
-1172 * @param servers all servers
-1173 * @return map of server to the regions it should take, or null if no
-1174 * assignment is possible (ie. no regions or no servers)
-1175 */
-1176 @Override
-1177 public Map<ServerName, List<HRegionInfo>> roundRobinAssignment(List<HRegionInfo> regions,
-1178 List<ServerName> servers) {
-1179 metricsBalancer.incrMiscInvocations();
-1180 Map<ServerName, List<HRegionInfo>> assignments = assignMasterRegions(regions, servers);
-1181 if (assignments != null && !assignments.isEmpty()) {
-1182 servers = new ArrayList<ServerName>(servers);
-1183 // Guarantee not to put other regions on master
-1184 servers.remove(masterServerName);
-1185 List<HRegionInfo> masterRegions = assignments.get(masterServerName);
-1186 if (!masterRegions.isEmpty()) {
-1187 regions = new ArrayList<HRegionInfo>(regions);
-1188 for (HRegionInfo region: masterRegions) {
-1189 regions.remove(region);
-1190 }
-1191 }
-1192 }
-1193 if (regions == null || regions.isEmpty()) {
-1194 return assignments;
-1195 }
-1196
-1197 int numServers = servers == null ? 0 : servers.size();
-1198 if (numServers == 0) {
-1199 LOG.warn("Wanted to do round robin assignment but no servers to assign to");
-1200 return null;
-1201 }
-1202
-1203 // TODO: instead of retainAssignment() and roundRobinAssignment(), we should just run the
-1204 // normal LB.balancerCluster() with unassignedRegions. We only need to have a candidate
-1205 // generator for AssignRegionAction. The LB will ensure the regions are mostly local
-1206 // and balanced. This should also run fast with fewer number of iterations.
-1207
-1208 if (numServers == 1) { // Only one server, nothing fancy we can do here
-1209 ServerName server = servers.get(0);
-1210 assignments.put(server, new ArrayList<HRegionInfo>(regions));
-1211 return assignments;
-1212 }
-1213
-1214 Cluster cluster = createCluster(servers, regions);
-1215 List<HRegionInfo> unassignedRegions = new ArrayList<HRegionInfo>();
-1216
-1217 roundRobinAssignment(cluster, regions, unassignedRegions,
-1218 servers, assignments);
-1219
-1220 List<HRegionInfo> lastFewRegions = new ArrayList<HRegionInfo>();
-1221 // assign the remaining by going through the list and try to assign to servers one-by-one
-1222 int serverIdx = RANDOM.nextInt(numServers);
-1223 for (HRegionInfo region : unassignedRegions) {
-1224 boolean assigned = false;
-1225 for (int j = 0; j < numServers; j++) { // try all servers one by one
-1226 ServerName serverName = servers.get((j + serverIdx) % numServers);
-1227 if (!cluster.wouldLowerAvailability(region, serverName)) {
-1228 List<HRegionInfo> serverRegions = assignments.get(serverName);
-1229 if (serverRegions == null) {
-1230 serverRegions = new ArrayList<HRegionInfo>();
-1231 assignments.put(serverName, serverRegions);
-1232 }
-1233 serverRegions.add(region);
-1234 cluster.doAssignRegion(region, serverName);
-1235 serverIdx = (j + serverIdx + 1) % numServers; //remain from next server
-1236 assigned = true;
-1237 break;
-1238 }
-1239 }
-1240 if (!assigned) {
-1241 lastFewRegions.add(region);
-1242 }
-1243 }
-1244 // just sprinkle the rest of the regions on random regionservers. The balanceCluster will
-1245 // make it optimal later. we can end up with this if numReplicas > numServers.
-1246 for (HRegionInfo region : lastFewRegions) {
-1247 int i = RANDOM.nextInt(numServers);
-1248 ServerName server = servers.get(i);
-1249 List<HRegionInfo> serverRegions = assignments.get(server);
-1250 if (serverRegions == null) {
-1251 serverRegions = new ArrayList<HRegionInfo>();
-1252 assignments.put(server, serverRegions);
-1253 }
-1254 serverRegions.add(region);
-1255 cluster.doAssignRegion(region, server);
-1256 }
-1257 return assignments;
-1258 }
-1259
-1260 protected Cluster createCluster(List<ServerName> servers,
-1261 Collection<HRegionInfo> regions) {
-1262 // Get the snapshot of the current assignments for the regions in question, and then create
-1263 // a cluster out of it. Note that we might have replicas already assigned to some servers
-1264 // earlier. So we want to get the snapshot to see those assignments, but this will only contain
-1265 // replicas of the regions that are passed (for performance).
-1266 Map<ServerName, List<HRegionInfo>> clusterState = getRegionAssignmentsByServer(regions);
-1267
-1268 for (ServerName server : servers) {
-1269 if (!clusterState.containsKey(server)) {
-1270 clusterState.put(server, EMPTY_REGION_LIST);
-1271 }
-1272 }
-1273 return new Cluster(regions, clusterState, null, this.regionFinder,
-1274 rackManager);
-1275 }
-1276
-1277 /**
-1278 * Generates an immediate assignment plan to be used by a new master for
-1279 * regions in transition that do not have an already known destination.
-1280 *
-1281 * Takes a list of regions that need immediate assignment and a list of all
-1282 * available servers. Returns a map of regions to the server they should be
-1283 * assigned to.
+1118 public void setRackManager(RackManager rackManager) {
+1119 this.rackManager = rackManager;
+1120 }
+1121
+1122 protected boolean needsBalance(Cluster c) {
+1123 ClusterLoadState cs = new ClusterLoadState(c.clusterState);
+1124 if (cs.getNumServers() < MIN_SERVER_BALANCE) {
+1125 if (LOG.isDebugEnabled()) {
+1126 LOG.debug("Not running balancer because only " + cs.getNumServers()
+1127 + " active regionserver(s)");
+1128 }
+1129 return false;
+1130 }
+1131 if(areSomeRegionReplicasColocated(c)) return true;
+1132 // Check if we even need to do any load balancing
+1133 // HBASE-3681 check sloppiness first
+1134 float average = cs.getLoadAverage(); // for logging
+1135 int floor = (int) Math.floor(average * (1 - slop));
+1136 int ceiling = (int) Math.ceil(average * (1 + slop));
+1137 if (!(cs.getMaxLoad() > ceiling || cs.getMinLoad() < floor)) {
+1138 NavigableMap<ServerAndLoad, List<HRegionInfo>> serversByLoad = cs.getServersByLoad();
+1139 if (LOG.isTraceEnabled()) {
+1140 // If nothing to balance, then don't say anything unless trace-level logging.
+1141 LOG.trace("Skipping load balancing because balanced cluster; " +
+1142 "servers=" + cs.getNumServers() +
+1143 " regions=" + cs.getNumRegions() + " average=" + average +
+1144 " mostloaded=" + serversByLoad.lastKey().getLoad() +
+1145 " leastloaded=" + serversByLoad.firstKey().getLoad());
+1146 }
+1147 return false;
+1148 }
+1149 return true;
+1150 }
+1151
+1152 /**
+1153 * Subclasses should implement this to return true if the cluster has nodes that hosts
+1154 * multiple replicas for the same region, or, if there are multiple racks and the same
+1155 * rack hosts replicas of the same region
+1156 * @param c Cluster information
+1157 * @return whether region replicas are currently co-located
+1158 */
+1159 protected boolean areSomeRegionReplicasColocated(Cluster c) {
+1160 return false;
+1161 }
+1162
+1163 /**
+1164 * Generates a bulk assignment plan to be used on cluster startup using a
+1165 * simple round-robin assignment.
+1166 * <p>
+1167 * Takes a list of all the regions and all the servers in the cluster and
+1168 * returns a map of each server to the regions that it should be assigned.
+1169 * <p>
+1170 * Currently implemented as a round-robin assignment. Same invariant as load
+1171 * balancing, all servers holding floor(avg) or ceiling(avg).
+1172 *
+1173 * TODO: Use block locations from HDFS to place regions with their blocks
+1174 *
+1175 * @param regions all regions
+1176 * @param servers all servers
+1177 * @return map of server to the regions it should take, or null if no
+1178 * assignment is possible (ie. no regions or no servers)
+1179 */
+1180 @Override
+1181 public Map<ServerName, List<HRegionInfo>> roundRobinAssignment(List<HRegionInfo> regions,
+1182 List<ServerName> servers) {
+1183 metricsBalancer.incrMiscInvocations();
+1184 Map<ServerName, List<HRegionInfo>> assignments = assignMasterRegions(regions, servers);
+1185 if (assignments != null && !assignments.isEmpty()) {
+1186 servers = new ArrayList<ServerName>(servers);
+1187 // Guarantee not to put other regions on master
+1188 servers.remove(masterServerName);
+1189 List<HRegionInfo> masterRegions = assignments.get(masterServerName);
+1190 if (!masterRegions.isEmpty()) {
+1191 regions = new ArrayList<HRegionInfo>(regions);
+1192 for (HRegionInfo region: masterRegions) {
+1193 regions.remove(region);
+1194 }
+1195 }
+1196 }
+1197 if (regions == null || regions.isEmpty()) {
+1198 return assignments;
+1199 }
+1200
+1201 int numServers = servers == null ? 0 : servers.size();
+1202 if (numServers == 0) {
+1203 LOG.warn("Wanted to do round robin assignment but no servers to assign to");
+1204 return null;
+1205 }
+1206
+1207 // TODO: instead of retainAssignment() and roundRobinAssignment(), we should just run the
+1208 // normal LB.balancerCluster() with unassignedRegions. We only need to have a candidate
+1209 // generator for AssignRegionAction. The LB will ensure the regions are mostly local
+1210 // and balanced. This should also run fast with fewer number of iterations.
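The javadoc above promises the round-robin invariant: after assignment every server holds either floor(regions/servers) or ceil(regions/servers) regions. A toy demonstration of that property, independent of the HBase types:

    import java.util.ArrayList;
    import java.util.List;

    public class RoundRobinInvariantExample {
      public static void main(String[] args) {
        int numRegions = 11, numServers = 3;
        List<List<Integer>> perServer = new ArrayList<List<Integer>>();
        for (int s = 0; s < numServers; s++) {
          perServer.add(new ArrayList<Integer>());
        }
        // Deal regions out like cards: region r goes to server r % numServers.
        for (int r = 0; r < numRegions; r++) {
          perServer.get(r % numServers).add(r);
        }
        // 11 regions over 3 servers -> counts 4, 4, 3: every server holds
        // floor(11/3) = 3 or ceil(11/3) = 4 regions.
        for (int s = 0; s < numServers; s++) {
          System.out.println("server " + s + " holds " + perServer.get(s).size());
        }
      }
    }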
+1211
+1212 if (numServers == 1) { // Only one server, nothing fancy we can do here
+1213 ServerName server = servers.get(0);
+1214 assignments.put(server, new ArrayList<HRegionInfo>(regions));
+1215 return assignments;
+1216 }
+1217
+1218 Cluster cluster = createCluster(servers, regions);
+1219 List<HRegionInfo> unassignedRegions = new ArrayList<HRegionInfo>();
+1220
+1221 roundRobinAssignment(cluster, regions, unassignedRegions,
+1222 servers, assignments);
+1223
+1224 List<HRegionInfo> lastFewRegions = new ArrayList<HRegionInfo>();
+1225 // assign the remaining by going through the list and try to assign to servers one-by-one
+1226 int serverIdx = RANDOM.nextInt(numServers);
+1227 for (HRegionInfo region : unassignedRegions) {
+1228 boolean assigned = false;
+1229 for (int j = 0; j < numServers; j++) { // try all servers one by one
+1230 ServerName serverName = servers.get((j + serverIdx) % numServers);
+1231 if (!cluster.wouldLowerAvailability(region, serverName)) {
+1232 List<HRegionInfo> serverRegions = assignments.get(serverName);
+1233 if (serverRegions == null) {
+1234 serverRegions = new ArrayList<HRegionInfo>();
+1235 assignments.put(serverName, serverRegions);
+1236 }
+1237 serverRegions.add(region);
+1238 cluster.doAssignRegion(region, serverName);
+1239 serverIdx = (j + serverIdx + 1) % numServers; //remain from next server
+1240 assigned = true;
+1241 break;