hbase-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From jdcry...@apache.org
Subject svn commit: r1471050 - in /hbase/trunk: ./ dev-support/ hbase-common/ hbase-examples/ hbase-it/ hbase-server/ hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/
Date Tue, 23 Apr 2013 17:14:52 GMT
Author: jdcryans
Date: Tue Apr 23 17:14:51 2013
New Revision: 1471050

URL: http://svn.apache.org/r1471050
Log:
HBASE-8390  Trunk/0.95 cannot simply compile against Hadoop 1.0
HBASE-8391  StochasticLoadBalancer doesn't call needsBalance

Modified:
    hbase/trunk/dev-support/test-patch.sh
    hbase/trunk/hbase-common/pom.xml
    hbase/trunk/hbase-examples/pom.xml
    hbase/trunk/hbase-it/pom.xml
    hbase/trunk/hbase-server/pom.xml
    hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java
    hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/DefaultLoadBalancer.java
    hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java
    hbase/trunk/pom.xml

Modified: hbase/trunk/dev-support/test-patch.sh
URL: http://svn.apache.org/viewvc/hbase/trunk/dev-support/test-patch.sh?rev=1471050&r1=1471049&r2=1471050&view=diff
==============================================================================
--- hbase/trunk/dev-support/test-patch.sh (original)
+++ hbase/trunk/dev-support/test-patch.sh Tue Apr 23 17:14:51 2013
@@ -375,6 +375,34 @@ checkHadoop20Compile () {
   return 0
 }
 
+###############################################################################
+### Attempt to compile against the hadoop 1.0
+checkHadoop10Compile () {
+  echo ""
+  echo ""
+  echo "======================================================================"
+  echo "======================================================================"
+  echo "    Checking against hadoop 1.0 build"
+  echo "======================================================================"
+  echo "======================================================================"
+  echo ""
+  echo ""
+
+  export MAVEN_OPTS="${MAVEN_OPTS}"
+  # build core and tests
+  $MVN clean test help:active-profiles -X -DskipTests -Dhadoop.profile=1.0 -D${PROJECT_NAME}PatchProcess > $PATCH_DIR/trunk1.0JavacWarnings.txt 2>&1
+  if [[ $? != 0 ]] ; then
+    JIRA_COMMENT="$JIRA_COMMENT
+
+    {color:red}-1 hadoop1.0{color}.  The patch failed to compile against the hadoop 1.0 profile."
+    cleanupAndExit 1
+  fi
+  JIRA_COMMENT="$JIRA_COMMENT
+
+    {color:green}+1 hadoop1.0{color}.  The patch compiles against the hadoop 1.0 profile."
+  return 0
+}
+
 
 ###############################################################################
 ### Check there are no javadoc warnings
@@ -836,6 +864,8 @@ if [[ $? != 0 ]] ; then
   cleanupAndExit 1
 fi
 
+checkHadoop10Compile
+(( RESULT = RESULT + $? ))
 checkHadoop20Compile
 (( RESULT = RESULT + $? ))
 checkJavadocWarnings

Modified: hbase/trunk/hbase-common/pom.xml
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-common/pom.xml?rev=1471050&r1=1471049&r2=1471050&view=diff
==============================================================================
--- hbase/trunk/hbase-common/pom.xml (original)
+++ hbase/trunk/hbase-common/pom.xml Tue Apr 23 17:14:51 2013
@@ -211,11 +211,11 @@
       </properties>
     </profile>
 
-    <!-- profile against Hadoop 1.0.x: This is the default. It has to have the same
-    activation property as the parent Hadoop 1.0.x profile to make sure it gets run at
+    <!-- profile against Hadoop 1.1.x: This is the default. It has to have the same
+    activation property as the parent Hadoop 1.1.x profile to make sure it gets run at
     the same time. -->
     <profile>
-      <id>hadoop-1.0</id>
+      <id>hadoop-1.1</id>
       <activation>
         <property>
           <name>!hadoop.profile</name>
@@ -229,6 +229,29 @@
       </dependencies>
     </profile>
 
+    <!-- profile against Hadoop 1.0.x: 
+          mvn -Dhadoop.profile=1.0
+    -->
+    <profile>
+      <id>hadoop-1.0</id>
+      <activation>
+        <property>
+          <name>hadoop.profile</name>
+          <value>1.0</value>
+        </property>
+      </activation>
+      <dependencies>
+        <dependency>
+          <groupId>org.apache.hadoop</groupId>
+          <artifactId>hadoop-core</artifactId>
+        </dependency>
+        <dependency>
+          <groupId>commons-io</groupId>
+          <artifactId>commons-io</artifactId>
+        </dependency>
+      </dependencies>
+    </profile>
+
     <!--
       profile for building against Hadoop 2.0.0-alpha. Activate using:
        mvn -Dhadoop.profile=2.0

Modified: hbase/trunk/hbase-examples/pom.xml
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-examples/pom.xml?rev=1471050&r1=1471049&r2=1471050&view=diff
==============================================================================
--- hbase/trunk/hbase-examples/pom.xml (original)
+++ hbase/trunk/hbase-examples/pom.xml Tue Apr 23 17:14:51 2013
@@ -110,11 +110,11 @@
      <!-- Profiles for building against different hadoop versions -->
      <!-- There are a lot of common dependencies used here, should investigate
 if we can combine these profiles somehow -->
-     <!-- profile against Hadoop 1.0.x: This is the default. It has to have the same
-  activation property as the parent Hadoop 1.0.x profile to make sure it gets run at
+     <!-- profile against Hadoop 1.1.x: This is the default. It has to have the same
+  activation property as the parent Hadoop 1.1.x profile to make sure it gets run at
   the same time. -->
      <profile>
-         <id>hadoop-1.0</id>
+         <id>hadoop-1.1</id>
          <activation>
              <property>
                  <name>!hadoop.profile</name>

Modified: hbase/trunk/hbase-it/pom.xml
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-it/pom.xml?rev=1471050&r1=1471049&r2=1471050&view=diff
==============================================================================
--- hbase/trunk/hbase-it/pom.xml (original)
+++ hbase/trunk/hbase-it/pom.xml Tue Apr 23 17:14:51 2013
@@ -186,11 +186,11 @@
       </properties>
     </profile>
 
-    <!-- profile against Hadoop 1.0.x: This is the default. It has to have the same
-    activation property as the parent Hadoop 1.0.x profile to make sure it gets run at
+    <!-- profile against Hadoop 1.1.x: This is the default. It has to have the same
+    activation property as the parent Hadoop 1.1.x profile to make sure it gets run at
     the same time. -->
     <profile>
-      <id>hadoop-1.0</id>
+      <id>hadoop-1.1</id>
       <activation>
         <property>
           <name>!hadoop.profile</name>

Modified: hbase/trunk/hbase-server/pom.xml
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/pom.xml?rev=1471050&r1=1471049&r2=1471050&view=diff
==============================================================================
--- hbase/trunk/hbase-server/pom.xml (original)
+++ hbase/trunk/hbase-server/pom.xml Tue Apr 23 17:14:51 2013
@@ -564,11 +564,11 @@
     <!-- Profiles for building against different hadoop versions -->
     <!-- There are a lot of common dependencies used here, should investigate
     if we can combine these profiles somehow -->
-    <!-- profile against Hadoop 1.0.x: This is the default. It has to have the same
-    activation property as the parent Hadoop 1.0.x profile to make sure it gets run at
+    <!-- profile against Hadoop 1.1.x: This is the default. It has to have the same
+    activation property as the parent Hadoop 1.1.x profile to make sure it gets run at
     the same time. -->
     <profile>
-      <id>hadoop-1.0</id>
+      <id>hadoop-1.1</id>
       <activation>
         <property>
           <name>!hadoop.profile</name>
@@ -585,6 +585,25 @@
         </dependency>
       </dependencies>
     </profile>
+    <profile>
+      <id>hadoop-1.0</id>
+      <activation>
+        <property>
+          <name>hadoop.profile</name>
+          <value>1.0</value>
+        </property>
+      </activation>
+      <dependencies>
+        <dependency>
+          <groupId>org.apache.hadoop</groupId>
+          <artifactId>hadoop-core</artifactId>
+        </dependency>
+        <dependency>
+          <groupId>org.apache.hadoop</groupId>
+          <artifactId>hadoop-test</artifactId>
+        </dependency>
+      </dependencies>
+    </profile>
     <!--
       profile for building against Hadoop 2.0.0-alpha. Activate using:
        mvn -Dhadoop.profile=2.0

Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java?rev=1471050&r1=1471049&r2=1471050&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java Tue Apr 23 17:14:51 2013
@@ -25,6 +25,7 @@ import java.util.Map.Entry;
 import java.util.Random;
 import java.util.Set;
 import java.util.TreeMap;
+import java.util.NavigableMap;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -294,13 +295,25 @@ public abstract class BaseLoadBalancer i
   }
 
   protected boolean needsBalance(ClusterLoadState cs) {
+    if (cs.getNumServers() == 0) {
+      LOG.debug("numServers=0 so skipping load balancing");
+      return false;
+    }
     // Check if we even need to do any load balancing
-    float average = cs.getLoadAverage(); // for logging
     // HBASE-3681 check sloppiness first
+    float average = cs.getLoadAverage(); // for logging
     int floor = (int) Math.floor(average * (1 - slop));
     int ceiling = (int) Math.ceil(average * (1 + slop));
-
-    return cs.getMinLoad() > ceiling || cs.getMaxLoad() < floor;
+    if (!(cs.getMinLoad() > ceiling || cs.getMaxLoad() < floor)) {
+      NavigableMap<ServerAndLoad, List<HRegionInfo>> serversByLoad = cs.getServersByLoad();
+      LOG.info("Skipping load balancing because balanced cluster; " +
+          "servers=" + cs.getNumServers() + " " +
+          "regions=" + cs.getNumRegions() + " average=" + average + " " +
+          "mostloaded=" + serversByLoad.lastKey().getLoad() +
+          " leastloaded=" + serversByLoad.firstKey().getLoad());
+      return false;
+    }
+    return true;
   }
 
   /**

Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/DefaultLoadBalancer.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/DefaultLoadBalancer.java?rev=1471050&r1=1471049&r2=1471050&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/DefaultLoadBalancer.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/DefaultLoadBalancer.java Tue Apr 23 17:14:51 2013
@@ -183,29 +183,13 @@ public class DefaultLoadBalancer extends
     boolean emptyRegionServerPresent = false;
     long startTime = System.currentTimeMillis();
 
-
     ClusterLoadState cs = new ClusterLoadState(clusterMap);
 
+    if (!this.needsBalance(cs)) return null;
+    
     int numServers = cs.getNumServers();
-    if (numServers == 0) {
-      LOG.debug("numServers=0 so skipping load balancing");
-      return null;
-    }
     NavigableMap<ServerAndLoad, List<HRegionInfo>> serversByLoad = cs.getServersByLoad();
-
     int numRegions = cs.getNumRegions();
-
-    if (!this.needsBalance(cs)) {
-      // Skipped because no server outside (min,max) range
-      float average = cs.getLoadAverage(); // for logging
-      LOG.info("Skipping load balancing because balanced cluster; " +
-        "servers=" + numServers + " " +
-        "regions=" + numRegions + " average=" + average + " " +
-        "mostloaded=" + serversByLoad.lastKey().getLoad() +
-        " leastloaded=" + serversByLoad.firstKey().getLoad());
-      return null;
-    }
-
     int min = numRegions / numServers;
     int max = numRegions % numServers == 0 ? min : min + 1;
 

Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java?rev=1471050&r1=1471049&r2=1471050&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java Tue Apr 23 17:14:51 2013
@@ -116,7 +116,7 @@ public class StochasticLoadBalancer exte
   // values are defaults
   private int maxSteps = 15000;
   private int stepsPerRegion = 110;
-  private long maxRunningTime = 1 * 60 * 1000; //5 min
+  private long maxRunningTime = 60 * 1000; //1 min
   private int maxMoves = 600;
   private int numRegionLoadsToRemember = 15;
   private float loadMultiplier = 100;
@@ -179,10 +179,8 @@ public class StochasticLoadBalancer exte
    */
   @Override
   public List<RegionPlan> balanceCluster(Map<ServerName, List<HRegionInfo>> clusterState) {
-
-    // No need to balance a one node cluster.
-    if (clusterState.size() <= 1) {
-      LOG.debug("Skipping load balance as cluster has only one node.");
+    
+    if (!needsBalance(new ClusterLoadState(clusterState))) {
       return null;
     }
 
@@ -242,7 +240,7 @@ public class StochasticLoadBalancer exte
       List<RegionPlan> plans = createRegionPlans(cluster);
 
       if (LOG.isDebugEnabled()) {
-        LOG.debug("Finished computing new laod balance plan.  Computation took "
+        LOG.debug("Finished computing new load balance plan.  Computation took "
             + (endTime - startTime) + "ms to try " + step
             + " different iterations.  Found a solution that moves " + plans.size()
            + " regions; Going from a computed cost of " + initCost + " to a new cost of "

Modified: hbase/trunk/pom.xml
URL: http://svn.apache.org/viewvc/hbase/trunk/pom.xml?rev=1471050&r1=1471049&r2=1471050&view=diff
==============================================================================
--- hbase/trunk/pom.xml (original)
+++ hbase/trunk/pom.xml Tue Apr 23 17:14:51 2013
@@ -1410,9 +1410,9 @@
     profiles with activation properties matching the profile here.
     Generally, it should be sufficient to copy the first
     few lines of the profile you want to match.  -->
-    <!-- profile against Hadoop 1.0.x: This is the default. -->
+    <!-- profile against Hadoop 1.1.x: This is the default. -->
     <profile>
-      <id>hadoop-1.0</id>
+      <id>hadoop-1.1</id>
       <activation>
         <property>
           <name>!hadoop.profile</name>
@@ -1467,6 +1467,68 @@
         </dependencies>
       </dependencyManagement>
     </profile>
+
+    <!-- profile for building against Hadoop 1.0.x: -->
+    <profile>
+      <id>hadoop-1.0</id>
+      <activation>
+        <property>
+          <name>hadoop.profile</name>
+          <value>1.0</value>
+        </property>
+      </activation>
+      <modules>
+        <module>hbase-hadoop1-compat</module>
+      </modules>
+      <properties>
+        <hadoop.version>1.0.4</hadoop.version>
+        <!-- Need to set this for the Hadoop 1 compat module -->
+        <hadoop-one.version>${hadoop.version}</hadoop-one.version>
+        <slf4j.version>1.4.3</slf4j.version>
+        <compat.module>hbase-hadoop1-compat</compat.module>
+        <assembly.file>src/main/assembly/hadoop-one-compat.xml</assembly.file>
+      </properties>
+      <dependencyManagement>
+        <dependencies>
+          <dependency>
+            <groupId>org.apache.hadoop</groupId>
+            <artifactId>hadoop-core</artifactId>
+            <version>${hadoop.version}</version>
+            <optional>true</optional>
+            <exclusions>
+              <exclusion>
+                <groupId>hsqldb</groupId>
+                <artifactId>hsqldb</artifactId>
+              </exclusion>
+              <exclusion>
+                <groupId>net.sf.kosmosfs</groupId>
+                <artifactId>kfs</artifactId>
+              </exclusion>
+              <exclusion>
+                <groupId>org.eclipse.jdt</groupId>
+                <artifactId>core</artifactId>
+              </exclusion>
+              <exclusion>
+                <groupId>net.java.dev.jets3t</groupId>
+                <artifactId>jets3t</artifactId>
+              </exclusion>
+              <exclusion>
+                <groupId>oro</groupId>
+                <artifactId>oro</artifactId>
+              </exclusion>
+            </exclusions>
+          </dependency>
+          <dependency>
+            <groupId>org.apache.hadoop</groupId>
+            <artifactId>hadoop-test</artifactId>
+            <version>${hadoop.version}</version>
+            <optional>true</optional>
+            <scope>test</scope>
+          </dependency>
+        </dependencies>
+      </dependencyManagement>
+    </profile>
+
     <!-- profile for building against Hadoop 2.0.x
     Activate using: mvn -Dhadoop.profile=2.0 -->
     <profile>



Mime
View raw message