ambari-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From d...@apache.org
Subject ambari git commit: AMBARI-14156 Analyze configs changes done to 500 node cluster for AMS config updates (dsen)
Date Wed, 02 Dec 2015 17:27:52 GMT
Repository: ambari
Updated Branches:
  refs/heads/branch-2.1 7061caeae -> d27589ce9


AMBARI-14156 Analyze configs changes done to 500 node cluster for AMS config updates (dsen)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/d27589ce
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/d27589ce
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/d27589ce

Branch: refs/heads/branch-2.1
Commit: d27589ce991f9500ef4a69e71323c7b1f68693ec
Parents: 7061cae
Author: Dmytro Sen <dsen@apache.org>
Authored: Wed Dec 2 19:14:54 2015 +0200
Committer: Dmytro Sen <dsen@apache.org>
Committed: Wed Dec 2 19:16:27 2015 +0200

----------------------------------------------------------------------
 .../server/upgrade/UpgradeCatalog213.java       | 22 +++++++-
 .../0.1.0/configuration/ams-hbase-site.xml      |  8 +++
 .../server/upgrade/UpgradeCatalog213Test.java   | 58 +++++++++++++++++++-
 3 files changed, 84 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/d27589ce/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog213.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog213.java b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog213.java
index d0cb87d..b614b02 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog213.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog213.java
@@ -86,10 +86,14 @@ public class UpgradeCatalog213 extends AbstractUpgradeCatalog {
   private static final String TOPOLOGY_CONFIG = "topology";
   private static final String KAFKA_BROKER = "kafka-broker";
   private static final String KAFKA_ENV_CONFIG = "kafka-env";
-  private static final String KAFKA_ENV_CONTENT_KERBEROS_PARAMS = "export KAFKA_KERBEROS_PARAMS={{kafka_kerberos_params}}";
+  private static final String KAFKA_ENV_CONTENT_KERBEROS_PARAMS =
+    "export KAFKA_KERBEROS_PARAMS={{kafka_kerberos_params}}";
   private static final String AMS_ENV = "ams-env";
   private static final String AMS_HBASE_ENV = "ams-hbase-env";
   private static final String AMS_SITE = "ams-site";
+  private static final String AMS_HBASE_SITE = "ams-hbase-site";
+  private static final String AMS_HBASE_SITE_ZK_TIMEOUT_PROPERTY =
+    "zookeeper.session.timeout.localHBaseCluster";
   private static final String HBASE_ENV_CONFIG = "hbase-env";
   private static final String FLUME_ENV_CONFIG = "flume-env";
   private static final String HIVE_SITE_CONFIG = "hive-site";
@@ -214,9 +218,9 @@ public class UpgradeCatalog213 extends AbstractUpgradeCatalog {
 
   private void executeBlueprintDDLUpdates() throws AmbariException, SQLException {
     dbAccessor.addColumn(BLUEPRINT_TABLE, new DBAccessor.DBColumnInfo(SECURITY_TYPE_COLUMN,
-        String.class, 32, "NONE", false));
+      String.class, 32, "NONE", false));
     dbAccessor.addColumn(BLUEPRINT_TABLE, new DBAccessor.DBColumnInfo(SECURITY_DESCRIPTOR_REF_COLUMN,
-        String.class, null, null, true));
+      String.class, null, null, true));
   }
 
   /**
@@ -1052,6 +1056,18 @@ public class UpgradeCatalog213 extends AbstractUpgradeCatalog {
 
            updateConfigurationPropertiesForCluster(cluster, AMS_SITE, newProperties, true, true);
           }
+
+          Config amsHbaseSite = cluster.getDesiredConfigByType(AMS_HBASE_SITE);
+          if (amsHbaseSite != null) {
+            Map<String, String> amsHbaseSiteProperties = amsHbaseSite.getProperties();
+            String zkTimeout = amsHbaseSiteProperties.get(AMS_HBASE_SITE_ZK_TIMEOUT_PROPERTY);
+            // if old default, set new default
+            if ("20000".equals(zkTimeout)) {
+              Map<String, String> newProperties = new HashMap<>();
+              newProperties.put(AMS_HBASE_SITE_ZK_TIMEOUT_PROPERTY, "120000");
+              updateConfigurationPropertiesForCluster(cluster, AMS_HBASE_SITE, newProperties, true, true);
+            }
+          }
         }
       }
     }

http://git-wip-us.apache.org/repos/asf/ambari/blob/d27589ce/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/configuration/ams-hbase-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/configuration/ams-hbase-site.xml b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/configuration/ams-hbase-site.xml
index 3f4a9d4..d91bce1 100644
--- a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/configuration/ams-hbase-site.xml
+++ b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/configuration/ams-hbase-site.xml
@@ -187,6 +187,14 @@
     </description>
   </property>
   <property>
+    <name>hbase.hregion.max.filesize</name>
+    <value>4294967296</value>
+    <description>
+      Maximum HFile size. If the sum of the sizes of a region’s HFiles has grown
+      to exceed this value, the region is split in two. Default is 10Gb.
+    </description>
+  </property>
+  <property>
     <name>hbase.hregion.memstore.block.multiplier</name>
     <value>4</value>
     <description>

http://git-wip-us.apache.org/repos/asf/ambari/blob/d27589ce/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog213Test.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog213Test.java b/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog213Test.java
index b561aa0..ee01ab0 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog213Test.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog213Test.java
@@ -790,6 +790,62 @@ public class UpgradeCatalog213Test {
   }
 
   @Test
+  public void testAmsHbaseSiteUpdateConfigs() throws Exception{
+
+    Map<String, String> oldPropertiesAmsHbaseSite = new HashMap<String, String>() {
+      {
+        //Including only those properties that might be present in an older version.
+        put("zookeeper.session.timeout.localHBaseCluster", String.valueOf(20000));
+      }
+    };
+    Map<String, String> newPropertiesAmsSite = new HashMap<String, String>() {
+      {
+        put("zookeeper.session.timeout.localHBaseCluster", String.valueOf(120000));
+      }
+    };
+    EasyMockSupport easyMockSupport = new EasyMockSupport();
+
+    Clusters clusters = easyMockSupport.createNiceMock(Clusters.class);
+    final Cluster cluster = easyMockSupport.createNiceMock(Cluster.class);
+    Config mockAmsHbaseSite = easyMockSupport.createNiceMock(Config.class);
+
+    expect(clusters.getClusters()).andReturn(new HashMap<String, Cluster>() {{
+      put("normal", cluster);
+    }}).once();
+    expect(cluster.getDesiredConfigByType("ams-hbase-site")).andReturn(mockAmsHbaseSite).atLeastOnce();
+    expect(mockAmsHbaseSite.getProperties()).andReturn(oldPropertiesAmsHbaseSite).atLeastOnce();
+
+    Injector injector = easyMockSupport.createNiceMock(Injector.class);
+    expect(injector.getInstance(Gson.class)).andReturn(null).anyTimes();
+    expect(injector.getInstance(MaintenanceStateHelper.class)).andReturn(null).anyTimes();
+    expect(injector.getInstance(KerberosHelper.class)).andReturn(createNiceMock(KerberosHelper.class)).anyTimes();
+
+    replay(injector, clusters, mockAmsHbaseSite, cluster);
+
+    AmbariManagementControllerImpl controller = createMockBuilder(AmbariManagementControllerImpl.class)
+      .addMockedMethod("createConfiguration")
+      .addMockedMethod("getClusters", new Class[] { })
+      .withConstructor(createNiceMock(ActionManager.class), clusters, injector)
+      .createNiceMock();
+
+    Injector injector2 = easyMockSupport.createNiceMock(Injector.class);
+    Capture<ConfigurationRequest> configurationRequestCapture = EasyMock.newCapture();
+    ConfigurationResponse configurationResponseMock = easyMockSupport.createMock(ConfigurationResponse.class);
+
+    expect(injector2.getInstance(AmbariManagementController.class)).andReturn(controller).anyTimes();
+    expect(controller.getClusters()).andReturn(clusters).anyTimes();
+    expect(controller.createConfiguration(capture(configurationRequestCapture))).andReturn(configurationResponseMock).once();
+
+    replay(controller, injector2, configurationResponseMock);
+    new UpgradeCatalog213(injector2).updateAMSConfigs();
+    easyMockSupport.verifyAll();
+
+    ConfigurationRequest configurationRequest = configurationRequestCapture.getValue();
+    Map<String, String> updatedProperties = configurationRequest.getProperties();
+    assertTrue(Maps.difference(newPropertiesAmsSite, updatedProperties).areEqual());
+  }
+
+  @Test
   public void testUpdateAlertDefinitions() {
     EasyMockSupport easyMockSupport = new EasyMockSupport();
     UpgradeCatalog213 upgradeCatalog213 = new UpgradeCatalog213(injector);
@@ -932,7 +988,7 @@ public class UpgradeCatalog213Test {
             Map.class, boolean.class, boolean.class)
         .createMock();
     upgradeCatalog213.updateConfigurationPropertiesForCluster(mockClusterExpected,
-        "kafka-env", updates, true, false);
+      "kafka-env", updates, true, false);
     expectLastCall().once();
 
     expect(mockAmbariManagementController.createConfiguration(EasyMock.<ConfigurationRequest>anyObject())).andReturn(mockConfigurationResponse);


Mime
View raw message