From: jonathanhurley@apache.org
To: commits@ambari.apache.org
Reply-To: ambari-dev@ambari.apache.org
Date: Wed, 31 May 2017 20:21:44 -0000
Message-Id: <978ffd5bb57b411c9c169baf17fe3d1c@git.apache.org>
Subject: [4/4] ambari git commit: Merge branch 'trunk' into branch-feature-AMBARI-12556

Merge branch 'trunk' into branch-feature-AMBARI-12556

Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/fb2076c7
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/fb2076c7
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/fb2076c7

Branch: refs/heads/branch-feature-AMBARI-12556
Commit: fb2076c718c5bcafb1e83c35a841111d30c6204d
Parents: 138aa48 dc30b4e
Author: Jonathan Hurley
Authored: Wed May 31 15:23:58 2017 -0400
Committer: Jonathan Hurley
Committed: Wed May 31 15:23:58 2017 -0400

----------------------------------------------------------------------
 ambari-infra/ambari-infra-manager/README.md     |  92 ++-
 .../ambari-infra-manager/docs/api/swagger.yaml  | 784 +++++++++++++++++++
 .../docs/images/batch-1.png                     | Bin 0 -> 20521 bytes
 .../docs/images/batch-2.png                     | Bin 0 -> 29388 bytes
 .../docs/images/batch-3.png                     | Bin 0 -> 14105 bytes
 .../docs/images/batch-4.png                     | Bin 0 -> 23277 bytes
 .../infra/common/InfraManagerConstants.java     |   2 +-
 .../infra/conf/InfraManagerApiDocConfig.java    |  35 +-
 .../conf/batch/InfraManagerBatchConfig.java     |   8 +-
 .../ambari/infra/job/dummy/DummyItemWriter.java |  13 +
 .../infra/job/dummy/DummyJobListener.java       |  39 +
 .../infra/job/dummy/DummyStepListener.java      |  41 +
 .../apache/ambari/infra/rest/JobResource.java   |   2 +-
 .../internal/UpgradeResourceProvider.java       |   8 +-
 14 files changed, 1009 insertions(+), 15 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/fb2076c7/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/UpgradeResourceProvider.java
----------------------------------------------------------------------
diff --cc ambari-server/src/main/java/org/apache/ambari/server/controller/internal/UpgradeResourceProvider.java
index 6f452b0,a8b7fb4..345bf5f
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/UpgradeResourceProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/UpgradeResourceProvider.java
@@@ -99,11 -117,12 +99,12 @@@ import org.apache.ambari.server.state.s
  import org.apache.ambari.server.state.stack.upgrade.Task;
  import org.apache.ambari.server.state.stack.upgrade.TaskWrapper;
  import org.apache.ambari.server.state.stack.upgrade.UpdateStackGrouping;
 -import org.apache.ambari.server.state.stack.upgrade.UpgradeScope;
  import org.apache.ambari.server.state.stack.upgrade.UpgradeType;
  import org.apache.ambari.server.state.svccomphost.ServiceComponentHostServerActionEvent;
+ import org.apache.ambari.server.utils.StageUtils;
  import org.apache.commons.collections.CollectionUtils;
  import org.apache.commons.lang.StringUtils;
 +import org.codehaus.jackson.annotate.JsonProperty;
  import org.slf4j.Logger;
  import org.slf4j.LoggerFactory;
@@@ -784,18 -995,287 +785,23 @@@ public class UpgradeResourceProvider ex
      return upgradeEntity;
    }
  
-   private RequestStageContainer createRequest(UpgradeContext upgradeContext) {
-   /**
-    * Handles the creation or resetting of configurations based on whether an
-    * upgrade or downgrade is occurring. This method will not do anything when
-    * the target stack version is the same as the cluster's current stack version
-    * since, by definition, no new configurations are automatically created when
-    * upgrading with the same stack (i.e. HDP 2.2.0.0 -> HDP 2.2.1.0).
-    * <p/>
-    * When upgrading or downgrading between stacks (HDP 2.2.0.0 -> HDP 2.3.0.0)
-    * then this will perform the following:
-    * <ul>
-    * <li>Upgrade: Create new configurations that are a merge between the current
-    * stack and the desired stack. If a value has changed between stacks, then
-    * the target stack value should be taken unless the cluster's value differs
-    * from the old stack. This can occur if a property has been customized after
-    * installation.</li>
-    * <li>Downgrade: Reset the latest configurations from the cluster's original
-    * stack. The new configurations that were created on upgrade must be left
-    * intact until all components have been reverted, otherwise heartbeats will
-    * fail due to missing configurations.</li>
-    * </ul>
-    *
-    * @param stackName
-    *          the stack name, such as HDP, HDPWIN, BIGTOP
-    * @param cluster
-    *          the cluster
-    * @param version
-    *          the version
-    * @param direction
-    *          upgrade or downgrade
-    * @param upgradePack
-    *          the upgrade pack used for the upgrade or downgrade. This is
-    *          needed to determine which services are affected.
-    * @param userName
-    *          the username performing the action
-    * @throws AmbariException
-    */
-   public void applyStackAndProcessConfigurations(String stackName, Cluster cluster, String version, Direction direction, UpgradePack upgradePack, String userName)
-       throws AmbariException {
-     RepositoryVersionEntity targetRve = s_repoVersionDAO.findByStackNameAndVersion(stackName, version);
-     if (null == targetRve) {
-       LOG.info("Could not find version entity for {}; not setting new configs", version);
-       return;
-     }
-
-     if (null == userName) {
-       userName = getManagementController().getAuthName();
-     }
-
-     // if the current and target stacks are the same (i.e. HDP 2.2.0.0 -> 2.2.1.0)
-     // then we should never do anything with configs on either upgrade or
-     // downgrade; however if we are going across stacks, we have to do the stack
-     // checks differently depending on whether this is an upgrade or downgrade
-     StackEntity targetStack = targetRve.getStack();
-     StackId currentStackId = cluster.getCurrentStackVersion();
-     StackId desiredStackId = cluster.getDesiredStackVersion();
-     StackId targetStackId = new StackId(targetStack);
-
-     // only change configs if moving to a different stack
-     switch (direction) {
-       case UPGRADE:
-         if (currentStackId.equals(targetStackId)) {
-           return;
-         }
-         break;
-       case DOWNGRADE:
-         if (desiredStackId.equals(targetStackId)) {
-           return;
-         }
-         break;
-     }
-
-     Map<String, Map<String, String>> newConfigurationsByType = null;
-     ConfigHelper configHelper = getManagementController().getConfigHelper();
-
-     if (direction == Direction.UPGRADE) {
-       // populate a map of default configurations for the old stack (this is
-       // used when determining if a property has been customized and should be
-       // overridden with the new stack value)
-       Map<String, Map<String, String>> oldStackDefaultConfigurationsByType = configHelper.getDefaultProperties(
-           currentStackId, cluster, true);
-
-       // populate a map with default configurations from the new stack
-       newConfigurationsByType = configHelper.getDefaultProperties(targetStackId, cluster, true);
-
-       // We want to skip updating config-types of services that are not in the
-       // upgrade pack. Care should be taken as some config-types could be in
-       // services that are in and out of the upgrade pack. We should never
-       // ignore config-types of services in the upgrade pack.
-       Set<String> skipConfigTypes = new HashSet<>();
-       Set<String> upgradePackServices = new HashSet<>();
-       Set<String> upgradePackConfigTypes = new HashSet<>();
-       AmbariMetaInfo ambariMetaInfo = s_metaProvider.get();
-
-       // ensure that we get the service info from the target stack
-       // (since it could include new configuration types for a service)
-       Map<String, ServiceInfo> stackServicesMap = ambariMetaInfo.getServices(
-           targetStack.getStackName(), targetStack.getStackVersion());
-
-       for (Grouping group : upgradePack.getGroups(direction)) {
-         for (UpgradePack.OrderService service : group.services) {
-           if (service.serviceName == null || upgradePackServices.contains(service.serviceName)) {
-             // no need to re-process a service that has already been looked at
-             continue;
-           }
-
-           upgradePackServices.add(service.serviceName);
-           ServiceInfo serviceInfo = stackServicesMap.get(service.serviceName);
-           if (serviceInfo == null) {
-             continue;
-           }
-
-           // add every configuration type for all services defined in the
-           // upgrade pack
-           Set<String> serviceConfigTypes = serviceInfo.getConfigTypeAttributes().keySet();
-           for (String serviceConfigType : serviceConfigTypes) {
-             if (!upgradePackConfigTypes.contains(serviceConfigType)) {
-               upgradePackConfigTypes.add(serviceConfigType);
-             }
-           }
-         }
-       }
-
-       // build a set of configurations that should not be merged since their
-       // services are not installed
-       Set<String> servicesNotInUpgradePack = new HashSet<>(stackServicesMap.keySet());
-       servicesNotInUpgradePack.removeAll(upgradePackServices);
-       for (String serviceNotInUpgradePack : servicesNotInUpgradePack) {
-         ServiceInfo serviceInfo = stackServicesMap.get(serviceNotInUpgradePack);
-         Set<String> configTypesOfServiceNotInUpgradePack = serviceInfo.getConfigTypeAttributes().keySet();
-         for (String configType : configTypesOfServiceNotInUpgradePack) {
-           if (!upgradePackConfigTypes.contains(configType) && !skipConfigTypes.contains(configType)) {
-             skipConfigTypes.add(configType);
-           }
-         }
-       }
-
-       // remove any configurations from the target stack that are not used
-       // because the services are not installed
-       Iterator<String> iterator = newConfigurationsByType.keySet().iterator();
-       while (iterator.hasNext()) {
-         String configType = iterator.next();
-         if (skipConfigTypes.contains(configType)) {
-           LOG.info("Stack Upgrade: Removing configs for config-type {}", configType);
-           iterator.remove();
-         }
-       }
-
-       // now that the map has been populated with the default configurations
-       // from the stack/service, overlay the existing configurations on top
-       Map<String, DesiredConfig> existingDesiredConfigurationsByType = cluster.getDesiredConfigs();
-       for (Map.Entry<String, DesiredConfig> existingEntry : existingDesiredConfigurationsByType.entrySet()) {
-         String configurationType = existingEntry.getKey();
-         if (skipConfigTypes.contains(configurationType)) {
-           LOG.info("Stack Upgrade: Skipping config-type {} as upgrade-pack contains no updates to its service", configurationType);
-           continue;
-         }
-
-         // NPE sanity, although shouldn't even happen since we are iterating
-         // over the desired configs to start with
-         Config currentClusterConfig = cluster.getDesiredConfigByType(configurationType);
-         if (null == currentClusterConfig) {
-           continue;
-         }
-
-         // get current stack default configurations on install
-         Map<String, String> configurationTypeDefaultConfigurations = oldStackDefaultConfigurationsByType.get(
-             configurationType);
-
-         // NPE sanity for current stack defaults
-         if (null == configurationTypeDefaultConfigurations) {
-           configurationTypeDefaultConfigurations = Collections.emptyMap();
-         }
-
-         // get the existing configurations
-         Map<String, String> existingConfigurations = currentClusterConfig.getProperties();
-
-         // if the new stack configurations don't have the type, then simply add
-         // all of the existing in
-         Map<String, String> newDefaultConfigurations = newConfigurationsByType.get(
-             configurationType);
-
-         if (null == newDefaultConfigurations) {
-           newConfigurationsByType.put(configurationType, existingConfigurations);
-           continue;
-         } else {
-           // TODO: should we remove existing configs whose value is NULL even
-           // though they don't have a value in the new stack?
-
-           // remove any configs in the new stack whose value is NULL, unless
-           // they currently exist and the value is not NULL
-           Iterator<Map.Entry<String, String>> iter = newDefaultConfigurations.entrySet().iterator();
-           while (iter.hasNext()) {
-             Map.Entry<String, String> entry = iter.next();
-             if (entry.getValue() == null) {
-               iter.remove();
-             }
-           }
-         }
-
-         // for every existing configuration, see if an entry exists; if it does
-         // not exist, then put it in the map; otherwise we'll have to compare
-         // the existing value to the original stack value to see if it has been
-         // customized
-         for (Map.Entry<String, String> existingConfigurationEntry : existingConfigurations.entrySet()) {
-           String existingConfigurationKey = existingConfigurationEntry.getKey();
-           String existingConfigurationValue = existingConfigurationEntry.getValue();
-
-           // if there is already an entry, we now have to try to determine if
-           // the value was customized after stack installation
-           if (newDefaultConfigurations.containsKey(existingConfigurationKey)) {
-             String newDefaultConfigurationValue = newDefaultConfigurations.get(
-                 existingConfigurationKey);
-
-             if (!StringUtils.equals(existingConfigurationValue, newDefaultConfigurationValue)) {
-               // the new default is different from the existing cluster value;
-               // only override the default value if the existing value differs
-               // from the original stack
-               String oldDefaultValue = configurationTypeDefaultConfigurations.get(
-                   existingConfigurationKey);
-
-               if (!StringUtils.equals(existingConfigurationValue, oldDefaultValue)) {
-                 // at this point, we've determined that there is a difference
-                 // between default values between stacks, but the value was
-                 // also customized, so keep the customized value
-                 newDefaultConfigurations.put(existingConfigurationKey, existingConfigurationValue);
-               }
-             }
-           } else {
-             // there is no entry in the map, so add the existing key/value pair
-             newDefaultConfigurations.put(existingConfigurationKey, existingConfigurationValue);
-           }
-         }
-
-         /*
-          * For every new configuration which does not exist in the existing
-          * configurations, see if it was present in the current stack:
-          *
-          * stack 2.x has foo-site/property (on-ambari-upgrade is false)
-          * stack 2.y has foo-site/property
-          * the current cluster (on 2.x) does not have it
-          *
-          * In this case, we should NOT add it back, as clearly stack advisor
-          * has removed it.
-          */
-         Iterator<Map.Entry<String, String>> newDefaultConfigurationsIterator = newDefaultConfigurations.entrySet().iterator();
-         while (newDefaultConfigurationsIterator.hasNext()) {
-           Map.Entry<String, String> newConfigurationEntry = newDefaultConfigurationsIterator.next();
-           String newConfigurationPropertyName = newConfigurationEntry.getKey();
-           if (configurationTypeDefaultConfigurations.containsKey(newConfigurationPropertyName)
-               && !existingConfigurations.containsKey(newConfigurationPropertyName)) {
-             LOG.info(
-                 "The property {}/{} exists in both {} and {} but is not part of the current set of configurations and will therefore not be included in the configuration merge",
-                 configurationType, newConfigurationPropertyName, currentStackId, targetStackId);
-
-             // remove the property so it doesn't get merged in
-             newDefaultConfigurationsIterator.remove();
-           }
-         }
-       }
-     } else {
-       // downgrade
-       cluster.applyLatestConfigurations(cluster.getCurrentStackVersion());
-     }
-
-     // !!! update the stack
-     cluster.setDesiredStackVersion(
-         new StackId(targetStack.getStackName(), targetStack.getStackVersion()), true);
-
-     // !!! configs must be created after setting the stack version
-     if (null != newConfigurationsByType) {
-       configHelper.createConfigTypes(cluster, getManagementController(), newConfigurationsByType,
-           userName, "Configuration created for Upgrade");
-     }
-   }
-
-   private RequestStageContainer createRequest(Cluster cluster, Direction direction, String version) throws AmbariException {
++  private RequestStageContainer createRequest(UpgradeContext upgradeContext) throws AmbariException {
      ActionManager actionManager = getManagementController().getActionManager();
  
      RequestStageContainer requestStages = new RequestStageContainer(
          actionManager.getNextRequestId(), null, s_requestFactory.get(), actionManager);
  
-     requestStages.setRequestContext(String.format("%s to %s", direction.getVerb(true), version));
+     Direction direction = upgradeContext.getDirection();
+     RepositoryVersionEntity repositoryVersion = upgradeContext.getRepositoryVersion();
+
+     requestStages.setRequestContext(String.format("%s %s %s", direction.getVerb(true),
+         direction.getPreposition(), repositoryVersion.getVersion()));
+
++    Cluster cluster = upgradeContext.getCluster();
+     Map<String, Set<String>> clusterHostInfo = StageUtils.getClusterHostInfo(cluster);
+     String clusterHostInfoJson = StageUtils.getGson().toJson(clusterHostInfo);
+     requestStages.setClusterHostInfo(clusterHostInfoJson);
+
      return requestStages;
    }
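----------------------------------------------------------------------
For readers following the removed merge logic above: a minimal, self-contained
sketch of the skip-set computation that applyStackAndProcessConfigurations
performed. Config types owned only by services outside the upgrade pack are
skipped, while config types shared with an in-pack service survive. Plain
collections stand in for AmbariMetaInfo/ServiceInfo here; the class and method
names are illustrative, not Ambari API.

import java.util.Arrays;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

public class SkipConfigTypesSketch {

  // stackServices: service name -> config types it owns in the target stack
  static Set<String> skippedConfigTypes(Map<String, Set<String>> stackServices,
      Set<String> upgradePackServices) {
    // every config type claimed by a service in the upgrade pack is protected
    Set<String> inPackTypes = new HashSet<>();
    for (String service : upgradePackServices) {
      Set<String> types = stackServices.get(service);
      if (types != null) {
        inPackTypes.addAll(types);
      }
    }

    // config types of out-of-pack services are skipped unless also claimed
    // by an in-pack service (shared config types stay protected)
    Set<String> skipped = new HashSet<>();
    for (Map.Entry<String, Set<String>> entry : stackServices.entrySet()) {
      if (upgradePackServices.contains(entry.getKey())) {
        continue;
      }
      for (String type : entry.getValue()) {
        if (!inPackTypes.contains(type)) {
          skipped.add(type);
        }
      }
    }
    return skipped;
  }

  public static void main(String[] args) {
    Map<String, Set<String>> stackServices = new HashMap<>();
    stackServices.put("HDFS", new HashSet<>(Arrays.asList("hdfs-site", "core-site")));
    stackServices.put("SOLR", new HashSet<>(Arrays.asList("solr-site", "core-site")));

    // SOLR is outside the pack, but core-site is shared with HDFS and survives
    System.out.println(skippedConfigTypes(stackServices,
        new HashSet<>(Arrays.asList("HDFS")))); // prints [solr-site]
  }
}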
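----------------------------------------------------------------------
The heart of the removed config merge is a three-way comparison between the
cluster's current value, the old stack default, and the new stack default: a
value still matching the old default adopts the new default, while a customized
value is preserved. Below is a self-contained sketch of that rule, with
Objects.equals standing in for the null-safe StringUtils.equals call used in
the removed code; the method name is illustrative.

import java.util.Objects;

public class ConfigMergeRuleSketch {

  /**
   * @param existing   the cluster's current value for the property
   * @param oldDefault the property's default in the old stack (may be null)
   * @param newDefault the property's default in the new stack
   * @return the value the merged configuration should carry
   */
  static String mergedValue(String existing, String oldDefault, String newDefault) {
    if (Objects.equals(existing, newDefault)) {
      // cluster already matches the new stack; nothing to decide
      return newDefault;
    }
    // the values differ: adopt the new default only if the cluster value is
    // still the untouched old default; a customized value always wins
    return Objects.equals(existing, oldDefault) ? newDefault : existing;
  }

  public static void main(String[] args) {
    // untouched since install -> take the new stack default
    System.out.println(mergedValue("1024m", "1024m", "2048m")); // 2048m
    // customized by an operator -> keep the customization
    System.out.println(mergedValue("4096m", "1024m", "2048m")); // 4096m
  }
}

The same removed code also declines to re-add a property that exists in both
stacks' defaults but is absent from the cluster, on the assumption that stack
advisor deliberately removed it.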
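----------------------------------------------------------------------
The merged createRequest now attaches the cluster topology to the request
stages as JSON. A small sketch of that serialization step, assuming Gson on
the classpath (the diff's StageUtils.getGson() call returns a Gson instance);
the "all_hosts" key and hostnames are made up for illustration.

import com.google.gson.Gson;

import java.util.Arrays;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

public class ClusterHostInfoJsonSketch {
  public static void main(String[] args) {
    // mirrors the Map<String, Set<String>> shape that
    // StageUtils.getClusterHostInfo(cluster) returns in the diff
    Map<String, Set<String>> clusterHostInfo = new HashMap<>();
    clusterHostInfo.put("all_hosts",
        new HashSet<>(Arrays.asList("c6401.ambari.apache.org", "c6402.ambari.apache.org")));

    // StageUtils.getGson().toJson(clusterHostInfo) performs the same conversion
    String clusterHostInfoJson = new Gson().toJson(clusterHostInfo);
    System.out.println(clusterHostInfoJson);
    // e.g. {"all_hosts":["c6401.ambari.apache.org","c6402.ambari.apache.org"]}
  }
}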