ambari-commits mailing list archives

From nc...@apache.org
Subject ambari git commit: AMBARI-16854. Failed to install packages for HDP 2.4 and 2.5 (ncole)
Date Wed, 25 May 2016 15:07:26 GMT
Repository: ambari
Updated Branches:
  refs/heads/branch-2.4 f0991217c -> cbccbb01e


AMBARI-16854. Failed to install packages for HDP 2.4 and 2.5 (ncole)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/cbccbb01
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/cbccbb01
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/cbccbb01

Branch: refs/heads/branch-2.4
Commit: cbccbb01ef15408bb6d2d910ef43d3b12b850c7d
Parents: f099121
Author: Nate Cole <ncole@hortonworks.com>
Authored: Wed May 25 11:07:13 2016 -0400
Committer: Nate Cole <ncole@hortonworks.com>
Committed: Wed May 25 11:07:13 2016 -0400

----------------------------------------------------------------------
 .../ClusterStackVersionResourceProvider.java    |  86 ++++++++
 ...ClusterStackVersionResourceProviderTest.java | 206 ++++++++++++++++++-
 2 files changed, 291 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/cbccbb01/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProvider.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProvider.java
b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProvider.java
index 9dc5c4d..05ee6c9 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProvider.java
@@ -81,7 +81,9 @@ import org.apache.ambari.server.state.ServiceOsSpecific;
 import org.apache.ambari.server.state.StackId;
 import org.apache.ambari.server.state.repository.VersionDefinitionXml;
 import org.apache.ambari.server.utils.StageUtils;
+import org.apache.ambari.server.utils.VersionUtils;
 import org.apache.commons.lang.StringUtils;
+import org.apache.commons.lang.math.NumberUtils;
 
 import com.google.gson.Gson;
 import com.google.inject.Inject;
@@ -337,6 +339,7 @@ public class ClusterStackVersionResourceProvider extends AbstractControllerResou
       stackId = currentStackVersion;
     }
 
+
     // why does the JSON body parser convert JSON primitives into strings!?
     Float successFactor = INSTALL_PACKAGES_SUCCESS_FACTOR;
     String successFactorProperty = (String) propertyMap.get(CLUSTER_STACK_VERSION_STAGE_SUCCESS_FACTOR);
@@ -353,6 +356,58 @@ public class ClusterStackVersionResourceProvider extends AbstractControllerResou
               desiredRepoVersion, stackId));
     }
 
+    VersionDefinitionXml desiredVersionDefinition = null;
+    try {
+      desiredVersionDefinition = repoVersionEnt.getRepositoryXml();
+    } catch (Exception e) {
+      throw new IllegalArgumentException(
+          String.format("Version %s is backed by a version definition, but it could not be parsed", desiredRepoVersion), e);
+    }
+
+    /*
+    If there is a repository that is already ATTEMPTED to be installed and the version
+    is GREATER than the one trying to install, we must fail (until we can support that via Patch Upgrades)
+
+    For example:
+
+    1. Install 2.3.0.0
+    2. Register and Install 2.5.0.0 (with or without package-version; it gets computed correctly)
+    3. Register 2.4 (without package-version)
+
+    Installation of 2.4 will fail because the way agents invoke installation is to
+    install by name.  if the package-version is not known, then the 'newest' is ALWAYS installed.
+    In this case, 2.5.0.0.  2.4 is never picked up.
+    */
+    for (ClusterVersionEntity clusterVersion : clusterVersionDAO.findByCluster(clName)) {
+      RepositoryVersionEntity clusterRepoVersion = clusterVersion.getRepositoryVersion();
+
+      int compare = compareVersions(clusterRepoVersion.getVersion(), desiredRepoVersion);
+
+      // ignore earlier versions
+      if (compare <= 0) {
+        continue;
+      }
+
+      // !!! the version is greater than the one to install
+
+      // if the stacks are different, then don't fail (further check same-stack version strings)
+      if (!StringUtils.equals(clusterRepoVersion.getStackName(), repoVersionEnt.getStackName())) {
+        continue;
+      }
+
+      // if there is no backing VDF for the desired version, allow the operation (legacy behavior)
+      if (null == desiredVersionDefinition) {
+        continue;
+      }
+
+      // backing VDF does not define the package version, cannot install (allows a VDF with package-version)
+      if (null == desiredVersionDefinition.release.packageVersion) {
+        String msg = String.format("Ambari cannot install version %s.  Version %s is already installed.",
+          desiredRepoVersion, clusterRepoVersion.getVersion());
+        throw new IllegalArgumentException(msg);
+      }
+    }
+
     List<OperatingSystemEntity> operatingSystems = repoVersionEnt.getOperatingSystems();
     Map<String, List<RepositoryEntity>> perOsRepos = new HashMap<String, List<RepositoryEntity>>();
     for (OperatingSystemEntity operatingSystem : operatingSystems) {
@@ -829,4 +884,35 @@ public class ClusterStackVersionResourceProvider extends AbstractControllerResou
     clusterVersionDAO.updateVersions(clusterId, target, current);
   }
 
+  /**
+   * Additional check over {@link VersionUtils#compareVersions(String, String)} that
+   * compares build numbers
+   */
+  private static int compareVersions(String version1, String version2) {
+    // check _exact_ equality
+    if (StringUtils.equals(version1, version2)) {
+      return 0;
+    }
+
+    int compare = VersionUtils.compareVersions(version1, version2);
+    if (0 != compare) {
+      return compare;
+    }
+
+    int v1 = 0;
+    int v2 = 0;
+    if (version1.indexOf('-') > -1) {
+      v1 = NumberUtils.toInt(version1.substring(version1.indexOf('-')), 0);
+    }
+
+    if (version2.indexOf('-') > -1) {
+      v2 = NumberUtils.toInt(version2.substring(version2.indexOf('-')), 0);
+    }
+
+    compare = v2 - v1;
+
+    return (compare == 0) ? 0 : (compare < 0) ? -1 : 1;
+  }
+
+
 }

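A note on the version comparison added above: the new compareVersions() helper first checks exact string equality, then defers to VersionUtils.compareVersions(), and only when the dotted portions tie does it fall back to comparing the numeric build suffix after the '-'. Below is a minimal, dependency-free sketch of that suffix fallback (illustration only; the names compareBuildSuffix and buildNumber are not part of the patch, which uses commons-lang NumberUtils instead):

    // Compare two Ambari-style versions ("2.2.1.0-1000") by build suffix alone.
    static int compareBuildSuffix(String version1, String version2) {
      return Integer.compare(buildNumber(version1), buildNumber(version2));
    }

    // Numeric build suffix after '-', or 0 when absent or non-numeric.
    static int buildNumber(String version) {
      int dash = version.indexOf('-');
      if (dash < 0) {
        return 0;
      }
      try {
        return Integer.parseInt(version.substring(dash + 1));
      } catch (NumberFormatException e) {
        return 0;
      }
    }

For example (hypothetical numbers), an already ATTEMPTED 2.3.4.0-3485 would compare greater than a requested 2.3.4.0-3396, so the new guard would reject the install unless the requested version's VDF pins a package-version.
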
http://git-wip-us.apache.org/repos/asf/ambari/blob/cbccbb01/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProviderTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProviderTest.java
b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProviderTest.java
index 3c9a91d..8347a7b 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProviderTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProviderTest.java
@@ -323,6 +323,8 @@ public class ClusterStackVersionResourceProviderTest {
     TopologyManager topologyManager = injector.getInstance(TopologyManager.class);
     StageUtils.setTopologyManager(topologyManager);
 
+    expect(clusterVersionDAO.findByCluster(anyObject(String.class))).andReturn(Collections.<ClusterVersionEntity>emptyList()).once();
+
     // replay
     replay(managementController, response, clusters, resourceProviderFactory, csvResourceProvider,
            cluster, repositoryVersionDAOMock, configHelper, schDatanode, schNamenode, schAMS, actionManager,
@@ -730,6 +732,8 @@ public class ClusterStackVersionResourceProviderTest {
                     anyObject(StackId.class),
                     anyObject(String.class))).andReturn(repoVersion);
 
+    expect(clusterVersionDAO.findByCluster(anyObject(String.class))).andReturn(Collections.<ClusterVersionEntity>emptyList()).once();
+
     Capture<org.apache.ambari.server.actionmanager.Request> c = Capture.newInstance();
     Capture<ExecuteActionRequest> ear = Capture.newInstance();
 
@@ -961,6 +965,8 @@ public class ClusterStackVersionResourceProviderTest {
     TopologyManager topologyManager = injector.getInstance(TopologyManager.class);
     StageUtils.setTopologyManager(topologyManager);
 
+    expect(clusterVersionDAO.findByCluster(anyObject(String.class))).andReturn(Collections.<ClusterVersionEntity>emptyList()).once();
+
     // replay
     replay(managementController, response, clusters, hdfsService, hbaseService, resourceProviderFactory, csvResourceProvider,
            cluster, repositoryVersionDAOMock, configHelper, schDatanode, schNamenode, schHBM, actionManager,
@@ -1296,8 +1302,206 @@ public class ClusterStackVersionResourceProviderTest {
             new StackId(newDesiredStack.getStackName(), newDesiredStack.getStackVersion()));
   }
 
+  @Test
+  public void testCreateResourcesMixed() throws Exception {
+    Resource.Type type = Resource.Type.ClusterStackVersion;
+
+    AmbariManagementController managementController = createMock(AmbariManagementController.class);
+    Clusters clusters = createNiceMock(Clusters.class);
+    Cluster cluster = createNiceMock(Cluster.class);
+    Map<String, String> hostLevelParams = new HashMap<>();
+    StackId stackId = new StackId("HDP", "2.0.1");
+
+    File f = new File("src/test/resources/hbase_version_test.xml");
+    String xml = IOUtils.toString(new FileInputStream(f));
+    // munge it
+    xml = xml.replace("<package-version>2_3_4_0_3396</package-version>", "");
+
+    StackEntity stack = new StackEntity();
+    stack.setStackName("HDP");
+
+    RepositoryVersionEntity repoVersion = new RepositoryVersionEntity();
+    repoVersion.setStack(stack);
+    repoVersion.setId(1l);
+    repoVersion.setOperatingSystems(OS_JSON);
+    repoVersion.setVersionXml(xml);
+    repoVersion.setVersionXsd("version_definition.xsd");
+    repoVersion.setType(RepositoryType.STANDARD);
+
+
+    Map<String, Host> hostsForCluster = new HashMap<String, Host>();
+    int hostCount = 10;
+    for (int i = 0; i < hostCount; i++) {
+      String hostname = "host" + i;
+      Host host = createNiceMock(hostname, Host.class);
+      expect(host.getHostName()).andReturn(hostname).anyTimes();
+      expect(host.getOsFamily()).andReturn("redhat6").anyTimes();
+      expect(host.getMaintenanceState(EasyMock.anyLong())).andReturn(
+          MaintenanceState.OFF).anyTimes();
+      expect(host.getAllHostVersions()).andReturn(
+          Collections.<HostVersionEntity>emptyList()).anyTimes();
+
+      replay(host);
+      hostsForCluster.put(hostname, host);
+    }
+
+    final ServiceComponentHost schDatanode = createMock(ServiceComponentHost.class);
+    expect(schDatanode.getServiceName()).andReturn("HDFS").anyTimes();
+    expect(schDatanode.getServiceComponentName()).andReturn("DATANODE").anyTimes();
+    final ServiceComponentHost schNamenode = createMock(ServiceComponentHost.class);
+    expect(schNamenode.getServiceName()).andReturn("HDFS").anyTimes();
+    expect(schNamenode.getServiceComponentName()).andReturn("NAMENODE").anyTimes();
+    final ServiceComponentHost schAMS = createMock(ServiceComponentHost.class);
+    expect(schAMS.getServiceName()).andReturn("AMBARI_METRICS").anyTimes();
+    expect(schAMS.getServiceComponentName()).andReturn("METRICS_COLLECTOR").anyTimes();
+    // First host contains versionable components
+    final List<ServiceComponentHost> schsH1 = new ArrayList<ServiceComponentHost>(){{
+      add(schDatanode);
+      add(schNamenode);
+      add(schAMS);
+    }};
+    // Second host does not contain versionable components
+    final List<ServiceComponentHost> schsH2 = new ArrayList<ServiceComponentHost>(){{
+      add(schAMS);
+    }};
+
+
+    ServiceOsSpecific.Package hdfsPackage = new ServiceOsSpecific.Package();
+    hdfsPackage.setName("hdfs");
+    List<ServiceOsSpecific.Package> packages = Collections.singletonList(hdfsPackage);
+
+    ActionManager actionManager = createNiceMock(ActionManager.class);
+
+    RequestStatusResponse response = createNiceMock(RequestStatusResponse.class);
+    ResourceProviderFactory resourceProviderFactory = createNiceMock(ResourceProviderFactory.class);
+    ResourceProvider csvResourceProvider = createNiceMock(ClusterStackVersionResourceProvider.class);
+
+    AbstractControllerResourceProvider.init(resourceProviderFactory);
+
+    Map<String, Map<String, String>> hostConfigTags = new HashMap<String, Map<String, String>>();
+    expect(configHelper.getEffectiveDesiredTags(anyObject(ClusterImpl.class), anyObject(String.class))).andReturn(hostConfigTags);
+
+    expect(managementController.getClusters()).andReturn(clusters).anyTimes();
+    expect(managementController.getAmbariMetaInfo()).andReturn(ambariMetaInfo).anyTimes();
+    expect(managementController.getAuthName()).andReturn("admin").anyTimes();
+    expect(managementController.getActionManager()).andReturn(actionManager).anyTimes();
+    expect(managementController.getJdkResourceUrl()).andReturn("/JdkResourceUrl").anyTimes();
+    expect(managementController.getPackagesForServiceHost(anyObject(ServiceInfo.class),
+            (Map<String, String>) anyObject(List.class), anyObject(String.class))).
+            andReturn(packages).times((hostCount - 1) * 2); // 1 host has no versionable components, other hosts have 2 services
+//            // that's why we don't send commands to it
+
+    expect(resourceProviderFactory.getHostResourceProvider(anyObject(Set.class), anyObject(Map.class),
+            eq(managementController))).andReturn(csvResourceProvider).anyTimes();
+
+    expect(clusters.getCluster(anyObject(String.class))).andReturn(cluster);
+    expect(clusters.getHostsForCluster(anyObject(String.class))).andReturn(
+        hostsForCluster).anyTimes();
+
+    String clusterName = "Cluster100";
+    expect(cluster.getClusterId()).andReturn(1L).anyTimes();
+    expect(cluster.getHosts()).andReturn(hostsForCluster.values()).atLeastOnce();
+    expect(cluster.getServices()).andReturn(new HashMap<String, Service>()).anyTimes();
+    expect(cluster.getCurrentStackVersion()).andReturn(stackId);
+    expect(cluster.getServiceComponentHosts(anyObject(String.class))).andAnswer(new IAnswer<List<ServiceComponentHost>>() {
+      @Override
+      public List<ServiceComponentHost> answer() throws Throwable {
+        String hostname = (String) EasyMock.getCurrentArguments()[0];
+        if (hostname.equals("host2")) {
+          return schsH2;
+        } else {
+          return schsH1;
+        }
+      }
+    }).anyTimes();
+
+    ExecutionCommand executionCommand = createNiceMock(ExecutionCommand.class);
+    ExecutionCommandWrapper executionCommandWrapper = createNiceMock(ExecutionCommandWrapper.class);
+
+    expect(executionCommandWrapper.getExecutionCommand()).andReturn(executionCommand).anyTimes();
+
+    Stage stage = createNiceMock(Stage.class);
+    expect(stage.getExecutionCommandWrapper(anyObject(String.class), anyObject(String.class))).
+            andReturn(executionCommandWrapper).anyTimes();
+
+    expect(executionCommand.getHostLevelParams()).andReturn(hostLevelParams).anyTimes();
+
+    Map<Role, Float> successFactors = new HashMap<>();
+    expect(stage.getSuccessFactors()).andReturn(successFactors).atLeastOnce();
+
+    // Check that we create proper stage count
+    expect(stageFactory.createNew(anyLong(), anyObject(String.class),
+        anyObject(String.class), anyLong(),
+        anyObject(String.class), anyObject(String.class), anyObject(String.class),
+        anyObject(String.class))).andReturn(stage).
+        times((int) Math.ceil(hostCount / MAX_TASKS_PER_STAGE));
+
+    expect(repositoryVersionDAOMock.findByStackAndVersion(anyObject(StackId.class),
+        anyObject(String.class))).andReturn(repoVersion);
+
+    expect(actionManager.getRequestTasks(anyLong())).andReturn(Collections.<HostRoleCommand>emptyList()).anyTimes();
+
+    ClusterEntity clusterEntity = new ClusterEntity();
+    clusterEntity.setClusterId(1l);
+    clusterEntity.setClusterName(clusterName);
+    ClusterVersionEntity cve = new ClusterVersionEntity(clusterEntity,
+            repoVersion, RepositoryVersionState.INSTALL_FAILED, 0, "");
+    expect(clusterVersionDAO.findByClusterAndStackAndVersion(anyObject(String.class),
+            anyObject(StackId.class), anyObject(String.class))).andReturn(cve);
+
+    TopologyManager topologyManager = injector.getInstance(TopologyManager.class);
+    StageUtils.setTopologyManager(topologyManager);
+
+
+    // !!! make it look like there is already a version installed that is greater than the one being installed
+    ClusterVersionEntity bad = new ClusterVersionEntity();
+    RepositoryVersionEntity badRve = new RepositoryVersionEntity();
+    badRve.setStack(stack);
+    badRve.setVersion("2.2.1.0-1000");
+    bad.setRepositoryVersion(badRve);
+
+    expect(clusterVersionDAO.findByCluster(anyObject(String.class))).andReturn(Collections.<ClusterVersionEntity>singletonList(bad)).once();
+
+    // replay
+    replay(managementController, response, clusters, resourceProviderFactory, csvResourceProvider,
+            cluster, repositoryVersionDAOMock, configHelper, schDatanode, schNamenode, schAMS, actionManager,
+            executionCommand, executionCommandWrapper,stage, stageFactory, clusterVersionDAO);
+
+    ResourceProvider provider = AbstractControllerResourceProvider.getResourceProvider(
+        type,
+        PropertyHelper.getPropertyIds(type),
+        PropertyHelper.getKeyPropertyIds(type),
+        managementController);
+
+    injector.injectMembers(provider);
+
+    // add the property map to a set for the request.  add more maps for multiple creates
+    Set<Map<String, Object>> propertySet = new LinkedHashSet<Map<String, Object>>();
+
+    Map<String, Object> properties = new LinkedHashMap<String, Object>();
+
+    // add properties to the request map
+    properties.put(ClusterStackVersionResourceProvider.CLUSTER_STACK_VERSION_CLUSTER_NAME_PROPERTY_ID, "Cluster100");
+    properties.put(ClusterStackVersionResourceProvider.CLUSTER_STACK_VERSION_REPOSITORY_VERSION_PROPERTY_ID, "2.2.0.1-885");
+    properties.put(ClusterStackVersionResourceProvider.CLUSTER_STACK_VERSION_STACK_PROPERTY_ID, "HDP");
+    properties.put(ClusterStackVersionResourceProvider.CLUSTER_STACK_VERSION_VERSION_PROPERTY_ID, "2.1.1");
+
+    propertySet.add(properties);
+
+    // create the request
+    Request request = PropertyHelper.getCreateRequest(propertySet, null);
+
+    try {
+      provider.createResources(request);
+      Assert.fail("Expecting the create to fail due to an already installed version");
+    } catch (IllegalArgumentException iae) {
+      // !!! expected
+    }
+
+  }
+
 
-  public class MockModule extends AbstractModule {
+  private class MockModule extends AbstractModule {
     @Override
     protected void configure() {
       bind(RepositoryVersionDAO.class).toInstance(repositoryVersionDAOMock);


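The new testCreateResourcesMixed exercises that guard end to end: the VDF from hbase_version_test.xml has its <package-version> element stripped, the cluster already carries an ATTEMPTED (INSTALL_FAILED) 2.2.1.0-1000, and the request asks for the lower 2.2.0.1-885, so createResources() is expected to throw IllegalArgumentException. A self-contained sketch of why that comparison comes out positive (a simplified numeric compare standing in for VersionUtils.compareVersions; not code from the patch):

    public class VersionOrderDemo {
      // Simplified stand-in for VersionUtils.compareVersions: numeric, dot-separated,
      // ignoring any "-<build>" suffix.
      static int compareDotted(String v1, String v2) {
        String[] a = v1.split("-")[0].split("\\.");
        String[] b = v2.split("-")[0].split("\\.");
        for (int i = 0; i < Math.max(a.length, b.length); i++) {
          int x = i < a.length ? Integer.parseInt(a[i]) : 0;
          int y = i < b.length ? Integer.parseInt(b[i]) : 0;
          if (x != y) {
            return Integer.compare(x, y);
          }
        }
        return 0;
      }

      public static void main(String[] args) {
        // attempted 2.2.1.0-1000 vs. requested 2.2.0.1-885 -> positive, so the guard trips
        System.out.println(compareDotted("2.2.1.0-1000", "2.2.0.1-885")); // prints 1
      }
    }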