From: zhz@apache.org
To: common-commits@hadoop.apache.org
Date: Wed, 09 May 2018 17:30:58 -0000
Subject: [39/50] [abbrv] hadoop git commit: YARN-7137. [YARN-3926] Move newly added APIs to unstable in YARN-3926 branch. Contributed by Wangda Tan.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f17a2e47/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/TestResourcePluginManager.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/TestResourcePluginManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/TestResourcePluginManager.java
new file mode 100644
index 0000000..bcadf76
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/TestResourcePluginManager.java
@@ -0,0 +1,261 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.nodemanager.containermanager.resourceplugin;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.service.ServiceOperations;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.event.Dispatcher;
+import org.apache.hadoop.yarn.server.nodemanager.ContainerExecutor;
+import org.apache.hadoop.yarn.server.nodemanager.Context;
+import org.apache.hadoop.yarn.server.nodemanager.DeletionService;
+import org.apache.hadoop.yarn.server.nodemanager.LinuxContainerExecutor;
+import org.apache.hadoop.yarn.server.nodemanager.LocalDirsHandlerService;
+import org.apache.hadoop.yarn.server.nodemanager.NodeHealthCheckerService;
+import org.apache.hadoop.yarn.server.nodemanager.NodeManager;
+import org.apache.hadoop.yarn.server.nodemanager.NodeManagerTestBase;
+import org.apache.hadoop.yarn.server.nodemanager.NodeStatusUpdater;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.ContainerManagerImpl;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged.PrivilegedOperation;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged.PrivilegedOperationExecutor;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources.CGroupsHandler;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources.ResourceHandler;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources.ResourceHandlerChain;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources.ResourceHandlerException;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.resourceplugin.NodeResourceUpdaterPlugin;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.resourceplugin.ResourcePlugin;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.resourceplugin.ResourcePluginManager;
+import org.apache.hadoop.yarn.server.security.ApplicationACLsManager;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Test;
+
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import static org.mockito.Matchers.any;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
+
+public class TestResourcePluginManager extends NodeManagerTestBase {
+  private NodeManager nm;
+
+  ResourcePluginManager stubResourcePluginmanager() {
+    // Stub ResourcePluginManager
+    final ResourcePluginManager rpm = mock(ResourcePluginManager.class);
+    Map<String, ResourcePlugin> plugins = new HashMap<>();
+
+    // First resource plugin
+    ResourcePlugin resourcePlugin = mock(ResourcePlugin.class);
+    NodeResourceUpdaterPlugin nodeResourceUpdaterPlugin = mock(
+        NodeResourceUpdaterPlugin.class);
+    when(resourcePlugin.getNodeResourceHandlerInstance()).thenReturn(
+        nodeResourceUpdaterPlugin);
+    plugins.put("resource1", resourcePlugin);
+
+    // Second resource plugin
+    resourcePlugin = mock(ResourcePlugin.class);
+    when(resourcePlugin.createResourceHandler(any(Context.class), any(
+        CGroupsHandler.class), any(PrivilegedOperationExecutor.class)))
+        .thenReturn(new CustomizedResourceHandler());
+    plugins.put("resource2", resourcePlugin);
+
+    when(rpm.getNameToPlugins()).thenReturn(plugins);
+    return rpm;
+  }
+
+  @After
+  public void tearDown() {
+    if (nm != null) {
+      try {
+        ServiceOperations.stop(nm);
+      } catch (Throwable t) {
+        // ignore
+      }
+    }
+  }
+
+  private class CustomizedResourceHandler implements ResourceHandler {
+
+    @Override
+    public List<PrivilegedOperation> bootstrap(Configuration configuration)
+        throws ResourceHandlerException {
+      return null;
+    }
+
+    @Override
+    public List<PrivilegedOperation> preStart(Container container)
+        throws ResourceHandlerException {
+      return null;
+    }
+
+    @Override
+    public List<PrivilegedOperation> reacquireContainer(ContainerId containerId)
+        throws ResourceHandlerException {
+      return null;
+    }
+
+    @Override
+    public List<PrivilegedOperation> postComplete(ContainerId containerId)
+        throws ResourceHandlerException {
+      return null;
+    }
+
+    @Override
+    public List<PrivilegedOperation> teardown()
+        throws ResourceHandlerException {
+      return null;
+    }
+  }
+
+  private class MyMockNM extends NodeManager {
+    private final ResourcePluginManager rpm;
+
+    public MyMockNM(ResourcePluginManager rpm) {
+      this.rpm = rpm;
+    }
+
+    @Override
+    protected NodeStatusUpdater createNodeStatusUpdater(Context context,
+        Dispatcher dispatcher, NodeHealthCheckerService healthChecker) {
+      ((NodeManager.NMContext)context).setResourcePluginManager(rpm);
+      return new BaseNodeStatusUpdaterForTest(context, dispatcher, healthChecker,
+          metrics, new BaseResourceTrackerForTest());
+    }
+
+    @Override
+    protected ContainerManagerImpl createContainerManager(Context context,
+        ContainerExecutor exec, DeletionService del,
+        NodeStatusUpdater nodeStatusUpdater,
+        ApplicationACLsManager aclsManager,
+        LocalDirsHandlerService diskhandler) {
+      return new MyContainerManager(context, exec, del, nodeStatusUpdater,
+          metrics, diskhandler);
+    }
+
+    @Override
+    protected ResourcePluginManager createResourcePluginManager() {
+      return rpm;
+    }
+  }
+
+  public class MyLCE extends LinuxContainerExecutor {
+    private PrivilegedOperationExecutor poe =
+        mock(PrivilegedOperationExecutor.class);
+
+    @Override
+    protected PrivilegedOperationExecutor getPrivilegedOperationExecutor() {
+      return poe;
+    }
+  }
+
+  /*
+   * Make sure ResourcePluginManager is initialized during NM start up.
+   */
+  @Test(timeout = 30000)
+  public void testResourcePluginManagerInitialization() throws Exception {
+    final ResourcePluginManager rpm = stubResourcePluginmanager();
+    nm = new MyMockNM(rpm);
+
+    YarnConfiguration conf = createNMConfig();
+    nm.init(conf);
+    verify(rpm, times(1)).initialize(
+        any(Context.class));
+  }
+
+  /*
+   * Make sure ResourcePluginManager is invoked during NM update.
+   */
+  @Test(timeout = 30000)
+  public void testNodeStatusUpdaterWithResourcePluginsEnabled() throws Exception {
+    final ResourcePluginManager rpm = stubResourcePluginmanager();
+
+    nm = new MyMockNM(rpm);
+
+    YarnConfiguration conf = createNMConfig();
+    nm.init(conf);
+    nm.start();
+
+    NodeResourceUpdaterPlugin nodeResourceUpdaterPlugin =
+        rpm.getNameToPlugins().get("resource1")
+            .getNodeResourceHandlerInstance();
+
+    verify(nodeResourceUpdaterPlugin, times(1)).updateConfiguredResource(
+        any(Resource.class));
+  }
+
+  /*
+   * Make sure ResourcePluginManager is used to initialize ResourceHandlerChain
+   */
+  @Test(timeout = 30000)
+  public void testLinuxContainerExecutorWithResourcePluginsEnabled() throws Exception {
+    final ResourcePluginManager rpm = stubResourcePluginmanager();
+    final LinuxContainerExecutor lce = new MyLCE();
+
+    nm = new NodeManager() {
+      @Override
+      protected NodeStatusUpdater createNodeStatusUpdater(Context context,
+          Dispatcher dispatcher, NodeHealthCheckerService healthChecker) {
+        ((NMContext)context).setResourcePluginManager(rpm);
+        return new BaseNodeStatusUpdaterForTest(context, dispatcher, healthChecker,
+            metrics, new BaseResourceTrackerForTest());
+      }
+
+      @Override
+      protected ContainerManagerImpl createContainerManager(Context context,
+          ContainerExecutor exec, DeletionService del,
+          NodeStatusUpdater nodeStatusUpdater,
+          ApplicationACLsManager aclsManager,
+          LocalDirsHandlerService diskhandler) {
+        return new MyContainerManager(context, exec, del, nodeStatusUpdater,
+            metrics, diskhandler);
+      }
+
+      @Override
+      protected ContainerExecutor createContainerExecutor(Configuration conf) {
+        ((NMContext)this.getNMContext()).setResourcePluginManager(rpm);
+        lce.setConf(conf);
+        return lce;
+      }
+    };
+
+    YarnConfiguration conf = createNMConfig();
+
+    nm.init(conf);
+    nm.start();
+
+    ResourceHandler handler = lce.getResourceHandler();
+    Assert.assertNotNull(handler);
+    Assert.assertTrue(handler instanceof ResourceHandlerChain);
+
+    boolean newHandlerAdded = false;
+    for (ResourceHandler h : ((ResourceHandlerChain) handler)
+        .getResourceHandlerList()) {
+      if (h instanceof CustomizedResourceHandler) {
+        newHandlerAdded = true;
+        break;
+      }
+    }
+    Assert.assertTrue("New ResourceHandler should be added", newHandlerAdded);
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f17a2e47/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/gpu/TestGpuDiscoverer.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/gpu/TestGpuDiscoverer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/gpu/TestGpuDiscoverer.java
new file mode 100644
index 0000000..83bace2
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/gpu/TestGpuDiscoverer.java
@@ -0,0 +1,123 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.nodemanager.containermanager.resourceplugin.gpu;
+
+import org.apache.commons.io.FileUtils;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.exceptions.YarnException;
+import org.apache.hadoop.yarn.server.nodemanager.webapp.dao.gpu.GpuDeviceInformation;
+import org.junit.Assert;
+import org.junit.Assume;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.io.File;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.util.List;
+
+public class TestGpuDiscoverer {
+  private String getTestParentFolder() {
+    File f = new File("target/temp/" + TestGpuDiscoverer.class.getName());
+    return f.getAbsolutePath();
+  }
+
+  private void touchFile(File f) throws IOException {
+    new FileOutputStream(f).close();
+  }
+
+  @Before
+  public void before() throws IOException {
+    String folder = getTestParentFolder();
+    File f = new File(folder);
+    FileUtils.deleteDirectory(f);
+    f.mkdirs();
+  }
+
+  @Test
+  public void testLinuxGpuResourceDiscoverPluginConfig() throws Exception {
+    // Only run this on demand.
+    Assume.assumeTrue(Boolean.valueOf(
+        System.getProperty("RunLinuxGpuResourceDiscoverPluginConfigTest")));
+
+    // test case 1, check default setting.
+    Configuration conf = new Configuration(false);
+    GpuDiscoverer plugin = new GpuDiscoverer();
+    plugin.initialize(conf);
+    Assert.assertEquals(GpuDiscoverer.DEFAULT_BINARY_NAME,
+        plugin.getPathOfGpuBinary());
+    Assert.assertNotNull(plugin.getEnvironmentToRunCommand().get("PATH"));
+    Assert.assertTrue(
+        plugin.getEnvironmentToRunCommand().get("PATH").contains("nvidia"));
+
+    // test case 2, check mandatory set path.
+    File fakeBinary = new File(getTestParentFolder(),
+        GpuDiscoverer.DEFAULT_BINARY_NAME);
+    touchFile(fakeBinary);
+    conf.set(YarnConfiguration.NM_GPU_PATH_TO_EXEC, getTestParentFolder());
+    plugin = new GpuDiscoverer();
+    plugin.initialize(conf);
+    Assert.assertEquals(fakeBinary.getAbsolutePath(),
+        plugin.getPathOfGpuBinary());
+    Assert.assertNull(plugin.getEnvironmentToRunCommand().get("PATH"));
+
+    // test case 3, check mandatory set path, but binary doesn't exist so default
+    // path will be used.
+    fakeBinary.delete();
+    plugin = new GpuDiscoverer();
+    plugin.initialize(conf);
+    Assert.assertEquals(GpuDiscoverer.DEFAULT_BINARY_NAME,
+        plugin.getPathOfGpuBinary());
+    Assert.assertTrue(
+        plugin.getEnvironmentToRunCommand().get("PATH").contains("nvidia"));
+  }
+
+  @Test
+  public void testGpuDiscover() throws YarnException {
+    // Only run this on demand: it needs a host with GPUs and nvidia-smi,
+    // so it is gated on -DrunGpuDiscoverUnitTest=true.
+    Assume.assumeTrue(
+        Boolean.valueOf(System.getProperty("runGpuDiscoverUnitTest")));
+    Configuration conf = new Configuration(false);
+    GpuDiscoverer plugin = new GpuDiscoverer();
+    plugin.initialize(conf);
+    GpuDeviceInformation info = plugin.getGpuDeviceInformation();
+
+    Assert.assertTrue(info.getGpus().size() > 0);
+    Assert.assertEquals(plugin.getMinorNumbersOfGpusUsableByYarn().size(),
+        info.getGpus().size());
+  }
+
+  @Test
+  public void getNumberOfUsableGpusFromConfig() throws YarnException {
+    Configuration conf = new Configuration(false);
+    conf.set(YarnConfiguration.NM_GPU_ALLOWED_DEVICES, "0,1,2,4");
+    GpuDiscoverer plugin = new GpuDiscoverer();
+    plugin.initialize(conf);
+
+    List<Integer> minorNumbers = plugin.getMinorNumbersOfGpusUsableByYarn();
+    Assert.assertEquals(4, minorNumbers.size());
+
+    Assert.assertTrue(0 == minorNumbers.get(0));
+    Assert.assertTrue(1 == minorNumbers.get(1));
+    Assert.assertTrue(2 == minorNumbers.get(2));
+    Assert.assertTrue(4 == minorNumbers.get(3));
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f17a2e47/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/dao/gpu/TestGpuDeviceInformationParser.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/dao/gpu/TestGpuDeviceInformationParser.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/dao/gpu/TestGpuDeviceInformationParser.java
new file mode 100644
index 0000000..e22597d
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/dao/gpu/TestGpuDeviceInformationParser.java
@@ -0,0 +1,50 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.nodemanager.webapp.dao.gpu;
+
+import org.apache.commons.io.FileUtils;
+import org.apache.hadoop.yarn.exceptions.YarnException;
+import org.junit.Assert;
+import org.junit.Test;
+
+import java.io.File;
+import java.io.IOException;
+
+public class TestGpuDeviceInformationParser {
+  @Test
+  public void testParse() throws IOException, YarnException {
+    File f = new File("src/test/resources/nvidia-smi-sample-xml-output");
+    String s = FileUtils.readFileToString(f, "UTF-8");
+
+    GpuDeviceInformationParser parser = new GpuDeviceInformationParser();
+
+    GpuDeviceInformation info = parser.parseXml(s);
+    Assert.assertEquals("375.66", info.getDriverVersion());
+    Assert.assertEquals(2, info.getGpus().size());
+    PerGpuDeviceInformation gpu1 = info.getGpus().get(1);
+    Assert.assertEquals("Tesla P100-PCIE-12GB", gpu1.getProductName());
+    Assert.assertEquals(16384, gpu1.getGpuMemoryUsage().getTotalMemoryMiB());
+    Assert.assertEquals(10.3f,
+        gpu1.getGpuUtilizations().getOverallGpuUtilization(), 1e-6);
+    Assert.assertEquals(34f, gpu1.getTemperature().getCurrentGpuTemp(), 1e-6);
+    Assert.assertEquals(85f, gpu1.getTemperature().getMaxGpuTemp(), 1e-6);
+    Assert.assertEquals(82f, gpu1.getTemperature().getSlowThresholdGpuTemp(),
+        1e-6);
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f17a2e47/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/resources/nvidia-smi-sample-xml-output
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/resources/nvidia-smi-sample-xml-output b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/resources/nvidia-smi-sample-xml-output
new file mode 100644
index 0000000..5ccb722
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/resources/nvidia-smi-sample-xml-output
@@ -0,0 +1,547 @@
[The new 547-line test resource is a sample "nvidia-smi -q -x" XML report (timestamp Wed Sep 6 21:52:51 2017, driver version 375.66) describing two Tesla P100-PCIE-12GB GPUs at 0000:04:00.0 and 0000:82:00.0 with 16384 MiB of FB memory each; GPU 1 reports 10.3 % utilization and 34 C current temperature, with 85 C max and 82 C slowdown thresholds. The XML markup was stripped in this archive, so the raw element values are not reproduced here.]

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f17a2e47/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppInfo.java
index bff8612..f3b15ff 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppInfo.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppInfo.java
@@ -497,7 +497,7 @@ public class AppInfo {
   public Map<String, Long> getPreemptedResourceSecondsMap() {
     return preemptedResourceSecondsMap;
   }
-  
+
   public List<ResourceRequestInfo> getResourceRequests() {
     return this.resourceRequests;
   }

---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org