Return-Path: X-Original-To: apmail-hadoop-mapreduce-commits-archive@minotaur.apache.org Delivered-To: apmail-hadoop-mapreduce-commits-archive@minotaur.apache.org Received: from mail.apache.org (hermes.apache.org [140.211.11.3]) by minotaur.apache.org (Postfix) with SMTP id 45B6F89BA for ; Thu, 8 Sep 2011 01:40:17 +0000 (UTC) Received: (qmail 6282 invoked by uid 500); 8 Sep 2011 01:40:17 -0000 Delivered-To: apmail-hadoop-mapreduce-commits-archive@hadoop.apache.org Received: (qmail 6228 invoked by uid 500); 8 Sep 2011 01:40:17 -0000 Mailing-List: contact mapreduce-commits-help@hadoop.apache.org; run by ezmlm Precedence: bulk List-Help: List-Unsubscribe: List-Post: List-Id: Reply-To: mapreduce-dev@hadoop.apache.org Delivered-To: mailing list mapreduce-commits@hadoop.apache.org Received: (qmail 6220 invoked by uid 99); 8 Sep 2011 01:40:17 -0000 Received: from nike.apache.org (HELO nike.apache.org) (192.87.106.230) by apache.org (qpsmtpd/0.29) with ESMTP; Thu, 08 Sep 2011 01:40:17 +0000 X-ASF-Spam-Status: No, hits=-2000.0 required=5.0 tests=ALL_TRUSTED X-Spam-Check-By: apache.org Received: from [140.211.11.4] (HELO eris.apache.org) (140.211.11.4) by apache.org (qpsmtpd/0.29) with ESMTP; Thu, 08 Sep 2011 01:40:06 +0000 Received: from eris.apache.org (localhost [127.0.0.1]) by eris.apache.org (Postfix) with ESMTP id A001B2388B71; Thu, 8 Sep 2011 01:39:32 +0000 (UTC) Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit Subject: svn commit: r1166495 [4/6] - in /hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project: ./ conf/ hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/ hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/mai... Date: Thu, 08 Sep 2011 01:39:23 -0000 To: mapreduce-commits@hadoop.apache.org From: todd@apache.org X-Mailer: svnmailer-1.0.8 Message-Id: <20110908013932.A001B2388B71@eris.apache.org> X-Virus-Checked: Checked by ClamAV on apache.org Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationSubmissionContext.java URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationSubmissionContext.java?rev=1166495&r1=1166494&r2=1166495&view=diff ============================================================================== --- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationSubmissionContext.java (original) +++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationSubmissionContext.java Thu Sep 8 01:39:07 2011 @@ -22,68 +22,319 @@ import java.nio.ByteBuffer; import java.util.List; import java.util.Map; +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.classification.InterfaceAudience.Public; +import org.apache.hadoop.classification.InterfaceStability.Stable; +import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.apache.hadoop.yarn.api.ClientRMProtocol; + +/** + *

ApplicationSubmissionContext represents all of the
+ * information needed by the ResourceManager to launch
+ * the ApplicationMaster for an application.
+ *
+ * It includes details such as:
+ *   - {@link ApplicationId} of the application.
+ *   - {@link Resource} necessary to run the ApplicationMaster.
+ *   - Application user.
+ *   - Application name.
+ *   - {@link Priority} of the application.
+ *   - Security tokens (if security is enabled).
+ *   - {@link LocalResource} necessary for running the ApplicationMaster
+ *     container, such as binaries, jars, shared objects, side files, etc.
+ *   - Environment variables for the launched ApplicationMaster process.
+ *   - Command to launch the ApplicationMaster.
+ *

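For illustration only (this sketch is not part of the patch), a client might populate an ApplicationSubmissionContext roughly as follows using the setters declared below; the RecordFactory instantiation and all literal values are assumptions.

// Illustrative sketch, not part of this patch: building an
// ApplicationSubmissionContext for submission via ClientRMProtocol.
import java.util.Collections;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.factories.RecordFactory;
import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;

public class SubmissionContextSketch {

  /** Assemble a minimal submission context; appId, amCapability and
      priority are assumed to have been obtained elsewhere. */
  public static ApplicationSubmissionContext build(ApplicationId appId,
      Resource amCapability, Priority priority) {
    RecordFactory recordFactory =
        RecordFactoryProvider.getRecordFactory(new Configuration());
    ApplicationSubmissionContext context =
        recordFactory.newRecordInstance(ApplicationSubmissionContext.class);

    // Identity, queue and priority of the application.
    context.setApplicationId(appId);
    context.setApplicationName("example-app");     // illustrative name
    context.setQueue("default");                   // illustrative queue
    context.setPriority(priority);
    context.setUser(System.getProperty("user.name"));

    // Resource needed to run the ApplicationMaster container.
    context.setMasterCapability(amCapability);

    // Environment and launch command for the ApplicationMaster process.
    context.addAllEnvironment(
        Collections.singletonMap("CLASSPATH", "./*"));
    context.addAllCommands(
        Collections.singletonList("java ExampleAppMaster 1>stdout 2>stderr"));
    return context;
  }
}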
+ * + * @see ClientRMProtocol#submitApplication(org.apache.hadoop.yarn.api.protocolrecords.SubmitApplicationRequest) + */ +@Public +@Stable public interface ApplicationSubmissionContext { - public abstract ApplicationId getApplicationId(); - public abstract String getApplicationName(); - public abstract Resource getMasterCapability(); - - public abstract Map getAllResources(); - public abstract URL getResource(String key); - - public abstract Map getAllResourcesTodo(); - public abstract LocalResource getResourceTodo(String key); - - public abstract List getFsTokenList(); - public abstract String getFsToken(int index); - public abstract int getFsTokenCount(); - - public abstract ByteBuffer getFsTokensTodo(); - - public abstract Map getAllEnvironment(); - public abstract String getEnvironment(String key); - - public abstract List getCommandList(); - public abstract String getCommand(int index); - public abstract int getCommandCount(); - - public abstract String getQueue(); - public abstract Priority getPriority(); - public abstract String getUser(); + /** + * Get the ApplicationId of the submitted application. + * @return ApplicationId of the submitted application + */ + @Public + @Stable + public ApplicationId getApplicationId(); + + /** + * Set the ApplicationId of the submitted application. + * @param appplicationId ApplicationId of the submitted + * application + */ + @Public + @Stable + public void setApplicationId(ApplicationId appplicationId); - - - public abstract void setApplicationId(ApplicationId appplicationId); - public abstract void setApplicationName(String applicationName); - public abstract void setMasterCapability(Resource masterCapability); - - public abstract void addAllResources(Map resources); - public abstract void setResource(String key, URL url); - public abstract void removeResource(String key); - public abstract void clearResources(); - - public abstract void addAllResourcesTodo(Map resourcesTodo); - public abstract void setResourceTodo(String key, LocalResource localResource); - public abstract void removeResourceTodo(String key); - public abstract void clearResourcesTodo(); - - public abstract void addAllFsTokens(List fsTokens); - public abstract void addFsToken(String fsToken); - public abstract void removeFsToken(int index); - public abstract void clearFsTokens(); - - public abstract void setFsTokensTodo(ByteBuffer fsTokensTodo); - - public abstract void addAllEnvironment(Map environment); - public abstract void setEnvironment(String key, String env); - public abstract void removeEnvironment(String key); - public abstract void clearEnvironment(); - - public abstract void addAllCommands(List commands); - public abstract void addCommand(String command); - public abstract void removeCommand(int index); - public abstract void clearCommands(); - - public abstract void setQueue(String queue); - public abstract void setPriority(Priority priority); - public abstract void setUser(String user); + /** + * Get the application name. + * @return application name + */ + @Public + @Stable + public String getApplicationName(); + + /** + * Set the application name. + * @param applicationName application name + */ + @Public + @Stable + public void setApplicationName(String applicationName); + + /** + * Get the queue to which the application is being submitted. 
+ * @return queue to which the application is being submitted + */ + @Public + @Stable + public String getQueue(); + + /** + * Set the queue to which the application is being submitted + * @param queue queue to which the application is being submitted + */ + @Public + @Stable + public void setQueue(String queue); + + /** + * Get the Priority of the application. + * @return Priority of the application + */ + @Public + @Stable + public Priority getPriority(); + + /** + * Set the Priority of the application. + * @param priority Priority of the application + */ + @Public + @Stable + public void setPriority(Priority priority); + + /** + * Get the user submitting the application. + * @return user submitting the application + */ + @Public + @Stable + public String getUser(); + + /** + * Set the user submitting the application. + * @param user user submitting the application + */ + @Public + @Stable + public void setUser(String user); + + /** + * Get the Resource required to run the + * ApplicationMaster. + * @return Resource required to run the + * ApplicationMaster + */ + @Public + @Stable + public Resource getMasterCapability(); + + /** + * Set Resource required to run the + * ApplicationMaster. + * @param masterCapability Resource required to run the + * ApplicationMaster + */ + @Public + @Stable + public void setMasterCapability(Resource masterCapability); + + @Private + @Unstable + public Map getAllResources(); + + @Private + @Unstable + public URL getResource(String key); + + @Private + @Unstable + public void addAllResources(Map resources); + + @Private + @Unstable + public void setResource(String key, URL url); + + @Private + @Unstable + public void removeResource(String key); + + @Private + @Unstable + public void clearResources(); + + /** + * Get all the LocalResource required to run the + * ApplicationMaster. + * @return LocalResource required to run the + * ApplicationMaster + */ + @Public + @Stable + public Map getAllResourcesTodo(); + + @Private + @Unstable + public LocalResource getResourceTodo(String key); + + /** + * Add all the LocalResource required to run the + * ApplicationMaster. + * @param resources all LocalResource required to run the + * ApplicationMaster + */ + @Public + @Stable + public void addAllResourcesTodo(Map resources); + + @Private + @Unstable + public void setResourceTodo(String key, LocalResource localResource); + + @Private + @Unstable + public void removeResourceTodo(String key); + + @Private + @Unstable + public void clearResourcesTodo(); + + @Private + @Unstable + public List getFsTokenList(); + + @Private + @Unstable + public String getFsToken(int index); + + @Private + @Unstable + public int getFsTokenCount(); + + @Private + @Unstable + public void addAllFsTokens(List fsTokens); + + @Private + @Unstable + public void addFsToken(String fsToken); + + @Private + @Unstable + public void removeFsToken(int index); + + @Private + @Unstable + public void clearFsTokens(); + + /** + * Get file-system tokens for the ApplicationMaster. + * @return file-system tokens for the ApplicationMaster + */ + @Public + @Stable + public ByteBuffer getFsTokensTodo(); + + /** + * Set file-system tokens for the ApplicationMaster. + * @param fsTokens file-system tokens for the ApplicationMaster + */ + @Public + @Stable + public void setFsTokensTodo(ByteBuffer fsTokens); + + /** + * Get the environment variables for the + * ApplicationMaster. 
+ * @return environment variables for the ApplicationMaster + */ + @Public + @Stable + public Map getAllEnvironment(); + + @Private + @Unstable + public String getEnvironment(String key); + + /** + * Add all of the environment variables for the + * ApplicationMaster. + * @param environment environment variables for the + * ApplicationMaster + */ + @Public + @Stable + public void addAllEnvironment(Map environment); + + @Private + @Unstable + public void setEnvironment(String key, String env); + + @Private + @Unstable + public void removeEnvironment(String key); + + @Private + @Unstable + public void clearEnvironment(); + + /** + * Get the commands to launch the ApplicationMaster. + * @return commands to launch the ApplicationMaster + */ + @Public + @Stable + public List getCommandList(); + + @Private + @Unstable + public String getCommand(int index); + + @Private + @Unstable + public int getCommandCount(); + + /** + * Add all of the commands to launch the + * ApplicationMaster. + * @param commands commands to launch the ApplicationMaster + */ + @Public + @Stable + public void addAllCommands(List commands); + + @Private + @Unstable + public void addCommand(String command); + + @Private + @Unstable + public void removeCommand(int index); + + @Private + @Unstable + public void clearCommands(); } \ No newline at end of file Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ContainerLaunchContext.java URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ContainerLaunchContext.java?rev=1166495&r1=1166494&r2=1166495&view=diff ============================================================================== --- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ContainerLaunchContext.java (original) +++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ContainerLaunchContext.java Thu Sep 8 01:39:07 2011 @@ -22,50 +22,233 @@ import java.nio.ByteBuffer; import java.util.List; import java.util.Map; +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.classification.InterfaceAudience.Public; +import org.apache.hadoop.classification.InterfaceStability.Stable; +import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.apache.hadoop.yarn.api.ContainerManager; + +/** + *

ContainerLaunchContext represents all of the information
+ * needed by the NodeManager to launch a container.
+ *
+ * It includes details such as:
+ *   - {@link ContainerId} of the container.
+ *   - {@link Resource} allocated to the container.
+ *   - User to whom the container is allocated.
+ *   - Security tokens (if security is enabled).
+ *   - {@link LocalResource} necessary for running the container,
+ *     such as binaries, jars, shared objects, side files, etc.
+ *   - Optional, application-specific binary service data.
+ *   - Environment variables for the launched process.
+ *   - Command to launch the container.
+ *

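For illustration only (not part of the patch), an ApplicationMaster might fill in a ContainerLaunchContext roughly as sketched below before handing it to ContainerManager#startContainer; the RecordFactory instantiation and the literal values are assumptions.

// Illustrative sketch, not part of this patch: populating a
// ContainerLaunchContext for ContainerManager#startContainer.
import java.util.Collections;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
import org.apache.hadoop.yarn.api.records.LocalResource;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.factories.RecordFactory;
import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;

public class LaunchContextSketch {

  /** containerId, user, allocated resource and the localized jar are
      assumed to come from the allocation returned by the ResourceManager. */
  public static ContainerLaunchContext build(ContainerId containerId,
      String user, Resource allocated, LocalResource appJar) {
    RecordFactory recordFactory =
        RecordFactoryProvider.getRecordFactory(new Configuration());
    ContainerLaunchContext ctx =
        recordFactory.newRecordInstance(ContainerLaunchContext.class);

    ctx.setContainerId(containerId);
    ctx.setUser(user);
    ctx.setResource(allocated);

    // Files the NodeManager should localize before launch, keyed by the
    // name they will have in the container's working directory.
    ctx.addAllLocalResources(Collections.singletonMap("app.jar", appJar));

    // Environment and command line for the container process.
    ctx.addAllEnv(Collections.singletonMap("CLASSPATH", "./app.jar"));
    ctx.addAllCommands(Collections.singletonList(
        "java -cp ./app.jar ExampleWorker 1>stdout 2>stderr"));
    return ctx;
  }
}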
+ * + * @see ContainerManager#startContainer(org.apache.hadoop.yarn.api.protocolrecords.StartContainerRequest) + */ +@Public +@Stable public interface ContainerLaunchContext { + /** + * Get ContainerId of container to be launched. + * @return ContainerId of container to be launched + */ + @Public + @Stable ContainerId getContainerId(); + + /** + * Set ContainerId of container to be launched. + * @param containerId et ContainerId of container to be launched + */ + @Public + @Stable + void setContainerId(ContainerId containerId); + + /** + * Get the user to whom the container has been allocated. + * @return the user to whom the container has been allocated + */ + @Public + @Stable String getUser(); - Resource getResource(); - - Map getAllLocalResources(); - LocalResource getLocalResource(String key); - - ByteBuffer getContainerTokens(); - - Map getAllServiceData(); - ByteBuffer getServiceData(String key); - - Map getAllEnv(); - String getEnv(String key); - - List getCommandList(); - String getCommand(int index); - int getCommandCount(); - - void setContainerId(ContainerId containerId); + /** + * Set the user to whom the container has been allocated + * @param user user to whom the container has been allocated + */ + @Public + @Stable void setUser(String user); + + /** + * Get the Resource allocated to the container by the + * ResourceManager. + * @return Resource allocated to the container by the + * ResourceManager + */ + @Public + @Stable + Resource getResource(); + + /** + * Set the Resource allocated to the container by the + * ResourceManager. + * @param resource allocated resource + */ + @Public + @Stable void setResource(Resource resource); + + /** + * Get security tokens (if security is enabled). + * @return security tokens (if security is enabled) + */ + @Public + @Stable + ByteBuffer getContainerTokens(); + + /** + * Set security tokens (if security is enabled). + * @param containerToken security tokens + */ + @Public + @Stable + void setContainerTokens(ByteBuffer containerToken); + + /** + * Get all LocalResource required by the container. + * @return all LocalResource required by the container + */ + @Public + @Stable + Map getAllLocalResources(); + @Private + @Unstable + LocalResource getLocalResource(String key); + + /** + * Add all LocalResource required by the container. + * @param localResources LocalResource required by the container + */ + @Public + @Stable void addAllLocalResources(Map localResources); + + @Private + @Unstable void setLocalResource(String key, LocalResource value); + + @Private + @Unstable void removeLocalResource(String key); + + @Private + @Unstable void clearLocalResources(); + + /** + * Get application-specific binary service data. + * @return application-specific binary service data + */ + @Public + @Stable + Map getAllServiceData(); - void setContainerTokens(ByteBuffer containerToken); - + @Private + @Unstable + ByteBuffer getServiceData(String key); + + /** + * Add add application-specific binary service data. + * @param serviceData application-specific binary service data + */ + @Public + @Stable void addAllServiceData(Map serviceData); + + @Private + @Unstable void setServiceData(String key, ByteBuffer value); + + @Private + @Unstable void removeServiceData(String key); + + @Private + @Unstable void clearServiceData(); + + /** + * Get environment variables for the launched container. 
+ * @return environment variables for the launched container + */ + @Public + @Stable + Map getAllEnv(); + + @Private + @Unstable + String getEnv(String key); + /** + * Add environment variables for the launched container. + * @param env environment variables for the launched container + */ + @Public + @Stable void addAllEnv(Map env); + + @Private + @Unstable void setEnv(String key, String value); + + @Private + @Unstable void removeEnv(String key); + + @Private + @Unstable void clearEnv(); + + /** + * Get the list of commands for launching the container. + * @return the list of commands for launching the container + */ + @Public + @Stable + List getCommandList(); + @Private + @Unstable + String getCommand(int index); + + @Private + @Unstable + int getCommandCount(); + + /** + * Add the list of commands for launching the container. + * @param commands the list of commands for launching the container + */ + @Public + @Stable void addAllCommands(List commands); + + @Private + @Unstable void addCommand(String command); + + @Private + @Unstable void removeCommand(int index); + + @Private + @Unstable void clearCommands(); } Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/LocalResource.java URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/LocalResource.java?rev=1166495&r1=1166494&r2=1166495&view=diff ============================================================================== --- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/LocalResource.java (original) +++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/LocalResource.java Thu Sep 8 01:39:07 2011 @@ -18,16 +18,92 @@ package org.apache.hadoop.yarn.api.records; +import org.apache.hadoop.classification.InterfaceAudience.Public; +import org.apache.hadoop.classification.InterfaceStability.Stable; +import org.apache.hadoop.yarn.api.ContainerManager; + +/** + *

LocalResource represents a local resource required to
+ * run a container.
+ *
+ * The NodeManager is responsible for localizing the resource
+ * prior to launching the container.
+ *
+ * Applications can specify {@link LocalResourceType} and
+ * {@link LocalResourceVisibility}.

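A rough, hypothetical sketch (not part of the patch) of describing one file with the setters declared below; how the URL, size and timestamp are obtained, and the RecordFactory instantiation, are assumptions.

// Illustrative sketch, not part of this patch: describing a single file
// the NodeManager should localize for a container.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.api.records.LocalResource;
import org.apache.hadoop.yarn.api.records.LocalResourceType;
import org.apache.hadoop.yarn.api.records.LocalResourceVisibility;
import org.apache.hadoop.yarn.api.records.URL;
import org.apache.hadoop.yarn.factories.RecordFactory;
import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;

public class LocalResourceSketch {

  /** location, length and modification time are assumed to have been
      read from the file system (e.g. a FileStatus) by the caller. */
  public static LocalResource describe(URL location, long length,
      long modificationTime) {
    RecordFactory recordFactory =
        RecordFactoryProvider.getRecordFactory(new Configuration());
    LocalResource rsrc = recordFactory.newRecordInstance(LocalResource.class);
    rsrc.setResource(location);          // where to fetch the resource from
    rsrc.setSize(length);                // expected size, used for verification
    rsrc.setTimestamp(modificationTime); // expected timestamp, used for verification
    rsrc.setType(LocalResourceType.FILE);                    // plain file, not unarchived
    rsrc.setVisibility(LocalResourceVisibility.APPLICATION); // visible to this application only
    return rsrc;
  }
}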
+ * + * @see LocalResourceType + * @see LocalResourceVisibility + * @see ContainerLaunchContext + * @see ApplicationSubmissionContext + * @see ContainerManager#startContainer(org.apache.hadoop.yarn.api.protocolrecords.StartContainerRequest) + */ +@Public +@Stable public interface LocalResource { - public abstract URL getResource(); - public abstract long getSize(); - public abstract long getTimestamp(); - public abstract LocalResourceType getType(); - public abstract LocalResourceVisibility getVisibility(); - - public abstract void setResource(URL resource); - public abstract void setSize(long size); - public abstract void setTimestamp(long timestamp); - public abstract void setType(LocalResourceType type); - public abstract void setVisibility(LocalResourceVisibility visibility); + /** + * Get the location of the resource to be localized. + * @return location of the resource to be localized + */ + public URL getResource(); + + /** + * Set location of the resource to be localized. + * @param resource location of the resource to be localized + */ + public void setResource(URL resource); + + /** + * Get the size of the resource to be localized. + * @return size of the resource to be localized + */ + public long getSize(); + + /** + * Set the size of the resource to be localized. + * @param size size of the resource to be localized + */ + public void setSize(long size); + + /** + * Get the original timestamp of the resource to be localized, used + * for verification. + * @return timestamp of the resource to be localized + */ + public long getTimestamp(); + + /** + * Set the timestamp of the resource to be localized, used + * for verification. + * @param timestamp timestamp of the resource to be localized + */ + public void setTimestamp(long timestamp); + + /** + * Get the LocalResourceType of the resource to be localized. + * @return LocalResourceType of the resource to be localized + */ + public LocalResourceType getType(); + + /** + * Set the LocalResourceType of the resource to be localized. + * @param type LocalResourceType of the resource to be localized + */ + public void setType(LocalResourceType type); + + /** + * Get the LocalResourceVisibility of the resource to be + * localized. + * @return LocalResourceVisibility of the resource to be + * localized + */ + public LocalResourceVisibility getVisibility(); + + /** + * Set the LocalResourceVisibility of the resource to be + * localized. 
+ * @param visibility LocalResourceVisibility of the resource to be + * localized + */ + public void setVisibility(LocalResourceVisibility visibility); } Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/LocalResourceType.java URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/LocalResourceType.java?rev=1166495&r1=1166494&r2=1166495&view=diff ============================================================================== --- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/LocalResourceType.java (original) +++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/LocalResourceType.java Thu Sep 8 01:39:07 2011 @@ -18,6 +18,42 @@ package org.apache.hadoop.yarn.api.records; +import org.apache.hadoop.classification.InterfaceAudience.Public; +import org.apache.hadoop.classification.InterfaceStability.Stable; +import org.apache.hadoop.yarn.api.ContainerManager; + +/** + *

LocalResourceType specifies the type
+ * of a resource localized by the NodeManager.
+ *
+ * The type can be one of:
+ *   - {@link #FILE} - Regular file, i.e. uninterpreted bytes.
+ *   - {@link #ARCHIVE} - Archive, which is automatically unarchived by the
+ *     NodeManager.
+ *

+ * + * @see LocalResource + * @see ContainerLaunchContext + * @see ApplicationSubmissionContext + * @see ContainerManager#startContainer(org.apache.hadoop.yarn.api.protocolrecords.StartContainerRequest) + */ +@Public +@Stable public enum LocalResourceType { - ARCHIVE, FILE + + /** + * Archive, which is automatically unarchived by the NodeManager. + */ + ARCHIVE, + + /** + * Regular file i.e. uninterpreted bytes. + */ + FILE } Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/LocalResourceVisibility.java URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/LocalResourceVisibility.java?rev=1166495&r1=1166494&r2=1166495&view=diff ============================================================================== --- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/LocalResourceVisibility.java (original) +++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/LocalResourceVisibility.java Thu Sep 8 01:39:07 2011 @@ -18,6 +18,48 @@ package org.apache.hadoop.yarn.api.records; +import org.apache.hadoop.classification.InterfaceAudience.Public; +import org.apache.hadoop.classification.InterfaceStability.Stable; +import org.apache.hadoop.yarn.api.ContainerManager; + +/** + *

LocalResourceVisibility specifies the visibility
+ * of a resource localized by the NodeManager.
+ *
+ * The visibility can be one of:
+ *   - {@link #PUBLIC} - Shared by all users on the node.
+ *   - {@link #PRIVATE} - Shared among all applications of the same user
+ *     on the node.
+ *   - {@link #APPLICATION} - Shared only among containers of the same
+ *     application on the node.
+ *

+ * + * @see LocalResource + * @see ContainerLaunchContext + * @see ApplicationSubmissionContext + * @see ContainerManager#startContainer(org.apache.hadoop.yarn.api.protocolrecords.StartContainerRequest) + */ +@Public +@Stable public enum LocalResourceVisibility { - PUBLIC, PRIVATE, APPLICATION + /** + * Shared by all users on the node. + */ + PUBLIC, + + /** + * Shared among all applications of the same user on the node. + */ + PRIVATE, + + /** + * Shared only among containers of the same application on the node. + */ + APPLICATION } Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/NodeHealthStatus.java URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/NodeHealthStatus.java?rev=1166495&r1=1166494&r2=1166495&view=diff ============================================================================== --- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/NodeHealthStatus.java (original) +++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/NodeHealthStatus.java Thu Sep 8 01:39:07 2011 @@ -17,17 +17,69 @@ */ package org.apache.hadoop.yarn.api.records; +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.classification.InterfaceAudience.Public; +import org.apache.hadoop.classification.InterfaceStability.Stable; +import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.apache.hadoop.yarn.api.ClientRMProtocol; + +/** + *

NodeHealthStatus is a summary of the health status of the
+ * node.
+ *
+ * It includes information such as:
+ *   - An indicator of whether the node is healthy, as determined by the
+ *     health-check script.
+ *   - The previous time at which the health status was reported.
+ *   - A diagnostic report on the health status.
+ *

+ * + * @see NodeReport + * @see ClientRMProtocol#getClusterNodes(org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodesRequest) + */ +@Public +@Stable public interface NodeHealthStatus { + /** + * Is the node healthy? + * @return true if the node is healthy, else false + */ + @Public + @Stable boolean getIsNodeHealthy(); - String getHealthReport(); - - long getLastHealthReportTime(); - + @Private + @Unstable void setIsNodeHealthy(boolean isNodeHealthy); + /** + * Get the diagnostic health report of the node. + * @return diagnostic health report of the node + */ + @Public + @Stable + String getHealthReport(); + + @Private + @Unstable void setHealthReport(String healthReport); + /** + * Get the last timestamp at which the health report was received. + * @return last timestamp at which the health report was received + */ + @Public + @Stable + long getLastHealthReportTime(); + + @Private + @Unstable void setLastHealthReportTime(long lastHealthReport); } \ No newline at end of file Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/NodeReport.java URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/NodeReport.java?rev=1166495&r1=1166494&r2=1166495&view=diff ============================================================================== --- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/NodeReport.java (original) +++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/NodeReport.java Thu Sep 8 01:39:07 2011 @@ -18,19 +18,113 @@ package org.apache.hadoop.yarn.api.records; +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.classification.InterfaceAudience.Public; +import org.apache.hadoop.classification.InterfaceStability.Stable; +import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.apache.hadoop.yarn.api.ClientRMProtocol; + +/** + *

NodeReport is a summary of runtime information of a
+ * node in the cluster.
+ *
+ * It includes details such as:
+ *   - {@link NodeId} of the node.
+ *   - HTTP Tracking URL of the node.
+ *   - Rack name for the node.
+ *   - Used {@link Resource} on the node.
+ *   - Total available {@link Resource} of the node.
+ *   - Number of running containers on the node.
+ *   - {@link NodeHealthStatus} of the node.
+ *

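As a read-side illustration (not part of the patch), the getters declared in NodeReport and NodeHealthStatus could be dumped roughly like this, assuming the report was obtained from ClientRMProtocol#getClusterNodes.

// Illustrative sketch, not part of this patch: printing a NodeReport and
// its NodeHealthStatus using only the getters defined in these interfaces.
import org.apache.hadoop.yarn.api.records.NodeHealthStatus;
import org.apache.hadoop.yarn.api.records.NodeReport;

public class NodeReportSketch {

  public static void print(NodeReport report) {
    NodeHealthStatus health = report.getNodeHealthStatus();
    System.out.println("node         : " + report.getNodeId());
    System.out.println("http address : " + report.getHttpAddress());
    System.out.println("rack         : " + report.getRackName());
    System.out.println("used         : " + report.getUsed());
    System.out.println("capability   : " + report.getCapability());
    System.out.println("containers   : " + report.getNumContainers());
    System.out.println("healthy      : " + health.getIsNodeHealthy());
    System.out.println("last report  : " + health.getLastHealthReportTime());
    System.out.println("diagnostics  : " + health.getHealthReport());
  }
}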
+ * + * @see NodeHealthStatus + * @see ClientRMProtocol#getClusterNodes(org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodesRequest) + */ +@Public +@Stable public interface NodeReport { + /** + * Get the NodeId of the node. + * @return NodeId of the node + */ NodeId getNodeId(); + + @Private + @Unstable void setNodeId(NodeId nodeId); + + /** + * Get the http address of the node. + * @return http address of the node + */ + @Public + @Stable String getHttpAddress(); + + @Private + @Unstable void setHttpAddress(String httpAddress); + + /** + * Get the rack name for the node. + * @return rack name for the node + */ + @Public + @Stable String getRackName(); + + @Private + @Unstable void setRackName(String rackName); + + /** + * Get used Resource on the node. + * @return used Resource on the node + */ + @Public + @Stable Resource getUsed(); + + @Private + @Unstable void setUsed(Resource used); + + /** + * Get the total Resource on the node. + * @return total Resource on the node + */ + @Public + @Stable Resource getCapability(); + + @Private + @Unstable void setCapability(Resource capability); + + /** + * Get the number of running containers on the node. + * @return number of running containers on the node + */ + @Public + @Stable int getNumContainers(); + + @Private + @Unstable void setNumContainers(int numContainers); + + /** + * Get the NodeHealthStatus of the node. + * @return NodeHealthStatus of the node + */ + @Public + @Stable NodeHealthStatus getNodeHealthStatus(); + + @Private + @Unstable void setNodeHealthStatus(NodeHealthStatus nodeHealthStatus); } Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/QueueACL.java URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/QueueACL.java?rev=1166495&r1=1166494&r2=1166495&view=diff ============================================================================== --- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/QueueACL.java (original) +++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/QueueACL.java Thu Sep 8 01:39:07 2011 @@ -18,8 +18,39 @@ package org.apache.hadoop.yarn.api.records; +import org.apache.hadoop.classification.InterfaceAudience.Public; +import org.apache.hadoop.classification.InterfaceStability.Stable; +import org.apache.hadoop.yarn.api.ClientRMProtocol; + +/** + *

QueueACL enumerates the various ACLs for queues.
+ *
+ * The ACLs are one of:
+ *   - {@link #SUBMIT_JOB} - ACL to submit jobs to the queue.
+ *   - {@link #ADMINISTER_QUEUE} - ACL to administer the queue.
+ *   - {@link #ADMINISTER_JOBS} - ACL to administer jobs in the queue.
+ *

+ * + * @see QueueInfo + * @see ClientRMProtocol#getQueueUserAcls(org.apache.hadoop.yarn.api.protocolrecords.GetQueueUserAclsInfoRequest) + */ +@Public +@Stable public enum QueueACL { + /** + * ACL to submit jobs to the queue. + */ SUBMIT_JOB, + + /** + * ACL to administer the queue. + */ ADMINISTER_QUEUE, + + /** + * ACL to administer jobs in the queue. + */ ADMINISTER_JOBS; // currently unused } \ No newline at end of file Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/QueueInfo.java URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/QueueInfo.java?rev=1166495&r1=1166494&r2=1166495&view=diff ============================================================================== --- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/QueueInfo.java (original) +++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/QueueInfo.java Thu Sep 8 01:39:07 2011 @@ -20,25 +20,114 @@ package org.apache.hadoop.yarn.api.recor import java.util.List; +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.classification.InterfaceAudience.Public; +import org.apache.hadoop.classification.InterfaceStability.Stable; +import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.apache.hadoop.yarn.api.ClientRMProtocol; + +/** + *

QueueInfo is a report of the runtime information of the queue.
+ *
+ * It includes information such as:
+ *   - Queue name.
+ *   - Capacity of the queue.
+ *   - Maximum capacity of the queue.
+ *   - Current capacity of the queue.
+ *   - Child queues.
+ *   - Running applications.
+ *   - {@link QueueState} of the queue.
+ *

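As an illustration (not part of the patch), the queue hierarchy returned by ClientRMProtocol#getQueueInfo could be walked with the getters declared below; the null checks are defensive assumptions.

// Illustrative sketch, not part of this patch: recursively printing a queue
// hierarchy using only the QueueInfo getters defined in this interface.
import java.util.List;

import org.apache.hadoop.yarn.api.records.QueueInfo;

public class QueueInfoSketch {

  public static void print(QueueInfo queue, String indent) {
    int apps = queue.getApplications() == null
        ? 0 : queue.getApplications().size();
    System.out.println(indent + queue.getQueueName()
        + " state=" + queue.getQueueState()
        + " capacity=" + queue.getCapacity()
        + " maximumCapacity=" + queue.getMaximumCapacity()
        + " currentCapacity=" + queue.getCurrentCapacity()
        + " runningApps=" + apps);
    List<QueueInfo> children = queue.getChildQueues();
    if (children != null) {
      for (QueueInfo child : children) {
        print(child, indent + "  ");
      }
    }
  }
}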
+ * + * @see QueueState + * @see ClientRMProtocol#getQueueInfo(org.apache.hadoop.yarn.api.protocolrecords.GetQueueInfoRequest) + */ +@Public +@Stable public interface QueueInfo { + /** + * Get the name of the queue. + * @return name of the queue + */ + @Public + @Stable String getQueueName(); + + @Private + @Unstable void setQueueName(String queueName); + /** + * Get the configured capacity of the queue. + * @return configured capacity of the queue + */ + @Public + @Stable float getCapacity(); + + @Private + @Unstable void setCapacity(float capacity); + /** + * Get the maximum capacity of the queue. + * @return maximum capacity of the queue + */ + @Public + @Stable float getMaximumCapacity(); + + @Private + @Unstable void setMaximumCapacity(float maximumCapacity); + /** + * Get the current capacity of the queue. + * @return current capacity of the queue + */ + @Public + @Stable float getCurrentCapacity(); + + @Private + @Unstable void setCurrentCapacity(float currentCapacity); + /** + * Get the child queues of the queue. + * @return child queues of the queue + */ + @Public + @Stable List getChildQueues(); + + @Private + @Unstable void setChildQueues(List childQueues); + /** + * Get the running applications of the queue. + * @return running applications of the queue + */ + @Public + @Stable List getApplications(); + + @Private + @Unstable void setApplications(List applications); + /** + * Get the QueueState of the queue. + * @return QueueState of the queue + */ + @Public + @Stable QueueState getQueueState(); + + @Private + @Unstable void setQueueState(QueueState queueState); } Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/QueueState.java URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/QueueState.java?rev=1166495&r1=1166494&r2=1166495&view=diff ============================================================================== --- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/QueueState.java (original) +++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/QueueState.java Thu Sep 8 01:39:07 2011 @@ -18,10 +18,33 @@ package org.apache.hadoop.yarn.api.records; +import org.apache.hadoop.classification.InterfaceAudience.Public; +import org.apache.hadoop.classification.InterfaceStability.Stable; +import org.apache.hadoop.yarn.api.ClientRMProtocol; + /** - * State of a Queue + *

State of a Queue.
+ *
+ * A queue is in one of:
+ *   - {@link #RUNNING} - normal state.
+ *   - {@link #STOPPED} - not accepting new application submissions.
+ *

+ * + * @see QueueInfo + * @see ClientRMProtocol#getQueueInfo(org.apache.hadoop.yarn.api.protocolrecords.GetQueueInfoRequest) */ +@Public +@Stable public enum QueueState { - STOPPED, + /** + * Stopped - Not accepting submissions of new applications. + */ + STOPPED, + + /** + * Running - normal operation. + */ RUNNING } \ No newline at end of file Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/QueueUserACLInfo.java URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/QueueUserACLInfo.java?rev=1166495&r1=1166494&r2=1166495&view=diff ============================================================================== --- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/QueueUserACLInfo.java (original) +++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/QueueUserACLInfo.java Thu Sep 8 01:39:07 2011 @@ -20,10 +20,43 @@ package org.apache.hadoop.yarn.api.recor import java.util.List; +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.classification.InterfaceAudience.Public; +import org.apache.hadoop.classification.InterfaceStability.Stable; +import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.apache.hadoop.yarn.api.ClientRMProtocol; + +/** + *

QueueUserACLInfo provides {@link QueueACL} information
+ * for the given user.

+ * + * @see QueueACL + * @see ClientRMProtocol#getQueueUserAcls(org.apache.hadoop.yarn.api.protocolrecords.GetQueueUserAclsInfoRequest) + */ +@Public +@Stable public interface QueueUserACLInfo { + /** + * Get the queue name of the queue. + * @return queue name of the queue + */ + @Public + @Stable String getQueueName(); - void setQueueName(String queueName); + @Private + @Unstable + void setQueueName(String queueName); + + /** + * Get the list of QueueACL for the given user. + * @return list of QueueACL for the given user + */ + @Public + @Stable List getUserAcls(); + + @Private + @Unstable void setUserAcls(List acls); } Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ContainerIdPBImpl.java URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ContainerIdPBImpl.java?rev=1166495&r1=1166494&r2=1166495&view=diff ============================================================================== --- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ContainerIdPBImpl.java (original) +++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ContainerIdPBImpl.java Thu Sep 8 01:39:07 2011 @@ -29,8 +29,6 @@ import org.apache.hadoop.yarn.proto.Yarn import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationIdProto; import org.apache.hadoop.yarn.proto.YarnProtos.ContainerIdProto; import org.apache.hadoop.yarn.proto.YarnProtos.ContainerIdProtoOrBuilder; -import org.mortbay.log.Log; - public class ContainerIdPBImpl extends ProtoBase implements ContainerId { Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_service_protos.proto URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_service_protos.proto?rev=1166495&r1=1166494&r2=1166495&view=diff ============================================================================== --- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_service_protos.proto (original) +++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_service_protos.proto Thu Sep 8 01:39:07 2011 @@ -143,6 +143,7 @@ message StartContainerRequestProto { } message StartContainerResponseProto { + repeated StringBytesMapProto service_response = 1; } message StopContainerRequestProto { Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/ipc/ProtoOverHadoopRpcEngine.java URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/ipc/ProtoOverHadoopRpcEngine.java?rev=1166495&r1=1166494&r2=1166495&view=diff ============================================================================== --- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/ipc/ProtoOverHadoopRpcEngine.java (original) +++ 
hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/ipc/ProtoOverHadoopRpcEngine.java Thu Sep 8 01:39:07 2011 @@ -317,15 +317,15 @@ public class ProtoOverHadoopRpcEngine im } @Override - public Writable call(Class protocol, Writable writableRequest, + public Writable call(String protocol, Writable writableRequest, long receiveTime) throws IOException { ProtoSpecificRequestWritable request = (ProtoSpecificRequestWritable) writableRequest; ProtoSpecificRpcRequest rpcRequest = request.message; String methodName = rpcRequest.getMethodName(); - System.out.println("Call: protocol=" + protocol.getCanonicalName() + ", method=" + System.out.println("Call: protocol=" + protocol + ", method=" + methodName); if (verbose) - log("Call: protocol=" + protocol.getCanonicalName() + ", method=" + log("Call: protocol=" + protocol + ", method=" + methodName); MethodDescriptor methodDescriptor = service.getDescriptorForType() .findMethodByName(methodName); Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ConverterUtils.java URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ConverterUtils.java?rev=1166495&r1=1166494&r2=1166495&view=diff ============================================================================== --- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ConverterUtils.java (original) +++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ConverterUtils.java Thu Sep 8 01:39:07 2011 @@ -50,7 +50,7 @@ public class ConverterUtils { * * @param url * url to convert - * @return + * @return path from {@link URL} * @throws URISyntaxException */ public static Path getPathFromYarnURL(URL url) throws URISyntaxException { @@ -63,8 +63,8 @@ public class ConverterUtils { /** * change from CharSequence to string for map key and value - * @param env - * @return + * @param env map for converting + * @return string,string map */ public static Map convertToString( Map env) { Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ProcfsBasedProcessTree.java URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ProcfsBasedProcessTree.java?rev=1166495&r1=1166494&r2=1166495&view=diff ============================================================================== --- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ProcfsBasedProcessTree.java (original) +++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ProcfsBasedProcessTree.java Thu Sep 8 01:39:07 2011 @@ -221,8 +221,7 @@ public class ProcfsBasedProcessTree { } /** Verify that the given process id is same as its process group id. - * @param pidStr Process id of the to-be-verified-process - * @param procfsDir Procfs root dir + * @return true if the process id matches else return false. 
*/ public boolean checkPidPgrpidForMatch() { return checkPidPgrpidForMatch(pid, PROCFS); Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/RackResolver.java URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/RackResolver.java?rev=1166495&r1=1166494&r2=1166495&view=diff ============================================================================== --- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/RackResolver.java (original) +++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/RackResolver.java Thu Sep 8 01:39:07 2011 @@ -62,7 +62,7 @@ public class RackResolver { * right resolver implementation. * @param conf * @param hostName - * @return + * @return node {@link Node} after resolving the hostname */ public static Node resolve(Configuration conf, String hostName) { init(conf); @@ -74,7 +74,7 @@ public class RackResolver { * network topology. This method doesn't initialize the class. * Call {@link #init(Configuration)} explicitly. * @param hostName - * @return + * @return node {@link Node} after resolving the hostname */ public static Node resolve(String hostName) { if (!initCalled) { Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java?rev=1166495&r1=1166494&r2=1166495&view=diff ============================================================================== --- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java (original) +++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java Thu Sep 8 01:39:07 2011 @@ -77,11 +77,18 @@ public abstract class ContainerExecutor List localDirs) throws IOException, InterruptedException; + /** * Launch the container on the node. This is a blocking call and returns only * when the container exits. 
- * - * @param launchCtxt + * @param container the container to be launched + * @param nmPrivateContainerScriptPath the path for launch script + * @param nmPrivateTokensPath the path for tokens for the container + * @param user the user of the container + * @param appId the appId of the container + * @param containerWorkDir the work dir for the container + * @return the return status of the launch + * @throws IOException */ public abstract int launchContainer(Container container, Path nmPrivateContainerScriptPath, Path nmPrivateTokensPath, Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DeletionService.java URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DeletionService.java?rev=1166495&r1=1166494&r2=1166495&view=diff ============================================================================== --- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DeletionService.java (original) +++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DeletionService.java Thu Sep 8 01:39:07 2011 @@ -60,11 +60,14 @@ public class DeletionService extends Abs this.exec = exec; this.debugDelay = 0; } - + + /** + * /** * Delete the path(s) as this user. * @param user The user to delete as, or the JVM user if null - * @param p Paths to delete + * @param subDir the sub directory name + * @param baseDirs the base directories which contains the subDir's */ public void delete(String user, Path subDir, Path... 
baseDirs) { // TODO if parent owned by NM, rename within parent inline Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java?rev=1166495&r1=1166494&r2=1166495&view=diff ============================================================================== --- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java (original) +++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java Thu Sep 8 01:39:07 2011 @@ -26,11 +26,14 @@ import java.util.concurrent.ConcurrentHa import java.util.concurrent.ConcurrentMap; import java.util.concurrent.ConcurrentSkipListMap; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; import org.apache.hadoop.NodeHealthCheckerService; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; import org.apache.hadoop.security.SecurityUtil; import org.apache.hadoop.util.ReflectionUtils; +import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.yarn.YarnException; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.ContainerId; @@ -49,6 +52,7 @@ import org.apache.hadoop.yarn.service.Co import org.apache.hadoop.yarn.service.Service; public class NodeManager extends CompositeService { + private static final Log LOG = LogFactory.getLog(NodeManager.class); protected final NodeManagerMetrics metrics = NodeManagerMetrics.create(); public NodeManager() { @@ -185,6 +189,7 @@ public class NodeManager extends Composi } public static void main(String[] args) { + StringUtils.startupShutdownMessage(NodeManager.class, args, LOG); NodeManager nodeManager = new NodeManager(); YarnConfiguration conf = new YarnConfiguration(); nodeManager.init(conf); Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/AuxServices.java URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/AuxServices.java?rev=1166495&r1=1166494&r2=1166495&view=diff ============================================================================== --- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/AuxServices.java (original) +++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/AuxServices.java Thu Sep 8 01:39:07 2011 @@ -44,11 +44,14 @@ public class AuxServices extends Abstrac public static final String AUX_SERVICE_CLASS_FMT = 
"nodemanager.aux.service.%s.class"; public final Map serviceMap; + public final Map serviceMeta; public AuxServices() { super(AuxServices.class.getName()); serviceMap = Collections.synchronizedMap(new HashMap()); + serviceMeta = + Collections.synchronizedMap(new HashMap()); // Obtain services from configuration in init() } @@ -63,6 +66,15 @@ public class AuxServices extends Abstrac return Collections.unmodifiableCollection(serviceMap.values()); } + /** + * @return the meta data for all registered services, that have been started. + * If a service has not been started no metadata will be available. The key + * the the name of the service as defined in the configuration. + */ + public Map getMeta() { + return Collections.unmodifiableMap(serviceMeta); + } + @Override public void init(Configuration conf) { Collection auxNames = conf.getStringCollection(AUX_SERVICES); @@ -75,7 +87,15 @@ public class AuxServices extends Abstrac throw new RuntimeException("No class defiend for " + sName); } AuxiliaryService s = ReflectionUtils.newInstance(sClass, conf); - // TODO better use use s.getName()? + // TODO better use s.getName()? + if(!sName.equals(s.getName())) { + LOG.warn("The Auxilurary Service named '"+sName+"' in the " + +"configuration is for class "+sClass+" which has " + +"a name of '"+s.getName()+"'. Because these are " + +"not the same tools trying to send ServiceData and read " + +"Service Meta Data may have issues unless the refer to " + +"the name in the config."); + } addService(sName, s); s.init(conf); } catch (RuntimeException e) { @@ -90,9 +110,15 @@ public class AuxServices extends Abstrac public void start() { // TODO fork(?) services running as configured user // monitor for health, shutdown/restart(?) if any should die - for (Service service : serviceMap.values()) { + for (Map.Entry entry : serviceMap.entrySet()) { + AuxiliaryService service = entry.getValue(); + String name = entry.getKey(); service.start(); service.register(this); + ByteBuffer meta = service.getMeta(); + if(meta != null) { + serviceMeta.put(name, meta); + } } super.start(); } @@ -108,6 +134,7 @@ public class AuxServices extends Abstrac } } serviceMap.clear(); + serviceMeta.clear(); } } finally { super.stop(); @@ -146,6 +173,15 @@ public class AuxServices extends Abstrac public interface AuxiliaryService extends Service { void initApp(String user, ApplicationId appId, ByteBuffer data); void stopApp(ApplicationId appId); + /** + * Retreive metadata for this service. This is likely going to be contact + * information so that applications can access the service remotely. Ideally + * each service should provide a method to parse out the information to a usable + * class. This will only be called after the services start method has finished. + * the result may be cached. + * @return metadata for this service that should be made avaiable to applications. 
Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java?rev=1166495&r1=1166494&r2=1166495&view=diff ============================================================================== --- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java (original) +++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java Thu Sep 8 01:39:07 2011 @@ -65,6 +65,8 @@ import org.apache.hadoop.yarn.server.nod import org.apache.hadoop.yarn.server.nodemanager.ContainerManagerEvent; import org.apache.hadoop.yarn.server.nodemanager.Context; import org.apache.hadoop.yarn.server.nodemanager.DeletionService; +import org.apache.hadoop.yarn.server.nodemanager.NMAuditLogger; +import org.apache.hadoop.yarn.server.nodemanager.NMAuditLogger.AuditConstants; import org.apache.hadoop.yarn.server.nodemanager.NMConfig; import org.apache.hadoop.yarn.server.nodemanager.NodeStatusUpdater; import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.Application; @@ -266,6 +268,10 @@ public class ContainerManagerImpl extend ContainerId containerID = launchContext.getContainerId(); ApplicationId applicationID = containerID.getAppId(); if (context.getContainers().putIfAbsent(containerID, container) != null) { + NMAuditLogger.logFailure(launchContext.getUser(), + AuditConstants.START_CONTAINER, "ContainerManagerImpl", + "Container already running on this node!", + applicationID, containerID); throw RPCUtil.getRemoteException("Container " + containerID + " already is running on this node!!"); } @@ -281,8 +287,14 @@ public class ContainerManagerImpl extend // TODO: Validate the request dispatcher.getEventHandler().handle(new ApplicationInitEvent(container)); + + NMAuditLogger.logSuccess(launchContext.getUser(), + AuditConstants.START_CONTAINER, "ContainerManagerImpl", + applicationID, containerID); + StartContainerResponse response = recordFactory.newRecordInstance(StartContainerResponse.class); + response.addAllServiceResponse(auxiluaryServices.getMeta()); metrics.launchedContainer(); metrics.allocateContainer(launchContext.getResource()); return response; @@ -299,12 +311,23 @@ public class ContainerManagerImpl extend Container container = this.context.getContainers().get(containerID); if (container == null) { LOG.warn("Trying to stop unknown container " + containerID); + NMAuditLogger.logFailure("UNKNOWN", + AuditConstants.STOP_CONTAINER, "ContainerManagerImpl", + "Trying to stop unknown container!", + containerID.getAppId(), containerID); return response; // Return immediately. } dispatcher.getEventHandler().handle( new ContainerKillEvent(containerID, "Container killed by the ApplicationMaster.")); + // The user logged here comes from the container rather than the request + // (which carries no user field); it should be the AM's user, so it either + // matches or the call would already have been rejected by auth. + NMAuditLogger.logSuccess(container.getUser(), + AuditConstants.STOP_CONTAINER, "ContainerManagerImpl", + containerID.getAppId(), containerID); + // TODO: Move this code to appropriate place once kill_container is // implemented. nodeStatusUpdater.sendOutofBandHeartBeat();
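On the consuming side, the per-service metadata travels back inside the StartContainerResponse built above. A hedged sketch of reading it follows; it assumes StartContainerResponse exposes the populated map through a getAllServiceResponse() accessor (only addAllServiceResponse appears in this diff), and the service name passed in is hypothetical.

import java.nio.ByteBuffer;
import java.util.Map;

import org.apache.hadoop.yarn.api.protocolrecords.StartContainerResponse;

public class AuxServiceMetaReader {

  // Assumption: getAllServiceResponse() returns the Map<String, ByteBuffer>
  // populated by addAllServiceResponse() in ContainerManagerImpl.startContainer().
  public static ByteBuffer metaFor(StartContainerResponse response,
      String serviceName) {
    Map<String, ByteBuffer> allMeta = response.getAllServiceResponse();
    // Keys are the service names from the NodeManager configuration, which is
    // why AuxServices.init() warns when the configured name and Service#getName()
    // disagree.
    return allMeta == null ? null : allMeta.get(serviceName);
  }
}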
Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java?rev=1166495&r1=1166494&r2=1166495&view=diff ============================================================================== --- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java (original) +++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java Thu Sep 8 01:39:07 2011 @@ -42,6 +42,8 @@ import org.apache.hadoop.yarn.event.Disp import org.apache.hadoop.yarn.event.EventHandler; import org.apache.hadoop.yarn.factories.RecordFactory; import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider; +import org.apache.hadoop.yarn.server.nodemanager.NMAuditLogger; +import org.apache.hadoop.yarn.server.nodemanager.NMAuditLogger.AuditConstants; import org.apache.hadoop.yarn.server.nodemanager.containermanager.AuxServicesEvent; import org.apache.hadoop.yarn.server.nodemanager.containermanager.AuxServicesEventType; import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.ApplicationContainerFinishedEvent; @@ -365,18 +367,28 @@ public class ContainerImpl implements Co case EXITED_WITH_SUCCESS: metrics.endRunningContainer(); metrics.completedContainer(); + NMAuditLogger.logSuccess(getUser(), + AuditConstants.FINISH_SUCCESS_CONTAINER, "ContainerImpl", + getContainerID().getAppId(), getContainerID()); break; case EXITED_WITH_FAILURE: metrics.endRunningContainer(); // fall through case LOCALIZATION_FAILED: metrics.failedContainer(); + NMAuditLogger.logFailure(getUser(), + AuditConstants.FINISH_FAILED_CONTAINER, "ContainerImpl", + "Container failed with state: " + getContainerState(), + getContainerID().getAppId(), getContainerID()); break; case CONTAINER_CLEANEDUP_AFTER_KILL: metrics.endRunningContainer(); // fall through case NEW: metrics.killedContainer(); + NMAuditLogger.logSuccess(getUser(), + AuditConstants.FINISH_KILLED_CONTAINER, "ContainerImpl", + getContainerID().getAppId(), getContainerID()); } metrics.releaseContainer(getLaunchContext().getResource()); Modified:
hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java?rev=1166495&r1=1166494&r2=1166495&view=diff ============================================================================== --- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java (original) +++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java Thu Sep 8 01:39:07 2011 @@ -100,7 +100,8 @@ public class ContainerLaunch implements String appIdStr = app.toString(); Path containerLogDir = this.logDirsSelector.getLocalPathForWrite(appIdStr + Path.SEPARATOR - + containerIdStr, LocalDirAllocator.SIZE_UNKNOWN, this.conf); + + containerIdStr, LocalDirAllocator.SIZE_UNKNOWN, this.conf, + false); for (String str : command) { // TODO: Should we instead work via symlinks without this grammar? newCmds.add(str.replace(ApplicationConstants.LOG_DIR_EXPANSION_VAR, @@ -147,7 +148,7 @@ public class ContainerLaunch implements + Path.SEPARATOR + user + Path.SEPARATOR + ContainerLocalizer.APPCACHE + Path.SEPARATOR + appIdStr + Path.SEPARATOR + containerIdStr, - LocalDirAllocator.SIZE_UNKNOWN, this.conf); + LocalDirAllocator.SIZE_UNKNOWN, this.conf, false); try { // /////////// Write out the container-script in the nmPrivate space. 
String[] localDirs = Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestAuxServices.java URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestAuxServices.java?rev=1166495&r1=1166494&r2=1166495&view=diff ============================================================================== --- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestAuxServices.java (original) +++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestAuxServices.java Thu Sep 8 01:39:07 2011 @@ -22,6 +22,7 @@ import org.junit.Test; import static org.junit.Assert.*; import java.nio.ByteBuffer; +import java.util.Map; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.yarn.api.records.ApplicationId; @@ -44,10 +45,16 @@ public class TestAuxServices { private final int expected_appId; private int remaining_init; private int remaining_stop; + private ByteBuffer meta = null; + LightService(String name, char idef, int expected_appId) { + this(name, idef, expected_appId, null); + } + LightService(String name, char idef, int expected_appId, ByteBuffer meta) { super(name); this.idef = idef; this.expected_appId = expected_appId; + this.meta = meta; } @Override public void init(Configuration conf) { @@ -71,14 +78,18 @@ public class TestAuxServices { public void stopApp(ApplicationId appId) { assertEquals(expected_appId, appId.getId()); } + @Override + public ByteBuffer getMeta() { + return meta; + } } static class ServiceA extends LightService { - public ServiceA() { super("A", 'A', 65); } + public ServiceA() { super("A", 'A', 65, ByteBuffer.wrap("A".getBytes())); } } static class ServiceB extends LightService { - public ServiceB() { super("B", 'B', 66); } + public ServiceB() { super("B", 'B', 66, ByteBuffer.wrap("B".getBytes())); } } @Test @@ -139,6 +150,44 @@ public class TestAuxServices { } } + + @Test + public void testAuxServicesMeta() { + Configuration conf = new Configuration(); + conf.setStrings(AuxServices.AUX_SERVICES, new String[] { "Asrv", "Bsrv" }); + conf.setClass(String.format(AuxServices.AUX_SERVICE_CLASS_FMT, "Asrv"), + ServiceA.class, Service.class); + conf.setClass(String.format(AuxServices.AUX_SERVICE_CLASS_FMT, "Bsrv"), + ServiceB.class, Service.class); + final AuxServices aux = new AuxServices(); + aux.init(conf); + + int latch = 1; + for (Service s : aux.getServices()) { + assertEquals(INITED, s.getServiceState()); + if (s instanceof ServiceA) { latch *= 2; } + else if (s instanceof ServiceB) { latch *= 3; } + else fail("Unexpected service type " + s.getClass()); + } + assertEquals("Invalid mix of services", 6, latch); + aux.start(); + for (Service s : aux.getServices()) { + assertEquals(STARTED, s.getServiceState()); + } + + Map meta = aux.getMeta(); + assertEquals(2, meta.size()); + assertEquals("A", new String(meta.get("Asrv").array())); + assertEquals("B", new String(meta.get("Bsrv").array())); + + aux.stop(); + for (Service s : aux.getServices()) { + assertEquals(STOPPED, 
s.getServiceState()); + } + } + + + @Test public void testAuxUnexpectedStop() { Configuration conf = new Configuration(); Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/AdminService.java URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/AdminService.java?rev=1166495&r1=1166494&r2=1166495&view=diff ============================================================================== --- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/AdminService.java (original) +++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/AdminService.java Thu Sep 8 01:39:07 2011 @@ -51,6 +51,7 @@ import org.apache.hadoop.yarn.server.res import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.RefreshUserToGroupsMappingsRequest; import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.RefreshUserToGroupsMappingsResponse; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler; +import org.apache.hadoop.yarn.server.resourcemanager.RMAuditLogger.AuditConstants; import org.apache.hadoop.yarn.service.AbstractService; public class AdminService extends AbstractService implements RMAdminProtocol { @@ -113,40 +114,54 @@ public class AdminService extends Abstra super.stop(); } - private void checkAcls(String method) throws YarnRemoteException { + private UserGroupInformation checkAcls(String method) throws YarnRemoteException { + UserGroupInformation user; try { - UserGroupInformation user = UserGroupInformation.getCurrentUser(); - if (!adminAcl.isUserAllowed(user)) { - LOG.warn("User " + user.getShortUserName() + " doesn't have permission" + - " to call '" + method + "'"); - - throw RPCUtil.getRemoteException( - new AccessControlException("User " + user.getShortUserName() + - " doesn't have permission" + - " to call '" + method + "'") - ); - } - - LOG.info("RM Admin: " + method + " invoked by user " + - user.getShortUserName()); - + user = UserGroupInformation.getCurrentUser(); } catch (IOException ioe) { LOG.warn("Couldn't get current user", ioe); + + RMAuditLogger.logFailure("UNKNOWN", method, + adminAcl.toString(), "AdminService", + "Couldn't get current user"); throw RPCUtil.getRemoteException(ioe); } + + if (!adminAcl.isUserAllowed(user)) { + LOG.warn("User " + user.getShortUserName() + " doesn't have permission" + + " to call '" + method + "'"); + + RMAuditLogger.logFailure(user.getShortUserName(), method, + adminAcl.toString(), "AdminService", + AuditConstants.UNAUTHORIZED_USER); + + throw RPCUtil.getRemoteException( + new AccessControlException("User " + user.getShortUserName() + + " doesn't have permission" + + " to call '" + method + "'") + ); + } + LOG.info("RM Admin: " + method + " invoked by user " + + user.getShortUserName()); + + return user; } @Override public RefreshQueuesResponse refreshQueues(RefreshQueuesRequest request) throws YarnRemoteException { - checkAcls("refreshQueues"); - + UserGroupInformation user = checkAcls("refreshQueues"); try { scheduler.reinitialize(conf, null, 
null); // ContainerTokenSecretManager can't // be 'refreshed' + RMAuditLogger.logSuccess(user.getShortUserName(), "refreshQueues", + "AdminService"); return recordFactory.newRecordInstance(RefreshQueuesResponse.class); } catch (IOException ioe) { LOG.info("Exception refreshing queues ", ioe); + RMAuditLogger.logFailure(user.getShortUserName(), "refreshQueues", + adminAcl.toString(), "AdminService", + "Exception refreshing queues"); throw RPCUtil.getRemoteException(ioe); } } @@ -154,12 +169,17 @@ public class AdminService extends Abstra @Override public RefreshNodesResponse refreshNodes(RefreshNodesRequest request) throws YarnRemoteException { - checkAcls("refreshNodes"); + UserGroupInformation user = checkAcls("refreshNodes"); try { this.nodesListManager.refreshNodes(); + RMAuditLogger.logSuccess(user.getShortUserName(), "refreshNodes", + "AdminService"); return recordFactory.newRecordInstance(RefreshNodesResponse.class); } catch (IOException ioe) { LOG.info("Exception refreshing nodes ", ioe); + RMAuditLogger.logFailure(user.getShortUserName(), "refreshNodes", + adminAcl.toString(), "AdminService", + "Exception refreshing nodes"); throw RPCUtil.getRemoteException(ioe); } } @@ -168,9 +188,11 @@ public class AdminService extends Abstra public RefreshSuperUserGroupsConfigurationResponse refreshSuperUserGroupsConfiguration( RefreshSuperUserGroupsConfigurationRequest request) throws YarnRemoteException { - checkAcls("refreshSuperUserGroupsConfiguration"); + UserGroupInformation user = checkAcls("refreshSuperUserGroupsConfiguration"); ProxyUsers.refreshSuperUserGroupsConfiguration(new Configuration()); + RMAuditLogger.logSuccess(user.getShortUserName(), + "refreshSuperUserGroupsConfiguration", "AdminService"); return recordFactory.newRecordInstance( RefreshSuperUserGroupsConfigurationResponse.class); @@ -179,9 +201,11 @@ public class AdminService extends Abstra @Override public RefreshUserToGroupsMappingsResponse refreshUserToGroupsMappings( RefreshUserToGroupsMappingsRequest request) throws YarnRemoteException { - checkAcls("refreshUserToGroupsMappings"); + UserGroupInformation user = checkAcls("refreshUserToGroupsMappings"); Groups.getUserToGroupsMappingService().refresh(); + RMAuditLogger.logSuccess(user.getShortUserName(), + "refreshUserToGroupsMappings", "AdminService"); return recordFactory.newRecordInstance( RefreshUserToGroupsMappingsResponse.class); @@ -190,12 +214,14 @@ public class AdminService extends Abstra @Override public RefreshAdminAclsResponse refreshAdminAcls( RefreshAdminAclsRequest request) throws YarnRemoteException { - checkAcls("refreshAdminAcls"); + UserGroupInformation user = checkAcls("refreshAdminAcls"); Configuration conf = new Configuration(); adminAcl = new AccessControlList( conf.get(RMConfig.RM_ADMIN_ACL, RMConfig.DEFAULT_RM_ADMIN_ACL)); + RMAuditLogger.logSuccess(user.getShortUserName(), "refreshAdminAcls", + "AdminService"); return recordFactory.newRecordInstance(RefreshAdminAclsResponse.class); } Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ApplicationMasterService.java URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ApplicationMasterService.java?rev=1166495&r1=1166494&r2=1166495&view=diff 
============================================================================== --- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ApplicationMasterService.java (original) +++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ApplicationMasterService.java Thu Sep 8 01:39:07 2011 @@ -38,6 +38,7 @@ import org.apache.hadoop.yarn.api.protoc import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterRequest; import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterResponse; import org.apache.hadoop.yarn.api.records.AMResponse; +import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.api.records.ResourceRequest; @@ -49,6 +50,7 @@ import org.apache.hadoop.yarn.ipc.RPCUti import org.apache.hadoop.yarn.ipc.YarnRPC; import org.apache.hadoop.yarn.security.ApplicationTokenSecretManager; import org.apache.hadoop.yarn.security.SchedulerSecurityInfo; +import org.apache.hadoop.yarn.server.resourcemanager.RMAuditLogger.AuditConstants; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.AMLivelinessMonitor; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt; @@ -115,11 +117,16 @@ public class ApplicationMasterService ex ApplicationAttemptId applicationAttemptId = request .getApplicationAttemptId(); + ApplicationId appID = applicationAttemptId.getApplicationId(); AMResponse lastResponse = responseMap.get(applicationAttemptId); if (lastResponse == null) { String message = "Application doesn't exist in cache " + applicationAttemptId; LOG.error(message); + RMAuditLogger.logFailure(this.rmContext.getRMApps().get(appID).getUser(), + AuditConstants.REGISTER_AM, message, "ApplicationMasterService", + "Error in registering application master", appID, + applicationAttemptId); throw RPCUtil.getRemoteException(message); } @@ -133,6 +140,10 @@ public class ApplicationMasterService ex new RMAppAttemptRegistrationEvent(applicationAttemptId, request .getHost(), request.getRpcPort(), request.getTrackingUrl())); + RMAuditLogger.logSuccess(this.rmContext.getRMApps().get(appID).getUser(), + AuditConstants.REGISTER_AM, "ApplicationMasterService", appID, + applicationAttemptId); + // Pick up min/max resource from scheduler... RegisterApplicationMasterResponse response = recordFactory .newRecordInstance(RegisterApplicationMasterResponse.class);