jt = deserializeServiceData(secret);
+ // TODO: Once SHuffle is out of NM, this can use MR APIs
JobID jobId = new JobID(Long.toString(appId.getClusterTimestamp()), appId.getId());
userRsrc.put(jobId.toString(), user);
LOG.info("Added token for " + jobId.toString());
@@ -193,7 +238,7 @@ public class ShuffleHandler extends Abst
Configuration conf = getConfig();
ServerBootstrap bootstrap = new ServerBootstrap(selector);
bootstrap.setPipelineFactory(new HttpPipelineFactory(conf));
- port = conf.getInt("mapreduce.shuffle.port", 8080);
+ port = conf.getInt(SHUFFLE_PORT_CONFIG_KEY, DEFAULT_SHUFFLE_PORT);
accepted.add(bootstrap.bind(new InetSocketAddress(port)));
LOG.info(getName() + " listening on port " + port);
super.start();
@@ -207,6 +252,17 @@ public class ShuffleHandler extends Abst
super.stop();
}
+ @Override
+ public synchronized ByteBuffer getMeta() {
+ try {
+ return serializeMetaData(port);
+ } catch (IOException e) {
+ LOG.error("Error during getMeta", e);
+ // TODO add API to AuxiliaryServices to report failures
+ return null;
+ }
+ }
+
Shuffle createShuffle() {
return new Shuffle(getConfig());
}
@@ -306,7 +362,7 @@ public class ShuffleHandler extends Abst
HttpResponse response = new DefaultHttpResponse(HTTP_1_1, OK);
try {
verifyRequest(jobId, ctx, request, response,
- new URL("http", "", 8080, reqUri));
+ new URL("http", "", port, reqUri));
} catch (IOException e) {
LOG.warn("Shuffle failure ", e);
sendError(ctx, e.getMessage(), UNAUTHORIZED);
Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/test/java/org/apache/hadoop/mapred/TestShuffleHandler.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/test/java/org/apache/hadoop/mapred/TestShuffleHandler.java?rev=1166495&r1=1166494&r2=1166495&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/test/java/org/apache/hadoop/mapred/TestShuffleHandler.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/test/java/org/apache/hadoop/mapred/TestShuffleHandler.java Thu Sep 8 01:39:07 2011
@@ -26,11 +26,21 @@ import static org.apache.hadoop.test.Met
import org.jboss.netty.channel.ChannelFuture;
import org.junit.Test;
+import static org.junit.Assert.*;
import static org.apache.hadoop.test.MockitoMaker.*;
public class TestShuffleHandler {
static final long MiB = 1024 * 1024;
+ @Test public void testSerializeMeta() throws Exception {
+ assertEquals(1, ShuffleHandler.deserializeMetaData(
+ ShuffleHandler.serializeMetaData(1)));
+ assertEquals(-1, ShuffleHandler.deserializeMetaData(
+ ShuffleHandler.serializeMetaData(-1)));
+ assertEquals(8080, ShuffleHandler.deserializeMetaData(
+ ShuffleHandler.serializeMetaData(8080)));
+ }
+
@Test public void testShuffleMetrics() throws Exception {
MetricsSystem ms = new MetricsSystemImpl();
ShuffleHandler sh = new ShuffleHandler(ms);
Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/bin/yarn
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/bin/yarn?rev=1166495&r1=1166494&r2=1166495&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/bin/yarn (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/bin/yarn Thu Sep 8 01:39:07 2011
@@ -148,132 +148,18 @@ IFS=
# add hadoop-common libs to CLASSPATH
-if [ -d "$HADOOP_COMMON_HOME/build/classes" ]; then
- CLASSPATH=${CLASSPATH}:$HADOOP_COMMON_HOME/build/classes
-fi
-if [ -d "$HADOOP_COMMON_HOME/build/webapps" ]; then
- CLASSPATH=${CLASSPATH}:$HADOOP_COMMON_HOME/build
-fi
-if [ -d "$HADOOP_COMMON_HOME/build/test/classes" ]; then
- CLASSPATH=${CLASSPATH}:$HADOOP_COMMON_HOME/build/test/classes
-fi
-if [ -d "$HADOOP_COMMON_HOME/build/test/core/classes" ]; then
- CLASSPATH=${CLASSPATH}:$HADOOP_COMMON_HOME/build/test/core/classes
-fi
-
-for f in $HADOOP_COMMON_HOME/hadoop-*.jar; do
- CLASSPATH=${CLASSPATH}:$f;
-done
-
-for f in $HADOOP_COMMON_HOME/lib/*.jar; do
- CLASSPATH=${CLASSPATH}:$f;
-done
-
-for f in $HADOOP_COMMON_HOME/share/hadoop/common/*.jar; do
- CLASSPATH=${CLASSPATH}:$f;
-done
-
-for f in $HADOOP_COMMON_HOME/share/hadoop/common/lib/*.jar; do
- CLASSPATH=${CLASSPATH}:$f;
-done
-
-for f in $HADOOP_COMMON_HOME/share/hadoop/hdfs/*.jar; do
- CLASSPATH=${CLASSPATH}:$f;
-done
-
-if [ -d "$HADOOP_COMMON_HOME/build/ivy/lib/Hadoop-Common/common" ]; then
-for f in $HADOOP_COMMON_HOME/build/ivy/lib/Hadoop-Common/common/*.jar; do
- CLASSPATH=${CLASSPATH}:$f;
-done
-fi
-
-if [ -d "$HADOOP_COMMON_HOME/build/ivy/lib/Hadoop-Hdfs/common" ]; then
-for f in $HADOOP_COMMON_HOME/build/ivy/lib/Hadoop-Hdfs/common/*.jar; do
- CLASSPATH=${CLASSPATH}:$f;
-done
-fi
-
-if [ -d "$HADOOP_COMMON_HOME/build/ivy/lib/Hadoop/common" ]; then
-for f in $HADOOP_COMMON_HOME/build/ivy/lib/Hadoop/common/*.jar; do
- CLASSPATH=${CLASSPATH}:$f;
-done
-fi
+CLASSPATH=${CLASSPATH}:$HADOOP_COMMON_HOME/share/hadoop/common'/*'
+CLASSPATH=${CLASSPATH}:$HADOOP_COMMON_HOME/share/hadoop/common/lib'/*'
# add hadoop-hdfs libs to CLASSPATH
-for f in $HADOOP_HDFS_HOME/hadoop-*.jar; do
- CLASSPATH=${CLASSPATH}:$f;
-done
-
-for f in $HADOOP_HDFS_HOME/lib/*.jar; do
- CLASSPATH=${CLASSPATH}:$f;
-done
-
-if [ -d "$HADOOP_HDFS_HOME/build/classes" ]; then
- CLASSPATH=${CLASSPATH}:$HADOOP_HDFS_HOME/build/classes
-fi
-if [ -d "$HADOOP_HDFS_HOME/build/webapps" ]; then
- CLASSPATH=${CLASSPATH}:$HADOOP_HDFS_HOME/build
-fi
-if [ -d "$HADOOP_HDFS_HOME/build/test/classes" ]; then
- CLASSPATH=${CLASSPATH}:$HADOOP_HDFS_HOME/build/test/classes
-fi
-if [ -d "$HADOOP_HDFS_HOME/build/tools" ]; then
- CLASSPATH=${CLASSPATH}:$HADOOP_HDFS_HOME/build/tools
-fi
-
-# add hadoop-mapred libs to CLASSPATH
-
-for f in $HADOOP_HDFS_HOME/hadoop-*.jar; do
- CLASSPATH=${CLASSPATH}:$f;
-done
-
-for f in $HADOOP_HDFS_HOME/lib/*.jar; do
- CLASSPATH=${CLASSPATH}:$f;
-done
-
-if [ -d "$HADOOP_MAPRED_HOME/build/classes" ]; then
- CLASSPATH=${CLASSPATH}:$HADOOP_MAPRED_HOME/build/classes
-fi
-if [ -d "$HADOOP_MAPRED_HOME/build/webapps" ]; then
- CLASSPATH=${CLASSPATH}:$HADOOP_MAPRED_HOME/build
-fi
-if [ -d "$HADOOP_MAPRED_HOME/build/test/classes" ]; then
- CLASSPATH=${CLASSPATH}:$HADOOP_MAPRED_HOME/build/test/classes
-fi
-if [ -d "$HADOOP_MAPRED_HOME/build/tools" ]; then
- CLASSPATH=${CLASSPATH}:$HADOOP_MAPRED_HOME/build/tools
-fi
-
-# for releases, add core mapred jar & webapps to CLASSPATH
-if [ -d "$HADOOP_MAPRED_HOME/webapps" ]; then
- CLASSPATH=${CLASSPATH}:$HADOOP_MAPRED_HOME
-fi
-
-# add libs to CLASSPATH
-for f in $HADOOP_MAPRED_HOME/lib/*.jar; do
- CLASSPATH=${CLASSPATH}:$f;
-done
-
-# add libs to CLASSPATH
-for f in $HADOOP_MAPRED_HOME/*.jar; do
- CLASSPATH=${CLASSPATH}:$f;
-done
-
-# add libs to CLASSPATH
-for f in $YARN_HOME/lib/*.jar; do
- CLASSPATH=${CLASSPATH}:$f;
-done
+CLASSPATH=${CLASSPATH}:$HADOOP_HDFS_HOME/share/hadoop/hdfs'/*'
+CLASSPATH=${CLASSPATH}:$HADOOP_HDFS_HOME/share/hadoop/hdfs/lib'/*'
# add yarn libs to CLASSPATH
-for f in $YARN_HOME/modules/*.jar; do
- CLASSPATH=${CLASSPATH}:$f;
-done
-
-# add user-specified CLASSPATH last
-if [ "$YARN_USER_CLASSPATH_FIRST" = "" ] && [ "$YARN_CLASSPATH" != "" ]; then
- CLASSPATH=${CLASSPATH}:${YARN_CLASSPATH}
-fi
+
+CLASSPATH=${CLASSPATH}:$YARN_HOME/modules'/*'
+CLASSPATH=${CLASSPATH}:$YARN_HOME/lib'/*'
# default log directory & file
if [ "$YARN_LOG_DIR" = "" ]; then
Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/AMRMProtocol.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/AMRMProtocol.java?rev=1166495&r1=1166494&r2=1166495&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/AMRMProtocol.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/AMRMProtocol.java Thu Sep 8 01:39:07 2011
@@ -18,16 +18,94 @@
package org.apache.hadoop.yarn.api;
+import org.apache.hadoop.classification.InterfaceAudience.Public;
+import org.apache.hadoop.classification.InterfaceStability.Stable;
import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest;
import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterRequest;
import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterResponse;
import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterRequest;
import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterResponse;
+import org.apache.hadoop.yarn.api.records.Container;
+import org.apache.hadoop.yarn.api.records.ResourceRequest;
import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
+/**
+ * The protocol between a live instance of <code>ApplicationMaster</code>
+ * and the <code>ResourceManager</code>.
+ *
+ * This is used by the <code>ApplicationMaster</code> to register/unregister
+ * and to request and obtain resources in the cluster from the
+ * <code>ResourceManager</code>.
+ */
+@Public
+@Stable
public interface AMRMProtocol {
- public RegisterApplicationMasterResponse registerApplicationMaster(RegisterApplicationMasterRequest request) throws YarnRemoteException;
- public FinishApplicationMasterResponse finishApplicationMaster(FinishApplicationMasterRequest request) throws YarnRemoteException;;
- public AllocateResponse allocate(AllocateRequest request) throws YarnRemoteException;
+
+ /**
+ * The interface used by a new <code>ApplicationMaster</code> to register
+ * with the <code>ResourceManager</code>.
+ *
+ * The <code>ApplicationMaster</code> needs to provide details such
+ * as RPC Port, HTTP tracking url etc. as specified in
+ * {@link RegisterApplicationMasterRequest}.
+ *
+ * The <code>ResourceManager</code> responds with critical details such
+ * as minimum and maximum resource capabilities in the cluster as specified in
+ * {@link RegisterApplicationMasterResponse}.
+ *
+ * @param request registration request
+ * @return registration response
+ * @throws YarnRemoteException
+ */
+ public RegisterApplicationMasterResponse registerApplicationMaster(
+ RegisterApplicationMasterRequest request)
+ throws YarnRemoteException;
+
+ public RegisterApplicationMasterResponse registerApplicationMaster(
+ RegisterApplicationMasterRequest request)
+ throws YarnRemoteException;
+
+ /**
+ * The interface used by an <code>ApplicationMaster</code> to notify the
+ * <code>ResourceManager</code> about its completion (success or failed).
+ *
+ * The <code>ApplicationMaster</code> has to provide details such as
+ * final state, diagnostics (in case of failures) etc. as specified in
+ * {@link FinishApplicationMasterRequest}.
+ *
+ * The <code>ResourceManager</code> responds with
+ * {@link FinishApplicationMasterResponse}.
+ *
+ * @param request completion request
+ * @return completion response
+ * @throws YarnRemoteException
+ */
+ public FinishApplicationMasterResponse finishApplicationMaster(
+ FinishApplicationMasterRequest request)
+ throws YarnRemoteException;
+
+ /**
+ * The main interface between an <code>ApplicationMaster</code>
+ * and the <code>ResourceManager</code>.
+ *
+ * The <code>ApplicationMaster</code> uses this interface to provide a list
+ * of {@link ResourceRequest} and returns unused {@link Container} allocated
+ * to it via {@link AllocateRequest}.
+ *
+ * This also doubles up as a heartbeat to let the
+ * <code>ResourceManager</code> know that the <code>ApplicationMaster</code>
+ * is alive. Thus, applications should periodically make this call to
+ * be kept alive.
+ *
+ * The <code>ResourceManager</code> responds with a list of allocated
+ * {@link Container}, status of completed containers and headroom information
+ * for the application.
+ *
+ * The <code>ApplicationMaster</code> can use the available headroom
+ * (resources) to decide how to utilize allocated resources and make
+ * informed decisions about future resource requests.
+ *
+ * @param request allocation request
+ * @return allocation response
+ * @throws YarnRemoteException
+ */
+ public AllocateResponse allocate(AllocateRequest request)
+ throws YarnRemoteException;
}
Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ClientRMProtocol.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ClientRMProtocol.java?rev=1166495&r1=1166494&r2=1166495&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ClientRMProtocol.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ClientRMProtocol.java Thu Sep 8 01:39:07 2011
@@ -18,6 +18,9 @@
package org.apache.hadoop.yarn.api;
+import org.apache.hadoop.classification.InterfaceAudience.Public;
+import org.apache.hadoop.classification.InterfaceStability.Stable;
+
import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationRequest;
import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationResponse;
import org.apache.hadoop.yarn.api.protocolrecords.GetAllApplicationsRequest;
@@ -36,16 +39,190 @@ import org.apache.hadoop.yarn.api.protoc
import org.apache.hadoop.yarn.api.protocolrecords.GetQueueUserAclsInfoResponse;
import org.apache.hadoop.yarn.api.protocolrecords.SubmitApplicationRequest;
import org.apache.hadoop.yarn.api.protocolrecords.SubmitApplicationResponse;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.ApplicationReport;
+import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
+import org.apache.hadoop.yarn.api.records.NodeReport;
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.api.records.YarnClusterMetrics;
import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
+/**
+ * The protocol between clients and the <code>ResourceManager</code>
+ * to submit/abort jobs and to get information on applications, cluster metrics,
+ * nodes, queues and ACLs.
+ */
+@Public
+@Stable
public interface ClientRMProtocol {
- public GetNewApplicationIdResponse getNewApplicationId(GetNewApplicationIdRequest request) throws YarnRemoteException;
- public GetApplicationReportResponse getApplicationReport(GetApplicationReportRequest request) throws YarnRemoteException;
- public SubmitApplicationResponse submitApplication(SubmitApplicationRequest request) throws YarnRemoteException;
- public FinishApplicationResponse finishApplication(FinishApplicationRequest request) throws YarnRemoteException;
- public GetClusterMetricsResponse getClusterMetrics(GetClusterMetricsRequest request) throws YarnRemoteException;
- public GetAllApplicationsResponse getAllApplications(GetAllApplicationsRequest request) throws YarnRemoteException;
- public GetClusterNodesResponse getClusterNodes(GetClusterNodesRequest request) throws YarnRemoteException;
- public GetQueueInfoResponse getQueueInfo(GetQueueInfoRequest request) throws YarnRemoteException;
- public GetQueueUserAclsInfoResponse getQueueUserAcls(GetQueueUserAclsInfoRequest request) throws YarnRemoteException;
+ /**
+ * The interface used by clients to obtain a new {@link ApplicationId} for
+ * submitting new applications.
+ *
+ * The <code>ResourceManager</code> responds with a new, monotonically
+ * increasing, {@link ApplicationId} which is used by the client to submit
+ * a new application.
+ *
+ * @param request request to get a new <code>ApplicationId</code>
+ * @return new <code>ApplicationId</code> to be used to submit an application
+ * @throws YarnRemoteException
+ * @see #submitApplication(SubmitApplicationRequest)
+ */
+ public GetNewApplicationIdResponse getNewApplicationId(
+ GetNewApplicationIdRequest request)
+ throws YarnRemoteException;
+
+ /**
+ * The interface used by clients to submit a new application to the
+ * <code>ResourceManager</code>.
+ *
+ * The client is required to provide details such as queue,
+ * {@link Resource} required to run the <code>ApplicationMaster</code>,
+ * the equivalent of {@link ContainerLaunchContext} for launching
+ * the <code>ApplicationMaster</code> etc. via the
+ * {@link SubmitApplicationRequest}.
+ *
+ * Currently the <code>ResourceManager</code> sends an immediate (empty)
+ * {@link SubmitApplicationResponse} on accepting the submission and throws
+ * an exception if it rejects the submission.
+ *
+ * In secure mode, the <code>ResourceManager</code> verifies access to
+ * queues etc. before accepting the application submission.
+ *
+ * @param request request to submit a new application
+ * @return (empty) response on accepting the submission
+ * @throws YarnRemoteException
+ * @see #getNewApplicationId(GetNewApplicationIdRequest)
+ */
+ public SubmitApplicationResponse submitApplication(
+ SubmitApplicationRequest request)
+ throws YarnRemoteException;
+
+ public SubmitApplicationResponse submitApplication(
+ SubmitApplicationRequest request)
+ throws YarnRemoteException;
+
+ /**
+ * The interface used by clients to request the
+ * <code>ResourceManager</code> to abort a submitted application.
+ *
+ * The client, via {@link FinishApplicationRequest} provides the
+ * {@link ApplicationId} of the application to be aborted.
+ *
+ * In secure mode, the <code>ResourceManager</code> verifies access to the
+ * application, queue etc. before terminating the application.
+ *
+ * Currently, the <code>ResourceManager</code> returns an empty response
+ * on success and throws an exception on rejecting the request.
+ *
+ * @param request request to abort a submitted application
+ * @return <code>ResourceManager</code> returns an empty response
+ * on success and throws an exception on rejecting the request
+ * @throws YarnRemoteException
+ * @see #getQueueUserAcls(GetQueueUserAclsInfoRequest)
+ */
+ public FinishApplicationResponse finishApplication(
+ FinishApplicationRequest request)
+ throws YarnRemoteException;
+
+ /**
+ * The interface used by clients to get a report of an Application from
+ * the <code>ResourceManager</code>.
+ *
+ * The client, via {@link GetApplicationReportRequest} provides the
+ * {@link ApplicationId} of the application.
+ *
+ * In secure mode, the <code>ResourceManager</code> verifies access to the
+ * application, queue etc. before accepting the request.
+ *
+ * The <code>ResourceManager</code> responds with a
+ * {@link GetApplicationReportResponse} which includes the
+ * {@link ApplicationReport} for the application.
+ *
+ * @param request request for an application report
+ * @return application report
+ * @throws YarnRemoteException
+ */
+ public GetApplicationReportResponse getApplicationReport(
+ GetApplicationReportRequest request)
+ throws YarnRemoteException;
+
+ /**
+ * The interface used by clients to get metrics about the cluster from
+ * the <code>ResourceManager</code>.
+ *
+ * The <code>ResourceManager</code> responds with a
+ * {@link GetClusterMetricsResponse} which includes the
+ * {@link YarnClusterMetrics} with details such as number of current
+ * nodes in the cluster.
+ *
+ * @param request request for cluster metrics
+ * @return cluster metrics
+ * @throws YarnRemoteException
+ */
+ public GetClusterMetricsResponse getClusterMetrics(
+ GetClusterMetricsRequest request)
+ throws YarnRemoteException;
+
+ /**
+ * The interface used by clients to get a report of all Applications
+ * in the cluster from the <code>ResourceManager</code>.
+ *
+ * The <code>ResourceManager</code> responds with a
+ * {@link GetAllApplicationsResponse} which includes the
+ * {@link ApplicationReport} for all the applications.
+ *
+ * @param request request for report on all running applications
+ * @return report on all running applications
+ * @throws YarnRemoteException
+ */
+ public GetAllApplicationsResponse getAllApplications(
+ GetAllApplicationsRequest request)
+ throws YarnRemoteException;
+
+ /**
+ * The interface used by clients to get a report of all nodes
+ * in the cluster from the <code>ResourceManager</code>.
+ *
+ * The <code>ResourceManager</code> responds with a
+ * {@link GetClusterNodesResponse} which includes the
+ * {@link NodeReport} for all the nodes in the cluster.
+ *
+ * @param request request for report on all nodes
+ * @return report on all nodes
+ * @throws YarnRemoteException
+ */
+ public GetClusterNodesResponse getClusterNodes(
+ GetClusterNodesRequest request)
+ throws YarnRemoteException;
+
+ /**
+ * The interface used by clients to get information about queues
+ * from the <code>ResourceManager</code>.
+ *
+ * The client, via {@link GetQueueInfoRequest}, can ask for details such
+ * as used/total resources, child queues, running applications etc.
+ *
+ * In secure mode, the <code>ResourceManager</code> verifies access before
+ * providing the information.
+ *
+ * @param request request to get queue information
+ * @return queue information
+ * @throws YarnRemoteException
+ */
+ public GetQueueInfoResponse getQueueInfo(
+ GetQueueInfoRequest request)
+ throws YarnRemoteException;
+
+ /**
+ * The interface used by clients to get information about queue
+ * acls for current users from the <code>ResourceManager</code>.
+ *
+ * The <code>ResourceManager</code> responds with queue acls for all
+ * existing queues.
+ *
+ * @param request request to get queue acls for current user
+ * @return queue acls for current user
+ * @throws YarnRemoteException
+ */
+ public GetQueueUserAclsInfoResponse getQueueUserAcls(
+ GetQueueUserAclsInfoRequest request)
+ throws YarnRemoteException;
}
Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ContainerManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ContainerManager.java?rev=1166495&r1=1166494&r2=1166495&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ContainerManager.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ContainerManager.java Thu Sep 8 01:39:07 2011
@@ -18,21 +18,108 @@
package org.apache.hadoop.yarn.api;
+import org.apache.hadoop.classification.InterfaceAudience.Public;
+import org.apache.hadoop.classification.InterfaceStability.Stable;
import org.apache.hadoop.yarn.api.protocolrecords.GetContainerStatusRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetContainerStatusResponse;
import org.apache.hadoop.yarn.api.protocolrecords.StartContainerRequest;
import org.apache.hadoop.yarn.api.protocolrecords.StartContainerResponse;
import org.apache.hadoop.yarn.api.protocolrecords.StopContainerRequest;
import org.apache.hadoop.yarn.api.protocolrecords.StopContainerResponse;
+import org.apache.hadoop.yarn.api.records.Container;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
+import org.apache.hadoop.yarn.api.records.ContainerStatus;
import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
+/**
+ * The protocol between an <code>ApplicationMaster</code> and a
+ * <code>NodeManager</code> to start/stop containers and to get status
+ * of running containers.
+ *
+ * If security is enabled the <code>NodeManager</code> verifies that the
+ * <code>ApplicationMaster</code> has truly been allocated the container
+ * by the <code>ResourceManager</code> and also verifies all interactions such
+ * as stopping the container or obtaining status information for the container.
+ *
+ */
+@Public
+@Stable
public interface ContainerManager {
+ /**
+ * The <code>ApplicationMaster</code> requests a <code>NodeManager</code>
+ * to start a {@link Container} allocated to it using this interface.
+ *
+ * The <code>ApplicationMaster</code> has to provide details such as
+ * allocated resource capability, security tokens (if enabled), command
+ * to be executed to start the container, environment for the process,
+ * necessary binaries/jar/shared-objects etc. via the
+ * {@link ContainerLaunchContext} in the {@link StartContainerRequest}.
+ *
+ * Currently the <code>NodeManager</code> sends an immediate, empty
+ * response via {@link StartContainerResponse} to signify acceptance of the
+ * request and throws an exception in case of errors. The
+ * <code>ApplicationMaster</code> can use
+ * {@link #getContainerStatus(GetContainerStatusRequest)} to get updated
+ * status of the to-be-launched or launched container.
+ *
+ * @param request request to start a container
+ * @return empty response to indicate acceptance of the request
+ * or an exception
+ * @throws YarnRemoteException
+ */
+ @Public
+ @Stable
+ @Public
+ @Stable
StartContainerResponse startContainer(StartContainerRequest request)
throws YarnRemoteException;
+ /**
+ * The <code>ApplicationMaster</code> requests a <code>NodeManager</code>
+ * to stop a {@link Container} allocated to it using this interface.
+ *
+ * The <code>ApplicationMaster</code> sends a
+ * {@link StopContainerRequest} which includes the {@link ContainerId} of the
+ * container to be stopped.
+ *
+ * Currently the <code>NodeManager</code> sends an immediate, empty
+ * response via {@link StopContainerResponse} to signify acceptance of the
+ * request and throws an exception in case of errors. The
+ * <code>ApplicationMaster</code> can use
+ * {@link #getContainerStatus(GetContainerStatusRequest)} to get updated
+ * status of the container.
+ *
+ * @param request request to stop a container
+ * @return empty response to indicate acceptance of the request
+ * or an exception
+ * @throws YarnRemoteException
+ */
+ @Public
+ @Stable
StopContainerResponse stopContainer(StopContainerRequest request)
throws YarnRemoteException;
+ /**
+ * The api used by the <code>ApplicationMaster</code> to request for
+ * current status of a <code>Container</code> from the
+ * <code>NodeManager</code>.
+ *
+ * The <code>ApplicationMaster</code> sends a
+ * {@link GetContainerStatusRequest} which includes the {@link ContainerId} of
+ * the container whose status is needed.
+ *
+ * The <code>NodeManager</code> responds with
+ * {@link GetContainerStatusResponse} which includes the
+ * {@link ContainerStatus} of the container.
+ *
+ * @param request request to get <code>ContainerStatus</code> of a container
+ * with the specified <code>ContainerId</code>
+ * @return <code>ContainerStatus</code> of the container
+ * @throws YarnRemoteException
+ */
+ @Public
+ @Stable
GetContainerStatusResponse getContainerStatus(
GetContainerStatusRequest request) throws YarnRemoteException;
}