ariatosca-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From mxmrlv <...@git.apache.org>
Subject [GitHub] incubator-ariatosca pull request #156: ARIA-278 remove core tasks
Date Tue, 20 Jun 2017 12:10:30 GMT
Github user mxmrlv commented on a diff in the pull request:

    https://github.com/apache/incubator-ariatosca/pull/156#discussion_r122953127
  
    --- Diff: aria/orchestrator/workflows/core/engine.py ---
    @@ -38,84 +35,192 @@ class Engine(logger.LoggerMixin):
         The workflow engine. Executes workflows
         """
     
    -    def __init__(self, executor, workflow_context, tasks_graph, **kwargs):
    +    def __init__(self, default_executor, **kwargs):
             super(Engine, self).__init__(**kwargs)
    -        self._workflow_context = workflow_context
    -        self._execution_graph = networkx.DiGraph()
    -        translation.build_execution_graph(task_graph=tasks_graph,
    -                                          execution_graph=self._execution_graph,
    -                                          default_executor=executor)
    +        self._executors = {default_executor.__class__: default_executor}
    +        self._executing_tasks = []
     
    -    def execute(self):
    +    def execute(self, ctx):
             """
             execute the workflow
             """
             try:
    -            events.start_workflow_signal.send(self._workflow_context)
    +            events.start_workflow_signal.send(ctx)
                 while True:
    -                cancel = self._is_cancel()
    +                cancel = self._is_cancel(ctx)
                     if cancel:
                         break
    -                for task in self._ended_tasks():
    -                    self._handle_ended_tasks(task)
    -                for task in self._executable_tasks():
    -                    self._handle_executable_task(task)
    -                if self._all_tasks_consumed():
    +                for task in self._ended_tasks(ctx):
    +                    self._handle_ended_tasks(ctx, task)
    +                for task in self._executable_tasks(ctx):
    +                    self._handle_executable_task(ctx, task)
    +                if self._all_tasks_consumed(ctx):
                         break
                     else:
                         time.sleep(0.1)
                 if cancel:
    -                events.on_cancelled_workflow_signal.send(self._workflow_context)
    +                events.on_cancelled_workflow_signal.send(ctx)
                 else:
    -                events.on_success_workflow_signal.send(self._workflow_context)
    +                events.on_success_workflow_signal.send(ctx)
             except BaseException as e:
    -            events.on_failure_workflow_signal.send(self._workflow_context, exception=e)
    +            events.on_failure_workflow_signal.send(ctx, exception=e)
                 raise
     
    -    def cancel_execution(self):
    +    @staticmethod
    +    def cancel_execution(ctx):
             """
             Send a cancel request to the engine. If execution already started, execution
             status will be modified to 'cancelling' status. If execution is in pending
             mode, execution status will be modified to 'cancelled' directly.
             """
    -        events.on_cancelling_workflow_signal.send(self._workflow_context)
    +        events.on_cancelling_workflow_signal.send(ctx)
     
    -    def _is_cancel(self):
    -        return self._workflow_context.execution.status in (models.Execution.CANCELLING,
    -                                                           models.Execution.CANCELLED)
    +    @staticmethod
    +    def _is_cancel(ctx):
    +        execution = ctx.model.execution.update(ctx.execution)
    +        return execution.status in (models.Execution.CANCELLING, models.Execution.CANCELLED)
     
    -    def _executable_tasks(self):
    +    def _executable_tasks(self, ctx):
             now = datetime.utcnow()
    -        return (task for task in self._tasks_iter()
    -                if task.is_waiting() and
    -                task.due_at <= now and
    -                not self._task_has_dependencies(task))
    +        return (
    +            task for task in self._tasks_iter(ctx)
    +            if task.is_waiting() and task.due_at <= now and \
    +            not self._task_has_dependencies(ctx, task)
    +        )
     
    -    def _ended_tasks(self):
    -        return (task for task in self._tasks_iter() if task.has_ended())
    +    def _ended_tasks(self, ctx):
    +        for task in self._executing_tasks:
    +            if task.has_ended() and task in ctx._graph:
    +                yield task
     
    -    def _task_has_dependencies(self, task):
    -        return len(self._execution_graph.pred.get(task.id, {})) > 0
    -
    -    def _all_tasks_consumed(self):
    -        return len(self._execution_graph.node) == 0
    -
    -    def _tasks_iter(self):
    -        for _, data in self._execution_graph.nodes_iter(data=True):
    -            task = data['task']
    -            if isinstance(task, engine_task.OperationTask):
    -                if not task.model_task.has_ended():
    -                    self._workflow_context.model.task.refresh(task.model_task)
    -            yield task
    +    @staticmethod
    +    def _task_has_dependencies(ctx, task):
    +        return len(ctx._graph.pred.get(task, [])) > 0
     
         @staticmethod
    -    def _handle_executable_task(task):
    -        if isinstance(task, engine_task.OperationTask):
    -            events.sent_task_signal.send(task)
    -        task.execute()
    +    def _all_tasks_consumed(ctx):
    +        return len(ctx._graph.node) == 0
     
    -    def _handle_ended_tasks(self, task):
    +    @staticmethod
    +    def _tasks_iter(ctx):
    +        for task in ctx.execution.tasks:
    +            yield ctx.model.task.refresh(task)
    +
    +    def _handle_executable_task(self, ctx, task):
    +        if task._executor not in self._executors:
    +            self._executors[task._executor] = task._executor()
    +        task_executor = self._executors[task._executor]
    +
    +        context_cls = task._context_cls or operation.BaseOperationContext
    +        op_ctx = context_cls(
    +            model_storage=ctx.model,
    +            resource_storage=ctx.resource,
    +            workdir=ctx._workdir,
    +            task_id=task.id,
    +            actor_id=task.actor.id if task.actor else None,
    +            service_id=task.execution.service.id,
    +            execution_id=task.execution.id,
    +            name=task.name
    +        )
    +
    +        self._executing_tasks.append(task)
    +
    +        if not task.stub_type:
    +            events.sent_task_signal.send(op_ctx)
    +        task_executor.execute(op_ctx)
    +
    +    def _handle_ended_tasks(self, ctx, task):
    +        self._executing_tasks.remove(task)
             if task.status == models.Task.FAILED and not task.ignore_failure:
                 raise exceptions.ExecutorException('Workflow failed')
             else:
    -            self._execution_graph.remove_node(task.id)
    +            ctx._graph.remove_node(task)
    +
    +
    +def construct_execution_tasks(execution,
    +                              task_graph,
    +                              default_executor,
    +                              stub_executor=executor.base.StubTaskExecutor,
    +                              start_stub_type=models.Task.START_WORKFLOW,
    +                              end_stub_type=models.Task.END_WORKFLOW,
    +                              depends_on=()):
    +    """
    +    Translates the user graph to the execution graph
    +    :param task_graph: The user's graph
    +    :param start_stub_type: internal use
    +    :param end_stub_type: internal use
    +    :param depends_on: internal use
    +    """
    +    depends_on = list(depends_on)
    +
    +    # Insert start marker
    +    start_task = models.Task(api_id=_start_graph_suffix(task_graph.id),
    +                             _executor=stub_executor,
    +                             execution=execution,
    +                             stub_type=start_stub_type,
    +                             dependencies=depends_on)
    +
    +    for task in task_graph.topological_order(reverse=True):
    +        operation_dependencies = _get_tasks_from_dependencies(
    +            execution, task_graph.get_dependencies(task), [start_task])
    +
    +        if isinstance(task, api.task.OperationTask):
    +            models.Task.from_api_task(api_task=task,
    +                                      executor=default_executor,
    +                                      dependencies=operation_dependencies)
    +
    +        elif isinstance(task, api.task.WorkflowTask):
    +            # Build the graph recursively while adding start and end markers
    +            construct_execution_tasks(
    +                execution=execution,
    +                task_graph=task,
    +                default_executor=default_executor,
    +                stub_executor=stub_executor,
    +                start_stub_type=models.Task.START_SUBWROFKLOW,
    +                end_stub_type=models.Task.END_SUBWORKFLOW,
    +                depends_on=operation_dependencies
    +            )
    +        elif isinstance(task, api.task.StubTask):
    +            models.Task(api_id=task.id,
    +                        _executor=stub_executor,
    +                        execution=execution,
    +                        stub_type=models.Task.STUB,
    +                        dependencies=operation_dependencies)
    +        else:
    +            raise RuntimeError('Undefined state')
    +
    +    # Insert end marker
    +    models.Task(api_id=_end_graph_suffix(task_graph.id),
    +                _executor=stub_executor,
    +                execution=execution,
    +                stub_type=end_stub_type,
    +                dependencies=_get_non_dependent_tasks(execution) or [start_task])
    +
    +
    +def _start_graph_suffix(api_id):
    +    return '{0}-Start'.format(api_id)
    +
    +
    +def _end_graph_suffix(api_id):
    +    return '{0}-End'.format(api_id)
    +
    +
    +def _get_non_dependent_tasks(execution):
    +    dependency_tasks = set()
    +    for task in execution.tasks:
    +        dependency_tasks.update(task.dependencies)
    +    return list(set(execution.tasks) - set(dependency_tasks))
    +
    +
    +def _get_tasks_from_dependencies(execution, dependencies, default=()):
    +    """
    +    Returns task list from dependencies.
    +    """
    +    tasks = []
    +    for dependency in dependencies:
    +        if getattr(dependency, 'actor', False):
    --- End diff --
    
    stub_type


---
If your project is set up for it, you can reply to this email and have your
reply appear on GitHub as well. If your project does not have this feature
enabled and wishes so, or if the feature is enabled but not working, please
contact infrastructure at infrastructure@apache.org or file a JIRA ticket
with INFRA.
---

Mime
View raw message