quickstep-dev mailing list archives

From hbdeshmukh <...@git.apache.org>
Subject [GitHub] incubator-quickstep pull request #64: Introduced Shiftboss for the distribut...
Date Wed, 27 Jul 2016 21:01:17 GMT
Github user hbdeshmukh commented on a diff in the pull request:

    https://github.com/apache/incubator-quickstep/pull/64#discussion_r72522825
  
    --- Diff: query_execution/Shiftboss.cpp ---
    @@ -0,0 +1,357 @@
    +/**
    + *   Licensed under the Apache License, Version 2.0 (the "License");
    + *   you may not use this file except in compliance with the License.
    + *   You may obtain a copy of the License at
    + *
    + *       http://www.apache.org/licenses/LICENSE-2.0
    + *
    + *   Unless required by applicable law or agreed to in writing, software
    + *   distributed under the License is distributed on an "AS IS" BASIS,
    + *   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    + *   See the License for the specific language governing permissions and
    + *   limitations under the License.
    + **/
    +
    +#include "query_execution/Shiftboss.hpp"
    +
    +#include <cstddef>
    +#include <cstdlib>
    +#include <memory>
    +#include <string>
    +#include <unordered_map>
    +#include <utility>
    +#include <vector>
    +
    +#include "catalog/CatalogTypedefs.hpp"
    +#include "query_execution/QueryContext.hpp"
    +#include "query_execution/QueryExecutionMessages.pb.h"
    +#include "query_execution/QueryExecutionTypedefs.hpp"
    +#include "query_execution/QueryExecutionUtil.hpp"
    +#include "query_execution/WorkerMessage.hpp"
    +#include "relational_operators/RebuildWorkOrder.hpp"
    +#include "relational_operators/WorkOrderFactory.hpp"
    +#include "storage/InsertDestination.hpp"
    +#include "storage/StorageBlock.hpp"
    +#include "storage/StorageBlockInfo.hpp"
    +#include "storage/StorageManager.hpp"
    +#include "threading/ThreadUtil.hpp"
    +
    +#include "glog/logging.h"
    +
    +#include "tmb/address.h"
    +#include "tmb/id_typedefs.h"
    +#include "tmb/message_bus.h"
    +#include "tmb/message_style.h"
    +#include "tmb/tagged_message.h"
    +
    +using std::free;
    +using std::malloc;
    +using std::move;
    +using std::size_t;
    +using std::string;
    +using std::unique_ptr;
    +using std::vector;
    +
    +using tmb::TaggedMessage;
    +
    +namespace quickstep {
    +
    +class WorkOrder;
    +
    +void Shiftboss::run() {
    +  if (cpu_id_ >= 0) {
    +    // We can pin the shiftboss thread to a CPU if specified.
    +    ThreadUtil::BindToCPU(cpu_id_);
    +  }
    +
    +  for (;;) {
    +    // Receive() is a blocking call, causing this thread to sleep until next
    +    // message is received.
    +    AnnotatedMessage annotated_message(bus_->Receive(shiftboss_client_id_, 0, true));
    +    LOG(INFO) << "Shiftboss (id '" << shiftboss_client_id_
    +              << "') received the typed '" << annotated_message.tagged_message.message_type()
    +              << "' message from client " << annotated_message.sender;
    +    switch (annotated_message.tagged_message.message_type()) {
    +      case kShiftbossRegistrationResponseMessage: {
    +        foreman_client_id_ = annotated_message.sender;
    +        break;
    +      }
    +      case kShiftbossInitiateMessage: {
    +        const TaggedMessage &tagged_message = annotated_message.tagged_message;
    +
    +        serialization::ShiftbossInitiateMessage proto;
    +        CHECK(proto.ParseFromArray(tagged_message.message(), tagged_message.message_bytes()));
    +
    +        processShiftbossInitiateMessage(proto.query_id(), proto.catalog_database_cache(),
    +                                        proto.query_context());
    +        break;
    +      }
    +      case kWorkOrderMessage: {
    +        const TaggedMessage &tagged_message = annotated_message.tagged_message;
    +
    +        serialization::WorkOrderMessage proto;
    +        CHECK(proto.ParseFromArray(tagged_message.message(), tagged_message.message_bytes()));
    +
    +        const std::size_t query_id = proto.query_id();
    +        DCHECK_EQ(1u, query_contexts_.count(query_id));
    +
    +        WorkOrder *work_order = WorkOrderFactory::ReconstructFromProto(proto.work_order(),
    +                                                                       &database_cache_,
    +                                                                       query_contexts_[query_id].get(),
    +                                                                       storage_manager_,
    +                                                                       shiftboss_client_id_,
    +                                                                       bus_);
    +
    +        unique_ptr<WorkerMessage> worker_message(
    +            WorkerMessage::WorkOrderMessage(work_order, proto.operator_index()));
    +
    +        TaggedMessage worker_tagged_message(worker_message.get(),
    +                                            sizeof(*worker_message),
    +                                            kWorkOrderMessage);
    +
    +        const size_t worker_index = getSchedulableWorker();
    +        LOG(INFO) << "Shiftboss (id '" << shiftboss_client_id_
    +                  << "') forwarded WorkOrderMessage (typed '" << kWorkOrderMessage
    +                  << "') from Foreman to worker " << worker_index;
    +
    +        QueryExecutionUtil::SendTMBMessage(bus_,
    +                                           shiftboss_client_id_,
    +                                           workers_->getClientID(worker_index),
    +                                           move(worker_tagged_message));
    +        break;
    +      }
    +      case kInitiateRebuildMessage: {
    +        const TaggedMessage &tagged_message = annotated_message.tagged_message;
    +
    +        serialization::InitiateRebuildMessage proto;
    +        CHECK(proto.ParseFromArray(tagged_message.message(), tagged_message.message_bytes()));
    +
    +        processInitiateRebuildMessage(proto.query_id(),
    +                                      proto.operator_index(),
    +                                      proto.insert_destination_index(),
    +                                      proto.relation_id());
    +        break;
    +      }
    +      case kWorkOrderCompleteMessage:  // Fall through.
    +      case kRebuildWorkOrderCompleteMessage:
    +      case kDataPipelineMessage:
    +      case kWorkOrdersAvailableMessage:
    +      case kWorkOrderFeedbackMessage: {
    +        LOG(INFO) << "Shiftboss (id '" << shiftboss_client_id_
    +                  << "') forwarded typed '" << annotated_message.tagged_message.message_type()
    +                  << "' message from worker (client " << annotated_message.sender
    +                  << ") to Foreman";
    +
    +        DCHECK_NE(foreman_client_id_, tmb::kClientIdNone);
    +        QueryExecutionUtil::SendTMBMessage(bus_,
    +                                           shiftboss_client_id_,
    +                                           foreman_client_id_,
    +                                           move(annotated_message.tagged_message));
    +        break;
    +      }
    +      case kQueryResultRelationMessage: {
    +        const TaggedMessage &tagged_message = annotated_message.tagged_message;
    +
    +        serialization::QueryResultRelationMessage proto;
    +        CHECK(proto.ParseFromArray(tagged_message.message(), tagged_message.message_bytes()));
    +
    +        for (int i = 0; i < proto.blocks_size(); ++i) {
    +          const block_id block = proto.blocks(i);
    +          storage_manager_->saveBlockOrBlob(block);
    +          if (storage_manager_->blockOrBlobIsLoaded(block)) {
    +            // NOTE(zuyu): eviction is required to avoid accesses to the query
    +            // result relation schema in CatalogDatabaseCache, for all query
    +            // optimizer execution generator unit tests and the single-process
    +            // Quickstep CLI.
    +            storage_manager_->evictBlockOrBlob(block);
    --- End diff --
    
    > You could play with my old code in distributed-exe-gen-test to confirm my statement.
    
    Please understand that I am not doubting what you have said. I am just trying to
    understand why you need to evict a block from a result relation, which presumably
    won't be accessed by any other worker thread on that node at the time this line of
    code executes. Your explanation in the source code reads as follows:
    
    ```
                // NOTE(zuyu): eviction is required to avoid accesses to the query
                // result relation schema in CatalogDatabaseCache, for all query
                // optimizer execution generator unit tests and the single-process
                // Quickstep CLI.
    ```
    
    Why would the optimizer execution generator unit tests interfere with an actual
    invocation of the system?
    Why does the Quickstep CLI cause an issue here? Is it something about concurrent
    accesses to the same block from two different processes?
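
    For readers following the thread, here is the sequence under discussion, condensed from
    the `kQueryResultRelationMessage` case in the diff above. The `StorageManager` calls are
    the ones that appear in the patch; the comments only restate the scenario the questions
    above are probing, not a confirmed answer.

    ```cpp
    // Condensed from the diff above: for every block of the query result
    // relation, flush it to persistent storage, then drop it from this
    // Shiftboss's buffer pool if it is still resident.
    for (int i = 0; i < proto.blocks_size(); ++i) {
      const block_id block = proto.blocks(i);

      // Make the result block durable, presumably so another process (e.g. the
      // one hosting the Foreman/CLI) can load it from disk.
      storage_manager_->saveBlockOrBlob(block);

      // Evict the local in-memory copy. Whether this is about a stale cached
      // copy being visible across processes, or about the result relation's
      // schema in CatalogDatabaseCache (as the NOTE says), is exactly what the
      // questions above ask.
      if (storage_manager_->blockOrBlobIsLoaded(block)) {
        storage_manager_->evictBlockOrBlob(block);
      }
    }
    ```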


