singa-dev mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From GitBox <...@apache.org>
Subject [GitHub] [incubator-singa] xuewanqi commented on a change in pull request #468: Distributted module
Date Fri, 05 Jul 2019 06:45:13 GMT
xuewanqi commented on a change in pull request #468: Distributted module
URL: https://github.com/apache/incubator-singa/pull/468#discussion_r300558656
 
 

 ##########
 File path: src/dist/communicator.cc
 ##########
 @@ -0,0 +1,150 @@
+#include "singa/dist/communicator.h"
+#include<iostream>
+namespace singa{
+
// Hashes a NUL-terminated string with the DJB2 algorithm
// (hash = hash * 33 + byte). Used to give every host a stable
// numeric identity so ranks on the same machine can be grouped.
static uint64_t getHostHash(const char* string) {
  uint64_t hash = 5381;
  for (const char* p = string; *p != '\0'; ++p) {
    hash = hash * 33 + *p;
  }
  return hash;
}
+
+
// Writes this machine's host name into `hostname` (at most `maxlen`
// bytes) and truncates it at the first '.', keeping only the short
// host part of a fully-qualified domain name.
static void getHostName(char* hostname, int maxlen) {
  gethostname(hostname, maxlen);
  // POSIX leaves it unspecified whether gethostname() NUL-terminates
  // the buffer when the name is truncated; terminate explicitly so the
  // scan below never runs past initialized data.
  hostname[maxlen - 1] = '\0';
  for (int i = 0; i < maxlen; i++) {
    if (hostname[i] == '.') {
      hostname[i] = '\0';
      return;
    }
  }
}
+
+
+Communicator::Communicator(int nDev): nDev(nDev){
+  MPICHECK(MPI_Init(NULL, NULL));
+  // get MPI Global Ranks and total Ranks
+  MPICHECK(MPI_Comm_rank(MPI_COMM_WORLD, &MPIRankInGlobal));
+  MPICHECK(MPI_Comm_size(MPI_COMM_WORLD, &totalMPIRanksInGlobal));
+  //std::cout<<"g rank " << MPIRankInGlobal << "\n";
+
+  //calculating MPIRankInLocal which is used in selecting a GPU
+  MPIRankInLocal=0;
+  uint64_t hostHashs[totalMPIRanksInGlobal];
+  char hostname[1024];
+  getHostName(hostname, 1024);
+  hostHashs[MPIRankInGlobal] = getHostHash(hostname);
+  MPICHECK(MPI_Allgather(MPI_IN_PLACE, 0, MPI_DATATYPE_NULL, hostHashs,
+    		 sizeof(uint64_t), MPI_BYTE, MPI_COMM_WORLD));
+  for (int p=0; p<totalMPIRanksInGlobal; p++) {
+     if (p == MPIRankInGlobal) break;
+     if (hostHashs[p] == hostHashs[MPIRankInGlobal]) MPIRankInLocal++;
+  }
+
+  //std::cout<<"l rank " << MPIRankInLocal << "\n";
+
+  //picking GPUs based on MPIRankInLocal
+  //create cuda stream s
+  s = (cudaStream_t*)malloc(sizeof(cudaStream_t)*nDev);
+  for (int i = 0; i < nDev; ++i) {
+    CUDACHECK(cudaSetDevice(MPIRankInLocal*nDev + i));
 
 Review comment:
  Lines 51 and 52 set the GPUs that the current process (selected via MPIRankInLocal) should control. 

----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
users@infra.apache.org


With regards,
Apache Git Services

Mime
View raw message