MpiContainer.h
Go to the documentation of this file.
1 //Copyright (c) 2013-2023, The MercuryDPM Developers Team. All rights reserved.
2 //For the list of developers, see <http://www.MercuryDPM.org/Team>.
3 //
4 //Redistribution and use in source and binary forms, with or without
5 //modification, are permitted provided that the following conditions are met:
6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above copyright
9 // notice, this list of conditions and the following disclaimer in the
10 // documentation and/or other materials provided with the distribution.
11 // * Neither the name MercuryDPM nor the
12 // names of its contributors may be used to endorse or promote products
13 // derived from this software without specific prior written permission.
14 //
15 //THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
16 //ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
17 //WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
18 //DISCLAIMED. IN NO EVENT SHALL THE MERCURYDPM DEVELOPERS TEAM BE LIABLE FOR ANY
19 //DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
20 //(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
21 //LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
22 //ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23 //(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
24 //SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25 
31 #ifndef MPICONTAINER_H_
32 #define MPICONTAINER_H_
33 
34 #include <cstddef>
35 #include <functional>
36 #include <vector>
37 #include "Math/Vector.h"
38 
39 #ifdef MERCURYDPM_USE_MPI
40 #include <mpi.h>
41 #endif
42 
43 #ifdef MERCURYDPM_FORCE_ASSERTS
44 #define MERCURYDPM_ASSERTS true
45 #else
46 #ifdef MERCURYDPM_NO_ASSERTS
47 #define MERCURYDPM_ASSERTS false
48 #else
49 #ifdef NDEBUG
50 #define MERCURYDPM_ASSERTS false
51 #else
52 #define MERCURYDPM_ASSERTS true
53 #endif
54 #endif
55 #endif
56 
57 class SpeciesHandler;
58 
65 enum MercuryMPIType
66 {
67  PARTICLE = 0, POSITION = 1, VELOCITY = 2, FORCE = 3, INTERACTION = 4, SUPERQUADRIC = 5
68 };
69 
76 enum MercuryMPITag
77 {
78  PARTICLE_COUNT,
79  PARTICLE_DATA,
80  POSITION_DATA,
81  PERIODIC_POSITION_DATA,
82  VELOCITY_DATA,
83  INTERACTION_COUNT,
84  INTERACTION_DATA,
85  PERIODIC_COMPLEXITY,
86  PARTICLE_INDEX,
87  SUPERQUADRIC_DATA
88 };
89 
90 namespace Detail
91 {
92 #ifdef MERCURYDPM_USE_MPI
93 //convert integral data to the corresponding MPI type
94 template<typename T>
95 typename std::enable_if<std::is_integral<T>::value, MPI_Datatype>::type
96 toMPIType(T t)
97 {
98  MPI_Datatype type;
99  MPI_Type_match_size(MPI_TYPECLASS_INTEGER, sizeof(T), &type);
100  return type;
101 }
102 
103 //convert floating point data to the corresponding MPI type
104 template<typename T>
105 typename std::enable_if<std::is_floating_point<T>::value, MPI_Datatype>::type
106 toMPIType(T t)
107 {
108  MPI_Datatype type;
109  MPI_Type_match_size(MPI_TYPECLASS_REAL,sizeof(T),&type);
110  return type;
111 }
112 #endif
113 } /*detail*/
114 
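For illustration only (not part of MpiContainer.h): a minimal sketch of how these overloads resolve, assuming MERCURYDPM_USE_MPI is defined and MPI has been initialised; the variable names are purely illustrative.

    double radius = 0.5;
    unsigned int count = 7;
    // Picks the MPI real type whose size matches sizeof(double), typically MPI_DOUBLE.
    MPI_Datatype realType = Detail::toMPIType(radius);
    // Picks the MPI integer type whose size matches sizeof(unsigned int).
    MPI_Datatype intType = Detail::toMPIType(count);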
118 void initialiseMPI();
119 
129 class MPIContainer final
130 {
131 public:
134  static MPIContainer& Instance()
135  {
136  static MPIContainer theInstance;
137  return theInstance;
138  }
139 
143  void initialiseMercuryMPITypes(const SpeciesHandler& speciesHandler);
144 
152  void sync()
153  {
154 #ifdef MERCURYDPM_USE_MPI
155  MPI_Waitall(pending_.size(),pending_.data(),MPI_STATUSES_IGNORE);
156  pending_.clear();
157  MPI_Barrier(communicator_);
158 #endif
159  }
160 
169  template<typename T>
170  typename std::enable_if<std::is_scalar<T>::value, void>::type
171  send(T& t, int to, int tag)
172  {
173 #if MERCURYDPM_ASSERTS
174  if (to == processorID_)
175  {
176  logger(FATAL, "[MPI FATAL]: Sending data to self!");
177  }
178 #endif
179 #ifdef MERCURYDPM_USE_MPI
180  MPI_Request request;
181  MPI_Isend(&t, 1, Detail::toMPIType(t), to, tag, communicator_, &request);
182  pending_.push_back(request);
183 
184 #endif
185  }
186 
188  template<typename T>
189  typename std::enable_if<std::is_scalar<T>::value, void>::type
190  send(T* t, int count, int to, int tag)
191  {
192 #if MERCURYDPM_ASSERTS
193  if (to == processorID_)
194  {
195  logger(FATAL, "[MPI FATAL]: Sending data to self!");
196  }
197 
198  if (count == 0)
199  {
200  logger(WARN, "[MPI ERROR]: Sending zero data");
201  }
202 #endif
203 #ifdef MERCURYDPM_USE_MPI
204  MPI_Request request;
205  MPI_Isend(t, count, Detail::toMPIType(*t), to, tag, communicator_, &request);
206  pending_.push_back(request);
207 
208 #endif
209  }
210 
219  template<typename T>
220  typename std::enable_if<std::is_scalar<T>::value, void>::type
221  receive(T& t, int from, int tag)
222  {
223 #if MERCURYDPM_ASSERTS
224  if (from == processorID_)
225  {
226  logger(FATAL, "[MPI FATAL]: Receiving data from self!");
227  }
228 #endif
229 #ifdef MERCURYDPM_USE_MPI
230  MPI_Request request;
231  MPI_Irecv(&t, 1, Detail::toMPIType(t), from, tag, communicator_, &request);
232  pending_.push_back(request);
233 
234 #endif
235  }
236 
238  template<typename T>
239  typename std::enable_if<std::is_scalar<T>::value, void>::type
240  receive(T* t, int count, int from, int tag)
241  {
242 #if MERCURYDPM_ASSERTS
243  if (from == processorID_)
244  {
245  logger(FATAL, "[MPI FATAL]: Receiving data from self!");
246  }
247 
248  if (count == 0)
249  {
250  logger(WARN, "[MPI ERROR]: Receiving zero data");
251  }
252 #endif
253 #ifdef MERCURYDPM_USE_MPI
254  MPI_Request request;
255  MPI_Irecv(t, count, Detail::toMPIType(*t), from, tag, communicator_, &request);
256  pending_.push_back(request);
257 
258 #endif
259  }
260 
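As a usage illustration (not part of the header): a hedged sketch of the asynchronous scalar interface, assuming an MPI-enabled build with at least two processes; the tag choice is arbitrary here.

    MPIContainer& communicator = MPIContainer::Instance();
    double value = 3.14;
    if (communicator.getProcessorID() == 0)
    {
        // Queue a non-blocking send of one scalar to processor 1.
        communicator.send(value, 1, MercuryMPITag::PARTICLE_DATA);
    }
    else if (communicator.getProcessorID() == 1)
    {
        // Queue the matching non-blocking receive from processor 0.
        communicator.receive(value, 0, MercuryMPITag::PARTICLE_DATA);
    }
    // Complete all pending requests and synchronise all processes.
    communicator.sync();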
261 
270  template<typename T>
271  void send(T* t, MercuryMPIType type, int count, int to, int tag)
272  {
273  //std::cout << "[Process: " << processorID_ << "]: QUEUING SEND REQUEST with tag: " << tag << std::endl;
274 #if MERCURYDPM_ASSERTS
275  if (to == processorID_)
276  {
277  logger(FATAL, "[MPI FATAL]: Sending data to self!");
278  }
279 
280  if (count == 0)
281  {
282  logger(WARN, "[MPI ERROR]: Sending zero data");
283  }
284 #endif
285 #ifdef MERCURYDPM_USE_MPI
286  MPI_Request request;
287  MPI_Isend(t, count, dataTypes_[type], to, tag, communicator_, &request);
288  pending_.push_back(request);
289 
290 
291 #endif
292  }
293 
302  template<typename T>
303  void receive(T* t, MercuryMPIType type, int count, int from, int tag)
304  {
305  //std::cout << "[Process: " << processorID_ << "]: QUEUING RECEIVE REQUEST with tag: " << tag << std::endl;
306 #if MERCURYDPM_ASSERTS
307  if (from == processorID_)
308  {
309  logger(FATAL, "[MPI FATAL]: Receiving data from self!");
310  }
311 
312  if (count == 0)
313  {
314  logger(WARN, "[MPI ERROR]: Receiving zero data");
315  }
316 #endif
317 #ifdef MERCURYDPM_USE_MPI
318  MPI_Request request;
319  MPI_Irecv(t, count, dataTypes_[type], from, tag, communicator_, &request);
320  pending_.push_back(request);
321 
322 #endif
323  }
324 
333  template<typename T>
334  typename std::enable_if<std::is_scalar<T>::value, void>::type
335  directSend(T& t, int count, int to, int tag)
336  {
337 #if MERCURYDPM_ASSERTS
338  if (to == processorID_)
339  {
340  logger(FATAL, "[MPI FATAL]: Sending data to self!");
341  }
342 
343  if (count == 0)
344  {
345  logger(WARN, "[MPI ERROR]: Sending zero data");
346  }
347 #endif
348 #ifdef MERCURYDPM_USE_MPI
349  MPI_Ssend(&t, count, Detail::toMPIType(t), to, tag, communicator_);
350 #endif
351  }
352 
353 
354  template<typename T>
355  void directSend(T* t, MercuryMPIType type, int count, int to, int tag)
356  {
357 #if MERCURYDPM_ASSERTS
358  if (to == processorID_)
359  {
360  logger(FATAL, "[MPI FATAL]: Sending data to self!");
361  }
362 
363  if (count == 0)
364  {
365  logger(WARN, "[MPI ERROR]: Sending zero data");
366  }
367 #endif
368 #ifdef MERCURYDPM_USE_MPI
369  MPI_Ssend(t,count,dataTypes_[type], to, tag, communicator_);
370 #endif
371  }
372 
381  template<typename T>
382  typename std::enable_if<std::is_scalar<T>::value, void>::type
383  directReceive(T& t, int count, int from, int tag)
384  {
385 #if MERCURYDPM_ASSERTS
386  if (from == processorID_)
387  {
388  logger(FATAL, "[MPI FATAL]: Receiving data from self!");
389  }
390 
391  if (count == 0)
392  {
393  logger(WARN, "[MPI ERROR]: Receiving zero data");
394  }
395 #endif
396 #ifdef MERCURYDPM_USE_MPI
397  MPI_Recv(&t, count, Detail::toMPIType(t), from, tag,communicator_, MPI_STATUS_IGNORE);
398 #endif
399  }
400 
401  template<typename T>
402  void directReceive(T* t, MercuryMPIType type, int count, int from, int tag)
403  {
404 #if MERCURYDPM_ASSERTS
405  if (from == processorID_)
406  {
407  logger(FATAL, "[MPI FATAL]: Receiving data from self!");
408  }
409 
410  if (count == 0)
411  {
412  logger(WARN, "[MPI ERROR]: Receiving zero data");
413  }
414 #endif
415 #ifdef MERCURYDPM_USE_MPI
416  MPI_Recv(t, count, dataTypes_[type], from, tag, communicator_, MPI_STATUS_IGNORE);
417 #endif
418  }
419 
427  template<typename T>
428  void gather(T& send_t, T* receive_t)
429  {
430 #ifdef MERCURYDPM_USE_MPI
431  MPI_Gather(&send_t, 1, Detail::toMPIType(send_t), receive_t, 1, Detail::toMPIType(send_t), 0, communicator_);
432 #endif
433  }
434 
439  template<typename T>
440  typename std::enable_if<std::is_scalar<T>::value, void>::type
441  broadcast(T& t, int fromProcessor = 0)
442  {
443 #ifdef MERCURYDPM_USE_MPI
444  MPI_Bcast(&t,1,Detail::toMPIType(t),fromProcessor,communicator_);
445 #endif
446  }
447 
452  template<typename T>
453  typename std::enable_if<std::is_scalar<T>::value, void>::type
454  broadcast(T* t, int size, int fromProcessor)
455  {
456 #ifdef MERCURYDPM_USE_MPI
457  MPI_Bcast((void *)t,size,Detail::toMPIType(t[0]),fromProcessor,communicator_);
458 
459 #endif
460  }
461 
466  template<typename T>
467  void broadcast(T* t, MercuryMPIType type, int fromProcessor = 0)
468  {
469 #ifdef MERCURYDPM_USE_MPI
470  MPI_Bcast((void *)t,1,dataTypes_[type],fromProcessor,communicator_);
471 #endif
472  }
473 
484 #ifdef MERCURYDPM_USE_MPI
485  template<typename T>
486  typename std::enable_if<std::is_scalar<T>::value, void>::type
487  reduce(T& t, MPI_Op operation, int id = 0)
488  {
489 
490  if(id == getProcessorID())
491  {
492  MPI_Reduce(MPI_IN_PLACE, &t, 1, Detail::toMPIType(t), operation, id, communicator_);
493  }
494  else
495  {
496  MPI_Reduce(&t, nullptr, 1, Detail::toMPIType(t), operation, id, communicator_);
497  }
498  }
499 #endif
500 
509 #ifdef MERCURYDPM_USE_MPI
510  template<typename T>
511  typename std::enable_if<std::is_scalar<T>::value, void>::type
512  allReduce(T& send_t, T& receive_t, MPI_Op operation)
513  {
514  MPI_Allreduce(&send_t, &receive_t, 1, Detail::toMPIType(send_t), operation,communicator_);
515  }
516 #endif
517 
527 #ifdef MERCURYDPM_USE_MPI
528  template<typename T>
529  typename std::enable_if<std::is_scalar<T>::value, void>::type
530  allGather(T& send_t, int send_count, std::vector<T>& receive_t, int receive_count)
531  {
532  MPI_Allgather(&send_t, send_count, Detail::toMPIType(send_t),
533  receive_t.data(), receive_count, Detail::toMPIType(receive_t[0]),communicator_);
534  }
535 #endif
536 
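A hedged sketch of the collective wrappers above (illustrative only, assuming MERCURYDPM_USE_MPI; the local values are made up):

    MPIContainer& communicator = MPIContainer::Instance();
    double localValue = 1.0 + communicator.getProcessorID();

    // After reduce, processor 0 holds the global maximum; other ranks keep their local value.
    communicator.reduce(localValue, MPI_MAX);

    // allReduce gives every processor the global sum.
    double localSum = 1.0, globalSum = 0.0;
    communicator.allReduce(localSum, globalSum, MPI_SUM);

    // allGather collects one value per processor on every processor.
    std::vector<double> allValues(communicator.getNumberOfProcessors());
    communicator.allGather(localSum, 1, allValues, 1);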
540  std::size_t getProcessorID();
541 
545  std::size_t getNumberOfProcessors() const;
546 
550 #ifdef MERCURYDPM_USE_MPI
551  MPI_Comm& getComm();
552 #endif
553 
563  template<typename T>
564  void createMercuryMPIType(T t, MercuryMPIType type)
565  {
566 #ifdef MERCURYDPM_USE_MPI
567  MPI_Datatype MPIType;
568  MPI_Type_contiguous(sizeof(T), MPI_BYTE, &MPIType);
569  MPI_Type_commit(&MPIType);
570  dataTypes_.push_back(MPIType);
571 #endif
572  }
573 
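To illustrate the intent of createMercuryMPIType (a sketch, not the library's own registration code; the payload struct is hypothetical):

    // Hypothetical trivially copyable payload used only for this sketch;
    // MPI_Type_contiguous over raw bytes only makes sense for such types.
    struct MPIPositionData { double x, y, z; };

    // createMercuryMPIType commits a contiguous MPI type of sizeof(T) bytes and
    // appends it to dataTypes_. Since send/receive look types up as dataTypes_[type],
    // registrations must follow the MercuryMPIType enum order, which is what
    // initialiseMercuryMPITypes() takes care of for the real Mercury data types.
    MPIContainer::Instance().createMercuryMPIType(MPIPositionData(), MercuryMPIType::POSITION);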
574 
578  void deleteMercuryMPITypes()
579  {
580 #ifdef MERCURYDPM_USE_MPI
581  for(MPI_Datatype type : dataTypes_)
582  {
583  MPI_Type_free(&type);
584  }
585 #endif
586  }
587 
591  MPIContainer(const MPIContainer& orig) = delete;
592 
593 private:
594 
598  MPIContainer();
599 
603  //std::size_t processorID_;
604  int processorID_;
605 
609  int numberOfProcessors_;
610 
611 #ifdef MERCURYDPM_USE_MPI
615  std::vector<MPI_Request> pending_;
616 
620  MPI_Comm communicator_;
624  std::vector<MPI_Datatype> dataTypes_;
625 
626 #endif
627 
628 };
629 
630 
631 #endif /* MPICONTAINER_H_ */
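Finally, a minimal driver-side sketch of how this header is typically consumed (illustrative, not part of MpiContainer.h; assumes an MPI-enabled build and that MPI finalisation is handled by the library):

    #include "MpiContainer.h"
    #include <iostream>

    int main()
    {
        initialiseMPI();                                  // set up MPI and the singleton container
        MPIContainer& communicator = MPIContainer::Instance();

        double timeStep = 1e-4;
        communicator.broadcast(timeStep);                 // processor 0 distributes the value to all ranks

        if (communicator.getProcessorID() == 0)
        {
            std::cout << "Running on " << communicator.getNumberOfProcessors() << " processors" << std::endl;
        }
        communicator.sync();                              // make sure everyone is done before exiting
        return 0;
    }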