MercuryDPM Trunk
MpiContainer.h
1 //Copyright (c) 2013-2020, The MercuryDPM Developers Team. All rights reserved.
2 //For the list of developers, see <http://www.MercuryDPM.org/Team>.
3 //
4 //Redistribution and use in source and binary forms, with or without
5 //modification, are permitted provided that the following conditions are met:
6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above copyright
9 // notice, this list of conditions and the following disclaimer in the
10 // documentation and/or other materials provided with the distribution.
11 // * Neither the name MercuryDPM nor the
12 // names of its contributors may be used to endorse or promote products
13 // derived from this software without specific prior written permission.
14 //
15 //THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
16 //ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
17 //WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
18 //DISCLAIMED. IN NO EVENT SHALL THE MERCURYDPM DEVELOPERS TEAM BE LIABLE FOR ANY
19 //DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
20 //(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
21 //LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
22 //ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23 //(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
24 //SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25 
31 #ifndef MPICONTAINER_H_
32 #define MPICONTAINER_H_
33 
34 #include <cstddef>
35 #include <functional>
36 #include <vector>
37 #include "Math/Vector.h"
38 
39 #ifdef MERCURY_USE_MPI
40 #include <mpi.h>
41 #endif
42 
43 #ifdef MERCURY_FORCE_ASSERTS
44 #define MERCURY_ASSERTS true
45 #else
46 #ifdef MERCURY_NO_ASSERTS
47 #define MERCURY_ASSERTS false
48 #else
49 #ifdef NDEBUG
50 #define MERCURY_ASSERTS false
51 #else
52 #define MERCURY_ASSERTS true
53 #endif
54 #endif
55 #endif
56 
57 class SpeciesHandler;
58 
65 enum MercuryMPIType
66 {
67  PARTICLE = 0, POSITION = 1, VELOCITY = 2, FORCE = 3, INTERACTION = 4, SUPERQUADRIC = 5
68 };
69 
76 enum MercuryMPITag
77 {
88 };
89 
90 namespace Detail
91 {
92 #ifdef MERCURY_USE_MPI
93 //convert integral data to the corresponding MPI type
94 template<typename T>
95 typename std::enable_if<std::is_integral<T>::value, MPI::Datatype>::type
96 toMPIType(T t)
97 {
98  return MPI::Datatype::Match_size(MPI_TYPECLASS_INTEGER,sizeof(T));
99 }
100 
101 //convert floating point data to the corresponding MPI type
102 template<typename T>
103 typename std::enable_if<std::is_floating_point<T>::value, MPI::Datatype>::type
104 toMPIType(T t)
105 {
106  return MPI::Datatype::Match_size(MPI_TYPECLASS_REAL,sizeof(T));
107 }
108 #endif
109 } /*detail*/
110 
114 void initialiseMPI();
115 
125 class MPIContainer final
126 {
127 public:
130  static MPIContainer& Instance()
131  {
132  static MPIContainer theInstance;
133  return theInstance;
134  }
135 
139  void initialiseMercuryMPITypes(const SpeciesHandler& speciesHandler);
140 
148  void sync()
149  {
150 #ifdef MERCURY_USE_MPI
151  MPI::Request::Waitall(pending_.size(),pending_.data());
152  pending_.clear();
153  communicator_.Barrier();
154 #endif
155  }
156 
165  template<typename T>
166  typename std::enable_if<std::is_scalar<T>::value, void>::type
167  send(T& t, int to, int tag)
168  {
169 #if MERCURY_ASSERTS
170  if (to == processorID_)
171  {
172  std::cout << "[MPI FATAL]: Sending data to self!" << std::endl;
173  std::exit(-1);
174  }
175 #endif
176 #ifdef MERCURY_USE_MPI
177  pending_.push_back(communicator_.Isend(&t, 1, Detail::toMPIType(t), to, tag ));
178 #endif
179  }
180 
182  template<typename T>
183  typename std::enable_if<std::is_scalar<T>::value, void>::type
184  send(T* t, int count, int to, int tag)
185  {
186 #if MERCURY_ASSERTS
187  if (to == processorID_)
188  {
189  std::cout << "[MPI FATAL]: Sending data to self!" << std::endl;
190  std::exit(-1);
191  }
192 
193  if (count == 0)
194  {
195  std::cout << "[MPI ERROR]: Sending zero data" << std::endl;
196  }
197 #endif
198 #ifdef MERCURY_USE_MPI
199  pending_.push_back(communicator_.Isend(t, count, Detail::toMPIType(*t), to, tag ));
200 #endif
201  }
202 
211  template<typename T>
212  typename std::enable_if<std::is_scalar<T>::value, void>::type
213  receive(T& t, int from, int tag)
214  {
215 #if MERCURY_ASSERTS
216  if (from == processorID_)
217  {
218  std::cout << "[MPI FATAL]: Receiving data from self!" << std::endl;
219  std::exit(-1);
220  }
221 #endif
222 #ifdef MERCURY_USE_MPI
223  pending_.push_back(communicator_.Irecv(&t, 1, Detail::toMPIType(t), from, tag ));
224 #endif
225  }
226 
228  template<typename T>
229  typename std::enable_if<std::is_scalar<T>::value, void>::type
230  receive(T* t, int count, int from, int tag)
231  {
232 #if MERCURY_ASSERTS
233  if (from == processorID_)
234  {
235  std::cout << "[MPI FATAL]: Receiving data from self!" << std::endl;
236  std::exit(-1);
237  }
238 
239  if (count == 0)
240  {
241  std::cout << "[MPI ERROR]: Receiving zero data" << std::endl;
242  }
243 #endif
244 #ifdef MERCURY_USE_MPI
245  pending_.push_back(communicator_.Irecv(t, count, Detail::toMPIType(*t), from, tag ));
246 #endif
247  }
248 
249 
258  template<typename T>
259  void send(T* t, MercuryMPIType type, int count, int to, int tag)
260  {
261  //std::cout << "[Process: " << processorID_ << "]: QUEUING SEND REQUEST with tag: " << tag << std::endl;
262 #if MERCURY_ASSERTS
263  if (to == processorID_)
264  {
265  std::cout << "[MPI FATAL]: Sending data to self!" << std::endl;
266  std::exit(-1);
267  }
268 
269  if (count == 0)
270  {
271  std::cout << "[MPI ERROR]: Sending zero data" << std::endl;
272  }
273 #endif
274 #ifdef MERCURY_USE_MPI
275  pending_.push_back(communicator_.Isend(t, count, dataTypes_[type], to, tag ));
276 
277 #endif
278  }
279 
288  template<typename T>
289  void receive(T* t, MercuryMPIType type, int count, int from, int tag)
290  {
291  //std::cout << "[Process: " << processorID_ << "]: QUEUING RECEIVE REQUEST with tag: " << tag << std::endl;
292 #if MERCURY_ASSERTS
293  if (from == processorID_)
294  {
295  std::cout << "[MPI FATAL]: Receiving data from self!" << std::endl;
296  std::exit(-1);
297  }
298 
299  if (count == 0)
300  {
301  std::cout << "[MPI ERROR]: Receiving zero data" << std::endl;
302  }
303 #endif
304 #ifdef MERCURY_USE_MPI
305  pending_.push_back(communicator_.Irecv(t, count, dataTypes_[type], from, tag ));
306 #endif
307  }
308 
317  template<typename T>
318  typename std::enable_if<std::is_scalar<T>::value, void>::type
319  directSend(T& t, int count, int to, int tag)
320  {
321 #if MERCURY_ASSERTS
322  if (to == processorID_)
323  {
324  std::cout << "[MPI FATAL]: Sending data to self!" << std::endl;
325  std::exit(-1);
326  }
327 
328  if (count == 0)
329  {
330  std::cout << "[MPI ERROR]: Sending zero data" << std::endl;
331  }
332 #endif
333 #ifdef MERCURY_USE_MPI
334  communicator_.Ssend(&t, count, Detail::toMPIType(t), to, tag);
335 #endif
336  }
337 
338 
339  template<typename T>
340  void directSend(T* t, MercuryMPIType type, int count, int to, int tag)
341  {
342 #if MERCURY_ASSERTS
343  if (to == processorID_)
344  {
345  std::cout << "[MPI FATAL]: Sending data to self!" << std::endl;
346  std::exit(-1);
347  }
348 
349  if (count == 0)
350  {
351  std::cout << "[MPI ERROR]: Sending zero data" << std::endl;
352  }
353 #endif
354 #ifdef MERCURY_USE_MPI
355  communicator_.Ssend(t, count, dataTypes_[type], to, tag);
356 #endif
357  }
358 
367  template<typename T>
368  typename std::enable_if<std::is_scalar<T>::value, void>::type
369  directReceive(T& t, int count, int from, int tag)
370  {
371 #if MERCURY_ASSERTS
372  if (from == processorID_)
373  {
374  std::cout << "[MPI FATAL]: Receiving data from self!" << std::endl;
375  std::exit(-1);
376  }
377 
378  if (count == 0)
379  {
380  std::cout << "[MPI ERROR]: Receiving zero data" << std::endl;
381  }
382 #endif
383 #ifdef MERCURY_USE_MPI
384  communicator_.Recv(&t, count, Detail::toMPIType(t), from, tag);
385 #endif
386  }
387 
388  template<typename T>
389  void directReceive(T* t, MercuryMPIType type, int count, int from, int tag)
390  {
391 #if MERCURY_ASSERTS
392  if (from == processorID_)
393  {
394  std::cout << "[MPI FATAL]: Receiving data from self!" << std::endl;
395  std::exit(-1);
396  }
397 
398  if (count == 0)
399  {
400  std::cout << "[MPI ERROR]: Receiving zero data" << std::endl;
401  }
402 #endif
403 #ifdef MERCURY_USE_MPI
404  communicator_.Recv(t, count, dataTypes_[type], from, tag);
405 #endif
406  }
407 
415  template<typename T>
416  void gather(T& send_t, T* receive_t)
417  {
418 #ifdef MERCURY_USE_MPI
419  communicator_.Gather(&send_t, 1, Detail::toMPIType(send_t), receive_t, 1, Detail::toMPIType(send_t), 0);
420 #endif
421  }
422 
427  template<typename T>
428  typename std::enable_if<std::is_scalar<T>::value, void>::type
429  broadcast(T& t, int fromProcessor = 0)
430  {
431 #ifdef MERCURY_USE_MPI
432  communicator_.Bcast(&t,1,Detail::toMPIType(t),fromProcessor);
433 #endif
434  }
435 
440  template<typename T>
441  typename std::enable_if<std::is_scalar<T>::value, void>::type
442  broadcast(T* t, int size, int fromProcessor)
443  {
444 #ifdef MERCURY_USE_MPI
445  communicator_.Bcast((void *)t,size, Detail::toMPIType(t[0]),fromProcessor);
446 #endif
447  }
448 
453  template<typename T>
454  void broadcast(T* t, MercuryMPIType type, int fromProcessor = 0)
455  {
456 #ifdef MERCURY_USE_MPI
457  communicator_.Bcast((void *)t,1,dataTypes_[type],fromProcessor);
458 #endif
459  }
460 
471 #ifdef MERCURY_USE_MPI
472  template<typename T>
473  typename std::enable_if<std::is_scalar<T>::value, void>::type
474  reduce(T& t, MPI::Op operation, int id = 0)
475  {
476 
477  if(id == getProcessorID())
478  {
479  communicator_.Reduce(MPI::IN_PLACE, &t, 1, Detail::toMPIType(t), operation, id);
480  }
481  else
482  {
483  communicator_.Reduce(&t, nullptr, 1, Detail::toMPIType(t), operation, id);
484  }
485  }
486 #endif
487 
496 #ifdef MERCURY_USE_MPI
497  template<typename T>
498  typename std::enable_if<std::is_scalar<T>::value, void>::type
499  allReduce(T& send_t, T& receive_t, MPI::Op operation)
500  {
501  communicator_.Allreduce(&send_t, &receive_t, 1, Detail::toMPIType(send_t), operation);
502  }
503 #endif
504 
514 #ifdef MERCURY_USE_MPI
515  template<typename T>
516  typename std::enable_if<std::is_scalar<T>::value, void>::type
517  allGather(T& send_t, int send_count, std::vector<T>& receive_t, int receive_count)
518  {
519  communicator_.Allgather(&send_t, send_count, Detail::toMPIType(send_t),
520  receive_t.data(), receive_count, Detail::toMPIType(receive_t[0]));
521  }
522 #endif
523 
527  std::size_t getProcessorID();
528 
532  std::size_t getNumberOfProcessors() const;
533 
537 #ifdef MERCURY_USE_MPI
538  MPI::Intracomm& getComm();
539 #endif
540 
550  template<typename T>
551  void createMercuryMPIType(T t, MercuryMPIType type)
552  {
553 #ifdef MERCURY_USE_MPI
554  MPI_Datatype MPIType;
555  MPI_Type_contiguous(sizeof(T), MPI_BYTE, &MPIType);
556  MPI_Type_commit(&MPIType);
557  dataTypes_.push_back(MPIType);
558 #endif
559  }
560 
561 
565  void deleteMercuryMPITypes()
566  {
567 #ifdef MERCURY_USE_MPI
568  for(MPI_Datatype type : dataTypes_)
569  {
570  MPI_Type_free(&type);
571  }
572 #endif
573  }
574 
578  MPIContainer(const MPIContainer& orig) = delete;
579 
580 private:
581 
585  MPIContainer();
586 
590  std::size_t processorID_;
591 
595  std::size_t numberOfProcessors_;
596 
597 #ifdef MERCURY_USE_MPI
598 
601  std::vector<MPI::Request> pending_;
602 
606  MPI::Intracomm communicator_;
607 
611  std::vector<MPI_Datatype> dataTypes_;
612 
613 #endif
614 
615 };
616 
617 
618 #endif /* MPICONTAINER_H_ */
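The header above exposes MPIContainer as a singleton: a driver fetches it through Instance(), queues asynchronous sends and receives, and then calls sync() to complete all pending requests before the buffers are touched again. The fragment below is a minimal usage sketch and not part of the header; it assumes a build with MERCURY_USE_MPI, a run on at least two processes, and a purely illustrative main() rather than a real MercuryDPM driver.

#include "MpiContainer.h"
#include <iostream>

int main()
{
    // Set up MPI and fetch the singleton wrapper around the communicator.
    initialiseMPI();
    MPIContainer& communicator = MPIContainer::Instance();

    const std::size_t rank = communicator.getProcessorID();
    const std::size_t numberOfProcessors = communicator.getNumberOfProcessors();

    // The root broadcasts a parameter to all other processors.
    double timeStep = (rank == 0) ? 1e-4 : 0.0;
    communicator.broadcast(timeStep);

    // Queue an asynchronous exchange between processors 0 and 1, then block
    // in sync() until all requests have completed; only after sync() is it
    // safe to read or reuse the buffers.
    double sendValue = static_cast<double>(rank);
    double receiveValue = 0.0;
    if (numberOfProcessors > 1)
    {
        if (rank == 0)
        {
            communicator.send(sendValue, 1, 42);       // to processor 1, tag 42
            communicator.receive(receiveValue, 1, 43); // from processor 1, tag 43
        }
        else if (rank == 1)
        {
            communicator.send(sendValue, 0, 43);
            communicator.receive(receiveValue, 0, 42);
        }
        communicator.sync();
    }

    std::cout << "Processor " << rank << " received " << receiveValue << std::endl;
    return 0;
}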
SpeciesHandler
Container to store all ParticleSpecies.
MPIContainer
This class contains all information and functions required for communication between processors...
Definition: MpiContainer.h:125
std::enable_if< std::is_scalar< T >::value, void >::type send(T *t, int count, int to, int tag)
Definition: MpiContainer.h:184
static MPIContainer & Instance()
fetch the instance to be used for communication
Definition: MpiContainer.h:130
void initialiseMercuryMPITypes(const SpeciesHandler &speciesHandler)
Creates the MPI types required for communication of Mercury data through the MPI interface.
Definition: MpiContainer.cc:70
MercuryMPIType
An enum that indicates what type of data is being sent over MPI.
Definition: MpiContainer.h:65
std::size_t processorID_
The ID of the processor this class is running on.
Definition: MpiContainer.h:590
std::enable_if< std::is_scalar< T >::value, void >::type receive(T &t, int from, int tag)
asynchronously receive a scalar from some other processor.
Definition: MpiContainer.h:213
std::size_t getNumberOfProcessors() const
Get the total number of processors participating in this simulation.
void directSend(T *t, MercuryMPIType type, int count, int to, int tag)
Definition: MpiContainer.h:340
void gather(T &send_t, T *receive_t)
Gathers a scalar from all processors to a vector of scalars on the root.
Definition: MpiContainer.h:416
MPIContainer()
Constructor.
Definition: MpiContainer.cc:43
std::size_t getProcessorID()
Get the ID (rank) of the processor this class is running on.
void initialiseMPI()
Initialises the MPI library.
MercuryMPITag
An enum that facilitates the creation of unique communication tags in the parallel code...
Definition: MpiContainer.h:76
void deleteMercuryMPITypes()
Deletes the MercuryMPITypes.
Definition: MpiContainer.h:565
std::enable_if< std::is_scalar< T >::value, void >::type directReceive(T &t, int count, int from, int tag)
synchronously receive a list of scalars from another processor. If the send command has not been issued...
Definition: MpiContainer.h:369
std::enable_if< std::is_scalar< T >::value, void >::type broadcast(T *t, int size, int fromProcessor)
Broadcasts an array of scalars from the root to all other processors.
Definition: MpiContainer.h:442
void broadcast(T *t, MercuryMPIType type, int fromProcessor=0)
Broadcasts a MercuryMPIType to all other processors.
Definition: MpiContainer.h:454
std::enable_if< std::is_scalar< T >::value, void >::type broadcast(T &t, int fromProcessor=0)
Broadcasts a scalar from the root to all other processors.
Definition: MpiContainer.h:429
void sync()
Process all pending asynchronous communication requests before continuing.
Definition: MpiContainer.h:148
std::enable_if< std::is_scalar< T >::value, void >::type send(T &t, int to, int tag)
Asynchronously send a scalar to some other processor.
Definition: MpiContainer.h:167
void createMercuryMPIType(T t, MercuryMPIType type)
Creates and registers an MPI data type for a MercuryDPM data class.
Definition: MpiContainer.h:551
void receive(T *t, MercuryMPIType type, int count, int from, int tag)
asynchronously receive a list of MercuryMPIType objects from some other processor.
Definition: MpiContainer.h:289
std::enable_if< std::is_scalar< T >::value, void >::type receive(T *t, int count, int from, int tag)
Definition: MpiContainer.h:230
std::enable_if< std::is_scalar< T >::value, void >::type directSend(T &t, int count, int to, int tag)
synchronously send a list of scalars to another processor. the data should be received directly or th...
Definition: MpiContainer.h:319
std::size_t numberOfProcessors_
The total number of processors in the communicator.
Definition: MpiContainer.h:595
void send(T *t, MercuryMPIType type, int count, int to, int tag)
asynchronously send a list of MercuryMPIType objects to some other processor.
Definition: MpiContainer.h:259
void directReceive(T *t, MercuryMPIType type, int count, int from, int tag)
Definition: MpiContainer.h:389
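For non-scalar data the container works with registered MPI types: createMercuryMPIType() packs a trivially copyable class into a contiguous block of MPI_BYTEs and stores it in dataTypes_, after which the typed send()/receive() overloads address it through the MercuryMPIType enum. The sketch below illustrates that path under stated assumptions: the ExamplePayload struct, the tag value, and the standalone registration call are hypothetical; in MercuryDPM itself the real data classes are registered by initialiseMercuryMPITypes().

#include "MpiContainer.h"
#include <vector>

// Hypothetical trivially copyable payload, standing in for a real Mercury
// data class; in the library these types are registered by
// initialiseMercuryMPITypes().
struct ExamplePayload
{
    double position[3];
    double velocity[3];
};

void exchangePayload()
{
    MPIContainer& communicator = MPIContainer::Instance();

    // Register the payload as sizeof(ExamplePayload) contiguous bytes.
    // dataTypes_ is indexed by the enum value, so types must be registered
    // in the order they appear in MercuryMPIType; PARTICLE (= 0) is used
    // here so this standalone sketch stays self-consistent.
    ExamplePayload reference;
    communicator.createMercuryMPIType(reference, MercuryMPIType::PARTICLE);

    std::vector<ExamplePayload> buffer(8);
    const std::size_t rank = communicator.getProcessorID();
    if (rank == 0)
    {
        // Queue an asynchronous typed send of the whole buffer to processor 1.
        communicator.send(buffer.data(), MercuryMPIType::PARTICLE,
                          static_cast<int>(buffer.size()), 1, 7);
    }
    else if (rank == 1)
    {
        // Matching asynchronous typed receive from processor 0, same tag.
        communicator.receive(buffer.data(), MercuryMPIType::PARTICLE,
                             static_cast<int>(buffer.size()), 0, 7);
    }
    // Complete all queued requests before the buffer is read or destroyed.
    communicator.sync();
}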