#ifndef NGCORE_MPIWRAPPER_HPP
#define NGCORE_MPIWRAPPER_HPP

#include <array>

#ifdef PARALLEL
#define OMPI_SKIP_MPICXX
#include <mpi.h>
#endif

#include "array.hpp"
#include "table.hpp"
#include "exception.hpp"
#include "profiler.hpp"
#include "ngstream.hpp"

namespace ngcore
{

#ifdef PARALLEL

  // maps C++ types to the corresponding MPI datatypes
  template <class T> struct MPI_typetrait { };

  template <> struct MPI_typetrait<int> {
    static MPI_Datatype MPIType () { return MPI_INT; } };

  template <> struct MPI_typetrait<short> {
    static MPI_Datatype MPIType () { return MPI_SHORT; } };

  template <> struct MPI_typetrait<char> {
    static MPI_Datatype MPIType () { return MPI_CHAR; } };

  template <> struct MPI_typetrait<signed char> {
    static MPI_Datatype MPIType () { return MPI_CHAR; } };

  template <> struct MPI_typetrait<unsigned char> {
    static MPI_Datatype MPIType () { return MPI_CHAR; } };

  template <> struct MPI_typetrait<size_t> {
    static MPI_Datatype MPIType () { return MPI_UINT64_T; } };

  template <> struct MPI_typetrait<double> {
    static MPI_Datatype MPIType () { return MPI_DOUBLE; } };

  template <> struct MPI_typetrait<bool> {
    static MPI_Datatype MPIType () { return MPI_C_BOOL; } };

  // a fixed-size array of a registered type maps to a contiguous MPI datatype
  template <typename T, size_t S>
  struct MPI_typetrait<std::array<T,S>>
  {
    static MPI_Datatype MPIType ()
    {
      static MPI_Datatype MPI_T = 0;
      if (!MPI_T)
        {
          MPI_Type_contiguous ( S, MPI_typetrait<T>::MPIType(), &MPI_T);
          MPI_Type_commit ( &MPI_T );
        }
      return MPI_T;
    }
  };

  template <class T, class T2 = decltype(MPI_typetrait<T>::MPIType())>
  inline MPI_Datatype GetMPIType () {
    return MPI_typetrait<T>::MPIType();
  }

  template <class T>
  inline MPI_Datatype GetMPIType (T &) {
    return GetMPIType<T>();
  }
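
  // Usage sketch (illustrative only; the variables below are not part of this
  // header): any type with a MPI_typetrait specialization can be queried
  // directly, and std::array of such a type yields a contiguous datatype.
  //
  //   MPI_Datatype dt_int = GetMPIType<int>();                   // MPI_INT
  //   MPI_Datatype dt_pnt = GetMPIType<std::array<double,3>>();  // 3 x MPI_DOUBLE, contiguous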

  inline void MyMPI_WaitAll (FlatArray<MPI_Request> requests)
  {
    static Timer t("MPI - WaitAll"); RegionTimer reg(t);
    if (!requests.Size()) return;
    MPI_Waitall (requests.Size(), requests.Data(), MPI_STATUSES_IGNORE);
  }

  inline int MyMPI_WaitAny (FlatArray<MPI_Request> requests)
  {
    int nr;
    MPI_Waitany (requests.Size(), requests.Data(), &nr, MPI_STATUS_IGNORE);
    return nr;
  }


  class NgMPI_Comm
  {
  protected:
    MPI_Comm comm;
    bool valid_comm;
    int * refcount;   // shared reference count, only allocated if we own the communicator
    int rank, size;

  public:
    NgMPI_Comm ()
      : valid_comm(false), refcount(nullptr), rank(0), size(1)
    { ; }

    NgMPI_Comm (MPI_Comm _comm, bool owns = false)
      : comm(_comm), valid_comm(true)
    {
      int flag;
      MPI_Initialized (&flag);
      if (!flag)
        {
          valid_comm = false;
          refcount = nullptr;
          rank = 0;
          size = 1;
          return;
        }

      if (!owns)
        refcount = nullptr;
      else
        refcount = new int{1};

      MPI_Comm_rank(comm, &rank);
      MPI_Comm_size(comm, &size);
    }

    NgMPI_Comm (const NgMPI_Comm & c)
      : comm(c.comm), valid_comm(c.valid_comm), refcount(c.refcount),
        rank(c.rank), size(c.size)
    {
      if (refcount) (*refcount)++;
    }

    NgMPI_Comm (NgMPI_Comm && c)
      : comm(c.comm), valid_comm(c.valid_comm), refcount(c.refcount),
        rank(c.rank), size(c.size)
    {
      c.refcount = nullptr;
    }

    ~NgMPI_Comm()
    {
      if (refcount)
        if (--(*refcount) == 0)
          MPI_Comm_free(&comm);
    }

    bool ValidCommunicator() const { return valid_comm; }

    NgMPI_Comm & operator= (const NgMPI_Comm & c)
    {
      if (refcount)
        if (--(*refcount) == 0)
          MPI_Comm_free(&comm);

      refcount = c.refcount;
      if (refcount) (*refcount)++;
      comm = c.comm;
      valid_comm = c.valid_comm;
      size = c.size;
      rank = c.rank;
      return *this;
    }

    class InvalidCommException : public Exception
    {
    public:
      InvalidCommException() : Exception("Do not have a valid communicator") { ; }
    };

    operator MPI_Comm() const
    {
      if (!valid_comm) throw InvalidCommException();
      return comm;
    }

    int Rank() const { return rank; }
    int Size() const { return size; }

    void Barrier() const
    {
      static Timer t("MPI - Barrier"); RegionTimer reg(t);
      if (size > 1) MPI_Barrier (comm);
    }


    /** --- blocking P2P --- **/

    template <typename T, typename T2 = decltype(GetMPIType<T>())>
    void Send (T & val, int dest, int tag) const {
      MPI_Send (&val, 1, GetMPIType<T>(), dest, tag, comm);
    }

    void Send (const std::string & s, int dest, int tag) const {
      MPI_Send( const_cast<char*> (&s[0]), s.length(), MPI_CHAR, dest, tag, comm);
    }

    template <typename T, typename T2 = decltype(GetMPIType<T>())>
    void Send (FlatArray<T> s, int dest, int tag) const {
      MPI_Send (s.Data(), s.Size(), GetMPIType<T>(), dest, tag, comm);
    }

    template <typename T, typename T2 = decltype(GetMPIType<T>())>
    void Recv (T & val, int src, int tag) const {
      MPI_Recv (&val, 1, GetMPIType<T>(), src, tag, comm, MPI_STATUS_IGNORE);
    }

    void Recv (std::string & s, int src, int tag) const
    {
      MPI_Status status;
      int len;
      MPI_Probe (src, tag, comm, &status);
      MPI_Get_count (&status, MPI_CHAR, &len);
      // s.assign (len, ' ');
      s.resize (len);
      MPI_Recv( &s[0], len, MPI_CHAR, src, tag, comm, MPI_STATUS_IGNORE);
    }

    template <typename T, typename T2 = decltype(GetMPIType<T>())>
    void Recv (FlatArray<T> s, int src, int tag) const {
      MPI_Recv (s.Data(), s.Size(), GetMPIType<T>(), src, tag, comm, MPI_STATUS_IGNORE);
    }

    template <typename T, typename T2 = decltype(GetMPIType<T>())>
    void Recv (Array<T> & s, int src, int tag) const
    {
      MPI_Status status;
      int len;
      const MPI_Datatype MPI_T = GetMPIType<T> ();
      MPI_Probe (src, tag, comm, &status);
      MPI_Get_count (&status, MPI_T, &len);
      s.SetSize (len);
      MPI_Recv (s.Data(), len, MPI_T, src, tag, comm, MPI_STATUS_IGNORE);
    }
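
    // Usage sketch for the blocking calls above (illustrative; the
    // communicator, values and tags are assumptions, not fixed here):
    //
    //   NgMPI_Comm comm(MPI_COMM_WORLD);
    //   double val = 42.0;
    //   if (comm.Rank() == 0)
    //     comm.Send (val, /* dest */ 1, /* tag */ 0);
    //   else if (comm.Rank() == 1)
    //     comm.Recv (val, /* src */ 0, /* tag */ 0);
    //
    // The Recv(Array<T>&) overload sizes the buffer via MPI_Probe /
    // MPI_Get_count, so the receiver need not know the length in advance.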

    /** --- non-blocking P2P --- **/

    template <typename T, typename T2 = decltype(GetMPIType<T>())>
    MPI_Request ISend (T & val, int dest, int tag) const
    {
      MPI_Request request;
      MPI_Isend (&val, 1, GetMPIType<T>(), dest, tag, comm, &request);
      return request;
    }

    template <typename T, typename T2 = decltype(GetMPIType<T>())>
    MPI_Request ISend (FlatArray<T> s, int dest, int tag) const
    {
      MPI_Request request;
      MPI_Isend (s.Data(), s.Size(), GetMPIType<T>(), dest, tag, comm, &request);
      return request;
    }

    template <typename T, typename T2 = decltype(GetMPIType<T>())>
    MPI_Request IRecv (T & val, int dest, int tag) const
    {
      MPI_Request request;
      MPI_Irecv (&val, 1, GetMPIType<T>(), dest, tag, comm, &request);
      return request;
    }

    template <typename T, typename T2 = decltype(GetMPIType<T>())>
    MPI_Request IRecv (FlatArray<T> s, int src, int tag) const
    {
      MPI_Request request;
      MPI_Irecv (s.Data(), s.Size(), GetMPIType<T>(), src, tag, comm, &request);
      return request;
    }


    /** --- collectives --- **/

    template <typename T, typename T2 = decltype(GetMPIType<T>())>
    T Reduce (T d, const MPI_Op & op, int root = 0) const
    {
      static Timer t("MPI - Reduce"); RegionTimer reg(t);
      if (size == 1) return d;

      T global_d;
      MPI_Reduce (&d, &global_d, 1, GetMPIType<T>(), op, root, comm);
      return global_d;
    }

    template <typename T, typename T2 = decltype(GetMPIType<T>())>
    T AllReduce (T d, const MPI_Op & op) const
    {
      static Timer t("MPI - AllReduce"); RegionTimer reg(t);
      if (size == 1) return d;

      T global_d;
      MPI_Allreduce ( &d, &global_d, 1, GetMPIType<T>(), op, comm);
      return global_d;
    }

    template <typename T, typename T2 = decltype(GetMPIType<T>())>
    void AllReduce (FlatArray<T> d, const MPI_Op & op) const
    {
      static Timer t("MPI - AllReduce Array"); RegionTimer reg(t);
      if (size == 1) return;
      MPI_Allreduce (MPI_IN_PLACE, d.Data(), d.Size(), GetMPIType<T>(), op, comm);
    }

    template <typename T, typename T2 = decltype(GetMPIType<T>())>
    void Bcast (T & s, int root = 0) const
    {
      if (size == 1) return;
      static Timer t("MPI - Bcast"); RegionTimer reg(t);
      MPI_Bcast (&s, 1, GetMPIType<T>(), root, comm);
    }

    template <class T>
    void Bcast (Array<T> & d, int root = 0)
    {
      if (size == 1) return;
      int ds = d.Size();
      Bcast (ds, root);
      if (Rank() != root) d.SetSize (ds);
      if (ds != 0)
        MPI_Bcast (d.Data(), ds, GetMPIType<T>(), root, comm);
    }

    void Bcast (std::string & s, int root = 0) const
    {
      if (size == 1) return;
      int len = s.length();
      Bcast (len, root);
      // resize on all non-root ranks before receiving the characters
      if (rank != root) s.resize (len);
      MPI_Bcast (&s[0], len, MPI_CHAR, root, comm);
    }

    template <typename T>
    void AllToAll (FlatArray<T> send, FlatArray<T> recv) const
    {
      MPI_Alltoall (send.Data(), 1, GetMPIType<T>(),
                    recv.Data(), 1, GetMPIType<T>(), comm);
    }

    template <typename T>
    void ScatterRoot (FlatArray<T> send) const
    {
      if (size == 1) return;
      MPI_Scatter (send.Data(), 1, GetMPIType<T>(),
                   MPI_IN_PLACE, -1, GetMPIType<T>(), 0, comm);
    }

    template <typename T>
    void Scatter (T & recv) const
    {
      if (size == 1) return;
      MPI_Scatter (NULL, 0, GetMPIType<T>(),
                   &recv, 1, GetMPIType<T>(), 0, comm);
    }

    template <typename T>
    void GatherRoot (FlatArray<T> recv) const
    {
      recv[0] = T(0);
      if (size == 1) return;
      MPI_Gather (MPI_IN_PLACE, 1, GetMPIType<T>(),
                  recv.Data(), 1, GetMPIType<T>(), 0, comm);
    }

    template <typename T>
    void Gather (T send) const
    {
      if (size == 1) return;
      MPI_Gather (&send, 1, GetMPIType<T>(),
                  NULL, 1, GetMPIType<T>(), 0, comm);
    }

    template <typename T>
    void AllGather (T val, FlatArray<T> recv) const
    {
      if (size == 1)
        {
          recv[0] = val;
          return;
        }
      MPI_Allgather (&val, 1, GetMPIType<T>(),
                     recv.Data(), 1, GetMPIType<T>(), comm);
    }

    // exchange one table row with every other rank, resizing recv_data
    // according to the sizes communicated via AllToAll
    template <typename T>
    void ExchangeTable (DynamicTable<T> & send_data,
                        DynamicTable<T> & recv_data, int tag)
    {
      Array<int> send_sizes(size);
      Array<int> recv_sizes(size);

      for (int i = 0; i < size; i++)
        send_sizes[i] = send_data[i].Size();

      AllToAll (send_sizes, recv_sizes);

      recv_data = DynamicTable<T> (recv_sizes, true);

      Array<MPI_Request> requests;
      for (int dest = 0; dest < size; dest++)
        if (dest != rank && send_data[dest].Size())
          requests.Append (ISend (FlatArray<T>(send_data[dest]), dest, tag));

      for (int dest = 0; dest < size; dest++)
        if (dest != rank && recv_data[dest].Size())
          requests.Append (IRecv (FlatArray<T>(recv_data[dest]), dest, tag));

      MyMPI_WaitAll (requests);
    }
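
    // Usage sketch for the collectives above (illustrative; the local
    // values are assumptions):
    //
    //   NgMPI_Comm comm(MPI_COMM_WORLD);
    //   size_t local_cnt = 10;
    //   size_t total = comm.AllReduce (local_cnt, MPI_SUM);            // identical on all ranks
    //   double dmax  = comm.Reduce (double(comm.Rank()), MPI_MAX, 0);  // meaningful on root only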

    // create a communicator containing only the given ranks
    NgMPI_Comm SubCommunicator (FlatArray<int> procs) const
    {
      MPI_Comm subcomm;
      MPI_Group gcomm, gsubcomm;
      MPI_Comm_group(comm, &gcomm);
      MPI_Group_incl(gcomm, procs.Size(), procs.Data(), &gsubcomm);
      MPI_Comm_create_group(comm, gsubcomm, 4242, &subcomm);
      return NgMPI_Comm(subcomm, true);
    }

  }; // class NgMPI_Comm


  // RAII helper: initializes MPI if it is not yet initialized, and
  // finalizes it on destruction only if this object did the initialization
  class MyMPI
  {
    bool initialized_by_me;
  public:
    MyMPI(int argc, char ** argv)
    {
      int is_init = -1;
      MPI_Initialized(&is_init);
      if (!is_init)
        {
          MPI_Init (&argc, &argv);
          initialized_by_me = true;
        }
      else
        initialized_by_me = false;

      NgMPI_Comm comm(MPI_COMM_WORLD);
      NGSOStream::SetGlobalActive (comm.Rank() == 0);

      if (comm.Size() > 1)
        TaskManager::SetNumThreads (1);
    }

    ~MyMPI()
    {
      if (initialized_by_me)
        MPI_Finalize ();
    }
  };

#else // PARALLEL

  class MPI_Comm
  {
    int nr;
  public:
    MPI_Comm (int _nr = 0) : nr(_nr) { ; }
    operator int() const { return nr; }
    bool operator== (MPI_Comm c2) const { return nr == c2.nr; }
  };

  static MPI_Comm MPI_COMM_WORLD = 12345, MPI_COMM_NULL = 10000;

  typedef int MPI_Op;
  typedef int MPI_Datatype;
  typedef int MPI_Request;

  enum { MPI_SUM = 0, MPI_MIN = 1, MPI_MAX = 2, MPI_LOR = 4711 };

  inline void MPI_Type_contiguous ( int, MPI_Datatype, MPI_Datatype*) { ; }
  inline void MPI_Type_commit ( MPI_Datatype * ) { ; }

  template <class T> struct MPI_typetrait {
    static MPI_Datatype MPIType () { return -1; }
  };

  template <class T, class T2 = decltype(MPI_typetrait<T>::MPIType())>
  inline MPI_Datatype GetMPIType () { return -1; }

  // serial stub with the same interface as the parallel NgMPI_Comm
  class NgMPI_Comm
  {
  public:
    NgMPI_Comm () { ; }
    NgMPI_Comm (MPI_Comm _comm, bool owns = false) { ; }

    size_t Rank() const { return 0; }
    size_t Size() const { return 1; }

    bool ValidCommunicator() const { return false; }

    void Barrier() const { ; }
    operator MPI_Comm() const { return MPI_Comm(); }

    template <typename T>
    void Send (T & val, int dest, int tag) const { ; }

    template <typename T>
    void Send (FlatArray<T> s, int dest, int tag) const { ; }

    template <typename T>
    void Recv (T & val, int src, int tag) const { ; }

    template <typename T>
    void Recv (FlatArray<T> s, int src, int tag) const { ; }

    template <typename T>
    void Recv (Array<T> & s, int src, int tag) const { ; }

    template <typename T>
    MPI_Request ISend (T & val, int dest, int tag) const { return 0; }

    template <typename T>
    MPI_Request ISend (FlatArray<T> s, int dest, int tag) const { return 0; }

    template <typename T>
    MPI_Request IRecv (T & val, int dest, int tag) const { return 0; }

    template <typename T>
    MPI_Request IRecv (FlatArray<T> s, int src, int tag) const { return 0; }

    template <typename T>
    T Reduce (T d, const MPI_Op & op, int root = 0) const { return d; }

    template <typename T>
    T AllReduce (T d, const MPI_Op & op) const { return d; }

    template <typename T>
    void AllReduce (FlatArray<T> d, const MPI_Op & op) const { ; }

    template <typename T>
    void Bcast (T & s, int root = 0) const { ; }

    template <class T>
    void Bcast (Array<T> & d, int root = 0) { ; }

    template <typename T>
    void AllGather (T val, FlatArray<T> recv) const
    {
      recv[0] = val;
    }

    template <typename T>
    void ExchangeTable (DynamicTable<T> & send_data,
                        DynamicTable<T> & recv_data, int tag) { ; }

    NgMPI_Comm SubCommunicator (FlatArray<int> procs) const
    { return *this; }
  };

  inline void MyMPI_WaitAll (FlatArray<MPI_Request> requests) { ; }
  inline int MyMPI_WaitAny (FlatArray<MPI_Request> requests) { return 0; }

  class MyMPI
  {
  public:
    MyMPI(int argc, char ** argv) { ; }
  };

#endif // PARALLEL

} // namespace ngcore

#endif // NGCORE_MPIWRAPPER_HPP