Merge branch 'subcomm' into 'master'

sub-communicators as methods of NgMPI_Comm, using ngcore-arrays

See merge request jschoeberl/netgen!215
Joachim Schöberl 2019-08-29 09:32:35 +00:00
commit fcea398d7c
3 changed files with 17 additions and 31 deletions

View File

@@ -174,10 +174,10 @@ namespace ngcore
     }

     template<typename T, typename T2 = decltype(GetMPIType<T>())>
-    MPI_Request IRecv (T & val, int src, int tag) const
+    MPI_Request IRecv (T & val, int dest, int tag) const
     {
       MPI_Request request;
-      MPI_Irecv (&val, 1, GetMPIType<T>(), src, tag, comm, &request);
+      MPI_Irecv (&val, 1, GetMPIType<T>(), dest, tag, comm, &request);
       return request;
     }
@@ -227,6 +227,16 @@ namespace ngcore
       MPI_Bcast (&s[0], len, MPI_CHAR, root, comm);
     }

+    NgMPI_Comm SubCommunicator (FlatArray<int> procs) const
+    {
+      MPI_Comm subcomm;
+      MPI_Group gcomm, gsubcomm;
+      MPI_Comm_group(comm, &gcomm);
+      MPI_Group_incl(gcomm, procs.Size(), procs.Data(), &gsubcomm);
+      MPI_Comm_create_group(comm, gsubcomm, 4242, &subcomm);
+      return NgMPI_Comm(subcomm, true);
+    }
+
   }; // class NgMPI_Comm

   NETGEN_INLINE void MyMPI_WaitAll (FlatArray<MPI_Request> requests)
@@ -313,15 +323,7 @@ namespace ngcore
 #endif // PARALLEL

-} // namespace ngcore
+}

 #endif // NGCORE_MPIWRAPPER_HPP
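The new SubCommunicator builds the sub-communicator with MPI_Comm_create_group, which is collective only over the ranks contained in procs; ranks outside the list must not call it (the Python binding further below guards this with a Contains check). A minimal usage sketch, assuming a PARALLEL build, an include path of core/ngcore.hpp, and an even-rank split; all three are illustrative choices, not part of this commit:

// hypothetical standalone example, not part of this commit
#include <iostream>
#include <mpi.h>
#include <core/ngcore.hpp>   // assumed include path for the ngcore headers

int main(int argc, char ** argv)
{
  MPI_Init(&argc, &argv);

  int rank, size;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  MPI_Comm_size(MPI_COMM_WORLD, &size);

  // wrap MPI_COMM_WORLD; the bool presumably tells the wrapper whether it
  // owns (and should free) the communicator, as in NgMPI_Comm(subcomm, true)
  ngcore::NgMPI_Comm comm(MPI_COMM_WORLD, false);

  // ranks 0, 2, 4, ... go into the sub-communicator
  ngcore::Array<int> procs((size + 1) / 2);
  for (int i = 0; i < procs.Size(); i++)
    procs[i] = 2 * i;

  // MPI_Comm_create_group is collective over the listed ranks only,
  // so only those ranks may enter SubCommunicator
  if (rank % 2 == 0)
    {
      auto sub = comm.SubCommunicator(procs);
      std::cout << "world rank " << rank << " -> sub rank " << sub.Rank() << std::endl;
    }

  MPI_Finalize();
  return 0;
}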

View File

@@ -94,21 +94,6 @@ namespace netgen
 template <class T> inline MPI_Datatype MyGetMPIType ( ) { return 0; }
 #endif

-#ifdef PARALLEL
-  inline MPI_Comm MyMPI_SubCommunicator(MPI_Comm comm, NgArray<int> & procs)
-  {
-    MPI_Comm subcomm;
-    MPI_Group gcomm, gsubcomm;
-    MPI_Comm_group(comm, &gcomm);
-    MPI_Group_incl(gcomm, procs.Size(), &(procs[0]), &gsubcomm);
-    MPI_Comm_create_group(comm, gsubcomm, 6969, &subcomm);
-    return subcomm;
-  }
-#else
-  inline MPI_Comm MyMPI_SubCommunicator(MPI_Comm comm, NgArray<int> & procs)
-  { return comm; }
-#endif
-
 #ifdef PARALLEL
 enum { MPI_TAG_CMD = 110 };
 enum { MPI_TAG_MESH = 210 };
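The removed MyMPI_SubCommunicator and the new NgMPI_Comm::SubCommunicator use the same group-based MPI pattern; the magic constants 6969 and 4242 are just tags that disambiguate concurrent MPI_Comm_create_group calls on the same parent communicator. For reference, a self-contained plain-MPI sketch of that pattern; the make_subcomm name is hypothetical, and unlike the code above it also frees the group handles, which is legal once the communicator exists:

#include <mpi.h>

// Build a communicator containing exactly the given ranks of 'comm'.
// MPI_Comm_create_group is collective only over the ranks in 'ranks'.
MPI_Comm make_subcomm (MPI_Comm comm, const int * ranks, int nranks, int tag)
{
  MPI_Group gcomm, gsub;
  MPI_Comm subcomm;
  MPI_Comm_group(comm, &gcomm);                      // group of the parent communicator
  MPI_Group_incl(gcomm, nranks, ranks, &gsub);       // keep only the listed ranks
  MPI_Comm_create_group(comm, gsub, tag, &subcomm);  // collective over 'gsub' only
  MPI_Group_free(&gsub);                             // group handles may be freed right away
  MPI_Group_free(&gcomm);
  return subcomm;
}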

View File

@@ -88,13 +88,12 @@ DLL_HEADER void ExportNetgenMeshing(py::module &m)
     .def("Min", [](NgMPI_Comm & c, size_t x) { return MyMPI_AllReduceNG(x, MPI_MIN, c); })
     .def("Max", [](NgMPI_Comm & c, size_t x) { return MyMPI_AllReduceNG(x, MPI_MAX, c); })
     .def("SubComm", [](NgMPI_Comm & c, std::vector<int> proc_list) {
-        NgArray<int> procs(proc_list.size());
+        Array<int> procs(proc_list.size());
         for (int i = 0; i < procs.Size(); i++)
-          procs[i] = proc_list[i];
+          { procs[i] = proc_list[i]; }
         if (!procs.Contains(c.Rank()))
-          throw Exception("rank "+ToString(c.Rank())+" not in subcomm");
-        MPI_Comm subcomm = MyMPI_SubCommunicator(c, procs);
-        return NgMPI_Comm(subcomm, true);
+          { throw Exception("rank "+ToString(c.Rank())+" not in subcomm"); }
+        return c.SubCommunicator(procs);
       }, py::arg("procs"));
     ;