From c074e0c752c60271a12387e976a312181dae16f3 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Joachim=20Sch=C3=B6berl?=
Date: Fri, 31 Jul 2020 09:57:19 +0200
Subject: [PATCH] reduce duplicated mpi-wrapping

---
 libsrc/core/mpi_wrapper.hpp      |   8 ++
 libsrc/core/ngcore.hpp           |   2 +-
 libsrc/general/mpi_interface.hpp | 235 ++++++-------------------------
 libsrc/general/ngarray.hpp       |   6 +
 libsrc/gprim/geomobjects.hpp     |   2 +-
 libsrc/meshing/meshtype.cpp      |   8 +-
 libsrc/meshing/parallelmesh.cpp  |   8 +-
 libsrc/meshing/paralleltop.cpp   | 121 ----------------
 libsrc/meshing/python_mesh.cpp   |  35 ++---
 9 files changed, 80 insertions(+), 345 deletions(-)

diff --git a/libsrc/core/mpi_wrapper.hpp b/libsrc/core/mpi_wrapper.hpp
index c1fc47dd..11af5f04 100644
--- a/libsrc/core/mpi_wrapper.hpp
+++ b/libsrc/core/mpi_wrapper.hpp
@@ -227,6 +227,14 @@ namespace ngcore
       MPI_Bcast (&s[0], len, MPI_CHAR, root, comm);
     }
 
+    template <typename T>
+    void AllToAll (FlatArray<T> send, FlatArray<T> recv) const
+    {
+      MPI_Alltoall (send.Data(), 1, GetMPIType<T>(),
+                    recv.Data(), 1, GetMPIType<T>(), comm);
+    }
+
+
     NgMPI_Comm SubCommunicator (FlatArray<int> procs) const
     {
       MPI_Comm subcomm;
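The new AllToAll wrapper above sends exactly one element of type T to every
rank and receives one element from every rank, so both arrays need
comm.Size() entries. A minimal usage sketch (the helper name ExchangeCounts
and the include path are illustrative assumptions, not part of the patch):

    #include <core/ngcore.hpp>
    using namespace ngcore;

    // every rank announces to each peer how many items it will send later;
    // afterwards theircounts[src] holds the count announced by rank src
    void ExchangeCounts (const NgMPI_Comm & comm,
                         FlatArray<int> mycounts, Array<int> & theircounts)
    {
      theircounts.SetSize (comm.Size());
      comm.AllToAll (mycounts, FlatArray<int> (theircounts));
    }

This is exactly the size-exchange pattern MyMPI_ExchangeTable uses below.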
diff --git a/libsrc/core/ngcore.hpp b/libsrc/core/ngcore.hpp
index 91d65bde..72ebde25 100644
--- a/libsrc/core/ngcore.hpp
+++ b/libsrc/core/ngcore.hpp
@@ -6,6 +6,7 @@
 #include "bitarray.hpp"
 #include "exception.hpp"
 #include "flags.hpp"
+#include "table.hpp"
 #include "hashtable.hpp"
 #include "localheap.hpp"
 #include "logging.hpp"
@@ -13,7 +14,6 @@
 #include "profiler.hpp"
 #include "signal.hpp"
 #include "symboltable.hpp"
-#include "table.hpp"
 #include "taskmanager.hpp"
 #include "version.hpp"
 #include "xbool.hpp"

diff --git a/libsrc/general/mpi_interface.hpp b/libsrc/general/mpi_interface.hpp
index b7de6d64..08e7e85a 100644
--- a/libsrc/general/mpi_interface.hpp
+++ b/libsrc/general/mpi_interface.hpp
@@ -14,65 +14,8 @@
 namespace netgen
 {
-  // using ngcore::id;
-  // using ngcore::ntasks;
-
-#ifndef PARALLEL
-  /** without MPI, we need a dummy typedef **/
-  // typedef int MPI_Comm;
-#endif
-
-  /** This is the "standard" communicator that will be used for netgen-objects. **/
-  // extern DLL_HEADER NgMPI_Comm ng_comm;
 
 #ifdef OLD
-#ifdef PARALLEL
-  inline int MyMPI_GetNTasks (MPI_Comm comm /* = ng_comm */)
-  {
-    int ntasks;
-    MPI_Comm_size(comm, &ntasks);
-    return ntasks;
-  }
-  inline int MyMPI_GetId (MPI_Comm comm /* = ng_comm */)
-  {
-    int id;
-    MPI_Comm_rank(comm, &id);
-    return id;
-  }
-#else
-  // enum { MPI_COMM_WORLD = 12345, MPI_COMM_NULL = 0};
-  inline int MyMPI_GetNTasks (MPI_Comm comm /* = ng_comm */) { return 1; }
-  inline int MyMPI_GetId (MPI_Comm comm /* = ng_comm */) { return 0; }
-#endif
-#endif
-
-  /*
-#ifdef PARALLEL
-  // For python wrapping of communicators
-  struct PyMPI_Comm {
-    MPI_Comm comm;
-    bool owns_comm;
-    PyMPI_Comm (MPI_Comm _comm, bool _owns_comm = false) : comm(_comm), owns_comm(_owns_comm) { }
-    PyMPI_Comm (const PyMPI_Comm & c) = delete;
-    ~PyMPI_Comm () {
-      if (owns_comm)
-        MPI_Comm_free(&comm);
-    }
-    inline int Rank() const { return MyMPI_GetId(comm); }
-    inline int Size() const { return MyMPI_GetNTasks(comm); }
-  };
-#else
-  // dummy without MPI
-  struct PyMPI_Comm {
-    MPI_Comm comm = 0;
-    PyMPI_Comm (MPI_Comm _comm, bool _owns_comm = false) { }
-    ~PyMPI_Comm () { }
-    inline int Rank() const { return 0; }
-    inline int Size() const { return 1; }
-  };
-#endif
-  */
-
 #ifdef PARALLEL
   template <class T>
   inline MPI_Datatype MyGetMPIType ( )
@@ -93,32 +36,35 @@ namespace netgen
   typedef int MPI_Datatype;
   template <class T> inline MPI_Datatype MyGetMPIType ( ) { return 0; }
 #endif
+#endif
+
 
 #ifdef PARALLEL
 
   enum { MPI_TAG_CMD = 110 };
   enum { MPI_TAG_MESH = 210 };
   enum { MPI_TAG_VIS = 310 };
 
-  inline void MyMPI_Send (int i, int dest, int tag, MPI_Comm comm /* = ng_comm */)
+
+  [[deprecated("mympi_send int, use comm.Send instead")]]
+  inline void MyMPI_Send (int i, int dest, int tag, MPI_Comm comm)
   {
     int hi = i;
     MPI_Send( &hi, 1, MPI_INT, dest, tag, comm);
   }
-
-  inline void MyMPI_Recv (int & i, int src, int tag, MPI_Comm comm /* = ng_comm */)
+
+  [[deprecated("mympi_recv int, use comm.Recv instead")]]
+  inline void MyMPI_Recv (int & i, int src, int tag, MPI_Comm comm)
   {
     MPI_Status status;
     MPI_Recv( &i, 1, MPI_INT, src, tag, comm, &status);
   }
 
-
-  inline void MyMPI_Send (const string & s, int dest, int tag, MPI_Comm comm /* = ng_comm */)
+  inline void MyMPI_Send (const string & s, int dest, int tag, MPI_Comm comm)
   {
     MPI_Send( const_cast<char*> (s.c_str()), s.length(), MPI_CHAR, dest, tag, comm);
   }
 
-  inline void MyMPI_Recv (string & s, int src, int tag, MPI_Comm comm /* = ng_comm */)
+  inline void MyMPI_Recv (string & s, int src, int tag, MPI_Comm comm)
   {
     MPI_Status status;
     int len;
@@ -132,32 +78,32 @@ namespace netgen
 
 
   template <class T, int BASE>
-  inline void MyMPI_Send (NgFlatArray<T, BASE> s, int dest, int tag, MPI_Comm comm /* = ng_comm */)
+  inline void MyMPI_Send (NgFlatArray<T, BASE> s, int dest, int tag, MPI_Comm comm)
   {
-    MPI_Send( &s.First(), s.Size(), MyGetMPIType<T>(), dest, tag, comm);
+    MPI_Send( &s.First(), s.Size(), GetMPIType<T>(), dest, tag, comm);
   }
 
   template <class T, int BASE>
-  inline void MyMPI_Recv ( NgFlatArray<T, BASE> s, int src, int tag, MPI_Comm comm /* = ng_comm */)
+  inline void MyMPI_Recv ( NgFlatArray<T, BASE> s, int src, int tag, MPI_Comm comm)
   {
     MPI_Status status;
-    MPI_Recv( &s.First(), s.Size(), MyGetMPIType<T>(), src, tag, comm, &status);
+    MPI_Recv( &s.First(), s.Size(), GetMPIType<T>(), src, tag, comm, &status);
   }
 
   template <class T, int BASE>
-  inline void MyMPI_Recv ( NgArray <T, BASE> & s, int src, int tag, MPI_Comm comm /* = ng_comm */)
+  inline void MyMPI_Recv ( NgArray <T, BASE> & s, int src, int tag, MPI_Comm comm)
   {
     MPI_Status status;
     int len;
     MPI_Probe (src, tag, comm, &status);
-    MPI_Get_count (&status, MyGetMPIType<T>(), &len);
+    MPI_Get_count (&status, GetMPIType<T>(), &len);
 
     s.SetSize (len);
-    MPI_Recv( &s.First(), len, MyGetMPIType<T>(), src, tag, comm, &status);
+    MPI_Recv( &s.First(), len, GetMPIType<T>(), src, tag, comm, &status);
   }
 
   template <class T, int BASE>
-  inline int MyMPI_Recv ( NgArray <T, BASE> & s, int tag, MPI_Comm comm /* = ng_comm */)
+  inline int MyMPI_Recv ( NgArray <T, BASE> & s, int tag, MPI_Comm comm)
   {
     MPI_Status status;
     int len;
@@ -165,10 +111,10 @@ namespace netgen
 
     int src = status.MPI_SOURCE;
 
-    MPI_Get_count (&status, MyGetMPIType<T>(), &len);
+    MPI_Get_count (&status, GetMPIType<T>(), &len);
 
     s.SetSize (len);
-    MPI_Recv( &s.First(), len, MyGetMPIType<T>(), src, tag, comm, &status);
+    MPI_Recv( &s.First(), len, GetMPIType<T>(), src, tag, comm, &status);
 
     return src;
   }
@@ -190,22 +136,22 @@ namespace netgen
   */
 
   template <class T, int BASE>
-  inline MPI_Request MyMPI_ISend (NgFlatArray<T, BASE> s, int dest, int tag, MPI_Comm comm /* = ng_comm */)
+  inline MPI_Request MyMPI_ISend (NgFlatArray<T, BASE> s, int dest, int tag, MPI_Comm comm)
   {
     MPI_Request request;
-    MPI_Isend( &s.First(), s.Size(), MyGetMPIType<T>(), dest, tag, comm, &request);
+    MPI_Isend( &s.First(), s.Size(), GetMPIType<T>(), dest, tag, comm, &request);
     return request;
   }
 
   template <class T, int BASE>
-  inline MPI_Request MyMPI_IRecv (NgFlatArray<T, BASE> s, int dest, int tag, MPI_Comm comm /* = ng_comm */)
+  inline MPI_Request MyMPI_IRecv (NgFlatArray<T, BASE> s, int dest, int tag, MPI_Comm comm)
   {
     MPI_Request request;
-    MPI_Irecv( &s.First(), s.Size(), MyGetMPIType<T>(), dest, tag, comm, &request);
+    MPI_Irecv( &s.First(), s.Size(), GetMPIType<T>(), dest, tag, comm, &request);
     return request;
   }
-  
+
   /*
   template <class T, int BASE>
   inline void MyMPI_ISend (NgFlatArray<T, BASE> s, int dest, int tag)
@@ -232,106 +178,59 @@ namespace netgen
      receive-table entries will be set
   */
 
-  /*
   template <class T>
   inline void MyMPI_ExchangeTable (TABLE<T> & send_data, 
 				   TABLE<T> & recv_data, int tag,
-				   MPI_Comm comm = MPI_COMM_WORLD)
+				   const NgMPI_Comm & comm)
   {
-    int ntasks, rank;
-    MPI_Comm_size(comm, &ntasks);
-    MPI_Comm_rank(comm, &rank);
-
-    NgArray<MPI_Request> requests;
-    for (int dest = 0; dest < ntasks; dest++)
-      if (dest != rank)
-        requests.Append (MyMPI_ISend (send_data[dest], dest, tag, comm));
-
-    for (int i = 0; i < ntasks-1; i++)
-      {
-        MPI_Status status;
-        MPI_Probe (MPI_ANY_SOURCE, tag, comm, &status);
-        int size, src = status.MPI_SOURCE;
-        MPI_Get_count (&status, MPI_INT, &size);
-        recv_data.SetEntrySize (src, size, sizeof(T));
-        requests.Append (MyMPI_IRecv (recv_data[src], src, tag, comm));
-      }
-    MPI_Barrier (comm);
-    MPI_Waitall (requests.Size(), &requests[0], MPI_STATUS_IGNORE);
-  }
-  */
-
-  template <class T>
-  inline void MyMPI_ExchangeTable (TABLE<T> & send_data, 
-                                   TABLE<T> & recv_data, int tag,
-                                   const NgMPI_Comm & comm /* = ng_comm */)
-  {
-    /*
-    int rank = MyMPI_GetId(comm);
-    int ntasks = MyMPI_GetNTasks(comm);
-    */
     int rank = comm.Rank();
     int ntasks = comm.Size();
 
-    NgArray<int> send_sizes(ntasks);
-    NgArray<int> recv_sizes(ntasks);
+    Array<int> send_sizes(ntasks);
+    Array<int> recv_sizes(ntasks);
 
     for (int i = 0; i < ntasks; i++)
      send_sizes[i] = send_data[i].Size();
+
+    comm.AllToAll (send_sizes, recv_sizes);
 
-    MPI_Alltoall (&send_sizes[0], 1, MPI_INT,
-                  &recv_sizes[0], 1, MPI_INT, comm);
-
-    // in-place is buggy !
-    // MPI_Alltoall (MPI_IN_PLACE, 1, MPI_INT,
-    //               &recv_sizes[0], 1, MPI_INT, comm);
-
     for (int i = 0; i < ntasks; i++)
       recv_data.SetEntrySize (i, recv_sizes[i], sizeof(T));
 
-    NgArray<MPI_Request> requests; 
+    Array<MPI_Request> requests; 
     for (int dest = 0; dest < ntasks; dest++)
       if (dest != rank && send_data[dest].Size())
-        requests.Append (MyMPI_ISend (send_data[dest], dest, tag, comm));
+        requests.Append (comm.ISend (FlatArray<T>(send_data[dest]), dest, tag));
 
     for (int dest = 0; dest < ntasks; dest++)
      if (dest != rank && recv_data[dest].Size())
-        requests.Append (MyMPI_IRecv (recv_data[dest], dest, tag, comm));
+        requests.Append (comm.IRecv (FlatArray<T>(recv_data[dest]), dest, tag));
 
-    // MPI_Barrier (comm);
-    MPI_Waitall (requests.Size(), &requests[0], MPI_STATUS_IGNORE);
+    MyMPI_WaitAll (requests);
   }
 
-
-
-
-
-
   extern void MyMPI_SendCmd (const char * cmd);
   extern string MyMPI_RecvCmd ();
 
-
-
   template <class T>
-  inline void MyMPI_Bcast (T & s, MPI_Comm comm /* = ng_comm */)
+  inline void MyMPI_Bcast (T & s, MPI_Comm comm)
   {
-    MPI_Bcast (&s, 1, MyGetMPIType<T>(), 0, comm);
+    MPI_Bcast (&s, 1, GetMPIType<T>(), 0, comm);
   }
 
   template <class T>
-  inline void MyMPI_Bcast (NgArray<T> & s, NgMPI_Comm comm /* = ng_comm */)
+  inline void MyMPI_Bcast (NgArray<T> & s, NgMPI_Comm comm)
   {
     int size = s.Size();
     MyMPI_Bcast (size, comm);
     // if (MyMPI_GetId(comm) != 0) s.SetSize (size);
     if (comm.Rank() != 0) s.SetSize (size);
-    MPI_Bcast (&s[0], size, MyGetMPIType<T>(), 0, comm);
+    MPI_Bcast (&s[0], size, GetMPIType<T>(), 0, comm);
   }
 
   template <class T>
-  inline void MyMPI_Bcast (NgArray<T> & s, int root, MPI_Comm comm /* = ng_comm */)
+  inline void MyMPI_Bcast (NgArray<T> & s, int root, MPI_Comm comm)
   {
     int id;
     MPI_Comm_rank(comm, &id);
@@ -340,67 +239,21 @@ namespace netgen
     MPI_Bcast (&size, 1, MPI_INT, root, comm);
     if (id != root) s.SetSize (size);
     if ( !size ) return;
-    MPI_Bcast (&s[0], size, MyGetMPIType<T>(), root, comm);
+    MPI_Bcast (&s[0], size, GetMPIType<T>(), root, comm);
   }
 
   template <class T>
-  inline void MyMPI_Allgather (const T & send, NgFlatArray<T> recv, MPI_Comm comm /* = ng_comm */)
+  inline void MyMPI_Allgather (const T & send, NgFlatArray<T> recv, MPI_Comm comm)
   {
-    MPI_Allgather( const_cast<T*> (&send), 1, MyGetMPIType<T>(), &recv[0], 1, MyGetMPIType<T>(), comm);
+    MPI_Allgather( const_cast<T*> (&send), 1, GetMPIType<T>(), &recv[0], 1, GetMPIType<T>(), comm);
   }
 
   template <class T>
-  inline void MyMPI_Alltoall (NgFlatArray<T> send, NgFlatArray<T> recv, MPI_Comm comm /* = ng_comm */)
+  inline void MyMPI_Alltoall (NgFlatArray<T> send, NgFlatArray<T> recv, MPI_Comm comm)
   {
-    MPI_Alltoall( &send[0], 1, MyGetMPIType<T>(), &recv[0], 1, MyGetMPIType<T>(), comm);
+    MPI_Alltoall( &send[0], 1, GetMPIType<T>(), &recv[0], 1, GetMPIType<T>(), comm);
   }
 
-// template <class T>
-// inline void MyMPI_Alltoall_Block (NgFlatArray<T> send, NgFlatArray<T> recv, int blocklen, MPI_Comm comm = ng_comm)
-// {
-//   MPI_Alltoall( &send[0], blocklen, MyGetMPIType<T>(), &recv[0], blocklen, MyGetMPIType<T>(), comm);
-// }
-
-
-
-  /*
-  inline void MyMPI_Send ( int *& s, int len, int dest, int tag)
-  {
-    int hlen = len;
-    MPI_Send( &hlen, 1, MPI_INT, dest, tag, MPI_COMM_WORLD);
-    MPI_Send( s, len, MPI_INT, dest, tag, MPI_COMM_WORLD);
-  }
-
-
-  inline void MyMPI_Recv ( int *& s, int & len, int src, int tag)
-  {
-    MPI_Status status;
-    MPI_Recv( &len, 1, MPI_INT, src, tag, MPI_COMM_WORLD, &status);
-    if ( s )
-      delete [] s;
-    s = new int [len];
-    MPI_Recv( s, len, MPI_INT, src, tag, MPI_COMM_WORLD, &status);
-  }
-
-
-
-  inline void MyMPI_Send ( double * s, int len, int dest, int tag)
-  {
-    MPI_Send( &len, 1, MPI_INT, dest, tag, MPI_COMM_WORLD);
-    MPI_Send( s, len, MPI_DOUBLE, dest, tag, MPI_COMM_WORLD);
-  }
-
-
-  inline void MyMPI_Recv ( double *& s, int & len, int src, int tag)
-  {
-    MPI_Status status;
-    MPI_Recv( &len, 1, MPI_INT, src, tag, MPI_COMM_WORLD, &status);
-    if ( s )
-      delete [] s;
-    s = new double [len];
-    MPI_Recv( s, len, MPI_DOUBLE, src, tag, MPI_COMM_WORLD, &status);
-  }
-  */
 
 #endif // PARALLEL
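With the deprecations steering callers towards comm.Send/comm.Recv, the
rewritten MyMPI_ExchangeTable above now runs in two phases: one collective
AllToAll that exchanges the row sizes, then one non-blocking ISend/IRecv per
non-empty row, closed by a single MyMPI_WaitAll. A caller sketch
(hypothetical demo; it assumes TABLE<T>(n) creates n empty rows and
TABLE::Add appends to a row, as the hunks in this patch suggest):

    void ExchangeDemo (const NgMPI_Comm & comm)
    {
      int ntasks = comm.Size();
      TABLE<int> send_data(ntasks), recv_data(ntasks);
      for (int dest = 0; dest < ntasks; dest++)
        send_data.Add (dest, comm.Rank());   // one entry for every peer
      MyMPI_ExchangeTable (send_data, recv_data, MPI_TAG_MESH+9, comm);
      // recv_data[src] now holds the entries rank src appended for this rank
    }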
diff --git a/libsrc/general/ngarray.hpp b/libsrc/general/ngarray.hpp
index fa160a8b..05c773a3 100644
--- a/libsrc/general/ngarray.hpp
+++ b/libsrc/general/ngarray.hpp
@@ -205,6 +205,12 @@ namespace netgen
     {
       return ( Pos(elem) >= 0 );
     }
+
+    operator FlatArray<T> () const
+    {
+      static_assert (BASE==0);
+      return FlatArray<T>(size, data);
+    }
   };
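The conversion operator added above is what lets the legacy containers pass
into the ngcore wrappers without copying: a zero-based array exposes its
size and data pointer as an ngcore::FlatArray view, and the static_assert
rejects arrays with a nonzero BASE, whose raw pointer would not describe the
same view. A sketch of the idiom (hypothetical helper; comm.ISend returning
an MPI_Request is used the same way in the ExchangeTable hunk above):

    void SendRow (const NgMPI_Comm & comm, TABLE<int> & table, int dest, int tag)
    {
      FlatArray<int> row = table[dest];   // legacy row -> ngcore view, no copy
      MPI_Request req = comm.ISend (row, dest, tag);
      MPI_Wait (&req, MPI_STATUS_IGNORE);
    }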
diff --git a/libsrc/gprim/geomobjects.hpp b/libsrc/gprim/geomobjects.hpp
index fe8171bf..48f74680 100644
--- a/libsrc/gprim/geomobjects.hpp
+++ b/libsrc/gprim/geomobjects.hpp
@@ -478,7 +478,7 @@ namespace netgen
   };
 
 
-#ifdef PARALLEL
+#ifdef PARALLEL_OLD
   template <>
   inline MPI_Datatype MyGetMPIType<Point<3>> ()
   {

diff --git a/libsrc/meshing/meshtype.cpp b/libsrc/meshing/meshtype.cpp
index f512c683..65e83408 100644
--- a/libsrc/meshing/meshtype.cpp
+++ b/libsrc/meshing/meshtype.cpp
@@ -30,14 +30,14 @@ namespace netgen
                                (char*)&hp.layer - (char*)&hp,
                                (char*)&hp.singular - (char*)&hp };
       MPI_Datatype types[] = { MPI_DOUBLE, MPI_INT, MPI_DOUBLE };
-      *testout << "displ = " << displ[0] << ", " << displ[1] << ", " << displ[2] << endl;
-      *testout << "sizeof = " << sizeof (MeshPoint) << endl;
+      // *testout << "displ = " << displ[0] << ", " << displ[1] << ", " << displ[2] << endl;
+      // *testout << "sizeof = " << sizeof (MeshPoint) << endl;
       MPI_Type_create_struct (3, blocklen, displ, types, &htype);
       MPI_Type_commit ( &htype );
       MPI_Aint lb, ext;
       MPI_Type_get_extent (htype, &lb, &ext);
-      *testout << "lb = " << lb << endl;
-      *testout << "ext = " << ext << endl;
+      // *testout << "lb = " << lb << endl;
+      // *testout << "ext = " << ext << endl;
       ext = sizeof (MeshPoint);
       MPI_Type_create_resized (htype, lb, ext, &type);
       MPI_Type_commit ( &type );
diff --git a/libsrc/meshing/parallelmesh.cpp b/libsrc/meshing/parallelmesh.cpp
index fac0b2ef..5c869aaa 100644
--- a/libsrc/meshing/parallelmesh.cpp
+++ b/libsrc/meshing/parallelmesh.cpp
@@ -25,13 +25,19 @@ namespace metis {
   using namespace metis;
 #endif
 
+namespace ngcore {
+  template <> struct MPI_typetrait<netgen::PointIndex> {
+    static MPI_Datatype MPIType () { return MPI_INT; } };
+}
+
 namespace netgen
 {
+  /*
   template <>
   inline MPI_Datatype MyGetMPIType<PointIndex> ( )
   { return MPI_INT; }
-
+  */
 
   void Mesh :: SendRecvMesh ()
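ngcore resolves GetMPIType<T>() through the MPI_typetrait<T> trait, so the
single specialization above replaces the per-file MyGetMPIType
specializations this patch retires. Registering another type with plain int
layout follows the same pattern (MyElementId is a made-up example, not part
of netgen):

    struct MyElementId { int nr; };   // hypothetical type stored as one int

    namespace ngcore {
      template <> struct MPI_typetrait<MyElementId> {
        static MPI_Datatype MPIType () { return MPI_INT; } };
    }

    // from here on GetMPIType<MyElementId>() works, and with it the generic
    // wrappers such as comm.Send, comm.Recv and comm.AllToAll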
diff --git a/libsrc/meshing/paralleltop.cpp b/libsrc/meshing/paralleltop.cpp
index c410f425..9968ddd7 100644
--- a/libsrc/meshing/paralleltop.cpp
+++ b/libsrc/meshing/paralleltop.cpp
@@ -460,24 +460,6 @@ namespace netgen
     MyMPI_ExchangeTable (send_edges, recv_edges, MPI_TAG_MESH+9, MPI_LocalComm);
     // cout << "UpdateCoarseGrid - edges mpi-exchange done" << endl;
 
-    /*
-    for (int dest = 1; dest < ntasks; dest++)
-      {
-        auto ex2loc = dest2vert[dest-1];
-        NgFlatArray<int> recvarray = recv_edges[dest-1];
-        for (int ii = 0; ii < recvarray.Size(); ii+=2)
-          for (int edge : dest2edge[dest-1])
-            {
-              topology.GetEdgeVertices (edge, v1, v2);
-              INDEX_2 re(ex2loc[recvarray[ii]],
-                         ex2loc[recvarray[ii+1]]);
-              INDEX_2 es(v1, v2);
-              if (es == re)
-                SetDistantEdgeNum(dest, edge);
-            }
-      }
-    */
-
     for (int dest = 1; dest < ntasks; dest++)
       {
         auto ex2loc = dest2vert[dest-1];
@@ -504,8 +486,6 @@ namespace netgen
 
     NgProfiler::StopTimer (timere);
 
-    // MPI_Barrier (MPI_LocalComm);
-
     // cout << "UpdateCoarseGrid - faces" << endl;
     if (mesh.GetDimension() == 3)
       {
@@ -543,13 +523,6 @@ namespace netgen
       for (int dest = 1; dest < ntasks; dest++)
        if (dest != id)
          {
-            /*
-            loc2exchange = -1;
-            int cnt = 0;
-            for (PointIndex pi : mesh.Points().Range())
-              if (IsExchangeVert(dest, pi))
-                loc2exchange[pi] = cnt++;
-            */
            if (dest2vert[dest-1].Size() == 0) continue;
 
            loc2exchange = -1;
@@ -575,29 +548,6 @@ namespace netgen
       TABLE<int> recv_faces(ntasks-1);
       MyMPI_ExchangeTable (send_faces, recv_faces, MPI_TAG_MESH+9, MPI_LocalComm);
       // cout << "UpdateCoarseGrid - faces mpi-exchange done" << endl;
-
-      /*
-      for (int dest = 1; dest < ntasks; dest++)
-        if (dest != id)
-          {
-            loc2exchange = -1;
-            int cnt = 0;
-            for (PointIndex pi : dest2vert[dest-1])
-              loc2exchange[pi] = cnt++;
-
-            NgFlatArray<int> recvarray = recv_faces[dest-1];
-            for (int ii = 0; ii < recvarray.Size(); ii+=3)
-              for (int face : dest2face[dest-1])
-                {
-                  topology.GetFaceVertices (face, verts);
-                  INDEX_3 re(recvarray[ii], recvarray[ii+1], recvarray[ii+2]);
-                  INDEX_3 es(loc2exchange[verts[0]], loc2exchange[verts[1]], loc2exchange[verts[2]]);
-                  if (es == re)
-                    SetDistantFaceNum(dest, face);
-                }
-          }
-      */
-
       for (int dest = 1; dest < ntasks; dest++)
        {
@@ -622,77 +572,6 @@ namespace netgen
            }
        }
 
-
-
-
-
-      /*
-      NgArray<int> glob2loc;
-
-      int maxface = 0;
-      for (int face = 1; face <= nfa; face++)
-        maxface = max (maxface, GetGlobalFaceNum (face));
-
-      // glob2loc.SetSize (nfaglob);
-      glob2loc.SetSize (maxface);
-      glob2loc = -1;
-
-      for (int loc = 1; loc <= nfa; loc++)
-        glob2loc[GetGlobalFaceNum(loc)] = loc;
-
-      cnt_send = 0;
-      NgArray<int> verts;
-      for (int face = 1; face <= nfa; face++)
-        {
-          topology.GetFaceVertices (face, verts);
-          for (int dest = 1; dest < ntasks; dest++)
-            if (IsExchangeVert (dest, verts[0]) &&
-                IsExchangeVert (dest, verts[1]) &&
-                IsExchangeVert (dest, verts[2]))
-              {
-                cnt_send[dest-1]+=2;
-              }
-        }
-
-      TABLE<int> send_faces(cnt_send);
-      for (int face = 1; face <= nfa; face++)
-        {
-          topology.GetFaceVertices (face, verts);
-          for (int dest = 1; dest < ntasks; dest++)
-            {
-              if (IsExchangeVert (dest, verts[0]) &&
-                  IsExchangeVert (dest, verts[1]) &&
-                  IsExchangeVert (dest, verts[2]))
-                {
-                  send_faces.Add (dest-1, GetGlobalFaceNum(face));
-                  send_faces.Add (dest-1, face);
-                }
-            }
-        }
-      TABLE<int> recv_faces(ntasks-1);
-      MyMPI_ExchangeTable (send_faces, recv_faces, MPI_TAG_MESH+8, MPI_LocalComm);
-
-      for (int sender = 1; sender < ntasks; sender ++)
-        if (id != sender)
-          {
-            NgFlatArray<int> recvarray = recv_faces[sender-1];
-
-            for (int ii = 0; ii < recvarray.Size(); )
-              {
-                int globf = recvarray[ii++];
-                int distf = recvarray[ii++];
-
-                if (globf <= maxface)
-                  {
-                    int locf = glob2loc[globf];
-                    if (locf != -1)
-                      SetDistantFaceNum (sender, locf);
-                  }
-              }
-          }
-      */
-
       NgProfiler::StopTimer (timerf);
     }
   // cout << "UpdateCoarseGrid - done" << endl;
diff --git a/libsrc/meshing/python_mesh.cpp b/libsrc/meshing/python_mesh.cpp
index 47dc99c6..8f309d5e 100644
--- a/libsrc/meshing/python_mesh.cpp
+++ b/libsrc/meshing/python_mesh.cpp
@@ -69,23 +69,6 @@ namespace netgen
   extern bool netgen_executable_started;
   extern shared_ptr<NetgenGeometry> ng_geometry;
   extern void Optimize2d (Mesh & mesh, MeshingParameters & mp);
-
-#ifdef PARALLEL
-  /** we need allreduce in python-wrapped communicators **/
-  template <typename T>
-  inline T MyMPI_AllReduceNG (T d, const MPI_Op & op /* = MPI_SUM */, MPI_Comm comm)
-  {
-    T global_d;
-    MPI_Allreduce ( &d, &global_d, 1, MyGetMPIType<T>(), op, comm);
-    return global_d;
-  }
-#else
-  // enum { MPI_SUM = 0, MPI_MIN = 1, MPI_MAX = 2 };
-  // typedef int MPI_Op;
-  template <typename T>
-  inline T MyMPI_AllReduceNG (T d, const MPI_Op & op /* = MPI_SUM */, MPI_Comm comm)
-  { return d; }
-#endif
 }
 
 
@@ -140,15 +123,15 @@ DLL_HEADER void ExportNetgenMeshing(py::module &m)
 #else
     .def("WTime", [](NgMPI_Comm  & c) { return -1.0; })
 #endif
-    .def("Sum", [](NgMPI_Comm  & c, double x) { return MyMPI_AllReduceNG(x, MPI_SUM, c); })
-    .def("Min", [](NgMPI_Comm  & c, double x) { return MyMPI_AllReduceNG(x, MPI_MIN, c); })
-    .def("Max", [](NgMPI_Comm  & c, double x) { return MyMPI_AllReduceNG(x, MPI_MAX, c); })
-    .def("Sum", [](NgMPI_Comm  & c, int x) { return MyMPI_AllReduceNG(x, MPI_SUM, c); })
-    .def("Min", [](NgMPI_Comm  & c, int x) { return MyMPI_AllReduceNG(x, MPI_MIN, c); })
-    .def("Max", [](NgMPI_Comm  & c, int x) { return MyMPI_AllReduceNG(x, MPI_MAX, c); })
-    .def("Sum", [](NgMPI_Comm  & c, size_t x) { return MyMPI_AllReduceNG(x, MPI_SUM, c); })
-    .def("Min", [](NgMPI_Comm  & c, size_t x) { return MyMPI_AllReduceNG(x, MPI_MIN, c); })
-    .def("Max", [](NgMPI_Comm  & c, size_t x) { return MyMPI_AllReduceNG(x, MPI_MAX, c); })
+    .def("Sum", [](NgMPI_Comm  & c, double x) { return c.AllReduce(x, MPI_SUM); })
+    .def("Min", [](NgMPI_Comm  & c, double x) { return c.AllReduce(x, MPI_MIN); })
+    .def("Max", [](NgMPI_Comm  & c, double x) { return c.AllReduce(x, MPI_MAX); })
+    .def("Sum", [](NgMPI_Comm  & c, int x) { return c.AllReduce(x, MPI_SUM); })
+    .def("Min", [](NgMPI_Comm  & c, int x) { return c.AllReduce(x, MPI_MIN); })
+    .def("Max", [](NgMPI_Comm  & c, int x) { return c.AllReduce(x, MPI_MAX); })
+    .def("Sum", [](NgMPI_Comm  & c, size_t x) { return c.AllReduce(x, MPI_SUM); })
+    .def("Min", [](NgMPI_Comm  & c, size_t x) { return c.AllReduce(x, MPI_MIN); })
+    .def("Max", [](NgMPI_Comm  & c, size_t x) { return c.AllReduce(x, MPI_MAX); })
     .def("SubComm", [](NgMPI_Comm & c, std::vector<int> proc_list) {
         Array<int> procs(proc_list.size());
         for (int i = 0; i < procs.Size(); i++)
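For reference, the nine lambdas above now all forward to ngcore's single
templated reduction instead of the removed MyMPI_AllReduceNG helper; on the
C++ side the equivalent call is simply (a sketch, assuming
NgMPI_Comm::AllReduce as used in the bindings):

    double SumAcrossRanks (const NgMPI_Comm & comm, double local)
    {
      return comm.AllReduce (local, MPI_SUM);   // same for MPI_MIN / MPI_MAX
    }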