diff --git a/libsrc/core/mpi_wrapper.hpp b/libsrc/core/mpi_wrapper.hpp
index b258eda4..7e55faf5 100644
--- a/libsrc/core/mpi_wrapper.hpp
+++ b/libsrc/core/mpi_wrapper.hpp
@@ -101,20 +101,52 @@ namespace ngcore
     operator MPI_Comm() const { return comm; }
 
-    int Rank() const { return rank; } // int r; MPI_Comm_rank(comm, &r); return r; }
-    int Size() const { return size; } // int s; MPI_Comm_size(comm, &s); return s; }
+    int Rank() const { return rank; }
+    int Size() const { return size; }
+
+    void Barrier() const {
+      if (size > 1) MPI_Barrier (comm);
+    }
+
     template <typename T, typename T2 = decltype(GetMPIType<T>())>
-    void Send( T & val, int dest, int tag) {
+    void Send (T & val, int dest, int tag) const {
       MPI_Send (&val, 1, GetMPIType<T>(), dest, tag, comm);
     }
 
     template <typename T, typename T2 = decltype(GetMPIType<T>())>
-    void MyMPI_Recv (T & val, int src, int tag) {
+    void MyMPI_Recv (T & val, int src, int tag) const {
       MPI_Recv (&val, 1, GetMPIType<T>(), src, tag, comm, MPI_STATUS_IGNORE);
     }
+
+    /** --- collectives --- **/
+
+    template <typename T, typename T2 = decltype(GetMPIType<T>())>
+    T AllReduce (T d, const MPI_Op & op) const
+    {
+      if (size == 1) return d;
+
+      T global_d;
+      MPI_Allreduce ( &d, &global_d, 1, GetMPIType<T>(), op, comm);
+      return global_d;
+    }
+
+    template <typename T, typename T2 = decltype(GetMPIType<T>())>
+    void Bcast (T & s, int root = 0) const {
+      if (size == 1) return;
+      MPI_Bcast (&s, 1, GetMPIType<T>(), root, comm);
+    }
+
+    void Bcast (std::string & s, int root = 0) const
+    {
+      if (size == 1) return;
+      int len = s.length();
+      Bcast (len, root);
+      if (rank != 0) s.resize (len);
+      MPI_Bcast (&s[0], len, MPI_CHAR, root, comm);
+    }
+
   };
 
@@ -138,14 +170,20 @@ namespace ngcore
     size_t Rank() const { return 0; }
     size_t Size() const { return 1; }
-
+    void Barrier() const { ; }
 
     operator MPI_Comm() const { return MPI_Comm(); }
 
     template <typename T>
-    void Send( T & val, int dest, int tag) { ; }
+    void Send( T & val, int dest, int tag) const { ; }
 
     template <typename T>
-    void MyMPI_Recv (T & val, int src, int tag) { ; }
+    void MyMPI_Recv (T & val, int src, int tag) const { ; }
+
+    template <typename T, typename T2 = decltype(GetMPIType<T>())>
+    T AllReduce (T d, const MPI_Op & op) const { return d; }
+
+    template <typename T, typename T2 = decltype(GetMPIType<T>())>
+    INLINE void Bcast (T & s, int root = 0) const { ; }
   };
 
 #endif
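The hunk above gives `NgMPI_Comm` guarded collectives (`Barrier`, `AllReduce`, `Bcast`) that fall through on a size-1 communicator, so serial runs never enter MPI. A minimal caller-side sketch of the intended use; this is not part of the patch, and the include path and helper name are assumptions:

```cpp
#include <mpi.h>
#include <string>
#include "core/mpi_wrapper.hpp"   // assumed include path for the header above

// hypothetical helper: sum a local value over all ranks of a communicator
double GlobalSum (const ngcore::NgMPI_Comm & comm, double local)
{
  // no-op on a size-1 communicator, MPI_Allreduce otherwise
  double global = comm.AllReduce (local, MPI_SUM);

  std::string msg = (comm.Rank() == 0) ? "summed" : "";
  comm.Bcast (msg);    // string overload resizes non-root ranks before the Bcast

  comm.Barrier ();     // guarded internally: skipped when Size() == 1
  return global;
}
```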
diff --git a/libsrc/general/mpi_interface.hpp b/libsrc/general/mpi_interface.hpp
index 15e13171..6dc3070f 100644
--- a/libsrc/general/mpi_interface.hpp
+++ b/libsrc/general/mpi_interface.hpp
@@ -14,8 +14,8 @@
 namespace netgen
 {
-  using ngcore::id;
-  using ngcore::ntasks;
+  // using ngcore::id;
+  // using ngcore::ntasks;
 
 #ifndef PARALLEL
   /** without MPI, we need a dummy typedef **/
@@ -23,16 +23,17 @@ namespace netgen
 #endif
 
   /** This is the "standard" communicator that will be used for netgen-objects. **/
-  extern DLL_HEADER NgMPI_Comm ng_comm;
+  // extern DLL_HEADER NgMPI_Comm ng_comm;
 
+#ifdef OLD
 #ifdef PARALLEL
-  inline int MyMPI_GetNTasks (MPI_Comm comm = ng_comm)
+  inline int MyMPI_GetNTasks (MPI_Comm comm /* = ng_comm */)
   {
     int ntasks;
     MPI_Comm_size(comm, &ntasks);
     return ntasks;
   }
-  inline int MyMPI_GetId (MPI_Comm comm = ng_comm)
+  inline int MyMPI_GetId (MPI_Comm comm /* = ng_comm */)
   {
     int id;
     MPI_Comm_rank(comm, &id);
@@ -40,10 +41,11 @@ namespace netgen
   }
 #else
   // enum { MPI_COMM_WORLD = 12345, MPI_COMM_NULL = 0};
-  inline int MyMPI_GetNTasks (MPI_Comm comm = ng_comm) { return 1; }
-  inline int MyMPI_GetId (MPI_Comm comm = ng_comm) { return 0; }
+  inline int MyMPI_GetNTasks (MPI_Comm comm /* = ng_comm */) { return 1; }
+  inline int MyMPI_GetId (MPI_Comm comm /* = ng_comm */) { return 0; }
 #endif
-
+#endif
+
   /*
 #ifdef PARALLEL
   // For python wrapping of communicators
@@ -112,13 +114,13 @@ namespace netgen
   enum { MPI_TAG_MESH = 210 };
   enum { MPI_TAG_VIS = 310 };
 
-  inline void MyMPI_Send (int i, int dest, int tag, MPI_Comm comm = ng_comm)
+  inline void MyMPI_Send (int i, int dest, int tag, MPI_Comm comm /* = ng_comm */)
   {
     int hi = i;
     MPI_Send( &hi, 1, MPI_INT, dest, tag, comm);
   }
 
-  inline void MyMPI_Recv (int & i, int src, int tag, MPI_Comm comm = ng_comm)
+  inline void MyMPI_Recv (int & i, int src, int tag, MPI_Comm comm /* = ng_comm */)
   {
     MPI_Status status;
     MPI_Recv( &i, 1, MPI_INT, src, tag, comm, &status);
@@ -126,12 +128,12 @@ namespace netgen
   }
 
 
-  inline void MyMPI_Send (const string & s, int dest, int tag, MPI_Comm comm = ng_comm)
+  inline void MyMPI_Send (const string & s, int dest, int tag, MPI_Comm comm /* = ng_comm */)
   {
     MPI_Send( const_cast<char*> (s.c_str()), s.length(), MPI_CHAR, dest, tag, comm);
   }
 
-  inline void MyMPI_Recv (string & s, int src, int tag, MPI_Comm comm = ng_comm)
+  inline void MyMPI_Recv (string & s, int src, int tag, MPI_Comm comm /* = ng_comm */)
   {
     MPI_Status status;
     int len;
@@ -145,20 +147,20 @@ namespace netgen
 
   template <class T>
-  inline void MyMPI_Send (FlatArray<T> s, int dest, int tag, MPI_Comm comm = ng_comm)
+  inline void MyMPI_Send (FlatArray<T> s, int dest, int tag, MPI_Comm comm /* = ng_comm */)
   {
     MPI_Send( &s.First(), s.Size(), MyGetMPIType<T>(), dest, tag, comm);
   }
 
   template <class T>
-  inline void MyMPI_Recv ( FlatArray<T> s, int src, int tag, MPI_Comm comm = ng_comm)
+  inline void MyMPI_Recv ( FlatArray<T> s, int src, int tag, MPI_Comm comm /* = ng_comm */)
   {
     MPI_Status status;
     MPI_Recv( &s.First(), s.Size(), MyGetMPIType<T>(), src, tag, comm, &status);
   }
 
   template <class T>
-  inline void MyMPI_Recv ( Array<T> & s, int src, int tag, MPI_Comm comm = ng_comm)
+  inline void MyMPI_Recv ( Array<T> & s, int src, int tag, MPI_Comm comm /* = ng_comm */)
   {
     MPI_Status status;
     int len;
@@ -170,7 +172,7 @@ namespace netgen
   }
 
   template <class T>
-  inline int MyMPI_Recv ( Array<T> & s, int tag, MPI_Comm comm = ng_comm)
+  inline int MyMPI_Recv ( Array<T> & s, int tag, MPI_Comm comm /* = ng_comm */)
   {
     MPI_Status status;
     int len;
@@ -203,7 +205,7 @@ namespace netgen
   */
 
   template <class T>
-  inline MPI_Request MyMPI_ISend (FlatArray<T> s, int dest, int tag, MPI_Comm comm = ng_comm)
+  inline MPI_Request MyMPI_ISend (FlatArray<T> s, int dest, int tag, MPI_Comm comm /* = ng_comm */)
   {
     MPI_Request request;
     MPI_Isend( &s.First(), s.Size(), MyGetMPIType<T>(), dest, tag, comm, &request);
@@ -212,7 +214,7 @@ namespace netgen
 
   template <class T>
-  inline MPI_Request MyMPI_IRecv (FlatArray<T> s, int dest, int tag, MPI_Comm comm = ng_comm)
+  inline MPI_Request MyMPI_IRecv (FlatArray<T> s, int dest, int tag, MPI_Comm comm /* = ng_comm */)
   {
     MPI_Request request;
     MPI_Irecv( &s.First(), s.Size(), MyGetMPIType<T>(), dest, tag, comm, &request);
@@ -277,11 +279,15 @@ namespace netgen
 
   template <class T>
   inline void MyMPI_ExchangeTable (TABLE<T> & send_data,
                                    TABLE<T> & recv_data, int tag,
-                                   MPI_Comm comm = ng_comm)
+                                   const NgMPI_Comm & comm /* = ng_comm */)
   {
+    /*
     int rank = MyMPI_GetId(comm);
     int ntasks = MyMPI_GetNTasks(comm);
-
+    */
+    int rank = comm.Rank();
+    int ntasks = comm.Size();
+
     Array<int> send_sizes(ntasks);
     Array<int> recv_sizes(ntasks);
     for (int i = 0; i < ntasks; i++)
@@ -324,22 +330,23 @@ namespace netgen
 
   template <class T>
-  inline void MyMPI_Bcast (T & s, MPI_Comm comm = ng_comm)
+  inline void MyMPI_Bcast (T & s, MPI_Comm comm /* = ng_comm */)
   {
     MPI_Bcast (&s, 1, MyGetMPIType<T>(), 0, comm);
   }
 
   template <class T>
-  inline void MyMPI_Bcast (Array<T> & s, MPI_Comm comm = ng_comm)
+  inline void MyMPI_Bcast (Array<T> & s, NgMPI_Comm comm /* = ng_comm */)
   {
     int size = s.Size();
     MyMPI_Bcast (size, comm);
-    if (MyMPI_GetId(comm) != 0) s.SetSize (size);
+    // if (MyMPI_GetId(comm) != 0) s.SetSize (size);
+    if (comm.Rank() != 0) s.SetSize (size);
     MPI_Bcast (&s[0], size, MyGetMPIType<T>(), 0, comm);
   }
 
   template <class T>
-  inline void MyMPI_Bcast (Array<T> & s, int root, MPI_Comm comm = ng_comm)
+  inline void MyMPI_Bcast (Array<T> & s, int root, MPI_Comm comm /* = ng_comm */)
   {
     int id;
     MPI_Comm_rank(comm, &id);
@@ -352,13 +359,13 @@ namespace netgen
   }
 
   template <class T>
-  inline void MyMPI_Allgather (const T & send, FlatArray<T> recv, MPI_Comm comm = ng_comm)
+  inline void MyMPI_Allgather (const T & send, FlatArray<T> recv, MPI_Comm comm /* = ng_comm */)
   {
     MPI_Allgather( const_cast<T*> (&send), 1, MyGetMPIType<T>(), &recv[0], 1, MyGetMPIType<T>(), comm);
   }
 
   template <class T>
-  inline void MyMPI_Alltoall (FlatArray<T> send, FlatArray<T> recv, MPI_Comm comm = ng_comm)
+  inline void MyMPI_Alltoall (FlatArray<T> send, FlatArray<T> recv, MPI_Comm comm /* = ng_comm */)
   {
     MPI_Alltoall( &send[0], 1, MyGetMPIType<T>(), &recv[0], 1, MyGetMPIType<T>(), comm);
   }
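Throughout this header the implicit `ng_comm` default disappears and the old rank/size helpers are fenced behind `#ifdef OLD`. The call-site migration this forces looks like the following sketch (`ReportRank` is an invented name; the include path is an assumption):

```cpp
#include <iostream>
#include "core/mpi_wrapper.hpp"   // assumed include path

void ReportRank (const ngcore::NgMPI_Comm & comm)
{
  // before: int rank = MyMPI_GetId(comm);  int ntasks = MyMPI_GetNTasks(comm);
  int rank   = comm.Rank();    // returns the cached member, no MPI call
  int ntasks = comm.Size();
  std::cout << "rank " << rank << " of " << ntasks << "\n";
}
```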
diff --git a/libsrc/include/nginterface.h b/libsrc/include/nginterface.h
index 70d440d8..6c0cd284 100644
--- a/libsrc/include/nginterface.h
+++ b/libsrc/include/nginterface.h
@@ -40,7 +40,7 @@
 // #ifndef PARALLEL
 // typedef int MPI_Comm;
 // #endif
-namespace netgen { extern DLL_HEADER ngcore::NgMPI_Comm ng_comm; }
+// namespace netgen { extern DLL_HEADER ngcore::NgMPI_Comm ng_comm; }
 
 
 // implemented element types:
@@ -66,7 +66,7 @@ extern "C" {
   DLL_HEADER void Ng_LoadGeometry (const char * filename);
 
   // load netgen mesh
-  DLL_HEADER void Ng_LoadMesh (const char * filename, ngcore::NgMPI_Comm comm = netgen::ng_comm);
+  DLL_HEADER void Ng_LoadMesh (const char * filename, ngcore::NgMPI_Comm comm = ngcore::NgMPI_Comm{});
 
   // load netgen mesh
   DLL_HEADER void Ng_LoadMeshFromString (const char * mesh_as_string);
diff --git a/libsrc/include/nginterface_v2.hpp b/libsrc/include/nginterface_v2.hpp
index 6d1eb0b5..24b9b3e9 100644
--- a/libsrc/include/nginterface_v2.hpp
+++ b/libsrc/include/nginterface_v2.hpp
@@ -36,7 +36,7 @@ namespace netgen
   using namespace std;
   using namespace ngcore;
 
-  extern DLL_HEADER NgMPI_Comm ng_comm;
+  // extern DLL_HEADER NgMPI_Comm ng_comm;
 
   static constexpr int POINTINDEX_BASE = 1;
@@ -265,11 +265,11 @@ namespace netgen
     /** reuse a netgen-mesh **/
     Ngx_Mesh (shared_ptr<Mesh> amesh);
     /** load a new mesh **/
-    Ngx_Mesh (string filename, MPI_Comm acomm = netgen::ng_comm);
+    Ngx_Mesh (string filename, NgMPI_Comm acomm = NgMPI_Comm{});
 
-    void LoadMesh (const string & filename, MPI_Comm comm = netgen::ng_comm);
+    void LoadMesh (const string & filename, NgMPI_Comm comm = NgMPI_Comm{});
 
-    void LoadMesh (istream & str, MPI_Comm comm = netgen::ng_comm);
+    void LoadMesh (istream & str, NgMPI_Comm comm = NgMPI_Comm{});
     void SaveMesh (ostream & str) const;
     void UpdateTopology ();
     void DoArchive (Archive & archive);
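With `netgen::ng_comm` gone, a default-constructed `NgMPI_Comm` (serial, size 1) becomes the default argument. A hypothetical caller, with a made-up filename; the `MPI_COMM_WORLD` wrapping copies the pattern from ngappinit.cpp further down:

```cpp
#include <mpi.h>
#include "nginterface_v2.hpp"   // assumed include path

void LoadMeshes ()
{
  // default argument NgMPI_Comm{}: a serial load, no MPI involved
  netgen::Ngx_Mesh serial_mesh ("box.vol");   // "box.vol" is an invented filename

  // explicit communicator; second argument follows ngappinit.cpp's usage
  netgen::Ngx_Mesh par_mesh ("box.vol", ngcore::NgMPI_Comm{MPI_COMM_WORLD, false});
}
```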
diff --git a/libsrc/interface/nginterface.cpp b/libsrc/interface/nginterface.cpp
index 8abaa026..e23d2bf5 100644
--- a/libsrc/interface/nginterface.cpp
+++ b/libsrc/interface/nginterface.cpp
@@ -119,8 +119,8 @@ void Ng_LoadMeshFromStream ( istream & input )
 
 void Ng_LoadMesh (const char * filename, ngcore::NgMPI_Comm comm)
 {
-  int id = MyMPI_GetId(comm);
-  int ntasks = MyMPI_GetNTasks(comm);
+  int id = comm.Rank();
+  int ntasks = comm.Size();
 
   {
     ifstream infile(filename);
diff --git a/libsrc/interface/nginterface_v2.cpp b/libsrc/interface/nginterface_v2.cpp
index e68393f1..5da74e58 100644
--- a/libsrc/interface/nginterface_v2.cpp
+++ b/libsrc/interface/nginterface_v2.cpp
@@ -33,17 +33,17 @@ namespace netgen
   Ngx_Mesh :: Ngx_Mesh (shared_ptr<Mesh> amesh)
   { mesh = amesh ? amesh : netgen::mesh; }
 
-  Ngx_Mesh :: Ngx_Mesh (string filename, MPI_Comm acomm)
+  Ngx_Mesh :: Ngx_Mesh (string filename, NgMPI_Comm acomm)
   { LoadMesh(filename, acomm); }
 
-  Ngx_Mesh * LoadMesh (const string & filename, MPI_Comm comm = netgen::ng_comm)
+  Ngx_Mesh * LoadMesh (const string & filename, NgMPI_Comm comm)
   {
     netgen::mesh.reset();
     Ng_LoadMesh (filename.c_str(), comm);
     return new Ngx_Mesh (netgen::mesh);
   }
 
-  void Ngx_Mesh :: LoadMesh (const string & filename, MPI_Comm comm)
+  void Ngx_Mesh :: LoadMesh (const string & filename, NgMPI_Comm comm)
   {
     netgen::mesh.reset();
     Ng_LoadMesh (filename.c_str(), comm);
@@ -51,7 +51,7 @@ namespace netgen
     mesh = netgen::mesh;
   }
 
-  void Ngx_Mesh :: LoadMesh (istream & ist, MPI_Comm comm)
+  void Ngx_Mesh :: LoadMesh (istream & ist, NgMPI_Comm comm)
   {
     netgen::mesh = make_shared<Mesh>();
     netgen::mesh->SetCommunicator(comm);
diff --git a/libsrc/meshing/curvedelems.cpp b/libsrc/meshing/curvedelems.cpp
index 182945d3..e694968c 100644
--- a/libsrc/meshing/curvedelems.cpp
+++ b/libsrc/meshing/curvedelems.cpp
@@ -553,18 +553,19 @@ namespace netgen
     order = 1;
 
-    MPI_Comm curve_comm;
+    // MPI_Comm curve_comm;
+    const auto & curve_comm = mesh.GetCommunicator();
 #ifdef PARALLEL
     enum { MPI_TAG_CURVE = MPI_TAG_MESH+20 };
 
     const ParallelMeshTopology & partop = mesh.GetParallelTopology ();
-    MPI_Comm_dup (mesh.GetCommunicator(), &curve_comm);
+    // MPI_Comm_dup (mesh.GetCommunicator(), &curve_comm);
     Array<int> procs;
 #else
-    curve_comm = ng_comm; // dummy!
+    // curve_comm = mesh.GetCommunicator();
 #endif
-    int rank = MyMPI_GetId(curve_comm);
-    int ntasks = MyMPI_GetNTasks(curve_comm);
+    int rank = curve_comm.Rank();
+    int ntasks = curve_comm.Size();
 
     if (working)
       order = aorder;
@@ -656,8 +657,8 @@ namespace netgen
           }
       }
 
-    MyMPI_ExchangeTable (send_orders, recv_orders, MPI_TAG_CURVE, curve_comm);
+    if (ntasks > 1)
+      MyMPI_ExchangeTable (send_orders, recv_orders, MPI_TAG_CURVE, curve_comm);
 
     if (ntasks > 1 && working)
       {
@@ -1186,7 +1187,8 @@ namespace netgen
             }
         }
 
-      MyMPI_ExchangeTable (send_surfnr, recv_surfnr, MPI_TAG_CURVE, curve_comm);
+      if (ntasks > 1)
+        MyMPI_ExchangeTable (send_surfnr, recv_surfnr, MPI_TAG_CURVE, curve_comm);
 
       if (ntasks > 1 && working)
         {
@@ -1369,8 +1371,8 @@ namespace netgen
 
 #ifdef PARALLEL
-    MPI_Barrier (curve_comm);
-    MPI_Comm_free (&curve_comm);
+    curve_comm.Barrier();
+    // MPI_Comm_free (&curve_comm);
 #endif
   }
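curvedelems.cpp now borrows the mesh's communicator instead of `MPI_Comm_dup`/`MPI_Comm_free`, and fences both `MyMPI_ExchangeTable` calls behind `ntasks > 1`. The ownership pattern in isolation (a sketch; the function name is invented and the usual meshing headers are assumed):

```cpp
#include <meshing.hpp>   // assumed umbrella header for netgen::Mesh

void SyncLikeCurvedElements (const netgen::Mesh & mesh)
{
  // borrowed reference: no MPI_Comm_dup up front, no MPI_Comm_free at the end
  const auto & comm = mesh.GetCommunicator();

  if (comm.Size() > 1)   // collectives only when actually parallel,
    comm.Barrier();      // mirroring the ntasks > 1 guards above
}
```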
diff --git a/libsrc/meshing/global.cpp b/libsrc/meshing/global.cpp
index 3b2a1c5f..18092496 100644
--- a/libsrc/meshing/global.cpp
+++ b/libsrc/meshing/global.cpp
@@ -32,7 +32,7 @@ namespace netgen
   // TraceGlobal glob2("global2");
 
   // global communicator for netgen
-  DLL_HEADER NgMPI_Comm ng_comm;
+  // DLL_HEADER NgMPI_Comm ng_comm;
 
   weak_ptr<Mesh> global_mesh;
   void SetGlobalMesh (shared_ptr<Mesh> m)
diff --git a/libsrc/meshing/global.hpp b/libsrc/meshing/global.hpp
index aa7f7cd2..b1da9344 100644
--- a/libsrc/meshing/global.hpp
+++ b/libsrc/meshing/global.hpp
@@ -61,7 +61,7 @@ namespace netgen
   DLL_HEADER void SetGlobalMesh (shared_ptr<Mesh> m);
 
   // global communicator for netgen (dummy if no MPI)
-  extern DLL_HEADER NgMPI_Comm ng_comm;
+  // extern DLL_HEADER NgMPI_Comm ng_comm;
 }
diff --git a/libsrc/meshing/meshclass.cpp b/libsrc/meshing/meshclass.cpp
index eeda22da..5ed9663e 100644
--- a/libsrc/meshing/meshclass.cpp
+++ b/libsrc/meshing/meshclass.cpp
@@ -43,7 +43,7 @@ namespace netgen
     bcnames.SetSize(0);
     cd2names.SetSize(0);
 
-    this->comm = netgen :: ng_comm;
+    // this->comm = netgen :: ng_comm;
 #ifdef PARALLEL
     paralleltop = new ParallelMeshTopology (*this);
 #endif
@@ -1325,8 +1325,8 @@ namespace netgen
     if (archive.Input())
       {
-        int rank = MyMPI_GetId(GetCommunicator());
-        int ntasks = MyMPI_GetNTasks(GetCommunicator());
+        int rank = GetCommunicator().Rank();
+        int ntasks = GetCommunicator().Size();
 
         RebuildSurfaceElementLists();
diff --git a/libsrc/meshing/parallelmesh.cpp b/libsrc/meshing/parallelmesh.cpp
index e9c040ef..0eed1dfe 100644
--- a/libsrc/meshing/parallelmesh.cpp
+++ b/libsrc/meshing/parallelmesh.cpp
@@ -35,8 +35,8 @@ namespace netgen
 
   void Mesh :: SendRecvMesh ()
   {
-    int id = MyMPI_GetId(GetCommunicator());
-    int np = MyMPI_GetNTasks(GetCommunicator());
+    int id = GetCommunicator().Rank();
+    int np = GetCommunicator().Size();
 
     if (np == 1) {
       throw NgException("SendRecvMesh called, but only one rank in communicator!!");
@@ -72,9 +72,9 @@ namespace netgen
   {
     Array<MPI_Request> sendrequests;
 
-    MPI_Comm comm = GetCommunicator();
-    int id = MyMPI_GetId(comm);
-    int ntasks = MyMPI_GetNTasks(comm);
+    NgMPI_Comm comm = GetCommunicator();
+    int id = comm.Rank();
+    int ntasks = comm.Size();
 
     int dim = GetDimension();
     MyMPI_Bcast(dim, comm);
@@ -751,8 +751,9 @@ namespace netgen
     int timer_sels = NgProfiler::CreateTimer ("Receive surface elements");
     NgProfiler::RegionTimer reg(timer);
 
-    int id = MyMPI_GetId(GetCommunicator());
-    int ntasks = MyMPI_GetNTasks(GetCommunicator());
+    NgMPI_Comm comm = GetCommunicator();
+    int id = comm.Rank();
+    int ntasks = comm.Size();
 
     int dim;
     MyMPI_Bcast(dim, comm);
@@ -1011,9 +1012,9 @@ namespace netgen
   // call it only for the master !
   void Mesh :: Distribute ()
   {
-    MPI_Comm comm = GetCommunicator();
-    int id = MyMPI_GetId(comm);
-    int ntasks = MyMPI_GetNTasks(comm);
+    NgMPI_Comm comm = GetCommunicator();
+    int id = comm.Rank();
+    int ntasks = comm.Size();
 
     if (id != 0 || ntasks == 1 ) return;
@@ -1072,7 +1073,7 @@ namespace netgen
     eptr.Append (eind.Size());
     Array<idxtype> epart(ne), npart(nn);
 
-    idxtype nparts = MyMPI_GetNTasks(GetCommunicator())-1;
+    idxtype nparts = GetCommunicator().Size()-1;
 
     if (nparts == 1)
       {
@@ -1293,9 +1294,9 @@ namespace netgen
   // call it only for the master !
   void Mesh :: Distribute (Array<int> & volume_weights , Array<int> & surface_weights, Array<int> & segment_weights)
   {
-    MPI_Comm comm = GetCommunicator();
-    int id = MyMPI_GetId(comm);
-    int ntasks = MyMPI_GetNTasks(comm);
+    NgMPI_Comm comm = GetCommunicator();
+    int id = comm.Rank();
+    int ntasks = comm.Size();
 
     if (id != 0 || ntasks == 1 ) return;
@@ -1385,7 +1386,7 @@ namespace netgen
     eptr.Append (eind.Size());
     Array<idxtype> epart(ne), npart(nn);
 
-    idxtype nparts = MyMPI_GetNTasks(GetCommunicator())-1;
+    idxtype nparts = GetCommunicator().Size()-1;
 
     if (nparts == 1)
       {
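These Distribute hunks keep netgen's master/worker layout: rank 0 holds the full mesh and partitions it into `Size()-1` parts for the workers, which is why `nparts` excludes the master. A condensed sketch that mirrors the python-binding lambda in python_mesh.cpp below (the function name is invented):

```cpp
void DistributeOrReceive (netgen::Mesh & mesh)
{
  ngcore::NgMPI_Comm comm = mesh.GetCommunicator();
  if (comm.Size() == 1) return;               // SendRecvMesh would throw here
  if (comm.Rank() == 0) mesh.Distribute();    // master partitions and sends
  else                  mesh.SendRecvMesh();  // workers receive their part
}
```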
diff --git a/libsrc/meshing/paralleltop.cpp b/libsrc/meshing/paralleltop.cpp
index 346b5e25..997d5465 100644
--- a/libsrc/meshing/paralleltop.cpp
+++ b/libsrc/meshing/paralleltop.cpp
@@ -25,8 +25,9 @@ namespace netgen
   {
     *testout << "ParallelMeshTopology::Reset" << endl;
 
-    int id = MyMPI_GetId(mesh.GetCommunicator());
-    int ntasks = MyMPI_GetNTasks(mesh.GetCommunicator());
+    NgMPI_Comm comm = mesh.GetCommunicator();
+    int id = comm.Rank();
+    int ntasks = comm.Size();
 
     if ( ntasks == 1 ) return;
@@ -125,7 +126,8 @@ namespace netgen
     *testout << "ParallelMeshTopology :: UpdateCoarseGridGlobal" << endl;
 
     const MeshTopology & topology = mesh.GetTopology();
-
+    MPI_Comm comm = mesh.GetCommunicator();
+
     if ( id == 0 )
       {
         Array<Array<int>*> sendarrays(ntasks);
@@ -160,7 +162,7 @@ namespace netgen
 
         Array<MPI_Request> sendrequests;
         for (int dest = 1; dest < ntasks; dest++)
-          sendrequests.Append (MyMPI_ISend (*sendarrays[dest], dest, MPI_TAG_MESH+10));
+          sendrequests.Append (MyMPI_ISend (*sendarrays[dest], dest, MPI_TAG_MESH+10, comm));
         MPI_Waitall (sendrequests.Size(), &sendrequests[0], MPI_STATUS_IGNORE);
 
         for (int dest = 1; dest < ntasks; dest++)
@@ -171,7 +173,7 @@ namespace netgen
       {
         Array<int> recvarray;
-        MyMPI_Recv (recvarray, 0, MPI_TAG_MESH+10);
+        MyMPI_Recv (recvarray, 0, MPI_TAG_MESH+10, comm);
 
         int ii = 0;
@@ -209,10 +211,12 @@ namespace netgen
     // cout << "UpdateCoarseGrid" << endl;
     // if (is_updated) return;
 
-    MPI_Comm comm = mesh.GetCommunicator();
-    int id = MyMPI_GetId(comm);
-    int ntasks = MyMPI_GetNTasks(comm);
+    NgMPI_Comm comm = mesh.GetCommunicator();
+    int id = comm.Rank();
+    int ntasks = comm.Size();
 
+    if (ntasks == 1) return;
+
     Reset();
     static int timer = NgProfiler::CreateTimer ("UpdateCoarseGrid");
     NgProfiler::RegionTimer reg(timer);
diff --git a/libsrc/meshing/python_mesh.cpp b/libsrc/meshing/python_mesh.cpp
index 3c15e3bd..bfc02341 100644
--- a/libsrc/meshing/python_mesh.cpp
+++ b/libsrc/meshing/python_mesh.cpp
@@ -20,7 +20,7 @@ namespace netgen
 #ifdef PARALLEL
   /** we need allreduce in python-wrapped communicators **/
   template <typename T>
-  inline T MyMPI_AllReduceNG (T d, const MPI_Op & op = MPI_SUM, MPI_Comm comm = ng_comm)
+  inline T MyMPI_AllReduceNG (T d, const MPI_Op & op /* = MPI_SUM */, MPI_Comm comm)
   {
     T global_d;
     MPI_Allreduce ( &d, &global_d, 1, MyGetMPIType<T>(), op, comm);
@@ -30,7 +30,7 @@ namespace netgen
   enum { MPI_SUM = 0, MPI_MIN = 1, MPI_MAX = 2 };
   typedef int MPI_Op;
   template <typename T>
-  inline T MyMPI_AllReduceNG (T d, const MPI_Op & op = MPI_SUM, MPI_Comm comm = ng_comm)
+  inline T MyMPI_AllReduceNG (T d, const MPI_Op & op /* = MPI_SUM */, MPI_Comm comm)
   { return d; }
 #endif
 }
@@ -89,12 +89,11 @@ DLL_HEADER void ExportNetgenMeshing(py::module &m)
   py::class_<NgMPI_Comm> (m, "MPI_Comm")
     .def_property_readonly ("rank", &NgMPI_Comm::Rank)
     .def_property_readonly ("size", &NgMPI_Comm::Size)
-
+    .def("Barrier", &NgMPI_Comm::Barrier)
+
 #ifdef PARALLEL
-    .def("Barrier", [](NgMPI_Comm & c) { MPI_Barrier(c); })
     .def("WTime", [](NgMPI_Comm & c) { return MPI_Wtime(); })
 #else
-    .def("Barrier", [](NgMPI_Comm & c) { })
     .def("WTime", [](NgMPI_Comm & c) { return -1.0; })
 #endif
     .def("Sum", [](NgMPI_Comm & c, double x) { return MyMPI_AllReduceNG(x, MPI_SUM, c); })
@@ -557,13 +556,13 @@ DLL_HEADER void ExportNetgenMeshing(py::module &m)
     .def_property_readonly("_timestamp", &Mesh::GetTimeStamp)
     .def("Distribute", [](shared_ptr<Mesh> self, NgMPI_Comm comm) {
       self->SetCommunicator(comm);
-      if(MyMPI_GetNTasks(comm)==1) return self;
+      if(comm.Size()==1) return self;
       // if(MyMPI_GetNTasks(comm)==2) throw NgException("Sorry, cannot handle communicators with NP=2!");
       // cout << " rank " << MyMPI_GetId(comm) << " of " << MyMPI_GetNTasks(comm) << " called Distribute " << endl;
-      if(MyMPI_GetId(comm)==0) self->Distribute();
+      if(comm.Rank()==0) self->Distribute();
       else self->SendRecvMesh();
       return self;
-    }, py::arg("comm")=NgMPI_Comm(ng_comm))
+    }, py::arg("comm"))
     .def("Receive", [](NgMPI_Comm comm) {
       auto mesh = make_shared<Mesh>();
       mesh->SetCommunicator(comm);
@@ -575,9 +574,9 @@ DLL_HEADER void ExportNetgenMeshing(py::module &m)
       {
         istream * infile;
 
-        MPI_Comm comm = self.GetCommunicator();
-        id = MyMPI_GetId(comm);
-        ntasks = MyMPI_GetNTasks(comm);
+        NgMPI_Comm comm = self.GetCommunicator();
+        id = comm.Rank();
+        ntasks = comm.Size();
 
 #ifdef PARALLEL
         char* buf = nullptr;
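`MyMPI_AllReduceNG` still compiles in both build modes, but every caller now spells out the reduction op and the communicator, exactly as the `.def("Sum", ...)` binding above does. A direct C++ call would look like this sketch (function name invented; `NgMPI_Comm` converts implicitly to `MPI_Comm`):

```cpp
double SumOverComm (ngcore::NgMPI_Comm & comm, double x)
{
  // explicit op and communicator; in the serial build MPI_SUM is the
  // fallback enum defined in the #else branch above
  return netgen::MyMPI_AllReduceNG (x, MPI_SUM, comm);
}
```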
diff --git a/libsrc/visualization/vssolution.cpp b/libsrc/visualization/vssolution.cpp
index 3b0b61cc..6e758687 100644
--- a/libsrc/visualization/vssolution.cpp
+++ b/libsrc/visualization/vssolution.cpp
@@ -2650,13 +2650,17 @@ namespace netgen
     static int timer2 = NgProfiler::CreateTimer ("getminmax, surf");
 
 #ifdef PARALLEL
-    if (id == 0)
+    auto comm = mesh->GetCommunicator();
+    if (comm.Size() > 1)
       {
-        MyMPI_SendCmd ("redraw");
-        MyMPI_SendCmd ("getminmax");
+        if (id == 0)
+          {
+            MyMPI_SendCmd ("redraw");
+            MyMPI_SendCmd ("getminmax");
+          }
+        MyMPI_Bcast (funcnr, mesh->GetCommunicator());
+        MyMPI_Bcast (comp, mesh->GetCommunicator());
       }
-    MyMPI_Bcast (funcnr);
-    MyMPI_Bcast (comp);
 #endif
 
     // double val;
@@ -2744,11 +2748,14 @@ namespace netgen
         minv = 1e99;
         maxv = -1e99;
       }
-    double hmin, hmax;
-    MPI_Reduce (&minv, &hmin, 1, MPI_DOUBLE, MPI_MIN, 0, MPI_COMM_WORLD);
-    MPI_Reduce (&maxv, &hmax, 1, MPI_DOUBLE, MPI_MAX, 0, MPI_COMM_WORLD);
-    minv = hmin;
-    maxv = hmax;
+    if (ntasks > 1)
+      {
+        double hmin, hmax;
+        MPI_Reduce (&minv, &hmin, 1, MPI_DOUBLE, MPI_MIN, 0, MPI_COMM_WORLD);
+        MPI_Reduce (&maxv, &hmax, 1, MPI_DOUBLE, MPI_MAX, 0, MPI_COMM_WORLD);
+        minv = hmin;
+        maxv = hmax;
+      }
 #endif
   }
diff --git a/ng/ngappinit.cpp b/ng/ngappinit.cpp
index 8c90d069..a3db7789 100644
--- a/ng/ngappinit.cpp
+++ b/ng/ngappinit.cpp
@@ -77,7 +77,7 @@ int main(int argc, char ** argv)
       throw ngcore::Exception("Netgen GUI cannot run MPI-parallel");
 
       // MPI_COMM_WORLD is just a local communicator
-      netgen::ng_comm = ngcore::NgMPI_Comm{MPI_COMM_WORLD, false};
+      // netgen::ng_comm = ngcore::NgMPI_Comm{MPI_COMM_WORLD, false};
 #endif
 
diff --git a/ng/ngpkg.cpp b/ng/ngpkg.cpp
index 5b67637a..bc047495 100644
--- a/ng/ngpkg.cpp
+++ b/ng/ngpkg.cpp
@@ -1219,7 +1219,7 @@ namespace netgen
 
 #ifdef PARALLEL
     MyMPI_SendCmd ("bcastparthread");
-    MyMPI_Bcast (mparam.parthread);
+    MyMPI_Bcast (mparam.parthread, MPI_COMM_WORLD);
 #endif
 
     return TCL_OK;
diff --git a/ng/parallelfunc.cpp b/ng/parallelfunc.cpp
index ac95b226..6c7498ac 100644
--- a/ng/parallelfunc.cpp
+++ b/ng/parallelfunc.cpp
@@ -125,7 +125,7 @@ void ParallelRun()
 
       else if ( message == "bcastparthread" )
         {
-          MyMPI_Bcast (mparam.parthread);
+          MyMPI_Bcast (mparam.parthread, MPI_COMM_WORLD);
        }
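These last two hunks are the two halves of one collective: the broadcast the GUI master issues in ngpkg.cpp must be matched by each worker inside ParallelRun() in parallelfunc.cpp, on the same communicator, which is why both now name MPI_COMM_WORLD explicitly. A condensed sketch of the pairing (wrapper functions invented for illustration):

```cpp
// master (GUI) side, cf. ngpkg.cpp:
void MasterBcastParthread ()
{
  MyMPI_SendCmd ("bcastparthread");                 // tell workers which branch follows
  MyMPI_Bcast (mparam.parthread, MPI_COMM_WORLD);   // root side of the collective
}

// worker side, cf. the ParallelRun() message loop in parallelfunc.cpp:
void WorkerBcastParthread ()
{
  MyMPI_Bcast (mparam.parthread, MPI_COMM_WORLD);   // must match comm and datatype
}
```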