From 3c8f1877c9a476f4749701b58d2a6012f748ed35 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Joachim=20Sch=C3=B6berl?=
Date: Wed, 5 Aug 2020 01:11:26 +0200
Subject: [PATCH] more mpi calls from ngcore

---
 libsrc/core/mpi_wrapper.hpp      | 15 +++++++++++++++
 libsrc/general/mpi_interface.hpp | 10 ++++++++--
 libsrc/meshing/parallelmesh.cpp  |  6 ++++--
 libsrc/meshing/paralleltop.cpp   |  5 +++--
 4 files changed, 30 insertions(+), 6 deletions(-)

diff --git a/libsrc/core/mpi_wrapper.hpp b/libsrc/core/mpi_wrapper.hpp
index f26e66ec..b2e32731 100644
--- a/libsrc/core/mpi_wrapper.hpp
+++ b/libsrc/core/mpi_wrapper.hpp
@@ -134,6 +134,10 @@ namespace ngcore
     void Send (T & val, int dest, int tag) const {
       MPI_Send (&val, 1, GetMPIType<T>(), dest, tag, comm);
     }
+
+    void Send (const std::string & s, int dest, int tag) const {
+      MPI_Send( const_cast<char*> (&s[0]), s.length(), MPI_CHAR, dest, tag, comm);
+    }
 
     template<typename T, typename T2 = decltype(GetMPIType<T>())>
     void Send(FlatArray<T> s, int dest, int tag) const {
@@ -145,6 +149,17 @@
       MPI_Recv (&val, 1, GetMPIType<T>(), src, tag, comm, MPI_STATUS_IGNORE);
     }
 
+    void Recv (std::string & s, int src, int tag) const {
+      MPI_Status status;
+      int len;
+      MPI_Probe (src, tag, comm, &status);
+      MPI_Get_count (&status, MPI_CHAR, &len);
+      // s.assign (len, ' ');
+      s.resize (len);
+      MPI_Recv( &s[0], len, MPI_CHAR, src, tag, comm, MPI_STATUS_IGNORE);
+    }
+
+
     template <typename T, typename T2 = decltype(GetMPIType<T>())>
     void Recv (FlatArray<T> s, int src, int tag) const {
       MPI_Recv (s.Data(), s.Size(), GetMPIType<T> (), src, tag, comm, MPI_STATUS_IGNORE);
diff --git a/libsrc/general/mpi_interface.hpp b/libsrc/general/mpi_interface.hpp
index 08e7e85a..82f40720 100644
--- a/libsrc/general/mpi_interface.hpp
+++ b/libsrc/general/mpi_interface.hpp
@@ -59,11 +59,13 @@ namespace netgen
     MPI_Recv( &i, 1, MPI_INT, src, tag, comm, &status);
   }
 
+  [[deprecated("mympi_send string, use comm.Send instead")]]
  inline void MyMPI_Send (const string & s, int dest, int tag, MPI_Comm comm)
   {
     MPI_Send( const_cast<char*> (s.c_str()), s.length(), MPI_CHAR, dest, tag, comm);
   }
 
+  [[deprecated("mympi_revc string, use comm.Recv instead")]]
   inline void MyMPI_Recv (string & s, int src, int tag, MPI_Comm comm)
   {
     MPI_Status status;
@@ -76,14 +78,15 @@
 
 
 
-  template <class T, int BASE>
+  [[deprecated("mympi_send ngflatarray, use comm.send instead")]]
   inline void MyMPI_Send (NgFlatArray<T, BASE> s, int dest, int tag, MPI_Comm comm)
   {
     MPI_Send( &s.First(), s.Size(), GetMPIType<T>(), dest, tag, comm);
   }
 
 
   template <class T, int BASE>
+  [[deprecated("mympi_recv ngflatarray, use comm.Recv instead")]]
   inline void MyMPI_Recv ( NgFlatArray<T, BASE> s, int src, int tag, MPI_Comm comm)
   {
     MPI_Status status;
@@ -136,6 +139,7 @@
 
   */
   template <class T, int BASE>
+  [[deprecated("mympi_isend ngflatarray, use comm.send instead")]]
   inline MPI_Request MyMPI_ISend (NgFlatArray<T, BASE> s, int dest, int tag, MPI_Comm comm)
   {
     MPI_Request request;
@@ -143,8 +147,8 @@
     return request;
   }
 
 
-  template <class T, int BASE>
+  [[deprecated("mympi_irecv ngflatarray, use comm.recv instead")]]
   inline MPI_Request MyMPI_IRecv (NgFlatArray<T, BASE> s, int dest, int tag, MPI_Comm comm)
   {
     MPI_Request request;
@@ -243,12 +247,14 @@
   }
 
   template <class T>
+  [[deprecated("mympi_allgather deprecated, use comm.allgather")]]
   inline void MyMPI_Allgather (const T & send, NgFlatArray<T> recv, MPI_Comm comm)
   {
     MPI_Allgather( const_cast<T*> (&send), 1, GetMPIType<T>(), &recv[0], 1, GetMPIType<T>(), comm);
   }
 
   template <class T>
+  [[deprecated("mympi_alltoall deprecated, use comm.alltoall")]]
   inline void MyMPI_Alltoall (NgFlatArray<T> send, NgFlatArray<T> recv, MPI_Comm comm)
   {
     MPI_Alltoall( &send[0], 1, GetMPIType<T>(), &recv[0], 1, GetMPIType<T>(), comm);
diff --git a/libsrc/meshing/parallelmesh.cpp b/libsrc/meshing/parallelmesh.cpp
index 7e7b7523..9af0d7a6 100644
--- a/libsrc/meshing/parallelmesh.cpp
+++ b/libsrc/meshing/parallelmesh.cpp
@@ -321,7 +321,8 @@
     for (int dest = 1; dest < ntasks; dest++)
       {
        NgFlatArray<PointIndex> verts = verts_of_proc[dest];
-       sendrequests.Append (MyMPI_ISend (verts, dest, MPI_TAG_MESH+1, comm));
+       // sendrequests.Append (MyMPI_ISend (verts, dest, MPI_TAG_MESH+1, comm));
+       sendrequests.Append (comm.ISend (FlatArray(verts), dest, MPI_TAG_MESH+1));
 
 
        MPI_Datatype mptype = MeshPoint::MyGetMPIType();
@@ -399,7 +400,8 @@
       }
     Array<MPI_Request> req_per;
     for(int dest = 1; dest < ntasks; dest++)
-      req_per.Append(MyMPI_ISend(pp_data[dest], dest, MPI_TAG_MESH+1, comm));
+      // req_per.Append(MyMPI_ISend(pp_data[dest], dest, MPI_TAG_MESH+1, comm));
+      req_per.Append(comm.ISend(FlatArray(pp_data[dest]), dest, MPI_TAG_MESH+1));
     MyMPI_WaitAll(req_per);
 
     PrintMessage ( 3, "Sending Vertices - distprocs");
diff --git a/libsrc/meshing/paralleltop.cpp b/libsrc/meshing/paralleltop.cpp
index 06c08ab5..3fba9f87 100644
--- a/libsrc/meshing/paralleltop.cpp
+++ b/libsrc/meshing/paralleltop.cpp
@@ -122,7 +122,7 @@
     *testout << "ParallelMeshTopology :: UpdateCoarseGridGlobal" << endl;
 
     const MeshTopology & topology = mesh.GetTopology();
-    MPI_Comm comm = mesh.GetCommunicator();
+    auto comm = mesh.GetCommunicator();
 
     if ( id == 0 )
       {
@@ -160,7 +160,8 @@
 
     Array<MPI_Request> sendrequests;
     for (int dest = 1; dest < ntasks; dest++)
-      sendrequests.Append (MyMPI_ISend (*sendarrays[dest], dest, MPI_TAG_MESH+10, comm));
+      // sendrequests.Append (MyMPI_ISend (*sendarrays[dest], dest, MPI_TAG_MESH+10, comm));
+      sendrequests.Append (comm.ISend (FlatArray(*sendarrays[dest]), dest, MPI_TAG_MESH+10));
     MyMPI_WaitAll (sendrequests);
 
     for (int dest = 1; dest < ntasks; dest++)
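
Usage sketch (reviewer note, not part of the patch): a minimal illustration of how the new NgMPI_Comm string overloads added in libsrc/core/mpi_wrapper.hpp would be called. The function name, tag value and include path below are assumptions for illustration only, and a PARALLEL build of ngcore is assumed.

    #include <string>
    #include <core/mpi_wrapper.hpp>   // assumed include path for ngcore::NgMPI_Comm

    // Illustrative helper: rank 0 sends a string, rank 1 receives it.
    void exchange_greeting (const ngcore::NgMPI_Comm & comm)
    {
      const int tag = 0;                        // placeholder tag, analogous to MPI_TAG_MESH+... above
      if (comm.Rank() == 0 && comm.Size() > 1)
        {
          const std::string greeting = "hello from rank 0";
          comm.Send (greeting, 1, tag);         // new Send (const std::string &, int, int)
        }
      else if (comm.Rank() == 1)
        {
          std::string msg;
          comm.Recv (msg, 0, tag);              // new Recv (std::string &, int, int)
        }
    }

The Recv overload determines the message length via MPI_Probe/MPI_Get_count and resizes the string before receiving, so the receiver does not need to know the length in advance.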