diff --git a/libsrc/core/mpi_wrapper.hpp b/libsrc/core/mpi_wrapper.hpp
index 058dca70..434e2ce5 100644
--- a/libsrc/core/mpi_wrapper.hpp
+++ b/libsrc/core/mpi_wrapper.hpp
@@ -125,14 +125,15 @@ namespace ngcore
     }
   };
 
-
+  [[deprecated("use requests.WaitAll instead")]]
   inline void MyMPI_WaitAll (FlatArray<NG_MPI_Request> requests)
   {
     static Timer t("MPI - WaitAll"); RegionTimer reg(t);
     if (!requests.Size()) return;
     NG_MPI_Waitall (requests.Size(), requests.Data(), NG_MPI_STATUSES_IGNORE);
   }
-
+
+  [[deprecated("use requests.WaitAny instead")]]
   inline int MyMPI_WaitAny (FlatArray<NG_MPI_Request> requests)
   {
     int nr;
@@ -403,7 +404,7 @@ namespace ngcore
     }
 
     template <typename T>
-    NgMPI_Request IBcast (FlatArray<T> d, int root = 0) const
+    [[nodiscard]] NgMPI_Request IBcast (FlatArray<T> d, int root = 0) const
     {
       NG_MPI_Request request;
       int ds = d.Size();
@@ -416,7 +417,7 @@ namespace ngcore
     void AllToAll (FlatArray<T> send, FlatArray<T> recv) const
     {
       NG_MPI_Alltoall (send.Data(), 1, GetMPIType<T>(),
-                        recv.Data(), 1, GetMPIType<T>(), comm);
+                       recv.Data(), 1, GetMPIType<T>(), comm);
     }
 
 
@@ -425,7 +426,7 @@ namespace ngcore
     {
       if (size == 1) return;
       NG_MPI_Scatter (send.Data(), 1, GetMPIType<T>(),
-                       NG_MPI_IN_PLACE, -1, GetMPIType<T>(), 0, comm);
+                      NG_MPI_IN_PLACE, -1, GetMPIType<T>(), 0, comm);
     }
 
     template <typename T>
@@ -433,7 +434,7 @@ namespace ngcore
     {
       if (size == 1) return;
       NG_MPI_Scatter (NULL, 0, GetMPIType<T>(),
-                       &recv, 1, GetMPIType<T>(), 0, comm);
+                      &recv, 1, GetMPIType<T>(), 0, comm);
     }
 
     template <typename T>
@@ -442,7 +443,7 @@ namespace ngcore
       recv[0] = T(0);
       if (size == 1) return;
       NG_MPI_Gather (NG_MPI_IN_PLACE, 1, GetMPIType<T>(),
-                      recv.Data(), 1, GetMPIType<T>(), 0, comm);
+                     recv.Data(), 1, GetMPIType<T>(), 0, comm);
     }
 
     template <typename T>
@@ -483,16 +484,16 @@ namespace ngcore
 
       recv_data = DynamicTable<T> (recv_sizes, true);
 
-      Array<NG_MPI_Request> requests;
+      NgMPI_Requests requests;
       for (int dest = 0; dest < size; dest++)
         if (dest != rank && send_data[dest].Size())
-          requests.Append (ISend (FlatArray<T>(send_data[dest]), dest, tag));
+          requests += ISend (FlatArray<T>(send_data[dest]), dest, tag);
 
       for (int dest = 0; dest < size; dest++)
         if (dest != rank && recv_data[dest].Size())
-          requests.Append (IRecv (FlatArray<T>(recv_data[dest]), dest, tag));
+          requests += IRecv (FlatArray<T>(recv_data[dest]), dest, tag);
 
-      MyMPI_WaitAll (requests);
+      requests.WaitAll();
     }
 
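The wrapper changes above all serve one idiom: collect each NgMPI_Request returned by the non-blocking calls in an NgMPI_Requests container via operator+=, then block once with WaitAll(), instead of appending raw handles to an Array<NG_MPI_Request> and calling the now-deprecated MyMPI_WaitAll. A minimal sketch of that idiom, assuming ngcore's mpi_wrapper.hpp is available; the include path and the ring-shift function itself are illustrative, not part of the patch:

    #include <core/mpi_wrapper.hpp>   // path may differ per install
    using namespace ngcore;

    // Shift a buffer to the next rank; illustrates the new request idiom.
    void RingShift (const NgMPI_Comm & comm,
                    FlatArray<double> sendbuf, FlatArray<double> recvbuf)
    {
      int next = (comm.Rank() + 1) % comm.Size();
      int prev = (comm.Rank() + comm.Size() - 1) % comm.Size();

      NgMPI_Requests requests;                   // was Array<NG_MPI_Request>
      requests += comm.ISend (sendbuf, next, 0); // collect each request
      requests += comm.IRecv (recvbuf, prev, 0);
      requests.WaitAll();                        // was MyMPI_WaitAll(requests)
    }
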
diff --git a/libsrc/meshing/parallelmesh.cpp b/libsrc/meshing/parallelmesh.cpp
index a1c9065b..3e3ac61d 100644
--- a/libsrc/meshing/parallelmesh.cpp
+++ b/libsrc/meshing/parallelmesh.cpp
@@ -222,8 +222,8 @@ namespace netgen
     int dim = GetDimension();
     comm.Bcast(dim);
 
-    Array<NG_MPI_Request> sendrequests(8*(ntasks-1));
-    sendrequests.SetSize0();
+    NgMPI_Requests sendrequests; // (8*(ntasks-1));
+    // sendrequests.SetSize0();
 
     // If the topology is not already updated, we do not need to
     // build edges/faces.
@@ -457,7 +457,7 @@ namespace netgen
       {
        NgFlatArray<PointIndex> verts = verts_of_proc[dest];
        // sendrequests.Append (MyMPI_ISend (verts, dest, NG_MPI_TAG_MESH+1, comm));
-       sendrequests.Append (comm.ISend (FlatArray<PointIndex>(verts), dest, NG_MPI_TAG_MESH+1));
+       sendrequests += comm.ISend (FlatArray<PointIndex>(verts), dest, NG_MPI_TAG_MESH+1);
 
        NG_MPI_Datatype mptype = MeshPoint::MyGetMPIType();
 
@@ -473,7 +473,7 @@ namespace netgen
 
        NG_MPI_Request request;
        NG_MPI_Isend( points.Data(), 1, point_types[dest-1], dest, NG_MPI_TAG_MESH+1, comm, &request);
-       sendrequests.Append (request);
+       sendrequests += request;
       }
 
 
@@ -533,11 +533,11 @@ namespace netgen
              }
          }
       }
-    Array<NG_MPI_Request> req_per;
+    NgMPI_Requests req_per;
     for(int dest = 1; dest < ntasks; dest++)
       // req_per.Append(MyMPI_ISend(pp_data[dest], dest, NG_MPI_TAG_MESH+1, comm));
-      req_per.Append(comm.ISend(FlatArray<int>(pp_data[dest]), dest, NG_MPI_TAG_MESH+1));
-    MyMPI_WaitAll(req_per);
+      req_per += comm.ISend(FlatArray<int>(pp_data[dest]), dest, NG_MPI_TAG_MESH+1);
+    req_per.WaitAll();
 
     PrintMessage ( 3, "Sending Vertices - distprocs");
@@ -570,7 +570,7 @@ namespace netgen
     tbuilddistpnums.Stop();
 
     for ( int dest = 1; dest < ntasks; dest ++ )
-      sendrequests.Append (comm.ISend (distpnums[dest], dest, NG_MPI_TAG_MESH+1));
+      sendrequests += comm.ISend (distpnums[dest], dest, NG_MPI_TAG_MESH+1);
 
 
 
@@ -604,7 +604,7 @@ namespace netgen
     tbuildelementtable.Stop();
 
     for (int dest = 1; dest < ntasks; dest ++ )
-      sendrequests.Append (comm.ISend (elementarrays[dest], dest, NG_MPI_TAG_MESH+2));
+      sendrequests += comm.ISend (elementarrays[dest], dest, NG_MPI_TAG_MESH+2);
 
 
     PrintMessage ( 3, "Sending Face Descriptors" );
@@ -621,7 +621,7 @@ namespace netgen
       }
 
     for (int dest = 1; dest < ntasks; dest++)
-      sendrequests.Append (comm.ISend (fddata, dest, NG_MPI_TAG_MESH+3));
+      sendrequests += comm.ISend (fddata, dest, NG_MPI_TAG_MESH+3);
 
     /** Surface Elements **/
 
@@ -697,7 +697,7 @@ namespace netgen
       });
     // distribute sel data
     for (int dest = 1; dest < ntasks; dest++)
-      sendrequests.Append (comm.ISend(selbuf[dest], dest, NG_MPI_TAG_MESH+4));
+      sendrequests += comm.ISend(selbuf[dest], dest, NG_MPI_TAG_MESH+4);
 
 
     /** Segments **/
@@ -849,7 +849,7 @@ namespace netgen
       });
     // distribute segment data
     for (int dest = 1; dest < ntasks; dest++)
-      sendrequests.Append (comm.ISend(segm_buf[dest], dest, NG_MPI_TAG_MESH+5));
+      sendrequests += comm.ISend(segm_buf[dest], dest, NG_MPI_TAG_MESH+5);
 
     /** Point-Elements **/
     PrintMessage ( 3, "Point-Elements ...");
@@ -870,11 +870,11 @@ namespace netgen
     iterate_zdes([&](const auto & pack, auto dest) { zde_buf.Add(dest, pack); });
 
     for (int dest = 1; dest < ntasks; dest++)
-      { sendrequests.Append (comm.ISend(zde_buf[dest], dest, NG_MPI_TAG_MESH+6)); }
+      sendrequests += comm.ISend(zde_buf[dest], dest, NG_MPI_TAG_MESH+6);
 
     PrintMessage ( 3, "now wait ...");
 
-    MyMPI_WaitAll (sendrequests);
+    sendrequests.WaitAll();
 
     // clean up MPI-datatypes we allocated earlier
     for (auto t : point_types)
@@ -885,20 +885,6 @@ namespace netgen
     paralleltop -> EnumeratePointsGlobally();
 
     PrintMessage ( 3, "Sending names");
-#ifdef OLD
-    sendrequests.SetSize(3*ntasks);
-    /** Send bc/mat/cd*-names **/
-    // nr of names
-    ArrayMem<int,4> nnames{0,0,0,0};
-    nnames[0] = materials.Size();
-    nnames[1] = bcnames.Size();
-    nnames[2] = GetNCD2Names();
-    nnames[3] = GetNCD3Names();
-    int tot_nn = nnames[0] + nnames[1] + nnames[2] + nnames[3];
-    for( int k = 1; k < ntasks; k++)
-      sendrequests[k] = comm.ISend(nnames, k, NG_MPI_TAG_MESH+7);
-#endif
-    // sendrequests.SetSize(3);
     /** Send bc/mat/cd*-names **/
     // nr of names
     std::array<int,4> nnames{0,0,0,0};
@@ -907,12 +893,10 @@ namespace netgen
     nnames[2] = GetNCD2Names();
     nnames[3] = GetNCD3Names();
     int tot_nn = nnames[0] + nnames[1] + nnames[2] + nnames[3];
-    // sendrequests[0] = comm.IBcast (nnames);
     NgMPI_Requests requ;
     requ += comm.IBcast (nnames);
 
-    // (void) NG_MPI_Isend(nnames, 4, NG_MPI_INT, k, NG_MPI_TAG_MESH+6, comm, &sendrequests[k]);
 
     auto iterate_names = [&](auto func) {
       for (int k = 0; k < nnames[0]; k++) func(materials[k]);
       for (int k = 0; k < nnames[1]; k++) func(bcnames[k]);
@@ -923,11 +907,7 @@ namespace netgen
     Array<int> name_sizes(tot_nn);
     tot_nn = 0;
     iterate_names([&](auto ptr) { name_sizes[tot_nn++] = (ptr==NULL) ? 0 : ptr->size(); });
-    /*
-    for( int k = 1; k < ntasks; k++)
-      (void) NG_MPI_Isend(&name_sizes[0], tot_nn, NG_MPI_INT, k, NG_MPI_TAG_MESH+7, comm, &sendrequests[k]);
-    */
-    // sendrequests[1] = comm.IBcast (name_sizes);
+    requ += comm.IBcast (name_sizes);
 
     // names
     int strs = 0;
@@ -940,16 +920,10 @@ namespace netgen
          for (int j=0; j < name.size(); j++) compiled_names[strs++] = name[j];
       });
-    /*
-    for( int k = 1; k < ntasks; k++)
-      (void) NG_MPI_Isend(&(compiled_names[0]), strs, NG_MPI_CHAR, k, NG_MPI_TAG_MESH+7, comm, &sendrequests[ntasks+k]);
-    */
-    // sendrequests[2] = comm.IBcast (compiled_names);
     requ += comm.IBcast (compiled_names);
 
     PrintMessage ( 3, "wait for names");
 
-    // MyMPI_WaitAll (sendrequests);
     requ.WaitAll();
 
     comm.Barrier();
 
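In parallelmesh.cpp the per-rank ISend loops for the name tables are gone: the names now go out as three non-blocking broadcasts whose requests are collected in a single NgMPI_Requests, and since IBcast is now [[nodiscard]], silently dropping the returned request no longer compiles cleanly. A sketch of the shape of that pattern, assuming the ngcore wrappers; the function and buffer names are illustrative, and every rank must call it collectively with the payload already sized consistently:

    #include <core/mpi_wrapper.hpp>   // path may differ per install
    #include <array>
    using namespace ngcore;

    // Broadcast a fixed-size header and a pre-sized payload from rank 0.
    void BcastNameData (const NgMPI_Comm & comm,
                        std::array<int,4> & counts, Array<char> & payload)
    {
      NgMPI_Requests requ;
      requ += comm.IBcast (counts);    // [[nodiscard]]: request must be kept
      requ += comm.IBcast (payload);   // root sends, all others receive
      requ.WaitAll();                  // completes both broadcasts at once
    }
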
diff --git a/libsrc/meshing/paralleltop.cpp b/libsrc/meshing/paralleltop.cpp
index a62d3952..a2107726 100644
--- a/libsrc/meshing/paralleltop.cpp
+++ b/libsrc/meshing/paralleltop.cpp
@@ -1,6 +1,3 @@
-// #ifdef PARALLEL
-
-
 #include <meshing.hpp>
 #include "paralleltop.hpp"
 
@@ -138,7 +135,6 @@ namespace netgen
       for (auto p : dps)
        send_data[p][nsend[p]++] = L2G(pi);
 
-    // Array<NG_MPI_Request> requests;
     NgMPI_Requests requests;
     for (int i = 0; i < comm.Size(); i++)
       {
@@ -503,7 +499,6 @@ namespace netgen
       }
 
     DynamicTable<int> recv_verts(ntasks);
-    // MyMPI_ExchangeTable (send_verts, recv_verts, NG_MPI_TAG_MESH+9, comm);
     comm.ExchangeTable (send_verts, recv_verts, NG_MPI_TAG_MESH+9);
 
     for (int dest = 0; dest < ntasks; dest++)
@@ -696,12 +691,8 @@ namespace netgen
          }
       }
 
-    // cout << "UpdateCoarseGrid - edges mpi-exchange" << endl;
-    // TABLE<int> recv_edges(ntasks);
     DynamicTable<int> recv_edges(ntasks);
-    // MyMPI_ExchangeTable (send_edges, recv_edges, NG_MPI_TAG_MESH+9, comm);
     comm.ExchangeTable (send_edges, recv_edges, NG_MPI_TAG_MESH+9);
-    // cout << "UpdateCoarseGrid - edges mpi-exchange done" << endl;
 
     for (int dest = 0; dest < ntasks; dest++)
       {
@@ -806,12 +797,8 @@ namespace netgen
          }
       }
 
-    // cout << "UpdateCoarseGrid - faces mpi-exchange" << endl;
-    // TABLE<int> recv_faces(ntasks);
     DynamicTable<int> recv_faces(ntasks);
-    // MyMPI_ExchangeTable (send_faces, recv_faces, NG_MPI_TAG_MESH+9, comm);
     comm.ExchangeTable (send_faces, recv_faces, NG_MPI_TAG_MESH+9);
-    // cout << "UpdateCoarseGrid - faces mpi-exchange done" << endl;
 
     for (int dest = 0; dest < ntasks; dest++)
       {
@@ -848,4 +835,3 @@ namespace netgen
 }
 
 
-// #endif
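The paralleltop.cpp hunks are mostly cleanup: the commented-out MyMPI_ExchangeTable calls give way to the NgMPI_Comm::ExchangeTable method whose reworked implementation appears in the first file, so each vertex/edge/face exchange is a single call that sizes the receive table, posts the ISend/IRecv pairs, and waits on all of them. A sketch of a call site, assuming the ngcore wrappers; the payload (each rank sends its rank number to every peer) and the tag are illustrative, where the mesh code uses NG_MPI_TAG_MESH+9:

    #include <core/mpi_wrapper.hpp>   // path may differ per install
    using namespace ngcore;

    // Sparse all-to-all of variable-length integer rows.
    void ExchangeDemo (const NgMPI_Comm & comm)
    {
      int ntasks = comm.Size();
      DynamicTable<int> send_data(ntasks), recv_data(ntasks);

      for (int dest = 0; dest < ntasks; dest++)
        if (dest != comm.Rank())
          send_data.Add (dest, comm.Rank());   // one entry per peer

      // exchanges row sizes, then completes all ISend/IRecv pairs
      comm.ExchangeTable (send_data, recv_data, /*tag*/ 100);
    }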