mpi cleanup

Joachim Schoeberl 2024-12-01 18:55:01 +01:00
parent 8c1882226c
commit 9935d877cc
3 changed files with 27 additions and 66 deletions
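
For orientation, a minimal sketch of the request-handling pattern this cleanup converges on, built only from names visible in the diff below (NgMPI_Requests, operator+=, WaitAll, comm.ISend/IRecv); the header path and the Rank()/Size() accessors are assumptions, not taken from this commit:

#include <core/mpi_wrapper.hpp>   // assumed ngcore header providing NgMPI_Comm, NgMPI_Requests, FlatArray
using namespace ngcore;

// Non-blocking ring exchange: collect the requests in an NgMPI_Requests container
// instead of an Array<NG_MPI_Request>, then wait on all of them at once.
void RingExchange (const NgMPI_Comm & comm, FlatArray<int> send, FlatArray<int> recv, int tag)
{
  int rank = comm.Rank(), size = comm.Size();               // accessors assumed
  if (size == 1) return;
  NgMPI_Requests requests;                                  // replaces Array<NG_MPI_Request>
  requests += comm.ISend (send, (rank+1) % size, tag);      // operator+= replaces Append
  requests += comm.IRecv (recv, (rank+size-1) % size, tag);
  requests.WaitAll();                                       // replaces deprecated MyMPI_WaitAll(requests)
}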

View File

@@ -125,14 +125,15 @@ namespace ngcore
}
};
[[deprecated("use requests.WaitAll instread")]]
inline void MyMPI_WaitAll (FlatArray<NG_MPI_Request> requests)
{
static Timer t("MPI - WaitAll"); RegionTimer reg(t);
if (!requests.Size()) return;
NG_MPI_Waitall (requests.Size(), requests.Data(), NG_MPI_STATUSES_IGNORE);
}
[[deprecated("use requests.WaitAny instread")]]
inline int MyMPI_WaitAny (FlatArray<NG_MPI_Request> requests)
{
int nr;
@@ -403,7 +404,7 @@ namespace ngcore
}
template <class T>
NgMPI_Request IBcast (FlatArray<T> d, int root = 0) const
[[nodiscard]] NgMPI_Request IBcast (FlatArray<T> d, int root = 0) const
{
NG_MPI_Request request;
int ds = d.Size();
@@ -416,7 +417,7 @@ namespace ngcore
void AllToAll (FlatArray<T> send, FlatArray<T> recv) const
{
NG_MPI_Alltoall (send.Data(), 1, GetMPIType<T>(),
recv.Data(), 1, GetMPIType<T>(), comm);
recv.Data(), 1, GetMPIType<T>(), comm);
}
@@ -425,7 +426,7 @@ namespace ngcore
{
if (size == 1) return;
NG_MPI_Scatter (send.Data(), 1, GetMPIType<T>(),
NG_MPI_IN_PLACE, -1, GetMPIType<T>(), 0, comm);
NG_MPI_IN_PLACE, -1, GetMPIType<T>(), 0, comm);
}
template <typename T>
@@ -433,7 +434,7 @@ namespace ngcore
{
if (size == 1) return;
NG_MPI_Scatter (NULL, 0, GetMPIType<T>(),
&recv, 1, GetMPIType<T>(), 0, comm);
&recv, 1, GetMPIType<T>(), 0, comm);
}
template <typename T>
@@ -442,7 +443,7 @@ namespace ngcore
recv[0] = T(0);
if (size == 1) return;
NG_MPI_Gather (NG_MPI_IN_PLACE, 1, GetMPIType<T>(),
recv.Data(), 1, GetMPIType<T>(), 0, comm);
recv.Data(), 1, GetMPIType<T>(), 0, comm);
}
template <typename T>
@@ -483,16 +484,16 @@ namespace ngcore
recv_data = DynamicTable<T> (recv_sizes, true);
Array<NG_MPI_Request> requests;
NgMPI_Requests requests;
for (int dest = 0; dest < size; dest++)
if (dest != rank && send_data[dest].Size())
requests.Append (ISend (FlatArray<T>(send_data[dest]), dest, tag));
requests += ISend (FlatArray<T>(send_data[dest]), dest, tag);
for (int dest = 0; dest < size; dest++)
if (dest != rank && recv_data[dest].Size())
requests.Append (IRecv (FlatArray<T>(recv_data[dest]), dest, tag));
requests += IRecv (FlatArray<T>(recv_data[dest]), dest, tag);
MyMPI_WaitAll (requests);
requests.WaitAll();
}
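
A hedged caller-side sketch of the table exchange implemented above (invoked as comm.ExchangeTable in the coarse-grid update code further down); the payload, the Rank() accessor and the reuse of NG_MPI_TAG_MESH+9 are illustrative assumptions:

// Each rank fills one row per destination; after ExchangeTable, recv[src]
// holds whatever rank src added to its row for this rank.
// (same assumed ngcore header as in the first sketch)
void ExchangeRanks (const NgMPI_Comm & comm)
{
  int ntasks = comm.Size();
  DynamicTable<int> send(ntasks), recv(ntasks);
  for (int dest = 0; dest < ntasks; dest++)
    if (dest != comm.Rank())
      send.Add (dest, comm.Rank());                          // illustrative payload
  comm.ExchangeTable (send, recv, NG_MPI_TAG_MESH+9);        // tag value as used further down in this commit
  // recv[dest] now contains the entries sent by rank dest
}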

View File

@@ -222,8 +222,8 @@ namespace netgen
int dim = GetDimension();
comm.Bcast(dim);
Array<NG_MPI_Request> sendrequests(8*(ntasks-1));
sendrequests.SetSize0();
NgMPI_Requests sendrequests; // (8*(ntasks-1));
// sendrequests.SetSize0();
// If the topology is not already updated, we do not need to
// build edges/faces.
@@ -457,7 +457,7 @@ namespace netgen
{
NgFlatArray<PointIndex> verts = verts_of_proc[dest];
// sendrequests.Append (MyMPI_ISend (verts, dest, NG_MPI_TAG_MESH+1, comm));
sendrequests.Append (comm.ISend (FlatArray<PointIndex>(verts), dest, NG_MPI_TAG_MESH+1));
sendrequests += comm.ISend (FlatArray<PointIndex>(verts), dest, NG_MPI_TAG_MESH+1);
NG_MPI_Datatype mptype = MeshPoint::MyGetMPIType();
@@ -473,7 +473,7 @@ namespace netgen
NG_MPI_Request request;
NG_MPI_Isend( points.Data(), 1, point_types[dest-1], dest, NG_MPI_TAG_MESH+1, comm, &request);
sendrequests.Append (request);
sendrequests += request;
}
@@ -533,11 +533,11 @@ namespace netgen
}
}
}
Array<NG_MPI_Request> req_per;
NgMPI_Requests req_per;
for(int dest = 1; dest < ntasks; dest++)
// req_per.Append(MyMPI_ISend(pp_data[dest], dest, NG_MPI_TAG_MESH+1, comm));
req_per.Append(comm.ISend(FlatArray<int>(pp_data[dest]), dest, NG_MPI_TAG_MESH+1));
MyMPI_WaitAll(req_per);
req_per += comm.ISend(FlatArray<int>(pp_data[dest]), dest, NG_MPI_TAG_MESH+1);
req_per.WaitAll();
PrintMessage ( 3, "Sending Vertices - distprocs");
@@ -570,7 +570,7 @@ namespace netgen
tbuilddistpnums.Stop();
for ( int dest = 1; dest < ntasks; dest ++ )
sendrequests.Append (comm.ISend (distpnums[dest], dest, NG_MPI_TAG_MESH+1));
sendrequests += comm.ISend (distpnums[dest], dest, NG_MPI_TAG_MESH+1);
@@ -604,7 +604,7 @@ namespace netgen
tbuildelementtable.Stop();
for (int dest = 1; dest < ntasks; dest ++ )
sendrequests.Append (comm.ISend (elementarrays[dest], dest, NG_MPI_TAG_MESH+2));
sendrequests += comm.ISend (elementarrays[dest], dest, NG_MPI_TAG_MESH+2);
PrintMessage ( 3, "Sending Face Descriptors" );
@@ -621,7 +621,7 @@ namespace netgen
}
for (int dest = 1; dest < ntasks; dest++)
sendrequests.Append (comm.ISend (fddata, dest, NG_MPI_TAG_MESH+3));
sendrequests += comm.ISend (fddata, dest, NG_MPI_TAG_MESH+3);
/** Surface Elements **/
@@ -697,7 +697,7 @@ namespace netgen
});
// distribute sel data
for (int dest = 1; dest < ntasks; dest++)
sendrequests.Append (comm.ISend(selbuf[dest], dest, NG_MPI_TAG_MESH+4));
sendrequests += comm.ISend(selbuf[dest], dest, NG_MPI_TAG_MESH+4);
/** Segments **/
@@ -849,7 +849,7 @@ namespace netgen
});
// distribute segment data
for (int dest = 1; dest < ntasks; dest++)
sendrequests.Append (comm.ISend(segm_buf[dest], dest, NG_MPI_TAG_MESH+5));
sendrequests += comm.ISend(segm_buf[dest], dest, NG_MPI_TAG_MESH+5);
/** Point-Elements **/
PrintMessage ( 3, "Point-Elements ...");
@@ -870,11 +870,11 @@ namespace netgen
iterate_zdes([&](const auto & pack, auto dest) { zde_buf.Add(dest, pack); });
for (int dest = 1; dest < ntasks; dest++)
{ sendrequests.Append (comm.ISend(zde_buf[dest], dest, NG_MPI_TAG_MESH+6)); }
sendrequests += comm.ISend(zde_buf[dest], dest, NG_MPI_TAG_MESH+6);
PrintMessage ( 3, "now wait ...");
MyMPI_WaitAll (sendrequests);
sendrequests.WaitAll();
// clean up MPI-datatypes we allocated earlier
for (auto t : point_types)
@@ -885,20 +885,6 @@ namespace netgen
paralleltop -> EnumeratePointsGlobally();
PrintMessage ( 3, "Sending names");
#ifdef OLD
sendrequests.SetSize(3*ntasks);
/** Send bc/mat/cd*-names **/
// nr of names
ArrayMem<int,4> nnames{0,0,0,0};
nnames[0] = materials.Size();
nnames[1] = bcnames.Size();
nnames[2] = GetNCD2Names();
nnames[3] = GetNCD3Names();
int tot_nn = nnames[0] + nnames[1] + nnames[2] + nnames[3];
for( int k = 1; k < ntasks; k++)
sendrequests[k] = comm.ISend(nnames, k, NG_MPI_TAG_MESH+7);
#endif
// sendrequests.SetSize(3);
/** Send bc/mat/cd*-names **/
// nr of names
std::array<int,4> nnames{0,0,0,0};
@@ -907,12 +893,10 @@ namespace netgen
nnames[2] = GetNCD2Names();
nnames[3] = GetNCD3Names();
int tot_nn = nnames[0] + nnames[1] + nnames[2] + nnames[3];
// sendrequests[0] = comm.IBcast (nnames);
NgMPI_Requests requ;
requ += comm.IBcast (nnames);
// (void) NG_MPI_Isend(nnames, 4, NG_MPI_INT, k, NG_MPI_TAG_MESH+6, comm, &sendrequests[k]);
auto iterate_names = [&](auto func) {
for (int k = 0; k < nnames[0]; k++) func(materials[k]);
for (int k = 0; k < nnames[1]; k++) func(bcnames[k]);
@@ -923,11 +907,7 @@ namespace netgen
Array<int> name_sizes(tot_nn);
tot_nn = 0;
iterate_names([&](auto ptr) { name_sizes[tot_nn++] = (ptr==NULL) ? 0 : ptr->size(); });
/*
for( int k = 1; k < ntasks; k++)
(void) NG_MPI_Isend(&name_sizes[0], tot_nn, NG_MPI_INT, k, NG_MPI_TAG_MESH+7, comm, &sendrequests[k]);
*/
// sendrequests[1] = comm.IBcast (name_sizes);
requ += comm.IBcast (name_sizes);
// names
int strs = 0;
@@ -940,16 +920,10 @@ namespace netgen
for (int j=0; j < name.size(); j++) compiled_names[strs++] = name[j];
});
/*
for( int k = 1; k < ntasks; k++)
(void) NG_MPI_Isend(&(compiled_names[0]), strs, NG_MPI_CHAR, k, NG_MPI_TAG_MESH+7, comm, &sendrequests[ntasks+k]);
*/
// sendrequests[2] = comm.IBcast (compiled_names);
requ += comm.IBcast (compiled_names);
PrintMessage ( 3, "wait for names");
// MyMPI_WaitAll (sendrequests);
requ.WaitAll();
comm.Barrier();
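
The name distribution above now posts collective IBcast calls instead of per-rank ISends, and IBcast is marked [[nodiscard]], so the returned request has to be kept. A minimal root-side sketch of that pattern; the function wrapper, the buffer names and the Array<char> type for compiled_names are illustrative assumptions, and the matching calls on the other ranks are not part of this hunk:

#include <array>
// (same assumed ngcore header as in the first sketch)

// Post the three broadcasts shown above and wait on all of them together.
void BroadcastNames (const NgMPI_Comm & comm,
                     std::array<int,4> & nnames,     // counts of material/bc/cd2/cd3 names
                     Array<int> & name_sizes,        // length of each name
                     Array<char> & compiled_names)   // all names, concatenated
{
  NgMPI_Requests requ;
  requ += comm.IBcast (nnames);          // discarding the request would now trigger a [[nodiscard]] warning
  requ += comm.IBcast (name_sizes);
  requ += comm.IBcast (compiled_names);
  requ.WaitAll();                        // replaces MyMPI_WaitAll on a request array
}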

View File

@@ -1,6 +1,3 @@
// #ifdef PARALLEL
#include <meshing.hpp>
#include "paralleltop.hpp"
@@ -138,7 +135,6 @@ namespace netgen
for (auto p : dps)
send_data[p][nsend[p]++] = L2G(pi);
// Array<NG_MPI_Request> requests;
NgMPI_Requests requests;
for (int i = 0; i < comm.Size(); i++)
{
@@ -503,7 +499,6 @@ namespace netgen
}
DynamicTable<int> recv_verts(ntasks);
// MyMPI_ExchangeTable (send_verts, recv_verts, NG_MPI_TAG_MESH+9, comm);
comm.ExchangeTable (send_verts, recv_verts, NG_MPI_TAG_MESH+9);
for (int dest = 0; dest < ntasks; dest++)
@@ -696,12 +691,8 @@ namespace netgen
}
}
// cout << "UpdateCoarseGrid - edges mpi-exchange" << endl;
// TABLE<int> recv_edges(ntasks);
DynamicTable<int> recv_edges(ntasks);
// MyMPI_ExchangeTable (send_edges, recv_edges, NG_MPI_TAG_MESH+9, comm);
comm.ExchangeTable (send_edges, recv_edges, NG_MPI_TAG_MESH+9);
// cout << "UpdateCoarseGrid - edges mpi-exchange done" << endl;
for (int dest = 0; dest < ntasks; dest++)
{
@@ -806,12 +797,8 @@ namespace netgen
}
}
// cout << "UpdateCoarseGrid - faces mpi-exchange" << endl;
// TABLE<int> recv_faces(ntasks);
DynamicTable<int> recv_faces(ntasks);
// MyMPI_ExchangeTable (send_faces, recv_faces, NG_MPI_TAG_MESH+9, comm);
comm.ExchangeTable (send_faces, recv_faces, NG_MPI_TAG_MESH+9);
// cout << "UpdateCoarseGrid - faces mpi-exchange done" << endl;
for (int dest = 0; dest < ntasks; dest++)
{
@@ -848,4 +835,3 @@
}
// #endif