bcast material etc names using ibcast

Joachim Schoeberl 2024-11-26 13:29:14 +01:00
parent a86d231714
commit 7570468686
7 changed files with 90 additions and 14 deletions

View File

@@ -5,6 +5,7 @@ functions = [
     ("int", "MPI_Alltoall", "void*", "int", "MPI_Datatype", "void*", "int", "MPI_Datatype", "MPI_Comm"),
     ("int", "MPI_Barrier", "MPI_Comm"),
     ("int", "MPI_Bcast", "void*", "int", "MPI_Datatype", "int", "MPI_Comm"),
+    ("int", "MPI_Ibcast", "void*", "int", "MPI_Datatype", "int", "MPI_Comm", "MPI_Request*"),
     ("int", "MPI_Comm_c2f", "MPI_Comm"),
     ("int", "MPI_Comm_create", "MPI_Comm", "MPI_Group", "MPI_Comm*"),
     ("int", "MPI_Comm_create_group", "MPI_Comm", "MPI_Group", "int", "MPI_Comm*"),

View File

@@ -307,9 +307,18 @@ namespace ngcore
       NG_MPI_Bcast (&s, 1, GetMPIType<T>(), root, comm);
     }

+    template <class T, size_t S>
+    void Bcast (std::array<T,S> & d, int root = 0) const
+    {
+      if (size == 1) return;
+      if (S != 0)
+        NG_MPI_Bcast (&d[0], S, GetMPIType<T>(), root, comm);
+    }
+
     template <class T>
-    void Bcast (Array<T> & d, int root = 0)
+    void Bcast (Array<T> & d, int root = 0) const
     {
       if (size == 1) return;
@@ -330,6 +339,25 @@
       NG_MPI_Bcast (&s[0], len, NG_MPI_CHAR, root, comm);
     }

+    template <class T, size_t S>
+    NG_MPI_Request IBcast (std::array<T,S> & d, int root = 0) const
+    {
+      NG_MPI_Request request;
+      NG_MPI_Ibcast (&d[0], S, GetMPIType<T>(), root, comm, &request);
+      return request;
+    }
+
+    template <class T>
+    NG_MPI_Request IBcast (FlatArray<T> d, int root = 0) const
+    {
+      NG_MPI_Request request;
+      int ds = d.Size();
+      NG_MPI_Ibcast (d.Data(), ds, GetMPIType<T>(), root, comm, &request);
+      return request;
+    }
+
     template <typename T>
     void AllToAll (FlatArray<T> send, FlatArray<T> recv) const
     {
@@ -506,9 +534,18 @@ namespace ngcore
     template <typename T>
     void Bcast (T & s, int root = 0) const { ; }

+    template <class T, size_t S>
+    void Bcast (std::array<T,S> & d, int root = 0) const {}
+
     template <class T>
-    void Bcast (Array<T> & d, int root = 0) { ; }
+    void Bcast (Array<T> & d, int root = 0) const { ; }
+
+    template <class T, size_t S>
+    NG_MPI_Request IBcast (std::array<T,S> & d, int root = 0) const { return 0; }
+
+    template <class T>
+    NG_MPI_Request IBcast (FlatArray<T> d, int root = 0) const { return 0; }
+
     template <typename T>
     void AllGather (T val, FlatArray<T> recv) const
     {
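The new IBcast overloads wrap NG_MPI_Ibcast, the non-blocking broadcast: they return immediately with an NG_MPI_Request, and the buffer must not be touched until that request has completed. Note that, unlike Bcast, they post the operation even on a single-rank communicator, while the dummy (MPI-free) communicator versions simply return an empty request. A minimal usage sketch (not part of this commit, and assuming an NgMPI_Comm comm with the Rank() accessor and the MyMPI_WaitAll helper used later in this commit):

    // Hypothetical illustration only: rank 0 fills a fixed-size header,
    // every rank posts the same IBcast, and local work overlaps the transfer.
    std::array<int,4> header{0,0,0,0};
    if (comm.Rank() == 0)
      header = {1, 2, 3, 4};

    Array<NG_MPI_Request> requests;
    requests.Append (comm.IBcast (header));   // root defaults to 0

    // ... independent local work can run here ...

    MyMPI_WaitAll (requests);                 // header is now valid on all ranks

Since IBcast is collective, every rank of the communicator has to post it, in the same order relative to other collectives.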

View File

@@ -5,6 +5,7 @@ NGCORE_API extern int (*NG_MPI_Allreduce)(void*, void*, int, NG_MPI_Datatype, NG
 NGCORE_API extern int (*NG_MPI_Alltoall)(void*, int, NG_MPI_Datatype, void*, int, NG_MPI_Datatype, NG_MPI_Comm);
 NGCORE_API extern int (*NG_MPI_Barrier)(NG_MPI_Comm);
 NGCORE_API extern int (*NG_MPI_Bcast)(void*, int, NG_MPI_Datatype, int, NG_MPI_Comm);
+NGCORE_API extern int (*NG_MPI_Ibcast)(void*, int, NG_MPI_Datatype, int, NG_MPI_Comm, NG_MPI_Request*);
 NGCORE_API extern int (*NG_MPI_Comm_c2f)(NG_MPI_Comm);
 NGCORE_API extern int (*NG_MPI_Comm_create)(NG_MPI_Comm, NG_MPI_Group, NG_MPI_Comm*);
 NGCORE_API extern int (*NG_MPI_Comm_create_group)(NG_MPI_Comm, NG_MPI_Group, int, NG_MPI_Comm*);
@@ -81,6 +82,7 @@ NGCORE_API extern void* NG_MPI_IN_PLACE;
 #define NG_MPI_Alltoall MPI_Alltoall
 #define NG_MPI_Barrier MPI_Barrier
 #define NG_MPI_Bcast MPI_Bcast
+#define NG_MPI_Ibcast MPI_Ibcast
 #define NG_MPI_Comm_c2f MPI_Comm_c2f
 #define NG_MPI_Comm_create MPI_Comm_create
 #define NG_MPI_Comm_create_group MPI_Comm_create_group
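This header appears to cover two build modes: when ngcore loads MPI at runtime, the NG_MPI_* names refer to the extern function pointers declared above; when it is compiled directly against an MPI library, the #defines alias them to the native MPI symbols, so NG_MPI_Ibcast becomes a plain MPI_Ibcast call.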

View File

@@ -4,6 +4,7 @@ decltype(NG_MPI_Allreduce) NG_MPI_Allreduce = [](void*, void*, int, NG_MPI_Datat
 decltype(NG_MPI_Alltoall) NG_MPI_Alltoall = [](void*, int, NG_MPI_Datatype, void*, int, NG_MPI_Datatype, NG_MPI_Comm)->int { throw no_mpi(); };
 decltype(NG_MPI_Barrier) NG_MPI_Barrier = [](NG_MPI_Comm)->int { throw no_mpi(); };
 decltype(NG_MPI_Bcast) NG_MPI_Bcast = [](void*, int, NG_MPI_Datatype, int, NG_MPI_Comm)->int { throw no_mpi(); };
+decltype(NG_MPI_Ibcast) NG_MPI_Ibcast = [](void*, int, NG_MPI_Datatype, int, NG_MPI_Comm, NG_MPI_Request*)->int { throw no_mpi(); };
 decltype(NG_MPI_Comm_c2f) NG_MPI_Comm_c2f = [](NG_MPI_Comm)->int { throw no_mpi(); };
 decltype(NG_MPI_Comm_create) NG_MPI_Comm_create = [](NG_MPI_Comm, NG_MPI_Group, NG_MPI_Comm*)->int { throw no_mpi(); };
 decltype(NG_MPI_Comm_create_group) NG_MPI_Comm_create_group = [](NG_MPI_Comm, NG_MPI_Group, int, NG_MPI_Comm*)->int { throw no_mpi(); };

View File

@@ -4,6 +4,7 @@ NG_MPI_Allreduce = [](void* arg0, void* arg1, int arg2, NG_MPI_Datatype arg3, NG
 NG_MPI_Alltoall = [](void* arg0, int arg1, NG_MPI_Datatype arg2, void* arg3, int arg4, NG_MPI_Datatype arg5, NG_MPI_Comm arg6)->int { return MPI_Alltoall( arg0, arg1, ng2mpi(arg2), arg3, arg4, ng2mpi(arg5), ng2mpi(arg6)); };
 NG_MPI_Barrier = [](NG_MPI_Comm arg0)->int { return MPI_Barrier( ng2mpi(arg0)); };
 NG_MPI_Bcast = [](void* arg0, int arg1, NG_MPI_Datatype arg2, int arg3, NG_MPI_Comm arg4)->int { return MPI_Bcast( arg0, arg1, ng2mpi(arg2), arg3, ng2mpi(arg4)); };
+NG_MPI_Ibcast = [](void* arg0, int arg1, NG_MPI_Datatype arg2, int arg3, NG_MPI_Comm arg4, NG_MPI_Request* arg5)->int { return MPI_Ibcast( arg0, arg1, ng2mpi(arg2), arg3, ng2mpi(arg4), ng2mpi(arg5)); };
 NG_MPI_Comm_c2f = [](NG_MPI_Comm arg0)->int { return MPI_Comm_c2f( ng2mpi(arg0)); };
 NG_MPI_Comm_create = [](NG_MPI_Comm arg0, NG_MPI_Group arg1, NG_MPI_Comm* arg2)->int { return MPI_Comm_create( ng2mpi(arg0), ng2mpi(arg1), ng2mpi(arg2)); };
 NG_MPI_Comm_create_group = [](NG_MPI_Comm arg0, NG_MPI_Group arg1, int arg2, NG_MPI_Comm* arg3)->int { return MPI_Comm_create_group( ng2mpi(arg0), ng2mpi(arg1), arg2, ng2mpi(arg3)); };
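Together, the last two files give each wrapped function its two bindings: the default stub (previous file) throws no_mpi(), so calling any NG_MPI_* entry point without an MPI runtime fails with a clear error, while the binding above, installed at initialization, converts the NG_MPI_* handle types to native MPI ones via the ng2mpi overloads. The ng2mpi(arg5) call implies such an overload also exists for NG_MPI_Request*.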

View File

@@ -789,7 +789,7 @@ namespace netgen
         return count;
       };

-      for (auto i : Range(5)) {
+      for ([[maybe_unused]] auto i : Range(5)) {
        auto num_bad_segs = num_nonconforming();
        PrintMessage(1, "Non-conforming free segments in domain ", domain, ": ", num_bad_segs);
@@ -799,7 +799,7 @@
        MeshingParameters dummymp;
        MeshOptimize3d optmesh(mesh, dummymp, OPT_CONFORM);
-       for (auto i : Range(3)) {
+       for ([[maybe_unused]] auto i : Range(3)) {
          optmesh.SwapImprove2 ();
          optmesh.SwapImprove();
          optmesh.CombineImprove();
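These two loop changes are unrelated warning fixes: the index i only counts optimization passes and is never read inside either loop body, so [[maybe_unused]] silences the compiler's unused-variable warning without restructuring the loops.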

View File

@@ -885,6 +885,7 @@ namespace netgen
      paralleltop -> EnumeratePointsGlobally();

      PrintMessage ( 3, "Sending names");
+#ifdef OLD
      sendrequests.SetSize(3*ntasks);
      /** Send bc/mat/cd*-names **/
      // nr of names
@@ -896,6 +897,18 @@
      int tot_nn = nnames[0] + nnames[1] + nnames[2] + nnames[3];
      for( int k = 1; k < ntasks; k++)
        sendrequests[k] = comm.ISend(nnames, k, NG_MPI_TAG_MESH+7);
+#endif
+      sendrequests.SetSize(3);
+      /** Send bc/mat/cd*-names **/
+      // nr of names
+      std::array<int,4> nnames{0,0,0,0};
+      nnames[0] = materials.Size();
+      nnames[1] = bcnames.Size();
+      nnames[2] = GetNCD2Names();
+      nnames[3] = GetNCD3Names();
+      int tot_nn = nnames[0] + nnames[1] + nnames[2] + nnames[3];
+      sendrequests[0] = comm.IBcast (nnames);
+      // (void) NG_MPI_Isend(nnames, 4, NG_MPI_INT, k, NG_MPI_TAG_MESH+6, comm, &sendrequests[k]);

      auto iterate_names = [&](auto func) {
        for (int k = 0; k < nnames[0]; k++) func(materials[k]);
@@ -904,24 +917,31 @@
        for (int k = 0; k < nnames[3]; k++) func(cd3names[k]);
      };

      // sizes of names
-      NgArray<int> name_sizes(tot_nn);
+      Array<int> name_sizes(tot_nn);
      tot_nn = 0;
      iterate_names([&](auto ptr) { name_sizes[tot_nn++] = (ptr==NULL) ? 0 : ptr->size(); });
+      /*
      for( int k = 1; k < ntasks; k++)
-        (void) NG_MPI_Isend(&name_sizes[0], tot_nn, NG_MPI_INT, k, NG_MPI_TAG_MESH+7, comm, &sendrequests[ntasks+k]);
+        (void) NG_MPI_Isend(&name_sizes[0], tot_nn, NG_MPI_INT, k, NG_MPI_TAG_MESH+7, comm, &sendrequests[k]);
+      */
+      sendrequests[1] = comm.IBcast (name_sizes);

      // names
      int strs = 0;
      iterate_names([&](auto ptr) { strs += (ptr==NULL) ? 0 : ptr->size(); });
-      NgArray<char> compiled_names(strs);
+      Array<char> compiled_names(strs);
      strs = 0;
      iterate_names([&](auto ptr) {
        if (ptr==NULL) return;
        auto& name = *ptr;
        for (int j=0; j < name.size(); j++) compiled_names[strs++] = name[j];
      });
-      for( int k = 1; k < ntasks; k++)
-        (void) NG_MPI_Isend(&(compiled_names[0]), strs, NG_MPI_CHAR, k, NG_MPI_TAG_MESH+7, comm, &sendrequests[2*ntasks+k]);
+      /*
+      for( int k = 1; k < ntasks; k++)
+        (void) NG_MPI_Isend(&(compiled_names[0]), strs, NG_MPI_CHAR, k, NG_MPI_TAG_MESH+7, comm, &sendrequests[ntasks+k]);
+      */
+      sendrequests[2] = comm.IBcast (compiled_names);

      PrintMessage ( 3, "wait for names");
      MyMPI_WaitAll (sendrequests);
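This hunk is the heart of the commit: on the sending side, the 3*ntasks point-to-point ISends of the old code (preserved above under #ifdef OLD and in comments) are replaced by exactly three collective IBcasts, one for the four name counts, one for the per-name sizes, and one for the concatenated name characters. The receiving ranks below post the same three IBcasts in the same order, which is how non-blocking collectives are matched. The NgArray buffers are changed to Array along the way, since Array converts to the FlatArray that comm.IBcast accepts.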
@@ -1182,9 +1202,17 @@ namespace netgen
      // paralleltop -> SetNV_Loc2Glob (0);
      paralleltop -> EnumeratePointsGlobally();

      /** Recv bc-names **/
+      /*
      ArrayMem<int,4> nnames{0,0,0,0};
      // NG_MPI_Recv(nnames, 4, NG_MPI_INT, 0, NG_MPI_TAG_MESH+6, comm, NG_MPI_STATUS_IGNORE);
      comm.Recv(nnames, 0, NG_MPI_TAG_MESH+7);
+      */
+      Array<NG_MPI_Request> recvrequests(1);
+      std::array<int,4> nnames;
+      recvrequests[0] = comm.IBcast (nnames);
+      MyMPI_WaitAll (recvrequests);
      // cout << "nnames = " << FlatArray(nnames) << endl;

      materials.SetSize(nnames[0]);
      bcnames.SetSize(nnames[1]);
@@ -1192,14 +1220,20 @@
      cd3names.SetSize(nnames[3]);
      int tot_nn = nnames[0] + nnames[1] + nnames[2] + nnames[3];

-      NgArray<int> name_sizes(tot_nn);
-      NG_MPI_Recv(&name_sizes[0], tot_nn, NG_MPI_INT, 0, NG_MPI_TAG_MESH+7, comm, NG_MPI_STATUS_IGNORE);
+      Array<int> name_sizes(tot_nn);
+      // NG_MPI_Recv(&name_sizes[0], tot_nn, NG_MPI_INT, 0, NG_MPI_TAG_MESH+7, comm, NG_MPI_STATUS_IGNORE);
+      recvrequests[0] = comm.IBcast (name_sizes);
+      MyMPI_WaitAll (recvrequests);

      int tot_size = 0;
      for (int k = 0; k < tot_nn; k++) tot_size += name_sizes[k];

-      NgArray<char> compiled_names(tot_size);
-      NG_MPI_Recv(&(compiled_names[0]), tot_size, NG_MPI_CHAR, 0, NG_MPI_TAG_MESH+7, comm, NG_MPI_STATUS_IGNORE);
+      // NgArray<char> compiled_names(tot_size);
+      // NG_MPI_Recv(&(compiled_names[0]), tot_size, NG_MPI_CHAR, 0, NG_MPI_TAG_MESH+7, comm, NG_MPI_STATUS_IGNORE);
+      Array<char> compiled_names(tot_size);
+      recvrequests[0] = comm.IBcast (compiled_names);
+      MyMPI_WaitAll (recvrequests);

      tot_nn = tot_size = 0;
      auto write_names = [&] (auto & array) {
        for (int k = 0; k < array.Size(); k++) {
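The write_names lambda is cut off by this excerpt; conceptually it walks compiled_names, consuming name_sizes entries one at a time to rebuild each name string. A rough sketch of that unpacking step (names and details assumed for illustration, not taken from the commit):

    // Hypothetical sketch: slice the flat char buffer received above back
    // into individual strings, using the per-name sizes received before it.
    int pos = 0, idx = 0;
    auto next_name = [&]() -> std::string {
      std::string name(&compiled_names[pos], (size_t)name_sizes[idx]);
      pos += name_sizes[idx++];
      return name;
    };
    // e.g. materials[k] would then be rebuilt from next_name() for k < nnames[0],
    // followed by bcnames, cd2names and cd3names, in the same order as on the sender.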