Fewer ifdefs for MPI. More consistent use of mpi_interface instead of direct MPI calls.

Lukas 2019-01-30 11:27:21 +01:00
parent ff847e6eff
commit 0f4ca1c7a7
5 changed files with 41 additions and 38 deletions
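The recurring pattern in the diffs below: call sites that used to guard MPI_Comm_rank / MPI_Comm_size with #ifdef PARALLEL now go through the mpi_interface wrappers, whose serial dummies return rank 0 and size 1. A hedged before/after sketch of that pattern (mirroring the Mesh archive hunk further down):

    // Before: every call site carries its own #ifdef PARALLEL guard.
    int rank, ntasks;
    #ifdef PARALLEL
      MPI_Comm_rank(comm, &rank);
      MPI_Comm_size(comm, &ntasks);
    #else
      rank = 0; ntasks = 1;
    #endif

    // After: the wrapper hides the fallback, so the call site is ifdef-free.
    int rank   = MyMPI_GetId    (comm);   // 0 in serial builds
    int ntasks = MyMPI_GetNTasks(comm);   // 1 in serial builds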

View File

@@ -19,15 +19,17 @@ namespace netgen
#ifndef PARALLEL
typedef int MPI_Comm;
enum { MPI_COMM_WORLD = 12345, MPI_COMM_NULL = 0};
inline int MyMPI_GetNTasks (MPI_Comm comm = ng_comm) { return 1; }
inline int MyMPI_GetId (MPI_Comm comm = ng_comm) { return 0; }
#endif
/** This is the "standard" communicator that will be used for netgen-objects. **/
extern MPI_Comm ng_comm;
#ifndef PARALLEL
enum { MPI_COMM_WORLD = 12345, MPI_COMM_NULL = 0};
inline int MyMPI_GetNTasks (MPI_Comm comm = ng_comm) { return 1; }
inline int MyMPI_GetId (MPI_Comm comm = ng_comm) { return 0; }
#endif
#ifdef PARALLEL
@@ -61,6 +63,10 @@ namespace netgen
inline MPI_Datatype MyGetMPIType<double> ( )
{ return MPI_DOUBLE; }
template <>
inline MPI_Datatype MyGetMPIType<char> ( )
{ return MPI_CHAR; }
inline void MyMPI_Send (int i, int dest, int tag, MPI_Comm comm = ng_comm)
{
@@ -284,7 +290,7 @@ namespace netgen
{
int size = s.Size();
MyMPI_Bcast (size, comm);
if (id != 0) s.SetSize (size);
if (MyMPI_GetId(comm) != 0) s.SetSize (size);
MPI_Bcast (&s[0], size, MyGetMPIType<T>(), 0, comm);
}
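A usage sketch of the Array broadcast above, assuming a parallel build and an Array<T> exposing Size()/SetSize(): rank 0 fills the buffer, the size is broadcast first, receivers resize, then the payload goes out with the datatype from MyGetMPIType<char>().

    Array<char> data;
    if (MyMPI_GetId(comm) == 0)
      {
        data.SetSize(5);
        memcpy(&data[0], "hello", 5);   // placeholder payload on the master
      }
    MyMPI_Bcast (data, comm);           // receivers are resized inside before MPI_Bcast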

View File

@@ -37,6 +37,9 @@
// max number of nodes per surface element
#define NG_SURFACE_ELEMENT_MAXPOINTS 8
#ifndef PARALLEL
typedef int MPI_Comm;
#endif
namespace netgen { extern MPI_Comm ng_comm; }
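With the dummy typedef in this public header, code that passes a communicator still compiles in serial builds, where MPI_Comm is just an int and MPI_COMM_WORLD the enum value 12345. A hedged serial call-site sketch (the mesh file name is a placeholder):

    MPI_Comm comm = MPI_COMM_WORLD;     // plain int in serial builds
    Ng_LoadMesh ("cube.vol", comm);     // signature as in the next file's hunks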

View File

@@ -141,7 +141,7 @@ void Ng_LoadMesh (const char * filename, MPI_Comm comm)
}
istream * infile;
char* buf; // for distributing geometry!
Array<char> buf; // for distributing geometry!
int strs;
if( id == 0) {
@@ -162,8 +162,9 @@ void Ng_LoadMesh (const char * filename, MPI_Comm comm)
geom_part << infile->rdbuf();
string geom_part_string = geom_part.str();
strs = geom_part_string.size();
buf = new char[strs];
memcpy(buf, geom_part_string.c_str(), strs*sizeof(char));
// buf = new char[strs];
buf.SetSize(strs);
memcpy(&buf[0], geom_part_string.c_str(), strs*sizeof(char));
}
delete infile;
@@ -238,15 +239,15 @@ void Ng_LoadMesh (const char * filename, MPI_Comm comm)
}
if(!ng_geometry && ntasks>1) {
/** Scatter the geometry-string **/
MPI_Bcast(&strs, 1, MPI_INT, 0, comm);
if(id!=0) buf = new char[strs];
MPI_Bcast(buf, strs, MPI_CHAR, 0, comm);
#ifdef PARALLEL
/** Scatter the geometry-string (no dummy-implementation in mpi_interface) **/
MyMPI_Bcast(buf, comm);
#endif
}
if(!ng_geometry) {
infile = new istringstream(string((const char*)buf, (size_t)strs));
delete[] buf;
infile = new istringstream(string((const char*)&buf[0], (size_t)strs));
// delete[] buf;
for (int i = 0; i < geometryregister.Size(); i++)
{
NetgenGeometry * hgeom = geometryregister[i]->LoadFromMeshFile (*infile);
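The net effect of switching buf from char* to Array<char>, as a hedged sketch in the context of Ng_LoadMesh (names follow the hunks above; the manual new[]/delete[] pair disappears):

    Array<char> buf;
    if (MyMPI_GetId(comm) == 0)
      {
        string geo = geom_part.str();          // serialized geometry on the master
        buf.SetSize(geo.size());
        memcpy(&buf[0], geo.c_str(), geo.size());
      }
    #ifdef PARALLEL
      MyMPI_Bcast (buf, comm);                 // broadcasts size and payload in one call
    #endif
    istringstream geomstream (string(&buf[0], (size_t)buf.Size()));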

View File

@@ -43,8 +43,8 @@ namespace netgen
bcnames.SetSize(0);
cd2names.SetSize(0);
#ifdef PARALLEL
this->comm = netgen :: ng_comm;
#ifdef PARALLEL
paralleltop = new ParallelMeshTopology (*this);
#endif
}
@@ -83,12 +83,10 @@ namespace netgen
#endif
}
#ifdef PARALLEL
void Mesh :: SetCommunicator(MPI_Comm acomm)
{
this->comm = acomm;
}
#endif
Mesh & Mesh :: operator= (const Mesh & mesh2)
{
@@ -1327,14 +1325,8 @@ namespace netgen
if (archive.Input())
{
int rank, ntasks;
#ifdef PARALLEL
MPI_Comm_size(this->comm, &ntasks);
MPI_Comm_rank(this->comm, &rank);
#else
rank = 0;
ntasks = 1;
#endif
int rank = MyMPI_GetId(GetCommunicator());
int ntasks = MyMPI_GetNTasks(GetCommunicator());
RebuildSurfaceElementLists();
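Since SetCommunicator and the rank/size queries are no longer fenced by #ifdef PARALLEL, callers can use them unconditionally; a hedged sketch assuming a netgen::Mesh instance:

    netgen::Mesh mesh;
    mesh.SetCommunicator (MPI_COMM_WORLD);                         // plain assignment in serial builds
    int rank   = netgen::MyMPI_GetId    (mesh.GetCommunicator());  // 0 without MPI
    int ntasks = netgen::MyMPI_GetNTasks(mesh.GetCommunicator());  // 1 without MPI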

View File

@@ -71,9 +71,10 @@ namespace netgen
void Mesh :: SendMesh () const
{
Array<MPI_Request> sendrequests;
int id = MyMPI_GetId(GetCommunicator());
int np = MyMPI_GetNTasks(GetCommunicator());
MPI_Comm comm = GetCommunicator();
int id = MyMPI_GetId(comm);
int ntasks = MyMPI_GetNTasks(comm);
int dim = GetDimension();
MyMPI_Bcast(dim, comm);
@@ -751,7 +752,7 @@ namespace netgen
NgProfiler::RegionTimer reg(timer);
int id = MyMPI_GetId(GetCommunicator());
int np = MyMPI_GetNTasks(GetCommunicator());
int ntasks = MyMPI_GetNTasks(GetCommunicator());
int dim;
MyMPI_Bcast(dim, comm);
@@ -1009,9 +1010,9 @@ namespace netgen
// call it only for the master !
void Mesh :: Distribute ()
{
MPI_Comm comm = this->comm;
MPI_Comm_size(comm, &ntasks);
MPI_Comm_rank(comm, &id);
MPI_Comm comm = GetCommunicator();
int id = MyMPI_GetId(comm);
int ntasks = MyMPI_GetNTasks(comm);
if (id != 0 || ntasks == 1 ) return;
@@ -1070,7 +1071,7 @@ namespace netgen
eptr.Append (eind.Size());
Array<idx_t> epart(ne), npart(nn);
idxtype nparts = ntasks-1;
idxtype nparts = MyMPI_GetNTasks(GetCommunicator())-1;
idxtype edgecut;
idxtype ncommon = 3;
@@ -1275,9 +1276,9 @@ namespace netgen
// call it only for the master !
void Mesh :: Distribute (Array<int> & volume_weights , Array<int> & surface_weights, Array<int> & segment_weights)
{
MPI_Comm comm = this->comm;
MPI_Comm_size(comm, &ntasks);
MPI_Comm_rank(comm, &id);
MPI_Comm comm = GetCommunicator();
int id = MyMPI_GetId(comm);
int ntasks = MyMPI_GetNTasks(comm);
if (id != 0 || ntasks == 1 ) return;
@@ -1367,7 +1368,7 @@ namespace netgen
eptr.Append (eind.Size());
Array<idx_t> epart(ne), npart(nn);
idxtype nparts = ntasks-1;
idxtype nparts = MyMPI_GetNTasks(GetCommunicator())-1;
idxtype edgecut;
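Taken together, both Distribute overloads now derive everything from the communicator instead of the id/ntasks members; a hedged sketch of the resulting preamble (one METIS partition per worker rank, since rank 0 acts as master):

    MPI_Comm comm = GetCommunicator();
    int id     = MyMPI_GetId    (comm);
    int ntasks = MyMPI_GetNTasks(comm);
    if (id != 0 || ntasks == 1)
      return;                                                 // only the master partitions and sends
    idxtype nparts = MyMPI_GetNTasks(GetCommunicator()) - 1;  // as in the hunks above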