Fewer ifdefs for MPI. More consistent use of mpi_interface instead of direct MPI calls.

Lukas 2019-01-30 11:27:21 +01:00
parent ff847e6eff
commit 0f4ca1c7a7
5 changed files with 41 additions and 38 deletions

File 1 of 5

@@ -19,15 +19,17 @@ namespace netgen
 #ifndef PARALLEL
   typedef int MPI_Comm;
-  enum { MPI_COMM_WORLD = 12345, MPI_COMM_NULL = 0};
-  inline int MyMPI_GetNTasks (MPI_Comm comm = ng_comm) { return 1; }
-  inline int MyMPI_GetId (MPI_Comm comm = ng_comm) { return 0; }
 #endif
 
   /** This is the "standard" communicator that will be used for netgen-objects. **/
   extern MPI_Comm ng_comm;
 
+#ifndef PARALLEL
+  enum { MPI_COMM_WORLD = 12345, MPI_COMM_NULL = 0};
+  inline int MyMPI_GetNTasks (MPI_Comm comm = ng_comm) { return 1; }
+  inline int MyMPI_GetId (MPI_Comm comm = ng_comm) { return 0; }
+#endif
+
 #ifdef PARALLEL
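The point of keeping serial dummies for MyMPI_GetNTasks and MyMPI_GetId is that downstream code can query rank and size without guarding every call site. A minimal caller sketch (hypothetical, not part of this commit; assumes a serial build where PARALLEL is not defined and the header above is included):

  #include <iostream>

  void PrintRankInfo ()
  {
    // With PARALLEL undefined, MPI_Comm is a plain int and the dummies above
    // always report a single-rank "communicator".
    int id     = netgen::MyMPI_GetId (netgen::ng_comm);      // -> 0
    int ntasks = netgen::MyMPI_GetNTasks (netgen::ng_comm);  // -> 1
    std::cout << "rank " << id << " of " << ntasks << std::endl;
  }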
@@ -61,6 +63,10 @@ namespace netgen
   inline MPI_Datatype MyGetMPIType<double> ( )
   { return MPI_DOUBLE; }
 
+  template <>
+  inline MPI_Datatype MyGetMPIType<char> ( )
+  { return MPI_CHAR; }
+
   inline void MyMPI_Send (int i, int dest, int tag, MPI_Comm comm = ng_comm)
   {
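MyGetMPIType<T> maps a C++ type to its MPI datatype so the templated wrappers in mpi_interface can be written once for all element types; the new char specialization is what lets the Array<char> geometry buffer in Ng_LoadMesh (third file below) go through MyMPI_Bcast. A sketch of the idiom (illustrative name and signature, assuming PARALLEL is defined):

  // Illustrative generic broadcast in the style of mpi_interface: the datatype
  // comes from MyGetMPIType<T>, so T = char now works as well.
  template <typename T>
  inline void BcastOneValue (T & val, MPI_Comm comm)
  {
    MPI_Bcast (&val, 1, MyGetMPIType<T>(), 0, comm);
  }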
@@ -284,7 +290,7 @@ namespace netgen
   {
     int size = s.Size();
     MyMPI_Bcast (size, comm);
-    if (id != 0) s.SetSize (size);
+    if (MyMPI_GetId(comm) != 0) s.SetSize (size);
     MPI_Bcast (&s[0], size, MyGetMPIType<T>(), 0, comm);
   }
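After this change the Array<T> broadcast no longer relies on an ambient id variable; the rank is derived from the communicator argument itself. A usage sketch (fragment, not part of the commit; comm is assumed to be the communicator in scope):

  // Rank 0 fills the array, every rank calls the same broadcast; the wrapper
  // resizes the array on the receiving ranks before the data transfer.
  Array<double> data;
  if (MyMPI_GetId(comm) == 0)
    data.SetSize (100);   // ... and fill it ...
  MyMPI_Bcast (data, comm);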

File 2 of 5

@@ -37,6 +37,9 @@
 // max number of nodes per surface element
 #define NG_SURFACE_ELEMENT_MAXPOINTS 8
 
+#ifndef PARALLEL
+typedef int MPI_Comm;
+#endif
 namespace netgen { extern MPI_Comm ng_comm; }

File 3 of 5

@@ -141,7 +141,7 @@ void Ng_LoadMesh (const char * filename, MPI_Comm comm)
   }
 
   istream * infile;
-  char* buf; // for distributing geometry!
+  Array<char> buf; // for distributing geometry!
   int strs;
 
   if( id == 0) {
@@ -162,8 +162,9 @@ void Ng_LoadMesh (const char * filename, MPI_Comm comm)
     geom_part << infile->rdbuf();
     string geom_part_string = geom_part.str();
     strs = geom_part_string.size();
-    buf = new char[strs];
-    memcpy(buf, geom_part_string.c_str(), strs*sizeof(char));
+    // buf = new char[strs];
+    buf.SetSize(strs);
+    memcpy(&buf[0], geom_part_string.c_str(), strs*sizeof(char));
   }
 
   delete infile;
@@ -238,15 +239,15 @@ void Ng_LoadMesh (const char * filename, MPI_Comm comm)
   }
 
   if(!ng_geometry && ntasks>1) {
-    /** Scatter the geometry-string **/
-    MPI_Bcast(&strs, 1, MPI_INT, 0, comm);
-    if(id!=0) buf = new char[strs];
-    MPI_Bcast(buf, strs, MPI_CHAR, 0, comm);
+#ifdef PARALLEL
+    /** Scatter the geometry-string (no dummy-implementation in mpi_interface) **/
+    MyMPI_Bcast(buf, comm);
+#endif
   }
 
   if(!ng_geometry) {
-    infile = new istringstream(string((const char*)buf, (size_t)strs));
-    delete[] buf;
+    infile = new istringstream(string((const char*)&buf[0], (size_t)strs));
+    // delete[] buf;
     for (int i = 0; i < geometryregister.Size(); i++)
       {
         NetgenGeometry * hgeom = geometryregister[i]->LoadFromMeshFile (*infile);
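The single MyMPI_Bcast call replaces the explicit size broadcast plus manual allocation, because the Array<T> overload (first file of this commit) already broadcasts the length and resizes the receiving ranks. A sketch of what the call amounts to for the geometry buffer:

    // Expansion of MyMPI_Bcast(buf, comm), roughly:
    int size = buf.Size();                           // meaningful on rank 0
    MyMPI_Bcast(size, comm);                         // everyone learns the length
    if(MyMPI_GetId(comm) != 0) buf.SetSize(size);    // receivers allocate
    MPI_Bcast(&buf[0], size, MyGetMPIType<char>(), 0, comm);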

File 4 of 5

@@ -43,8 +43,8 @@ namespace netgen
     bcnames.SetSize(0);
     cd2names.SetSize(0);
 
-#ifdef PARALLEL
     this->comm = netgen :: ng_comm;
+#ifdef PARALLEL
     paralleltop = new ParallelMeshTopology (*this);
 #endif
   }
@@ -83,12 +83,10 @@ namespace netgen
 #endif
   }
 
-#ifdef PARALLEL
   void Mesh :: SetCommunicator(MPI_Comm acomm)
   {
     this->comm = acomm;
   }
-#endif
 
   Mesh & Mesh :: operator= (const Mesh & mesh2)
   {
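SetCommunicator is now compiled in serial builds as well; this works because the header changes above provide typedef int MPI_Comm and the dummy MPI_COMM_WORLD constant when PARALLEL is not defined. A hypothetical caller (not part of the commit) that now builds either way:

  Mesh mesh;
  mesh.SetCommunicator (MPI_COMM_WORLD);  // real communicator with MPI, plain int 12345 without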
@@ -1327,14 +1325,8 @@ namespace netgen
     if (archive.Input())
       {
-        int rank, ntasks;
-#ifdef PARALLEL
-        MPI_Comm_size(this->comm, &ntasks);
-        MPI_Comm_rank(this->comm, &rank);
-#else
-        rank = 0;
-        ntasks = 1;
-#endif
+        int rank = MyMPI_GetId(GetCommunicator());
+        int ntasks = MyMPI_GetNTasks(GetCommunicator());
 
         RebuildSurfaceElementLists();

File 5 of 5

@@ -71,9 +71,10 @@ namespace netgen
   void Mesh :: SendMesh () const
   {
     Array<MPI_Request> sendrequests;
 
-    int id = MyMPI_GetId(GetCommunicator());
-    int np = MyMPI_GetNTasks(GetCommunicator());
+    MPI_Comm comm = GetCommunicator();
+    int id = MyMPI_GetId(comm);
+    int ntasks = MyMPI_GetNTasks(comm);
 
     int dim = GetDimension();
     MyMPI_Bcast(dim, comm);
@@ -751,7 +752,7 @@ namespace netgen
     NgProfiler::RegionTimer reg(timer);
 
     int id = MyMPI_GetId(GetCommunicator());
-    int np = MyMPI_GetNTasks(GetCommunicator());
+    int ntasks = MyMPI_GetNTasks(GetCommunicator());
 
     int dim;
     MyMPI_Bcast(dim, comm);
@@ -1009,9 +1010,9 @@ namespace netgen
   // call it only for the master !
   void Mesh :: Distribute ()
   {
-    MPI_Comm comm = this->comm;
-    MPI_Comm_size(comm, &ntasks);
-    MPI_Comm_rank(comm, &id);
+    MPI_Comm comm = GetCommunicator();
+    int id = MyMPI_GetId(comm);
+    int ntasks = MyMPI_GetNTasks(comm);
 
     if (id != 0 || ntasks == 1 ) return;
@@ -1070,7 +1071,7 @@ namespace netgen
     eptr.Append (eind.Size());
     Array<idx_t> epart(ne), npart(nn);
 
-    idxtype nparts = ntasks-1;
+    idxtype nparts = MyMPI_GetNTasks(GetCommunicator())-1;
 
     idxtype edgecut;
     idxtype ncommon = 3;
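These variables feed the METIS dual-graph partitioner a few lines further down; nparts is ntasks-1 because rank 0 (the master) keeps no elements after distribution. For orientation, a sketch of such a call (METIS 5 signature; the exact invocation and integer types in this file may differ, so treat this purely as an illustration):

    // Hypothetical sketch, assuming ne/nn are idx_t-compatible:
    int rc = METIS_PartMeshDual (&ne, &nn, &eptr[0], &eind[0],
                                 NULL, NULL,            // no element weights / sizes
                                 &ncommon, &nparts,
                                 NULL, NULL,            // default tpwgts / options
                                 &edgecut, &epart[0], &npart[0]);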
@@ -1275,9 +1276,9 @@ namespace netgen
   // call it only for the master !
   void Mesh :: Distribute (Array<int> & volume_weights , Array<int> & surface_weights, Array<int> & segment_weights)
   {
-    MPI_Comm comm = this->comm;
-    MPI_Comm_size(comm, &ntasks);
-    MPI_Comm_rank(comm, &id);
+    MPI_Comm comm = GetCommunicator();
+    int id = MyMPI_GetId(comm);
+    int ntasks = MyMPI_GetNTasks(comm);
 
     if (id != 0 || ntasks == 1 ) return;
@@ -1367,7 +1368,7 @@ namespace netgen
     eptr.Append (eind.Size());
     Array<idx_t> epart(ne), npart(nn);
 
-    idxtype nparts = ntasks-1;
+    idxtype nparts = MyMPI_GetNTasks(GetCommunicator())-1;
 
     idxtype edgecut;