Add a global communicator. The mesh now has a communicator.

Lukas 2019-01-29 16:10:02 +01:00
parent 4f40087866
commit c7fb6c7e4a
12 changed files with 195 additions and 106 deletions
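
In short: the MyMPI_* wrappers now default to a global communicator ng_comm instead of MPI_COMM_WORLD, and every Mesh carries its own communicator that downstream code queries via GetCommunicator(). A rough usage sketch (illustrative only, assuming an MPI/PARALLEL build; it uses only names introduced in the hunks below):

    #include <mpi.h>
    // assumes the netgen headers changed below are on the include path

    void run_on_subcomm (netgen::Mesh & mesh, MPI_Comm sub_comm)
    {
      mesh.SetCommunicator(sub_comm);   // instead of the implicit MPI_COMM_WORLD
      // rank/size are derived from the mesh's communicator, not from MPI_COMM_WORLD
      int rank = netgen::MyMPI_GetId(mesh.GetCommunicator());
      int np   = netgen::MyMPI_GetNTasks(mesh.GetCommunicator());
      (void) rank; (void) np;
    }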

View File

@@ -17,14 +17,38 @@ namespace netgen
   using ngcore::id;
   using ngcore::ntasks;
+#ifndef PARALLEL
+  typedef int MPI_Comm;
+  enum { MPI_COMM_WORLD = 12345, MPI_COMM_NULL = 0};
+  inline int MyMPI_GetNTasks (MPI_Comm comm = ng_comm) { return 1; }
+  inline int MyMPI_GetId (MPI_Comm comm = ng_comm) { return 0; }
+#endif
+  /** This is the "standard" communicator that will be used for netgen-objects. **/
+  extern MPI_Comm ng_comm;
 #ifdef PARALLEL
+  inline int MyMPI_GetNTasks (MPI_Comm comm = ng_comm)
+  {
+    int ntasks;
+    MPI_Comm_size(comm, &ntasks);
+    return ntasks;
+  }
+  inline int MyMPI_GetId (MPI_Comm comm = ng_comm)
+  {
+    int id;
+    MPI_Comm_rank(comm, &id);
+    return id;
+  }
   enum { MPI_TAG_CMD = 110 };
   enum { MPI_TAG_MESH = 210 };
   enum { MPI_TAG_VIS = 310 };
+  extern MPI_Comm mesh_comm;
   template <class T>
   MPI_Datatype MyGetMPIType ( )
   { cerr << "ERROR in GetMPIType() -- no type found" << endl;return 0; }
@@ -38,76 +62,76 @@ namespace netgen
   { return MPI_DOUBLE; }
-  inline void MyMPI_Send (int i, int dest, int tag)
+  inline void MyMPI_Send (int i, int dest, int tag, MPI_Comm comm = ng_comm)
   {
     int hi = i;
-    MPI_Send( &hi, 1, MPI_INT, dest, tag, MPI_COMM_WORLD);
+    MPI_Send( &hi, 1, MPI_INT, dest, tag, comm);
   }
-  inline void MyMPI_Recv (int & i, int src, int tag)
+  inline void MyMPI_Recv (int & i, int src, int tag, MPI_Comm comm = ng_comm)
   {
     MPI_Status status;
-    MPI_Recv( &i, 1, MPI_INT, src, tag, MPI_COMM_WORLD, &status);
+    MPI_Recv( &i, 1, MPI_INT, src, tag, comm, &status);
   }
-  inline void MyMPI_Send (const string & s, int dest, int tag)
+  inline void MyMPI_Send (const string & s, int dest, int tag, MPI_Comm comm = ng_comm)
   {
-    MPI_Send( const_cast<char*> (s.c_str()), s.length(), MPI_CHAR, dest, tag, MPI_COMM_WORLD);
+    MPI_Send( const_cast<char*> (s.c_str()), s.length(), MPI_CHAR, dest, tag, comm);
   }
-  inline void MyMPI_Recv (string & s, int src, int tag)
+  inline void MyMPI_Recv (string & s, int src, int tag, MPI_Comm comm = ng_comm)
   {
     MPI_Status status;
     int len;
     MPI_Probe (src, tag, MPI_COMM_WORLD, &status);
     MPI_Get_count (&status, MPI_CHAR, &len);
     s.assign (len, ' ');
-    MPI_Recv( &s[0], len, MPI_CHAR, src, tag, MPI_COMM_WORLD, &status);
+    MPI_Recv( &s[0], len, MPI_CHAR, src, tag, comm, &status);
   }
   template <class T, int BASE>
-  inline void MyMPI_Send (FlatArray<T, BASE> s, int dest, int tag)
+  inline void MyMPI_Send (FlatArray<T, BASE> s, int dest, int tag, MPI_Comm comm = ng_comm)
   {
-    MPI_Send( &s.First(), s.Size(), MyGetMPIType<T>(), dest, tag, MPI_COMM_WORLD);
+    MPI_Send( &s.First(), s.Size(), MyGetMPIType<T>(), dest, tag, comm);
   }
   template <class T, int BASE>
-  inline void MyMPI_Recv ( FlatArray<T, BASE> s, int src, int tag)
+  inline void MyMPI_Recv ( FlatArray<T, BASE> s, int src, int tag, MPI_Comm comm = ng_comm)
   {
     MPI_Status status;
-    MPI_Recv( &s.First(), s.Size(), MyGetMPIType<T>(), src, tag, MPI_COMM_WORLD, &status);
+    MPI_Recv( &s.First(), s.Size(), MyGetMPIType<T>(), src, tag, comm, &status);
   }
   template <class T, int BASE>
-  inline void MyMPI_Recv ( Array <T, BASE> & s, int src, int tag)
+  inline void MyMPI_Recv ( Array <T, BASE> & s, int src, int tag, MPI_Comm comm = ng_comm)
   {
     MPI_Status status;
     int len;
-    MPI_Probe (src, tag, MPI_COMM_WORLD, &status);
+    MPI_Probe (src, tag, comm, &status);
     MPI_Get_count (&status, MyGetMPIType<T>(), &len);
     s.SetSize (len);
-    MPI_Recv( &s.First(), len, MyGetMPIType<T>(), src, tag, MPI_COMM_WORLD, &status);
+    MPI_Recv( &s.First(), len, MyGetMPIType<T>(), src, tag, comm, &status);
   }
   template <class T, int BASE>
-  inline int MyMPI_Recv ( Array <T, BASE> & s, int tag)
+  inline int MyMPI_Recv ( Array <T, BASE> & s, int tag, MPI_Comm comm = ng_comm)
   {
     MPI_Status status;
     int len;
-    MPI_Probe (MPI_ANY_SOURCE, tag, MPI_COMM_WORLD, &status);
+    MPI_Probe (MPI_ANY_SOURCE, tag, comm, &status);
     int src = status.MPI_SOURCE;
     MPI_Get_count (&status, MyGetMPIType<T>(), &len);
     s.SetSize (len);
-    MPI_Recv( &s.First(), len, MyGetMPIType<T>(), src, tag, MPI_COMM_WORLD, &status);
+    MPI_Recv( &s.First(), len, MyGetMPIType<T>(), src, tag, comm, &status);
     return src;
   }
@@ -129,7 +153,7 @@ namespace netgen
   */
   template <class T, int BASE>
-  inline MPI_Request MyMPI_ISend (FlatArray<T, BASE> s, int dest, int tag, MPI_Comm comm = MPI_COMM_WORLD)
+  inline MPI_Request MyMPI_ISend (FlatArray<T, BASE> s, int dest, int tag, MPI_Comm comm = ng_comm)
   {
     MPI_Request request;
     MPI_Isend( &s.First(), s.Size(), MyGetMPIType<T>(), dest, tag, comm, &request);
@@ -138,7 +162,7 @@ namespace netgen
   template <class T, int BASE>
-  inline MPI_Request MyMPI_IRecv (FlatArray<T, BASE> s, int dest, int tag, MPI_Comm comm = MPI_COMM_WORLD)
+  inline MPI_Request MyMPI_IRecv (FlatArray<T, BASE> s, int dest, int tag, MPI_Comm comm = ng_comm)
   {
     MPI_Request request;
     MPI_Irecv( &s.First(), s.Size(), MyGetMPIType<T>(), dest, tag, comm, &request);
@@ -203,11 +227,10 @@ namespace netgen
   template <typename T>
   inline void MyMPI_ExchangeTable (TABLE<T> & send_data,
                                    TABLE<T> & recv_data, int tag,
-                                   MPI_Comm comm = MPI_COMM_WORLD)
+                                   MPI_Comm comm = ng_comm)
   {
-    int ntasks, rank;
-    MPI_Comm_size(comm, &ntasks);
-    MPI_Comm_rank(comm, &rank);
+    int rank = MyMPI_GetId(comm);
+    int ntasks = MyMPI_GetNTasks(comm);
     Array<int> send_sizes(ntasks);
     Array<int> recv_sizes(ntasks);
@@ -251,13 +274,13 @@ namespace netgen
   template <class T>
-  inline void MyMPI_Bcast (T & s, MPI_Comm comm = MPI_COMM_WORLD)
+  inline void MyMPI_Bcast (T & s, MPI_Comm comm = ng_comm)
   {
     MPI_Bcast (&s, 1, MyGetMPIType<T>(), 0, comm);
   }
   template <class T>
-  inline void MyMPI_Bcast (Array<T, 0> & s, MPI_Comm comm = MPI_COMM_WORLD)
+  inline void MyMPI_Bcast (Array<T, 0> & s, MPI_Comm comm = ng_comm)
   {
     int size = s.Size();
     MyMPI_Bcast (size, comm);
@@ -266,7 +289,7 @@ namespace netgen
   }
   template <class T>
-  inline void MyMPI_Bcast (Array<T, 0> & s, int root, MPI_Comm comm = MPI_COMM_WORLD)
+  inline void MyMPI_Bcast (Array<T, 0> & s, int root, MPI_Comm comm = ng_comm)
   {
     int id;
     MPI_Comm_rank(comm, &id);
@@ -279,19 +302,19 @@ namespace netgen
   }
   template <class T, class T2>
-  inline void MyMPI_Allgather (const T & send, FlatArray<T2> recv, MPI_Comm comm)
+  inline void MyMPI_Allgather (const T & send, FlatArray<T2> recv, MPI_Comm comm = ng_comm)
   {
     MPI_Allgather( const_cast<T*> (&send), 1, MyGetMPIType<T>(), &recv[0], 1, MyGetMPIType<T2>(), comm);
   }
   template <class T, class T2>
-  inline void MyMPI_Alltoall (FlatArray<T> send, FlatArray<T2> recv, MPI_Comm comm)
+  inline void MyMPI_Alltoall (FlatArray<T> send, FlatArray<T2> recv, MPI_Comm comm = ng_comm)
   {
     MPI_Alltoall( &send[0], 1, MyGetMPIType<T>(), &recv[0], 1, MyGetMPIType<T2>(), comm);
   }
   // template <class T, class T2>
-  // inline void MyMPI_Alltoall_Block (FlatArray<T> send, FlatArray<T2> recv, int blocklen, MPI_Comm comm)
+  // inline void MyMPI_Alltoall_Block (FlatArray<T> send, FlatArray<T2> recv, int blocklen, MPI_Comm comm = ng_comm)
   // {
   //   MPI_Alltoall( &send[0], blocklen, MyGetMPIType<T>(), &recv[0], blocklen, MyGetMPIType<T2>(), comm);
   // }
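
The practical effect of the new default arguments above: existing call sites compile unchanged but now run on ng_comm, while a caller can pass a different communicator explicitly. A minimal sketch (assuming a PARALLEL build; not part of the commit):

    void send_flag (MPI_Comm sub_comm)
    {
      int flag = 1;
      netgen::MyMPI_Send(flag, /*dest*/ 1, netgen::MPI_TAG_CMD);            // defaults to ng_comm
      netgen::MyMPI_Send(flag, /*dest*/ 1, netgen::MPI_TAG_CMD, sub_comm);  // explicit communicator
    }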

View File

@@ -37,6 +37,7 @@
 // max number of nodes per surface element
 #define NG_SURFACE_ELEMENT_MAXPOINTS 8
+namespace netgen { extern MPI_Comm ng_comm; }
 // implemented element types:
@@ -62,7 +63,7 @@ extern "C" {
   DLL_HEADER void Ng_LoadGeometry (const char * filename);
   // load netgen mesh
-  DLL_HEADER void Ng_LoadMesh (const char * filename);
+  DLL_HEADER void Ng_LoadMesh (const char * filename, MPI_Comm comm = netgen::ng_comm);
   // load netgen mesh
   DLL_HEADER void Ng_LoadMeshFromString (const char * mesh_as_string);
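
With the defaulted parameter, loading a mesh onto a particular communicator looks roughly like this (illustrative; "cube.vol" is a placeholder file name):

    void load_example (MPI_Comm my_sub_comm)
    {
      Ng_LoadMesh("cube.vol");                // old call sites keep working: implicitly netgen::ng_comm
      Ng_LoadMesh("cube.vol", my_sub_comm);   // new: load the mesh onto a specific communicator
    }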

View File

@@ -249,6 +249,9 @@ namespace netgen
   {
   private:
     shared_ptr<Mesh> mesh;
+#ifdef PARALLEL
+    MPI_Comm comm;
+#endif
   public:
     // Ngx_Mesh () { ; }
@@ -261,6 +264,11 @@ namespace netgen
     void UpdateTopology ();
     void DoArchive (Archive & archive);
+#ifdef PARALLEL
+    MPI_Comm GetCommunicator() const;
+    void SetCommunicator(MPI_Comm acomm);
+#endif
     virtual ~Ngx_Mesh();
     bool Valid () { return mesh != NULL; }

View File

@@ -117,14 +117,10 @@ void Ng_LoadMeshFromStream ( istream & input )
 }
-void Ng_LoadMesh (const char * filename)
+void Ng_LoadMesh (const char * filename, MPI_Comm comm)
 {
-#ifdef PARALLEL
-  MPI_Comm_size(MPI_COMM_WORLD, &ntasks);
-  MPI_Comm_rank(MPI_COMM_WORLD, &id);
-#endif
+  int id = MyMPI_GetId(comm);
+  int ntasks = MyMPI_GetNTasks(comm);
   {
     ifstream infile(filename);
@@ -134,11 +130,10 @@ void Ng_LoadMesh (const char * filename)
   if ( string(filename).find(".vol") == string::npos )
     {
-#ifdef PARALLEL
       if(ntasks>1)
         throw NgException("Not sure what to do with this?? Does this work with MPI??");
-#endif
       mesh.reset (new Mesh());
+      mesh->SetCommunicator(comm);
       ReadFile(*mesh,filename);
       //mesh->SetGlobalH (mparam.maxh);
       //mesh->CalcLocalH();
@@ -149,9 +144,7 @@ void Ng_LoadMesh (const char * filename)
   char* buf; // for distributing geometry!
   int strs;
-#ifdef PARALLEL
   if( id == 0) {
-#endif
     string fn(filename);
     if (fn.substr (fn.length()-3, 3) == ".gz")
@@ -159,6 +152,7 @@ void Ng_LoadMesh (const char * filename)
     else
       infile = new ifstream (filename);
     mesh.reset (new Mesh());
+    mesh->SetCommunicator(comm);
     mesh -> Load(*infile);
     SetGlobalMesh (mesh);
@@ -173,7 +167,6 @@ void Ng_LoadMesh (const char * filename)
   }
   delete infile;
-#ifdef PARALLEL
   if (ntasks > 1)
     {
@@ -239,17 +232,17 @@ void Ng_LoadMesh (const char * filename)
   } // id==0 end
   else {
     mesh.reset (new Mesh());
+    mesh->SetCommunicator(comm);
     SetGlobalMesh (mesh);
     mesh->SendRecvMesh();
   }
   if(!ng_geometry && ntasks>1) {
     /** Scatter the geometry-string **/
-    MPI_Bcast(&strs, 1, MPI_INT, 0, MPI_COMM_WORLD);
+    MPI_Bcast(&strs, 1, MPI_INT, 0, comm);
     if(id!=0) buf = new char[strs];
-    MPI_Bcast(buf, strs, MPI_CHAR, 0, MPI_COMM_WORLD);
+    MPI_Bcast(buf, strs, MPI_CHAR, 0, comm);
   }
-#endif
   if(!ng_geometry) {
     infile = new istringstream(string((const char*)buf, (size_t)strs));

View File

@@ -34,23 +34,40 @@ namespace netgen
   Ngx_Mesh :: Ngx_Mesh (shared_ptr<Mesh> amesh)
   {
-    if (amesh)
+    if (amesh) {
       mesh = amesh;
-    else
-      mesh = netgen::mesh;
+      comm = amesh->GetCommunicator();
+    }
+    else {
+      mesh = netgen::mesh;
+      comm = netgen::ng_comm;
+    }
   }
+#ifdef PARALLEL
+  void Ngx_Mesh :: SetCommunicator (MPI_Comm acomm)
+  {
+    if (Valid() && acomm!=mesh->GetCommunicator())
+      throw NgException("Redistribution of mesh not possible!");
+    this->comm = acomm;
+  }
+  MPI_Comm Ngx_Mesh :: GetCommunicator() const
+  { return comm; }
+#endif
   Ngx_Mesh * LoadMesh (const string & filename)
   {
     netgen::mesh.reset();
-    Ng_LoadMesh (filename.c_str());
+    Ng_LoadMesh (filename.c_str(), netgen::ng_comm);
     return new Ngx_Mesh (netgen::mesh);
   }
   void Ngx_Mesh :: LoadMesh (const string & filename)
   {
     netgen::mesh.reset();
-    Ng_LoadMesh (filename.c_str());
+    Ng_LoadMesh (filename.c_str(), this->comm);
     // mesh = move(netgen::mesh);
     mesh = netgen::mesh;
   }
@@ -71,7 +88,12 @@ namespace netgen
   void Ngx_Mesh :: DoArchive (Archive & archive)
   {
-    if (archive.Input()) mesh = make_shared<Mesh>();
+#ifdef PARALLEL
+    if (archive.Input()) {
+      mesh = make_shared<Mesh>();
+      mesh->SetCommunicator(GetCommunicator());
+    }
+#endif
     mesh->DoArchive(archive);
     if (archive.Input())
       {
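
Taken together, the intended order is: choose the communicator while the Ngx_Mesh is still empty, then load; switching the communicator of an already loaded mesh throws. A hypothetical sketch ("part.vol" is a placeholder):

    void attach_and_load (netgen::Ngx_Mesh & ngx, MPI_Comm acomm)
    {
      // throws NgException("Redistribution of mesh not possible!") if ngx already
      // holds a mesh living on a different communicator
      ngx.SetCommunicator(acomm);
      ngx.LoadMesh("part.vol");   // Ng_LoadMesh then runs on this->comm
    }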

View File

@@ -553,14 +553,18 @@ namespace netgen
     order = 1;
+    MPI_Comm curve_comm;
 #ifdef PARALLEL
     enum { MPI_TAG_CURVE = MPI_TAG_MESH+20 };
     const ParallelMeshTopology & partop = mesh.GetParallelTopology ();
-    MPI_Comm curve_comm;
-    MPI_Comm_dup (MPI_COMM_WORLD, &curve_comm);
+    MPI_Comm_dup (mesh.GetCommunicator(), &curve_comm);
     Array<int> procs;
+#else
+    curve_comm = ng_comm; // dummy!
 #endif
+    int rank = MyMPI_GetId(curve_comm);
+    int ntasks = MyMPI_GetNTasks(curve_comm);
     if (working)
       order = aorder;

View File

@@ -31,6 +31,9 @@ namespace netgen
   DLL_HEADER shared_ptr<NetgenGeometry> ng_geometry;
   // TraceGlobal glob2("global2");
+  // global communicator for netgen
+  MPI_Comm ng_comm = MPI_COMM_WORLD;
   weak_ptr<Mesh> global_mesh;
   void SetGlobalMesh (shared_ptr<Mesh> m)
   {

View File

@@ -59,6 +59,10 @@ namespace netgen
   DLL_HEADER extern weak_ptr<Mesh> global_mesh;
   DLL_HEADER void SetGlobalMesh (shared_ptr<Mesh> m);
+  // global communicator for netgen (dummy if no MPI)
+  extern MPI_Comm ng_comm;
 }
 #endif
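
Since ng_comm is an ordinary mutable global, an embedding application could in principle repoint it before any Mesh is constructed; the Mesh constructor (meshclass.cpp below) copies netgen::ng_comm into its comm member. A hypothetical sketch, not something the commit itself does:

    void use_app_comm (MPI_Comm app_comm)
    {
      netgen::ng_comm = app_comm;   // meshes constructed afterwards pick this up
    }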

View File

@@ -44,7 +44,7 @@ namespace netgen
     cd2names.SetSize(0);
 #ifdef PARALLEL
-    this->comm = MPI_COMM_WORLD;
+    this->comm = netgen :: ng_comm;
     paralleltop = new ParallelMeshTopology (*this);
 #endif
   }
@@ -83,6 +83,12 @@ namespace netgen
 #endif
   }
+#ifdef PARALLEL
+  void Mesh :: SetCommunicator(MPI_Comm acomm)
+  {
+    this->comm = acomm;
+  }
+#endif
   Mesh & Mesh :: operator= (const Mesh & mesh2)
   {
@@ -1321,6 +1327,15 @@ namespace netgen
     if (archive.Input())
       {
+        int rank, ntasks;
+#ifdef PARALLEL
+        MPI_Comm_size(this->comm, &ntasks);
+        MPI_Comm_rank(this->comm, &rank);
+#else
+        rank = 0;
+        ntasks = 1;
+#endif
         RebuildSurfaceElementLists();
         CalcSurfacesOfNode ();

View File

@@ -606,6 +606,11 @@ namespace netgen
     int AddEdgeDescriptor(const EdgeDescriptor & fd)
     { edgedecoding.Append(fd); return edgedecoding.Size() - 1; }
+#ifdef PARALLEL
+    MPI_Comm GetCommunicator() const { return this->comm; }
+    void SetCommunicator(MPI_Comm acomm);
+#endif
     ///
     DLL_HEADER void SetMaterial (int domnr, const string & mat);
     ///
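
With GetCommunicator/SetCommunicator on Mesh, a mesh can live on a sub-communicator rather than on MPI_COMM_WORLD. A sketch using the standard MPI_Comm_split (assumes a PARALLEL build; not part of this commit):

    #include <mpi.h>

    void put_mesh_on_half (netgen::Mesh & mesh)
    {
      int world_rank;
      MPI_Comm_rank(MPI_COMM_WORLD, &world_rank);

      MPI_Comm half;
      MPI_Comm_split(MPI_COMM_WORLD, world_rank % 2, world_rank, &half);

      mesh.SetCommunicator(half);   // later MyMPI_* calls tied to this mesh use 'half'
    }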

View File

@@ -35,9 +35,8 @@ namespace netgen
   void Mesh :: SendRecvMesh ()
   {
-    int id, np;
-    MPI_Comm_rank(this->comm, &id);
-    MPI_Comm_size(this->comm, &np);
+    int id = MyMPI_GetId(GetCommunicator());
+    int np = MyMPI_GetNTasks(GetCommunicator());
     if (np == 1) {
       throw NgException("SendRecvMesh called, but only one rank in communicator!!");
@@ -73,16 +72,17 @@ namespace netgen
   {
     Array<MPI_Request> sendrequests;
+    int id = MyMPI_GetId(GetCommunicator());
+    int np = MyMPI_GetNTasks(GetCommunicator());
     int dim = GetDimension();
-    MyMPI_Bcast(dim);
+    MyMPI_Bcast(dim, comm);
     const_cast<MeshTopology&>(GetTopology()).Update();
     PrintMessage ( 3, "Sending nr of elements");
+    MPI_Comm comm = this->comm;
     Array<int> num_els_on_proc(ntasks);
     num_els_on_proc = 0;
     for (ElementIndex ei = 0; ei < GetNE(); ei++)
@@ -285,7 +285,7 @@ namespace netgen
     for (int dest = 1; dest < ntasks; dest++)
       {
         FlatArray<PointIndex> verts = verts_of_proc[dest];
-        sendrequests.Append (MyMPI_ISend (verts, dest, MPI_TAG_MESH+1));
+        sendrequests.Append (MyMPI_ISend (verts, dest, MPI_TAG_MESH+1, comm));
         MPI_Datatype mptype = MeshPoint::MyGetMPIType();
@@ -301,7 +301,7 @@ namespace netgen
         MPI_Type_commit (&newtype);
         MPI_Request request;
-        MPI_Isend( &points[0], 1, newtype, dest, MPI_TAG_MESH+1, MPI_COMM_WORLD, &request);
+        MPI_Isend( &points[0], 1, newtype, dest, MPI_TAG_MESH+1, comm, &request);
         sendrequests.Append (request);
       }
@@ -367,7 +367,7 @@ namespace netgen
       }
     Array<MPI_Request> req_per;
     for(int dest = 1; dest < ntasks; dest++)
-      req_per.Append(MyMPI_ISend(pp_data[dest], dest, MPI_TAG_MESH+1));
+      req_per.Append(MyMPI_ISend(pp_data[dest], dest, MPI_TAG_MESH+1, comm));
     MPI_Waitall(req_per.Size(), &req_per[0], MPI_STATUS_IGNORE);
     PrintMessage ( 3, "Sending Vertices - distprocs");
@@ -395,7 +395,7 @@ namespace netgen
       }
     for ( int dest = 1; dest < ntasks; dest ++ )
-      sendrequests.Append (MyMPI_ISend (distpnums[dest], dest, MPI_TAG_MESH+1));
+      sendrequests.Append (MyMPI_ISend (distpnums[dest], dest, MPI_TAG_MESH+1, comm));
@@ -425,7 +425,7 @@ namespace netgen
       }
     for (int dest = 1; dest < ntasks; dest ++ )
-      sendrequests.Append (MyMPI_ISend (elementarrays[dest], dest, MPI_TAG_MESH+2));
+      sendrequests.Append (MyMPI_ISend (elementarrays[dest], dest, MPI_TAG_MESH+2, comm));
     PrintMessage ( 3, "Sending Face Descriptors" );
@@ -442,7 +442,7 @@ namespace netgen
       }
     for (int dest = 1; dest < ntasks; dest++)
-      sendrequests.Append (MyMPI_ISend (fddata, dest, MPI_TAG_MESH+3));
+      sendrequests.Append (MyMPI_ISend (fddata, dest, MPI_TAG_MESH+3, comm));
     /** Surface Elements **/
@@ -526,7 +526,7 @@ namespace netgen
       });
     // distribute sel data
     for (int dest = 1; dest < ntasks; dest++)
-      sendrequests.Append (MyMPI_ISend(selbuf[dest], dest, MPI_TAG_MESH+4));
+      sendrequests.Append (MyMPI_ISend(selbuf[dest], dest, MPI_TAG_MESH+4, comm));
     /** Segments **/
@@ -676,7 +676,7 @@ namespace netgen
       });
     // distrubute segment data
     for (int dest = 1; dest < ntasks; dest++)
-      sendrequests.Append (MyMPI_ISend(segm_buf[dest], dest, MPI_TAG_MESH+5));
+      sendrequests.Append (MyMPI_ISend(segm_buf[dest], dest, MPI_TAG_MESH+5, comm));
     PrintMessage ( 3, "now wait ...");
@@ -700,9 +700,9 @@ namespace netgen
         compiled_bcnames[tot_bcsize++] = (*bcnames[k])[j];
     for(int k=1;k<ntasks;k++) {
-      sendrequests[6*(k-1)] = MyMPI_ISend(FlatArray<int>(1, &nbcs), k, MPI_TAG_MESH+6);
-      sendrequests[6*(k-1)+1] = MyMPI_ISend(bcname_sizes, k, MPI_TAG_MESH+6);
-      (void) MPI_Isend(compiled_bcnames, tot_bcsize, MPI_CHAR, k, MPI_TAG_MESH+6, MPI_COMM_WORLD, &sendrequests[6*(k-1)+2]);
+      sendrequests[6*(k-1)] = MyMPI_ISend(FlatArray<int>(1, &nbcs), k, MPI_TAG_MESH+6, comm);
+      sendrequests[6*(k-1)+1] = MyMPI_ISend(bcname_sizes, k, MPI_TAG_MESH+6, comm);
+      (void) MPI_Isend(compiled_bcnames, tot_bcsize, MPI_CHAR, k, MPI_TAG_MESH+6, comm, &sendrequests[6*(k-1)+2]);
     }
     /** Send mat-names **/
@@ -719,9 +719,9 @@ namespace netgen
       for(int j=0;j<mat_sizes[k];j++)
        compiled_mats[tot_matsize++] = (*materials[k])[j];
     for(int k=1;k<ntasks;k++) {
-      sendrequests[6*(k-1)+3] = MyMPI_ISend(FlatArray<int>(1, &nmats), k, MPI_TAG_MESH+6);
-      sendrequests[6*(k-1)+4] = MyMPI_ISend(mat_sizes, k, MPI_TAG_MESH+6);
-      (void) MPI_Isend(compiled_mats, tot_matsize, MPI_CHAR, k, MPI_TAG_MESH+6, MPI_COMM_WORLD, &sendrequests[6*(k-1)+5]);
+      sendrequests[6*(k-1)+3] = MyMPI_ISend(FlatArray<int>(1, &nmats), k, MPI_TAG_MESH+6, comm);
+      sendrequests[6*(k-1)+4] = MyMPI_ISend(mat_sizes, k, MPI_TAG_MESH+6, comm);
+      (void) MPI_Isend(compiled_mats, tot_matsize, MPI_CHAR, k, MPI_TAG_MESH+6, comm, &sendrequests[6*(k-1)+5]);
     }
     /* now wait ... **/
@@ -731,7 +731,7 @@ namespace netgen
     PrintMessage( 3, "send mesh complete");
-    MPI_Barrier(MPI_COMM_WORLD);
+    MPI_Barrier(comm);
   }
@@ -750,14 +750,17 @@ namespace netgen
     int timer_sels = NgProfiler::CreateTimer ("Receive surface elements");
     NgProfiler::RegionTimer reg(timer);
+    int id = MyMPI_GetId(GetCommunicator());
+    int np = MyMPI_GetNTasks(GetCommunicator());
     int dim;
-    MyMPI_Bcast(dim);
+    MyMPI_Bcast(dim, comm);
     SetDimension(dim);
     // Receive number of local elements
     int nelloc;
     MPI_Scatter (NULL, 0, MPI_INT,
-                 &nelloc, 1, MPI_INT, 0, MPI_COMM_WORLD);
+                 &nelloc, 1, MPI_INT, 0, comm);
     paralleltop -> SetNE (nelloc);
     // string st;
@@ -766,8 +769,7 @@ namespace netgen
     NgProfiler::StartTimer (timer_pts);
     Array<int> verts;
-    MyMPI_Recv (verts, 0, MPI_TAG_MESH+1);
+    MyMPI_Recv (verts, 0, MPI_TAG_MESH+1, comm);
     int numvert = verts.Size();
     paralleltop -> SetNV (numvert);
@@ -787,11 +789,10 @@ namespace netgen
     MPI_Datatype mptype = MeshPoint::MyGetMPIType();
     MPI_Status status;
-    MPI_Recv( &points[1], numvert, mptype, 0, MPI_TAG_MESH+1, MPI_COMM_WORLD, &status);
+    MPI_Recv( &points[1], numvert, mptype, 0, MPI_TAG_MESH+1, comm, &status);
     Array<int> pp_data;
-    MyMPI_Recv(pp_data, 0, MPI_TAG_MESH+1);
+    MyMPI_Recv(pp_data, 0, MPI_TAG_MESH+1, comm);
     int maxidentnr = pp_data[0];
     auto & idents = GetIdentifications();
@@ -815,7 +816,7 @@ namespace netgen
       }
     Array<int> dist_pnums;
-    MyMPI_Recv (dist_pnums, 0, MPI_TAG_MESH+1);
+    MyMPI_Recv (dist_pnums, 0, MPI_TAG_MESH+1, comm);
     for (int hi = 0; hi < dist_pnums.Size(); hi += 3)
       paralleltop ->
@@ -828,7 +829,7 @@ namespace netgen
       Element el;
       Array<int> elarray;
-      MyMPI_Recv (elarray, 0, MPI_TAG_MESH+2);
+      MyMPI_Recv (elarray, 0, MPI_TAG_MESH+2, comm);
       NgProfiler::RegionTimer reg(timer_els);
@@ -848,7 +849,7 @@ namespace netgen
     {
       Array<double> fddata;
-      MyMPI_Recv (fddata, 0, MPI_TAG_MESH+3);
+      MyMPI_Recv (fddata, 0, MPI_TAG_MESH+3, comm);
      for (int i = 0; i < fddata.Size(); i += 6)
       {
        int faceind = AddFaceDescriptor
@@ -863,7 +864,7 @@ namespace netgen
       NgProfiler::RegionTimer reg(timer_sels);
       Array<int> selbuf;
-      MyMPI_Recv ( selbuf, 0, MPI_TAG_MESH+4);
+      MyMPI_Recv ( selbuf, 0, MPI_TAG_MESH+4, comm);
       int ii = 0;
       int sel = 0;
@@ -894,7 +895,7 @@ namespace netgen
     {
       Array<double> segmbuf;
-      MyMPI_Recv ( segmbuf, 0, MPI_TAG_MESH+5);
+      MyMPI_Recv ( segmbuf, 0, MPI_TAG_MESH+5, comm);
       Segment seg;
       int globsegi;
@@ -939,14 +940,14 @@ namespace netgen
     /** Recv bc-names **/
     int nbcs;
-    MyMPI_Recv(nbcs, 0, MPI_TAG_MESH+6);
+    MyMPI_Recv(nbcs, 0, MPI_TAG_MESH+6, comm);
     Array<int> bcs(nbcs);
-    MyMPI_Recv(bcs, 0, MPI_TAG_MESH+6);
+    MyMPI_Recv(bcs, 0, MPI_TAG_MESH+6, comm);
     int size_bc = 0;
     for(int k=0;k<nbcs;k++)
       size_bc += bcs[k];
     char compiled_bcnames[size_bc];
-    MPI_Recv(compiled_bcnames, size_bc, MPI_CHAR, 0, MPI_TAG_MESH+6, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
+    MPI_Recv(compiled_bcnames, size_bc, MPI_CHAR, 0, MPI_TAG_MESH+6, comm, MPI_STATUS_IGNORE);
     SetNBCNames(nbcs);
     int cnt = 0;
@@ -957,14 +958,14 @@ namespace netgen
     /** Recv mat-names **/
     int nmats;
-    MyMPI_Recv(nmats, 0, MPI_TAG_MESH+6);
+    MyMPI_Recv(nmats, 0, MPI_TAG_MESH+6, comm);
     Array<int> matsz(nmats);
-    MyMPI_Recv(matsz, 0, MPI_TAG_MESH+6);
+    MyMPI_Recv(matsz, 0, MPI_TAG_MESH+6, comm);
     int size_mats = 0;
     for(int k=0;k<nmats;k++)
       size_mats += matsz[k];
     char compiled_mats[size_mats];
-    MPI_Recv(compiled_mats, size_mats, MPI_CHAR, 0, MPI_TAG_MESH+6, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
+    MPI_Recv(compiled_mats, size_mats, MPI_CHAR, 0, MPI_TAG_MESH+6, comm, MPI_STATUS_IGNORE);
     cnt = 0;
     materials.SetSize(nmats);
     for(int k=0;k<nmats;k++) {
@@ -973,7 +974,7 @@ namespace netgen
       cnt += matsz[k];
     }
-    MPI_Barrier(MPI_COMM_WORLD);
+    MPI_Barrier(comm);
     int timerloc = NgProfiler::CreateTimer ("Update local mesh");
     int timerloc2 = NgProfiler::CreateTimer ("CalcSurfacesOfNode");
@@ -1008,8 +1009,9 @@ namespace netgen
   // call it only for the master !
   void Mesh :: Distribute ()
   {
-    MPI_Comm_size(MPI_COMM_WORLD, &ntasks);
-    MPI_Comm_rank(MPI_COMM_WORLD, &id);
+    MPI_Comm comm = this->comm;
+    MPI_Comm_size(comm, &ntasks);
+    MPI_Comm_rank(comm, &id);
     if (id != 0 || ntasks == 1 ) return;
@@ -1120,12 +1122,9 @@ namespace netgen
     for (SurfaceElementIndex sei = 0; sei < GetNSE(); sei++)
       {
        const Element2d & el = (*this)[sei];
-       cout << "surf-el " << sei << " verts: " << endl;
        for (int j = 0; j < el.GetNP(); j++) {
-         cout << el[j] << " ";
          f(el[j], sei);
        }
-       cout << endl;
       }
     };
     auto loop_els_3d = [&](auto f) {
@@ -1150,7 +1149,6 @@ namespace netgen
       if(boundarypoints[vertex])
        cnt[vertex]++;
     });
-    cout << "count: " << endl << cnt << endl;
     TABLE<int, PointIndex::BASE> pnt2el(cnt);
     loop_els([&](auto vertex, int index)
       {
@@ -1277,8 +1275,9 @@ namespace netgen
   // call it only for the master !
   void Mesh :: Distribute (Array<int> & volume_weights , Array<int> & surface_weights, Array<int> & segment_weights)
   {
-    MPI_Comm_size(MPI_COMM_WORLD, &ntasks);
-    MPI_Comm_rank(MPI_COMM_WORLD, &id);
+    MPI_Comm comm = this->comm;
+    MPI_Comm_size(comm, &ntasks);
+    MPI_Comm_rank(comm, &id);
     if (id != 0 || ntasks == 1 ) return;

View File

@@ -25,11 +25,18 @@ namespace netgen
   {
     *testout << "ParallelMeshTopology::Reset" << endl;
+    int id = MyMPI_GetId(mesh.GetCommunicator());
+    int ntasks = MyMPI_GetNTasks(mesh.GetCommunicator());
     if ( ntasks == 1 ) return;
+    cout << "Reset CG, this = " << this << " , mesh: " << &mesh << endl;
     int ned = mesh.GetTopology().GetNEdges();
     int nfa = mesh.GetTopology().GetNFaces();
+    cout << "nnodes : " << mesh.GetNV() << " " << ned << " " << nfa << endl;
     if (glob_edge.Size() != ned)
       {
        glob_edge.SetSize(ned);
@@ -206,6 +213,11 @@ namespace netgen
     // cout << "UpdateCoarseGrid" << endl;
     // if (is_updated) return;
+    int id = MyMPI_GetId(mesh.GetCommunicator());
+    int ntasks = MyMPI_GetNTasks(mesh.GetCommunicator());
+    cout << "Update CG, this = " << this << " , mesh: " << &mesh << endl;
     Reset();
     static int timer = NgProfiler::CreateTimer ("UpdateCoarseGrid");
     NgProfiler::RegionTimer reg(timer);