mirror of https://github.com/NGSolve/netgen.git
synced 2024-12-25 21:40:33 +05:00
Add a global communicator. The mesh now has a communicator.
This commit is contained in:
parent 4f40087866
commit c7fb6c7e4a
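This commit threads an MPI communicator through netgen's interfaces: a global ng_comm replaces the hard-coded MPI_COMM_WORLD defaults, and every Mesh now carries the communicator it lives on. As a rough usage sketch, not part of the commit (assumes an MPI build and a file mesh.vol; the sub-communicator setup is hypothetical):

    // Load a netgen mesh on a communicator of the caller's choice.
    #include <mpi.h>
    #include <nginterface.h>   // assumed location of the Ng_LoadMesh declaration

    int main (int argc, char ** argv)
    {
      MPI_Init (&argc, &argv);

      // split the world into two halves; each half loads its own mesh
      int rank;
      MPI_Comm_rank (MPI_COMM_WORLD, &rank);
      MPI_Comm sub_comm;
      MPI_Comm_split (MPI_COMM_WORLD, rank % 2, rank, &sub_comm);

      Ng_LoadMesh ("mesh.vol", sub_comm);   // new overload from this commit

      MPI_Comm_free (&sub_comm);
      MPI_Finalize ();
      return 0;
    }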
@@ -17,14 +17,38 @@ namespace netgen
   using ngcore::id;
   using ngcore::ntasks;
 
-#ifdef PARALLEL
+#ifndef PARALLEL
+  typedef int MPI_Comm;
+  enum { MPI_COMM_WORLD = 12345, MPI_COMM_NULL = 0};
+
+  inline int MyMPI_GetNTasks (MPI_Comm comm = ng_comm) { return 1; }
+  inline int MyMPI_GetId (MPI_Comm comm = ng_comm) { return 0; }
+
+#endif
+
+  /** This is the "standard" communicator that will be used for netgen-objects. **/
+  extern MPI_Comm ng_comm;
+
+#ifdef PARALLEL
+
+  inline int MyMPI_GetNTasks (MPI_Comm comm = ng_comm)
+  {
+    int ntasks;
+    MPI_Comm_size(comm, &ntasks);
+    return ntasks;
+  }
+
+  inline int MyMPI_GetId (MPI_Comm comm = ng_comm)
+  {
+    int id;
+    MPI_Comm_rank(comm, &id);
+    return id;
+  }
 
   enum { MPI_TAG_CMD = 110 };
   enum { MPI_TAG_MESH = 210 };
   enum { MPI_TAG_VIS = 310 };
 
   extern MPI_Comm mesh_comm;
 
   template <class T>
   MPI_Datatype MyGetMPIType ( )
   { cerr << "ERROR in GetMPIType() -- no type found" << endl;return 0; }
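The #ifndef PARALLEL block above is what keeps communicator-aware call sites compiling in serial builds: MPI_Comm degrades to a plain int and the rank/size queries collapse to constants. A minimal sketch of the effect (ours, assuming the declarations above are in scope):

    #include <iostream>

    // Identical source for serial and MPI builds: without PARALLEL,
    // MyMPI_GetNTasks() returns 1 and MyMPI_GetId() returns 0,
    // so the parallel branch is simply never taken.
    void ReportRank ()
    {
      int ntasks = netgen::MyMPI_GetNTasks (netgen::ng_comm);
      int id     = netgen::MyMPI_GetId (netgen::ng_comm);
      if (ntasks > 1)
        std::cout << "rank " << id << " of " << ntasks << std::endl;
      else
        std::cout << "serial netgen" << std::endl;
    }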
@@ -38,76 +62,76 @@ namespace netgen
   { return MPI_DOUBLE; }
 
 
-  inline void MyMPI_Send (int i, int dest, int tag)
+  inline void MyMPI_Send (int i, int dest, int tag, MPI_Comm comm = ng_comm)
   {
     int hi = i;
-    MPI_Send( &hi, 1, MPI_INT, dest, tag, MPI_COMM_WORLD);
+    MPI_Send( &hi, 1, MPI_INT, dest, tag, comm);
   }
 
-  inline void MyMPI_Recv (int & i, int src, int tag)
+  inline void MyMPI_Recv (int & i, int src, int tag, MPI_Comm comm = ng_comm)
   {
     MPI_Status status;
-    MPI_Recv( &i, 1, MPI_INT, src, tag, MPI_COMM_WORLD, &status);
+    MPI_Recv( &i, 1, MPI_INT, src, tag, comm, &status);
   }
 
 
 
-  inline void MyMPI_Send (const string & s, int dest, int tag)
+  inline void MyMPI_Send (const string & s, int dest, int tag, MPI_Comm comm = ng_comm)
   {
-    MPI_Send( const_cast<char*> (s.c_str()), s.length(), MPI_CHAR, dest, tag, MPI_COMM_WORLD);
+    MPI_Send( const_cast<char*> (s.c_str()), s.length(), MPI_CHAR, dest, tag, comm);
   }
 
-  inline void MyMPI_Recv (string & s, int src, int tag)
+  inline void MyMPI_Recv (string & s, int src, int tag, MPI_Comm comm = ng_comm)
   {
     MPI_Status status;
     int len;
     MPI_Probe (src, tag, MPI_COMM_WORLD, &status);
     MPI_Get_count (&status, MPI_CHAR, &len);
     s.assign (len, ' ');
-    MPI_Recv( &s[0], len, MPI_CHAR, src, tag, MPI_COMM_WORLD, &status);
+    MPI_Recv( &s[0], len, MPI_CHAR, src, tag, comm, &status);
   }
 
 
 
 
   template <class T, int BASE>
-  inline void MyMPI_Send (FlatArray<T, BASE> s, int dest, int tag)
+  inline void MyMPI_Send (FlatArray<T, BASE> s, int dest, int tag, MPI_Comm comm = ng_comm)
   {
-    MPI_Send( &s.First(), s.Size(), MyGetMPIType<T>(), dest, tag, MPI_COMM_WORLD);
+    MPI_Send( &s.First(), s.Size(), MyGetMPIType<T>(), dest, tag, comm);
   }
 
   template <class T, int BASE>
-  inline void MyMPI_Recv ( FlatArray<T, BASE> s, int src, int tag)
+  inline void MyMPI_Recv ( FlatArray<T, BASE> s, int src, int tag, MPI_Comm comm = ng_comm)
   {
     MPI_Status status;
-    MPI_Recv( &s.First(), s.Size(), MyGetMPIType<T>(), src, tag, MPI_COMM_WORLD, &status);
+    MPI_Recv( &s.First(), s.Size(), MyGetMPIType<T>(), src, tag, comm, &status);
   }
 
   template <class T, int BASE>
-  inline void MyMPI_Recv ( Array <T, BASE> & s, int src, int tag)
+  inline void MyMPI_Recv ( Array <T, BASE> & s, int src, int tag, MPI_Comm comm = ng_comm)
   {
     MPI_Status status;
     int len;
-    MPI_Probe (src, tag, MPI_COMM_WORLD, &status);
+    MPI_Probe (src, tag, comm, &status);
     MPI_Get_count (&status, MyGetMPIType<T>(), &len);
 
     s.SetSize (len);
-    MPI_Recv( &s.First(), len, MyGetMPIType<T>(), src, tag, MPI_COMM_WORLD, &status);
+    MPI_Recv( &s.First(), len, MyGetMPIType<T>(), src, tag, comm, &status);
   }
 
   template <class T, int BASE>
-  inline int MyMPI_Recv ( Array <T, BASE> & s, int tag)
+  inline int MyMPI_Recv ( Array <T, BASE> & s, int tag, MPI_Comm comm = ng_comm)
   {
     MPI_Status status;
     int len;
-    MPI_Probe (MPI_ANY_SOURCE, tag, MPI_COMM_WORLD, &status);
+    MPI_Probe (MPI_ANY_SOURCE, tag, comm, &status);
 
     int src = status.MPI_SOURCE;
 
     MPI_Get_count (&status, MyGetMPIType<T>(), &len);
 
     s.SetSize (len);
-    MPI_Recv( &s.First(), len, MyGetMPIType<T>(), src, tag, MPI_COMM_WORLD, &status);
+    MPI_Recv( &s.First(), len, MyGetMPIType<T>(), src, tag, comm, &status);
 
     return src;
   }
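Every point-to-point wrapper above gains a trailing MPI_Comm parameter defaulting to ng_comm, so existing call sites compile unchanged while new code can address a specific communicator. Illustration only (ours; my_comm is a hypothetical placeholder):

    // Both forms are valid after this hunk:
    netgen::Array<double> a(100);
    netgen::MyMPI_Send (a, /*dest*/ 1, netgen::MPI_TAG_MESH);           // default: ng_comm
    netgen::MyMPI_Send (a, /*dest*/ 1, netgen::MPI_TAG_MESH, my_comm);  // explicit communicator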
@@ -129,7 +153,7 @@ namespace netgen
   */
 
   template <class T, int BASE>
-  inline MPI_Request MyMPI_ISend (FlatArray<T, BASE> s, int dest, int tag, MPI_Comm comm = MPI_COMM_WORLD)
+  inline MPI_Request MyMPI_ISend (FlatArray<T, BASE> s, int dest, int tag, MPI_Comm comm = ng_comm)
   {
     MPI_Request request;
     MPI_Isend( &s.First(), s.Size(), MyGetMPIType<T>(), dest, tag, comm, &request);
@@ -138,7 +162,7 @@ namespace netgen
 
 
   template <class T, int BASE>
-  inline MPI_Request MyMPI_IRecv (FlatArray<T, BASE> s, int dest, int tag, MPI_Comm comm = MPI_COMM_WORLD)
+  inline MPI_Request MyMPI_IRecv (FlatArray<T, BASE> s, int dest, int tag, MPI_Comm comm = ng_comm)
   {
     MPI_Request request;
     MPI_Irecv( &s.First(), s.Size(), MyGetMPIType<T>(), dest, tag, comm, &request);
@@ -203,11 +227,10 @@ namespace netgen
   template <typename T>
   inline void MyMPI_ExchangeTable (TABLE<T> & send_data,
                                    TABLE<T> & recv_data, int tag,
-                                   MPI_Comm comm = MPI_COMM_WORLD)
+                                   MPI_Comm comm = ng_comm)
   {
-    int ntasks, rank;
-    MPI_Comm_size(comm, &ntasks);
-    MPI_Comm_rank(comm, &rank);
+    int rank = MyMPI_GetId(comm);
+    int ntasks = MyMPI_GetNTasks(comm);
 
     Array<int> send_sizes(ntasks);
     Array<int> recv_sizes(ntasks);
@@ -251,13 +274,13 @@ namespace netgen
 
 
   template <class T>
-  inline void MyMPI_Bcast (T & s, MPI_Comm comm = MPI_COMM_WORLD)
+  inline void MyMPI_Bcast (T & s, MPI_Comm comm = ng_comm)
   {
     MPI_Bcast (&s, 1, MyGetMPIType<T>(), 0, comm);
   }
 
   template <class T>
-  inline void MyMPI_Bcast (Array<T, 0> & s, MPI_Comm comm = MPI_COMM_WORLD)
+  inline void MyMPI_Bcast (Array<T, 0> & s, MPI_Comm comm = ng_comm)
   {
     int size = s.Size();
     MyMPI_Bcast (size, comm);
@@ -266,7 +289,7 @@ namespace netgen
   }
 
   template <class T>
-  inline void MyMPI_Bcast (Array<T, 0> & s, int root, MPI_Comm comm = MPI_COMM_WORLD)
+  inline void MyMPI_Bcast (Array<T, 0> & s, int root, MPI_Comm comm = ng_comm)
   {
     int id;
     MPI_Comm_rank(comm, &id);
@@ -279,19 +302,19 @@ namespace netgen
   }
 
   template <class T, class T2>
-  inline void MyMPI_Allgather (const T & send, FlatArray<T2> recv, MPI_Comm comm)
+  inline void MyMPI_Allgather (const T & send, FlatArray<T2> recv, MPI_Comm comm = ng_comm)
   {
     MPI_Allgather( const_cast<T*> (&send), 1, MyGetMPIType<T>(), &recv[0], 1, MyGetMPIType<T2>(), comm);
   }
 
   template <class T, class T2>
-  inline void MyMPI_Alltoall (FlatArray<T> send, FlatArray<T2> recv, MPI_Comm comm)
+  inline void MyMPI_Alltoall (FlatArray<T> send, FlatArray<T2> recv, MPI_Comm comm = ng_comm)
   {
     MPI_Alltoall( &send[0], 1, MyGetMPIType<T>(), &recv[0], 1, MyGetMPIType<T2>(), comm);
   }
 
   // template <class T, class T2>
-  // inline void MyMPI_Alltoall_Block (FlatArray<T> send, FlatArray<T2> recv, int blocklen, MPI_Comm comm)
+  // inline void MyMPI_Alltoall_Block (FlatArray<T> send, FlatArray<T2> recv, int blocklen, MPI_Comm comm = ng_comm)
   // {
   //   MPI_Alltoall( &send[0], blocklen, MyGetMPIType<T>(), &recv[0], blocklen, MyGetMPIType<T2>(), comm);
   // }
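The collectives follow the same pattern: the root of a default broadcast is now rank 0 of ng_comm rather than of MPI_COMM_WORLD. A short sketch (ours):

    // Broadcast an array from rank 0 of ng_comm to all of its ranks;
    // per the overload above, the size is broadcast first, then the data.
    netgen::Array<int> data;
    if (netgen::MyMPI_GetId () == 0)
      { data.SetSize (2); data[0] = 42; data[1] = 43; }
    netgen::MyMPI_Bcast (data);   // comm defaults to ng_comm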
@@ -37,6 +37,7 @@
 // max number of nodes per surface element
 #define NG_SURFACE_ELEMENT_MAXPOINTS 8
 
+namespace netgen { extern MPI_Comm ng_comm; }
 
 
 // implemented element types:
@@ -60,9 +61,9 @@ extern "C" {
 
   // load geometry from file
   DLL_HEADER void Ng_LoadGeometry (const char * filename);
 
 
   // load netgen mesh
-  DLL_HEADER void Ng_LoadMesh (const char * filename);
+  DLL_HEADER void Ng_LoadMesh (const char * filename, MPI_Comm comm = netgen::ng_comm);
 
   // load netgen mesh
   DLL_HEADER void Ng_LoadMeshFromString (const char * mesh_as_string);
@@ -249,6 +249,9 @@ namespace netgen
   {
   private:
     shared_ptr<Mesh> mesh;
+#ifdef PARALLEL
+    MPI_Comm comm;
+#endif
 
   public:
     // Ngx_Mesh () { ; }
@@ -261,6 +264,11 @@ namespace netgen
     void UpdateTopology ();
     void DoArchive (Archive & archive);
 
+#ifdef PARALLEL
+    MPI_Comm GetCommunicator() const;
+    void SetCommunicator(MPI_Comm acomm);
+#endif
+
     virtual ~Ngx_Mesh();
 
     bool Valid () { return mesh != NULL; }
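Ngx_Mesh now stores the communicator of the mesh it wraps and exposes it in PARALLEL builds. A hypothetical consumer (ours):

    #include <iostream>

    // Query how many ranks the wrapped mesh is distributed over.
    void PrintMeshRanks (netgen::Ngx_Mesh & ngx)
    {
      MPI_Comm comm = ngx.GetCommunicator ();
      int np;
      MPI_Comm_size (comm, &np);
      std::cout << "mesh lives on " << np << " rank(s)" << std::endl;
    }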
@@ -117,14 +117,10 @@ void Ng_LoadMeshFromStream ( istream & input )
 }
 
 
-
-
-void Ng_LoadMesh (const char * filename)
+void Ng_LoadMesh (const char * filename, MPI_Comm comm)
 {
-#ifdef PARALLEL
-  MPI_Comm_size(MPI_COMM_WORLD, &ntasks);
-  MPI_Comm_rank(MPI_COMM_WORLD, &id);
-#endif
+  int id = MyMPI_GetId(comm);
+  int ntasks = MyMPI_GetNTasks(comm);
 
   {
     ifstream infile(filename);
@@ -134,11 +130,10 @@ void Ng_LoadMesh (const char * filename)
 
   if ( string(filename).find(".vol") == string::npos )
     {
-#ifdef PARALLEL
       if(ntasks>1)
        throw NgException("Not sure what to do with this?? Does this work with MPI??");
-#endif
       mesh.reset (new Mesh());
+      mesh->SetCommunicator(comm);
       ReadFile(*mesh,filename);
       //mesh->SetGlobalH (mparam.maxh);
       //mesh->CalcLocalH();
@@ -149,9 +144,7 @@ void Ng_LoadMesh (const char * filename)
   char* buf; // for distributing geometry!
   int strs;
 
-#ifdef PARALLEL
   if( id == 0) {
-#endif
 
    string fn(filename);
    if (fn.substr (fn.length()-3, 3) == ".gz")
@@ -159,6 +152,7 @@ void Ng_LoadMesh (const char * filename)
    else
      infile = new ifstream (filename);
    mesh.reset (new Mesh());
+   mesh->SetCommunicator(comm);
    mesh -> Load(*infile);
    SetGlobalMesh (mesh);
 
@@ -173,7 +167,6 @@ void Ng_LoadMesh (const char * filename)
   }
   delete infile;
 
-#ifdef PARALLEL
   if (ntasks > 1)
     {
 
@@ -239,17 +232,17 @@ void Ng_LoadMesh (const char * filename)
  } // id==0 end
  else {
    mesh.reset (new Mesh());
+   mesh->SetCommunicator(comm);
    SetGlobalMesh (mesh);
    mesh->SendRecvMesh();
  }
 
  if(!ng_geometry && ntasks>1) {
    /** Scatter the geometry-string **/
-    MPI_Bcast(&strs, 1, MPI_INT, 0, MPI_COMM_WORLD);
+    MPI_Bcast(&strs, 1, MPI_INT, 0, comm);
    if(id!=0) buf = new char[strs];
-    MPI_Bcast(buf, strs, MPI_CHAR, 0, MPI_COMM_WORLD);
+    MPI_Bcast(buf, strs, MPI_CHAR, 0, comm);
  }
 #endif
 
 if(!ng_geometry) {
    infile = new istringstream(string((const char*)buf, (size_t)strs));
@@ -34,23 +34,40 @@ namespace netgen
 
   Ngx_Mesh :: Ngx_Mesh (shared_ptr<Mesh> amesh)
   {
-    if (amesh)
+    if (amesh) {
       mesh = amesh;
-    else
+      comm = amesh->GetCommunicator();
+    }
+    else {
       mesh = netgen::mesh;
+      comm = netgen::ng_comm;
+    }
   }
 
+#ifdef PARALLEL
+  void Ngx_Mesh :: SetCommunicator (MPI_Comm acomm)
+  {
+    if (Valid() && acomm!=mesh->GetCommunicator())
+      throw NgException("Redistribution of mesh not possible!");
+    this->comm = acomm;
+  }
+
+
+  MPI_Comm Ngx_Mesh :: GetCommunicator() const
+  { return comm; }
+#endif
 
   Ngx_Mesh * LoadMesh (const string & filename)
   {
     netgen::mesh.reset();
-    Ng_LoadMesh (filename.c_str());
+    Ng_LoadMesh (filename.c_str(), netgen::ng_comm);
     return new Ngx_Mesh (netgen::mesh);
   }
 
   void Ngx_Mesh :: LoadMesh (const string & filename)
   {
     netgen::mesh.reset();
-    Ng_LoadMesh (filename.c_str());
+    Ng_LoadMesh (filename.c_str(), this->comm);
     // mesh = move(netgen::mesh);
     mesh = netgen::mesh;
   }
@@ -71,7 +88,12 @@ namespace netgen
 
   void Ngx_Mesh :: DoArchive (Archive & archive)
   {
-    if (archive.Input()) mesh = make_shared<Mesh>();
+#ifdef PARALLEL
+    if (archive.Input()) {
+      mesh = make_shared<Mesh>();
+      mesh->SetCommunicator(GetCommunicator());
+    }
+#endif
     mesh->DoArchive(archive);
     if (archive.Input())
       {
@@ -553,14 +553,18 @@ namespace netgen
     order = 1;
 
 
+    MPI_Comm curve_comm;
 #ifdef PARALLEL
     enum { MPI_TAG_CURVE = MPI_TAG_MESH+20 };
 
     const ParallelMeshTopology & partop = mesh.GetParallelTopology ();
-    MPI_Comm curve_comm;
-    MPI_Comm_dup (MPI_COMM_WORLD, &curve_comm);
+    MPI_Comm_dup (mesh.GetCommunicator(), &curve_comm);
     Array<int> procs;
+#else
+    curve_comm = ng_comm; // dummy!
 #endif
+    int rank = MyMPI_GetId(curve_comm);
+    int ntasks = MyMPI_GetNTasks(curve_comm);
 
     if (working)
       order = aorder;
@@ -31,6 +31,9 @@ namespace netgen
   DLL_HEADER shared_ptr<NetgenGeometry> ng_geometry;
   // TraceGlobal glob2("global2");
 
+  // global communicator for netgen
+  MPI_Comm ng_comm = MPI_COMM_WORLD;
+
   weak_ptr<Mesh> global_mesh;
   void SetGlobalMesh (shared_ptr<Mesh> m)
   {
@@ -59,6 +59,10 @@ namespace netgen
 
   DLL_HEADER extern weak_ptr<Mesh> global_mesh;
   DLL_HEADER void SetGlobalMesh (shared_ptr<Mesh> m);
+
+  // global communicator for netgen (dummy if no MPI)
+  extern MPI_Comm ng_comm;
+
 }
 
 #endif
@@ -44,7 +44,7 @@ namespace netgen
     cd2names.SetSize(0);
 
 #ifdef PARALLEL
-    this->comm = MPI_COMM_WORLD;
+    this->comm = netgen :: ng_comm;
     paralleltop = new ParallelMeshTopology (*this);
 #endif
   }
@@ -83,6 +83,12 @@ namespace netgen
 #endif
   }
 
+#ifdef PARALLEL
+  void Mesh :: SetCommunicator(MPI_Comm acomm)
+  {
+    this->comm = acomm;
+  }
+#endif
 
   Mesh & Mesh :: operator= (const Mesh & mesh2)
   {
@@ -1321,6 +1327,15 @@ namespace netgen
 
     if (archive.Input())
       {
+        int rank, ntasks;
+#ifdef PARALLEL
+        MPI_Comm_size(this->comm, &ntasks);
+        MPI_Comm_rank(this->comm, &rank);
+#else
+        rank = 0;
+        ntasks = 1;
+#endif
+
         RebuildSurfaceElementLists();
 
         CalcSurfacesOfNode ();
@@ -606,6 +606,11 @@ namespace netgen
     int AddEdgeDescriptor(const EdgeDescriptor & fd)
     { edgedecoding.Append(fd); return edgedecoding.Size() - 1; }
 
+#ifdef PARALLEL
+    MPI_Comm GetCommunicator() const { return this->comm; }
+    void SetCommunicator(MPI_Comm acomm);
+#endif
+
     ///
     DLL_HEADER void SetMaterial (int domnr, const string & mat);
     ///
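With GetCommunicator()/SetCommunicator() on Mesh itself, the intended flow is to attach the communicator right after construction, before loading or distributing. Sketch (ours; my_comm is a hypothetical placeholder):

    auto mesh = std::make_shared<netgen::Mesh> ();  // constructor sets comm = ng_comm
    mesh->SetCommunicator (my_comm);                // override before load/distribute
    MPI_Comm mc = mesh->GetCommunicator ();         // queried later instead of MPI_COMM_WORLD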
@@ -35,9 +35,8 @@ namespace netgen
 
   void Mesh :: SendRecvMesh ()
   {
-    int id, np;
-    MPI_Comm_rank(this->comm, &id);
-    MPI_Comm_size(this->comm, &np);
+    int id = MyMPI_GetId(GetCommunicator());
+    int np = MyMPI_GetNTasks(GetCommunicator());
 
     if (np == 1) {
       throw NgException("SendRecvMesh called, but only one rank in communicator!!");
@@ -72,17 +71,18 @@ namespace netgen
   void Mesh :: SendMesh () const
   {
     Array<MPI_Request> sendrequests;
 
+    int id = MyMPI_GetId(GetCommunicator());
+    int np = MyMPI_GetNTasks(GetCommunicator());
+
     int dim = GetDimension();
-    MyMPI_Bcast(dim);
+    MyMPI_Bcast(dim, comm);
 
 
     const_cast<MeshTopology&>(GetTopology()).Update();
 
     PrintMessage ( 3, "Sending nr of elements");
 
+    MPI_Comm comm = this->comm;
+
     Array<int> num_els_on_proc(ntasks);
     num_els_on_proc = 0;
     for (ElementIndex ei = 0; ei < GetNE(); ei++)
@@ -285,7 +285,7 @@ namespace netgen
     for (int dest = 1; dest < ntasks; dest++)
       {
        FlatArray<PointIndex> verts = verts_of_proc[dest];
-       sendrequests.Append (MyMPI_ISend (verts, dest, MPI_TAG_MESH+1));
+       sendrequests.Append (MyMPI_ISend (verts, dest, MPI_TAG_MESH+1, comm));
 
        MPI_Datatype mptype = MeshPoint::MyGetMPIType();
 
@@ -301,7 +301,7 @@ namespace netgen
        MPI_Type_commit (&newtype);
 
        MPI_Request request;
-       MPI_Isend( &points[0], 1, newtype, dest, MPI_TAG_MESH+1, MPI_COMM_WORLD, &request);
+       MPI_Isend( &points[0], 1, newtype, dest, MPI_TAG_MESH+1, comm, &request);
        sendrequests.Append (request);
       }
 
@@ -367,7 +367,7 @@ namespace netgen
       }
     Array<MPI_Request> req_per;
     for(int dest = 1; dest < ntasks; dest++)
-      req_per.Append(MyMPI_ISend(pp_data[dest], dest, MPI_TAG_MESH+1));
+      req_per.Append(MyMPI_ISend(pp_data[dest], dest, MPI_TAG_MESH+1, comm));
     MPI_Waitall(req_per.Size(), &req_per[0], MPI_STATUS_IGNORE);
 
     PrintMessage ( 3, "Sending Vertices - distprocs");
@@ -395,7 +395,7 @@ namespace netgen
       }
 
     for ( int dest = 1; dest < ntasks; dest ++ )
-      sendrequests.Append (MyMPI_ISend (distpnums[dest], dest, MPI_TAG_MESH+1));
+      sendrequests.Append (MyMPI_ISend (distpnums[dest], dest, MPI_TAG_MESH+1, comm));
 
 
 
@@ -425,7 +425,7 @@ namespace netgen
       }
 
     for (int dest = 1; dest < ntasks; dest ++ )
-      sendrequests.Append (MyMPI_ISend (elementarrays[dest], dest, MPI_TAG_MESH+2));
+      sendrequests.Append (MyMPI_ISend (elementarrays[dest], dest, MPI_TAG_MESH+2, comm));
 
 
     PrintMessage ( 3, "Sending Face Descriptors" );
@@ -442,7 +442,7 @@ namespace netgen
 
       }
     for (int dest = 1; dest < ntasks; dest++)
-      sendrequests.Append (MyMPI_ISend (fddata, dest, MPI_TAG_MESH+3));
+      sendrequests.Append (MyMPI_ISend (fddata, dest, MPI_TAG_MESH+3, comm));
 
     /** Surface Elements **/
 
@@ -526,7 +526,7 @@ namespace netgen
       });
     // distribute sel data
     for (int dest = 1; dest < ntasks; dest++)
-      sendrequests.Append (MyMPI_ISend(selbuf[dest], dest, MPI_TAG_MESH+4));
+      sendrequests.Append (MyMPI_ISend(selbuf[dest], dest, MPI_TAG_MESH+4, comm));
 
 
     /** Segments **/
@@ -676,7 +676,7 @@ namespace netgen
       });
     // distrubute segment data
     for (int dest = 1; dest < ntasks; dest++)
-      sendrequests.Append (MyMPI_ISend(segm_buf[dest], dest, MPI_TAG_MESH+5));
+      sendrequests.Append (MyMPI_ISend(segm_buf[dest], dest, MPI_TAG_MESH+5, comm));
 
     PrintMessage ( 3, "now wait ...");
 
@@ -700,9 +700,9 @@ namespace netgen
          compiled_bcnames[tot_bcsize++] = (*bcnames[k])[j];
 
     for(int k=1;k<ntasks;k++) {
-      sendrequests[6*(k-1)] = MyMPI_ISend(FlatArray<int>(1, &nbcs), k, MPI_TAG_MESH+6);
-      sendrequests[6*(k-1)+1] = MyMPI_ISend(bcname_sizes, k, MPI_TAG_MESH+6);
-      (void) MPI_Isend(compiled_bcnames, tot_bcsize, MPI_CHAR, k, MPI_TAG_MESH+6, MPI_COMM_WORLD, &sendrequests[6*(k-1)+2]);
+      sendrequests[6*(k-1)] = MyMPI_ISend(FlatArray<int>(1, &nbcs), k, MPI_TAG_MESH+6, comm);
+      sendrequests[6*(k-1)+1] = MyMPI_ISend(bcname_sizes, k, MPI_TAG_MESH+6, comm);
+      (void) MPI_Isend(compiled_bcnames, tot_bcsize, MPI_CHAR, k, MPI_TAG_MESH+6, comm, &sendrequests[6*(k-1)+2]);
     }
 
     /** Send mat-names **/
@@ -719,9 +719,9 @@ namespace netgen
     for(int j=0;j<mat_sizes[k];j++)
       compiled_mats[tot_matsize++] = (*materials[k])[j];
     for(int k=1;k<ntasks;k++) {
-      sendrequests[6*(k-1)+3] = MyMPI_ISend(FlatArray<int>(1, &nmats), k, MPI_TAG_MESH+6);
-      sendrequests[6*(k-1)+4] = MyMPI_ISend(mat_sizes, k, MPI_TAG_MESH+6);
-      (void) MPI_Isend(compiled_mats, tot_matsize, MPI_CHAR, k, MPI_TAG_MESH+6, MPI_COMM_WORLD, &sendrequests[6*(k-1)+5]);
+      sendrequests[6*(k-1)+3] = MyMPI_ISend(FlatArray<int>(1, &nmats), k, MPI_TAG_MESH+6, comm);
+      sendrequests[6*(k-1)+4] = MyMPI_ISend(mat_sizes, k, MPI_TAG_MESH+6, comm);
+      (void) MPI_Isend(compiled_mats, tot_matsize, MPI_CHAR, k, MPI_TAG_MESH+6, comm, &sendrequests[6*(k-1)+5]);
     }
 
     /* now wait ... **/
@@ -731,7 +731,7 @@ namespace netgen
 
     PrintMessage( 3, "send mesh complete");
 
-    MPI_Barrier(MPI_COMM_WORLD);
+    MPI_Barrier(comm);
   }
 
 
@@ -750,14 +750,17 @@ namespace netgen
     int timer_sels = NgProfiler::CreateTimer ("Receive surface elements");
     NgProfiler::RegionTimer reg(timer);
 
+    int id = MyMPI_GetId(GetCommunicator());
+    int np = MyMPI_GetNTasks(GetCommunicator());
+
     int dim;
-    MyMPI_Bcast(dim);
+    MyMPI_Bcast(dim, comm);
     SetDimension(dim);
 
     // Receive number of local elements
     int nelloc;
     MPI_Scatter (NULL, 0, MPI_INT,
-                 &nelloc, 1, MPI_INT, 0, MPI_COMM_WORLD);
+                 &nelloc, 1, MPI_INT, 0, comm);
     paralleltop -> SetNE (nelloc);
 
     // string st;
@@ -766,8 +769,7 @@ namespace netgen
     NgProfiler::StartTimer (timer_pts);
 
     Array<int> verts;
-    MyMPI_Recv (verts, 0, MPI_TAG_MESH+1);
-
+    MyMPI_Recv (verts, 0, MPI_TAG_MESH+1, comm);
 
     int numvert = verts.Size();
     paralleltop -> SetNV (numvert);
@@ -787,11 +789,10 @@ namespace netgen
 
     MPI_Datatype mptype = MeshPoint::MyGetMPIType();
     MPI_Status status;
-    MPI_Recv( &points[1], numvert, mptype, 0, MPI_TAG_MESH+1, MPI_COMM_WORLD, &status);
+    MPI_Recv( &points[1], numvert, mptype, 0, MPI_TAG_MESH+1, comm, &status);
 
     Array<int> pp_data;
-    MyMPI_Recv(pp_data, 0, MPI_TAG_MESH+1);
-
+    MyMPI_Recv(pp_data, 0, MPI_TAG_MESH+1, comm);
 
     int maxidentnr = pp_data[0];
     auto & idents = GetIdentifications();
@@ -815,7 +816,7 @@ namespace netgen
       }
 
     Array<int> dist_pnums;
-    MyMPI_Recv (dist_pnums, 0, MPI_TAG_MESH+1);
+    MyMPI_Recv (dist_pnums, 0, MPI_TAG_MESH+1, comm);
 
     for (int hi = 0; hi < dist_pnums.Size(); hi += 3)
       paralleltop ->
@@ -828,7 +829,7 @@ namespace netgen
      Element el;
 
      Array<int> elarray;
-      MyMPI_Recv (elarray, 0, MPI_TAG_MESH+2);
+      MyMPI_Recv (elarray, 0, MPI_TAG_MESH+2, comm);
 
      NgProfiler::RegionTimer reg(timer_els);
 
@@ -848,7 +849,7 @@ namespace netgen
 
    {
      Array<double> fddata;
-      MyMPI_Recv (fddata, 0, MPI_TAG_MESH+3);
+      MyMPI_Recv (fddata, 0, MPI_TAG_MESH+3, comm);
      for (int i = 0; i < fddata.Size(); i += 6)
        {
          int faceind = AddFaceDescriptor
@@ -863,7 +864,7 @@ namespace netgen
      NgProfiler::RegionTimer reg(timer_sels);
      Array<int> selbuf;
 
-      MyMPI_Recv ( selbuf, 0, MPI_TAG_MESH+4);
+      MyMPI_Recv ( selbuf, 0, MPI_TAG_MESH+4, comm);
 
      int ii = 0;
      int sel = 0;
@@ -894,7 +895,7 @@ namespace netgen
 
    {
      Array<double> segmbuf;
-      MyMPI_Recv ( segmbuf, 0, MPI_TAG_MESH+5);
+      MyMPI_Recv ( segmbuf, 0, MPI_TAG_MESH+5, comm);
 
      Segment seg;
      int globsegi;
@@ -939,14 +940,14 @@ namespace netgen
 
    /** Recv bc-names **/
    int nbcs;
-    MyMPI_Recv(nbcs, 0, MPI_TAG_MESH+6);
+    MyMPI_Recv(nbcs, 0, MPI_TAG_MESH+6, comm);
    Array<int> bcs(nbcs);
-    MyMPI_Recv(bcs, 0, MPI_TAG_MESH+6);
+    MyMPI_Recv(bcs, 0, MPI_TAG_MESH+6, comm);
    int size_bc = 0;
    for(int k=0;k<nbcs;k++)
      size_bc += bcs[k];
    char compiled_bcnames[size_bc];
-    MPI_Recv(compiled_bcnames, size_bc, MPI_CHAR, 0, MPI_TAG_MESH+6, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
+    MPI_Recv(compiled_bcnames, size_bc, MPI_CHAR, 0, MPI_TAG_MESH+6, comm, MPI_STATUS_IGNORE);
 
    SetNBCNames(nbcs);
    int cnt = 0;
@@ -957,14 +958,14 @@ namespace netgen
 
    /** Recv mat-names **/
    int nmats;
-    MyMPI_Recv(nmats, 0, MPI_TAG_MESH+6);
+    MyMPI_Recv(nmats, 0, MPI_TAG_MESH+6, comm);
    Array<int> matsz(nmats);
-    MyMPI_Recv(matsz, 0, MPI_TAG_MESH+6);
+    MyMPI_Recv(matsz, 0, MPI_TAG_MESH+6, comm);
    int size_mats = 0;
    for(int k=0;k<nmats;k++)
      size_mats += matsz[k];
    char compiled_mats[size_mats];
-    MPI_Recv(compiled_mats, size_mats, MPI_CHAR, 0, MPI_TAG_MESH+6, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
+    MPI_Recv(compiled_mats, size_mats, MPI_CHAR, 0, MPI_TAG_MESH+6, comm, MPI_STATUS_IGNORE);
    cnt = 0;
    materials.SetSize(nmats);
    for(int k=0;k<nmats;k++) {
@@ -973,7 +974,7 @@ namespace netgen
      cnt += matsz[k];
    }
 
-    MPI_Barrier(MPI_COMM_WORLD);
+    MPI_Barrier(comm);
 
    int timerloc = NgProfiler::CreateTimer ("Update local mesh");
    int timerloc2 = NgProfiler::CreateTimer ("CalcSurfacesOfNode");
@@ -1008,8 +1009,9 @@ namespace netgen
   // call it only for the master !
   void Mesh :: Distribute ()
   {
-    MPI_Comm_size(MPI_COMM_WORLD, &ntasks);
-    MPI_Comm_rank(MPI_COMM_WORLD, &id);
+    MPI_Comm comm = this->comm;
+    MPI_Comm_size(comm, &ntasks);
+    MPI_Comm_rank(comm, &id);
 
     if (id != 0 || ntasks == 1 ) return;
 
@@ -1120,12 +1122,9 @@ namespace netgen
     for (SurfaceElementIndex sei = 0; sei < GetNSE(); sei++)
       {
        const Element2d & el = (*this)[sei];
-       cout << "surf-el " << sei << " verts: " << endl;
        for (int j = 0; j < el.GetNP(); j++) {
-         cout << el[j] << " ";
          f(el[j], sei);
        }
-       cout << endl;
       }
   };
   auto loop_els_3d = [&](auto f) {
@@ -1150,7 +1149,6 @@ namespace netgen
                if(boundarypoints[vertex])
                  cnt[vertex]++;
              });
-  cout << "count: " << endl << cnt << endl;
   TABLE<int, PointIndex::BASE> pnt2el(cnt);
   loop_els([&](auto vertex, int index)
           {
@@ -1277,8 +1275,9 @@ namespace netgen
   // call it only for the master !
   void Mesh :: Distribute (Array<int> & volume_weights , Array<int> & surface_weights, Array<int> & segment_weights)
   {
-    MPI_Comm_size(MPI_COMM_WORLD, &ntasks);
-    MPI_Comm_rank(MPI_COMM_WORLD, &id);
+    MPI_Comm comm = this->comm;
+    MPI_Comm_size(comm, &ntasks);
+    MPI_Comm_rank(comm, &id);
 
     if (id != 0 || ntasks == 1 ) return;
 
@@ -24,12 +24,19 @@ namespace netgen
   void ParallelMeshTopology :: Reset ()
   {
     *testout << "ParallelMeshTopology::Reset" << endl;
 
+    int id = MyMPI_GetId(mesh.GetCommunicator());
+    int ntasks = MyMPI_GetNTasks(mesh.GetCommunicator());
+
+    if ( ntasks == 1 ) return;
+
+    cout << "Reset CG, this = " << this << " , mesh: " << &mesh << endl;
+
     int ned = mesh.GetTopology().GetNEdges();
     int nfa = mesh.GetTopology().GetNFaces();
 
+    cout << "nnodes : " << mesh.GetNV() << " " << ned << " " << nfa << endl;
+
     if (glob_edge.Size() != ned)
       {
        glob_edge.SetSize(ned);
@@ -206,6 +213,11 @@ namespace netgen
     // cout << "UpdateCoarseGrid" << endl;
     // if (is_updated) return;
 
+    int id = MyMPI_GetId(mesh.GetCommunicator());
+    int ntasks = MyMPI_GetNTasks(mesh.GetCommunicator());
+
+    cout << "Update CG, this = " << this << " , mesh: " << &mesh << endl;
+
     Reset();
     static int timer = NgProfiler::CreateTimer ("UpdateCoarseGrid");
     NgProfiler::RegionTimer reg(timer);