Mirror of https://github.com/NGSolve/netgen.git
Merge branch 'subcomm_mr' into 'master'

Communicators for Meshes

See merge request jschoeberl/netgen!123

Commit: 8671e2423f
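The change set below threads an MPI communicator through netgen's mesh objects: a Mesh now remembers the communicator it lives on (SetCommunicator/GetCommunicator), the MyMPI_* helpers take the communicator as an argument instead of hard-coding MPI_COMM_WORLD, and the Python bindings gain a small MPI_Comm wrapper. The following is only a minimal sketch of the resulting C++ entry points, assuming a PARALLEL build; the include path and the mesh file name "cube.vol" are placeholders, not part of this merge request.

    // Sketch only: exercises the communicator-aware API introduced below.
    #include <mpi.h>
    #include <nginterface_v2.hpp>   // assumed header for Ngx_Mesh / Ng_LoadMesh

    int main (int argc, char ** argv)
    {
      MPI_Init (&argc, &argv);

      // every rank of the communicator takes part in loading/receiving the mesh
      netgen::Ngx_Mesh mesh ("cube.vol", MPI_COMM_WORLD);

      // the mesh keeps track of the communicator it was loaded on
      MPI_Comm comm = mesh.GetCommunicator();
      int rank;  MPI_Comm_rank (comm, &rank);

      MPI_Finalize ();
      return 0;
    }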
@@ -17,97 +17,169 @@ namespace netgen
using ngcore::id;
using ngcore::ntasks;

+#ifndef PARALLEL
+/** without MPI, we need a dummy typedef **/
+typedef int MPI_Comm;
+#endif

+/** This is the "standard" communicator that will be used for netgen-objects. **/
+extern MPI_Comm ng_comm;

+#ifdef PARALLEL
+inline int MyMPI_GetNTasks (MPI_Comm comm = ng_comm)
+{
+int ntasks;
+MPI_Comm_size(comm, &ntasks);
+return ntasks;
+}
+inline int MyMPI_GetId (MPI_Comm comm = ng_comm)
+{
+int id;
+MPI_Comm_rank(comm, &id);
+return id;
+}
+#else
+enum { MPI_COMM_WORLD = 12345, MPI_COMM_NULL = 0};
+inline int MyMPI_GetNTasks (MPI_Comm comm = ng_comm) { return 1; }
+inline int MyMPI_GetId (MPI_Comm comm = ng_comm) { return 0; }
+#endif

+#ifdef PARALLEL
+// For python wrapping of communicators
+struct PyMPI_Comm {
+MPI_Comm comm;
+bool owns_comm;
+PyMPI_Comm (MPI_Comm _comm, bool _owns_comm = false) : comm(_comm), owns_comm(_owns_comm) { }
+PyMPI_Comm (const PyMPI_Comm & c) = delete;
+~PyMPI_Comm () {
+if (owns_comm)
+MPI_Comm_free(&comm);
+}
+inline int Rank() const { return MyMPI_GetId(comm); }
+inline int Size() const { return MyMPI_GetNTasks(comm); }
+};
+#else
+// dummy without MPI
+struct PyMPI_Comm {
+MPI_Comm comm = 0;
+PyMPI_Comm (MPI_Comm _comm, bool _owns_comm = false) { }
+~PyMPI_Comm () { }
+inline int Rank() const { return 0; }
+inline int Size() const { return 1; }
+};
+#endif

+#ifdef PARALLEL
+template <class T>
+inline MPI_Datatype MyGetMPIType ( )
+{ cerr << "ERROR in GetMPIType() -- no type found" << endl;return 0; }
+template <>
+inline MPI_Datatype MyGetMPIType<int> ( )
+{ return MPI_INT; }
+template <>
+inline MPI_Datatype MyGetMPIType<double> ( )
+{ return MPI_DOUBLE; }
+template <>
+inline MPI_Datatype MyGetMPIType<char> ( )
+{ return MPI_CHAR; }
+template<>
+inline MPI_Datatype MyGetMPIType<size_t> ( )
+{ return MPI_UINT64_T; }
+#else
+typedef int MPI_Datatype;
+template <class T> inline MPI_Datatype MyGetMPIType ( ) { return 0; }
+#endif

+#ifdef PARALLEL
+inline MPI_Comm MyMPI_SubCommunicator(MPI_Comm comm, Array<int> & procs)
+{
+MPI_Comm subcomm;
+MPI_Group gcomm, gsubcomm;
+MPI_Comm_group(comm, &gcomm);
+MPI_Group_incl(gcomm, procs.Size(), &(procs[0]), &gsubcomm);
+MPI_Comm_create_group(comm, gsubcomm, 6969, &subcomm);
+return subcomm;
+}
+#else
+inline MPI_Comm MyMPI_SubCommunicator(MPI_Comm comm, Array<int> & procs)
+{ return comm; }
+#endif

#ifdef PARALLEL

enum { MPI_TAG_CMD = 110 };
enum { MPI_TAG_MESH = 210 };
enum { MPI_TAG_VIS = 310 };

-extern MPI_Comm mesh_comm;
+inline void MyMPI_Send (int i, int dest, int tag, MPI_Comm comm = ng_comm)

-template <class T>
-MPI_Datatype MyGetMPIType ( )
-{ cerr << "ERROR in GetMPIType() -- no type found" << endl;return 0; }

-template <>
-inline MPI_Datatype MyGetMPIType<int> ( )
-{ return MPI_INT; }

-template <>
-inline MPI_Datatype MyGetMPIType<double> ( )
-{ return MPI_DOUBLE; }


-inline void MyMPI_Send (int i, int dest, int tag)
{
int hi = i;
-MPI_Send( &hi, 1, MPI_INT, dest, tag, MPI_COMM_WORLD);
+MPI_Send( &hi, 1, MPI_INT, dest, tag, comm);
}

-inline void MyMPI_Recv (int & i, int src, int tag)
+inline void MyMPI_Recv (int & i, int src, int tag, MPI_Comm comm = ng_comm)
{
MPI_Status status;
-MPI_Recv( &i, 1, MPI_INT, src, tag, MPI_COMM_WORLD, &status);
+MPI_Recv( &i, 1, MPI_INT, src, tag, comm, &status);
}


-inline void MyMPI_Send (const string & s, int dest, int tag)
+inline void MyMPI_Send (const string & s, int dest, int tag, MPI_Comm comm = ng_comm)
{
-MPI_Send( const_cast<char*> (s.c_str()), s.length(), MPI_CHAR, dest, tag, MPI_COMM_WORLD);
+MPI_Send( const_cast<char*> (s.c_str()), s.length(), MPI_CHAR, dest, tag, comm);
}

-inline void MyMPI_Recv (string & s, int src, int tag)
+inline void MyMPI_Recv (string & s, int src, int tag, MPI_Comm comm = ng_comm)
{
MPI_Status status;
int len;
MPI_Probe (src, tag, MPI_COMM_WORLD, &status);
MPI_Get_count (&status, MPI_CHAR, &len);
s.assign (len, ' ');
-MPI_Recv( &s[0], len, MPI_CHAR, src, tag, MPI_COMM_WORLD, &status);
+MPI_Recv( &s[0], len, MPI_CHAR, src, tag, comm, &status);
}


template <class T, int BASE>
-inline void MyMPI_Send (FlatArray<T, BASE> s, int dest, int tag)
+inline void MyMPI_Send (FlatArray<T, BASE> s, int dest, int tag, MPI_Comm comm = ng_comm)
{
-MPI_Send( &s.First(), s.Size(), MyGetMPIType<T>(), dest, tag, MPI_COMM_WORLD);
+MPI_Send( &s.First(), s.Size(), MyGetMPIType<T>(), dest, tag, comm);
}

template <class T, int BASE>
-inline void MyMPI_Recv ( FlatArray<T, BASE> s, int src, int tag)
+inline void MyMPI_Recv ( FlatArray<T, BASE> s, int src, int tag, MPI_Comm comm = ng_comm)
{
MPI_Status status;
-MPI_Recv( &s.First(), s.Size(), MyGetMPIType<T>(), src, tag, MPI_COMM_WORLD, &status);
+MPI_Recv( &s.First(), s.Size(), MyGetMPIType<T>(), src, tag, comm, &status);
}

template <class T, int BASE>
-inline void MyMPI_Recv ( Array <T, BASE> & s, int src, int tag)
+inline void MyMPI_Recv ( Array <T, BASE> & s, int src, int tag, MPI_Comm comm = ng_comm)
{
MPI_Status status;
int len;
-MPI_Probe (src, tag, MPI_COMM_WORLD, &status);
+MPI_Probe (src, tag, comm, &status);
MPI_Get_count (&status, MyGetMPIType<T>(), &len);

s.SetSize (len);
-MPI_Recv( &s.First(), len, MyGetMPIType<T>(), src, tag, MPI_COMM_WORLD, &status);
+MPI_Recv( &s.First(), len, MyGetMPIType<T>(), src, tag, comm, &status);
}

template <class T, int BASE>
-inline int MyMPI_Recv ( Array <T, BASE> & s, int tag)
+inline int MyMPI_Recv ( Array <T, BASE> & s, int tag, MPI_Comm comm = ng_comm)
{
MPI_Status status;
int len;
-MPI_Probe (MPI_ANY_SOURCE, tag, MPI_COMM_WORLD, &status);
+MPI_Probe (MPI_ANY_SOURCE, tag, comm, &status);

int src = status.MPI_SOURCE;

MPI_Get_count (&status, MyGetMPIType<T>(), &len);

s.SetSize (len);
-MPI_Recv( &s.First(), len, MyGetMPIType<T>(), src, tag, MPI_COMM_WORLD, &status);
+MPI_Recv( &s.First(), len, MyGetMPIType<T>(), src, tag, comm, &status);

return src;
}
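As a quick illustration of the helpers added in this hunk: MyMPI_GetId/MyMPI_GetNTasks wrap MPI_Comm_rank/MPI_Comm_size, and MyMPI_SubCommunicator builds a communicator from an explicit rank list via MPI_Comm_create_group. The following is a minimal sketch, assuming a PARALLEL build and netgen's Array type in scope; the choice of "first half of the ranks" is only an example.

    // Sketch: carve a sub-communicator out of MPI_COMM_WORLD with the new helper.
    MPI_Comm comm = MPI_COMM_WORLD;
    int ntasks = MyMPI_GetNTasks(comm);   // wraps MPI_Comm_size
    int rank   = MyMPI_GetId(comm);       // wraps MPI_Comm_rank

    Array<int> procs;                     // ranks that should form the sub-communicator
    for (int p = 0; p < ntasks/2; p++)
      procs.Append(p);

    MPI_Comm subcomm = MPI_COMM_NULL;
    if (rank < ntasks/2)                  // MPI_Comm_create_group is collective over the listed ranks
      subcomm = MyMPI_SubCommunicator(comm, procs);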
@@ -129,7 +201,7 @@ namespace netgen
*/

template <class T, int BASE>
-inline MPI_Request MyMPI_ISend (FlatArray<T, BASE> s, int dest, int tag, MPI_Comm comm = MPI_COMM_WORLD)
+inline MPI_Request MyMPI_ISend (FlatArray<T, BASE> s, int dest, int tag, MPI_Comm comm = ng_comm)
{
MPI_Request request;
MPI_Isend( &s.First(), s.Size(), MyGetMPIType<T>(), dest, tag, comm, &request);

@@ -138,7 +210,7 @@ namespace netgen

template <class T, int BASE>
-inline MPI_Request MyMPI_IRecv (FlatArray<T, BASE> s, int dest, int tag, MPI_Comm comm = MPI_COMM_WORLD)
+inline MPI_Request MyMPI_IRecv (FlatArray<T, BASE> s, int dest, int tag, MPI_Comm comm = ng_comm)
{
MPI_Request request;
MPI_Irecv( &s.First(), s.Size(), MyGetMPIType<T>(), dest, tag, comm, &request);

@@ -203,11 +275,10 @@ namespace netgen
template <typename T>
inline void MyMPI_ExchangeTable (TABLE<T> & send_data,
TABLE<T> & recv_data, int tag,
-MPI_Comm comm = MPI_COMM_WORLD)
+MPI_Comm comm = ng_comm)
{
-int ntasks, rank;
+int rank = MyMPI_GetId(comm);
-MPI_Comm_size(comm, &ntasks);
+int ntasks = MyMPI_GetNTasks(comm);
-MPI_Comm_rank(comm, &rank);

Array<int> send_sizes(ntasks);
Array<int> recv_sizes(ntasks);

@@ -251,22 +322,22 @@ namespace netgen

template <class T>
-inline void MyMPI_Bcast (T & s, MPI_Comm comm = MPI_COMM_WORLD)
+inline void MyMPI_Bcast (T & s, MPI_Comm comm = ng_comm)
{
MPI_Bcast (&s, 1, MyGetMPIType<T>(), 0, comm);
}

template <class T>
-inline void MyMPI_Bcast (Array<T, 0> & s, MPI_Comm comm = MPI_COMM_WORLD)
+inline void MyMPI_Bcast (Array<T, 0> & s, MPI_Comm comm = ng_comm)
{
int size = s.Size();
MyMPI_Bcast (size, comm);
-if (id != 0) s.SetSize (size);
+if (MyMPI_GetId(comm) != 0) s.SetSize (size);
MPI_Bcast (&s[0], size, MyGetMPIType<T>(), 0, comm);
}

template <class T>
-inline void MyMPI_Bcast (Array<T, 0> & s, int root, MPI_Comm comm = MPI_COMM_WORLD)
+inline void MyMPI_Bcast (Array<T, 0> & s, int root, MPI_Comm comm = ng_comm)
{
int id;
MPI_Comm_rank(comm, &id);

@@ -279,19 +350,19 @@ namespace netgen
}

template <class T, class T2>
-inline void MyMPI_Allgather (const T & send, FlatArray<T2> recv, MPI_Comm comm)
+inline void MyMPI_Allgather (const T & send, FlatArray<T2> recv, MPI_Comm comm = ng_comm)
{
MPI_Allgather( const_cast<T*> (&send), 1, MyGetMPIType<T>(), &recv[0], 1, MyGetMPIType<T2>(), comm);
}

template <class T, class T2>
-inline void MyMPI_Alltoall (FlatArray<T> send, FlatArray<T2> recv, MPI_Comm comm)
+inline void MyMPI_Alltoall (FlatArray<T> send, FlatArray<T2> recv, MPI_Comm comm = ng_comm)
{
MPI_Alltoall( &send[0], 1, MyGetMPIType<T>(), &recv[0], 1, MyGetMPIType<T2>(), comm);
}

// template <class T, class T2>
-// inline void MyMPI_Alltoall_Block (FlatArray<T> send, FlatArray<T2> recv, int blocklen, MPI_Comm comm)
+// inline void MyMPI_Alltoall_Block (FlatArray<T> send, FlatArray<T2> recv, int blocklen, MPI_Comm comm = ng_comm)
// {
// MPI_Alltoall( &send[0], blocklen, MyGetMPIType<T>(), &recv[0], blocklen, MyGetMPIType<T2>(), comm);
// }

@@ -37,6 +37,10 @@
// max number of nodes per surface element
#define NG_SURFACE_ELEMENT_MAXPOINTS 8

+#ifndef PARALLEL
+typedef int MPI_Comm;
+#endif
+namespace netgen { extern MPI_Comm ng_comm; }

// implemented element types:

@@ -60,9 +64,9 @@ extern "C" {

// load geometry from file
DLL_HEADER void Ng_LoadGeometry (const char * filename);

// load netgen mesh
-DLL_HEADER void Ng_LoadMesh (const char * filename);
+DLL_HEADER void Ng_LoadMesh (const char * filename, MPI_Comm comm = netgen::ng_comm);

// load netgen mesh
DLL_HEADER void Ng_LoadMeshFromString (const char * mesh_as_string);

@@ -253,17 +253,24 @@ namespace netgen
public:
// Ngx_Mesh () { ; }
// Ngx_Mesh(class Mesh * amesh) : mesh(amesh) { ; }
-Ngx_Mesh(shared_ptr<Mesh> amesh = NULL);
-void LoadMesh (const string & filename);

-void LoadMesh (istream & str);
+/** reuse a netgen-mesh **/
+Ngx_Mesh (shared_ptr<Mesh> amesh);
+/** load a new mesh **/
+Ngx_Mesh (string filename, MPI_Comm acomm = netgen::ng_comm);

+void LoadMesh (const string & filename, MPI_Comm comm = netgen::ng_comm);

+void LoadMesh (istream & str, MPI_Comm comm = netgen::ng_comm);
void SaveMesh (ostream & str) const;
void UpdateTopology ();
void DoArchive (Archive & archive);

+MPI_Comm GetCommunicator() const;

virtual ~Ngx_Mesh();

-bool Valid () { return mesh != NULL; }
+bool Valid () const { return mesh != NULL; }

int GetDimension() const;
int GetNLevels() const;

@@ -117,14 +117,10 @@ void Ng_LoadMeshFromStream ( istream & input )
}

+void Ng_LoadMesh (const char * filename, MPI_Comm comm)

-void Ng_LoadMesh (const char * filename)
{
-#ifdef PARALLEL
+int id = MyMPI_GetId(comm);
-MPI_Comm_size(MPI_COMM_WORLD, &ntasks);
+int ntasks = MyMPI_GetNTasks(comm);
-MPI_Comm_rank(MPI_COMM_WORLD, &id);
-#endif

{
ifstream infile(filename);

@@ -134,11 +130,10 @@ void Ng_LoadMesh (const char * filename)

if ( string(filename).find(".vol") == string::npos )
{
-#ifdef PARALLEL
if(ntasks>1)
throw NgException("Not sure what to do with this?? Does this work with MPI??");
-#endif
mesh.reset (new Mesh());
+mesh->SetCommunicator(comm);
ReadFile(*mesh,filename);
//mesh->SetGlobalH (mparam.maxh);
//mesh->CalcLocalH();

@@ -146,12 +141,10 @@ void Ng_LoadMesh (const char * filename)
}

istream * infile;
-char* buf; // for distributing geometry!
+Array<char> buf; // for distributing geometry!
int strs;

-#ifdef PARALLEL
if( id == 0) {
-#endif

string fn(filename);
if (fn.substr (fn.length()-3, 3) == ".gz")

@@ -159,6 +152,7 @@ void Ng_LoadMesh (const char * filename)
else
infile = new ifstream (filename);
mesh.reset (new Mesh());
+mesh->SetCommunicator(comm);
mesh -> Load(*infile);
SetGlobalMesh (mesh);

@@ -168,12 +162,12 @@ void Ng_LoadMesh (const char * filename)
geom_part << infile->rdbuf();
string geom_part_string = geom_part.str();
strs = geom_part_string.size();
-buf = new char[strs];
+// buf = new char[strs];
-memcpy(buf, geom_part_string.c_str(), strs*sizeof(char));
+buf.SetSize(strs);
+memcpy(&buf[0], geom_part_string.c_str(), strs*sizeof(char));
}
delete infile;

-#ifdef PARALLEL
if (ntasks > 1)
{

@@ -239,21 +233,21 @@ void Ng_LoadMesh (const char * filename)
} // id==0 end
else {
mesh.reset (new Mesh());
+mesh->SetCommunicator(comm);
SetGlobalMesh (mesh);
mesh->SendRecvMesh();
}

if(!ng_geometry && ntasks>1) {
-/** Scatter the geometry-string **/
+#ifdef PARALLEL
-MPI_Bcast(&strs, 1, MPI_INT, 0, MPI_COMM_WORLD);
+/** Scatter the geometry-string (no dummy-implementation in mpi_interface) **/
-if(id!=0) buf = new char[strs];
+MyMPI_Bcast(buf, comm);
-MPI_Bcast(buf, strs, MPI_CHAR, 0, MPI_COMM_WORLD);
-}
#endif
+}

if(!ng_geometry) {
-infile = new istringstream(string((const char*)buf, (size_t)strs));
+infile = new istringstream(string((const char*)&buf[0], (size_t)strs));
-delete[] buf;
+// delete[] buf;
for (int i = 0; i < geometryregister.Size(); i++)
{
NetgenGeometry * hgeom = geometryregister[i]->LoadFromMeshFile (*infile);

@@ -31,39 +31,39 @@ namespace netgen
return hmesh;
}

+Ngx_Mesh :: Ngx_Mesh (shared_ptr<Mesh> amesh)
-Ngx_Mesh :: Ngx_Mesh (shared_ptr<Mesh> amesh)
+{ mesh = amesh ? amesh : netgen::mesh; }
-{
+Ngx_Mesh :: Ngx_Mesh (string filename, MPI_Comm acomm)
-if (amesh)
+{ LoadMesh(filename, acomm); }
-mesh = amesh;
-else
-mesh = netgen::mesh;
-}

-Ngx_Mesh * LoadMesh (const string & filename)
+Ngx_Mesh * LoadMesh (const string & filename, MPI_Comm comm = netgen::ng_comm)
{
netgen::mesh.reset();
-Ng_LoadMesh (filename.c_str());
+Ng_LoadMesh (filename.c_str(), comm);
return new Ngx_Mesh (netgen::mesh);
}

-void Ngx_Mesh :: LoadMesh (const string & filename)
+void Ngx_Mesh :: LoadMesh (const string & filename, MPI_Comm comm)
{
netgen::mesh.reset();
-Ng_LoadMesh (filename.c_str());
+Ng_LoadMesh (filename.c_str(), comm);
// mesh = move(netgen::mesh);
mesh = netgen::mesh;
}

-void Ngx_Mesh :: LoadMesh (istream & ist)
+void Ngx_Mesh :: LoadMesh (istream & ist, MPI_Comm comm)
{
netgen::mesh = make_shared<Mesh>();
+netgen::mesh->SetCommunicator(comm);
netgen::mesh -> Load (ist);
// mesh = move(netgen::mesh);
mesh = netgen::mesh;
SetGlobalMesh (mesh);
}

+MPI_Comm Ngx_Mesh :: GetCommunicator() const
+{ return Valid() ? mesh->GetCommunicator() : MPI_COMM_NULL; }

void Ngx_Mesh :: SaveMesh (ostream & ost) const
{
mesh -> Save (ost);

@@ -71,7 +71,12 @@ namespace netgen

void Ngx_Mesh :: DoArchive (Archive & archive)
{
-if (archive.Input()) mesh = make_shared<Mesh>();
+#ifdef PARALLEL
+if (archive.Input()) {
+mesh = make_shared<Mesh>();
+mesh->SetCommunicator(GetCommunicator());
+}
+#endif
mesh->DoArchive(archive);
if (archive.Input())
{

@@ -553,14 +553,18 @@ namespace netgen
order = 1;

+MPI_Comm curve_comm;
#ifdef PARALLEL
enum { MPI_TAG_CURVE = MPI_TAG_MESH+20 };

const ParallelMeshTopology & partop = mesh.GetParallelTopology ();
-MPI_Comm curve_comm;
+MPI_Comm_dup (mesh.GetCommunicator(), &curve_comm);
-MPI_Comm_dup (MPI_COMM_WORLD, &curve_comm);
Array<int> procs;
+#else
+curve_comm = ng_comm; // dummy!
#endif
+int rank = MyMPI_GetId(curve_comm);
+int ntasks = MyMPI_GetNTasks(curve_comm);

if (working)
order = aorder;

@@ -31,6 +31,9 @@ namespace netgen
DLL_HEADER shared_ptr<NetgenGeometry> ng_geometry;
// TraceGlobal glob2("global2");

+// global communicator for netgen
+MPI_Comm ng_comm = MPI_COMM_WORLD;

weak_ptr<Mesh> global_mesh;
void SetGlobalMesh (shared_ptr<Mesh> m)
{

@@ -59,6 +59,10 @@ namespace netgen

DLL_HEADER extern weak_ptr<Mesh> global_mesh;
DLL_HEADER void SetGlobalMesh (shared_ptr<Mesh> m);

+// global communicator for netgen (dummy if no MPI)
+extern MPI_Comm ng_comm;

}

#endif

@@ -43,8 +43,8 @@ namespace netgen
bcnames.SetSize(0);
cd2names.SetSize(0);

+this->comm = netgen :: ng_comm;
#ifdef PARALLEL
-this->comm = MPI_COMM_WORLD;
paralleltop = new ParallelMeshTopology (*this);
#endif
}

@@ -83,6 +83,10 @@ namespace netgen
#endif
}

+void Mesh :: SetCommunicator(MPI_Comm acomm)
+{
+this->comm = acomm;
+}

Mesh & Mesh :: operator= (const Mesh & mesh2)
{

@@ -1321,6 +1325,9 @@ namespace netgen

if (archive.Input())
{
+int rank = MyMPI_GetId(GetCommunicator());
+int ntasks = MyMPI_GetNTasks(GetCommunicator());

RebuildSurfaceElementLists();

CalcSurfacesOfNode ();
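With the Mesh constructor now defaulting to netgen::ng_comm and the new Mesh::SetCommunicator, a mesh can be attached to a specific communicator before it is loaded, which is exactly what the reworked Ng_LoadMesh above does. The following is a minimal sketch only, assuming a PARALLEL build; "part.vol" is a placeholder file and subcomm a placeholder communicator (e.g. one built with MyMPI_SubCommunicator).

    // Sketch: attach a mesh to a chosen communicator before loading it.
    ifstream in ("part.vol");            // placeholder .vol file
    auto mesh = make_shared<Mesh>();
    mesh->SetCommunicator (subcomm);     // instead of the default netgen::ng_comm
    mesh->Load (in);

    // later code recovers the communicator from the mesh itself
    int rank = MyMPI_GetId (mesh->GetCommunicator());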
@@ -32,10 +32,8 @@ namespace netgen
/// point coordinates
T_POINTS points;

-#ifdef PARALLEL
+// The communicator for this mesh. Just a dummy if compiled without MPI.
-// The communicator for this mesh. (more or less dummy for now!)
MPI_Comm comm;
-#endif

/// line-segments at edges
Array<Segment, 0, size_t> segments;

@@ -606,6 +604,9 @@ namespace netgen
int AddEdgeDescriptor(const EdgeDescriptor & fd)
{ edgedecoding.Append(fd); return edgedecoding.Size() - 1; }

+MPI_Comm GetCommunicator() const { return this->comm; }
+void SetCommunicator(MPI_Comm acomm);

///
DLL_HEADER void SetMaterial (int domnr, const string & mat);
///

@@ -858,7 +859,11 @@ namespace netgen
void SendMesh ( ) const; // Mesh * mastermesh, Array<int> & neloc) const;
/// loads a mesh sent from master processor
void ReceiveParallelMesh ();
+#else
+void Distribute () {}
+void SendRecvMesh () {}
+void Distribute (Array<int> & volume_weights, Array<int> & surface_weights,
+Array<int> & segment_weights){ }
#endif


@@ -35,9 +35,8 @@ namespace netgen

void Mesh :: SendRecvMesh ()
{
-int id, np;
+int id = MyMPI_GetId(GetCommunicator());
-MPI_Comm_rank(this->comm, &id);
+int np = MyMPI_GetNTasks(GetCommunicator());
-MPI_Comm_size(this->comm, &np);

if (np == 1) {
throw NgException("SendRecvMesh called, but only one rank in communicator!!");

@@ -73,16 +72,18 @@ namespace netgen
{
Array<MPI_Request> sendrequests;

+MPI_Comm comm = GetCommunicator();
+int id = MyMPI_GetId(comm);
+int ntasks = MyMPI_GetNTasks(comm);

int dim = GetDimension();
-MyMPI_Bcast(dim);
+MyMPI_Bcast(dim, comm);

const_cast<MeshTopology&>(GetTopology()).Update();

PrintMessage ( 3, "Sending nr of elements");

-MPI_Comm comm = this->comm;

Array<int> num_els_on_proc(ntasks);
num_els_on_proc = 0;
for (ElementIndex ei = 0; ei < GetNE(); ei++)

@@ -285,7 +286,7 @@ namespace netgen
for (int dest = 1; dest < ntasks; dest++)
{
FlatArray<PointIndex> verts = verts_of_proc[dest];
-sendrequests.Append (MyMPI_ISend (verts, dest, MPI_TAG_MESH+1));
+sendrequests.Append (MyMPI_ISend (verts, dest, MPI_TAG_MESH+1, comm));

MPI_Datatype mptype = MeshPoint::MyGetMPIType();

@@ -301,7 +302,7 @@ namespace netgen
MPI_Type_commit (&newtype);

MPI_Request request;
-MPI_Isend( &points[0], 1, newtype, dest, MPI_TAG_MESH+1, MPI_COMM_WORLD, &request);
+MPI_Isend( &points[0], 1, newtype, dest, MPI_TAG_MESH+1, comm, &request);
sendrequests.Append (request);
}

@@ -367,7 +368,7 @@ namespace netgen
}
Array<MPI_Request> req_per;
for(int dest = 1; dest < ntasks; dest++)
-req_per.Append(MyMPI_ISend(pp_data[dest], dest, MPI_TAG_MESH+1));
+req_per.Append(MyMPI_ISend(pp_data[dest], dest, MPI_TAG_MESH+1, comm));
MPI_Waitall(req_per.Size(), &req_per[0], MPI_STATUS_IGNORE);

PrintMessage ( 3, "Sending Vertices - distprocs");

@@ -395,7 +396,7 @@ namespace netgen
}

for ( int dest = 1; dest < ntasks; dest ++ )
-sendrequests.Append (MyMPI_ISend (distpnums[dest], dest, MPI_TAG_MESH+1));
+sendrequests.Append (MyMPI_ISend (distpnums[dest], dest, MPI_TAG_MESH+1, comm));


@@ -425,7 +426,7 @@ namespace netgen
}

for (int dest = 1; dest < ntasks; dest ++ )
-sendrequests.Append (MyMPI_ISend (elementarrays[dest], dest, MPI_TAG_MESH+2));
+sendrequests.Append (MyMPI_ISend (elementarrays[dest], dest, MPI_TAG_MESH+2, comm));

PrintMessage ( 3, "Sending Face Descriptors" );

@@ -442,7 +443,7 @@ namespace netgen

}
for (int dest = 1; dest < ntasks; dest++)
-sendrequests.Append (MyMPI_ISend (fddata, dest, MPI_TAG_MESH+3));
+sendrequests.Append (MyMPI_ISend (fddata, dest, MPI_TAG_MESH+3, comm));

/** Surface Elements **/


@@ -526,7 +527,7 @@ namespace netgen
});
// distribute sel data
for (int dest = 1; dest < ntasks; dest++)
-sendrequests.Append (MyMPI_ISend(selbuf[dest], dest, MPI_TAG_MESH+4));
+sendrequests.Append (MyMPI_ISend(selbuf[dest], dest, MPI_TAG_MESH+4, comm));

/** Segments **/

@@ -676,7 +677,7 @@ namespace netgen
});
// distrubute segment data
for (int dest = 1; dest < ntasks; dest++)
-sendrequests.Append (MyMPI_ISend(segm_buf[dest], dest, MPI_TAG_MESH+5));
+sendrequests.Append (MyMPI_ISend(segm_buf[dest], dest, MPI_TAG_MESH+5, comm));

PrintMessage ( 3, "now wait ...");


@@ -700,9 +701,9 @@ namespace netgen
compiled_bcnames[tot_bcsize++] = (*bcnames[k])[j];

for(int k=1;k<ntasks;k++) {
-sendrequests[6*(k-1)] = MyMPI_ISend(FlatArray<int>(1, &nbcs), k, MPI_TAG_MESH+6);
+sendrequests[6*(k-1)] = MyMPI_ISend(FlatArray<int>(1, &nbcs), k, MPI_TAG_MESH+6, comm);
-sendrequests[6*(k-1)+1] = MyMPI_ISend(bcname_sizes, k, MPI_TAG_MESH+6);
+sendrequests[6*(k-1)+1] = MyMPI_ISend(bcname_sizes, k, MPI_TAG_MESH+6, comm);
-(void) MPI_Isend(compiled_bcnames, tot_bcsize, MPI_CHAR, k, MPI_TAG_MESH+6, MPI_COMM_WORLD, &sendrequests[6*(k-1)+2]);
+(void) MPI_Isend(compiled_bcnames, tot_bcsize, MPI_CHAR, k, MPI_TAG_MESH+6, comm, &sendrequests[6*(k-1)+2]);
}

/** Send mat-names **/

@@ -719,9 +720,9 @@ namespace netgen
for(int j=0;j<mat_sizes[k];j++)
compiled_mats[tot_matsize++] = (*materials[k])[j];
for(int k=1;k<ntasks;k++) {
-sendrequests[6*(k-1)+3] = MyMPI_ISend(FlatArray<int>(1, &nmats), k, MPI_TAG_MESH+6);
+sendrequests[6*(k-1)+3] = MyMPI_ISend(FlatArray<int>(1, &nmats), k, MPI_TAG_MESH+6, comm);
-sendrequests[6*(k-1)+4] = MyMPI_ISend(mat_sizes, k, MPI_TAG_MESH+6);
+sendrequests[6*(k-1)+4] = MyMPI_ISend(mat_sizes, k, MPI_TAG_MESH+6, comm);
-(void) MPI_Isend(compiled_mats, tot_matsize, MPI_CHAR, k, MPI_TAG_MESH+6, MPI_COMM_WORLD, &sendrequests[6*(k-1)+5]);
+(void) MPI_Isend(compiled_mats, tot_matsize, MPI_CHAR, k, MPI_TAG_MESH+6, comm, &sendrequests[6*(k-1)+5]);
}

/* now wait ... **/

@@ -731,7 +732,7 @@ namespace netgen

PrintMessage( 3, "send mesh complete");

-MPI_Barrier(MPI_COMM_WORLD);
+MPI_Barrier(comm);
}


@@ -750,14 +751,17 @@ namespace netgen
int timer_sels = NgProfiler::CreateTimer ("Receive surface elements");
NgProfiler::RegionTimer reg(timer);

+int id = MyMPI_GetId(GetCommunicator());
+int ntasks = MyMPI_GetNTasks(GetCommunicator());

int dim;
-MyMPI_Bcast(dim);
+MyMPI_Bcast(dim, comm);
SetDimension(dim);

// Receive number of local elements
int nelloc;
MPI_Scatter (NULL, 0, MPI_INT,
-&nelloc, 1, MPI_INT, 0, MPI_COMM_WORLD);
+&nelloc, 1, MPI_INT, 0, comm);
paralleltop -> SetNE (nelloc);

// string st;

@@ -766,8 +770,7 @@ namespace netgen
NgProfiler::StartTimer (timer_pts);

Array<int> verts;
-MyMPI_Recv (verts, 0, MPI_TAG_MESH+1);
+MyMPI_Recv (verts, 0, MPI_TAG_MESH+1, comm);

int numvert = verts.Size();
paralleltop -> SetNV (numvert);

@@ -787,11 +790,10 @@ namespace netgen

MPI_Datatype mptype = MeshPoint::MyGetMPIType();
MPI_Status status;
-MPI_Recv( &points[1], numvert, mptype, 0, MPI_TAG_MESH+1, MPI_COMM_WORLD, &status);
+MPI_Recv( &points[1], numvert, mptype, 0, MPI_TAG_MESH+1, comm, &status);

Array<int> pp_data;
-MyMPI_Recv(pp_data, 0, MPI_TAG_MESH+1);
+MyMPI_Recv(pp_data, 0, MPI_TAG_MESH+1, comm);

int maxidentnr = pp_data[0];
auto & idents = GetIdentifications();

@@ -815,7 +817,7 @@ namespace netgen
}

Array<int> dist_pnums;
-MyMPI_Recv (dist_pnums, 0, MPI_TAG_MESH+1);
+MyMPI_Recv (dist_pnums, 0, MPI_TAG_MESH+1, comm);

for (int hi = 0; hi < dist_pnums.Size(); hi += 3)
paralleltop ->

@@ -828,7 +830,7 @@ namespace netgen
Element el;

Array<int> elarray;
-MyMPI_Recv (elarray, 0, MPI_TAG_MESH+2);
+MyMPI_Recv (elarray, 0, MPI_TAG_MESH+2, comm);

NgProfiler::RegionTimer reg(timer_els);

@@ -848,7 +850,7 @@ namespace netgen

{
Array<double> fddata;
-MyMPI_Recv (fddata, 0, MPI_TAG_MESH+3);
+MyMPI_Recv (fddata, 0, MPI_TAG_MESH+3, comm);
for (int i = 0; i < fddata.Size(); i += 6)
{
int faceind = AddFaceDescriptor

@@ -863,7 +865,7 @@ namespace netgen
NgProfiler::RegionTimer reg(timer_sels);
Array<int> selbuf;

-MyMPI_Recv ( selbuf, 0, MPI_TAG_MESH+4);
+MyMPI_Recv ( selbuf, 0, MPI_TAG_MESH+4, comm);

int ii = 0;
int sel = 0;

@@ -894,7 +896,7 @@ namespace netgen

{
Array<double> segmbuf;
-MyMPI_Recv ( segmbuf, 0, MPI_TAG_MESH+5);
+MyMPI_Recv ( segmbuf, 0, MPI_TAG_MESH+5, comm);

Segment seg;
int globsegi;

@@ -939,14 +941,14 @@ namespace netgen

/** Recv bc-names **/
int nbcs;
-MyMPI_Recv(nbcs, 0, MPI_TAG_MESH+6);
+MyMPI_Recv(nbcs, 0, MPI_TAG_MESH+6, comm);
Array<int> bcs(nbcs);
-MyMPI_Recv(bcs, 0, MPI_TAG_MESH+6);
+MyMPI_Recv(bcs, 0, MPI_TAG_MESH+6, comm);
int size_bc = 0;
for(int k=0;k<nbcs;k++)
size_bc += bcs[k];
char compiled_bcnames[size_bc];
-MPI_Recv(compiled_bcnames, size_bc, MPI_CHAR, 0, MPI_TAG_MESH+6, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
+MPI_Recv(compiled_bcnames, size_bc, MPI_CHAR, 0, MPI_TAG_MESH+6, comm, MPI_STATUS_IGNORE);

SetNBCNames(nbcs);
int cnt = 0;

@@ -957,14 +959,14 @@ namespace netgen

/** Recv mat-names **/
int nmats;
-MyMPI_Recv(nmats, 0, MPI_TAG_MESH+6);
+MyMPI_Recv(nmats, 0, MPI_TAG_MESH+6, comm);
Array<int> matsz(nmats);
-MyMPI_Recv(matsz, 0, MPI_TAG_MESH+6);
+MyMPI_Recv(matsz, 0, MPI_TAG_MESH+6, comm);
int size_mats = 0;
for(int k=0;k<nmats;k++)
size_mats += matsz[k];
char compiled_mats[size_mats];
-MPI_Recv(compiled_mats, size_mats, MPI_CHAR, 0, MPI_TAG_MESH+6, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
+MPI_Recv(compiled_mats, size_mats, MPI_CHAR, 0, MPI_TAG_MESH+6, comm, MPI_STATUS_IGNORE);
cnt = 0;
materials.SetSize(nmats);
for(int k=0;k<nmats;k++) {

@@ -973,7 +975,7 @@ namespace netgen
cnt += matsz[k];
}

-MPI_Barrier(MPI_COMM_WORLD);
+MPI_Barrier(comm);

int timerloc = NgProfiler::CreateTimer ("Update local mesh");
int timerloc2 = NgProfiler::CreateTimer ("CalcSurfacesOfNode");

@@ -1008,8 +1010,9 @@ namespace netgen
// call it only for the master !
void Mesh :: Distribute ()
{
-MPI_Comm_size(MPI_COMM_WORLD, &ntasks);
+MPI_Comm comm = GetCommunicator();
-MPI_Comm_rank(MPI_COMM_WORLD, &id);
+int id = MyMPI_GetId(comm);
+int ntasks = MyMPI_GetNTasks(comm);

if (id != 0 || ntasks == 1 ) return;

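Distribute still only does real work on rank 0 of the mesh communicator; the other ranks obtain their local part through SendRecvMesh, which is also how the Python Distribute binding further down drives it. A minimal sketch of that calling pattern, assuming a PARALLEL build and a mesh that already lives on the intended communicator:

    // Sketch: master/worker pattern for distributing a mesh over its communicator.
    MPI_Comm comm = mesh->GetCommunicator();
    if (MyMPI_GetId(comm) == 0)
      mesh->Distribute();      // rank 0 partitions (METIS) and sends the pieces
    else
      mesh->SendRecvMesh();    // the other ranks receive their local part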
@@ -1068,7 +1071,7 @@ namespace netgen
eptr.Append (eind.Size());
Array<idx_t> epart(ne), npart(nn);

-idxtype nparts = ntasks-1;
+idxtype nparts = MyMPI_GetNTasks(GetCommunicator())-1;
idxtype edgecut;

idxtype ncommon = 3;

@@ -1120,12 +1123,9 @@ namespace netgen
for (SurfaceElementIndex sei = 0; sei < GetNSE(); sei++)
{
const Element2d & el = (*this)[sei];
-cout << "surf-el " << sei << " verts: " << endl;
for (int j = 0; j < el.GetNP(); j++) {
-cout << el[j] << " ";
f(el[j], sei);
}
-cout << endl;
}
};
auto loop_els_3d = [&](auto f) {

@@ -1150,7 +1150,6 @@ namespace netgen
if(boundarypoints[vertex])
cnt[vertex]++;
});
-cout << "count: " << endl << cnt << endl;
TABLE<int, PointIndex::BASE> pnt2el(cnt);
loop_els([&](auto vertex, int index)
{

@@ -1277,8 +1276,9 @@ namespace netgen
// call it only for the master !
void Mesh :: Distribute (Array<int> & volume_weights , Array<int> & surface_weights, Array<int> & segment_weights)
{
-MPI_Comm_size(MPI_COMM_WORLD, &ntasks);
+MPI_Comm comm = GetCommunicator();
-MPI_Comm_rank(MPI_COMM_WORLD, &id);
+int id = MyMPI_GetId(comm);
+int ntasks = MyMPI_GetNTasks(comm);

if (id != 0 || ntasks == 1 ) return;

@@ -1368,7 +1368,7 @@ namespace netgen
eptr.Append (eind.Size());
Array<idx_t> epart(ne), npart(nn);

-idxtype nparts = ntasks-1;
+idxtype nparts = MyMPI_GetNTasks(GetCommunicator())-1;
idxtype edgecut;


@@ -24,6 +24,9 @@ namespace netgen
void ParallelMeshTopology :: Reset ()
{
*testout << "ParallelMeshTopology::Reset" << endl;

+int id = MyMPI_GetId(mesh.GetCommunicator());
+int ntasks = MyMPI_GetNTasks(mesh.GetCommunicator());

if ( ntasks == 1 ) return;

@@ -206,6 +209,10 @@ namespace netgen
// cout << "UpdateCoarseGrid" << endl;
// if (is_updated) return;

+MPI_Comm comm = mesh.GetCommunicator();
+int id = MyMPI_GetId(comm);
+int ntasks = MyMPI_GetNTasks(comm);

Reset();
static int timer = NgProfiler::CreateTimer ("UpdateCoarseGrid");
NgProfiler::RegionTimer reg(timer);

@@ -222,14 +229,14 @@ namespace netgen

// MPI_Barrier (MPI_COMM_WORLD);

-MPI_Group MPI_GROUP_WORLD;
+MPI_Group MPI_GROUP_comm;
MPI_Group MPI_LocalGroup;
MPI_Comm MPI_LocalComm;

int process_ranks[] = { 0 };
-MPI_Comm_group (MPI_COMM_WORLD, &MPI_GROUP_WORLD);
+MPI_Comm_group (comm, &MPI_GROUP_comm);
-MPI_Group_excl (MPI_GROUP_WORLD, 1, process_ranks, &MPI_LocalGroup);
+MPI_Group_excl (MPI_GROUP_comm, 1, process_ranks, &MPI_LocalGroup);
-MPI_Comm_create (MPI_COMM_WORLD, MPI_LocalGroup, &MPI_LocalComm);
+MPI_Comm_create (comm, MPI_LocalGroup, &MPI_LocalComm);

if (id == 0) return;

@@ -17,6 +17,22 @@ namespace netgen
{
extern bool netgen_executable_started;
extern shared_ptr<NetgenGeometry> ng_geometry;
+#ifdef PARALLEL
+/** we need allreduce in python-wrapped communicators **/
+template <typename T>
+inline T MyMPI_AllReduceNG (T d, const MPI_Op & op = MPI_SUM, MPI_Comm comm = ng_comm)
+{
+T global_d;
+MPI_Allreduce ( &d, &global_d, 1, MyGetMPIType<T>(), op, comm);
+return global_d;
+}
+#else
+enum { MPI_SUM = 0, MPI_MIN = 1, MPI_MAX = 2 };
+typedef int MPI_Op;
+template <typename T>
+inline T MyMPI_AllReduceNG (T d, const MPI_Op & op = MPI_SUM, MPI_Comm comm = ng_comm)
+{ return d; }
+#endif
}

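MyMPI_AllReduceNG is what backs the Sum/Min/Max methods of the Python MPI_Comm wrapper below; without MPI it degenerates to the identity. A minimal sketch, assuming a PARALLEL build and a mesh in scope; local_volume is just a placeholder per-rank value.

    // Sketch: global reductions over the mesh communicator with the helper added above.
    MPI_Comm comm = mesh->GetCommunicator();
    double local_volume = 1.0;   // placeholder per-rank contribution
    double total_volume = MyMPI_AllReduceNG (local_volume, MPI_SUM, comm);
    int    max_rank     = MyMPI_AllReduceNG (MyMPI_GetId(comm), MPI_MAX, comm);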
@@ -503,18 +519,21 @@ DLL_HEADER void ExportNetgenMeshing(py::module &m)
py::class_<Mesh,shared_ptr<Mesh>>(m, "Mesh")
// .def(py::init<>("create empty mesh"))

-.def(py::init( [] (int dim)
+.def(py::init( [] (int dim, shared_ptr<PyMPI_Comm> pycomm)
{
auto mesh = make_shared<Mesh>();
+mesh->SetCommunicator(pycomm!=nullptr ? pycomm->comm : netgen::ng_comm);
mesh -> SetDimension(dim);
SetGlobalMesh(mesh); // for visualization
mesh -> SetGeometry (nullptr);
return mesh;
} ),
-py::arg("dim")=3
+py::arg("dim")=3, py::arg("comm")=nullptr
)
.def(NGSPickle<Mesh>())
+.def_property_readonly("comm", [](const Mesh & amesh)
+{ return make_shared<PyMPI_Comm>(amesh.GetCommunicator()); },
+"MPI-communicator the Mesh lives in")
/*
.def("__init__",
[](Mesh *instance, int dim)

@@ -527,15 +546,25 @@ DLL_HEADER void ExportNetgenMeshing(py::module &m)
*/

.def_property_readonly("_timestamp", &Mesh::GetTimeStamp)
+.def("Distribute", [](Mesh & self, shared_ptr<PyMPI_Comm> pycomm) {
+MPI_Comm comm = pycomm!=nullptr ? pycomm->comm : self.GetCommunicator();
+self.SetCommunicator(comm);
+if(MyMPI_GetNTasks(comm)==1) return;
+if(MyMPI_GetNTasks(comm)==2) throw NgException("Sorry, cannot handle communicators with NP=2!");
+cout << " rank " << MyMPI_GetId(comm) << " of " << MyMPI_GetNTasks(comm) << " called Distribute " << endl;
+if(MyMPI_GetId(comm)==0) self.Distribute();
+else self.SendRecvMesh();
+}, py::arg("comm")=nullptr)
.def("Load", FunctionPointer
([](Mesh & self, const string & filename)
{
istream * infile;

+MPI_Comm comm = self.GetCommunicator();
+id = MyMPI_GetId(comm);
+ntasks = MyMPI_GetNTasks(comm);

#ifdef PARALLEL
-MPI_Comm_rank(MPI_COMM_WORLD, &id);
-MPI_Comm_size(MPI_COMM_WORLD, &ntasks);

char* buf = nullptr;
int strs = 0;
if(id==0) {

@@ -563,10 +592,10 @@ DLL_HEADER void ExportNetgenMeshing(py::module &m)
}

/** Scatter the geometry-string **/
-MPI_Bcast(&strs, 1, MPI_INT, 0, MPI_COMM_WORLD);
+MPI_Bcast(&strs, 1, MPI_INT, 0, comm);
if(id!=0)
buf = new char[strs];
-MPI_Bcast(buf, strs, MPI_CHAR, 0, MPI_COMM_WORLD);
+MPI_Bcast(buf, strs, MPI_CHAR, 0, comm);
if(id==0)
delete infile;
infile = new istringstream(string((const char*)buf, (size_t)strs));

@@ -921,6 +950,51 @@ DLL_HEADER void ExportNetgenMeshing(py::module &m)
printmessage_importance = importance;
return old;
}));

+py::class_<PyMPI_Comm, shared_ptr<PyMPI_Comm>> (m, "MPI_Comm")
+.def_property_readonly ("rank", &PyMPI_Comm::Rank)
+.def_property_readonly ("size", &PyMPI_Comm::Size)
+// .def_property_readonly ("rank", [](PyMPI_Comm & c) { cout << "rank for " << c.comm << endl; return c.Rank(); })
+// .def_property_readonly ("size", [](PyMPI_Comm & c) { cout << "size for " << c.comm << endl; return c.Size(); })
+#ifdef PARALLEL
+.def("Barrier", [](PyMPI_Comm & c) { MPI_Barrier(c.comm); })
+.def("WTime", [](PyMPI_Comm & c) { return MPI_Wtime(); })
+#else
+.def("Barrier", [](PyMPI_Comm & c) { })
+.def("WTime", [](PyMPI_Comm & c) { return -1.0; })
+#endif
+.def("Sum", [](PyMPI_Comm & c, double x) { return MyMPI_AllReduceNG(x, MPI_SUM, c.comm); })
+.def("Min", [](PyMPI_Comm & c, double x) { return MyMPI_AllReduceNG(x, MPI_MIN, c.comm); })
+.def("Max", [](PyMPI_Comm & c, double x) { return MyMPI_AllReduceNG(x, MPI_MAX, c.comm); })
+.def("Sum", [](PyMPI_Comm & c, int x) { return MyMPI_AllReduceNG(x, MPI_SUM, c.comm); })
+.def("Min", [](PyMPI_Comm & c, int x) { return MyMPI_AllReduceNG(x, MPI_MIN, c.comm); })
+.def("Max", [](PyMPI_Comm & c, int x) { return MyMPI_AllReduceNG(x, MPI_MAX, c.comm); })
+.def("Sum", [](PyMPI_Comm & c, size_t x) { return MyMPI_AllReduceNG(x, MPI_SUM, c.comm); })
+.def("Min", [](PyMPI_Comm & c, size_t x) { return MyMPI_AllReduceNG(x, MPI_MIN, c.comm); })
+.def("Max", [](PyMPI_Comm & c, size_t x) { return MyMPI_AllReduceNG(x, MPI_MAX, c.comm); })
+.def("SubComm", [](PyMPI_Comm & c, py::list proc_list) -> shared_ptr<PyMPI_Comm> {
+Array<int> procs;
+if (py::extract<py::list> (proc_list).check()) {
+py::list pylist = py::extract<py::list> (proc_list)();
+procs.SetSize(py::len(pylist));
+for (int i = 0; i < py::len(pylist); i++)
+procs[i] = py::extract<int>(pylist[i])();
+}
+else {
+throw Exception("SubComm needs a list!");
+}
+if(!procs.Size()) {
+cout << "warning, tried to construct empty communicator, returning MPI_COMM_NULL" << endl;
+return make_shared<PyMPI_Comm>(MPI_COMM_NULL);
+}
+else if(procs.Size()==2) {
+throw Exception("Sorry, NGSolve cannot handle NP=2.");
+}
+MPI_Comm subcomm = MyMPI_SubCommunicator(c.comm, procs);
+return make_shared<PyMPI_Comm>(subcomm, true);
+}, py::arg("procs"));
+;

}

PYBIND11_MODULE(libmesh, m) {