get rid of ng_comm

This commit is contained in:
Joachim Schöberl 2019-02-12 22:11:55 +01:00
parent bac9f92dd4
commit 734e96c87c
17 changed files with 165 additions and 107 deletions

View File

@ -101,21 +101,53 @@ namespace ngcore
operator MPI_Comm() const { return comm; }
int Rank() const { return rank; } // int r; MPI_Comm_rank(comm, &r); return r; }
int Size() const { return size; } // int s; MPI_Comm_size(comm, &s); return s; }
int Rank() const { return rank; }
int Size() const { return size; }
/** Collective barrier over this communicator; no-op (and no MPI call)
    on a single-rank communicator. **/
void Barrier() const {
if (size > 1) MPI_Barrier (comm);
}
template<typename T, typename T2 = decltype(GetMPIType<T>())>
void Send( T & val, int dest, int tag) {
void Send (T & val, int dest, int tag) const {
MPI_Send (&val, 1, GetMPIType<T>(), dest, tag, comm);
}
template<typename T, typename T2 = decltype(GetMPIType<T>())>
void MyMPI_Recv (T & val, int src, int tag) {
void MyMPI_Recv (T & val, int src, int tag) const {
MPI_Recv (&val, 1, GetMPIType<T>(), src, tag, comm, MPI_STATUS_IGNORE);
}
/** --- collectives --- **/
/** Allreduce of a single value of any type T for which GetMPIType<T>()
    is defined (SFINAE via T2). Returns the reduced value on every rank;
    on a single-rank communicator returns d unchanged without calling MPI. **/
template <typename T, typename T2 = decltype(GetMPIType<T>())>
T AllReduce (T d, const MPI_Op & op) const
{
if (size == 1) return d;
T global_d;
MPI_Allreduce ( &d, &global_d, 1, GetMPIType<T>(), op, comm);
return global_d;
}
/** Broadcast a single value of MPI-mappable type T from rank root to
    all ranks; no-op on a single-rank communicator. **/
template <typename T, typename T2 = decltype(GetMPIType<T>())>
void Bcast (T & s, int root = 0) const {
if (size == 1) return ;
MPI_Bcast (&s, 1, GetMPIType<T>(), root, comm);
}
/** Broadcast a string from rank root to all ranks: first broadcasts the
    length, then the character data. Non-root ranks resize their string
    to receive the payload; no-op on a single-rank communicator. **/
void Bcast (std::string & s, int root = 0) const
{
if (size == 1) return;
int len = static_cast<int>(s.length());   // MPI counts are int
Bcast (len, root);
// every rank except the sender must grow its buffer BEFORE the
// broadcast; the previous check (rank != 0) overflowed rank 0's
// buffer whenever root != 0
if (rank != root) s.resize (len);
MPI_Bcast (&s[0], len, MPI_CHAR, root, comm);
}
};
@ -138,14 +170,20 @@ namespace ngcore
size_t Rank() const { return 0; }
size_t Size() const { return 1; }
void Barrier() const { ; }
operator MPI_Comm() const { return MPI_Comm(); }
template<typename T>
void Send( T & val, int dest, int tag) { ; }
void Send( T & val, int dest, int tag) const { ; }
template<typename T>
void MyMPI_Recv (T & val, int src, int tag) { ; }
void MyMPI_Recv (T & val, int src, int tag) const { ; }
template <typename T, typename T2 = decltype(GetMPIType<T>())>
T AllReduce (T d, const MPI_Op & op) const { return d; }
template <typename T, typename T2 = decltype(GetMPIType<T>())>
INLINE void Bcast (T & s, int root = 0) const { ; }
};
#endif

View File

@ -14,8 +14,8 @@
namespace netgen
{
using ngcore::id;
using ngcore::ntasks;
// using ngcore::id;
// using ngcore::ntasks;
#ifndef PARALLEL
/** without MPI, we need a dummy typedef **/
@ -23,16 +23,17 @@ namespace netgen
#endif
/** This is the "standard" communicator that will be used for netgen-objects. **/
extern DLL_HEADER NgMPI_Comm ng_comm;
// extern DLL_HEADER NgMPI_Comm ng_comm;
#ifdef OLD
#ifdef PARALLEL
inline int MyMPI_GetNTasks (MPI_Comm comm = ng_comm)
inline int MyMPI_GetNTasks (MPI_Comm comm /* = ng_comm */)
{
int ntasks;
MPI_Comm_size(comm, &ntasks);
return ntasks;
}
inline int MyMPI_GetId (MPI_Comm comm = ng_comm)
inline int MyMPI_GetId (MPI_Comm comm /* = ng_comm */)
{
int id;
MPI_Comm_rank(comm, &id);
@ -40,8 +41,9 @@ namespace netgen
}
#else
// enum { MPI_COMM_WORLD = 12345, MPI_COMM_NULL = 0};
inline int MyMPI_GetNTasks (MPI_Comm comm = ng_comm) { return 1; }
inline int MyMPI_GetId (MPI_Comm comm = ng_comm) { return 0; }
inline int MyMPI_GetNTasks (MPI_Comm comm /* = ng_comm */) { return 1; }
inline int MyMPI_GetId (MPI_Comm comm /* = ng_comm */) { return 0; }
#endif
#endif
/*
@ -112,13 +114,13 @@ namespace netgen
enum { MPI_TAG_MESH = 210 };
enum { MPI_TAG_VIS = 310 };
inline void MyMPI_Send (int i, int dest, int tag, MPI_Comm comm = ng_comm)
inline void MyMPI_Send (int i, int dest, int tag, MPI_Comm comm /* = ng_comm */)
{
int hi = i;
MPI_Send( &hi, 1, MPI_INT, dest, tag, comm);
}
inline void MyMPI_Recv (int & i, int src, int tag, MPI_Comm comm = ng_comm)
inline void MyMPI_Recv (int & i, int src, int tag, MPI_Comm comm /* = ng_comm */)
{
MPI_Status status;
MPI_Recv( &i, 1, MPI_INT, src, tag, comm, &status);
@ -126,12 +128,12 @@ namespace netgen
inline void MyMPI_Send (const string & s, int dest, int tag, MPI_Comm comm = ng_comm)
inline void MyMPI_Send (const string & s, int dest, int tag, MPI_Comm comm /* = ng_comm */)
{
MPI_Send( const_cast<char*> (s.c_str()), s.length(), MPI_CHAR, dest, tag, comm);
}
inline void MyMPI_Recv (string & s, int src, int tag, MPI_Comm comm = ng_comm)
inline void MyMPI_Recv (string & s, int src, int tag, MPI_Comm comm /* = ng_comm */)
{
MPI_Status status;
int len;
@ -145,20 +147,20 @@ namespace netgen
template <class T, int BASE>
inline void MyMPI_Send (FlatArray<T, BASE> s, int dest, int tag, MPI_Comm comm = ng_comm)
inline void MyMPI_Send (FlatArray<T, BASE> s, int dest, int tag, MPI_Comm comm /* = ng_comm */)
{
MPI_Send( &s.First(), s.Size(), MyGetMPIType<T>(), dest, tag, comm);
}
template <class T, int BASE>
inline void MyMPI_Recv ( FlatArray<T, BASE> s, int src, int tag, MPI_Comm comm = ng_comm)
inline void MyMPI_Recv ( FlatArray<T, BASE> s, int src, int tag, MPI_Comm comm /* = ng_comm */)
{
MPI_Status status;
MPI_Recv( &s.First(), s.Size(), MyGetMPIType<T>(), src, tag, comm, &status);
}
template <class T, int BASE>
inline void MyMPI_Recv ( Array <T, BASE> & s, int src, int tag, MPI_Comm comm = ng_comm)
inline void MyMPI_Recv ( Array <T, BASE> & s, int src, int tag, MPI_Comm comm /* = ng_comm */)
{
MPI_Status status;
int len;
@ -170,7 +172,7 @@ namespace netgen
}
template <class T, int BASE>
inline int MyMPI_Recv ( Array <T, BASE> & s, int tag, MPI_Comm comm = ng_comm)
inline int MyMPI_Recv ( Array <T, BASE> & s, int tag, MPI_Comm comm /* = ng_comm */)
{
MPI_Status status;
int len;
@ -203,7 +205,7 @@ namespace netgen
*/
template <class T, int BASE>
inline MPI_Request MyMPI_ISend (FlatArray<T, BASE> s, int dest, int tag, MPI_Comm comm = ng_comm)
inline MPI_Request MyMPI_ISend (FlatArray<T, BASE> s, int dest, int tag, MPI_Comm comm /* = ng_comm */)
{
MPI_Request request;
MPI_Isend( &s.First(), s.Size(), MyGetMPIType<T>(), dest, tag, comm, &request);
@ -212,7 +214,7 @@ namespace netgen
template <class T, int BASE>
inline MPI_Request MyMPI_IRecv (FlatArray<T, BASE> s, int dest, int tag, MPI_Comm comm = ng_comm)
inline MPI_Request MyMPI_IRecv (FlatArray<T, BASE> s, int dest, int tag, MPI_Comm comm /* = ng_comm */)
{
MPI_Request request;
MPI_Irecv( &s.First(), s.Size(), MyGetMPIType<T>(), dest, tag, comm, &request);
@ -277,10 +279,14 @@ namespace netgen
template <typename T>
inline void MyMPI_ExchangeTable (TABLE<T> & send_data,
TABLE<T> & recv_data, int tag,
MPI_Comm comm = ng_comm)
const NgMPI_Comm & comm /* = ng_comm */)
{
/*
int rank = MyMPI_GetId(comm);
int ntasks = MyMPI_GetNTasks(comm);
*/
int rank = comm.Rank();
int ntasks = comm.Size();
Array<int> send_sizes(ntasks);
Array<int> recv_sizes(ntasks);
@ -324,22 +330,23 @@ namespace netgen
template <class T>
inline void MyMPI_Bcast (T & s, MPI_Comm comm = ng_comm)
inline void MyMPI_Bcast (T & s, MPI_Comm comm /* = ng_comm */)
{
MPI_Bcast (&s, 1, MyGetMPIType<T>(), 0, comm);
}
template <class T>
inline void MyMPI_Bcast (Array<T, 0> & s, MPI_Comm comm = ng_comm)
inline void MyMPI_Bcast (Array<T, 0> & s, NgMPI_Comm comm /* = ng_comm */)
{
int size = s.Size();
MyMPI_Bcast (size, comm);
if (MyMPI_GetId(comm) != 0) s.SetSize (size);
// if (MyMPI_GetId(comm) != 0) s.SetSize (size);
if (comm.Rank() != 0) s.SetSize (size);
MPI_Bcast (&s[0], size, MyGetMPIType<T>(), 0, comm);
}
template <class T>
inline void MyMPI_Bcast (Array<T, 0> & s, int root, MPI_Comm comm = ng_comm)
inline void MyMPI_Bcast (Array<T, 0> & s, int root, MPI_Comm comm /* = ng_comm */)
{
int id;
MPI_Comm_rank(comm, &id);
@ -352,13 +359,13 @@ namespace netgen
}
template <class T, class T2>
inline void MyMPI_Allgather (const T & send, FlatArray<T2> recv, MPI_Comm comm = ng_comm)
inline void MyMPI_Allgather (const T & send, FlatArray<T2> recv, MPI_Comm comm /* = ng_comm */)
{
MPI_Allgather( const_cast<T*> (&send), 1, MyGetMPIType<T>(), &recv[0], 1, MyGetMPIType<T2>(), comm);
}
template <class T, class T2>
inline void MyMPI_Alltoall (FlatArray<T> send, FlatArray<T2> recv, MPI_Comm comm = ng_comm)
inline void MyMPI_Alltoall (FlatArray<T> send, FlatArray<T2> recv, MPI_Comm comm /* = ng_comm */)
{
MPI_Alltoall( &send[0], 1, MyGetMPIType<T>(), &recv[0], 1, MyGetMPIType<T2>(), comm);
}

View File

@ -40,7 +40,7 @@
// #ifndef PARALLEL
// typedef int MPI_Comm;
// #endif
namespace netgen { extern DLL_HEADER ngcore::NgMPI_Comm ng_comm; }
// namespace netgen { extern DLL_HEADER ngcore::NgMPI_Comm ng_comm; }
// implemented element types:
@ -66,7 +66,7 @@ extern "C" {
DLL_HEADER void Ng_LoadGeometry (const char * filename);
// load netgen mesh
DLL_HEADER void Ng_LoadMesh (const char * filename, ngcore::NgMPI_Comm comm = netgen::ng_comm);
DLL_HEADER void Ng_LoadMesh (const char * filename, ngcore::NgMPI_Comm comm = ngcore::NgMPI_Comm{});
// load netgen mesh
DLL_HEADER void Ng_LoadMeshFromString (const char * mesh_as_string);

View File

@ -36,7 +36,7 @@ namespace netgen
using namespace std;
using namespace ngcore;
extern DLL_HEADER NgMPI_Comm ng_comm;
// extern DLL_HEADER NgMPI_Comm ng_comm;
static constexpr int POINTINDEX_BASE = 1;
@ -265,11 +265,11 @@ namespace netgen
/** reuse a netgen-mesh **/
Ngx_Mesh (shared_ptr<Mesh> amesh);
/** load a new mesh **/
Ngx_Mesh (string filename, MPI_Comm acomm = netgen::ng_comm);
Ngx_Mesh (string filename, NgMPI_Comm acomm = NgMPI_Comm{});
void LoadMesh (const string & filename, MPI_Comm comm = netgen::ng_comm);
void LoadMesh (const string & filename, NgMPI_Comm comm = NgMPI_Comm{});
void LoadMesh (istream & str, MPI_Comm comm = netgen::ng_comm);
void LoadMesh (istream & str, NgMPI_Comm comm = NgMPI_Comm{});
void SaveMesh (ostream & str) const;
void UpdateTopology ();
void DoArchive (Archive & archive);

View File

@ -119,8 +119,8 @@ void Ng_LoadMeshFromStream ( istream & input )
void Ng_LoadMesh (const char * filename, ngcore::NgMPI_Comm comm)
{
int id = MyMPI_GetId(comm);
int ntasks = MyMPI_GetNTasks(comm);
int id = comm.Rank();
int ntasks = comm.Size();
{
ifstream infile(filename);

View File

@ -33,17 +33,17 @@ namespace netgen
Ngx_Mesh :: Ngx_Mesh (shared_ptr<Mesh> amesh)
{ mesh = amesh ? amesh : netgen::mesh; }
Ngx_Mesh :: Ngx_Mesh (string filename, MPI_Comm acomm)
Ngx_Mesh :: Ngx_Mesh (string filename, NgMPI_Comm acomm)
{ LoadMesh(filename, acomm); }
Ngx_Mesh * LoadMesh (const string & filename, MPI_Comm comm = netgen::ng_comm)
Ngx_Mesh * LoadMesh (const string & filename, NgMPI_Comm comm)
{
netgen::mesh.reset();
Ng_LoadMesh (filename.c_str(), comm);
return new Ngx_Mesh (netgen::mesh);
}
void Ngx_Mesh :: LoadMesh (const string & filename, MPI_Comm comm)
void Ngx_Mesh :: LoadMesh (const string & filename, NgMPI_Comm comm)
{
netgen::mesh.reset();
Ng_LoadMesh (filename.c_str(), comm);
@ -51,7 +51,7 @@ namespace netgen
mesh = netgen::mesh;
}
void Ngx_Mesh :: LoadMesh (istream & ist, MPI_Comm comm)
void Ngx_Mesh :: LoadMesh (istream & ist, NgMPI_Comm comm)
{
netgen::mesh = make_shared<Mesh>();
netgen::mesh->SetCommunicator(comm);

View File

@ -553,18 +553,19 @@ namespace netgen
order = 1;
MPI_Comm curve_comm;
// MPI_Comm curve_comm;
const auto & curve_comm = mesh.GetCommunicator();
#ifdef PARALLEL
enum { MPI_TAG_CURVE = MPI_TAG_MESH+20 };
const ParallelMeshTopology & partop = mesh.GetParallelTopology ();
MPI_Comm_dup (mesh.GetCommunicator(), &curve_comm);
// MPI_Comm_dup (mesh.GetCommunicator(), &curve_comm);
Array<int> procs;
#else
curve_comm = ng_comm; // dummy!
// curve_comm = mesh.GetCommunicator();
#endif
int rank = MyMPI_GetId(curve_comm);
int ntasks = MyMPI_GetNTasks(curve_comm);
int rank = curve_comm.Rank();
int ntasks = curve_comm.Size();
if (working)
order = aorder;
@ -656,8 +657,8 @@ namespace netgen
}
}
MyMPI_ExchangeTable (send_orders, recv_orders, MPI_TAG_CURVE, curve_comm);
if (ntasks > 1)
MyMPI_ExchangeTable (send_orders, recv_orders, MPI_TAG_CURVE, curve_comm);
if (ntasks > 1 && working)
{
@ -1186,7 +1187,8 @@ namespace netgen
}
}
MyMPI_ExchangeTable (send_surfnr, recv_surfnr, MPI_TAG_CURVE, curve_comm);
if (ntasks > 1)
MyMPI_ExchangeTable (send_surfnr, recv_surfnr, MPI_TAG_CURVE, curve_comm);
if (ntasks > 1 && working)
{
@ -1369,8 +1371,8 @@ namespace netgen
#ifdef PARALLEL
MPI_Barrier (curve_comm);
MPI_Comm_free (&curve_comm);
curve_comm.Barrier();
// MPI_Comm_free (&curve_comm);
#endif
}

View File

@ -32,7 +32,7 @@ namespace netgen
// TraceGlobal glob2("global2");
// global communicator for netgen
DLL_HEADER NgMPI_Comm ng_comm;
// DLL_HEADER NgMPI_Comm ng_comm;
weak_ptr<Mesh> global_mesh;
void SetGlobalMesh (shared_ptr<Mesh> m)

View File

@ -61,7 +61,7 @@ namespace netgen
DLL_HEADER void SetGlobalMesh (shared_ptr<Mesh> m);
// global communicator for netgen (dummy if no MPI)
extern DLL_HEADER NgMPI_Comm ng_comm;
// extern DLL_HEADER NgMPI_Comm ng_comm;
}

View File

@ -43,7 +43,7 @@ namespace netgen
bcnames.SetSize(0);
cd2names.SetSize(0);
this->comm = netgen :: ng_comm;
// this->comm = netgen :: ng_comm;
#ifdef PARALLEL
paralleltop = new ParallelMeshTopology (*this);
#endif
@ -1325,8 +1325,8 @@ namespace netgen
if (archive.Input())
{
int rank = MyMPI_GetId(GetCommunicator());
int ntasks = MyMPI_GetNTasks(GetCommunicator());
int rank = GetCommunicator().Rank();
int ntasks = GetCommunicator().Size();
RebuildSurfaceElementLists();

View File

@ -35,8 +35,8 @@ namespace netgen
void Mesh :: SendRecvMesh ()
{
int id = MyMPI_GetId(GetCommunicator());
int np = MyMPI_GetNTasks(GetCommunicator());
int id = GetCommunicator().Rank();
int np = GetCommunicator().Size();
if (np == 1) {
throw NgException("SendRecvMesh called, but only one rank in communicator!!");
@ -72,9 +72,9 @@ namespace netgen
{
Array<MPI_Request> sendrequests;
MPI_Comm comm = GetCommunicator();
int id = MyMPI_GetId(comm);
int ntasks = MyMPI_GetNTasks(comm);
NgMPI_Comm comm = GetCommunicator();
int id = comm.Rank();
int ntasks = comm.Size();
int dim = GetDimension();
MyMPI_Bcast(dim, comm);
@ -751,8 +751,9 @@ namespace netgen
int timer_sels = NgProfiler::CreateTimer ("Receive surface elements");
NgProfiler::RegionTimer reg(timer);
int id = MyMPI_GetId(GetCommunicator());
int ntasks = MyMPI_GetNTasks(GetCommunicator());
NgMPI_Comm comm = GetCommunicator();
int id = comm.Rank();
int ntasks = comm.Size();
int dim;
MyMPI_Bcast(dim, comm);
@ -1011,9 +1012,9 @@ namespace netgen
// call it only for the master !
void Mesh :: Distribute ()
{
MPI_Comm comm = GetCommunicator();
int id = MyMPI_GetId(comm);
int ntasks = MyMPI_GetNTasks(comm);
NgMPI_Comm comm = GetCommunicator();
int id = comm.Rank();
int ntasks = comm.Size();
if (id != 0 || ntasks == 1 ) return;
@ -1072,7 +1073,7 @@ namespace netgen
eptr.Append (eind.Size());
Array<idx_t> epart(ne), npart(nn);
idxtype nparts = MyMPI_GetNTasks(GetCommunicator())-1;
idxtype nparts = GetCommunicator().Size()-1;
if (nparts == 1)
{
@ -1293,9 +1294,9 @@ namespace netgen
// call it only for the master !
void Mesh :: Distribute (Array<int> & volume_weights , Array<int> & surface_weights, Array<int> & segment_weights)
{
MPI_Comm comm = GetCommunicator();
int id = MyMPI_GetId(comm);
int ntasks = MyMPI_GetNTasks(comm);
NgMPI_Comm comm = GetCommunicator();
int id = comm.Rank();
int ntasks = comm.Size();
if (id != 0 || ntasks == 1 ) return;
@ -1385,7 +1386,7 @@ namespace netgen
eptr.Append (eind.Size());
Array<idx_t> epart(ne), npart(nn);
idxtype nparts = MyMPI_GetNTasks(GetCommunicator())-1;
idxtype nparts = GetCommunicator().Size()-1;
if (nparts == 1)
{

View File

@ -25,8 +25,9 @@ namespace netgen
{
*testout << "ParallelMeshTopology::Reset" << endl;
int id = MyMPI_GetId(mesh.GetCommunicator());
int ntasks = MyMPI_GetNTasks(mesh.GetCommunicator());
NgMPI_Comm comm = mesh.GetCommunicator();
int id = comm.Rank();
int ntasks = comm.Size();
if ( ntasks == 1 ) return;
@ -125,6 +126,7 @@ namespace netgen
*testout << "ParallelMeshTopology :: UpdateCoarseGridGlobal" << endl;
const MeshTopology & topology = mesh.GetTopology();
MPI_Comm comm = mesh.GetCommunicator();
if ( id == 0 )
{
@ -160,7 +162,7 @@ namespace netgen
Array<MPI_Request> sendrequests;
for (int dest = 1; dest < ntasks; dest++)
sendrequests.Append (MyMPI_ISend (*sendarrays[dest], dest, MPI_TAG_MESH+10));
sendrequests.Append (MyMPI_ISend (*sendarrays[dest], dest, MPI_TAG_MESH+10, comm));
MPI_Waitall (sendrequests.Size(), &sendrequests[0], MPI_STATUS_IGNORE);
for (int dest = 1; dest < ntasks; dest++)
@ -171,7 +173,7 @@ namespace netgen
{
Array<int> recvarray;
MyMPI_Recv (recvarray, 0, MPI_TAG_MESH+10);
MyMPI_Recv (recvarray, 0, MPI_TAG_MESH+10, comm);
int ii = 0;
@ -209,9 +211,11 @@ namespace netgen
// cout << "UpdateCoarseGrid" << endl;
// if (is_updated) return;
MPI_Comm comm = mesh.GetCommunicator();
int id = MyMPI_GetId(comm);
int ntasks = MyMPI_GetNTasks(comm);
NgMPI_Comm comm = mesh.GetCommunicator();
int id = comm.Rank();
int ntasks = comm.Size();
if (ntasks == 1) return;
Reset();
static int timer = NgProfiler::CreateTimer ("UpdateCoarseGrid");

View File

@ -20,7 +20,7 @@ namespace netgen
#ifdef PARALLEL
/** we need allreduce in python-wrapped communicators **/
template <typename T>
inline T MyMPI_AllReduceNG (T d, const MPI_Op & op = MPI_SUM, MPI_Comm comm = ng_comm)
inline T MyMPI_AllReduceNG (T d, const MPI_Op & op /* = MPI_SUM */, MPI_Comm comm)
{
T global_d;
MPI_Allreduce ( &d, &global_d, 1, MyGetMPIType<T>(), op, comm);
@ -30,7 +30,7 @@ namespace netgen
enum { MPI_SUM = 0, MPI_MIN = 1, MPI_MAX = 2 };
typedef int MPI_Op;
template <typename T>
inline T MyMPI_AllReduceNG (T d, const MPI_Op & op = MPI_SUM, MPI_Comm comm = ng_comm)
inline T MyMPI_AllReduceNG (T d, const MPI_Op & op /* = MPI_SUM */, MPI_Comm comm)
{ return d; }
#endif
}
@ -89,12 +89,11 @@ DLL_HEADER void ExportNetgenMeshing(py::module &m)
py::class_<NgMPI_Comm> (m, "MPI_Comm")
.def_property_readonly ("rank", &NgMPI_Comm::Rank)
.def_property_readonly ("size", &NgMPI_Comm::Size)
.def("Barrier", &NgMPI_Comm::Barrier)
#ifdef PARALLEL
.def("Barrier", [](NgMPI_Comm & c) { MPI_Barrier(c); })
.def("WTime", [](NgMPI_Comm & c) { return MPI_Wtime(); })
#else
.def("Barrier", [](NgMPI_Comm & c) { })
.def("WTime", [](NgMPI_Comm & c) { return -1.0; })
#endif
.def("Sum", [](NgMPI_Comm & c, double x) { return MyMPI_AllReduceNG(x, MPI_SUM, c); })
@ -557,13 +556,13 @@ DLL_HEADER void ExportNetgenMeshing(py::module &m)
.def_property_readonly("_timestamp", &Mesh::GetTimeStamp)
.def("Distribute", [](shared_ptr<Mesh> self, NgMPI_Comm comm) {
self->SetCommunicator(comm);
if(MyMPI_GetNTasks(comm)==1) return self;
if(comm.Size()==1) return self;
// if(MyMPI_GetNTasks(comm)==2) throw NgException("Sorry, cannot handle communicators with NP=2!");
// cout << " rank " << MyMPI_GetId(comm) << " of " << MyMPI_GetNTasks(comm) << " called Distribute " << endl;
if(MyMPI_GetId(comm)==0) self->Distribute();
if(comm.Rank()==0) self->Distribute();
else self->SendRecvMesh();
return self;
}, py::arg("comm")=NgMPI_Comm(ng_comm))
}, py::arg("comm"))
.def("Receive", [](NgMPI_Comm comm) {
auto mesh = make_shared<Mesh>();
mesh->SetCommunicator(comm);
@ -575,9 +574,9 @@ DLL_HEADER void ExportNetgenMeshing(py::module &m)
{
istream * infile;
MPI_Comm comm = self.GetCommunicator();
id = MyMPI_GetId(comm);
ntasks = MyMPI_GetNTasks(comm);
NgMPI_Comm comm = self.GetCommunicator();
id = comm.Rank();
ntasks = comm.Size();
#ifdef PARALLEL
char* buf = nullptr;

View File

@ -2650,13 +2650,17 @@ namespace netgen
static int timer2 = NgProfiler::CreateTimer ("getminmax, surf");
#ifdef PARALLEL
if (id == 0)
auto comm = mesh->GetCommunicator();
if (comm.Size() > 1)
{
MyMPI_SendCmd ("redraw");
MyMPI_SendCmd ("getminmax");
if (id == 0)
{
MyMPI_SendCmd ("redraw");
MyMPI_SendCmd ("getminmax");
}
MyMPI_Bcast (funcnr, mesh->GetCommunicator());
MyMPI_Bcast (comp, mesh->GetCommunicator());
}
MyMPI_Bcast (funcnr);
MyMPI_Bcast (comp);
#endif
// double val;
@ -2744,11 +2748,14 @@ namespace netgen
minv = 1e99;
maxv = -1e99;
}
double hmin, hmax;
MPI_Reduce (&minv, &hmin, 1, MPI_DOUBLE, MPI_MIN, 0, MPI_COMM_WORLD);
MPI_Reduce (&maxv, &hmax, 1, MPI_DOUBLE, MPI_MAX, 0, MPI_COMM_WORLD);
minv = hmin;
maxv = hmax;
if (ntasks > 1)
{
double hmin, hmax;
MPI_Reduce (&minv, &hmin, 1, MPI_DOUBLE, MPI_MIN, 0, MPI_COMM_WORLD);
MPI_Reduce (&maxv, &hmax, 1, MPI_DOUBLE, MPI_MAX, 0, MPI_COMM_WORLD);
minv = hmin;
maxv = hmax;
}
#endif
}

View File

@ -77,7 +77,7 @@ int main(int argc, char ** argv)
throw ngcore::Exception("Netgen GUI cannot run MPI-parallel");
// MPI_COMM_WORLD is just a local communicator
netgen::ng_comm = ngcore::NgMPI_Comm{MPI_COMM_WORLD, false};
// netgen::ng_comm = ngcore::NgMPI_Comm{MPI_COMM_WORLD, false};
#endif

View File

@ -1219,7 +1219,7 @@ namespace netgen
#ifdef PARALLEL
MyMPI_SendCmd ("bcastparthread");
MyMPI_Bcast (mparam.parthread);
MyMPI_Bcast (mparam.parthread, MPI_COMM_WORLD);
#endif
return TCL_OK;

View File

@ -125,7 +125,7 @@ void ParallelRun()
else if ( message == "bcastparthread" )
{
MyMPI_Bcast (mparam.parthread);
MyMPI_Bcast (mparam.parthread, MPI_COMM_WORLD);
}