mirror of https://github.com/NGSolve/netgen.git
synced 2024-12-26 05:50:32 +05:00

get rid of ng_comm

This commit is contained in:
parent bac9f92dd4
commit 734e96c87c
@@ -101,20 +101,52 @@ namespace ngcore

     operator MPI_Comm() const { return comm; }

-    int Rank() const { return rank; } // int r; MPI_Comm_rank(comm, &r); return r; }
-    int Size() const { return size; } // int s; MPI_Comm_size(comm, &s); return s; }
+    int Rank() const { return rank; }
+    int Size() const { return size; }
+    void Barrier() const {
+      if (size > 1) MPI_Barrier (comm);
+    }


     template<typename T, typename T2 = decltype(GetMPIType<T>())>
-    void Send( T & val, int dest, int tag) {
+    void Send (T & val, int dest, int tag) const {
       MPI_Send (&val, 1, GetMPIType<T>(), dest, tag, comm);
     }

     template<typename T, typename T2 = decltype(GetMPIType<T>())>
-    void MyMPI_Recv (T & val, int src, int tag) {
+    void MyMPI_Recv (T & val, int src, int tag) const {
       MPI_Recv (&val, 1, GetMPIType<T>(), src, tag, comm, MPI_STATUS_IGNORE);
     }


+    /** --- collectives --- **/
+
+    template <typename T, typename T2 = decltype(GetMPIType<T>())>
+    T AllReduce (T d, const MPI_Op & op) const
+    {
+      if (size == 1) return d;
+
+      T global_d;
+      MPI_Allreduce ( &d, &global_d, 1, GetMPIType<T>(), op, comm);
+      return global_d;
+    }
+
+    template <typename T, typename T2 = decltype(GetMPIType<T>())>
+    void Bcast (T & s, int root = 0) const {
+      if (size == 1) return;
+      MPI_Bcast (&s, 1, GetMPIType<T>(), root, comm);
+    }
+
+    void Bcast (std::string & s, int root = 0) const
+    {
+      if (size == 1) return;
+      int len = s.length();
+      Bcast (len, root);
+      if (rank != 0) s.resize (len);
+      MPI_Bcast (&s[0], len, MPI_CHAR, root, comm);
+    }


   };

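For reference, the wrapper pattern this hunk extends can be shown as a small self-contained program: a communicator class that caches rank and size once and lets Barrier/AllReduce/Bcast short-circuit when only one rank participates. This is a minimal sketch assuming only <mpi.h>; the class name DemoComm and the DemoMPIType helpers are illustrative stand-ins, not the actual ngcore::NgMPI_Comm implementation.

#include <mpi.h>
#include <iostream>

// Illustrative MPI type mapping (the real code uses ngcore::GetMPIType<T>()).
inline MPI_Datatype DemoMPIType (int)    { return MPI_INT; }
inline MPI_Datatype DemoMPIType (double) { return MPI_DOUBLE; }

// Minimal communicator wrapper: caches rank/size once and lets
// collectives short-circuit when only one rank participates.
class DemoComm
{
  MPI_Comm comm;
  int rank, size;
public:
  DemoComm (MPI_Comm acomm) : comm(acomm)
  {
    MPI_Comm_rank(comm, &rank);
    MPI_Comm_size(comm, &size);
  }
  int Rank() const { return rank; }
  int Size() const { return size; }
  void Barrier() const { if (size > 1) MPI_Barrier(comm); }

  template <typename T>
  T AllReduce (T d, MPI_Op op) const
  {
    if (size == 1) return d;          // serial run: nothing to reduce
    T global_d;
    MPI_Allreduce(&d, &global_d, 1, DemoMPIType(d), op, comm);
    return global_d;
  }

  template <typename T>
  void Bcast (T & s, int root = 0) const
  {
    if (size == 1) return;
    MPI_Bcast(&s, 1, DemoMPIType(s), root, comm);
  }
};

int main (int argc, char ** argv)
{
  MPI_Init(&argc, &argv);
  DemoComm comm(MPI_COMM_WORLD);

  double x = comm.Rank() + 1.0;
  double sum = comm.AllReduce(x, MPI_SUM);   // works for 1 or many ranks
  if (comm.Rank() == 0)
    std::cout << "sum over " << comm.Size() << " ranks: " << sum << std::endl;

  comm.Barrier();
  MPI_Finalize();
  return 0;
}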
@@ -138,14 +170,20 @@ namespace ngcore

     size_t Rank() const { return 0; }
     size_t Size() const { return 1; }
+    void Barrier() const { ; }
     operator MPI_Comm() const { return MPI_Comm(); }

     template<typename T>
-    void Send( T & val, int dest, int tag) { ; }
+    void Send( T & val, int dest, int tag) const { ; }

     template<typename T>
-    void MyMPI_Recv (T & val, int src, int tag) { ; }
+    void MyMPI_Recv (T & val, int src, int tag) const { ; }

+    template <typename T, typename T2 = decltype(GetMPIType<T>())>
+    T AllReduce (T d, const MPI_Op & op) const { return d; }
+
+    template <typename T, typename T2 = decltype(GetMPIType<T>())>
+    INLINE void Bcast (T & s, int root = 0) const { ; }
   };

 #endif
@@ -14,8 +14,8 @@

 namespace netgen
 {
-  using ngcore::id;
-  using ngcore::ntasks;
+  // using ngcore::id;
+  // using ngcore::ntasks;

 #ifndef PARALLEL
   /** without MPI, we need a dummy typedef **/
@@ -23,16 +23,17 @@ namespace netgen
 #endif

   /** This is the "standard" communicator that will be used for netgen-objects. **/
-  extern DLL_HEADER NgMPI_Comm ng_comm;
+  // extern DLL_HEADER NgMPI_Comm ng_comm;

+#ifdef OLD
 #ifdef PARALLEL
-  inline int MyMPI_GetNTasks (MPI_Comm comm = ng_comm)
+  inline int MyMPI_GetNTasks (MPI_Comm comm /* = ng_comm */)
   {
     int ntasks;
     MPI_Comm_size(comm, &ntasks);
     return ntasks;
   }
-  inline int MyMPI_GetId (MPI_Comm comm = ng_comm)
+  inline int MyMPI_GetId (MPI_Comm comm /* = ng_comm */)
   {
     int id;
     MPI_Comm_rank(comm, &id);
@@ -40,10 +41,11 @@ namespace netgen
   }
 #else
   // enum { MPI_COMM_WORLD = 12345, MPI_COMM_NULL = 0};
-  inline int MyMPI_GetNTasks (MPI_Comm comm = ng_comm) { return 1; }
-  inline int MyMPI_GetId (MPI_Comm comm = ng_comm) { return 0; }
+  inline int MyMPI_GetNTasks (MPI_Comm comm /* = ng_comm */) { return 1; }
+  inline int MyMPI_GetId (MPI_Comm comm /* = ng_comm */) { return 0; }
 #endif
+#endif

 /*
 #ifdef PARALLEL
   // For python wrapping of communicators
@@ -112,13 +114,13 @@ namespace netgen
   enum { MPI_TAG_MESH = 210 };
   enum { MPI_TAG_VIS = 310 };

-  inline void MyMPI_Send (int i, int dest, int tag, MPI_Comm comm = ng_comm)
+  inline void MyMPI_Send (int i, int dest, int tag, MPI_Comm comm /* = ng_comm */)
   {
     int hi = i;
     MPI_Send( &hi, 1, MPI_INT, dest, tag, comm);
   }

-  inline void MyMPI_Recv (int & i, int src, int tag, MPI_Comm comm = ng_comm)
+  inline void MyMPI_Recv (int & i, int src, int tag, MPI_Comm comm /* = ng_comm */)
   {
     MPI_Status status;
     MPI_Recv( &i, 1, MPI_INT, src, tag, comm, &status);
@@ -126,12 +128,12 @@ namespace netgen



-  inline void MyMPI_Send (const string & s, int dest, int tag, MPI_Comm comm = ng_comm)
+  inline void MyMPI_Send (const string & s, int dest, int tag, MPI_Comm comm /* = ng_comm */)
   {
     MPI_Send( const_cast<char*> (s.c_str()), s.length(), MPI_CHAR, dest, tag, comm);
   }

-  inline void MyMPI_Recv (string & s, int src, int tag, MPI_Comm comm = ng_comm)
+  inline void MyMPI_Recv (string & s, int src, int tag, MPI_Comm comm /* = ng_comm */)
   {
     MPI_Status status;
     int len;
@@ -145,20 +147,20 @@ namespace netgen


   template <class T, int BASE>
-  inline void MyMPI_Send (FlatArray<T, BASE> s, int dest, int tag, MPI_Comm comm = ng_comm)
+  inline void MyMPI_Send (FlatArray<T, BASE> s, int dest, int tag, MPI_Comm comm /* = ng_comm */)
   {
     MPI_Send( &s.First(), s.Size(), MyGetMPIType<T>(), dest, tag, comm);
   }

   template <class T, int BASE>
-  inline void MyMPI_Recv ( FlatArray<T, BASE> s, int src, int tag, MPI_Comm comm = ng_comm)
+  inline void MyMPI_Recv ( FlatArray<T, BASE> s, int src, int tag, MPI_Comm comm /* = ng_comm */)
   {
     MPI_Status status;
     MPI_Recv( &s.First(), s.Size(), MyGetMPIType<T>(), src, tag, comm, &status);
   }

   template <class T, int BASE>
-  inline void MyMPI_Recv ( Array <T, BASE> & s, int src, int tag, MPI_Comm comm = ng_comm)
+  inline void MyMPI_Recv ( Array <T, BASE> & s, int src, int tag, MPI_Comm comm /* = ng_comm */)
   {
     MPI_Status status;
     int len;
@@ -170,7 +172,7 @@ namespace netgen
   }

   template <class T, int BASE>
-  inline int MyMPI_Recv ( Array <T, BASE> & s, int tag, MPI_Comm comm = ng_comm)
+  inline int MyMPI_Recv ( Array <T, BASE> & s, int tag, MPI_Comm comm /* = ng_comm */)
   {
     MPI_Status status;
     int len;
@@ -203,7 +205,7 @@ namespace netgen
 */

   template <class T, int BASE>
-  inline MPI_Request MyMPI_ISend (FlatArray<T, BASE> s, int dest, int tag, MPI_Comm comm = ng_comm)
+  inline MPI_Request MyMPI_ISend (FlatArray<T, BASE> s, int dest, int tag, MPI_Comm comm /* = ng_comm */)
   {
     MPI_Request request;
     MPI_Isend( &s.First(), s.Size(), MyGetMPIType<T>(), dest, tag, comm, &request);
@@ -212,7 +214,7 @@ namespace netgen


   template <class T, int BASE>
-  inline MPI_Request MyMPI_IRecv (FlatArray<T, BASE> s, int dest, int tag, MPI_Comm comm = ng_comm)
+  inline MPI_Request MyMPI_IRecv (FlatArray<T, BASE> s, int dest, int tag, MPI_Comm comm /* = ng_comm */)
   {
     MPI_Request request;
     MPI_Irecv( &s.First(), s.Size(), MyGetMPIType<T>(), dest, tag, comm, &request);
@@ -277,11 +279,15 @@ namespace netgen
   template <typename T>
   inline void MyMPI_ExchangeTable (TABLE<T> & send_data,
                                    TABLE<T> & recv_data, int tag,
-                                   MPI_Comm comm = ng_comm)
+                                   const NgMPI_Comm & comm /* = ng_comm */)
   {
+    /*
     int rank = MyMPI_GetId(comm);
     int ntasks = MyMPI_GetNTasks(comm);
+    */
+    int rank = comm.Rank();
+    int ntasks = comm.Size();

     Array<int> send_sizes(ntasks);
     Array<int> recv_sizes(ntasks);
     for (int i = 0; i < ntasks; i++)
@@ -324,22 +330,23 @@ namespace netgen


   template <class T>
-  inline void MyMPI_Bcast (T & s, MPI_Comm comm = ng_comm)
+  inline void MyMPI_Bcast (T & s, MPI_Comm comm /* = ng_comm */)
   {
     MPI_Bcast (&s, 1, MyGetMPIType<T>(), 0, comm);
   }

   template <class T>
-  inline void MyMPI_Bcast (Array<T, 0> & s, MPI_Comm comm = ng_comm)
+  inline void MyMPI_Bcast (Array<T, 0> & s, NgMPI_Comm comm /* = ng_comm */)
   {
     int size = s.Size();
     MyMPI_Bcast (size, comm);
-    if (MyMPI_GetId(comm) != 0) s.SetSize (size);
+    // if (MyMPI_GetId(comm) != 0) s.SetSize (size);
+    if (comm.Rank() != 0) s.SetSize (size);
     MPI_Bcast (&s[0], size, MyGetMPIType<T>(), 0, comm);
   }

   template <class T>
-  inline void MyMPI_Bcast (Array<T, 0> & s, int root, MPI_Comm comm = ng_comm)
+  inline void MyMPI_Bcast (Array<T, 0> & s, int root, MPI_Comm comm /* = ng_comm */)
   {
     int id;
     MPI_Comm_rank(comm, &id);
@@ -352,13 +359,13 @@ namespace netgen
   }

   template <class T, class T2>
-  inline void MyMPI_Allgather (const T & send, FlatArray<T2> recv, MPI_Comm comm = ng_comm)
+  inline void MyMPI_Allgather (const T & send, FlatArray<T2> recv, MPI_Comm comm /* = ng_comm */)
   {
     MPI_Allgather( const_cast<T*> (&send), 1, MyGetMPIType<T>(), &recv[0], 1, MyGetMPIType<T2>(), comm);
   }

   template <class T, class T2>
-  inline void MyMPI_Alltoall (FlatArray<T> send, FlatArray<T2> recv, MPI_Comm comm = ng_comm)
+  inline void MyMPI_Alltoall (FlatArray<T> send, FlatArray<T2> recv, MPI_Comm comm /* = ng_comm */)
   {
     MPI_Alltoall( &send[0], 1, MyGetMPIType<T>(), &recv[0], 1, MyGetMPIType<T2>(), comm);
   }
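The helpers above lose their "= ng_comm" default, so every caller now names the communicator explicitly. A minimal self-contained sketch of that calling convention, assuming only <mpi.h> and a std::vector payload; DemoBcast is an illustrative stand-in, not the netgen MyMPI_Bcast:

#include <mpi.h>
#include <vector>
#include <iostream>

// Broadcast helper with an explicit communicator parameter (no global default),
// mirroring the MyMPI_Bcast signature change above. Names are illustrative.
inline void DemoBcast (std::vector<double> & v, MPI_Comm comm, int root = 0)
{
  int rank;  MPI_Comm_rank(comm, &rank);
  int size = static_cast<int>(v.size());
  MPI_Bcast(&size, 1, MPI_INT, root, comm);            // first the length ...
  if (rank != root) v.resize(size);
  MPI_Bcast(v.data(), size, MPI_DOUBLE, root, comm);   // ... then the data
}

int main (int argc, char ** argv)
{
  MPI_Init(&argc, &argv);
  int rank;  MPI_Comm_rank(MPI_COMM_WORLD, &rank);

  std::vector<double> data;
  if (rank == 0) data = { 1.0, 2.0, 3.0 };
  DemoBcast(data, MPI_COMM_WORLD);   // the caller names the communicator
  std::cout << "rank " << rank << " got " << data.size() << " values\n";

  MPI_Finalize();
  return 0;
}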
@@ -40,7 +40,7 @@
 // #ifndef PARALLEL
 // typedef int MPI_Comm;
 // #endif
-namespace netgen { extern DLL_HEADER ngcore::NgMPI_Comm ng_comm; }
+// namespace netgen { extern DLL_HEADER ngcore::NgMPI_Comm ng_comm; }


 // implemented element types:
@@ -66,7 +66,7 @@ extern "C" {
   DLL_HEADER void Ng_LoadGeometry (const char * filename);

   // load netgen mesh
-  DLL_HEADER void Ng_LoadMesh (const char * filename, ngcore::NgMPI_Comm comm = netgen::ng_comm);
+  DLL_HEADER void Ng_LoadMesh (const char * filename, ngcore::NgMPI_Comm comm = ngcore::NgMPI_Comm{});

   // load netgen mesh
   DLL_HEADER void Ng_LoadMeshFromString (const char * mesh_as_string);
@@ -36,7 +36,7 @@ namespace netgen
   using namespace std;
   using namespace ngcore;

-  extern DLL_HEADER NgMPI_Comm ng_comm;
+  // extern DLL_HEADER NgMPI_Comm ng_comm;

   static constexpr int POINTINDEX_BASE = 1;

@@ -265,11 +265,11 @@ namespace netgen
     /** reuse a netgen-mesh **/
     Ngx_Mesh (shared_ptr<Mesh> amesh);
     /** load a new mesh **/
-    Ngx_Mesh (string filename, MPI_Comm acomm = netgen::ng_comm);
+    Ngx_Mesh (string filename, NgMPI_Comm acomm = NgMPI_Comm{});

-    void LoadMesh (const string & filename, MPI_Comm comm = netgen::ng_comm);
+    void LoadMesh (const string & filename, NgMPI_Comm comm = NgMPI_Comm{});

-    void LoadMesh (istream & str, MPI_Comm comm = netgen::ng_comm);
+    void LoadMesh (istream & str, NgMPI_Comm comm = NgMPI_Comm{});
     void SaveMesh (ostream & str) const;
     void UpdateTopology ();
     void DoArchive (Archive & archive);
@@ -119,8 +119,8 @@ void Ng_LoadMeshFromStream ( istream & input )

 void Ng_LoadMesh (const char * filename, ngcore::NgMPI_Comm comm)
 {
-  int id = MyMPI_GetId(comm);
-  int ntasks = MyMPI_GetNTasks(comm);
+  int id = comm.Rank();
+  int ntasks = comm.Size();

   {
     ifstream infile(filename);
@@ -33,17 +33,17 @@ namespace netgen

   Ngx_Mesh :: Ngx_Mesh (shared_ptr<Mesh> amesh)
   { mesh = amesh ? amesh : netgen::mesh; }
-  Ngx_Mesh :: Ngx_Mesh (string filename, MPI_Comm acomm)
+  Ngx_Mesh :: Ngx_Mesh (string filename, NgMPI_Comm acomm)
   { LoadMesh(filename, acomm); }

-  Ngx_Mesh * LoadMesh (const string & filename, MPI_Comm comm = netgen::ng_comm)
+  Ngx_Mesh * LoadMesh (const string & filename, NgMPI_Comm comm)
   {
     netgen::mesh.reset();
     Ng_LoadMesh (filename.c_str(), comm);
     return new Ngx_Mesh (netgen::mesh);
   }

-  void Ngx_Mesh :: LoadMesh (const string & filename, MPI_Comm comm)
+  void Ngx_Mesh :: LoadMesh (const string & filename, NgMPI_Comm comm)
   {
     netgen::mesh.reset();
     Ng_LoadMesh (filename.c_str(), comm);
@@ -51,7 +51,7 @@ namespace netgen
     mesh = netgen::mesh;
   }

-  void Ngx_Mesh :: LoadMesh (istream & ist, MPI_Comm comm)
+  void Ngx_Mesh :: LoadMesh (istream & ist, NgMPI_Comm comm)
   {
     netgen::mesh = make_shared<Mesh>();
     netgen::mesh->SetCommunicator(comm);
@@ -553,18 +553,19 @@ namespace netgen
     order = 1;


-    MPI_Comm curve_comm;
+    // MPI_Comm curve_comm;
+    const auto & curve_comm = mesh.GetCommunicator();
 #ifdef PARALLEL
     enum { MPI_TAG_CURVE = MPI_TAG_MESH+20 };

     const ParallelMeshTopology & partop = mesh.GetParallelTopology ();
-    MPI_Comm_dup (mesh.GetCommunicator(), &curve_comm);
+    // MPI_Comm_dup (mesh.GetCommunicator(), &curve_comm);
     Array<int> procs;
 #else
-    curve_comm = ng_comm; // dummy!
+    // curve_comm = mesh.GetCommunicator();
 #endif
-    int rank = MyMPI_GetId(curve_comm);
-    int ntasks = MyMPI_GetNTasks(curve_comm);
+    int rank = curve_comm.Rank();
+    int ntasks = curve_comm.Size();

     if (working)
       order = aorder;
@@ -656,8 +657,8 @@ namespace netgen
       }
     }

+    if (ntasks > 1)
       MyMPI_ExchangeTable (send_orders, recv_orders, MPI_TAG_CURVE, curve_comm);

     if (ntasks > 1 && working)
       {
@@ -1186,7 +1187,8 @@ namespace netgen
       }
     }

-    MyMPI_ExchangeTable (send_surfnr, recv_surfnr, MPI_TAG_CURVE, curve_comm);
+    if (ntasks > 1)
+      MyMPI_ExchangeTable (send_surfnr, recv_surfnr, MPI_TAG_CURVE, curve_comm);

     if (ntasks > 1 && working)
       {
@@ -1369,8 +1371,8 @@ namespace netgen


 #ifdef PARALLEL
-    MPI_Barrier (curve_comm);
-    MPI_Comm_free (&curve_comm);
+    curve_comm.Barrier();
+    // MPI_Comm_free (&curve_comm);
 #endif
   }

@@ -32,7 +32,7 @@ namespace netgen
   // TraceGlobal glob2("global2");

   // global communicator for netgen
-  DLL_HEADER NgMPI_Comm ng_comm;
+  // DLL_HEADER NgMPI_Comm ng_comm;

   weak_ptr<Mesh> global_mesh;
   void SetGlobalMesh (shared_ptr<Mesh> m)
@@ -61,7 +61,7 @@ namespace netgen
   DLL_HEADER void SetGlobalMesh (shared_ptr<Mesh> m);

   // global communicator for netgen (dummy if no MPI)
-  extern DLL_HEADER NgMPI_Comm ng_comm;
+  // extern DLL_HEADER NgMPI_Comm ng_comm;

 }

@@ -43,7 +43,7 @@ namespace netgen
     bcnames.SetSize(0);
     cd2names.SetSize(0);

-    this->comm = netgen :: ng_comm;
+    // this->comm = netgen :: ng_comm;
 #ifdef PARALLEL
     paralleltop = new ParallelMeshTopology (*this);
 #endif
@@ -1325,8 +1325,8 @@ namespace netgen

     if (archive.Input())
       {
-        int rank = MyMPI_GetId(GetCommunicator());
-        int ntasks = MyMPI_GetNTasks(GetCommunicator());
+        int rank = GetCommunicator().Rank();
+        int ntasks = GetCommunicator().Size();

         RebuildSurfaceElementLists();

@@ -35,8 +35,8 @@ namespace netgen

   void Mesh :: SendRecvMesh ()
   {
-    int id = MyMPI_GetId(GetCommunicator());
-    int np = MyMPI_GetNTasks(GetCommunicator());
+    int id = GetCommunicator().Rank();
+    int np = GetCommunicator().Size();

     if (np == 1) {
       throw NgException("SendRecvMesh called, but only one rank in communicator!!");
@@ -72,9 +72,9 @@ namespace netgen
   {
     Array<MPI_Request> sendrequests;

-    MPI_Comm comm = GetCommunicator();
-    int id = MyMPI_GetId(comm);
-    int ntasks = MyMPI_GetNTasks(comm);
+    NgMPI_Comm comm = GetCommunicator();
+    int id = comm.Rank();
+    int ntasks = comm.Size();

     int dim = GetDimension();
     MyMPI_Bcast(dim, comm);
@@ -751,8 +751,9 @@ namespace netgen
     int timer_sels = NgProfiler::CreateTimer ("Receive surface elements");
     NgProfiler::RegionTimer reg(timer);

-    int id = MyMPI_GetId(GetCommunicator());
-    int ntasks = MyMPI_GetNTasks(GetCommunicator());
+    NgMPI_Comm comm = GetCommunicator();
+    int id = comm.Rank();
+    int ntasks = comm.Size();

     int dim;
     MyMPI_Bcast(dim, comm);
@@ -1011,9 +1012,9 @@ namespace netgen
   // call it only for the master !
   void Mesh :: Distribute ()
   {
-    MPI_Comm comm = GetCommunicator();
-    int id = MyMPI_GetId(comm);
-    int ntasks = MyMPI_GetNTasks(comm);
+    NgMPI_Comm comm = GetCommunicator();
+    int id = comm.Rank();
+    int ntasks = comm.Size();

     if (id != 0 || ntasks == 1 ) return;

@@ -1072,7 +1073,7 @@ namespace netgen
     eptr.Append (eind.Size());
     Array<idx_t> epart(ne), npart(nn);

-    idxtype nparts = MyMPI_GetNTasks(GetCommunicator())-1;
+    idxtype nparts = GetCommunicator().Size()-1;

     if (nparts == 1)
       {
@@ -1293,9 +1294,9 @@ namespace netgen
   // call it only for the master !
   void Mesh :: Distribute (Array<int> & volume_weights , Array<int> & surface_weights, Array<int> & segment_weights)
   {
-    MPI_Comm comm = GetCommunicator();
-    int id = MyMPI_GetId(comm);
-    int ntasks = MyMPI_GetNTasks(comm);
+    NgMPI_Comm comm = GetCommunicator();
+    int id = comm.Rank();
+    int ntasks = comm.Size();

     if (id != 0 || ntasks == 1 ) return;

@@ -1385,7 +1386,7 @@ namespace netgen
     eptr.Append (eind.Size());
     Array<idx_t> epart(ne), npart(nn);

-    idxtype nparts = MyMPI_GetNTasks(GetCommunicator())-1;
+    idxtype nparts = GetCommunicator().Size()-1;

     if (nparts == 1)
       {
@@ -25,8 +25,9 @@ namespace netgen
   {
     *testout << "ParallelMeshTopology::Reset" << endl;

-    int id = MyMPI_GetId(mesh.GetCommunicator());
-    int ntasks = MyMPI_GetNTasks(mesh.GetCommunicator());
+    NgMPI_Comm comm = mesh.GetCommunicator();
+    int id = comm.Rank();
+    int ntasks = comm.Size();

     if ( ntasks == 1 ) return;

@@ -125,7 +126,8 @@ namespace netgen
     *testout << "ParallelMeshTopology :: UpdateCoarseGridGlobal" << endl;

     const MeshTopology & topology = mesh.GetTopology();
+    MPI_Comm comm = mesh.GetCommunicator();

     if ( id == 0 )
       {
         Array<Array<int>*> sendarrays(ntasks);
@@ -160,7 +162,7 @@ namespace netgen

         Array<MPI_Request> sendrequests;
         for (int dest = 1; dest < ntasks; dest++)
-          sendrequests.Append (MyMPI_ISend (*sendarrays[dest], dest, MPI_TAG_MESH+10));
+          sendrequests.Append (MyMPI_ISend (*sendarrays[dest], dest, MPI_TAG_MESH+10, comm));
         MPI_Waitall (sendrequests.Size(), &sendrequests[0], MPI_STATUS_IGNORE);

         for (int dest = 1; dest < ntasks; dest++)
@@ -171,7 +173,7 @@ namespace netgen

       {
         Array<int> recvarray;
-        MyMPI_Recv (recvarray, 0, MPI_TAG_MESH+10);
+        MyMPI_Recv (recvarray, 0, MPI_TAG_MESH+10, comm);

         int ii = 0;

@@ -209,10 +211,12 @@ namespace netgen
     // cout << "UpdateCoarseGrid" << endl;
     // if (is_updated) return;

-    MPI_Comm comm = mesh.GetCommunicator();
-    int id = MyMPI_GetId(comm);
-    int ntasks = MyMPI_GetNTasks(comm);
+    NgMPI_Comm comm = mesh.GetCommunicator();
+    int id = comm.Rank();
+    int ntasks = comm.Size();

+    if (ntasks == 1) return;

     Reset();
     static int timer = NgProfiler::CreateTimer ("UpdateCoarseGrid");
     NgProfiler::RegionTimer reg(timer);
@@ -20,7 +20,7 @@ namespace netgen
 #ifdef PARALLEL
   /** we need allreduce in python-wrapped communicators **/
   template <typename T>
-  inline T MyMPI_AllReduceNG (T d, const MPI_Op & op = MPI_SUM, MPI_Comm comm = ng_comm)
+  inline T MyMPI_AllReduceNG (T d, const MPI_Op & op /* = MPI_SUM */, MPI_Comm comm)
   {
     T global_d;
     MPI_Allreduce ( &d, &global_d, 1, MyGetMPIType<T>(), op, comm);
@@ -30,7 +30,7 @@ namespace netgen
   enum { MPI_SUM = 0, MPI_MIN = 1, MPI_MAX = 2 };
   typedef int MPI_Op;
   template <typename T>
-  inline T MyMPI_AllReduceNG (T d, const MPI_Op & op = MPI_SUM, MPI_Comm comm = ng_comm)
+  inline T MyMPI_AllReduceNG (T d, const MPI_Op & op /* = MPI_SUM */, MPI_Comm comm)
   { return d; }
 #endif
 }
@@ -89,12 +89,11 @@ DLL_HEADER void ExportNetgenMeshing(py::module &m)
   py::class_<NgMPI_Comm> (m, "MPI_Comm")
     .def_property_readonly ("rank", &NgMPI_Comm::Rank)
     .def_property_readonly ("size", &NgMPI_Comm::Size)
+    .def("Barrier", &NgMPI_Comm::Barrier)

 #ifdef PARALLEL
-    .def("Barrier", [](NgMPI_Comm & c) { MPI_Barrier(c); })
     .def("WTime", [](NgMPI_Comm & c) { return MPI_Wtime(); })
 #else
-    .def("Barrier", [](NgMPI_Comm & c) { })
     .def("WTime", [](NgMPI_Comm & c) { return -1.0; })
 #endif
     .def("Sum", [](NgMPI_Comm & c, double x) { return MyMPI_AllReduceNG(x, MPI_SUM, c); })
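Because NgMPI_Comm now has a Barrier() member in both the MPI and the serial build, the python binding can point at the member function directly instead of two #ifdef-guarded lambdas. A minimal self-contained pybind11 sketch of that binding pattern, using a hypothetical DemoComm class rather than the real netgen module:

#include <pybind11/pybind11.h>
namespace py = pybind11;

// Hypothetical communicator-like class; in netgen this role is played by NgMPI_Comm.
struct DemoComm
{
  int Rank() const { return 0; }
  int Size() const { return 1; }
  void Barrier() const { /* no-op when only one rank participates */ }
};

PYBIND11_MODULE(demo_comm, m)
{
  py::class_<DemoComm> (m, "DemoComm")
    .def(py::init<>())
    .def_property_readonly ("rank", &DemoComm::Rank)
    .def_property_readonly ("size", &DemoComm::Size)
    // one binding for all builds, instead of #ifdef-guarded lambdas
    .def("Barrier", &DemoComm::Barrier);
}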
@@ -557,13 +556,13 @@ DLL_HEADER void ExportNetgenMeshing(py::module &m)
     .def_property_readonly("_timestamp", &Mesh::GetTimeStamp)
     .def("Distribute", [](shared_ptr<Mesh> self, NgMPI_Comm comm) {
       self->SetCommunicator(comm);
-      if(MyMPI_GetNTasks(comm)==1) return self;
+      if(comm.Size()==1) return self;
       // if(MyMPI_GetNTasks(comm)==2) throw NgException("Sorry, cannot handle communicators with NP=2!");
       // cout << " rank " << MyMPI_GetId(comm) << " of " << MyMPI_GetNTasks(comm) << " called Distribute " << endl;
-      if(MyMPI_GetId(comm)==0) self->Distribute();
+      if(comm.Rank()==0) self->Distribute();
       else self->SendRecvMesh();
       return self;
-    }, py::arg("comm")=NgMPI_Comm(ng_comm))
+    }, py::arg("comm"))
     .def("Receive", [](NgMPI_Comm comm) {
       auto mesh = make_shared<Mesh>();
       mesh->SetCommunicator(comm);
@@ -575,9 +574,9 @@ DLL_HEADER void ExportNetgenMeshing(py::module &m)
     {
       istream * infile;

-      MPI_Comm comm = self.GetCommunicator();
-      id = MyMPI_GetId(comm);
-      ntasks = MyMPI_GetNTasks(comm);
+      NgMPI_Comm comm = self.GetCommunicator();
+      id = comm.Rank();
+      ntasks = comm.Size();

 #ifdef PARALLEL
       char* buf = nullptr;
@@ -2650,13 +2650,17 @@ namespace netgen
     static int timer2 = NgProfiler::CreateTimer ("getminmax, surf");

 #ifdef PARALLEL
-    if (id == 0)
+    auto comm = mesh->GetCommunicator();
+    if (comm.Size() > 1)
       {
-        MyMPI_SendCmd ("redraw");
-        MyMPI_SendCmd ("getminmax");
+        if (id == 0)
+          {
+            MyMPI_SendCmd ("redraw");
+            MyMPI_SendCmd ("getminmax");
+          }
+        MyMPI_Bcast (funcnr, mesh->GetCommunicator());
+        MyMPI_Bcast (comp, mesh->GetCommunicator());
       }
-    MyMPI_Bcast (funcnr);
-    MyMPI_Bcast (comp);
 #endif

     // double val;
@@ -2744,11 +2748,14 @@ namespace netgen
         minv = 1e99;
         maxv = -1e99;
       }
-    double hmin, hmax;
-    MPI_Reduce (&minv, &hmin, 1, MPI_DOUBLE, MPI_MIN, 0, MPI_COMM_WORLD);
-    MPI_Reduce (&maxv, &hmax, 1, MPI_DOUBLE, MPI_MAX, 0, MPI_COMM_WORLD);
-    minv = hmin;
-    maxv = hmax;
+    if (ntasks > 1)
+      {
+        double hmin, hmax;
+        MPI_Reduce (&minv, &hmin, 1, MPI_DOUBLE, MPI_MIN, 0, MPI_COMM_WORLD);
+        MPI_Reduce (&maxv, &hmax, 1, MPI_DOUBLE, MPI_MAX, 0, MPI_COMM_WORLD);
+        minv = hmin;
+        maxv = hmax;
+      }
 #endif
   }

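The visualization hunks above only perform MPI reductions when more than one rank is present. A small self-contained sketch of that guarded MPI_Reduce pattern, assuming only <mpi.h>; variable names are illustrative:

#include <mpi.h>
#include <iostream>

int main (int argc, char ** argv)
{
  MPI_Init(&argc, &argv);
  int rank, ntasks;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  MPI_Comm_size(MPI_COMM_WORLD, &ntasks);

  // each rank contributes a local min/max of some quantity
  double minv = 1.0 + rank;
  double maxv = 10.0 + rank;

  // only reduce when there is actually more than one rank;
  // in a serial run the local values are already the global ones
  if (ntasks > 1)
    {
      double hmin, hmax;
      MPI_Reduce(&minv, &hmin, 1, MPI_DOUBLE, MPI_MIN, 0, MPI_COMM_WORLD);
      MPI_Reduce(&maxv, &hmax, 1, MPI_DOUBLE, MPI_MAX, 0, MPI_COMM_WORLD);
      if (rank == 0) { minv = hmin; maxv = hmax; }  // reduced values live on root
    }

  if (rank == 0)
    std::cout << "global min " << minv << ", max " << maxv << std::endl;

  MPI_Finalize();
  return 0;
}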
@@ -77,7 +77,7 @@ int main(int argc, char ** argv)
     throw ngcore::Exception("Netgen GUI cannot run MPI-parallel");

   // MPI_COMM_WORLD is just a local communicator
-  netgen::ng_comm = ngcore::NgMPI_Comm{MPI_COMM_WORLD, false};
+  // netgen::ng_comm = ngcore::NgMPI_Comm{MPI_COMM_WORLD, false};

 #endif

@@ -1219,7 +1219,7 @@ namespace netgen

 #ifdef PARALLEL
     MyMPI_SendCmd ("bcastparthread");
-    MyMPI_Bcast (mparam.parthread);
+    MyMPI_Bcast (mparam.parthread, MPI_COMM_WORLD);
 #endif

     return TCL_OK;
@@ -125,7 +125,7 @@ void ParallelRun()

   else if ( message == "bcastparthread" )
     {
-      MyMPI_Bcast (mparam.parthread);
+      MyMPI_Bcast (mparam.parthread, MPI_COMM_WORLD);
     }
