Mirror of https://github.com/NGSolve/netgen.git (synced 2024-12-24 04:50:34 +05:00)
reduce duplicated mpi-wrapping
This commit is contained in:
parent c5795aade8
commit c074e0c752
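
The pattern being consolidated is easiest to see side by side: several netgen modules carried their own thin free-function wrappers around raw MPI calls (MyMPI_Send, MyMPI_Recv, MyMPI_AllReduceNG, ...), while ngcore's NgMPI_Comm already offers the same operations as member functions (Rank, Size, Send, Recv, AllToAll, AllReduce, ISend, IRecv). A minimal sketch of the before/after call style, using only members that actually appear in the hunks below; the function and variable names are illustrative, not part of the commit:

    // before: module-local helper, duplicated across headers
    inline double SumAcrossRanks_old (double x, MPI_Comm comm)
    {
      double global;
      MPI_Allreduce (&x, &global, 1, MPI_DOUBLE, MPI_SUM, comm);
      return global;
    }

    // after: the shared ngcore wrapper is used directly at the call site
    double SumAcrossRanks_new (const NgMPI_Comm & comm, double x)
    {
      return comm.AllReduce (x, MPI_SUM);   // same call as in the python-binding hunk below
    }

Send/Recv and the nonblocking ISend/IRecv follow the same pattern, which is why the remaining MyMPI_* overloads below are either deleted or tagged as deprecated.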
@@ -227,6 +227,14 @@ namespace ngcore
      MPI_Bcast (&s[0], len, MPI_CHAR, root, comm);
    }

    template <typename T>
    void AllToAll (FlatArray<T> send, FlatArray<T> recv) const
    {
      MPI_Alltoall (send.Data(), 1, GetMPIType<T>(),
                    recv.Data(), 1, GetMPIType<T>(), comm);
    }


    NgMPI_Comm SubCommunicator (FlatArray<int> procs) const
    {
      MPI_Comm subcomm;
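
The AllToAll member added in this hunk is what later lets MyMPI_ExchangeTable drop its hand-coded MPI_Alltoall of row sizes (see that hunk further down). A small usage sketch, with the counting logic reduced to a placeholder:

    // exchange one int per rank: send_sizes[i] = how much we will send to rank i;
    // afterwards recv_sizes[i] = how much rank i will send to us
    int ntasks = comm.Size();
    Array<int> send_sizes(ntasks), recv_sizes(ntasks);
    for (int i = 0; i < ntasks; i++)
      send_sizes[i] = 0;                      // placeholder: fill with real counts
    comm.AllToAll (send_sizes, recv_sizes);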
@@ -6,6 +6,7 @@
#include "bitarray.hpp"
#include "exception.hpp"
#include "flags.hpp"
#include "table.hpp"
#include "hashtable.hpp"
#include "localheap.hpp"
#include "logging.hpp"
@@ -13,7 +14,6 @@
#include "profiler.hpp"
#include "signal.hpp"
#include "symboltable.hpp"
#include "table.hpp"
#include "taskmanager.hpp"
#include "version.hpp"
#include "xbool.hpp"
@@ -14,65 +14,8 @@
namespace netgen
{
  // using ngcore::id;
  // using ngcore::ntasks;

#ifndef PARALLEL
  /** without MPI, we need a dummy typedef **/
  // typedef int MPI_Comm;
#endif

  /** This is the "standard" communicator that will be used for netgen-objects. **/
  // extern DLL_HEADER NgMPI_Comm ng_comm;

#ifdef OLD
#ifdef PARALLEL
  inline int MyMPI_GetNTasks (MPI_Comm comm /* = ng_comm */)
  {
    int ntasks;
    MPI_Comm_size(comm, &ntasks);
    return ntasks;
  }
  inline int MyMPI_GetId (MPI_Comm comm /* = ng_comm */)
  {
    int id;
    MPI_Comm_rank(comm, &id);
    return id;
  }
#else
  // enum { MPI_COMM_WORLD = 12345, MPI_COMM_NULL = 0};
  inline int MyMPI_GetNTasks (MPI_Comm comm /* = ng_comm */) { return 1; }
  inline int MyMPI_GetId (MPI_Comm comm /* = ng_comm */) { return 0; }
#endif
#endif

  /*
#ifdef PARALLEL
  // For python wrapping of communicators
  struct PyMPI_Comm {
    MPI_Comm comm;
    bool owns_comm;
    PyMPI_Comm (MPI_Comm _comm, bool _owns_comm = false) : comm(_comm), owns_comm(_owns_comm) { }
    PyMPI_Comm (const PyMPI_Comm & c) = delete;
    ~PyMPI_Comm () {
      if (owns_comm)
        MPI_Comm_free(&comm);
    }
    inline int Rank() const { return MyMPI_GetId(comm); }
    inline int Size() const { return MyMPI_GetNTasks(comm); }
  };
#else
  // dummy without MPI
  struct PyMPI_Comm {
    MPI_Comm comm = 0;
    PyMPI_Comm (MPI_Comm _comm, bool _owns_comm = false) { }
    ~PyMPI_Comm () { }
    inline int Rank() const { return 0; }
    inline int Size() const { return 1; }
  };
#endif
  */

#ifdef PARALLEL
  template <class T>
  inline MPI_Datatype MyGetMPIType ( )
@@ -93,32 +36,35 @@ namespace netgen
  typedef int MPI_Datatype;
  template <class T> inline MPI_Datatype MyGetMPIType ( ) { return 0; }
#endif
#endif


#ifdef PARALLEL
  enum { MPI_TAG_CMD = 110 };
  enum { MPI_TAG_MESH = 210 };
  enum { MPI_TAG_VIS = 310 };

  inline void MyMPI_Send (int i, int dest, int tag, MPI_Comm comm /* = ng_comm */)

  [[deprecated("mympi_send int, use comm.Send instead")]]
  inline void MyMPI_Send (int i, int dest, int tag, MPI_Comm comm)
  {
    int hi = i;
    MPI_Send( &hi, 1, MPI_INT, dest, tag, comm);
  }

  inline void MyMPI_Recv (int & i, int src, int tag, MPI_Comm comm /* = ng_comm */)

  [[deprecated("mympi_revc int, use comm.Recv instead")]]
  inline void MyMPI_Recv (int & i, int src, int tag, MPI_Comm comm)
  {
    MPI_Status status;
    MPI_Recv( &i, 1, MPI_INT, src, tag, comm, &status);
  }



  inline void MyMPI_Send (const string & s, int dest, int tag, MPI_Comm comm /* = ng_comm */)
  inline void MyMPI_Send (const string & s, int dest, int tag, MPI_Comm comm)
  {
    MPI_Send( const_cast<char*> (s.c_str()), s.length(), MPI_CHAR, dest, tag, comm);
  }

  inline void MyMPI_Recv (string & s, int src, int tag, MPI_Comm comm /* = ng_comm */)
  inline void MyMPI_Recv (string & s, int src, int tag, MPI_Comm comm)
  {
    MPI_Status status;
    int len;
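
Note that the int overloads of MyMPI_Send/MyMPI_Recv are not deleted in this hunk but tagged [[deprecated]], so existing call sites keep compiling while the compiler nudges them toward the communicator methods. A generic illustration of what that attribute does at a call site (names are made up, not from the commit):

    [[deprecated("use comm.Send instead")]]
    inline void OldStyleSend (int i) { /* ... */ }

    void Caller ()
    {
      OldStyleSend (1);   // still compiles, but every build emits a deprecation warning here
    }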
@@ -132,32 +78,32 @@ namespace netgen


  template <class T, int BASE>
  inline void MyMPI_Send (NgFlatArray<T, BASE> s, int dest, int tag, MPI_Comm comm /* = ng_comm */)
  inline void MyMPI_Send (NgFlatArray<T, BASE> s, int dest, int tag, MPI_Comm comm)
  {
    MPI_Send( &s.First(), s.Size(), MyGetMPIType<T>(), dest, tag, comm);
    MPI_Send( &s.First(), s.Size(), GetMPIType<T>(), dest, tag, comm);
  }

  template <class T, int BASE>
  inline void MyMPI_Recv ( NgFlatArray<T, BASE> s, int src, int tag, MPI_Comm comm /* = ng_comm */)
  inline void MyMPI_Recv ( NgFlatArray<T, BASE> s, int src, int tag, MPI_Comm comm)
  {
    MPI_Status status;
    MPI_Recv( &s.First(), s.Size(), MyGetMPIType<T>(), src, tag, comm, &status);
    MPI_Recv( &s.First(), s.Size(), GetMPIType<T>(), src, tag, comm, &status);
  }

  template <class T, int BASE>
  inline void MyMPI_Recv ( NgArray <T, BASE> & s, int src, int tag, MPI_Comm comm /* = ng_comm */)
  inline void MyMPI_Recv ( NgArray <T, BASE> & s, int src, int tag, MPI_Comm comm)
  {
    MPI_Status status;
    int len;
    MPI_Probe (src, tag, comm, &status);
    MPI_Get_count (&status, MyGetMPIType<T>(), &len);
    MPI_Get_count (&status, GetMPIType<T>(), &len);

    s.SetSize (len);
    MPI_Recv( &s.First(), len, MyGetMPIType<T>(), src, tag, comm, &status);
    MPI_Recv( &s.First(), len, GetMPIType<T>(), src, tag, comm, &status);
  }

  template <class T, int BASE>
  inline int MyMPI_Recv ( NgArray <T, BASE> & s, int tag, MPI_Comm comm /* = ng_comm */)
  inline int MyMPI_Recv ( NgArray <T, BASE> & s, int tag, MPI_Comm comm)
  {
    MPI_Status status;
    int len;
@@ -165,10 +111,10 @@ namespace netgen
    int src = status.MPI_SOURCE;

    MPI_Get_count (&status, MyGetMPIType<T>(), &len);
    MPI_Get_count (&status, GetMPIType<T>(), &len);

    s.SetSize (len);
    MPI_Recv( &s.First(), len, MyGetMPIType<T>(), src, tag, comm, &status);
    MPI_Recv( &s.First(), len, GetMPIType<T>(), src, tag, comm, &status);

    return src;
  }
@@ -190,22 +136,22 @@ namespace netgen
  */

  template <class T, int BASE>
  inline MPI_Request MyMPI_ISend (NgFlatArray<T, BASE> s, int dest, int tag, MPI_Comm comm /* = ng_comm */)
  inline MPI_Request MyMPI_ISend (NgFlatArray<T, BASE> s, int dest, int tag, MPI_Comm comm)
  {
    MPI_Request request;
    MPI_Isend( &s.First(), s.Size(), MyGetMPIType<T>(), dest, tag, comm, &request);
    MPI_Isend( &s.First(), s.Size(), GetMPIType<T>(), dest, tag, comm, &request);
    return request;
  }


  template <class T, int BASE>
  inline MPI_Request MyMPI_IRecv (NgFlatArray<T, BASE> s, int dest, int tag, MPI_Comm comm /* = ng_comm */)
  inline MPI_Request MyMPI_IRecv (NgFlatArray<T, BASE> s, int dest, int tag, MPI_Comm comm)
  {
    MPI_Request request;
    MPI_Irecv( &s.First(), s.Size(), MyGetMPIType<T>(), dest, tag, comm, &request);
    MPI_Irecv( &s.First(), s.Size(), GetMPIType<T>(), dest, tag, comm, &request);
    return request;
  }


  /*
  template <class T, int BASE>
  inline void MyMPI_ISend (NgFlatArray<T, BASE> s, int dest, int tag)
@@ -232,106 +178,59 @@ namespace netgen
     receive-table entries will be set
  */

  /*
  template <typename T>
  inline void MyMPI_ExchangeTable (TABLE<T> & send_data,
                                   TABLE<T> & recv_data, int tag,
                                   MPI_Comm comm = MPI_COMM_WORLD)
                                   const NgMPI_Comm & comm)
  {
    int ntasks, rank;
    MPI_Comm_size(comm, &ntasks);
    MPI_Comm_rank(comm, &rank);

    NgArray<MPI_Request> requests;
    for (int dest = 0; dest < ntasks; dest++)
      if (dest != rank)
        requests.Append (MyMPI_ISend (send_data[dest], dest, tag, comm));

    for (int i = 0; i < ntasks-1; i++)
      {
        MPI_Status status;
        MPI_Probe (MPI_ANY_SOURCE, tag, comm, &status);
        int size, src = status.MPI_SOURCE;
        MPI_Get_count (&status, MPI_INT, &size);
        recv_data.SetEntrySize (src, size, sizeof(T));
        requests.Append (MyMPI_IRecv (recv_data[src], src, tag, comm));
      }
    MPI_Barrier (comm);
    MPI_Waitall (requests.Size(), &requests[0], MPI_STATUS_IGNORE);
  }
  */

  template <typename T>
  inline void MyMPI_ExchangeTable (TABLE<T> & send_data,
                                   TABLE<T> & recv_data, int tag,
                                   const NgMPI_Comm & comm /* = ng_comm */)
  {
    /*
    int rank = MyMPI_GetId(comm);
    int ntasks = MyMPI_GetNTasks(comm);
    */
    int rank = comm.Rank();
    int ntasks = comm.Size();

    NgArray<int> send_sizes(ntasks);
    NgArray<int> recv_sizes(ntasks);
    Array<int> send_sizes(ntasks);
    Array<int> recv_sizes(ntasks);
    for (int i = 0; i < ntasks; i++)
      send_sizes[i] = send_data[i].Size();

    comm.AllToAll (send_sizes, recv_sizes);

    MPI_Alltoall (&send_sizes[0], 1, MPI_INT,
                  &recv_sizes[0], 1, MPI_INT, comm);

    // in-place is buggy !
    // MPI_Alltoall (MPI_IN_PLACE, 1, MPI_INT,
    //               &recv_sizes[0], 1, MPI_INT, comm);


    for (int i = 0; i < ntasks; i++)
      recv_data.SetEntrySize (i, recv_sizes[i], sizeof(T));

    NgArray<MPI_Request> requests;
    Array<MPI_Request> requests;
    for (int dest = 0; dest < ntasks; dest++)
      if (dest != rank && send_data[dest].Size())
        requests.Append (MyMPI_ISend (send_data[dest], dest, tag, comm));
        requests.Append (comm.ISend (FlatArray<T>(send_data[dest]), dest, tag));

    for (int dest = 0; dest < ntasks; dest++)
      if (dest != rank && recv_data[dest].Size())
        requests.Append (MyMPI_IRecv (recv_data[dest], dest, tag, comm));
        requests.Append (comm.IRecv (FlatArray<T>(recv_data[dest]), dest, tag));

    // MPI_Barrier (comm);
    MPI_Waitall (requests.Size(), &requests[0], MPI_STATUS_IGNORE);
    MyMPI_WaitAll (requests);
  }




  extern void MyMPI_SendCmd (const char * cmd);
  extern string MyMPI_RecvCmd ();



  template <class T>
  inline void MyMPI_Bcast (T & s, MPI_Comm comm /* = ng_comm */)
  inline void MyMPI_Bcast (T & s, MPI_Comm comm)
  {
    MPI_Bcast (&s, 1, MyGetMPIType<T>(), 0, comm);
    MPI_Bcast (&s, 1, GetMPIType<T>(), 0, comm);
  }

  template <class T>
  inline void MyMPI_Bcast (NgArray<T, 0> & s, NgMPI_Comm comm /* = ng_comm */)
  inline void MyMPI_Bcast (NgArray<T, 0> & s, NgMPI_Comm comm)
  {
    int size = s.Size();
    MyMPI_Bcast (size, comm);
    // if (MyMPI_GetId(comm) != 0) s.SetSize (size);
    if (comm.Rank() != 0) s.SetSize (size);
    MPI_Bcast (&s[0], size, MyGetMPIType<T>(), 0, comm);
    MPI_Bcast (&s[0], size, GetMPIType<T>(), 0, comm);
  }

  template <class T>
  inline void MyMPI_Bcast (NgArray<T, 0> & s, int root, MPI_Comm comm /* = ng_comm */)
  inline void MyMPI_Bcast (NgArray<T, 0> & s, int root, MPI_Comm comm)
  {
    int id;
    MPI_Comm_rank(comm, &id);
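
The rewritten MyMPI_ExchangeTable keeps its calling convention; only its internals now go through the communicator object (Rank/Size, AllToAll for the row sizes, ISend/IRecv for the payload, MyMPI_WaitAll at the end). A rough sketch of a call site, modeled on the UpdateCoarseGrid hunks further down; the table construction and payload are illustrative, not taken from the commit:

    void ExchangeExample (const NgMPI_Comm & comm)
    {
      int ntasks = comm.Size(), rank = comm.Rank();

      NgArray<int> cnt_send(ntasks);          // first pass: count entries per destination
      cnt_send = 0;
      for (int dest = 0; dest < ntasks; dest++)
        if (dest != rank) cnt_send[dest] = 1;

      TABLE<int> send_data(cnt_send);         // send rows sized from the counts
      TABLE<int> recv_data(ntasks);           // receive rows are sized inside the helper
      for (int dest = 0; dest < ntasks; dest++)
        if (dest != rank)
          send_data.Add (dest, 100*rank + dest);   // arbitrary payload

      MyMPI_ExchangeTable (send_data, recv_data, MPI_TAG_MESH+9, comm);
      // afterwards recv_data[src] holds whatever rank src appended for this rank
    }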
@@ -340,67 +239,21 @@ namespace netgen
    MPI_Bcast (&size, 1, MPI_INT, root, comm);
    if (id != root) s.SetSize (size);
    if ( !size ) return;
    MPI_Bcast (&s[0], size, MyGetMPIType<T>(), root, comm);
    MPI_Bcast (&s[0], size, GetMPIType<T>(), root, comm);
  }

  template <class T, class T2>
  inline void MyMPI_Allgather (const T & send, NgFlatArray<T2> recv, MPI_Comm comm /* = ng_comm */)
  inline void MyMPI_Allgather (const T & send, NgFlatArray<T2> recv, MPI_Comm comm)
  {
    MPI_Allgather( const_cast<T*> (&send), 1, MyGetMPIType<T>(), &recv[0], 1, MyGetMPIType<T2>(), comm);
    MPI_Allgather( const_cast<T*> (&send), 1, GetMPIType<T>(), &recv[0], 1, GetMPIType<T2>(), comm);
  }

  template <class T, class T2>
  inline void MyMPI_Alltoall (NgFlatArray<T> send, NgFlatArray<T2> recv, MPI_Comm comm /* = ng_comm */)
  inline void MyMPI_Alltoall (NgFlatArray<T> send, NgFlatArray<T2> recv, MPI_Comm comm)
  {
    MPI_Alltoall( &send[0], 1, MyGetMPIType<T>(), &recv[0], 1, MyGetMPIType<T2>(), comm);
    MPI_Alltoall( &send[0], 1, GetMPIType<T>(), &recv[0], 1, GetMPIType<T2>(), comm);
  }

  // template <class T, class T2>
  // inline void MyMPI_Alltoall_Block (NgFlatArray<T> send, NgFlatArray<T2> recv, int blocklen, MPI_Comm comm = ng_comm)
  // {
  //   MPI_Alltoall( &send[0], blocklen, MyGetMPIType<T>(), &recv[0], blocklen, MyGetMPIType<T2>(), comm);
  // }



  /*
  inline void MyMPI_Send ( int *& s, int len, int dest, int tag)
  {
    int hlen = len;
    MPI_Send( &hlen, 1, MPI_INT, dest, tag, MPI_COMM_WORLD);
    MPI_Send( s, len, MPI_INT, dest, tag, MPI_COMM_WORLD);
  }


  inline void MyMPI_Recv ( int *& s, int & len, int src, int tag)
  {
    MPI_Status status;
    MPI_Recv( &len, 1, MPI_INT, src, tag, MPI_COMM_WORLD, &status);
    if ( s )
      delete [] s;
    s = new int [len];
    MPI_Recv( s, len, MPI_INT, src, tag, MPI_COMM_WORLD, &status);
  }



  inline void MyMPI_Send ( double * s, int len, int dest, int tag)
  {
    MPI_Send( &len, 1, MPI_INT, dest, tag, MPI_COMM_WORLD);
    MPI_Send( s, len, MPI_DOUBLE, dest, tag, MPI_COMM_WORLD);
  }


  inline void MyMPI_Recv ( double *& s, int & len, int src, int tag)
  {
    MPI_Status status;
    MPI_Recv( &len, 1, MPI_INT, src, tag, MPI_COMM_WORLD, &status);
    if ( s )
      delete [] s;
    s = new double [len];
    MPI_Recv( s, len, MPI_DOUBLE, src, tag, MPI_COMM_WORLD, &status);
  }
  */

#endif // PARALLEL
@@ -205,6 +205,12 @@ namespace netgen
    {
      return ( Pos(elem) >= 0 );
    }

    operator FlatArray<T> () const
    {
      static_assert (BASE==0);
      return FlatArray<T>(size, data);
    }
  };
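
The conversion operator added here (guarded by static_assert(BASE==0)) is presumably what lets the legacy zero-based netgen arrays be handed to the ngcore interfaces that take FlatArray<T>, e.g. the comm.ISend/IRecv calls and the explicit FlatArray<T>(send_data[dest]) casts in the exchange-table hunk above. A hedged sketch; the function name is illustrative:

    MPI_Request SendRow (const NgMPI_Comm & comm, NgArray<int> & row, int dest, int tag)
    {
      FlatArray<int> view = row;             // uses the new conversion; requires BASE == 0
      return comm.ISend (view, dest, tag);   // caller must eventually wait on the request
    }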
@@ -478,7 +478,7 @@ namespace netgen
  };


#ifdef PARALLEL
#ifdef PARALLEL_OLD
  template <>
  inline MPI_Datatype MyGetMPIType<Vec<3, double> > ()
  {
@@ -30,14 +30,14 @@ namespace netgen
      (char*)&hp.layer - (char*)&hp,
      (char*)&hp.singular - (char*)&hp };
    MPI_Datatype types[] = { MPI_DOUBLE, MPI_INT, MPI_DOUBLE };
    *testout << "displ = " << displ[0] << ", " << displ[1] << ", " << displ[2] << endl;
    *testout << "sizeof = " << sizeof (MeshPoint) << endl;
    // *testout << "displ = " << displ[0] << ", " << displ[1] << ", " << displ[2] << endl;
    // *testout << "sizeof = " << sizeof (MeshPoint) << endl;
    MPI_Type_create_struct (3, blocklen, displ, types, &htype);
    MPI_Type_commit ( &htype );
    MPI_Aint lb, ext;
    MPI_Type_get_extent (htype, &lb, &ext);
    *testout << "lb = " << lb << endl;
    *testout << "ext = " << ext << endl;
    // *testout << "lb = " << lb << endl;
    // *testout << "ext = " << ext << endl;
    ext = sizeof (MeshPoint);
    MPI_Type_create_resized (htype, lb, ext, &type);
    MPI_Type_commit ( &type );
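
For reference, the idiom used above — build a struct datatype, then resize its extent to sizeof(...) so that arrays of the struct can be sent with count > 1 even when the compiler inserts padding — looks like this in plain MPI. This is a generic illustration with a hypothetical Particle struct, not netgen code:

    #include <mpi.h>
    #include <cstddef>

    struct Particle { double x[3]; int id; double weight; };

    MPI_Datatype BuildParticleType ()
    {
      int          blocklen[3] = { 3, 1, 1 };
      MPI_Aint     displ[3]    = { offsetof (Particle, x),
                                   offsetof (Particle, id),
                                   offsetof (Particle, weight) };
      MPI_Datatype types[3]    = { MPI_DOUBLE, MPI_INT, MPI_DOUBLE };

      MPI_Datatype tmp, type;
      MPI_Type_create_struct (3, blocklen, displ, types, &tmp);
      // the extent of 'tmp' ends after the last block; stretch it to the true struct size
      // so that element i of an array starts at i * sizeof(Particle)
      MPI_Type_create_resized (tmp, 0, sizeof (Particle), &type);
      MPI_Type_commit (&type);
      MPI_Type_free (&tmp);
      return type;
    }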
@@ -25,13 +25,19 @@ namespace metis {
  using namespace metis;
#endif

namespace ngcore {
  template <> struct MPI_typetrait<netgen::PointIndex> {
    static MPI_Datatype MPIType () { return MPI_INT; } };
}

namespace netgen
{

  /*
  template <>
  inline MPI_Datatype MyGetMPIType<PointIndex> ( )
  { return MPI_INT; }
  */

  void Mesh :: SendRecvMesh ()
  {
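
The specialisation above plugs PointIndex into ngcore's type-trait machinery. The exact ngcore definitions are not part of this diff, but GetMPIType<T>() presumably forwards to MPI_typetrait<T>::MPIType(), roughly as in the sketch below, which is why a single trait specialisation can replace the old netgen-local MyGetMPIType<PointIndex> overload:

    // assumed shape of the ngcore side (sketch, not taken from this commit):
    namespace ngcore
    {
      template <class T> struct MPI_typetrait;   // specialised per supported type

      template <class T>
      inline MPI_Datatype GetMPIType ()          // the GetMPIType<T>() used throughout the hunks
      { return MPI_typetrait<T>::MPIType(); }
    }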
@@ -460,24 +460,6 @@ namespace netgen
    MyMPI_ExchangeTable (send_edges, recv_edges, MPI_TAG_MESH+9, MPI_LocalComm);
    // cout << "UpdateCoarseGrid - edges mpi-exchange done" << endl;

    /*
    for (int dest = 1; dest < ntasks; dest++)
      {
        auto ex2loc = dest2vert[dest-1];
        NgFlatArray<int> recvarray = recv_edges[dest-1];
        for (int ii = 0; ii < recvarray.Size(); ii+=2)
          for (int edge : dest2edge[dest-1])
            {
              topology.GetEdgeVertices (edge, v1, v2);
              INDEX_2 re(ex2loc[recvarray[ii]],
                         ex2loc[recvarray[ii+1]]);
              INDEX_2 es(v1, v2);
              if (es == re)
                SetDistantEdgeNum(dest, edge);
            }
      }
    */

    for (int dest = 1; dest < ntasks; dest++)
      {
        auto ex2loc = dest2vert[dest-1];
@@ -504,8 +486,6 @@ namespace netgen

    NgProfiler::StopTimer (timere);

    // MPI_Barrier (MPI_LocalComm);

    // cout << "UpdateCoarseGrid - faces" << endl;
    if (mesh.GetDimension() == 3)
      {
@@ -543,13 +523,6 @@ namespace netgen
        for (int dest = 1; dest < ntasks; dest++)
          if (dest != id)
            {
              /*
              loc2exchange = -1;
              int cnt = 0;
              for (PointIndex pi : mesh.Points().Range())
                if (IsExchangeVert(dest, pi))
                  loc2exchange[pi] = cnt++;
              */
              if (dest2vert[dest-1].Size() == 0) continue;

              loc2exchange = -1;
@@ -575,29 +548,6 @@ namespace netgen
        TABLE<int> recv_faces(ntasks-1);
        MyMPI_ExchangeTable (send_faces, recv_faces, MPI_TAG_MESH+9, MPI_LocalComm);
        // cout << "UpdateCoarseGrid - faces mpi-exchange done" << endl;

        /*
        for (int dest = 1; dest < ntasks; dest++)
          if (dest != id)
            {
              loc2exchange = -1;
              int cnt = 0;
              for (PointIndex pi : dest2vert[dest-1])
                loc2exchange[pi] = cnt++;

              NgFlatArray<int> recvarray = recv_faces[dest-1];
              for (int ii = 0; ii < recvarray.Size(); ii+=3)
                for (int face : dest2face[dest-1])
                  {
                    topology.GetFaceVertices (face, verts);
                    INDEX_3 re(recvarray[ii], recvarray[ii+1], recvarray[ii+2]);
                    INDEX_3 es(loc2exchange[verts[0]], loc2exchange[verts[1]], loc2exchange[verts[2]]);
                    if (es == re)
                      SetDistantFaceNum(dest, face);
                  }
            }
        */


        for (int dest = 1; dest < ntasks; dest++)
          {
@@ -622,77 +572,6 @@ namespace netgen
          }
      }






    /*
    NgArray<int,1> glob2loc;

    int maxface = 0;
    for (int face = 1; face <= nfa; face++)
      maxface = max (maxface, GetGlobalFaceNum (face));

    // glob2loc.SetSize (nfaglob);
    glob2loc.SetSize (maxface);
    glob2loc = -1;

    for (int loc = 1; loc <= nfa; loc++)
      glob2loc[GetGlobalFaceNum(loc)] = loc;

    cnt_send = 0;
    NgArray<int> verts;
    for (int face = 1; face <= nfa; face++)
      {
        topology.GetFaceVertices (face, verts);
        for (int dest = 1; dest < ntasks; dest++)
          if (IsExchangeVert (dest, verts[0]) &&
              IsExchangeVert (dest, verts[1]) &&
              IsExchangeVert (dest, verts[2]))
            {
              cnt_send[dest-1]+=2;
            }
      }

    TABLE<int> send_faces(cnt_send);
    for (int face = 1; face <= nfa; face++)
      {
        topology.GetFaceVertices (face, verts);
        for (int dest = 1; dest < ntasks; dest++)
          {
            if (IsExchangeVert (dest, verts[0]) &&
                IsExchangeVert (dest, verts[1]) &&
                IsExchangeVert (dest, verts[2]))
              {
                send_faces.Add (dest-1, GetGlobalFaceNum(face));
                send_faces.Add (dest-1, face);
              }
          }
      }
    TABLE<int> recv_faces(ntasks-1);
    MyMPI_ExchangeTable (send_faces, recv_faces, MPI_TAG_MESH+8, MPI_LocalComm);

    for (int sender = 1; sender < ntasks; sender ++)
      if (id != sender)
        {
          NgFlatArray<int> recvarray = recv_faces[sender-1];

          for (int ii = 0; ii < recvarray.Size(); )
            {
              int globf = recvarray[ii++];
              int distf = recvarray[ii++];

              if (globf <= maxface)
                {
                  int locf = glob2loc[globf];
                  if (locf != -1)
                    SetDistantFaceNum (sender, locf);
                }
            }
        }
    */

    NgProfiler::StopTimer (timerf);
  }
  // cout << "UpdateCoarseGrid - done" << endl;
@@ -69,23 +69,6 @@ namespace netgen
  extern bool netgen_executable_started;
  extern shared_ptr<NetgenGeometry> ng_geometry;
  extern void Optimize2d (Mesh & mesh, MeshingParameters & mp);

#ifdef PARALLEL
  /** we need allreduce in python-wrapped communicators **/
  template <typename T>
  inline T MyMPI_AllReduceNG (T d, const MPI_Op & op /* = MPI_SUM */, MPI_Comm comm)
  {
    T global_d;
    MPI_Allreduce ( &d, &global_d, 1, MyGetMPIType<T>(), op, comm);
    return global_d;
  }
#else
  // enum { MPI_SUM = 0, MPI_MIN = 1, MPI_MAX = 2 };
  // typedef int MPI_Op;
  template <typename T>
  inline T MyMPI_AllReduceNG (T d, const MPI_Op & op /* = MPI_SUM */, MPI_Comm comm)
  { return d; }
#endif
}
@@ -140,15 +123,15 @@ DLL_HEADER void ExportNetgenMeshing(py::module &m)
#else
    .def("WTime", [](NgMPI_Comm & c) { return -1.0; })
#endif
    .def("Sum", [](NgMPI_Comm & c, double x) { return MyMPI_AllReduceNG(x, MPI_SUM, c); })
    .def("Min", [](NgMPI_Comm & c, double x) { return MyMPI_AllReduceNG(x, MPI_MIN, c); })
    .def("Max", [](NgMPI_Comm & c, double x) { return MyMPI_AllReduceNG(x, MPI_MAX, c); })
    .def("Sum", [](NgMPI_Comm & c, int x) { return MyMPI_AllReduceNG(x, MPI_SUM, c); })
    .def("Min", [](NgMPI_Comm & c, int x) { return MyMPI_AllReduceNG(x, MPI_MIN, c); })
    .def("Max", [](NgMPI_Comm & c, int x) { return MyMPI_AllReduceNG(x, MPI_MAX, c); })
    .def("Sum", [](NgMPI_Comm & c, size_t x) { return MyMPI_AllReduceNG(x, MPI_SUM, c); })
    .def("Min", [](NgMPI_Comm & c, size_t x) { return MyMPI_AllReduceNG(x, MPI_MIN, c); })
    .def("Max", [](NgMPI_Comm & c, size_t x) { return MyMPI_AllReduceNG(x, MPI_MAX, c); })
    .def("Sum", [](NgMPI_Comm & c, double x) { return c.AllReduce(x, MPI_SUM); })
    .def("Min", [](NgMPI_Comm & c, double x) { return c.AllReduce(x, MPI_MIN); })
    .def("Max", [](NgMPI_Comm & c, double x) { return c.AllReduce(x, MPI_MAX); })
    .def("Sum", [](NgMPI_Comm & c, int x) { return c.AllReduce(x, MPI_SUM); })
    .def("Min", [](NgMPI_Comm & c, int x) { return c.AllReduce(x, MPI_MIN); })
    .def("Max", [](NgMPI_Comm & c, int x) { return c.AllReduce(x, MPI_MAX); })
    .def("Sum", [](NgMPI_Comm & c, size_t x) { return c.AllReduce(x, MPI_SUM); })
    .def("Min", [](NgMPI_Comm & c, size_t x) { return c.AllReduce(x, MPI_MIN); })
    .def("Max", [](NgMPI_Comm & c, size_t x) { return c.AllReduce(x, MPI_MAX); })
    .def("SubComm", [](NgMPI_Comm & c, std::vector<int> proc_list) {
        Array<int> procs(proc_list.size());
        for (int i = 0; i < procs.Size(); i++)