Mirror of https://github.com/NGSolve/netgen.git (synced 2025-01-13 14:40:35 +05:00)
Commit 584f4506a9

@@ -16,35 +16,29 @@ namespace netgen

   void MyMPI_SendCmd (const char * cmd)
   {
-    char buf[10000];
-    strcpy (buf, cmd);
-    // MPI_Bcast (&buf, 100, MPI_CHAR, 0, MPI_COMM_WORLD);
+    int ntasks;
+    MPI_Comm_size(MPI_COMM_WORLD, &ntasks);
+
+    if(ntasks==1)
+      return;

     for (int dest = 1; dest < ntasks; dest++)
-      MPI_Send( &buf, 10000, MPI_CHAR, dest, MPI_TAG_CMD, MPI_COMM_WORLD);
+      MPI_Send( cmd, (strlen(cmd)+1), MPI_CHAR, dest, MPI_TAG_CMD, MPI_COMM_WORLD);
   }

   string MyMPI_RecvCmd ()
   {
-    char buf[10000];
-    // MPI_Bcast (&buf, 100, MPI_CHAR, 0, MPI_COMM_WORLD);
-    // VT_OFF();
     MPI_Status status;
     int flag;
-    do
-      {
-        MPI_Iprobe (0, MPI_TAG_CMD, MPI_COMM_WORLD, &flag, &status);
-        if (!flag)
-          {
-            VT_TRACER ("sleep");
-            usleep (1000);
-          }
-      }
-    while (!flag);
-    // VT_ON();
-    MPI_Recv( &buf, 10000, MPI_CHAR, 0, MPI_TAG_CMD, MPI_COMM_WORLD, &status);
+    int size_of_msg = -1;
+
+    MPI_Probe(0, MPI_TAG_CMD, MPI_COMM_WORLD, &status);
+    MPI_Get_count(&status, MPI_CHAR, &size_of_msg);
+
+    //char* buf = (char*)malloc(size_of_msg*sizeof(char));
+    char buf[100000]; //1MB should be enough...
+
+    MPI_Recv( &buf, size_of_msg, MPI_CHAR, 0, MPI_TAG_CMD, MPI_COMM_WORLD, &status);

     return string(buf);
   }
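Note on this hunk: the MPI_Iprobe/usleep polling loop and the fixed 10000-byte
receive buffer are replaced by a blocking MPI_Probe followed by MPI_Get_count, so
the receiver learns the exact message length before posting the receive. Two
caveats remain in the committed code: char buf[100000] is about 100 KB, not the
1 MB the comment claims, and a command longer than that would still overflow.
Reduced to its core, the probe-then-receive pattern looks like this minimal
sketch (assumes an initialized MPI environment; the function name and tag
parameter are illustrative, not netgen API):

    #include <mpi.h>
    #include <string>
    #include <vector>

    // Receive a NUL-terminated command of unknown length from rank 0.
    std::string RecvCmdSketch (int tag)
    {
      MPI_Status status;
      MPI_Probe (0, tag, MPI_COMM_WORLD, &status);      // block until a message is pending

      int size_of_msg = 0;
      MPI_Get_count (&status, MPI_CHAR, &size_of_msg);  // exact char count of that message

      std::vector<char> buf (size_of_msg);              // exactly-sized buffer, no overflow
      MPI_Recv (buf.data(), size_of_msg, MPI_CHAR, 0, tag,
                MPI_COMM_WORLD, MPI_STATUS_IGNORE);

      // The sender transmits strlen(cmd)+1 chars, so the terminating NUL is included.
      return std::string (buf.data());
    }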
@@ -577,8 +577,8 @@ namespace netgen

     PrintMessage (1, "Curve elements, order = ", aorder);
     if (rational) PrintMessage (1, "curved elements with rational splines");

-    if (working)
+    // if (working)
     const_cast<Mesh&> (mesh).UpdateTopology();
     const MeshTopology & top = mesh.GetTopology();

     rational = arational;
@@ -527,6 +527,55 @@ namespace netgen

     MPI_Waitall (sendrequests.Size(), &sendrequests[0], MPI_STATUS_IGNORE);

+    PrintMessage ( 3, "Sending domain+bc - names");
+
+    sendrequests.SetSize(6*(ntasks-1));
+    /** Send bc-names **/
+    int nbcs = bcnames.Size();
+    Array<int> bcname_sizes(nbcs);
+    int tot_bcsize = 0;
+    for(int k=0;k<nbcs;k++) {
+      bcname_sizes[k] = bcnames[k]->size();
+      tot_bcsize += bcname_sizes[k];
+    }
+    char compiled_bcnames[tot_bcsize];
+    tot_bcsize = 0;
+    for(int k=0;k<nbcs;k++)
+      for(int j=0;j<bcname_sizes[k];j++)
+        compiled_bcnames[tot_bcsize++] = (*bcnames[k])[j];
+
+    for(int k=1;k<ntasks;k++) {
+      sendrequests[6*(k-1)] = MyMPI_ISend(FlatArray<int>(1, &nbcs), k, MPI_TAG_MESH+6);
+      sendrequests[6*(k-1)+1] = MyMPI_ISend(bcname_sizes, k, MPI_TAG_MESH+6);
+      (void) MPI_Isend(compiled_bcnames, tot_bcsize, MPI_CHAR, k, MPI_TAG_MESH+6, MPI_COMM_WORLD, &sendrequests[6*(k-1)+2]);
+    }
+
+    /** Send mat-names **/
+    int nmats = materials.Size();
+    Array<int> mat_sizes(nmats);
+    int tot_matsize = 0;
+    for(int k=0;k<nmats;k++) {
+      mat_sizes[k] = materials[k]->size();
+      tot_matsize += mat_sizes[k];
+    }
+    char compiled_mats[tot_matsize];
+    tot_matsize = 0;
+    for(int k=0;k<nmats;k++)
+      for(int j=0;j<mat_sizes[k];j++)
+        compiled_mats[tot_matsize++] = (*materials[k])[j];
+    for(int k=1;k<ntasks;k++) {
+      sendrequests[6*(k-1)+3] = MyMPI_ISend(FlatArray<int>(1, &nmats), k, MPI_TAG_MESH+6);
+      sendrequests[6*(k-1)+4] = MyMPI_ISend(mat_sizes, k, MPI_TAG_MESH+6);
+      (void) MPI_Isend(compiled_mats, tot_matsize, MPI_CHAR, k, MPI_TAG_MESH+6, MPI_COMM_WORLD, &sendrequests[6*(k-1)+5]);
+    }
+
+    /* now wait ... **/
+    PrintMessage( 3, "now wait for domain+bc - names");
+
+    MPI_Waitall (sendrequests.Size(), &sendrequests[0], MPI_STATUS_IGNORE);
+
+    PrintMessage( 3, "send mesh complete");
+
     MPI_Barrier(MPI_COMM_WORLD);
   }

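Note on this hunk: each worker receives the boundary-condition and material names
as three messages — a count, a table of string lengths, and one flat char payload —
posted with non-blocking sends and completed by a single MPI_Waitall. Reusing one
tag for all three is safe because MPI preserves point-to-point message order. The
committed code packs into a C variable-length array (char compiled_bcnames[tot_bcsize]),
a compiler extension rather than standard C++; the portable equivalent with
std::vector looks like this minimal sketch (assumes an initialized MPI
environment; names are illustrative, not netgen API):

    #include <mpi.h>
    #include <cstring>
    #include <string>
    #include <vector>

    // Flatten a list of names and send count / size table / payload to all workers.
    void SendNamesSketch (const std::vector<std::string> & names, int ntasks, int tag)
    {
      int n = (int)names.size();
      std::vector<int> sizes (n);
      int total = 0;
      for (int k = 0; k < n; k++)
        { sizes[k] = (int)names[k].size(); total += sizes[k]; }

      std::vector<char> flat (total);   // names back-to-back; sizes[] restores boundaries
      int pos = 0;
      for (int k = 0; k < n; k++)
        { std::memcpy (flat.data()+pos, names[k].data(), sizes[k]); pos += sizes[k]; }

      std::vector<MPI_Request> requests;
      for (int dest = 1; dest < ntasks; dest++)
        {
          MPI_Request r;
          MPI_Isend (&n, 1, MPI_INT, dest, tag, MPI_COMM_WORLD, &r);
          requests.push_back (r);
          MPI_Isend (sizes.data(), n, MPI_INT, dest, tag, MPI_COMM_WORLD, &r);
          requests.push_back (r);
          MPI_Isend (flat.data(), total, MPI_CHAR, dest, tag, MPI_COMM_WORLD, &r);
          requests.push_back (r);
        }
      // All send buffers stay alive in this scope until every request completes.
      MPI_Waitall ((int)requests.size(), requests.data(), MPI_STATUSES_IGNORE);
    }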
@@ -698,6 +747,42 @@ namespace netgen

       }
     }

+    /** Recv bc-names **/
+    int nbcs;
+    MyMPI_Recv(nbcs, 0, MPI_TAG_MESH+6);
+    Array<int> bcs(nbcs);
+    MyMPI_Recv(bcs, 0, MPI_TAG_MESH+6);
+    int size_bc = 0;
+    for(int k=0;k<nbcs;k++)
+      size_bc += bcs[k];
+    char compiled_bcnames[size_bc];
+    MPI_Recv(compiled_bcnames, size_bc, MPI_CHAR, 0, MPI_TAG_MESH+6, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
+
+    SetNBCNames(nbcs);
+    int cnt = 0;
+    for(int k=0;k<nbcs;k++) {
+      SetBCName(k, string(&compiled_bcnames[cnt], bcs[k]));
+      cnt += bcs[k];
+    }
+
+    /** Recv mat-names **/
+    int nmats;
+    MyMPI_Recv(nmats, 0, MPI_TAG_MESH+6);
+    Array<int> matsz(nmats);
+    MyMPI_Recv(matsz, 0, MPI_TAG_MESH+6);
+    int size_mats = 0;
+    for(int k=0;k<nmats;k++)
+      size_mats += matsz[k];
+    char compiled_mats[size_mats];
+    MPI_Recv(compiled_mats, size_mats, MPI_CHAR, 0, MPI_TAG_MESH+6, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
+    cnt = 0;
+    materials.SetSize(nmats);
+    for(int k=0;k<nmats;k++) {
+      // setmaterial is 1-based ...
+      SetMaterial(k+1, string(&compiled_mats[cnt], matsz[k]));
+      cnt += matsz[k];
+    }
+
     MPI_Barrier(MPI_COMM_WORLD);

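Note on this hunk: the worker side mirrors the packing exactly — count, size
table, flat buffer — and slices the buffer back into strings (SetBCName is
0-based while, as the committed comment notes, SetMaterial is 1-based, hence the
k+1). The unpacking counterpart to the send-side sketch above (same assumptions,
illustrative names):

    #include <mpi.h>
    #include <string>
    #include <vector>

    // Receive count / size table / payload from rank 0 and rebuild the strings.
    std::vector<std::string> RecvNamesSketch (int tag)
    {
      int n = 0;
      MPI_Recv (&n, 1, MPI_INT, 0, tag, MPI_COMM_WORLD, MPI_STATUS_IGNORE);

      std::vector<int> sizes (n);
      MPI_Recv (sizes.data(), n, MPI_INT, 0, tag, MPI_COMM_WORLD, MPI_STATUS_IGNORE);

      int total = 0;
      for (int k = 0; k < n; k++) total += sizes[k];

      std::vector<char> flat (total);
      MPI_Recv (flat.data(), total, MPI_CHAR, 0, tag, MPI_COMM_WORLD, MPI_STATUS_IGNORE);

      // Slice the flat buffer back into individual names via the size table.
      std::vector<std::string> names (n);
      int pos = 0;
      for (int k = 0; k < n; k++)
        { names[k] = std::string (flat.data()+pos, sizes[k]); pos += sizes[k]; }
      return names;
    }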
@@ -112,7 +112,7 @@ namespace netgen

   void ParallelMeshTopology :: UpdateCoarseGridGlobal ()
   {
-    cout << "updatecoarsegridglobal called" << endl;
+    // cout << "updatecoarsegridglobal called" << endl;
     if (id == 0)
       PrintMessage ( 3, "UPDATE GLOBAL COARSEGRID STARTS" );

@@ -203,7 +203,7 @@ namespace netgen

   void ParallelMeshTopology :: UpdateCoarseGrid ()
   {
-    cout << "UpdateCoarseGrid" << endl;
+    // cout << "UpdateCoarseGrid" << endl;
     // if (is_updated) return;

     Reset();
@@ -241,7 +241,7 @@ namespace netgen
     // update new vertices after mesh-refinement
     if (mesh.mlbetweennodes.Size() > 0)
       {
-        cout << "UpdateCoarseGrid - vertices" << endl;
+        // cout << "UpdateCoarseGrid - vertices" << endl;
         int newnv = mesh.mlbetweennodes.Size();
         loc2distvert.ChangeSize(mesh.mlbetweennodes.Size());
         /*
@@ -376,7 +376,7 @@ namespace netgen
     }

     Array<int> sendarray, recvarray;
-    cout << "UpdateCoarseGrid - edges" << endl;
+    // cout << "UpdateCoarseGrid - edges" << endl;

     // static int timerv = NgProfiler::CreateTimer ("UpdateCoarseGrid - ex vertices");
     static int timere = NgProfiler::CreateTimer ("UpdateCoarseGrid - ex edges");
@@ -442,10 +442,10 @@ namespace netgen
       }
     }

-    cout << "UpdateCoarseGrid - edges mpi-exchange" << endl;
+    // cout << "UpdateCoarseGrid - edges mpi-exchange" << endl;
     TABLE<int> recv_edges(ntasks-1);
     MyMPI_ExchangeTable (send_edges, recv_edges, MPI_TAG_MESH+9, MPI_LocalComm);
-    cout << "UpdateCoarseGrid - edges mpi-exchange done" << endl;
+    // cout << "UpdateCoarseGrid - edges mpi-exchange done" << endl;

     /*
     for (int dest = 1; dest < ntasks; dest++)
@@ -493,7 +493,7 @@ namespace netgen

     // MPI_Barrier (MPI_LocalComm);

-    cout << "UpdateCoarseGrid - faces" << endl;
+    // cout << "UpdateCoarseGrid - faces" << endl;
     if (mesh.GetDimension() == 3)
       {
         NgProfiler::StartTimer (timerf);
@@ -558,10 +558,10 @@ namespace netgen
       }
     }

-    cout << "UpdateCoarseGrid - faces mpi-exchange" << endl;
+    // cout << "UpdateCoarseGrid - faces mpi-exchange" << endl;
     TABLE<int> recv_faces(ntasks-1);
     MyMPI_ExchangeTable (send_faces, recv_faces, MPI_TAG_MESH+9, MPI_LocalComm);
-    cout << "UpdateCoarseGrid - faces mpi-exchange done" << endl;
+    // cout << "UpdateCoarseGrid - faces mpi-exchange done" << endl;

     /*
     for (int dest = 1; dest < ntasks; dest++)
@@ -682,7 +682,7 @@ namespace netgen

       NgProfiler::StopTimer (timerf);
     }
-    cout << "UpdateCoarseGrid - done" << endl;
+    // cout << "UpdateCoarseGrid - done" << endl;

     is_updated = true;
   }
@@ -423,26 +423,48 @@ DLL_HEADER void ExportNetgenMeshing(py::module &m)
     .def("Load", FunctionPointer
          ([](Mesh & self, const string & filename)
           {
             istream * infile;

+#ifdef PARALLEL
+            MPI_Comm_rank(MPI_COMM_WORLD, &id);
+            MPI_Comm_size(MPI_COMM_WORLD, &ntasks);
+
+            char* buf = nullptr;
+            int strs = 0;
+            if(id==0) {
+#endif
             if (filename.find(".vol.gz") != string::npos)
               infile = new igzstream (filename.c_str());
             else
               infile = new ifstream (filename.c_str());
             // ifstream input(filename);
 #ifdef PARALLEL
-            // int id;
-            MPI_Comm_rank(MPI_COMM_WORLD, &id);
-            MPI_Comm_size(MPI_COMM_WORLD, &ntasks);
-
-            if (id == 0)
-              {
-                self.Load(*infile);
+            //still inside id==0-bracket...
+            self.Load(*infile);
             self.Distribute();
+
+            /** Copy the rest of the file into a string (for geometry) **/
+            stringstream geom_part;
+            geom_part << infile->rdbuf();
+            string geom_part_string = geom_part.str();
+            strs = geom_part_string.size();
+            buf = new char[strs];
+            memcpy(buf, geom_part_string.c_str(), strs*sizeof(char));
             }
-            else
-              {
-                self.SendRecvMesh();
-              }
+            else {
+              self.SendRecvMesh();
+            }
+
+            /** Scatter the geometry-string **/
+            MPI_Bcast(&strs, 1, MPI_INT, 0, MPI_COMM_WORLD);
+            if(id!=0)
+              buf = new char[strs];
+            MPI_Bcast(buf, strs, MPI_CHAR, 0, MPI_COMM_WORLD);
+            if(id==0)
+              delete infile;
+            infile = new istringstream(string((const char*)buf, (size_t)strs));
+            delete[] buf;
+
 #else
             self.Load(*infile);
 #endif
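Note on this hunk: on parallel runs only rank 0 opens the file; it loads and
distributes the mesh, then reads the remaining geometry section into a string and
ships it to the workers with a two-step broadcast — length first, so receivers can
allocate, then the bytes — after which every rank rebuilds infile as an
istringstream and parsing continues identically everywhere. The broadcast core,
as a minimal sketch (assumes an initialized MPI environment; BcastStringSketch is
an illustrative helper, not netgen API):

    #include <mpi.h>
    #include <algorithm>
    #include <string>
    #include <vector>

    // Rank 0 passes the real string; the other ranks get their copy filled in.
    std::string BcastStringSketch (const std::string & s, int id)
    {
      int len = (id == 0) ? (int)s.size() : 0;
      MPI_Bcast (&len, 1, MPI_INT, 0, MPI_COMM_WORLD);            // step 1: length

      std::vector<char> buf (len + 1, '\0');                      // +1 keeps data() valid for len==0
      if (id == 0)
        std::copy (s.begin(), s.end(), buf.begin());
      MPI_Bcast (buf.data(), len, MPI_CHAR, 0, MPI_COMM_WORLD);   // step 2: payload

      return std::string (buf.data(), len);
    }

The committed version manages the buffer manually (new char[strs] / delete[] buf)
and deletes the original file stream on rank 0 before repointing infile; the
sketch sidesteps that manual ownership with std::vector.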
@@ -456,6 +478,10 @@ DLL_HEADER void ExportNetgenMeshing(py::module &m)
                   break;
                 }
             }
+            if (!ng_geometry)
+              ng_geometry = make_shared<NetgenGeometry>();
+            self.SetGeometry(ng_geometry);
+            delete infile;
           }))
     // static_cast<void(Mesh::*)(const string & name)>(&Mesh::Load))
     .def("Save", static_cast<void(Mesh::*)(const string & name)const>(&Mesh::Save))