Merge branch 'master' of data.asc.tuwien.ac.at:jschoeberl/netgen

This commit is contained in:
Joachim Schöberl 2017-06-23 22:10:14 +02:00
commit 5d62946130
6 changed files with 154 additions and 44 deletions

View File

@@ -65,6 +65,11 @@ elseif(WIN32)
LOG_DOWNLOAD 1
)
set (TK_INCLUDE_PATH ${CMAKE_INSTALL_PREFIX}/include)
set (TCL_INCLUDE_PATH ${CMAKE_INSTALL_PREFIX}/include)
set (TCL_LIBRARY ${CMAKE_INSTALL_PREFIX}/lib/tcl86.lib)
set (TK_LIBRARY ${CMAKE_INSTALL_PREFIX}/lib/tk86.lib)
list(APPEND NETGEN_DEPENDENCIES project_win_extlibs)
else(WIN32)
find_package(TCL 8.5 REQUIRED)

View File

@@ -16,35 +16,29 @@ namespace netgen
/// Broadcast a command string from the master (rank 0) to every worker rank.
/// Sends the string (including its terminating '\0', so the receiver can
/// reconstruct it with strlen semantics) as a point-to-point message to each
/// rank; a collective MPI_Bcast is deliberately not used here, since workers
/// poll for commands with MPI_Iprobe (see MyMPI_RecvCmd).
void MyMPI_SendCmd (const char * cmd)
{
  int ntasks;
  MPI_Comm_size(MPI_COMM_WORLD, &ntasks);
  if(ntasks==1)
    return;  // running serially: nobody to notify

  // strlen+1 so the '\0' terminator travels with the message.
  for (int dest = 1; dest < ntasks; dest++)
    MPI_Send( cmd, (strlen(cmd)+1), MPI_CHAR, dest, MPI_TAG_CMD, MPI_COMM_WORLD);
}
/// Worker-side counterpart of MyMPI_SendCmd: block until a command string
/// arrives from the master (rank 0) and return it.
/// Polls with MPI_Iprobe + usleep instead of a blocking probe so the process
/// stays responsive (and, per the VT_TRACER marker, so tracing tools can see
/// the idle time). Once a message is pending, its exact size is queried with
/// MPI_Probe/MPI_Get_count and the buffer is sized to match, so commands of
/// any length are received without overflow.
string MyMPI_RecvCmd ()
{
  MPI_Status status;
  int flag;
  do
    {
      MPI_Iprobe (0, MPI_TAG_CMD, MPI_COMM_WORLD, &flag, &status);
      if (!flag)
        {
          VT_TRACER ("sleep");
          usleep (1000);   // 1 ms back-off between polls
        }
    }
  while (!flag);

  // Message is pending: ask for its exact length, then receive into a
  // buffer of exactly that size (the sender included the '\0' terminator).
  int size_of_msg = -1;
  MPI_Probe(0, MPI_TAG_CMD, MPI_COMM_WORLD, &status);
  MPI_Get_count(&status, MPI_CHAR, &size_of_msg);
  std::vector<char> buf(size_of_msg);
  MPI_Recv( buf.data(), size_of_msg, MPI_CHAR, 0, MPI_TAG_CMD, MPI_COMM_WORLD, &status);
  return string(buf.data());
}

View File

@@ -577,8 +577,8 @@ namespace netgen
PrintMessage (1, "Curve elements, order = ", aorder);
if (rational) PrintMessage (1, "curved elements with rational splines");
if (working)
const_cast<Mesh&> (mesh).UpdateTopology();
// if (working)
const_cast<Mesh&> (mesh).UpdateTopology();
const MeshTopology & top = mesh.GetTopology();
rational = arational;

View File

@@ -527,6 +527,55 @@ namespace netgen
MPI_Waitall (sendrequests.Size(), &sendrequests[0], MPI_STATUS_IGNORE);
PrintMessage ( 3, "Sending domain+bc - names");
sendrequests.SetSize(6*(ntasks-1));
/** Send bc-names **/
int nbcs = bcnames.Size();
Array<int> bcname_sizes(nbcs);
int tot_bcsize = 0;
for(int k=0;k<nbcs;k++) {
bcname_sizes[k] = bcnames[k]->size();
tot_bcsize += bcname_sizes[k];
}
char compiled_bcnames[tot_bcsize];
tot_bcsize = 0;
for(int k=0;k<nbcs;k++)
for(int j=0;j<bcname_sizes[k];j++)
compiled_bcnames[tot_bcsize++] = (*bcnames[k])[j];
for(int k=1;k<ntasks;k++) {
sendrequests[6*(k-1)] = MyMPI_ISend(FlatArray<int>(1, &nbcs), k, MPI_TAG_MESH+6);
sendrequests[6*(k-1)+1] = MyMPI_ISend(bcname_sizes, k, MPI_TAG_MESH+6);
(void) MPI_Isend(compiled_bcnames, tot_bcsize, MPI_CHAR, k, MPI_TAG_MESH+6, MPI_COMM_WORLD, &sendrequests[6*(k-1)+2]);
}
/** Send mat-names **/
int nmats = materials.Size();
Array<int> mat_sizes(nmats);
int tot_matsize = 0;
for(int k=0;k<nmats;k++) {
mat_sizes[k] = materials[k]->size();
tot_matsize += mat_sizes[k];
}
char compiled_mats[tot_matsize];
tot_matsize = 0;
for(int k=0;k<nmats;k++)
for(int j=0;j<mat_sizes[k];j++)
compiled_mats[tot_matsize++] = (*materials[k])[j];
for(int k=1;k<ntasks;k++) {
sendrequests[6*(k-1)+3] = MyMPI_ISend(FlatArray<int>(1, &nmats), k, MPI_TAG_MESH+6);
sendrequests[6*(k-1)+4] = MyMPI_ISend(mat_sizes, k, MPI_TAG_MESH+6);
(void) MPI_Isend(compiled_mats, tot_matsize, MPI_CHAR, k, MPI_TAG_MESH+6, MPI_COMM_WORLD, &sendrequests[6*(k-1)+5]);
}
/* now wait ... **/
PrintMessage( 3, "now wait for domain+bc - names");
MPI_Waitall (sendrequests.Size(), &sendrequests[0], MPI_STATUS_IGNORE);
PrintMessage( 3, "send mesh complete");
MPI_Barrier(MPI_COMM_WORLD);
}
@@ -698,6 +747,42 @@ namespace netgen
}
}
/** Recv bc-names **/
int nbcs;
MyMPI_Recv(nbcs, 0, MPI_TAG_MESH+6);
Array<int> bcs(nbcs);
MyMPI_Recv(bcs, 0, MPI_TAG_MESH+6);
int size_bc = 0;
for(int k=0;k<nbcs;k++)
size_bc += bcs[k];
char compiled_bcnames[size_bc];
MPI_Recv(compiled_bcnames, size_bc, MPI_CHAR, 0, MPI_TAG_MESH+6, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
SetNBCNames(nbcs);
int cnt = 0;
for(int k=0;k<nbcs;k++) {
SetBCName(k, string(&compiled_bcnames[cnt], bcs[k]));
cnt += bcs[k];
}
/** Recv mat-names **/
int nmats;
MyMPI_Recv(nmats, 0, MPI_TAG_MESH+6);
Array<int> matsz(nmats);
MyMPI_Recv(matsz, 0, MPI_TAG_MESH+6);
int size_mats = 0;
for(int k=0;k<nmats;k++)
size_mats += matsz[k];
char compiled_mats[size_mats];
MPI_Recv(compiled_mats, size_mats, MPI_CHAR, 0, MPI_TAG_MESH+6, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
cnt = 0;
materials.SetSize(nmats);
for(int k=0;k<nmats;k++) {
// setmaterial is 1-based ...
SetMaterial(k+1, string(&compiled_mats[cnt], matsz[k]));
cnt += matsz[k];
}
MPI_Barrier(MPI_COMM_WORLD);

View File

@@ -112,7 +112,7 @@ namespace netgen
void ParallelMeshTopology :: UpdateCoarseGridGlobal ()
{
cout << "updatecoarsegridglobal called" << endl;
// cout << "updatecoarsegridglobal called" << endl;
if (id == 0)
PrintMessage ( 3, "UPDATE GLOBAL COARSEGRID STARTS" );
@@ -203,7 +203,7 @@ namespace netgen
void ParallelMeshTopology :: UpdateCoarseGrid ()
{
cout << "UpdateCoarseGrid" << endl;
// cout << "UpdateCoarseGrid" << endl;
// if (is_updated) return;
Reset();
@@ -241,7 +241,7 @@ namespace netgen
// update new vertices after mesh-refinement
if (mesh.mlbetweennodes.Size() > 0)
{
cout << "UpdateCoarseGrid - vertices" << endl;
// cout << "UpdateCoarseGrid - vertices" << endl;
int newnv = mesh.mlbetweennodes.Size();
loc2distvert.ChangeSize(mesh.mlbetweennodes.Size());
/*
@@ -376,7 +376,7 @@ namespace netgen
}
Array<int> sendarray, recvarray;
cout << "UpdateCoarseGrid - edges" << endl;
// cout << "UpdateCoarseGrid - edges" << endl;
// static int timerv = NgProfiler::CreateTimer ("UpdateCoarseGrid - ex vertices");
static int timere = NgProfiler::CreateTimer ("UpdateCoarseGrid - ex edges");
@@ -442,10 +442,10 @@ namespace netgen
}
}
cout << "UpdateCoarseGrid - edges mpi-exchange" << endl;
// cout << "UpdateCoarseGrid - edges mpi-exchange" << endl;
TABLE<int> recv_edges(ntasks-1);
MyMPI_ExchangeTable (send_edges, recv_edges, MPI_TAG_MESH+9, MPI_LocalComm);
cout << "UpdateCoarseGrid - edges mpi-exchange done" << endl;
// cout << "UpdateCoarseGrid - edges mpi-exchange done" << endl;
/*
for (int dest = 1; dest < ntasks; dest++)
@@ -493,7 +493,7 @@ namespace netgen
// MPI_Barrier (MPI_LocalComm);
cout << "UpdateCoarseGrid - faces" << endl;
// cout << "UpdateCoarseGrid - faces" << endl;
if (mesh.GetDimension() == 3)
{
NgProfiler::StartTimer (timerf);
@@ -558,10 +558,10 @@ namespace netgen
}
}
cout << "UpdateCoarseGrid - faces mpi-exchange" << endl;
// cout << "UpdateCoarseGrid - faces mpi-exchange" << endl;
TABLE<int> recv_faces(ntasks-1);
MyMPI_ExchangeTable (send_faces, recv_faces, MPI_TAG_MESH+9, MPI_LocalComm);
cout << "UpdateCoarseGrid - faces mpi-exchange done" << endl;
// cout << "UpdateCoarseGrid - faces mpi-exchange done" << endl;
/*
for (int dest = 1; dest < ntasks; dest++)
@@ -682,7 +682,7 @@ namespace netgen
NgProfiler::StopTimer (timerf);
}
cout << "UpdateCoarseGrid - done" << endl;
// cout << "UpdateCoarseGrid - done" << endl;
is_updated = true;
}

View File

@@ -423,26 +423,48 @@ DLL_HEADER void ExportNetgenMeshing(py::module &m)
.def("Load", FunctionPointer
([](Mesh & self, const string & filename)
{
istream * infile;
istream * infile;
#ifdef PARALLEL
MPI_Comm_rank(MPI_COMM_WORLD, &id);
MPI_Comm_size(MPI_COMM_WORLD, &ntasks);
char* buf = nullptr;
int strs = 0;
if(id==0) {
#endif
if (filename.find(".vol.gz") != string::npos)
infile = new igzstream (filename.c_str());
else
infile = new ifstream (filename.c_str());
// ifstream input(filename);
#ifdef PARALLEL
// int id;
MPI_Comm_rank(MPI_COMM_WORLD, &id);
MPI_Comm_size(MPI_COMM_WORLD, &ntasks);
if (id == 0)
{
self.Load(*infile);
//still inside id==0-bracket...
self.Load(*infile);
self.Distribute();
/** Copy the rest of the file into a string (for geometry) **/
stringstream geom_part;
geom_part << infile->rdbuf();
string geom_part_string = geom_part.str();
strs = geom_part_string.size();
buf = new char[strs];
memcpy(buf, geom_part_string.c_str(), strs*sizeof(char));
}
else
{
self.SendRecvMesh();
}
else {
self.SendRecvMesh();
}
/** Scatter the geometry-string **/
MPI_Bcast(&strs, 1, MPI_INT, 0, MPI_COMM_WORLD);
if(id!=0)
buf = new char[strs];
MPI_Bcast(buf, strs, MPI_CHAR, 0, MPI_COMM_WORLD);
if(id==0)
delete infile;
infile = new istringstream(string((const char*)buf, (size_t)strs));
delete[] buf;
#else
self.Load(*infile);
#endif
@@ -456,6 +478,10 @@ DLL_HEADER void ExportNetgenMeshing(py::module &m)
break;
}
}
if (!ng_geometry)
ng_geometry = make_shared<NetgenGeometry>();
self.SetGeometry(ng_geometry);
delete infile;
}))
// static_cast<void(Mesh::*)(const string & name)>(&Mesh::Load))
.def("Save", static_cast<void(Mesh::*)(const string & name)const>(&Mesh::Save))