diff --git a/cmake/external_projects/tcltk.cmake b/cmake/external_projects/tcltk.cmake
index 6be407b3..7a939539 100644
--- a/cmake/external_projects/tcltk.cmake
+++ b/cmake/external_projects/tcltk.cmake
@@ -65,6 +65,11 @@ elseif(WIN32)
     LOG_DOWNLOAD 1
     )
 
+  set (TK_INCLUDE_PATH ${CMAKE_INSTALL_PREFIX}/include)
+  set (TCL_INCLUDE_PATH ${CMAKE_INSTALL_PREFIX}/include)
+  set (TCL_LIBRARY ${CMAKE_INSTALL_PREFIX}/lib/tcl86.lib)
+  set (TK_LIBRARY ${CMAKE_INSTALL_PREFIX}/lib/tk86.lib)
+
   list(APPEND NETGEN_DEPENDENCIES project_win_extlibs)
 else(WIN32)
     find_package(TCL 8.5 REQUIRED)
diff --git a/libsrc/general/mpi_interface.cpp b/libsrc/general/mpi_interface.cpp
index 36df5542..8ea6a3c5 100644
--- a/libsrc/general/mpi_interface.cpp
+++ b/libsrc/general/mpi_interface.cpp
@@ -16,35 +16,29 @@ namespace netgen
 
   void MyMPI_SendCmd (const char * cmd)
   {
-    char buf[10000];
-    strcpy (buf, cmd);
-    // MPI_Bcast (&buf, 100, MPI_CHAR, 0, MPI_COMM_WORLD);
+    int ntasks;
+    MPI_Comm_size(MPI_COMM_WORLD, &ntasks);
+    if(ntasks==1)
+      return;
+
     for (int dest = 1; dest < ntasks; dest++)
-      MPI_Send( &buf, 10000, MPI_CHAR, dest, MPI_TAG_CMD, MPI_COMM_WORLD);
+      MPI_Send( cmd, (strlen(cmd)+1), MPI_CHAR, dest, MPI_TAG_CMD, MPI_COMM_WORLD);
   }
 
   string MyMPI_RecvCmd ()
   {
-    char buf[10000];
-    // MPI_Bcast (&buf, 100, MPI_CHAR, 0, MPI_COMM_WORLD);
-
-    // VT_OFF();
     MPI_Status status;
     int flag;
-    do
-      {
-        MPI_Iprobe (0, MPI_TAG_CMD, MPI_COMM_WORLD, &flag, &status);
-        if (!flag)
-          {
-            VT_TRACER ("sleep");
-            usleep (1000);
-          }
-      }
-    while (!flag);
-    // VT_ON();
+    int size_of_msg = -1;
 
-    MPI_Recv( &buf, 10000, MPI_CHAR, 0, MPI_TAG_CMD, MPI_COMM_WORLD, &status);
+    MPI_Probe(0, MPI_TAG_CMD, MPI_COMM_WORLD, &status);
+    MPI_Get_count(&status, MPI_CHAR, &size_of_msg);
+
+    //char* buf = (char*)malloc(size_of_msg*sizeof(char));
+    char buf[100000]; // 100 KB should be enough...
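
The MyMPI_RecvCmd rewrite above follows the standard probe-then-receive idiom: MPI_Probe blocks until a message is pending, MPI_Get_count reports its size, and only then is the buffer filled. A minimal self-contained sketch of that idiom (recv_string is a hypothetical helper, not netgen API; note the patch itself still receives into a fixed stack buffer rather than allocating exactly size_of_msg bytes):

    #include <mpi.h>
    #include <string>
    #include <vector>

    // Receive a '\0'-terminated command string of unknown length from rank src.
    std::string recv_string (int src, int tag)
    {
      MPI_Status status;
      int len = 0;
      MPI_Probe (src, tag, MPI_COMM_WORLD, &status);   // block until a message is pending
      MPI_Get_count (&status, MPI_CHAR, &len);         // number of chars in that message
      std::vector<char> buf(len);                      // allocate exactly what is needed
      MPI_Recv (buf.data(), len, MPI_CHAR, src, tag, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
      return std::string (buf.data());                 // sender included the trailing '\0'
    }
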
+
+    MPI_Recv( &buf, size_of_msg, MPI_CHAR, 0, MPI_TAG_CMD, MPI_COMM_WORLD, &status);
 
     return string(buf);
   }
diff --git a/libsrc/meshing/curvedelems.cpp b/libsrc/meshing/curvedelems.cpp
index 4b2238a7..16655e78 100644
--- a/libsrc/meshing/curvedelems.cpp
+++ b/libsrc/meshing/curvedelems.cpp
@@ -577,8 +577,8 @@ namespace netgen
     PrintMessage (1, "Curve elements, order = ", aorder);
     if (rational) PrintMessage (1, "curved elements with rational splines");
 
-    if (working)
-      const_cast<Mesh&> (mesh).UpdateTopology();
+    // if (working)
+    const_cast<Mesh&> (mesh).UpdateTopology();
     const MeshTopology & top = mesh.GetTopology();
 
     rational = arational;
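
For context on the const_cast above (illustrative stand-in types, not the real netgen classes): the curving code only holds a const Mesh &, but UpdateTopology() is a mutating member, so calling it through the const reference requires casting the constness away:

    // Stand-in for Mesh, just to show why the cast is needed.
    struct MeshLike
    {
      void UpdateTopology () { /* rebuild edge/face tables */ }
    };

    void curve_elements (const MeshLike & mesh)
    {
      // mesh.UpdateTopology();                        // error: mesh is const
      const_cast<MeshLike&> (mesh).UpdateTopology();   // pattern used in curvedelems.cpp
    }
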
diff --git a/libsrc/meshing/parallelmesh.cpp b/libsrc/meshing/parallelmesh.cpp
index a9a84207..50f64c1c 100644
--- a/libsrc/meshing/parallelmesh.cpp
+++ b/libsrc/meshing/parallelmesh.cpp
@@ -527,6 +527,55 @@ namespace netgen
 
     MPI_Waitall (sendrequests.Size(), &sendrequests[0], MPI_STATUS_IGNORE);
 
+    PrintMessage ( 3, "Sending domain+bc - names");
+
+    sendrequests.SetSize(6*(ntasks-1));
+    /** Send bc-names **/
+    int nbcs = bcnames.Size();
+    Array<int> bcname_sizes(nbcs);
+    int tot_bcsize = 0;
+    for(int k=0;k<nbcs;k++) {
+      bcname_sizes[k] = bcnames[k]->size();
+      tot_bcsize += bcname_sizes[k];
+    }
+    char compiled_bcnames[tot_bcsize];
+    tot_bcsize = 0;
+    for(int k=0;k<nbcs;k++)
+      for(int j=0;j<bcname_sizes[k];j++)
+        compiled_bcnames[tot_bcsize++] = (*bcnames[k])[j];
+
+    for(int k=1;k<ntasks;k++) {
+      sendrequests[6*(k-1)] = MyMPI_ISend(FlatArray<int>(1, &nbcs), k, MPI_TAG_MESH+6);
+      sendrequests[6*(k-1)+1] = MyMPI_ISend(bcname_sizes, k, MPI_TAG_MESH+6);
+      (void) MPI_Isend(compiled_bcnames, tot_bcsize, MPI_CHAR, k, MPI_TAG_MESH+6, MPI_COMM_WORLD, &sendrequests[6*(k-1)+2]);
+    }
+
+    /** Send mat-names **/
+    int nmats = materials.Size();
+    Array<int> mat_sizes(nmats);
+    int tot_matsize = 0;
+    for(int k=0;k<nmats;k++) {
+      mat_sizes[k] = materials[k]->size();
+      tot_matsize += mat_sizes[k];
+    }
+    char compiled_mats[tot_matsize];
+    tot_matsize = 0;
+    for(int k=0;k<nmats;k++)
+      for(int j=0;j<mat_sizes[k];j++)
+        compiled_mats[tot_matsize++] = (*materials[k])[j];
+    for(int k=1;k<ntasks;k++) {
+      sendrequests[6*(k-1)+3] = MyMPI_ISend(FlatArray<int>(1, &nmats), k, MPI_TAG_MESH+6);
+      sendrequests[6*(k-1)+4] = MyMPI_ISend(mat_sizes, k, MPI_TAG_MESH+6);
+      (void) MPI_Isend(compiled_mats, tot_matsize, MPI_CHAR, k, MPI_TAG_MESH+6, MPI_COMM_WORLD, &sendrequests[6*(k-1)+5]);
+    }
+
+    /* now wait ... **/
+    PrintMessage( 3, "now wait for domain+bc - names");
+
+    MPI_Waitall (sendrequests.Size(), &sendrequests[0], MPI_STATUS_IGNORE);
+
+    PrintMessage( 3, "send mesh complete");
+
     MPI_Barrier(MPI_COMM_WORLD);
   }
 
@@ -698,6 +747,42 @@ namespace netgen
       }
     }
 
+    /** Recv bc-names **/
+    int nbcs;
+    MyMPI_Recv(nbcs, 0, MPI_TAG_MESH+6);
+    Array<int> bcs(nbcs);
+    MyMPI_Recv(bcs, 0, MPI_TAG_MESH+6);
+    int size_bc = 0;
+    for(int k=0;k<nbcs;k++)
+      size_bc += bcs[k];
+    char compiled_bcnames[size_bc];
+    MPI_Recv(compiled_bcnames, size_bc, MPI_CHAR, 0, MPI_TAG_MESH+6, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
+
+    SetNBCNames(nbcs);
+    int cnt = 0;
+    for(int k=0;k<nbcs;k++) {
+      SetBCName(k, string(&compiled_bcnames[cnt], bcs[k]));
+      cnt += bcs[k];
+    }
+
+    /** Recv mat-names **/
+    int nmats;
+    MyMPI_Recv(nmats, 0, MPI_TAG_MESH+6);
+    Array<int> matsz(nmats);
+    MyMPI_Recv(matsz, 0, MPI_TAG_MESH+6);
+    int size_mats = 0;
+    for(int k=0;k<nmats;k++)
+      size_mats += matsz[k];
+    char compiled_mats[size_mats];
+    MPI_Recv(compiled_mats, size_mats, MPI_CHAR, 0, MPI_TAG_MESH+6, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
+
+    materials.SetSize(nmats);
+    cnt = 0;
+    for(int k=0;k<nmats;k++) {
+      materials[k] = new string(&compiled_mats[cnt], matsz[k]);
+      cnt += matsz[k];
+    }
+
diff --git a/libsrc/meshing/paralleltop.cpp b/libsrc/meshing/paralleltop.cpp
--- a/libsrc/meshing/paralleltop.cpp
+++ b/libsrc/meshing/paralleltop.cpp
@@ ... @@ namespace netgen
     if (mesh.mlbetweennodes.Size() > 0)
       {
-        cout << "UpdateCoarseGrid - vertices" << endl;
+        // cout << "UpdateCoarseGrid - vertices" << endl;
         int newnv = mesh.mlbetweennodes.Size();
         loc2distvert.ChangeSize(mesh.mlbetweennodes.Size());
         /*
@@ -376,7 +376,7 @@ namespace netgen
       }
 
     Array<int> sendarray, recvarray;
-    cout << "UpdateCoarseGrid - edges" << endl;
+    // cout << "UpdateCoarseGrid - edges" << endl;
 
     // static int timerv = NgProfiler::CreateTimer ("UpdateCoarseGrid - ex vertices");
     static int timere = NgProfiler::CreateTimer ("UpdateCoarseGrid - ex edges");
@@ -442,10 +442,10 @@ namespace netgen
       }
     }
 
-    cout << "UpdateCoarseGrid - edges mpi-exchange" << endl;
+    // cout << "UpdateCoarseGrid - edges mpi-exchange" << endl;
     TABLE<int> recv_edges(ntasks-1);
     MyMPI_ExchangeTable (send_edges, recv_edges, MPI_TAG_MESH+9, MPI_LocalComm);
-    cout << "UpdateCoarseGrid - edges mpi-exchange done" << endl;
+    // cout << "UpdateCoarseGrid - edges mpi-exchange done" << endl;
 
     /*
     for (int dest = 1; dest < ntasks; dest++)
@@ -493,7 +493,7 @@ namespace netgen
 
     // MPI_Barrier (MPI_LocalComm);
 
-    cout << "UpdateCoarseGrid - faces" << endl;
+    // cout << "UpdateCoarseGrid - faces" << endl;
     if (mesh.GetDimension() == 3)
       {
         NgProfiler::StartTimer (timerf);
@@ -558,10 +558,10 @@ namespace netgen
       }
     }
 
-    cout << "UpdateCoarseGrid - faces mpi-exchange" << endl;
+    // cout << "UpdateCoarseGrid - faces mpi-exchange" << endl;
     TABLE<int> recv_faces(ntasks-1);
     MyMPI_ExchangeTable (send_faces, recv_faces, MPI_TAG_MESH+9, MPI_LocalComm);
-    cout << "UpdateCoarseGrid - faces mpi-exchange done" << endl;
+    // cout << "UpdateCoarseGrid - faces mpi-exchange done" << endl;
 
     /*
     for (int dest = 1; dest < ntasks; dest++)
@@ -682,7 +682,7 @@ namespace netgen
         NgProfiler::StopTimer (timerf);
       }
 
-    cout << "UpdateCoarseGrid - done" << endl;
+    // cout << "UpdateCoarseGrid - done" << endl;
 
     is_updated = true;
   }
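
The bc-name/mat-name transfer above ships each list of strings as three messages: a count, an array of lengths, and one concatenated char buffer. A small sketch of that packing scheme with standard containers (pack_names/unpack_names are hypothetical helpers, not netgen API):

    #include <string>
    #include <vector>

    // Flatten strings into (lengths, characters) so they can be sent in two messages.
    void pack_names (const std::vector<std::string> & names,
                     std::vector<int> & sizes, std::vector<char> & chars)
    {
      sizes.resize(names.size());
      chars.clear();
      for (size_t k = 0; k < names.size(); k++)
        {
          sizes[k] = names[k].size();
          chars.insert(chars.end(), names[k].begin(), names[k].end());
        }
    }

    // Inverse: cut the received char buffer back into strings using the lengths.
    std::vector<std::string> unpack_names (const std::vector<int> & sizes,
                                           const std::vector<char> & chars)
    {
      std::vector<std::string> names(sizes.size());
      size_t cnt = 0;
      for (size_t k = 0; k < sizes.size(); k++)
        {
          names[k] = std::string(chars.data() + cnt, sizes[k]);
          cnt += sizes[k];
        }
      return names;
    }
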
diff --git a/libsrc/meshing/python_mesh.cpp b/libsrc/meshing/python_mesh.cpp
index 1ff9bf6c..7315bc7b 100644
--- a/libsrc/meshing/python_mesh.cpp
+++ b/libsrc/meshing/python_mesh.cpp
@@ -423,26 +423,48 @@ DLL_HEADER void ExportNetgenMeshing(py::module &m)
     .def("Load", FunctionPointer
          ([](Mesh & self, const string & filename)
           {
-            istream * infile;
+            istream * infile;
+
+#ifdef PARALLEL
+            MPI_Comm_rank(MPI_COMM_WORLD, &id);
+            MPI_Comm_size(MPI_COMM_WORLD, &ntasks);
+
+            char* buf = nullptr;
+            int strs = 0;
+            if(id==0) {
+#endif
             if (filename.find(".vol.gz") != string::npos)
               infile = new igzstream (filename.c_str());
             else
               infile = new ifstream (filename.c_str());
             // ifstream input(filename);
 #ifdef PARALLEL
-            // int id;
-            MPI_Comm_rank(MPI_COMM_WORLD, &id);
-            MPI_Comm_size(MPI_COMM_WORLD, &ntasks);
-
-            if (id == 0)
-              {
-                self.Load(*infile);
+            //still inside id==0-bracket...
+            self.Load(*infile);
             self.Distribute();
+
+            /** Copy the rest of the file into a string (for geometry) **/
+            stringstream geom_part;
+            geom_part << infile->rdbuf();
+            string geom_part_string = geom_part.str();
+            strs = geom_part_string.size();
+            buf = new char[strs];
+            memcpy(buf, geom_part_string.c_str(), strs*sizeof(char));
               }
-            else
-              {
-                self.SendRecvMesh();
-              }
+            else {
+              self.SendRecvMesh();
+            }
+
+            /** Scatter the geometry-string **/
+            MPI_Bcast(&strs, 1, MPI_INT, 0, MPI_COMM_WORLD);
+            if(id!=0)
+              buf = new char[strs];
+            MPI_Bcast(buf, strs, MPI_CHAR, 0, MPI_COMM_WORLD);
+            if(id==0)
+              delete infile;
+            infile = new istringstream(string((const char*)buf, (size_t)strs));
+            delete[] buf;
+
 #else
             self.Load(*infile);
 #endif
@@ -456,6 +478,10 @@ DLL_HEADER void ExportNetgenMeshing(py::module &m)
                     break;
                   }
               }
+            if (!ng_geometry)
+              ng_geometry = make_shared<NetgenGeometry>();
+            self.SetGeometry(ng_geometry);
+            delete infile;
           }))
     // static_cast<void(Mesh::*)(const string & name)>(&Mesh::Load))
     .def("Save", static_cast<void(Mesh::*)(const string & name)const>(&Mesh::Save))
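
The geometry part of Mesh.Load above is distributed with the usual two-step broadcast: rank 0 first broadcasts the length so the other ranks can allocate, then the payload itself. A self-contained sketch of that pattern (bcast_string is a hypothetical helper, not netgen API):

    #include <mpi.h>
    #include <algorithm>
    #include <string>
    #include <vector>

    // Broadcast a string known only on rank 0 to all ranks of MPI_COMM_WORLD.
    std::string bcast_string (const std::string & s_on_root)
    {
      int rank;
      MPI_Comm_rank(MPI_COMM_WORLD, &rank);
      int len = (rank == 0) ? (int)s_on_root.size() : 0;
      MPI_Bcast(&len, 1, MPI_INT, 0, MPI_COMM_WORLD);           // step 1: the length
      std::vector<char> buf(len);
      if (rank == 0)
        std::copy(s_on_root.begin(), s_on_root.end(), buf.begin());
      MPI_Bcast(buf.data(), len, MPI_CHAR, 0, MPI_COMM_WORLD);  // step 2: the payload
      return len > 0 ? std::string(buf.data(), len) : std::string();
    }
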