diff --git a/libsrc/meshing/parallelmesh.cpp b/libsrc/meshing/parallelmesh.cpp
index 799f8bb3..2f565a2f 100644
--- a/libsrc/meshing/parallelmesh.cpp
+++ b/libsrc/meshing/parallelmesh.cpp
@@ -527,6 +527,55 @@ namespace netgen
       MPI_Waitall (sendrequests.Size(), &sendrequests[0], MPI_STATUS_IGNORE);
 
+      PrintMessage ( 3, "Sending domain+bc - names");
+
+      sendrequests.SetSize(6*(ntasks-1));
+      /** Send bc-names **/
+      int nbcs = bcnames.Size();
+      Array<int> bcname_sizes(nbcs);
+      int tot_bcsize = 0;
+      for(int k=0;k<nbcs;k++) {
+        bcname_sizes[k] = bcnames[k]->size();
+        tot_bcsize += bcname_sizes[k];
+      }
+      char compiled_bcnames[tot_bcsize];
+      tot_bcsize = 0;
+      for(int k=0;k<nbcs;k++)
+        for(int j=0;j<bcname_sizes[k];j++)
+          compiled_bcnames[tot_bcsize++] = (*bcnames[k])[j];
+
+      for(int k=1;k<ntasks;k++) {
+        sendrequests[6*(k-1)] = MyMPI_ISend(FlatArray<int>(1, &nbcs), k, MPI_TAG_MESH+6);
+        sendrequests[6*(k-1)+1] = MyMPI_ISend(bcname_sizes, k, MPI_TAG_MESH+6);
+        (void) MPI_Isend(compiled_bcnames, tot_bcsize, MPI_CHAR, k, MPI_TAG_MESH+6, MPI_COMM_WORLD, &sendrequests[6*(k-1)+2]);
+      }
+
+      /** Send mat-names **/
+      int nmats = materials.Size();
+      Array<int> mat_sizes(nmats);
+      int tot_matsize = 0;
+      for(int k=0;k<nmats;k++) {
+        mat_sizes[k] = materials[k]->size();
+        tot_matsize += mat_sizes[k];
+      }
+      char compiled_mats[tot_matsize];
+      tot_matsize = 0;
+      for(int k=0;k<nmats;k++)
+        for(int j=0;j<mat_sizes[k];j++)
+          compiled_mats[tot_matsize++] = (*materials[k])[j];
+      for(int k=1;k<ntasks;k++) {
+        sendrequests[6*(k-1)+3] = MyMPI_ISend(FlatArray<int>(1, &nmats), k, MPI_TAG_MESH+6);
+        sendrequests[6*(k-1)+4] = MyMPI_ISend(mat_sizes, k, MPI_TAG_MESH+6);
+        (void) MPI_Isend(compiled_mats, tot_matsize, MPI_CHAR, k, MPI_TAG_MESH+6, MPI_COMM_WORLD, &sendrequests[6*(k-1)+5]);
+      }
+
+      /* now wait ... **/
+      PrintMessage( 3, "now wait for domain+bc - names");
+
+      MPI_Waitall (sendrequests.Size(), &sendrequests[0], MPI_STATUS_IGNORE);
+
+      PrintMessage( 3, "send mesh complete");
+      MPI_Barrier(MPI_COMM_WORLD);
 
     }
 
 
@@ -698,7 +747,43 @@ namespace netgen
 	  }
 	}
+
+      /** Recv bc-names **/
+      int nbcs;
+      MyMPI_Recv(nbcs, 0, MPI_TAG_MESH+6);
+      Array<int> bcs(nbcs);
+      MyMPI_Recv(bcs, 0, MPI_TAG_MESH+6);
+      int size_bc = 0;
+      for(int k=0;k<nbcs;k++)
+        size_bc += bcs[k];
+      char compiled_bcnames[size_bc];
+      MPI_Recv(compiled_bcnames, size_bc, MPI_CHAR, 0, MPI_TAG_MESH+6, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
+
+      SetNBCNames(nbcs);
+      int cnt = 0;
+      for(int k=0;k<nbcs;k++) {
+        SetBCName(k, string(&compiled_bcnames[cnt], bcs[k]));
+        cnt += bcs[k];
+      }
+
+      /** Recv mat-names **/
+      int nmats;
+      MyMPI_Recv(nmats, 0, MPI_TAG_MESH+6);
+      Array<int> matsz(nmats);
+      MyMPI_Recv(matsz, 0, MPI_TAG_MESH+6);
+      int size_mats = 0;
+      for(int k=0;k<nmats;k++)
+        size_mats += matsz[k];
+      char compiled_mats[size_mats];
+      MPI_Recv(compiled_mats, size_mats, MPI_CHAR, 0, MPI_TAG_MESH+6, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
+      materials.SetSize(nmats);
+      cnt = 0;
+      for(int k=0;k<nmats;k++) {
+        materials[k] = new string(&compiled_mats[cnt], matsz[k]);
+        cnt += matsz[k];
+      }
+      MPI_Barrier(MPI_COMM_WORLD);
 
     }
 
 
 