28
42
GraphPartition::edgecut(graph, num_partitions, partitions.values());
30
44
//-----------------------------------------------------------------------------
45
void MeshPartition::partition(Mesh& mesh, MeshFunction<uint>& partitions)
{
  // Unweighted partition: forward to the common ParMETIS driver with a
  // null weight function, which disables per-cell weights entirely.
  partitionCommonMetis(mesh, partitions, 0);
}
//-----------------------------------------------------------------------------
50
void MeshPartition::partition(Mesh& mesh, MeshFunction<uint>& partitions,
51
MeshFunction<uint>& weight)
53
partitionCommonMetis(mesh, partitions, &weight);
55
//-----------------------------------------------------------------------------
57
//-----------------------------------------------------------------------------
58
void MeshPartition::partitionCommonMetis(Mesh& mesh,
59
MeshFunction<uint>& partitions,
60
MeshFunction<uint>* weight)
63
// Metis assumes vertices numbered from process 0
64
MeshRenumber::renumber_vertices(mesh);
67
int numflag = 0; // C-style numbering
71
wgtflag = 2; // Weights on vertices only
72
ncon = 1; // One weight per vertex
75
wgtflag = 0; // Turn off graph weights
76
ncon = 0; // No weights on vertices
79
// Duplicate MPI communicator
81
MPI_Comm_dup(MPI::DOLFIN_COMM, &comm);
83
// Get information about the PE
85
MPI_Comm_size(MPI::DOLFIN_COMM, &size);
86
MPI_Comm_rank(MPI::DOLFIN_COMM, &rank);
89
idxtype *elmdist = new idxtype[size + 1];
90
int ncells = mesh.numCells();
91
elmdist[rank] = ncells;
92
MPI_Allgather(&elmdist[rank], 1, MPI_INT, elmdist,
93
1, MPI_INT, MPI::DOLFIN_COMM);
95
idxtype *elmwgt = NULL;
97
elmwgt = new idxtype[ncells];
98
for(CellIterator c(mesh); !c.end(); ++c)
99
elmwgt[c->index()] = static_cast<idxtype>(weight->get(*c));
102
int sum_elm = elmdist[0];
105
for(int i=1;i<size+1;i++){
106
tmp_elm = elmdist[i];
107
elmdist[i] = sum_elm;
108
sum_elm = tmp_elm + sum_elm;
111
int nvertices = mesh.type().numVertices(mesh.topology().dim());
112
int ncnodes = nvertices - 1 ;
114
idxtype *eptr = new idxtype[ncells + 1];
116
for(uint i=1;i < (mesh.numCells() + 1);i++)
117
eptr[i] = eptr[i-1] + nvertices;
119
int *eind = new idxtype[nvertices * ncells];
121
for(CellIterator c(mesh); !c.end(); ++c)
122
for(VertexIterator v(*c); !v.end(); ++v)
123
eind[i++] = mesh.distdata().get_global(*v);
125
idxtype *part = new idxtype[ncells];
127
float *tpwgts = new float[size];
128
for(i=0; i<size; i++)
129
tpwgts[i] = 1.0/(float)(size);
132
int options[3] = {1, 0, 15};
135
ParMETIS_V3_PartMeshKway(elmdist, eptr, eind, elmwgt, &wgtflag,&numflag,
136
&ncon,&ncnodes,&size, tpwgts, &ubvec,
137
options, &edgecut, part,&comm);
146
// Create partition function
147
partitions.init(mesh, mesh.topology().dim());
149
for(CellIterator cell(mesh); !cell.end(); ++cell)
150
partitions.set(*cell, (uint) part[ cell->index() ]);
153
MPI_Comm_free(&comm);
155
//-----------------------------------------------------------------------------
156
void MeshPartition::partition_geom(Mesh& mesh, MeshFunction<uint>& partitions)
{
  // Geometric partition of the mesh vertices with ParMETIS_V3_PartGeom
  // (space-filling-curve method based on vertex coordinates only).
  // On return, `partitions` maps each local vertex to its target process.

  // Duplicate MPI communicator
  MPI_Comm comm;
  MPI_Comm_dup(MPI::DOLFIN_COMM, &comm);

  // Get information about the PE
  int size, rank;
  MPI_Comm_size(MPI::DOLFIN_COMM, &size);
  MPI_Comm_rank(MPI::DOLFIN_COMM, &rank);

  // Gather number of locally stored vertices for each processor
  idxtype* vtxdist = new idxtype[size + 1];
  vtxdist[rank] = static_cast<idxtype>(mesh.numVertices());
  MPI_Allgather(&vtxdist[rank], 1, MPI_INT, vtxdist, 1,
                MPI_INT, MPI::DOLFIN_COMM);

  // Exclusive prefix sum -> vertex distribution array for ParMETIS
  int sum = vtxdist[0];
  vtxdist[0] = 0;
  for (int p = 1; p < size + 1; p++)
  {
    int tmp = vtxdist[p];
    vtxdist[p] = sum;
    sum = tmp + sum;
  }

  idxtype* part = new idxtype[mesh.numVertices()];
  int gdim = static_cast<int>(mesh.geometry().dim());

  // Pack vertex coordinates, gdim floats per vertex
  float* xdy = new float[gdim * mesh.numVertices()];
  uint i = 0;
  for (VertexIterator vertex(mesh); !vertex.end(); ++vertex)
  {
    xdy[i] = static_cast<float>(vertex->point().x());
    xdy[i + 1] = static_cast<float>(vertex->point().y());
    // z only exists for 3D geometry — guard matches the missing lines in
    // the mangled original (gap between the y and z assignments)
    if (gdim > 2)
      xdy[i + 2] = static_cast<float>(vertex->point().z());
    i += gdim;
  }

  ParMETIS_V3_PartGeom(vtxdist, &gdim, xdy, part, &comm);

  // Create meshfunction from partitions
  partitions.init(mesh, 0);
  for (VertexIterator vertex(mesh); !vertex.end(); ++vertex)
    partitions.set(*vertex, static_cast<uint>(part[vertex->index()]));

  // Release scratch arrays and the duplicated communicator
  delete[] vtxdist;
  delete[] part;
  delete[] xdy;
  MPI_Comm_free(&comm);
}
209
//-----------------------------------------------------------------------------
211
//-----------------------------------------------------------------------------
212
void MeshPartition::partitionCommonMetis(Mesh& mesh,
213
MeshFunction<uint>& partitions,
214
MeshFunction<uint>* weight)
216
error("ParMetis needs MPI");
218
//-----------------------------------------------------------------------------
219
void MeshPartition::partition_geom(Mesh& mesh, MeshFunction<uint>& partitions)
{
  // Stub for builds without MPI: ParMETIS requires an MPI-enabled build.
  error("ParMetis needs MPI");
}
223
//-----------------------------------------------------------------------------