  // Either lookup pretabulated values (if build() has been called)
  // or ask the ufc::dof_map to tabulate the values
  if (dof_map)
  {
    for (uint i = 0; i < local_dimension(); i++)
      dofs[i] = dof_map[cell_index][i];
    // FIXME: Maybe memcpy() can be used to speed this up? Test this!
    //memcpy(dofs, dof_map[cell_index], sizeof(uint)*local_dimension());
  }
  else
    ufc_dof_map->tabulate_dofs(dofs, ufc_mesh, ufc_cell);
}
//-----------------------------------------------------------------------------
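// Usage sketch (illustrative, construction elided): once build(ufc) has
// pretabulated the map, tabulate_dofs(dofs, ufc_cell, cell_index) reduces to
// an array lookup; before that it defers to the generated ufc::dof_map.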
void DofMap::build(UFC& ufc)
{
  printf("BUILDING\n");

  // Initialize new dof map
  dof_map = new uint*[dolfin_mesh.numCells()];

  uint current_dof = 0;
  for (uint p = 0; p < MPI::numProcesses(); ++p)
  {
    for (CellIterator c(dolfin_mesh); !c.end(); ++c)
    {
      // Skip cells that are not in the partition belonging to process p
      if ((*partitions)(*c) != p)
        continue;

      dof_map[c->index()] = new uint[local_dimension()];
      //dolfin_debug2("cpu %d building cell %d", MPI::processNumber(), c->index());

      ufc_dof_map->tabulate_dofs(ufc.dofs[0], ufc.mesh, ufc.cell);
      for (uint i = 0; i < ufc_dof_map->local_dimension(); ++i)
      {
        const uint dof = ufc.dofs[0][i];
        //dolfin_debug3("ufc.dofs[%d][%d] = %d", 0, MPI::processNumber(), ufc.dofs[0][i]);

        std::map<uint, uint>::iterator it = map.find(dof);
        if (it != map.end())
        {
          // Dof has already been numbered: reuse its number
          //dolfin_debug2("cpu %d dof %d already computed", MPI::processNumber(), it->second);
          dof_map[c->index()][i] = it->second;
        }
        else
        {
          // First encounter: assign the next free number
          dof_map[c->index()][i] = current_dof;
          map[dof] = current_dof++;
        }
      }
    }
  }
}
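// Example of the renumbering performed by build() (illustrative): if two
// cells sharing an edge tabulate UFC dofs {7, 3, 5} and {3, 5, 9}, the
// traversal assigns 7->0, 3->1, 5->2 at the first cell, reuses 1 and 2 at
// the second, and appends 9->3, so 'map' records UFC dof -> contiguous dof.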
//-----------------------------------------------------------------------------
void DofMap::tabulate_dofs(uint* dofs, const ufc::cell& ufc_cell, uint cell_index) const
{
  // Either lookup pretabulated values (if build() has been called)
  // or ask the ufc::dof_map to tabulate the values.
  // NOTE: only the '_type_ == 2' guard survives in this excerpt; the other
  // type codes below are assumptions.
  if (_type_ == 0)  // assumed type code: one dof per vertex
  {
    Cell c(dolfin_mesh, cell_index);
    for (uint i = 0; i < local_dimension(); i++)
      dofs[i] = dolfin_mesh.distdata().get_global(c.entities(0)[i], 0);
  }
  else if (_type_ == 1)  // assumed type code: one dof per cell
  {
    *dofs = dolfin_mesh.distdata().get_cell_global(cell_index);
  }
  else if (_type_ == 2 && v_map)
  {
    // gdim dofs per vertex, blocked by component: dof i + k*(gdim + 1) is
    // component k at local vertex i (num_entities == gdim + 1 on simplices)
    Cell c(dolfin_mesh, cell_index);
    uint gdim = ufc_dof_map->geometric_dimension();
    uint num_entities = c.numEntities(0);
    dolfin_assert(gdim * num_entities == local_dimension());
    for (uint k = 0; k < gdim; k++)
      for (uint i = 0; i < num_entities; i++)
        dofs[i + k * (gdim + 1)] = v_map[c.entities(0)[i]] + k;
  }
  else if (_type_ == 3)  // assumed type code: gdim dofs per cell
  {
    uint gdim = ufc_dof_map->geometric_dimension();
    dolfin_assert(local_dimension() == gdim);
    for (uint i = 0; i < gdim; i++)
      dofs[i] = (cell_index + i * dolfin_mesh.numCells()) + _offset_;
  }
  else if (dof_map)
  {
    // Pretabulated by build(): copy the flat row for this cell
    uint offset = local_dimension() * cell_index;
    memcpy(dofs, &dof_map[offset], sizeof(uint)*local_dimension());
  }
  else
    ufc_dof_map->tabulate_dofs(dofs, ufc_mesh, ufc_cell);
}
//-----------------------------------------------------------------------------
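// Layout example for the vector case above (P1 vector element in 2D,
// illustrative): a triangle with vertices (a, b, c) is numbered
//   dofs = [ x_a, x_b, x_c, y_a, y_b, y_c ]
// with x_v = v_map[v] and y_v = v_map[v] + 1, i.e. the cell-local array is
// blocked by component while the components of one vertex get consecutive
// global numbers.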
void DofMap::build(UFC& ufc)  // NOTE: signature not captured in this excerpt
{
  if (MPI::numProcesses() == 1)
  {
    uint *dofs = new uint[local_dimension()];
    // NOTE: this branch stores one row per cell, while the parallel branch
    // below uses a single flat array
    dof_map = new uint*[dolfin_mesh.numCells()];

    // Number of dofs per component block for vector-valued elements
    uint tt = local_dimension() / ufc_dof_map->geometric_dimension();
    uint num = 0;  // NOTE: running dof counter; declaration missing in the
                   // excerpt, as is its increment (assumed 'num += 2' so
                   // that the halving below yields a contiguous numbering)

    for (CellIterator c(dolfin_mesh); !c.end(); ++c)
    {
      dof_map[c->index()] = new uint[local_dimension()];
      ufc.update(*c, dolfin_mesh.distdata());

      for (uint j = 0; j < ufc.form.rank(); j++)
      {
        ufc_dof_map->tabulate_dofs(dofs, ufc.mesh, ufc.cell);

        // NOTE: the guard separating the two loops below was lost; the test
        // mirrors the vector-element detection used in the parallel branch
        if (ufc_dof_map->global_dimension() !=
            ufc_dof_map->geometric_dimension() * dolfin_mesh.distdata().global_numVertices())
        {
          for (uint i = 0; i < local_dimension(); i++)
          {
            const uint dof = dofs[i];
            std::map<uint, uint>::iterator it = map.find(dof);
            if (it != map.end())
              dof_map[c->index()][i] = it->second;
            else
            {
              dof_map[c->index()][i] = num;
              map[dof] = num;
              num += 2;
            }
            dof_map[c->index()][i] /= 2;
          }
        }
        else
        {
          // Vector case: component k of scalar dof i sits at i + k * tt
          for (uint i = 0; i < tt; i++)
          {
            for (uint k = 0; k < ufc_dof_map->geometric_dimension(); k++)
            {
              const uint dof = dofs[i + k * tt];
              std::map<uint, uint>::iterator it = map.find(dof);
              if (it != map.end())
                dof_map[c->index()][i + k * tt] = it->second;
              else
              {
                dof_map[c->index()][i + k * tt] = num;
                map[dof] = num;
                num += 2;
              }
              dof_map[c->index()][i + k * tt] /= 2;
            }
          }
        }
      }
    }
    delete[] dofs;

    // for(CellIterator c(dolfin_mesh); !c.end(); ++c) {
    //   for(uint j = 0; j < ufc.form.rank(); j++) {
    //     tabulate_dofs(dofs, ufc.cell, c->index());
    //     for(uint i = 0; i < local_dimension(); i++)
    //       cout << dofs[i] << " ";
    //   }
    // }
  }
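  // The parallel path below proceeds in two stages (summary): the common
  // special cases are detected by comparing the UFC global dimension
  // against global mesh entity counts; anything else falls through to a
  // general ownership-vote renumbering. Where the _type_ flags consumed by
  // tabulate_dofs() are set for these cases is not captured in this excerpt.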
  else
  {
    uint *dofs = new uint[local_dimension()];
    uint pe_size = MPI::numProcesses();
    uint rank = MPI::processNumber();

    dolfin_mesh.renumber();

    if (ufc_dof_map->global_dimension() ==
        dolfin_mesh.distdata().global_numVertices())
    {
      // One dof per vertex: reuse the distributed global vertex numbering
      _local_size = dolfin_mesh.numVertices() - dolfin_mesh.distdata().num_ghost(0);
    }
    else if (ufc_dof_map->global_dimension() ==
             dolfin_mesh.distdata().global_numCells())
    {
      // One dof per cell: reuse the distributed global cell numbering
      _local_size = dolfin_mesh.numCells();
    }
    else if (ufc_dof_map->global_dimension() ==
             ufc_dof_map->geometric_dimension() * dolfin_mesh.distdata().global_numVertices())
    {
      // gdim dofs per vertex: compute per-process offsets and build v_map
      uint gdim = ufc_dof_map->geometric_dimension();
      uint num_local = dolfin_mesh.numVertices() - dolfin_mesh.distdata().num_ghost(0);
      uint num_dofs = gdim * num_local;
      uint offset = 0;  // NOTE: declaration missing in the excerpt
#if ( MPI_VERSION > 1 )
      MPI_Exscan(&num_dofs, &offset, 1, MPI_UNSIGNED, MPI_SUM, MPI::DOLFIN_COMM);
#else
      // MPI-1 fallback: inclusive scan, then subtract our own contribution
      MPI_Scan(&num_dofs, &offset, 1, MPI_UNSIGNED, MPI_SUM, MPI::DOLFIN_COMM);
      offset -= num_dofs;
#endif
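      // Example (illustrative): with per-process num_dofs = {6, 9, 3} the
      // exclusive scan gives offset = {0, 6, 15}, i.e. each process numbers
      // its owned dofs in a contiguous global range.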
      // Assign each owned (non-ghost) vertex its global dof offset
      _map<uint, uint> v_offset;
      for (VertexIterator v(dolfin_mesh); !v.end(); ++v)
      {
        if (!dolfin_mesh.distdata().is_ghost(v->index(), 0))
        {
          v_offset[dolfin_mesh.distdata().get_global(*v)] = offset;
          offset += gdim;  // NOTE: increment missing in the excerpt; assumed
        }
      }

      // Collect our ghost vertices, grouped by owning process
      Array<uint> *ghost_buff = new Array<uint>[pe_size];
      for (MeshGhostIterator iter(dolfin_mesh.distdata(), 0); !iter.end(); ++iter)
        ghost_buff[iter.owner()].push_back(dolfin_mesh.distdata().get_global(iter.index(), 0));
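      // The exchange below uses a (P-1)-round ring schedule: in round j,
      // process r sends to (r + j) % P and receives from (r - j + P) % P,
      // so every ordered pair of processes communicates exactly once.
      // Example with P = 4, r = 1 (illustrative): rounds j = 1,2,3 send to
      // processes 2,3,0 and receive from 0,3,2.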
      Array<uint> send_buff;

      uint recv_size = dolfin_mesh.distdata().num_ghost(0);
      int recv_count, recv_size_gh, send_size;
      uint src, dest;     // NOTE: declarations missing in the excerpt
      MPI_Status status;  // NOTE: declaration missing in the excerpt

      // Let each process i learn how many ghost references target its
      // vertices (only the value reduced to root i == rank is used locally)
      for (uint i = 0; i < pe_size; i++)
      {
        send_size = ghost_buff[i].size();
        MPI_Reduce(&send_size, &recv_size_gh, 1,
                   MPI_INT, MPI_SUM, i, MPI::DOLFIN_COMM);
      }

      uint *recv_ghost = new uint[recv_size_gh];
      uint *recv_buff = new uint[recv_size];

      for (uint j = 1; j < pe_size; j++)
      {
        src = (rank - j + pe_size) % pe_size;
        dest = (rank + j) % pe_size;

        // Send our ghost vertices to their owner, receive others' requests
        MPI_Sendrecv(&ghost_buff[dest][0], ghost_buff[dest].size(),
                     MPI_UNSIGNED, dest, 1, recv_ghost, recv_size_gh,
                     MPI_UNSIGNED, src, 1, MPI::DOLFIN_COMM, &status);
        MPI_Get_count(&status, MPI_UNSIGNED, &recv_count);

        // Answer each request with the owned vertex's dof offset
        for (int k = 0; k < recv_count; k++)
          send_buff.push_back(v_offset[recv_ghost[k]]);

        MPI_Sendrecv(&send_buff[0], send_buff.size(), MPI_UNSIGNED, src, 2,
                     recv_buff, recv_size, MPI_UNSIGNED, dest, 2,
                     MPI::DOLFIN_COMM, &status);
        MPI_Get_count(&status, MPI_UNSIGNED, &recv_count);

        // Record the received offsets for our ghost vertices
        for (int k = 0; k < recv_count; k++)
          v_offset[ghost_buff[dest][k]] = recv_buff[k];

        send_buff.clear();  // NOTE: reset missing in the excerpt; assumed
      }
      delete[] recv_ghost;
      delete[] recv_buff;
      // Final vertex -> dof offset lookup table, including ghosts
      v_map = new uint[dolfin_mesh.numVertices()];
      for (VertexIterator v(dolfin_mesh); !v.end(); ++v)
        v_map[v->index()] = v_offset[dolfin_mesh.distdata().get_global(v->index(), 0)];

      _local_size = gdim * num_local;

      for (uint i = 0; i < pe_size; i++)
        ghost_buff[i].clear();
      delete[] ghost_buff;  // NOTE: deallocation missing in the excerpt; assumed
    }
    else if (ufc_dof_map->global_dimension() ==
             ufc_dof_map->geometric_dimension() * dolfin_mesh.distdata().global_numCells())
    {
      // gdim dofs per cell: a per-process offset is enough
      uint gdim = ufc_dof_map->geometric_dimension();
      uint num_dofs = gdim * dolfin_mesh.numCells();
      uint offset = 0;  // NOTE: declaration missing in the excerpt
#if ( MPI_VERSION > 1 )
      MPI_Exscan(&num_dofs, &offset, 1, MPI_UNSIGNED, MPI_SUM, MPI::DOLFIN_COMM);
#else
      MPI_Scan(&num_dofs, &offset, 1, MPI_UNSIGNED, MPI_SUM, MPI::DOLFIN_COMM);
      offset -= num_dofs;
#endif
      _offset_ = offset;  // NOTE: assignment missing in the excerpt; assumed,
                          // since tabulate_dofs() reads _offset_ for this case
      _local_size = gdim * dolfin_mesh.numCells();
    }
    else
    {
      // General case: decide ownership of shared dofs and renumber. Shared
      // dofs can only occur on facets of the interior (inter-process)
      // boundary, so only those cells are inspected for ownership.
      BoundaryMesh interior_boundary;
      interior_boundary.init_interior(dolfin_mesh);
      MeshFunction<uint>* cell_map = interior_boundary.data().meshFunction("cell map");

      std::vector<uint> send_buffer;
      _set<uint> shared_dofs, forbidden_dofs, owned_dofs;
      _map<uint, uint> dof_vote;
      _map<uint, std::vector<uint> > dof2index;

      uint n = local_dimension();
      dof_map = new uint[n * dolfin_mesh.numCells()];
      uint *facet_dofs = new uint[num_facet_dofs()];

      Cell c_tmp(dolfin_mesh, 1);
      UFCCell ufc_cell(c_tmp);
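      // Ownership protocol (summary of the code below): every process draws
      // a pseudo-random vote for each dof it sees on the interior boundary
      // and exchanges (dof, vote) pairs. A process keeps a shared dof only
      // if no other process reported a smaller vote for it; otherwise the
      // dof becomes "forbidden" locally and is numbered by its owner.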
      // Initialize random number generator differently on each process
      srand((uint)time(0) + MPI::processNumber());

      // Decide ownership of shared dofs
      for (CellIterator bc(interior_boundary); !bc.end(); ++bc)
      {
        // Map the boundary cell back to a facet and an adjacent mesh cell
        Facet f(dolfin_mesh, cell_map->get(*bc));
        Cell c(dolfin_mesh, f.entities(dolfin_mesh.topology().dim())[0]);
        uint local_facet = c.index(f);

        ufc_cell.update(c, dolfin_mesh.distdata());
        ufc_dof_map->tabulate_dofs(dofs, ufc_mesh, ufc_cell);
        ufc_dof_map->tabulate_facet_dofs(facet_dofs, local_facet);

        for (uint i = 0; i < num_facet_dofs(); i++)
        {
          // Assign an ownership vote for each "shared" dof, indexing through
          // facet_dofs to pick out the dofs located on this facet
          if (shared_dofs.find(dofs[facet_dofs[i]]) == shared_dofs.end())
          {
            shared_dofs.insert(dofs[facet_dofs[i]]);
            dof_vote[dofs[facet_dofs[i]]] = (uint) rand() + (uint) MPI::processNumber();
            send_buffer.push_back(dofs[facet_dofs[i]]);
            send_buffer.push_back(dof_vote[dofs[facet_dofs[i]]]);
          }
        }
      }
      // Decide ownership of "shared" dofs by exchanging votes
      MPI_Status status;  // NOTE: declarations missing in the excerpt
      int recv_count;
      uint src, dest, max_recv;
      uint num_proc = MPI::numProcesses();
      uint proc_num = MPI::processNumber();
      uint local_size = send_buffer.size();
      MPI_Allreduce(&local_size, &max_recv, 1,
                    MPI_UNSIGNED, MPI_MAX, MPI::DOLFIN_COMM);
      uint *recv_buffer = new uint[max_recv];
      for (uint k = 1; k < MPI::numProcesses(); ++k)
      {
        src = (proc_num - k + num_proc) % num_proc;
        dest = (proc_num + k) % num_proc;

        MPI_Sendrecv(&send_buffer[0], send_buffer.size(), MPI_UNSIGNED, dest, 1,
                     recv_buffer, max_recv, MPI_UNSIGNED, src, 1,
                     MPI::DOLFIN_COMM, &status);
        MPI_Get_count(&status, MPI_UNSIGNED, &recv_count);

        for (int i = 0; i < recv_count; i += 2)
        {
          if (shared_dofs.find(recv_buffer[i]) != shared_dofs.end())
          {
            // Move dofs with higher ownership votes from shared to forbidden
            if (recv_buffer[i+1] < dof_vote[recv_buffer[i]])
            {
              forbidden_dofs.insert(recv_buffer[i]);
              shared_dofs.erase(recv_buffer[i]);
            }
          }
        }
      }
      send_buffer.clear();  // NOTE: reset missing in the excerpt; the buffer
                            // is reused below for the renumbering exchange
      // Mark all dofs that are not forbidden as owned by this process
      for (CellIterator c(dolfin_mesh); !c.end(); ++c)
      {
        ufc_cell.update(*c, dolfin_mesh.distdata());
        ufc_dof_map->tabulate_dofs(dofs, ufc_mesh, ufc_cell);
        for (uint i = 0; i < n; i++)
        {
          if (forbidden_dofs.find(dofs[i]) == forbidden_dofs.end())
            owned_dofs.insert(dofs[i]);

          // Create mapping from dof to dof_map offset
          dof2index[dofs[i]].push_back(c->index() * n + i);
        }
      }
      // Compute offset for owned and non-shared dofs
      uint range = owned_dofs.size();
      uint offset = 0;  // NOTE: declaration missing in the excerpt
#if ( MPI_VERSION > 1 )
      MPI_Exscan(&range, &offset, 1, MPI_UNSIGNED, MPI_SUM, MPI::DOLFIN_COMM);
#else
      MPI_Scan(&range, &offset, 1, MPI_UNSIGNED, MPI_SUM, MPI::DOLFIN_COMM);
      offset -= range;
#endif
      // Compute renumbering for local and owned shared dofs
      for (_set<uint>::iterator it = owned_dofs.begin();
           it != owned_dofs.end(); ++it, offset++)
      {
        for (std::vector<uint>::iterator di = dof2index[*it].begin();
             di != dof2index[*it].end(); ++di)
          dof_map[*di] = offset;

        // Shared dofs we own must be announced to the other processes
        if (shared_dofs.find(*it) != shared_dofs.end())
        {
          send_buffer.push_back(*it);
          send_buffer.push_back(offset);
        }
      }
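      // Example (illustrative): if owned_dofs = {3, 7} and this process's
      // offset starts at 10, dof 3 -> 10 and dof 7 -> 11; every position
      // recorded in dof2index[3] or dof2index[7] is overwritten with the
      // new global number.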
      // Exchange new dof numbers for shared dofs
      delete[] recv_buffer;
      local_size = send_buffer.size();
      MPI_Allreduce(&local_size, &max_recv, 1,
                    MPI_UNSIGNED, MPI_MAX, MPI::DOLFIN_COMM);
      recv_buffer = new uint[max_recv];
      for (uint k = 1; k < MPI::numProcesses(); ++k)
      {
        src = (proc_num - k + num_proc) % num_proc;
        dest = (proc_num + k) % num_proc;

        MPI_Sendrecv(&send_buffer[0], send_buffer.size(), MPI_UNSIGNED, dest, 1,
                     recv_buffer, max_recv, MPI_UNSIGNED, src, 1,
                     MPI::DOLFIN_COMM, &status);
        MPI_Get_count(&status, MPI_UNSIGNED, &recv_count);

        for (int i = 0; i < recv_count; i += 2)
        {
          // Assign new dof numbers for shared dofs owned by other processes
          if (forbidden_dofs.find(recv_buffer[i]) != forbidden_dofs.end())
          {
            for (std::vector<uint>::iterator di = dof2index[recv_buffer[i]].begin();
                 di != dof2index[recv_buffer[i]].end(); ++di)
              dof_map[*di] = recv_buffer[i+1];
          }
        }
      }
      delete[] recv_buffer;
      delete[] facet_dofs;  // NOTE: cleanup assumed; not in the excerpt
    }

    delete[] dofs;          // NOTE: cleanup assumed; not in the excerpt
  }
}
//-----------------------------------------------------------------------------
std::map<dolfin::uint, dolfin::uint> DofMap::getMap() //FIXME: const
{
  return map;
}