 #include <fei_Graph_Impl.hpp>
 #include <fei_EqnComm.hpp>
+#include <fei_CommUtils.hpp>
 #include <fei_TemplateUtils.hpp>
 #include <fei_VectorSpace.hpp>
...
 #include <fei_ErrMacros.hpp>
 //----------------------------------------------------------------------------
-fei::Graph_Impl::Graph_Impl(fei::SharedPtr<snl_fei::CommUtils<int> > commUtils,
-                            int firstLocalRow, int lastLocalRow)
+fei::Graph_Impl::Graph_Impl(MPI_Comm comm, int firstLocalRow, int lastLocalRow)
  : localGraphData_(NULL),
    remoteGraphData_(),
    firstLocalRow_(firstLocalRow),
    lastLocalRow_(lastLocalRow),
-   localProc_(commUtils->localProc()),
-   numProcs_(commUtils->numProcs()),
...
+  localProc_ = fei::localProc(comm_);
+  numProcs_ = fei::numProcs(comm_);

   //for remoteGraphData_, we don't know what the range of row-numbers will
   //be, so we'll just construct it with -1,-1
   remoteGraphData_.resize(numProcs_);
   for(int p=0; p<numProcs_; ++p) {
     remoteGraphData_[p] = new remote_table_type(-1, -1);
   }
-  eqnComm_.reset(new fei::EqnComm(commUtils_->getCommunicator(),
-                                  lastLocalRow-firstLocalRow+1));
+  eqnComm_.reset(new fei::EqnComm(comm_, lastLocalRow-firstLocalRow+1));
   localGraphData_ = new table_type(firstLocalRow_, lastLocalRow_);
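Note: fei::localProc and fei::numProcs come from the newly included fei_CommUtils.hpp; they replace the snl_fei::CommUtils object the old constructor carried around. A minimal sketch of what those helpers do, as an illustration rather than FEI's actual implementation (the serial FEI_SER build presumably short-circuits to 0 and 1):

    #include <mpi.h>

    // Sketch: rank/size wrappers like those the new constructor calls.
    int localProc(MPI_Comm comm)
    {
      int rank = 0;
      MPI_Comm_rank(comm, &rank);  // this proc's rank within comm
      return rank;
    }

    int numProcs(MPI_Comm comm)
    {
      int size = 1;
      MPI_Comm_size(comm, &size);  // total number of procs in comm
      return size;
    }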
 //----------------------------------------------------------------------------
-int fei::Graph_Impl::addIndices(int row, int len, int* indices)
+int fei::Graph_Impl::addIndices(int row, int len, const int* indices)
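The only change to addIndices is const-correctness: the column indices are read, never modified, so the pointer becomes const int*. A hedged usage sketch (values hypothetical, graph an already-constructed fei::Graph_Impl):

    const int cols[3] = {10, 11, 12};  // column indices to connect to row 10
    graph.addIndices(10, 3, cols);     // legal now that the parameter is const int*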
   //now we can find out which procs we'll be receiving from.
   std::vector<int> recvProcs;
-  commUtils_->mirrorProcs(sendProcs, recvProcs);
+  fei::mirrorProcs(comm_, sendProcs, recvProcs);

   //next we'll declare arrays to receive into.
   std::vector<std::vector<int> > recv_ints(recvProcs.size());
...
   std::vector<MPI_Request> mpiReqs(recvProcs.size());
   std::vector<MPI_Status> mpiStatuses(recvProcs.size());
...
   unsigned offset = 0;
   for(unsigned i=0; i<recvProcs.size(); ++i) {
     MPI_Irecv(&recv_sizes[i], 1, MPI_INT, recvProcs[i],
-              tag1, commUtils_->getCommunicator(), &mpiReqs[i]);
+              tag1, comm_, &mpiReqs[i]);
   }
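fei::mirrorProcs(comm_, sendProcs, recvProcs) fills recvProcs with the ranks that will be sending to this proc, given the ranks this proc sends to; it is the transpose of the communication pattern. A minimal sketch of that computation, assuming the free-function signature used in the diff (FEI's real implementation may differ):

    #include <mpi.h>
    #include <vector>

    // Sketch: compute the procs that send to me, given the procs I send to.
    int mirrorProcsSketch(MPI_Comm comm, std::vector<int>& toProcs,
                          std::vector<int>& fromProcs)
    {
      int numProcs = 1;
      MPI_Comm_size(comm, &numProcs);

      // flags[p] == 1 if this proc sends to proc p
      std::vector<int> flags(numProcs, 0);
      for(size_t i=0; i<toProcs.size(); ++i) flags[toProcs[i]] = 1;

      // Transpose the distributed flag matrix: after the all-to-all,
      // allFlags[p] == 1 on this proc iff proc p sends to this proc.
      std::vector<int> allFlags(numProcs, 0);
      MPI_Alltoall(&flags[0], 1, MPI_INT, &allFlags[0], 1, MPI_INT, comm);

      fromProcs.clear();
      for(int p=0; p<numProcs; ++p) {
        if (allFlags[p] == 1) fromProcs.push_back(p);
      }
      return 0;
    }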
   //now we'll pack our to-be-sent data into buffers, and send the
   //sizes first.
...
     int isize = send_ints[i].size();
-    MPI_Send(&isize, 1, MPI_INT, proc, tag1, commUtils_->getCommunicator());
+    MPI_Send(&isize, 1, MPI_INT, proc, tag1, comm_);
   }

-  MPI_Waitall(mpiReqs.size(), &mpiReqs[0], &mpiStatuses[0]);
+  if (mpiReqs.size() > 0) {
+    MPI_Waitall(mpiReqs.size(), &mpiReqs[0], &mpiStatuses[0]);
+  }
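This guard (repeated below for the second Waitall) is needed because &mpiReqs[0] indexes the vector before MPI ever sees the pointer; on an empty std::vector that indexing is undefined behavior, even though MPI_Waitall itself accepts a zero count. The pattern as a standalone sketch (helper name hypothetical):

    #include <mpi.h>
    #include <vector>

    // Sketch: only form &reqs[0] when the vector is non-empty.
    void waitAllIfAny(std::vector<MPI_Request>& reqs,
                      std::vector<MPI_Status>& stats)
    {
      if (reqs.size() > 0) {
        MPI_Waitall(reqs.size(), &reqs[0], &stats[0]);
      }
    }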
   //now resize our recv buffers, and post the recvs.
-  for(unsigned i=0; i<recvProcs.size(); ++i) {
+  for(size_t i=0; i<recvProcs.size(); ++i) {
     int intsize = recv_sizes[i];
     recv_ints[i].resize(intsize);
     MPI_Irecv(&(recv_ints[i][0]), intsize, MPI_INT, recvProcs[i],
-              tag1, commUtils_->getCommunicator(), &mpiReqs[i]);
+              tag1, comm_, &mpiReqs[i]);
   }

   //now send our packed buffers.
-  for(unsigned i=0; i<sendProcs.size(); ++i) {
+  for(size_t i=0; i<sendProcs.size(); ++i) {
     int proc = sendProcs[i];
     MPI_Send(&(send_ints[i][0]), send_ints[i].size(), MPI_INT,
-             proc, tag1, commUtils_->getCommunicator());
+             proc, tag1, comm_);
   }

-  MPI_Waitall(mpiReqs.size(), &mpiReqs[0], &mpiStatuses[0]);
+  if (mpiReqs.size() > 0) {
+    MPI_Waitall(mpiReqs.size(), &mpiReqs[0], &mpiStatuses[0]);
+  }

   for(unsigned i=0; i<recvProcs.size(); ++i) {
     std::vector<int> recvdata = recv_ints[i];
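Taken together, the hunks above implement a two-phase exchange: message sizes are exchanged first so each receive buffer can be sized exactly, then the packed index data follows on the same tag. A condensed, self-contained sketch of that protocol (names borrowed from the surrounding code, tag value hypothetical; FEI's member function does more bookkeeping than this):

    #include <mpi.h>
    #include <vector>

    void exchangeSketch(MPI_Comm comm,
                        std::vector<int>& sendProcs,
                        std::vector<std::vector<int> >& send_ints,
                        std::vector<int>& recvProcs,
                        std::vector<std::vector<int> >& recv_ints)
    {
      const int tag1 = 11111;
      std::vector<int> recv_sizes(recvProcs.size());
      std::vector<MPI_Request> reqs(recvProcs.size());
      std::vector<MPI_Status> stats(recvProcs.size());

      // Phase 1: exchange sizes so recv buffers can be sized exactly.
      for(size_t i=0; i<recvProcs.size(); ++i) {
        MPI_Irecv(&recv_sizes[i], 1, MPI_INT, recvProcs[i], tag1, comm, &reqs[i]);
      }
      for(size_t i=0; i<sendProcs.size(); ++i) {
        int isize = send_ints[i].size();
        MPI_Send(&isize, 1, MPI_INT, sendProcs[i], tag1, comm);
      }
      if (reqs.size() > 0) MPI_Waitall(reqs.size(), &reqs[0], &stats[0]);

      // Phase 2: exchange the packed index data itself.
      recv_ints.resize(recvProcs.size());
      for(size_t i=0; i<recvProcs.size(); ++i) {
        recv_ints[i].resize(recv_sizes[i]);
        MPI_Irecv(&(recv_ints[i][0]), recv_sizes[i], MPI_INT, recvProcs[i],
                  tag1, comm, &reqs[i]);
      }
      for(size_t i=0; i<sendProcs.size(); ++i) {
        MPI_Send(&(send_ints[i][0]), send_ints[i].size(), MPI_INT,
                 sendProcs[i], tag1, comm);
      }
      if (reqs.size() > 0) MPI_Waitall(reqs.size(), &reqs[0], &stats[0]);
    }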