~vcs-imports/escript-finley/trunk

Viewing changes to finley/src/ElementFile_distributeByRankOfDOF.c

  • Committer: jfenwick
  • Date: 2010-10-11 01:48:14 UTC
  • Revision ID: svn-v4:77569008-7704-0410-b7a0-a92fef0b09fd:trunk:3259
Merging dudley and scons updates from branches

--- finley/src/ElementFile_distributeByRankOfDOF.c
+++ finley/src/ElementFile_distributeByRankOfDOF.c
@@ -25,14 +25,14 @@
 
 /**************************************************************/
 
-void Finley_ElementFile_distributeByRankOfDOF(Finley_ElementFile* self, Paso_MPI_rank* mpiRankOfDOF, index_t* Id) {
+void Finley_ElementFile_distributeByRankOfDOF(Finley_ElementFile* self, Esys_MPI_rank* mpiRankOfDOF, index_t* Id) {
      size_t size_size;
-     Paso_MPI_rank myRank, p, *Owner_buffer=NULL, loc_proc_mask_max;
+     Esys_MPI_rank myRank, p, *Owner_buffer=NULL, loc_proc_mask_max;
      dim_t e, j, i, size, *send_count=NULL, *recv_count=NULL, *newOwner=NULL, *loc_proc_mask=NULL, *loc_send_count=NULL,
            newNumElements, numElementsInBuffer, numNodes, numRequests, NN;
      index_t *send_offset=NULL, *recv_offset=NULL, *Id_buffer=NULL, *Tag_buffer=NULL, *Nodes_buffer=NULL, k;
      bool_t *proc_mask=NULL;
-     #ifdef PASO_MPI
+     #ifdef ESYS_MPI
        MPI_Request* mpi_requests=NULL;
        MPI_Status* mpi_stati=NULL;
      #endif
@@ -43,7 +43,7 @@
      numNodes=self->numNodes;
      NN=self->numNodes;
      if (size>1) {
-        #ifdef PASO_MPI
+        #ifdef ESYS_MPI
            mpi_requests=TMPMEMALLOC(8*size, MPI_Request);
            mpi_stati=TMPMEMALLOC(8*size, MPI_Status);
            Finley_checkPtr(mpi_requests);
@@ -54,7 +54,7 @@
           and define a new element owner as the processor with the largest number of DOFs and the smallest id */
        send_count=TMPMEMALLOC(size,dim_t);
        recv_count=TMPMEMALLOC(size,dim_t);
-       newOwner=TMPMEMALLOC(self->numElements,Paso_MPI_rank);
+       newOwner=TMPMEMALLOC(self->numElements,Esys_MPI_rank);
        if ( !( Finley_checkPtr(send_count) || Finley_checkPtr(recv_count) || Finley_checkPtr(newOwner) ) ) {
           memset(send_count, 0, size_size);
           #pragma omp parallel private(p,loc_proc_mask,loc_send_count)
@@ -90,7 +90,7 @@
               THREAD_MEMFREE(loc_proc_mask);
               THREAD_MEMFREE(loc_send_count);
           }
-          #ifdef PASO_MPI
+          #ifdef ESYS_MPI
              MPI_Alltoall(send_count,1,MPI_INT,recv_count,1,MPI_INT,self->MPIInfo->comm);
           #else
              for (p=0;p<size;++p) recv_count[p]=send_count[p];
@@ -105,7 +105,7 @@
           /* allocate buffers */
           Id_buffer=TMPMEMALLOC(numElementsInBuffer,index_t);
           Tag_buffer=TMPMEMALLOC(numElementsInBuffer,index_t);
-          Owner_buffer=TMPMEMALLOC(numElementsInBuffer,Paso_MPI_rank);
+          Owner_buffer=TMPMEMALLOC(numElementsInBuffer,Esys_MPI_rank);
           Nodes_buffer=TMPMEMALLOC(numElementsInBuffer*NN,index_t);
           send_offset=TMPMEMALLOC(size,index_t);
           recv_offset=TMPMEMALLOC(size,index_t);
@@ -146,7 +146,7 @@
              numRequests=0;
              for (p=0;p<size;++p) {
                 if (recv_count[p]>0) {
-                   #ifdef PASO_MPI
+                   #ifdef ESYS_MPI
                    MPI_Irecv(&(self->Id[recv_offset[p]]), recv_count[p],
                              MPI_INT, p, self->MPIInfo->msg_tag_counter+myRank,
                              self->MPIInfo->comm, &mpi_requests[numRequests]);
@@ -169,7 +169,7 @@
              /* now the buffers can be send away */
              for (p=0;p<size;++p) {
                 if (send_count[p]>0) {
-                  #ifdef PASO_MPI
+                  #ifdef ESYS_MPI
                   MPI_Issend(&(Id_buffer[send_offset[p]]), send_count[p],
                             MPI_INT, p, self->MPIInfo->msg_tag_counter+p,
                             self->MPIInfo->comm, &mpi_requests[numRequests]);
@@ -192,7 +192,7 @@
              }
              self->MPIInfo->msg_tag_counter+=4*size;
              /* wait for the requests to be finalized */
-             #ifdef PASO_MPI
+             #ifdef ESYS_MPI
              MPI_Waitall(numRequests,mpi_requests,mpi_stati);
              #endif
           }
@@ -205,7 +205,7 @@
           TMPMEMFREE(recv_offset);
           TMPMEMFREE(proc_mask);
        }
-       #ifdef PASO_MPI
+       #ifdef ESYS_MPI
           TMPMEMFREE(mpi_requests);
          TMPMEMFREE(mpi_stati);
       #endif
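
The hunks above only rename the MPI guard (PASO_MPI to ESYS_MPI) and the rank type (Paso_MPI_rank to Esys_MPI_rank), but the code they touch follows a common MPI redistribution pattern: exchange per-rank message counts with MPI_Alltoall, post non-blocking receives and sends, then block in MPI_Waitall. The stand-alone C sketch below shows that same three-step pattern with plain MPI calls; it is an illustration only, not the finley implementation. The names send_data and recv_data, the tag value 0, the use of MPI_COMM_WORLD instead of self->MPIInfo->comm, and the "one int per rank" payload are all assumptions made for the example.

    /* Minimal sketch of the count-exchange / Irecv / Issend / Waitall pattern.
     * Assumption: every rank sends exactly one int to every other rank; the
     * buffer and variable names are invented for this example. */
    #include <mpi.h>
    #include <stdlib.h>

    int main(int argc, char** argv) {
        int size, rank, p, numRequests = 0;
        MPI_Init(&argc, &argv);
        MPI_Comm_size(MPI_COMM_WORLD, &size);
        MPI_Comm_rank(MPI_COMM_WORLD, &rank);

        int* send_count = calloc(size, sizeof(int));
        int* recv_count = calloc(size, sizeof(int));
        int* send_data  = malloc(size * sizeof(int));
        int* recv_data  = malloc(size * sizeof(int));
        MPI_Request* requests = malloc(2 * size * sizeof(MPI_Request));
        MPI_Status*  stati    = malloc(2 * size * sizeof(MPI_Status));

        for (p = 0; p < size; ++p) { send_count[p] = 1; send_data[p] = rank; }

        /* step 1: tell every rank how many items it will receive from us */
        MPI_Alltoall(send_count, 1, MPI_INT, recv_count, 1, MPI_INT, MPI_COMM_WORLD);

        /* step 2: post non-blocking receives and synchronous-mode sends,
         * mirroring the MPI_Irecv/MPI_Issend pairs visible in the diff */
        for (p = 0; p < size; ++p)
            if (recv_count[p] > 0)
                MPI_Irecv(&recv_data[p], recv_count[p], MPI_INT, p, 0,
                          MPI_COMM_WORLD, &requests[numRequests++]);
        for (p = 0; p < size; ++p)
            if (send_count[p] > 0)
                MPI_Issend(&send_data[p], send_count[p], MPI_INT, p, 0,
                           MPI_COMM_WORLD, &requests[numRequests++]);

        /* step 3: block until all exchanges have completed */
        MPI_Waitall(numRequests, requests, stati);

        free(send_count); free(recv_count); free(send_data); free(recv_data);
        free(requests);   free(stati);
        MPI_Finalize();
        return 0;
    }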