~vcs-imports/escript-finley/trunk

« back to all changes in this revision

Viewing changes to finley/src/Mesh_optimizeDOFDistribution.c

  • Committer: jfenwick
  • Date: 2010-10-11 01:48:14 UTC
  • Revision ID: svn-v4:77569008-7704-0410-b7a0-a92fef0b09fd:trunk:3259
Merging dudley and scons updates from branches

Show diffs side-by-side

added

removed

Lines of Context:
70
70
     size_t mpiSize_size;
71
71
     index_t* partition=NULL;
72
72
     Paso_Pattern *pattern=NULL;
73
 
     Paso_MPI_rank myRank,dest,source,current_rank, rank;
 
73
     Esys_MPI_rank myRank,dest,source,current_rank, rank;
74
74
     Finley_IndexList* index_list=NULL;
75
75
     float *xyz=NULL;
76
76
     int c;
77
77
     
78
 
     #ifdef PASO_MPI
 
78
     #ifdef ESYS_MPI
79
79
     MPI_Status status;
80
80
     #endif
81
81
 
212
212
               }
213
213
               THREAD_MEMFREE(loc_partition_count);
214
214
           }
215
 
           #ifdef PASO_MPI
 
215
           #ifdef ESYS_MPI
216
216
              /* recvbuf will be the concatenation of each CPU's contribution to new_distribution */
217
217
              MPI_Allgather(new_distribution, mpiSize, MPI_INT, recvbuf, mpiSize, MPI_INT, in->MPIInfo->comm);
218
218
           #else
235
235
 
236
236
           /* now the overlap needs to be created by sending the partition around*/
237
237
 
238
 
           dest=Paso_MPIInfo_mod(mpiSize, myRank + 1);
239
 
           source=Paso_MPIInfo_mod(mpiSize, myRank - 1);
 
238
           dest=Esys_MPIInfo_mod(mpiSize, myRank + 1);
 
239
           source=Esys_MPIInfo_mod(mpiSize, myRank - 1);
240
240
           current_rank=myRank;
241
241
           #pragma omp parallel for private(i)
242
242
           for (i=0;i<in->Nodes->numNodes;++i) setNewDOFId[i]=TRUE;
255
255
               }
256
256
 
257
257
               if (p<mpiSize-1) {  /* the final send can be skipped */
258
 
                  #ifdef PASO_MPI
 
258
                  #ifdef ESYS_MPI
259
259
                  MPI_Sendrecv_replace(newGlobalDOFID,len, MPI_INT,
260
260
                                       dest, in->MPIInfo->msg_tag_counter,
261
261
                                       source, in->MPIInfo->msg_tag_counter,
262
262
                                       in->MPIInfo->comm,&status);
263
263
                  #endif
264
264
                  in->MPIInfo->msg_tag_counter++;
265
 
                  current_rank=Paso_MPIInfo_mod(mpiSize, current_rank-1);
 
265
                  current_rank=Esys_MPIInfo_mod(mpiSize, current_rank-1);
266
266
              }
267
267
           }
268
268
           for (i=0;i<mpiSize+1;++i) distribution[i]=new_distribution[i];