~ubuntu-branches/ubuntu/vivid/mpich/vivid-proposed


Viewing changes to src/mpid/pamid/src/comm/mpid_selectcolls.c

  • Committer: Package Import Robot
  • Author(s): Anton Gladky
  • Date: 2014-04-01 20:24:20 UTC
  • mfrom: (5.2.4 sid)
  • Revision ID: package-import@ubuntu.com-20140401202420-t5ey1ia2klt5dkq3
Tags: 3.1-4
* [c3e3398] Disable test_primitives, which is unreliable on some platforms.
            (Closes: #743047)
* [265a699] Add minimal autotest.

@@ -24 +24 @@
 
 #include <mpidimpl.h>
 
+pami_metadata_t       ext_metadata;
+advisor_algorithm_t   ext_algorithms[1];
+external_algorithm_t  ext_algorithm;
+
+#define MPIDI_UPDATE_COLLSEL_EXT_ALGO(cb,nm,xfer_type) {             \
+  ext_algorithm.callback               =  cb;                        \
+  ext_algorithm.cookie                 =  comm;                      \
+  ext_metadata.name                    =  nm;                        \
+  ext_algorithms[0].algorithm.external =  ext_algorithm;             \
+  ext_algorithms[0].metadata           = &ext_metadata;              \
+  ext_algorithms[0].algorithm_type     =  COLLSEL_EXTERNAL_ALGO;     \
+  pamix_collsel_register_algorithms(MPIDI_Collsel_advisor_table,     \
+                                   comm->mpid.geometry,              \
+                                   xfer_type,                        \
+                                  &ext_algorithms[0],                \
+                                   1);                               \
+}
+
+
+pami_result_t MPIDI_Register_algorithms_ext(void                 *cookie,
+                                            pami_xfer_type_t      collective,
+                                            advisor_algorithm_t **algorithms,
+                                            size_t               *num_algorithms)
+{
+  external_algorithm_fn callback;
+  char                 *algoname;
+
+  switch(collective)
+  {
+      case PAMI_XFER_BROADCAST:  callback = MPIDO_CSWrapper_bcast; algoname = "EXT:Bcast:P2P:P2P"; break;
+      case PAMI_XFER_ALLREDUCE:  callback = MPIDO_CSWrapper_allreduce; algoname = "EXT:Allreduce:P2P:P2P"; break;
+      case PAMI_XFER_REDUCE:  callback = MPIDO_CSWrapper_reduce; algoname = "EXT:Reduce:P2P:P2P"; break;
+      case PAMI_XFER_ALLGATHER:  callback = MPIDO_CSWrapper_allgather; algoname = "EXT:Allgather:P2P:P2P"; break;
+      case PAMI_XFER_ALLGATHERV_INT:  callback = MPIDO_CSWrapper_allgatherv; algoname = "EXT:Allgatherv:P2P:P2P"; break;
+      case PAMI_XFER_SCATTER:  callback = MPIDO_CSWrapper_scatter; algoname = "EXT:Scatter:P2P:P2P"; break;
+      case PAMI_XFER_SCATTERV_INT:  callback = MPIDO_CSWrapper_scatterv; algoname = "EXT:Scatterv:P2P:P2P"; break;
+      case PAMI_XFER_GATHER:  callback = MPIDO_CSWrapper_gather; algoname = "EXT:Gather:P2P:P2P"; break;
+      case PAMI_XFER_GATHERV_INT: callback = MPIDO_CSWrapper_gatherv; algoname = "EXT:Gatherv:P2P:P2P"; break;
+      case PAMI_XFER_BARRIER: callback = MPIDO_CSWrapper_barrier; algoname = "EXT:Barrier:P2P:P2P"; break;
+      case PAMI_XFER_ALLTOALL: callback = MPIDO_CSWrapper_alltoall; algoname = "EXT:Alltoall:P2P:P2P"; break;
+      case PAMI_XFER_ALLTOALLV_INT: callback = MPIDO_CSWrapper_alltoallv; algoname = "EXT:Alltoallv:P2P:P2P"; break;
+      case PAMI_XFER_SCAN: callback = MPIDO_CSWrapper_scan; algoname = "EXT:Scan:P2P:P2P"; break;
+      case PAMI_XFER_ALLGATHERV:
+      case PAMI_XFER_SCATTERV:
+      case PAMI_XFER_GATHERV:
+      case PAMI_XFER_ALLTOALLV:
+      case PAMI_XFER_REDUCE_SCATTER:
+           *num_algorithms = 0;
+           return PAMI_SUCCESS;
+      default: return -1;
+  }
+  *num_algorithms                      =  1;
+  ext_algorithm.callback               =  callback;
+  ext_algorithm.cookie                 =  cookie;
+  ext_metadata.name                    =  algoname;
+  ext_algorithms[0].algorithm.external =  ext_algorithm;
+  ext_algorithms[0].metadata           = &ext_metadata;
+  ext_algorithms[0].algorithm_type     =  COLLSEL_EXTERNAL_ALGO;
+  *algorithms                          = &ext_algorithms[0];
+  return PAMI_SUCCESS;
+}
+
 static char* MPIDI_Coll_type_name(int i)
 {
    switch(i)
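The added MPIDI_Register_algorithms_ext above is a plain table-driven registration callback: map the collective type to a wrapper function and an algorithm name, fill in a single advisor_algorithm_t, and report zero algorithms for the unsupported vector variants. A minimal, self-contained sketch of the same pattern, using hypothetical stand-in types (xfer_t, algo_t, register_ext) in place of the PAMI ones:

/* Sketch only: xfer_t, algo_t, and register_ext are illustrative
   stand-ins for the PAMI types used in the diff above. */
#include <stdio.h>
#include <stddef.h>

typedef enum { XFER_BCAST, XFER_BARRIER, XFER_UNSUPPORTED } xfer_t;
typedef int (*algo_fn)(void *state);

typedef struct {
    algo_fn     callback;   /* wrapper invoked by the selector   */
    const char *name;       /* e.g. "EXT:Bcast:P2P:P2P"          */
    void       *cookie;     /* per-communicator state            */
} algo_t;

static int my_bcast(void *state)   { (void)state; return 0; }
static int my_barrier(void *state) { (void)state; return 0; }

/* Like MPIDI_Register_algorithms_ext: hand back at most one
   external algorithm for a given collective, or none at all. */
static int register_ext(void *cookie, xfer_t collective,
                        algo_t *out, size_t *num)
{
    algo_fn     cb;
    const char *nm;

    switch (collective) {
    case XFER_BCAST:   cb = my_bcast;   nm = "EXT:Bcast:P2P:P2P";   break;
    case XFER_BARRIER: cb = my_barrier; nm = "EXT:Barrier:P2P:P2P"; break;
    default: *num = 0; return 0;   /* nothing to offer: still success */
    }
    out->callback = cb;
    out->name     = nm;
    out->cookie   = cookie;
    *num          = 1;
    return 0;
}

int main(void)
{
    algo_t a;
    size_t n = 0;
    register_ext(NULL, XFER_BCAST, &a, &n);
    printf("%zu algorithm(s); first: %s\n", n, n ? a.name : "(none)");
    return 0;
}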
@@ -79 +141 @@
       {
          /* For now, if there's a check_fn we will always call it and not cache.
             We *could* be smarter about this eventually.                        */
-         TRACE_ERR("Protocol %s setting to always query\n", comm->mpid.coll_metadata[coll][type][index].name);
+         TRACE_ERR("Protocol %s setting to always query/call check_fn\n", comm->mpid.coll_metadata[coll][type][index].name);
+         comm->mpid.user_selected_type[coll] = MPID_COLL_CHECK_FN_REQUIRED;
+      }
+      else /* No check fn but we still need to check metadata bits (query protocol)  */
+      {
+         TRACE_ERR("Protocol %s setting to always query/no check_fn\n", comm->mpid.coll_metadata[coll][type][index].name);
          comm->mpid.user_selected_type[coll] = MPID_COLL_ALWAYS_QUERY;
       }
 
@@ -190 +257 @@
       comm->mpid.user_selected_type[i] = MPID_COLL_NOSELECTION;
          if(MPIDI_Process.verbose >= MPIDI_VERBOSE_DETAILS_0 && comm->rank == 0)
             fprintf(stderr,"Setting up collective %d on comm %p\n", i, comm);
-      if(comm->mpid.coll_count[i][0] == 0)
+         if((comm->mpid.coll_count[i][0] == 0) && (comm->mpid.coll_count[i][1] == 0))
       {
-         if(MPIDI_Process.verbose >= MPIDI_VERBOSE_DETAILS_0 && comm->rank == 0)
-            fprintf(stderr,"There are no 'always works' protocols of type %d. This could be a problem later in your app\n", i);
          comm->mpid.user_selected_type[i] = MPID_COLL_USE_MPICH;
          comm->mpid.user_selected[i] = 0;
       }
-      else
+         else if(comm->mpid.coll_count[i][0] != 0)
       {
          comm->mpid.user_selected[i] = comm->mpid.coll_algorithm[i][0][0];
          memcpy(&comm->mpid.user_metadata[i], &comm->mpid.coll_metadata[i][0][0],
                sizeof(pami_metadata_t));
       }
+         else
+           {
+             MPIDI_Update_coll(i, MPID_COLL_QUERY, 0, comm);
+             /* even though it's a query protocol, say NOSELECTION
+                so the optcoll selection will override (maybe) */
+             comm->mpid.user_selected_type[i] = MPID_COLL_NOSELECTION;
+           }
    }
 
 
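The reworked selection above falls back in three tiers: use plain MPICH when neither an "always works" nor a query protocol exists, take the first "always works" algorithm when one does, and otherwise register the query protocol while leaving the type as NOSELECTION so the later optimized selection can override it. A compact sketch of that decision, with illustrative names standing in for comm->mpid.coll_count[i][0] and [i][1]:

/* Sketch of the three-way fallback this hunk introduces. */
#include <stdio.h>

typedef enum { USE_MPICH, FIRST_ALWAYS_WORKS, QUERY_PROTOCOL } choice_t;

static choice_t select_protocol(int count_always, int count_query)
{
    if (count_always == 0 && count_query == 0)
        return USE_MPICH;            /* nothing native: fall back   */
    if (count_always != 0)
        return FIRST_ALWAYS_WORKS;   /* take algorithm [i][0][0]    */
    return QUERY_PROTOCOL;           /* query-only: must ask first  */
}

int main(void)
{
    printf("%d %d %d\n",
           select_protocol(0, 0),    /* USE_MPICH          */
           select_protocol(2, 1),    /* FIRST_ALWAYS_WORKS */
           select_protocol(0, 3));   /* QUERY_PROTOCOL     */
    return 0;
}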
@@ -240 +312 @@
    }
    {
       TRACE_ERR("Checking alltoall\n");
-      char* names[] = {"PAMID_COLLECTIVE_ALLTOALL", NULL};
+      char* names[] = {"PAMID_COLLECTIVE_ALLTOALL", "MP_S_MPI_ALLTOALL", NULL};
       MPIDI_Check_protocols(names, comm, "alltoall", PAMI_XFER_ALLTOALL);
    }
+   comm->mpid.optreduce = 0;
+   envopts = getenv("PAMID_COLLECTIVE_REDUCE");
+   if(envopts != NULL)
+   {
+      if(strcasecmp(envopts, "GLUE_ALLREDUCE") == 0)
+      {
+         if(MPIDI_Process.verbose >= MPIDI_VERBOSE_DETAILS_0 && comm->rank == 0)
+            fprintf(stderr,"Selecting glue allreduce for reduce\n");
+         comm->mpid.optreduce = 1;
+      }
+   }
+   /* In addition to glue protocols, check for other PAMI protocols and check for PE now */
    {
       TRACE_ERR("Checking reduce\n");
       char* names[] = {"PAMID_COLLECTIVE_REDUCE", "MP_S_MPI_REDUCE", NULL};
@@ -250 +334 @@
    }
    {
       TRACE_ERR("Checking alltoallv\n");
-      char* names[] = {"PAMID_COLLECTIVE_ALLTOALLV", NULL};
+      char* names[] = {"PAMID_COLLECTIVE_ALLTOALLV", "MP_S_MPI_ALLTOALLV", NULL};
       MPIDI_Check_protocols(names, comm, "alltoallv", PAMI_XFER_ALLTOALLV_INT);
    }
    {
       TRACE_ERR("Checking gatherv\n");
-      char* names[] = {"PAMID_COLLECTIVE_GATHERV",  NULL};
+      char* names[] = {"PAMID_COLLECTIVE_GATHERV",  "MP_S_MPI_GATHERV", NULL};
       MPIDI_Check_protocols(names, comm, "gatherv", PAMI_XFER_GATHERV_INT);
    }
    {
       TRACE_ERR("Checking scan\n");
-      char* names[] = {"PAMID_COLLECTIVE_SCAN", NULL};
+      char* names[] = {"PAMID_COLLECTIVE_SCAN", "MP_S_MPI_SCAN", NULL};
       MPIDI_Check_protocols(names, comm, "scan", PAMI_XFER_SCAN);
    }
 
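Each hunk above appends a second, PE-style environment variable (MP_S_MPI_*) to the NULL-terminated name list handed to MPIDI_Check_protocols, which presumably consults the names in order. A hedged sketch of that scan, with first_env_setting as a hypothetical helper name:

/* Sketch of scanning a NULL-terminated env-var list such as
   {"PAMID_COLLECTIVE_SCAN", "MP_S_MPI_SCAN", NULL}. */
#include <stdio.h>
#include <stdlib.h>

static const char *first_env_setting(char *names[])
{
    for (int i = 0; names[i] != NULL; i++) {
        const char *v = getenv(names[i]);
        if (v != NULL)
            return v;        /* earlier names take precedence */
    }
    return NULL;             /* no override set               */
}

int main(void)
{
    char *names[] = {"PAMID_COLLECTIVE_SCAN", "MP_S_MPI_SCAN", NULL};
    const char *v = first_env_setting(names);
    printf("scan override: %s\n", v ? v : "(none)");
    return 0;
}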
@@ -284 +368 @@
       }
    }
    { /* In addition to glue protocols, check for other PAMI protocols and check for PE now */
-      char* names[] = {"PAMID_COLLECTIVE_SCATTERV", NULL};
+      char* names[] = {"PAMID_COLLECTIVE_SCATTERV", "MP_S_MPI_SCATTERV", NULL};
       MPIDI_Check_protocols(names, comm, "scatterv", PAMI_XFER_SCATTERV_INT);
-
-      /* Use MPICH on large communicators (Issue 7516 and ticket 595)*/
-      if((comm->mpid.user_selected_type[PAMI_XFER_SCATTERV_INT] ==
-          MPID_COLL_NOSELECTION) /* no env var selected */
-         && (comm->local_size > (16*1024))) /* and > 16k ranks */
-        {
-         comm->mpid.user_selected_type[PAMI_XFER_SCATTERV_INT] = MPID_COLL_USE_MPICH;
-         comm->mpid.user_selected[PAMI_XFER_SCATTERV_INT] = 0;
-        }
    }
 
    TRACE_ERR("Checking scatter\n");
@@ -310 +385 @@
       }
    }
    { /* In addition to glue protocols, check for other PAMI protocols and check for PE now */
-      char* names[] = {"PAMID_COLLECTIVE_SCATTER", NULL};
+      char* names[] = {"PAMID_COLLECTIVE_SCATTER", "MP_S_MPI_SCATTER", NULL};
       MPIDI_Check_protocols(names, comm, "scatter", PAMI_XFER_SCATTER);
    }
 
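Note that the scatterv hunk also deletes the old large-communicator guard, which forced the MPICH implementation above 16k ranks whenever no environment variable had chosen a protocol. A sketch of the removed threshold logic, with illustrative names:

/* Sketch of the size-threshold guard this diff removes: only
   override when the user made no explicit choice and the
   communicator is large. Names are illustrative. */
#include <stdio.h>

enum { NOSELECTION, USE_MPICH, USER_CHOICE };

static int pick_scatterv(int user_selected_type, int local_size)
{
    if (user_selected_type == NOSELECTION && local_size > 16 * 1024)
        return USE_MPICH;            /* old behavior: force fallback */
    return user_selected_type;       /* otherwise leave as selected  */
}

int main(void)
{
    printf("%d\n", pick_scatterv(NOSELECTION, 32 * 1024)); /* USE_MPICH */
    printf("%d\n", pick_scatterv(USER_CHOICE, 32 * 1024)); /* unchanged */
    return 0;
}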
@@ -384 +459 @@
       if(strcasecmp(envopts, "GLUE_REDUCE") == 0)
       {
          if(MPIDI_Process.verbose >= MPIDI_VERBOSE_DETAILS_0 && comm->rank == 0)
-            fprintf(stderr,"using glue_reduce for gather\n");
+            fprintf(stderr,"Selecting glue reduce for gather\n");
          comm->mpid.optgather = 1;
       }
    }
    { /* In addition to glue protocols, check for other PAMI protocols and check for PE now */
-      char* names[] = {"PAMID_COLLECTIVE_GATHER", NULL};
+      char* names[] = {"PAMID_COLLECTIVE_GATHER", "MP_S_MPI_GATHER", NULL};
       MPIDI_Check_protocols(names, comm, "gather", PAMI_XFER_GATHER);
    }
 
+   /*   If automatic collective selection is enabled and user didn't specifically overwrite
+      it, then use auto coll sel.. Otherwise, go through the manual coll sel code path. */
+   comm->mpid.collsel_fast_query = NULL; /* Init to NULL.. Should only have a value if we create query */
+   if(MPIDI_Process.optimized.auto_select_colls != MPID_AUTO_SELECT_COLLS_NONE && MPIDI_Process.optimized.auto_select_colls != MPID_AUTO_SELECT_COLLS_TUNE && comm->local_size > 1)
+   {
+     /* Create a fast query object, cache it on the comm/geometry and use it in each collective */
+     pami_extension_collsel_query_create pamix_collsel_query_create =
+      (pami_extension_collsel_query_create) PAMI_Extension_symbol(MPIDI_Collsel_extension,
+                                                                        "Collsel_query_create");
+     if(pamix_collsel_query_create != NULL)
+     {
+       pamix_collsel_query_create(MPIDI_Collsel_advisor_table, comm->mpid.geometry, &(comm->mpid.collsel_fast_query));
+     }
+
+     MPIDI_Pamix_collsel_advise = /* Get the function pointer and cache it */
+      (pami_extension_collsel_advise) PAMI_Extension_symbol(MPIDI_Collsel_extension,
+                                                                        "Collsel_advise");
+
+     pami_extension_collsel_register_algorithms pamix_collsel_register_algorithms =
+      (pami_extension_collsel_register_algorithms) PAMI_Extension_symbol(MPIDI_Collsel_extension,
+                                                                        "Collsel_register_algorithms");
+    if(pamix_collsel_register_algorithms != NULL)
+    {
+
+      /* ************ Barrier ************ */
+      if((MPIDI_Process.optimized.auto_select_colls & MPID_AUTO_SELECT_COLLS_BARRIER) &&
+         comm->mpid.user_selected_type[PAMI_XFER_BARRIER] == MPID_COLL_NOSELECTION)
+      {
+         comm->coll_fns->Barrier      = MPIDO_Barrier_simple;
+         MPIDI_UPDATE_COLLSEL_EXT_ALGO(MPIDO_CSWrapper_barrier,"EXT:Barrier:P2P:P2P",PAMI_XFER_BARRIER);
+      }
+      /* ************ Bcast ************ */
+      if((MPIDI_Process.optimized.auto_select_colls & MPID_AUTO_SELECT_COLLS_BCAST) &&
+         comm->mpid.user_selected_type[PAMI_XFER_BROADCAST] == MPID_COLL_NOSELECTION)
+      {
+         comm->coll_fns->Bcast        = MPIDO_Bcast_simple;
+         MPIDI_UPDATE_COLLSEL_EXT_ALGO(MPIDO_CSWrapper_bcast,"EXT:Bcast:P2P:P2P",PAMI_XFER_BROADCAST);
+      }
+      /* ************ Allreduce ************ */
+      if((MPIDI_Process.optimized.auto_select_colls & MPID_AUTO_SELECT_COLLS_ALLREDUCE) &&
+         comm->mpid.user_selected_type[PAMI_XFER_ALLREDUCE] == MPID_COLL_NOSELECTION)
+      {
+         comm->coll_fns->Allreduce    = MPIDO_Allreduce_simple;
+         MPIDI_UPDATE_COLLSEL_EXT_ALGO(MPIDO_CSWrapper_allreduce,"EXT:Allreduce:P2P:P2P",PAMI_XFER_ALLREDUCE);
+      }
+      /* ************ Allgather ************ */
+      if((MPIDI_Process.optimized.auto_select_colls & MPID_AUTO_SELECT_COLLS_ALLGATHER) &&
+         comm->mpid.user_selected_type[PAMI_XFER_ALLGATHER] == MPID_COLL_NOSELECTION)
+      {
+         comm->coll_fns->Allgather    = MPIDO_Allgather_simple;
+         MPIDI_UPDATE_COLLSEL_EXT_ALGO(MPIDO_CSWrapper_allgather,"EXT:Allgather:P2P:P2P",PAMI_XFER_ALLGATHER);
+      }
+      /* ************ Allgatherv ************ */
+      if((MPIDI_Process.optimized.auto_select_colls & MPID_AUTO_SELECT_COLLS_ALLGATHERV) &&
+         comm->mpid.user_selected_type[PAMI_XFER_ALLGATHERV_INT] == MPID_COLL_NOSELECTION)
+      {
+         comm->coll_fns->Allgatherv   = MPIDO_Allgatherv_simple;
+         MPIDI_UPDATE_COLLSEL_EXT_ALGO(MPIDO_CSWrapper_allgatherv,"EXT:Allgatherv:P2P:P2P",PAMI_XFER_ALLGATHERV_INT);
+      }
+      /* ************ Scatterv ************ */
+      if((MPIDI_Process.optimized.auto_select_colls & MPID_AUTO_SELECT_COLLS_SCATTERV) &&
+         comm->mpid.user_selected_type[PAMI_XFER_SCATTERV_INT] == MPID_COLL_NOSELECTION)
+      {
+         comm->coll_fns->Scatterv     = MPIDO_Scatterv_simple;
+         MPIDI_UPDATE_COLLSEL_EXT_ALGO(MPIDO_CSWrapper_scatterv,"EXT:Scatterv:P2P:P2P",PAMI_XFER_SCATTERV_INT);
+      }
+      /* ************ Scatter ************ */
+      if((MPIDI_Process.optimized.auto_select_colls & MPID_AUTO_SELECT_COLLS_SCATTER) &&
+         comm->mpid.user_selected_type[PAMI_XFER_SCATTER] == MPID_COLL_NOSELECTION)
+      {
+         comm->coll_fns->Scatter      = MPIDO_Scatter_simple;
+         MPIDI_UPDATE_COLLSEL_EXT_ALGO(MPIDO_CSWrapper_scatter,"EXT:Scatter:P2P:P2P",PAMI_XFER_SCATTER);
+      }
+      /* ************ Gather ************ */
+      if((MPIDI_Process.optimized.auto_select_colls & MPID_AUTO_SELECT_COLLS_GATHER) &&
+         comm->mpid.user_selected_type[PAMI_XFER_GATHER] == MPID_COLL_NOSELECTION)
+      {
+         comm->coll_fns->Gather       = MPIDO_Gather_simple;
+         MPIDI_UPDATE_COLLSEL_EXT_ALGO(MPIDO_CSWrapper_gather,"EXT:Gather:P2P:P2P",PAMI_XFER_GATHER);
+      }
+      /* ************ Alltoallv ************ */
+      if((MPIDI_Process.optimized.auto_select_colls & MPID_AUTO_SELECT_COLLS_ALLTOALLV) &&
+         comm->mpid.user_selected_type[PAMI_XFER_ALLTOALLV_INT] == MPID_COLL_NOSELECTION)
+      {
+         comm->coll_fns->Alltoallv    = MPIDO_Alltoallv_simple;
+         MPIDI_UPDATE_COLLSEL_EXT_ALGO(MPIDO_CSWrapper_alltoallv,"EXT:Alltoallv:P2P:P2P",PAMI_XFER_ALLTOALLV_INT);
+      }
+      /* ************ Alltoall ************ */
+      if((MPIDI_Process.optimized.auto_select_colls & MPID_AUTO_SELECT_COLLS_ALLTOALL) &&
+         comm->mpid.user_selected_type[PAMI_XFER_ALLTOALL] == MPID_COLL_NOSELECTION)
+      {
+         comm->coll_fns->Alltoall     = MPIDO_Alltoall_simple;
+         MPIDI_UPDATE_COLLSEL_EXT_ALGO(MPIDO_CSWrapper_alltoall,"EXT:Alltoall:P2P:P2P",PAMI_XFER_ALLTOALL);
+      }
+      /* ************ Gatherv ************ */
+      if((MPIDI_Process.optimized.auto_select_colls & MPID_AUTO_SELECT_COLLS_GATHERV) &&
+         comm->mpid.user_selected_type[PAMI_XFER_GATHERV_INT] == MPID_COLL_NOSELECTION)
+      {
+         comm->coll_fns->Gatherv      = MPIDO_Gatherv_simple;
+         MPIDI_UPDATE_COLLSEL_EXT_ALGO(MPIDO_CSWrapper_gatherv,"EXT:Gatherv:P2P:P2P",PAMI_XFER_GATHERV_INT);
+      }
+      /* ************ Reduce ************ */
+      if((MPIDI_Process.optimized.auto_select_colls & MPID_AUTO_SELECT_COLLS_REDUCE) &&
+         comm->mpid.user_selected_type[PAMI_XFER_REDUCE] == MPID_COLL_NOSELECTION)
+      {
+         comm->coll_fns->Reduce       = MPIDO_Reduce_simple;
+         MPIDI_UPDATE_COLLSEL_EXT_ALGO(MPIDO_CSWrapper_reduce,"EXT:Reduce:P2P:P2P",PAMI_XFER_REDUCE);
+      }
+      /* ************ Scan ************ */
+      if((MPIDI_Process.optimized.auto_select_colls & MPID_AUTO_SELECT_COLLS_SCAN) &&
+         comm->mpid.user_selected_type[PAMI_XFER_SCAN] == MPID_COLL_NOSELECTION)
+      {
+         comm->coll_fns->Scan         = MPIDO_Scan_simple;
+         MPIDI_UPDATE_COLLSEL_EXT_ALGO(MPIDO_CSWrapper_scan,"EXT:Scan:P2P:P2P",PAMI_XFER_SCAN);
+      }
+    }
+   }
    TRACE_ERR("MPIDI_Comm_coll_envvars exit\n");
 }
 
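The auto-selection block above resolves each optional Collsel entry point by name through PAMI_Extension_symbol and skips the feature when the symbol is absent. The same resolve-then-guard pattern, sketched with POSIX dlsym standing in for the PAMI extension lookup:

/* Sketch only: dlsym plays the role PAMI_Extension_symbol plays in
   the diff (look up an optional entry point by name, skip the
   feature if it is absent). Link with -ldl on glibc systems. */
#include <dlfcn.h>
#include <stdio.h>

typedef int (*query_create_fn)(void *table, void *geometry, void **query);

int main(void)
{
    void *ext = dlopen(NULL, RTLD_NOW);   /* search the running image */
    query_create_fn query_create =
        (query_create_fn) dlsym(ext, "Collsel_query_create");

    if (query_create != NULL) {
        /* extension present: create and cache the fast-query object */
        void *query = NULL;
        query_create(NULL, NULL, &query);
    } else {
        /* extension absent: fall back to the manual selection path */
        printf("Collsel_query_create not available\n");
    }
    dlclose(ext);
    return 0;
}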
@@ -476 +668 @@
             {
                fprintf(stderr,"comm[%p] coll type %d (%s), \"glue\" algorithm: GLUE_BCAST\n", comm, i, MPIDI_Coll_type_name(i));
             }
+            if(i == PAMI_XFER_REDUCE)
+            {
+               fprintf(stderr,"comm[%p] coll type %d (%s), \"glue\" algorithm: GLUE_ALLREDUCE\n", comm, i, MPIDI_Coll_type_name(i));
+            }
          }
       }
    }
-   /* Determine if we have protocols for these maybe, rather than just setting them? */
+   /* Determine if we have protocols for these maybe, rather than just setting them?  */
    comm->coll_fns->Barrier      = MPIDO_Barrier;
    comm->coll_fns->Bcast        = MPIDO_Bcast;
    comm->coll_fns->Allreduce    = MPIDO_Allreduce;
@@ -495 +691 @@
    comm->coll_fns->Scan         = MPIDO_Scan;
    comm->coll_fns->Exscan       = MPIDO_Exscan;
 
+   /* MPI-3 Support, no optimized collectives hooked in yet */
+   comm->coll_fns->Ibarrier_sched              = MPIR_Ibarrier_intra;
+   comm->coll_fns->Ibcast_sched                = MPIR_Ibcast_intra;
+   comm->coll_fns->Igather_sched               = MPIR_Igather_intra;
+   comm->coll_fns->Igatherv_sched              = MPIR_Igatherv;
+   comm->coll_fns->Iscatter_sched              = MPIR_Iscatter_intra;
+   comm->coll_fns->Iscatterv_sched             = MPIR_Iscatterv;
+   comm->coll_fns->Iallgather_sched            = MPIR_Iallgather_intra;
+   comm->coll_fns->Iallgatherv_sched           = MPIR_Iallgatherv_intra;
+   comm->coll_fns->Ialltoall_sched             = MPIR_Ialltoall_intra;
+   comm->coll_fns->Ialltoallv_sched            = MPIR_Ialltoallv_intra;
+   comm->coll_fns->Ialltoallw_sched            = MPIR_Ialltoallw_intra;
+   comm->coll_fns->Iallreduce_sched            = MPIR_Iallreduce_intra;
+   comm->coll_fns->Ireduce_sched               = MPIR_Ireduce_intra;
+   comm->coll_fns->Ireduce_scatter_sched       = MPIR_Ireduce_scatter_intra;
+   comm->coll_fns->Ireduce_scatter_block_sched = MPIR_Ireduce_scatter_block_intra;
+   comm->coll_fns->Iscan_sched                 = MPIR_Iscan_rec_dbl;
+   comm->coll_fns->Iexscan_sched               = MPIR_Iexscan;
+   comm->coll_fns->Neighbor_allgather    = MPIR_Neighbor_allgather_default;
+   comm->coll_fns->Neighbor_allgatherv   = MPIR_Neighbor_allgatherv_default;
+   comm->coll_fns->Neighbor_alltoall     = MPIR_Neighbor_alltoall_default;
+   comm->coll_fns->Neighbor_alltoallv    = MPIR_Neighbor_alltoallv_default;
+   comm->coll_fns->Neighbor_alltoallw    = MPIR_Neighbor_alltoallw_default;
+
+   /* MPI-3 Support, optimized collectives hooked in */
+   comm->coll_fns->Ibarrier_req              = MPIDO_Ibarrier;
+   comm->coll_fns->Ibcast_req                = MPIDO_Ibcast;
+   comm->coll_fns->Iallgather_req            = MPIDO_Iallgather;
+   comm->coll_fns->Iallgatherv_req           = MPIDO_Iallgatherv;
+   comm->coll_fns->Iallreduce_req            = MPIDO_Iallreduce;
+   comm->coll_fns->Ialltoall_req             = MPIDO_Ialltoall;
+   comm->coll_fns->Ialltoallv_req            = MPIDO_Ialltoallv;
+   comm->coll_fns->Ialltoallw_req            = MPIDO_Ialltoallw;
+   comm->coll_fns->Iexscan_req               = MPIDO_Iexscan;
+   comm->coll_fns->Igather_req               = MPIDO_Igather;
+   comm->coll_fns->Igatherv_req              = MPIDO_Igatherv;
+   comm->coll_fns->Ireduce_scatter_block_req = MPIDO_Ireduce_scatter_block;
+   comm->coll_fns->Ireduce_scatter_req       = MPIDO_Ireduce_scatter;
+   comm->coll_fns->Ireduce_req               = MPIDO_Ireduce;
+   comm->coll_fns->Iscan_req                 = MPIDO_Iscan;
+   comm->coll_fns->Iscatter_req              = MPIDO_Iscatter;
+   comm->coll_fns->Iscatterv_req             = MPIDO_Iscatterv;
+
    TRACE_ERR("MPIDI_Comm_coll_query exit\n");
 }
 