2
/******************************************************************************
 * Copyright (c) 2011, Duane Merrill.  All rights reserved.
 * Copyright (c) 2011-2013, NVIDIA CORPORATION.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in the
 *       documentation and/or other materials provided with the distribution.
 *     * Neither the name of the NVIDIA CORPORATION nor the
 *       names of its contributors may be used to endorse or promote products
 *       derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 ******************************************************************************/
32
* cub::DeviceScan provides operations for computing a device-wide, parallel prefix scan across data items residing within global memory.
40
#include "block/block_scan_tiles.cuh"
41
#include "../thread/thread_operators.cuh"
42
#include "../grid/grid_queue.cuh"
43
#include "../util_debug.cuh"
44
#include "../util_device.cuh"
45
#include "../util_namespace.cuh"
47
/// Optional outer namespace(s)
54
/******************************************************************************
56
*****************************************************************************/
58
#ifndef DOXYGEN_SHOULD_SKIP_THIS // Do not document
62
/**
 * Initialization kernel for tile status initialization (multi-block).
 *
 * Grid/block layout: a 1D grid of 1D blocks with enough total threads to
 * cover \p num_tiles; block 0 additionally initializes the out-of-bounds
 * padding descriptors and resets the grid queue.
 */
template <
    typename T,                                     ///< Scan value type
    typename SizeT>                                 ///< Integer type used for global array indexing
__global__ void ScanInitKernel(
    GridQueue<SizeT>        grid_queue,             ///< [in] Descriptor for performing dynamic mapping of input tiles to thread blocks
    ScanTileDescriptor<T>   *d_tile_status,         ///< [out] Tile status words
    int                     num_tiles)              ///< [in] Number of tiles
{
    typedef ScanTileDescriptor<T> ScanTileDescriptorT;

    enum
    {
        // Number of out-of-bounds padding descriptors preceding the real ones
        TILE_STATUS_PADDING = PtxArchProps::WARP_THREADS,
    };

    // Reset queue descriptor (exactly one thread in the whole grid)
    if ((blockIdx.x == 0) && (threadIdx.x == 0)) grid_queue.ResetDrain(num_tiles);

    // Initialize tile status: one descriptor per tile, marked not-yet-computed
    int tile_offset = (blockIdx.x * blockDim.x) + threadIdx.x;
    if (tile_offset < num_tiles)
    {
        d_tile_status[TILE_STATUS_PADDING + tile_offset].status = SCAN_TILE_INVALID;
    }

    // Initialize the leading padding descriptors as out-of-bounds
    if ((blockIdx.x == 0) && (threadIdx.x < TILE_STATUS_PADDING))
    {
        d_tile_status[threadIdx.x].status = SCAN_TILE_OOB;
    }
}
99
/**
 * Scan kernel entry point (multi-block).
 *
 * Each thread block consumes input tiles dynamically (via \p queue) and
 * communicates tile aggregates/prefixes through \p d_tile_status.
 */
template <
    typename    BlockScanTilesPolicy,       ///< Tuning policy for cub::BlockScanTiles abstraction
    typename    InputIteratorRA,            ///< Random-access iterator type for input (may be a simple pointer type)
    typename    OutputIteratorRA,           ///< Random-access iterator type for output (may be a simple pointer type)
    typename    T,                          ///< The scan data type
    typename    ScanOp,                     ///< Binary scan operator type having member <tt>T operator()(const T &a, const T &b)</tt>
    typename    Identity,                   ///< Identity value type (cub::NullType for inclusive scans)
    typename    SizeT>                      ///< Integer type used for global array indexing
__launch_bounds__ (int(BlockScanTilesPolicy::BLOCK_THREADS))
__global__ void ScanKernel(
    InputIteratorRA         d_in,           ///< Input data
    OutputIteratorRA        d_out,          ///< Output data
    ScanTileDescriptor<T>   *d_tile_status, ///< Global list of tile status
    ScanOp                  scan_op,        ///< Binary scan operator
    Identity                identity,       ///< Identity element
    SizeT                   num_items,      ///< Total number of scan items for the entire problem
    GridQueue<int>          queue)          ///< Descriptor for performing dynamic mapping of tile data to thread blocks
{
    enum
    {
        // Number of out-of-bounds padding descriptors preceding the real ones
        TILE_STATUS_PADDING = PtxArchProps::WARP_THREADS,
    };

    // Thread block type for scanning input tiles
    typedef BlockScanTiles<
        BlockScanTilesPolicy,
        InputIteratorRA,
        OutputIteratorRA,
        ScanOp,
        Identity,
        SizeT> BlockScanTilesT;

    // Shared memory for BlockScanTiles
    __shared__ typename BlockScanTilesT::TempStorage temp_storage;

    // Process tiles, skipping past the leading padding descriptors
    BlockScanTilesT(temp_storage, d_in, d_out, scan_op, identity).ConsumeTiles(
        num_items,
        queue,
        d_tile_status + TILE_STATUS_PADDING);
}
144
#endif // DOXYGEN_SHOULD_SKIP_THIS
148
/******************************************************************************
150
*****************************************************************************/
153
* \brief DeviceScan provides operations for computing a device-wide, parallel prefix scan across data items residing within global memory. ![](device_scan.png)
154
* \ingroup DeviceModule
157
* Given a list of input elements and a binary reduction operator, a [<em>prefix scan</em>](http://en.wikipedia.org/wiki/Prefix_sum)
158
* produces an output list where each element is computed to be the reduction
159
* of the elements occurring earlier in the input list. <em>Prefix sum</em>
160
* connotes a prefix scan with the addition operator. The term \em inclusive indicates
161
* that the <em>i</em><sup>th</sup> output reduction incorporates the <em>i</em><sup>th</sup> input.
162
* The term \em exclusive indicates the <em>i</em><sup>th</sup> input is not incorporated into
163
* the <em>i</em><sup>th</sup> output reduction.
165
* \par Usage Considerations
166
* \cdp_class{DeviceScan}
170
* \image html scan_perf.png
175
#ifndef DOXYGEN_SHOULD_SKIP_THIS // Do not document
177
/******************************************************************************
178
* Constants and typedefs
179
******************************************************************************/
181
/// Generic structure for encapsulating dispatch properties.  Mirrors the constants within BlockScanTilesPolicy.
/// NOTE(review): name preserves the original "Dispach" spelling for interface compatibility.
struct KernelDispachParams
{
    int                     block_threads;      // Threads per block the kernel was compiled for
    int                     items_per_thread;   // Items processed per thread
    BlockLoadAlgorithm      load_policy;        // Strategy for loading tiles
    BlockStoreAlgorithm     store_policy;       // Strategy for storing tiles
    BlockScanAlgorithm      scan_algorithm;     // Block-wide scan strategy
    int                     tile_size;          // block_threads * items_per_thread

    /// Initialize the members from the compile-time constants of a BlockScanTilesPolicy
    template <typename BlockScanTilesPolicy>
    __host__ __device__ __forceinline__
    void Init()
    {
        block_threads               = BlockScanTilesPolicy::BLOCK_THREADS;
        items_per_thread            = BlockScanTilesPolicy::ITEMS_PER_THREAD;
        load_policy                 = BlockScanTilesPolicy::LOAD_ALGORITHM;
        store_policy                = BlockScanTilesPolicy::STORE_ALGORITHM;
        scan_algorithm              = BlockScanTilesPolicy::SCAN_ALGORITHM;

        tile_size                   = block_threads * items_per_thread;
    }

    /// Print the dispatch parameters (debugging aid)
    __host__ __device__ __forceinline__
    void Print()
    {
        printf("%d, %d, %d, %d, %d",
            block_threads,
            items_per_thread,
            load_policy,
            store_policy,
            scan_algorithm);
    }
};
221
/******************************************************************************
223
******************************************************************************/
226
/// Specializations of tuned policy types for different PTX architectures
template <typename T, typename SizeT, int ARCH>
struct TunedPolicies;

/// SM35 tune
template <typename T, typename SizeT>
struct TunedPolicies<T, SizeT, 350>
{
    enum
    {
        NOMINAL_4B_ITEMS_PER_THREAD = 16,
        // Scale items-per-thread down for larger value types (nominal assumes 4B)
        ITEMS_PER_THREAD            = CUB_MIN(NOMINAL_4B_ITEMS_PER_THREAD, CUB_MAX(1, (NOMINAL_4B_ITEMS_PER_THREAD * 4 / sizeof(T)))),
    };

    // ScanPolicy: GTX Titan: 29.1B items/s (232.4 GB/s) @ 48M 32-bit T
    typedef BlockScanTilesPolicy<128, ITEMS_PER_THREAD, BLOCK_LOAD_DIRECT, false, LOAD_LDG, BLOCK_STORE_WARP_TRANSPOSE, true, BLOCK_SCAN_RAKING_MEMOIZE> ScanPolicy;
};

/// SM30 tune
template <typename T, typename SizeT>
struct TunedPolicies<T, SizeT, 300>
{
    enum
    {
        NOMINAL_4B_ITEMS_PER_THREAD = 9,
        ITEMS_PER_THREAD            = CUB_MIN(NOMINAL_4B_ITEMS_PER_THREAD, CUB_MAX(1, (NOMINAL_4B_ITEMS_PER_THREAD * 4 / sizeof(T)))),
    };

    typedef BlockScanTilesPolicy<256, ITEMS_PER_THREAD, BLOCK_LOAD_WARP_TRANSPOSE, false, LOAD_DEFAULT, BLOCK_STORE_WARP_TRANSPOSE, false, BLOCK_SCAN_RAKING_MEMOIZE> ScanPolicy;
};

/// SM20 tune
template <typename T, typename SizeT>
struct TunedPolicies<T, SizeT, 200>
{
    enum
    {
        NOMINAL_4B_ITEMS_PER_THREAD = 15,
        ITEMS_PER_THREAD            = CUB_MIN(NOMINAL_4B_ITEMS_PER_THREAD, CUB_MAX(1, (NOMINAL_4B_ITEMS_PER_THREAD * 4 / sizeof(T)))),
    };

    // ScanPolicy: GTX 580: 20.3B items/s (162.3 GB/s) @ 48M 32-bit T
    typedef BlockScanTilesPolicy<128, ITEMS_PER_THREAD, BLOCK_LOAD_WARP_TRANSPOSE, false, LOAD_DEFAULT, BLOCK_STORE_WARP_TRANSPOSE, false, BLOCK_SCAN_RAKING_MEMOIZE> ScanPolicy;
};

/// SM10 tune
template <typename T, typename SizeT>
struct TunedPolicies<T, SizeT, 100>
{
    enum
    {
        NOMINAL_4B_ITEMS_PER_THREAD = 7,
        ITEMS_PER_THREAD            = CUB_MIN(NOMINAL_4B_ITEMS_PER_THREAD, CUB_MAX(1, (NOMINAL_4B_ITEMS_PER_THREAD * 4 / sizeof(T)))),
    };

    typedef BlockScanTilesPolicy<128, ITEMS_PER_THREAD, BLOCK_LOAD_TRANSPOSE, false, LOAD_DEFAULT, BLOCK_STORE_TRANSPOSE, false, BLOCK_SCAN_RAKING> ScanPolicy;
};
283
/// Tuning policy for the PTX architecture that DeviceScan operations will get dispatched to
template <typename T, typename SizeT>
struct PtxDefaultPolicies
{
    // Nearest tuned architecture at or below the PTX target of the current compiler pass
    static const int PTX_TUNE_ARCH =    (CUB_PTX_ARCH >= 350) ?
                                            350 :
                                            (CUB_PTX_ARCH >= 300) ?
                                                300 :
                                                (CUB_PTX_ARCH >= 200) ?
                                                    200 :
                                                    100;

    // Tuned policy set for the current PTX compiler pass
    typedef TunedPolicies<T, SizeT, PTX_TUNE_ARCH> PtxTunedPolicies;

    // ScanPolicy that opaquely derives from the specialization corresponding to the current PTX compiler pass
    struct ScanPolicy : PtxTunedPolicies::ScanPolicy {};

    /**
     * Initialize dispatch params with the policies corresponding to the PTX assembly we will use.
     * Called from the host, where \p ptx_version reflects the actual device rather than the compiler pass.
     */
    static void InitDispatchParams(int ptx_version, KernelDispachParams &scan_dispatch_params)
    {
        if (ptx_version >= 350)
        {
            typedef TunedPolicies<T, SizeT, 350> TunedPolicies;
            scan_dispatch_params.Init<typename TunedPolicies::ScanPolicy>();
        }
        else if (ptx_version >= 300)
        {
            typedef TunedPolicies<T, SizeT, 300> TunedPolicies;
            scan_dispatch_params.Init<typename TunedPolicies::ScanPolicy>();
        }
        else if (ptx_version >= 200)
        {
            typedef TunedPolicies<T, SizeT, 200> TunedPolicies;
            scan_dispatch_params.Init<typename TunedPolicies::ScanPolicy>();
        }
        else
        {
            typedef TunedPolicies<T, SizeT, 100> TunedPolicies;
            scan_dispatch_params.Init<typename TunedPolicies::ScanPolicy>();
        }
    }
};
330
/******************************************************************************
332
******************************************************************************/
335
/**
 * Internal dispatch routine.
 *
 * Sizes/aliases temporary storage, initializes tile descriptors and the grid
 * queue via \p init_kernel, then launches \p scan_kernel.  When
 * \p d_temp_storage is NULL only the required allocation size is computed.
 */
template <
    typename                    ScanInitKernelPtr,          ///< Function type of cub::ScanInitKernel
    typename                    ScanKernelPtr,              ///< Function type of cub::ScanKernel
    typename                    InputIteratorRA,            ///< Random-access iterator type for input (may be a simple pointer type)
    typename                    OutputIteratorRA,           ///< Random-access iterator type for output (may be a simple pointer type)
    typename                    ScanOp,                     ///< Binary scan operator type having member <tt>T operator()(const T &a, const T &b)</tt>
    typename                    Identity,                   ///< Identity value type (cub::NullType for inclusive scans)
    typename                    SizeT>                      ///< Integer type used for global array indexing
__host__ __device__ __forceinline__
static cudaError_t Dispatch(
    int                         ptx_version,                ///< [in] PTX version
    void                        *d_temp_storage,            ///< [in] %Device allocation of temporary storage.  When NULL, the required allocation size is returned in \p temp_storage_bytes and no work is done.
    size_t                      &temp_storage_bytes,        ///< [in,out] Size in bytes of \p d_temp_storage allocation.
    ScanInitKernelPtr           init_kernel,                ///< [in] Kernel function pointer to parameterization of cub::ScanInitKernel
    ScanKernelPtr               scan_kernel,                ///< [in] Kernel function pointer to parameterization of cub::ScanKernel
    KernelDispachParams         &scan_dispatch_params,      ///< [in] Dispatch parameters that match the policy that \p scan_kernel was compiled for
    InputIteratorRA             d_in,                       ///< [in] Iterator pointing to scan input
    OutputIteratorRA            d_out,                      ///< [in] Iterator pointing to scan output
    ScanOp                      scan_op,                    ///< [in] Binary scan operator
    Identity                    identity,                   ///< [in] Identity element
    SizeT                       num_items,                  ///< [in] Total number of items to scan
    cudaStream_t                stream              = 0,    ///< [in] <b>[optional]</b> CUDA stream to launch kernels within.  Default is stream<sub>0</sub>.
    bool                        stream_synchronous  = false)///< [in] <b>[optional]</b> Whether or not to synchronize the stream after every kernel launch to check for errors.  Default is \p false.
{

#ifndef CUB_RUNTIME_ENABLED

    // Kernel launch not supported from this device
    return CubDebug(cudaErrorNotSupported);

#else

    enum
    {
        // Leading out-of-bounds padding descriptors (one warp's worth)
        TILE_STATUS_PADDING     = 32,
        INIT_KERNEL_THREADS     = 128
    };

    // Data type of the scan
    typedef typename std::iterator_traits<InputIteratorRA>::value_type T;

    // Tile status descriptor type
    typedef ScanTileDescriptor<T> ScanTileDescriptorT;

    cudaError error = cudaSuccess;
    do
    {
        // Number of input tiles
        int num_tiles = (num_items + scan_dispatch_params.tile_size - 1) / scan_dispatch_params.tile_size;

        // Temporary storage allocation requirements
        void* allocations[2];
        size_t allocation_sizes[2] =
        {
            (num_tiles + TILE_STATUS_PADDING) * sizeof(ScanTileDescriptorT),    // bytes needed for tile status descriptors
            GridQueue<int>::AllocationSize()                                    // bytes needed for grid queue descriptor
        };

        // Alias temporaries (or set the necessary size of the storage allocation)
        if (CubDebug(error = AliasTemporaries(d_temp_storage, temp_storage_bytes, allocations, allocation_sizes))) break;

        // Return if the caller is simply requesting the size of the storage allocation
        if (d_temp_storage == NULL)
            return cudaSuccess;

        // Global list of tile status
        ScanTileDescriptorT *d_tile_status = (ScanTileDescriptorT*) allocations[0];

        // Grid queue descriptor
        GridQueue<int> queue(allocations[1]);

        // Log init_kernel configuration
        int init_grid_size = (num_tiles + INIT_KERNEL_THREADS - 1) / INIT_KERNEL_THREADS;
        if (stream_synchronous) CubLog("Invoking init_kernel<<<%d, %d, 0, %lld>>>()\n", init_grid_size, INIT_KERNEL_THREADS, (long long) stream);

        // Invoke init_kernel to initialize tile descriptors and queue descriptors
        init_kernel<<<init_grid_size, INIT_KERNEL_THREADS, 0, stream>>>(
            queue,
            d_tile_status,
            num_tiles);

        // Sync the stream if specified
        if (stream_synchronous && (CubDebug(error = SyncStream(stream)))) break;

        // Get grid size for multi-block kernel
        int scan_grid_size;
        int multi_sm_occupancy = -1;
        if (ptx_version < 200)
        {
            // We don't have atomics (or don't have fast ones), so just assign one
            // block per tile (limited to 65K tiles)
            scan_grid_size = num_tiles;
        }
        else
        {
            // We have atomics and can thus reuse blocks across multiple tiles using a queue descriptor.
            // Get device ordinal
            int device_ordinal;
            if (CubDebug(error = cudaGetDevice(&device_ordinal))) break;

            // Get SM count
            int sm_count;
            if (CubDebug(error = cudaDeviceGetAttribute (&sm_count, cudaDevAttrMultiProcessorCount, device_ordinal))) break;

            // Get a rough estimate of scan_kernel SM occupancy based upon the maximum SM occupancy of the targeted PTX architecture
            multi_sm_occupancy = CUB_MIN(
                ArchProps<CUB_PTX_ARCH>::MAX_SM_THREADBLOCKS,
                ArchProps<CUB_PTX_ARCH>::MAX_SM_THREADS / scan_dispatch_params.block_threads);

#ifndef __CUDA_ARCH__
            // We're on the host, so come up with a more accurate estimate of SM occupancy
            // from actual device properties
            Device device_props;
            if (CubDebug(error = device_props.Init(device_ordinal))) break;

            if (CubDebug(error = device_props.MaxSmOccupancy(
                multi_sm_occupancy,
                scan_kernel,
                scan_dispatch_params.block_threads))) break;
#endif

            // Get device occupancy for scan_kernel
            int scan_occupancy = multi_sm_occupancy * sm_count;

            // Get grid size for scan_kernel
            scan_grid_size = (num_tiles < scan_occupancy) ?
                num_tiles :                 // Not enough to fill the device with threadblocks
                scan_occupancy;             // Fill the device with threadblocks
        }

        // Log scan_kernel configuration
        if (stream_synchronous) CubLog("Invoking scan_kernel<<<%d, %d, 0, %lld>>>(), %d items per thread, %d SM occupancy\n",
            scan_grid_size, scan_dispatch_params.block_threads, (long long) stream, scan_dispatch_params.items_per_thread, multi_sm_occupancy);

        // Invoke scan_kernel
        scan_kernel<<<scan_grid_size, scan_dispatch_params.block_threads, 0, stream>>>(
            d_in,
            d_out,
            d_tile_status,
            scan_op,
            identity,
            num_items,
            queue);

        // Sync the stream if specified
        if (stream_synchronous && (CubDebug(error = SyncStream(stream)))) break;
    }
    while (0);

    return error;

#endif  // CUB_RUNTIME_ENABLED
}
492
/**
 * Internal scan dispatch routine for using default tuning policies.
 *
 * Selects dispatch parameters appropriate to the target device's PTX version
 * (or the compile-time architecture when invoked from device code), then
 * forwards to the kernel-pointer Dispatch overload.
 */
template <
    typename                    InputIteratorRA,            ///< Random-access iterator type for input (may be a simple pointer type)
    typename                    OutputIteratorRA,           ///< Random-access iterator type for output (may be a simple pointer type)
    typename                    ScanOp,                     ///< Binary scan operator type having member <tt>T operator()(const T &a, const T &b)</tt>
    typename                    Identity,                   ///< Identity value type (cub::NullType for inclusive scans)
    typename                    SizeT>                      ///< Integer type used for global array indexing
__host__ __device__ __forceinline__
static cudaError_t Dispatch(
    void                        *d_temp_storage,            ///< [in] %Device allocation of temporary storage.  When NULL, the required allocation size is returned in \p temp_storage_bytes and no work is done.
    size_t                      &temp_storage_bytes,        ///< [in,out] Size in bytes of \p d_temp_storage allocation.
    InputIteratorRA             d_in,                       ///< [in] Iterator pointing to scan input
    OutputIteratorRA            d_out,                      ///< [in] Iterator pointing to scan output
    ScanOp                      scan_op,                    ///< [in] Binary scan operator
    Identity                    identity,                   ///< [in] Identity element
    SizeT                       num_items,                  ///< [in] Total number of items to scan
    cudaStream_t                stream              = 0,    ///< [in] <b>[optional]</b> CUDA stream to launch kernels within.  Default is stream<sub>0</sub>.
    bool                        stream_synchronous  = false)///< [in] <b>[optional]</b> Whether or not to synchronize the stream after every kernel launch to check for errors.  Default is \p false.
{
    // Data type of the scan
    typedef typename std::iterator_traits<InputIteratorRA>::value_type T;

    // Tuning polices
    typedef PtxDefaultPolicies<T, SizeT>            PtxDefaultPolicies;     // Wrapper of default kernel policies
    typedef typename PtxDefaultPolicies::ScanPolicy ScanPolicy;             // Scan kernel policy

    cudaError error = cudaSuccess;
    do
    {
        // Declare dispatch parameters
        KernelDispachParams scan_dispatch_params;

        int ptx_version;
#ifdef __CUDA_ARCH__
        // We're on the device, so initialize the dispatch parameters with the PtxDefaultPolicies directly
        scan_dispatch_params.Init<ScanPolicy>();
        ptx_version = CUB_PTX_ARCH;
#else
        // We're on the host, so lookup and initialize the dispatch parameters with the policies that match the device's PTX version
        if (CubDebug(error = PtxVersion(ptx_version))) break;
        PtxDefaultPolicies::InitDispatchParams(ptx_version, scan_dispatch_params);
#endif

        // Dispatch to the kernel-pointer overload
        error = Dispatch(
            ptx_version,
            d_temp_storage,
            temp_storage_bytes,
            ScanInitKernel<T, SizeT>,
            ScanKernel<ScanPolicy, InputIteratorRA, OutputIteratorRA, T, ScanOp, Identity, SizeT>,
            scan_dispatch_params,
            d_in,
            d_out,
            scan_op,
            identity,
            num_items,
            stream,
            stream_synchronous);

        if (CubDebug(error)) break;
    }
    while (0);

    return error;
}
558
#endif // DOXYGEN_SHOULD_SKIP_THIS
561
/******************************************************************//**
562
* \name Exclusive scans
563
*********************************************************************/
567
/**
 * \brief Computes a device-wide exclusive prefix sum.
 *
 * \par
 * The code snippet below illustrates the exclusive prefix sum of a device vector of \p int items.
 * \par
 * \code
 * #include <cub/cub.cuh>
 * ...
 *
 * // Declare and initialize device pointers for input and output
 * int *d_scan_input, *d_scan_output;
 * int num_items = ...
 * ...
 *
 * // Determine temporary device storage requirements for exclusive prefix sum
 * void *d_temp_storage = NULL;
 * size_t temp_storage_bytes = 0;
 * cub::DeviceScan::ExclusiveSum(d_temp_storage, temp_storage_bytes, d_scan_input, d_scan_output, num_items);
 *
 * // Allocate temporary storage for exclusive prefix sum
 * cudaMalloc(&d_temp_storage, temp_storage_bytes);
 *
 * // Run exclusive prefix sum
 * cub::DeviceScan::ExclusiveSum(d_temp_storage, temp_storage_bytes, d_scan_input, d_scan_output, num_items);
 *
 * \endcode
 *
 * \tparam InputIteratorRA      <b>[inferred]</b> Random-access iterator type for input (may be a simple pointer type)
 * \tparam OutputIteratorRA     <b>[inferred]</b> Random-access iterator type for output (may be a simple pointer type)
 */
template <
    typename            InputIteratorRA,
    typename            OutputIteratorRA>
__host__ __device__ __forceinline__
static cudaError_t ExclusiveSum(
    void                *d_temp_storage,            ///< [in] %Device allocation of temporary storage.  When NULL, the required allocation size is returned in \p temp_storage_bytes and no work is done.
    size_t              &temp_storage_bytes,        ///< [in,out] Size in bytes of \p d_temp_storage allocation.
    InputIteratorRA     d_in,                       ///< [in] Iterator pointing to scan input
    OutputIteratorRA    d_out,                      ///< [in] Iterator pointing to scan output
    int                 num_items,                  ///< [in] Total number of items to scan
    cudaStream_t        stream              = 0,    ///< [in] <b>[optional]</b> CUDA stream to launch kernels within.  Default is stream<sub>0</sub>.
    bool                stream_synchronous  = false)///< [in] <b>[optional]</b> Whether or not to synchronize the stream after every kernel launch to check for errors.  May cause significant slowdown.  Default is \p false.
{
    // A default-constructed T() serves as the identity for prefix sum
    typedef typename std::iterator_traits<InputIteratorRA>::value_type T;
    return Dispatch(d_temp_storage, temp_storage_bytes, d_in, d_out, Sum(), T(), num_items, stream, stream_synchronous);
}
623
/**
 * \brief Computes a device-wide exclusive prefix scan using the specified binary \p scan_op functor.
 *
 * \par
 * Supports non-commutative scan operators.
 *
 * \par
 * The code snippet below illustrates the exclusive prefix scan of a device vector of \p int items.
 * \par
 * \code
 * #include <cub/cub.cuh>
 * ...
 *
 * // Declare and initialize device pointers for input and output
 * int *d_scan_input, *d_scan_output;
 * int num_items = ...
 * ...
 *
 * // Determine temporary device storage requirements for exclusive prefix scan
 * void *d_temp_storage = NULL;
 * size_t temp_storage_bytes = 0;
 * cub::DeviceScan::ExclusiveScan(d_temp_storage, temp_storage_bytes, d_scan_input, d_scan_output, cub::Max(), (int) MIN_INT, num_items);
 *
 * // Allocate temporary storage for exclusive prefix scan
 * cudaMalloc(&d_temp_storage, temp_storage_bytes);
 *
 * // Run exclusive prefix scan (max)
 * cub::DeviceScan::ExclusiveScan(d_temp_storage, temp_storage_bytes, d_scan_input, d_scan_output, cub::Max(), (int) MIN_INT, num_items);
 *
 * \endcode
 *
 * \tparam InputIteratorRA      <b>[inferred]</b> Random-access iterator type for input (may be a simple pointer type)
 * \tparam OutputIteratorRA     <b>[inferred]</b> Random-access iterator type for output (may be a simple pointer type)
 * \tparam ScanOp               <b>[inferred]</b> Binary scan operator type having member <tt>T operator()(const T &a, const T &b)</tt>
 * \tparam Identity             <b>[inferred]</b> Type of the \p identity value used Binary scan operator type having member <tt>T operator()(const T &a, const T &b)</tt>
 */
template <
    typename            InputIteratorRA,
    typename            OutputIteratorRA,
    typename            ScanOp,
    typename            Identity>
__host__ __device__ __forceinline__
static cudaError_t ExclusiveScan(
    void                *d_temp_storage,            ///< [in] %Device allocation of temporary storage.  When NULL, the required allocation size is returned in \p temp_storage_bytes and no work is done.
    size_t              &temp_storage_bytes,        ///< [in,out] Size in bytes of \p d_temp_storage allocation.
    InputIteratorRA     d_in,                       ///< [in] Iterator pointing to scan input
    OutputIteratorRA    d_out,                      ///< [in] Iterator pointing to scan output
    ScanOp              scan_op,                    ///< [in] Binary scan operator
    Identity            identity,                   ///< [in] Identity element
    int                 num_items,                  ///< [in] Total number of items to scan
    cudaStream_t        stream              = 0,    ///< [in] <b>[optional]</b> CUDA stream to launch kernels within.  Default is stream<sub>0</sub>.
    bool                stream_synchronous  = false)///< [in] <b>[optional]</b> Whether or not to synchronize the stream after every kernel launch to check for errors.  May cause significant slowdown.  Default is \p false.
{
    return Dispatch(d_temp_storage, temp_storage_bytes, d_in, d_out, scan_op, identity, num_items, stream, stream_synchronous);
}
686
//@} end member group
687
/******************************************************************//**
688
* \name Inclusive scans
689
*********************************************************************/
694
/**
 * \brief Computes a device-wide inclusive prefix sum.
 *
 * \par
 * The code snippet below illustrates the inclusive prefix sum of a device vector of \p int items.
 * \par
 * \code
 * #include <cub/cub.cuh>
 * ...
 *
 * // Declare and initialize device pointers for input and output
 * int *d_scan_input, *d_scan_output;
 * int num_items = ...
 * ...
 *
 * // Determine temporary device storage requirements for inclusive prefix sum
 * void *d_temp_storage = NULL;
 * size_t temp_storage_bytes = 0;
 * cub::DeviceScan::InclusiveSum(d_temp_storage, temp_storage_bytes, d_scan_input, d_scan_output, num_items);
 *
 * // Allocate temporary storage for inclusive prefix sum
 * cudaMalloc(&d_temp_storage, temp_storage_bytes);
 *
 * // Run inclusive prefix sum
 * cub::DeviceScan::InclusiveSum(d_temp_storage, temp_storage_bytes, d_scan_input, d_scan_output, num_items);
 *
 * \endcode
 *
 * \tparam InputIteratorRA      <b>[inferred]</b> Random-access iterator type for input (may be a simple pointer type)
 * \tparam OutputIteratorRA     <b>[inferred]</b> Random-access iterator type for output (may be a simple pointer type)
 */
template <
    typename            InputIteratorRA,
    typename            OutputIteratorRA>
__host__ __device__ __forceinline__
static cudaError_t InclusiveSum(
    void                *d_temp_storage,            ///< [in] %Device allocation of temporary storage.  When NULL, the required allocation size is returned in \p temp_storage_bytes and no work is done.
    size_t              &temp_storage_bytes,        ///< [in,out] Size in bytes of \p d_temp_storage allocation.
    InputIteratorRA     d_in,                       ///< [in] Iterator pointing to scan input
    OutputIteratorRA    d_out,                      ///< [in] Iterator pointing to scan output
    int                 num_items,                  ///< [in] Total number of items to scan
    cudaStream_t        stream              = 0,    ///< [in] <b>[optional]</b> CUDA stream to launch kernels within.  Default is stream<sub>0</sub>.
    bool                stream_synchronous  = false)///< [in] <b>[optional]</b> Whether or not to synchronize the stream after every kernel launch to check for errors.  May cause significant slowdown.  Default is \p false.
{
    // NullType identity signals an inclusive scan to the dispatch machinery
    return Dispatch(d_temp_storage, temp_storage_bytes, d_in, d_out, Sum(), NullType(), num_items, stream, stream_synchronous);
}
748
/**
 * \brief Computes a device-wide inclusive prefix scan using the specified binary \p scan_op functor.
 *
 * \par
 * Supports non-commutative scan operators.
 *
 * \par
 * The code snippet below illustrates the inclusive prefix scan of a device vector of \p int items.
 * \par
 * \code
 * #include <cub/cub.cuh>
 * ...
 *
 * // Declare and initialize device pointers for input and output
 * int *d_scan_input, *d_scan_output;
 * int num_items = ...
 * ...
 *
 * // Determine temporary device storage requirements for inclusive prefix scan
 * void *d_temp_storage = NULL;
 * size_t temp_storage_bytes = 0;
 * cub::DeviceScan::InclusiveScan(d_temp_storage, temp_storage_bytes, d_scan_input, d_scan_output, cub::Max(), num_items);
 *
 * // Allocate temporary storage for inclusive prefix scan
 * cudaMalloc(&d_temp_storage, temp_storage_bytes);
 *
 * // Run inclusive prefix scan (max)
 * cub::DeviceScan::InclusiveScan(d_temp_storage, temp_storage_bytes, d_scan_input, d_scan_output, cub::Max(), num_items);
 *
 * \endcode
 *
 * \tparam InputIteratorRA      <b>[inferred]</b> Random-access iterator type for input (may be a simple pointer type)
 * \tparam OutputIteratorRA     <b>[inferred]</b> Random-access iterator type for output (may be a simple pointer type)
 * \tparam ScanOp               <b>[inferred]</b> Binary scan operator type having member <tt>T operator()(const T &a, const T &b)</tt>
 */
template <
    typename            InputIteratorRA,
    typename            OutputIteratorRA,
    typename            ScanOp>
__host__ __device__ __forceinline__
static cudaError_t InclusiveScan(
    void                *d_temp_storage,            ///< [in] %Device allocation of temporary storage.  When NULL, the required allocation size is returned in \p temp_storage_bytes and no work is done.
    size_t              &temp_storage_bytes,        ///< [in,out] Size in bytes of \p d_temp_storage allocation.
    InputIteratorRA     d_in,                       ///< [in] Iterator pointing to scan input
    OutputIteratorRA    d_out,                      ///< [in] Iterator pointing to scan output
    ScanOp              scan_op,                    ///< [in] Binary scan operator
    int                 num_items,                  ///< [in] Total number of items to scan
    cudaStream_t        stream              = 0,    ///< [in] <b>[optional]</b> CUDA stream to launch kernels within.  Default is stream<sub>0</sub>.
    bool                stream_synchronous  = false)///< [in] <b>[optional]</b> Whether or not to synchronize the stream after every kernel launch to check for errors.  May cause significant slowdown.  Default is \p false.
{
    // NullType identity signals an inclusive scan to the dispatch machinery
    return Dispatch(d_temp_storage, temp_storage_bytes, d_in, d_out, scan_op, NullType(), num_items, stream, stream_synchronous);
}
810
CUB_NS_POSTFIX // Optional outer namespace(s)