/******************************************************************************
 * Copyright (c) 2011, Duane Merrill.  All rights reserved.
 * Copyright (c) 2011-2013, NVIDIA CORPORATION.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in the
 *       documentation and/or other materials provided with the distribution.
 *     * Neither the name of the NVIDIA CORPORATION nor the
 *       names of its contributors may be used to endorse or promote products
 *       derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 ******************************************************************************/
/**
 * \file
 * cub::BlockReduceWarpReductions provides variants of warp-reduction-based parallel reduction across a CUDA threadblock
 */
#include "../../warp/warp_reduce.cuh"
#include "../../util_arch.cuh"
#include "../../util_namespace.cuh"
/// Optional outer namespace(s)
CUB_NS_PREFIX

/// CUB namespace
namespace cub {


/**
 * \brief BlockReduceWarpReductions provides variants of warp-reduction-based parallel reduction across a CUDA threadblock.
 *
 * Each participating thread contributes one partial reduction.  Partials are
 * first reduced within each warp (via cub::WarpReduce); the per-warp
 * aggregates are then published to shared memory and combined serially by
 * thread<sub>0</sub>.  The block-wide result is only valid for thread<sub>0</sub>.
 */
template <
    typename    T,              ///< Data type being reduced
    int         BLOCK_THREADS>  ///< The thread block size in threads
struct BlockReduceWarpReductions
{
    /// Constants
    enum
    {
        /// Number of active warps
        WARPS = (BLOCK_THREADS + PtxArchProps::WARP_THREADS - 1) / PtxArchProps::WARP_THREADS,

        /// The logical warp size for warp reductions
        LOGICAL_WARP_SIZE = CUB_MIN(BLOCK_THREADS, PtxArchProps::WARP_THREADS),

        /// Whether or not the logical warp size evenly divides the threadblock size
        EVEN_WARP_MULTIPLE = (BLOCK_THREADS % LOGICAL_WARP_SIZE == 0)
    };


    /// WarpReduce utility type
    typedef typename WarpReduce<T, WARPS, LOGICAL_WARP_SIZE>::InternalWarpReduce WarpReduce;


    /// Shared memory storage layout type
    struct _TempStorage
    {
        typename WarpReduce::TempStorage    warp_reduce;            ///< Buffer for warp-synchronous scan
        T                                   warp_aggregates[WARPS]; ///< Shared totals from each warp-synchronous scan
        T                                   block_prefix;           ///< Shared prefix for the entire threadblock
    };

    /// Alias wrapper allowing storage to be unioned
    struct TempStorage : Uninitialized<_TempStorage> {};


    // Thread fields
    // NOTE(review): the declared types of the three rank fields were lost in
    // this chunk; `int` matches the constructor's `int linear_tid` parameter —
    // confirm against upstream CUB.
    _TempStorage    &temp_storage;  ///< Reference to the aliased shared-memory storage
    int             linear_tid;     ///< Rank of this thread within the threadblock
    int             warp_id;        ///< Rank of this thread's warp within the threadblock
    int             lane_id;        ///< Rank of this thread within its warp


    /// Constructor
    __device__ __forceinline__ BlockReduceWarpReductions(
        TempStorage &temp_storage,
        int linear_tid)
    :
        temp_storage(temp_storage.Alias()),
        linear_tid(linear_tid),
        // Single-warp blocks need no division: every thread is in warp 0
        warp_id((BLOCK_THREADS <= PtxArchProps::WARP_THREADS) ?
            0 :
            linear_tid / PtxArchProps::WARP_THREADS),
        lane_id((BLOCK_THREADS <= PtxArchProps::WARP_THREADS) ?
            linear_tid :
            linear_tid % PtxArchProps::WARP_THREADS)
    {}


    /// Returns block-wide aggregate in <em>thread</em><sub>0</sub>.
    template <
        bool        FULL_TILE,
        typename    ReductionOp>
    __device__ __forceinline__ T ApplyWarpAggregates(
        ReductionOp reduction_op,       ///< [in] Binary scan operator
        T           warp_aggregate,     ///< [in] <b>[<em>lane</em><sub>0</sub>s only]</b> Warp-wide aggregate reduction of input items
        int         num_valid)          ///< [in] Number of valid elements (may be less than BLOCK_THREADS)
    {
        // Share lane aggregates
        if (lane_id == 0)
        {
            temp_storage.warp_aggregates[warp_id] = warp_aggregate;
        }

        // Barrier: all per-warp aggregates must be visible before thread-0 reads them
        __syncthreads();

        // Update total aggregate in warp 0, lane 0
        if (linear_tid == 0)
        {
            #pragma unroll
            for (int SUCCESSOR_WARP = 1; SUCCESSOR_WARP < WARPS; SUCCESSOR_WARP++)
            {
                // Skip trailing warps that received no valid items
                if (FULL_TILE || (SUCCESSOR_WARP * LOGICAL_WARP_SIZE < num_valid))
                {
                    warp_aggregate = reduction_op(warp_aggregate, temp_storage.warp_aggregates[SUCCESSOR_WARP]);
                }
            }
        }

        return warp_aggregate;
    }


    /// Computes a threadblock-wide reduction using addition (+) as the reduction operator. The first num_valid threads each contribute one reduction partial. The return value is only valid for thread<sub>0</sub>.
    template <bool FULL_TILE>
    __device__ __forceinline__ T Sum(
        T   input,          ///< [in] Calling thread's input partial reductions
        int num_valid)      ///< [in] Number of valid elements (may be less than BLOCK_THREADS)
    {
        cub::Sum        reduction_op;
        unsigned int    warp_offset = warp_id * LOGICAL_WARP_SIZE;
        // Number of valid items covered by this thread's warp (full warp when
        // the tile is full and warps evenly divide the block)
        unsigned int    warp_num_valid = (FULL_TILE && EVEN_WARP_MULTIPLE) ?
            LOGICAL_WARP_SIZE :
            (warp_offset < num_valid) ?
                num_valid - warp_offset :
                0;

        // Warp reduction in every warp
        T warp_aggregate = WarpReduce(temp_storage.warp_reduce, warp_id, lane_id).template Sum<(FULL_TILE && EVEN_WARP_MULTIPLE), 1>(
            input,
            warp_num_valid);

        // Update outputs and block_aggregate with warp-wide aggregates from lane-0s
        return ApplyWarpAggregates<FULL_TILE>(reduction_op, warp_aggregate, num_valid);
    }


    /// Computes a threadblock-wide reduction using the specified reduction operator. The first num_valid threads each contribute one reduction partial. The return value is only valid for thread<sub>0</sub>.
    template <
        bool        FULL_TILE,
        typename    ReductionOp>
    __device__ __forceinline__ T Reduce(
        T           input,          ///< [in] Calling thread's input partial reductions
        int         num_valid,      ///< [in] Number of valid elements (may be less than BLOCK_THREADS)
        ReductionOp reduction_op)   ///< [in] Binary reduction operator
    {
        // Local warp_id intentionally shadows the member (as in the original source)
        unsigned int    warp_id = (WARPS == 1) ? 0 : (linear_tid / LOGICAL_WARP_SIZE);
        unsigned int    warp_offset = warp_id * LOGICAL_WARP_SIZE;
        // Number of valid items covered by this thread's warp
        unsigned int    warp_num_valid = (FULL_TILE && EVEN_WARP_MULTIPLE) ?
            LOGICAL_WARP_SIZE :
            (warp_offset < num_valid) ?
                num_valid - warp_offset :
                0;

        // Warp reduction in every warp
        T warp_aggregate = WarpReduce(temp_storage.warp_reduce, warp_id, lane_id).template Reduce<(FULL_TILE && EVEN_WARP_MULTIPLE), 1>(
            input,
            warp_num_valid,
            reduction_op);

        // Update outputs and block_aggregate with warp-wide aggregates from lane-0s
        return ApplyWarpAggregates<FULL_TILE>(reduction_op, warp_aggregate, num_valid);
    }
};


}               // CUB namespace
197
CUB_NS_POSTFIX // Optional outer namespace(s)