/******************************************************************************
 * Copyright (c) 2011, Duane Merrill. All rights reserved.
 * Copyright (c) 2011-2013, NVIDIA CORPORATION. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in the
 *       documentation and/or other materials provided with the distribution.
 *     * Neither the name of the NVIDIA CORPORATION nor the
 *       names of its contributors may be used to endorse or promote products
 *       derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 ******************************************************************************/

/**
 * \file
 * cub::GridEvenShare is a descriptor utility for distributing input among CUDA threadblocks in an "even-share" fashion. Each threadblock gets roughly the same number of fixed-size work units (grains).
 */

#pragma once

#include "../util_namespace.cuh"
#include "../util_macro.cuh"

/// Optional outer namespace(s)
CUB_NS_PREFIX

/// CUB namespace
namespace cub {


/**
 * \addtogroup GridModule
 * @{
 */


/**
 * \brief GridEvenShare is a descriptor utility for distributing input among CUDA threadblocks in an "even-share" fashion. Each threadblock gets roughly the same number of fixed-size work units (grains).
 *
 * \par Overview
 * GridEvenShare indicates which sections of input are to be mapped onto which threadblocks.
 * Threadblocks may receive one of three different amounts of work: "big", "normal",
 * and "last". The "big" workloads are one scheduling grain larger than "normal", and the
 * "last" workload for the final threadblock may be only partially full if the input is not
 * an even multiple of the scheduling grain size.
 *
 * \par
 * Before invoking a child grid, a parent thread will typically construct and initialize an
 * instance of GridEvenShare using \p GridInit(). The instance can then be passed to child
 * threadblocks, each of which initializes its per-threadblock offsets using \p BlockInit(),
 * as illustrated in the snippet below.
 *
 * \tparam SizeT Integer type for array indexing
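 *
 * \par Snippet
 * An illustrative usage sketch (not part of the original header). The kernel name
 * \p ExampleKernel, the 128-item tile size, and the variables \p d_in, \p num_items,
 * and \p max_grid_size are assumptions made for this example only:
 * \code
 * // Kernel: each threadblock consumes its even-share segment of the input
 * __global__ void ExampleKernel(cub::GridEvenShare<int> even_share, int *d_in)
 * {
 *     // Compute this threadblock's [block_offset, block_oob) input range
 *     even_share.BlockInit();
 *     for (int i = even_share.block_offset + threadIdx.x; i < even_share.block_oob; i += blockDim.x)
 *     {
 *         // ... process d_in[i] ...
 *     }
 * }
 *
 * // Host: partition num_items into 128-item grains across at most max_grid_size threadblocks
 * cub::GridEvenShare<int> even_share;
 * even_share.GridInit(num_items, max_grid_size, 128);
 * ExampleKernel<<<even_share.grid_size, 128>>>(even_share, d_in);
 * \endcode
 */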
template <typename SizeT>
class GridEvenShare
{
private:

    SizeT   total_grains;
    int     big_blocks;
    SizeT   big_share;
    SizeT   normal_share;
    SizeT   normal_base_offset;

public:

    /// Total number of input items
    SizeT   num_items;

    /// Grid size in threadblocks
    int     grid_size;

    /// Offset into input marking the beginning of the owning thread block's segment of input tiles
    SizeT   block_offset;

    /// Offset into input marking the end (one-past) of the owning thread block's segment of input tiles
    SizeT   block_oob;


    /**
     * \brief Block-based constructor for single-block grids.
     */
    __device__ __forceinline__ GridEvenShare(SizeT num_items) :
        num_items(num_items),
        grid_size(1),
        block_offset(0),
        block_oob(num_items) {}


    /**
     * \brief Default constructor. Zero-initializes block-specific fields.
     */
    __host__ __device__ __forceinline__ GridEvenShare() :
        num_items(0),
        grid_size(0),
        block_offset(0),
        block_oob(0) {}


    /**
     * \brief Initializes the grid-specific members \p num_items and \p grid_size (to be called prior to kernel launch).
     */
    __host__ __device__ __forceinline__ void GridInit(
        SizeT   num_items,              ///< Total number of input items
        int     max_grid_size,          ///< Maximum grid size allowable (actual grid size may be less if not warranted by the number of input items)
        int     schedule_granularity)   ///< Granularity by which the input can be parcelled into and distributed among threadblocks. Usually the thread block's native tile size (or a multiple thereof).
    {
        this->num_items             = num_items;
        this->block_offset          = 0;
        this->total_grains          = (num_items + schedule_granularity - 1) / schedule_granularity;
        this->grid_size             = CUB_MIN(total_grains, max_grid_size);
        SizeT grains_per_block      = total_grains / grid_size;
        this->big_blocks            = total_grains - (grains_per_block * grid_size);    // leftover grains go to big blocks
        this->normal_share          = grains_per_block * schedule_granularity;
        this->normal_base_offset    = big_blocks * schedule_granularity;
        this->big_share             = normal_share + schedule_granularity;
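
        // Worked example (illustrative figures, not from the original source):
        // num_items = 1000, schedule_granularity = 128, max_grid_size = 3  =>
        // total_grains = 8, grid_size = 3, grains_per_block = 2, big_blocks = 2,
        // normal_share = 256, normal_base_offset = 256, big_share = 384.
        // BlockInit() then assigns blocks 0..2 the input ranges [0,384), [384,768),
        // and [768,1000), respectively.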
    }


    /**
     * \brief Initializes the threadblock-specific details (e.g., to be called by each threadblock after startup)
     */
    __device__ __forceinline__ void BlockInit()
    {
        if (blockIdx.x < big_blocks)
        {
            // This threadblock gets a big share of grains (grains_per_block + 1)
            block_offset = (blockIdx.x * big_share);
            block_oob = block_offset + big_share;
        }
        else if (blockIdx.x < total_grains)
        {
            // This threadblock gets a normal share of grains (grains_per_block)
            block_offset = normal_base_offset + (blockIdx.x * normal_share);
            block_oob = block_offset + normal_share;
        }

        // The last threadblock's segment may be only partially full
        if (blockIdx.x == grid_size - 1)
        {
            block_oob = num_items;
        }
    }


    /**
     * Print to stdout
     */
    __host__ __device__ __forceinline__ void Print()
    {
        printf(
#if (CUB_PTX_ARCH > 0)
            "\tthreadblock(%d) "
            "block_offset(%lu) "
            "block_oob(%lu) "
#endif
            "num_items(%lu)  "
            "total_grains(%lu)  "
            "big_blocks(%lu)  "
            "big_share(%lu)  "
            "normal_share(%lu)\n",
#if (CUB_PTX_ARCH > 0)
            blockIdx.x,
            (unsigned long) block_offset,
            (unsigned long) block_oob,
#endif
            (unsigned long) num_items,
            (unsigned long) total_grains,
            (unsigned long) big_blocks,
            (unsigned long) big_share,
            (unsigned long) normal_share);
    }
};


/** @} */       // end group GridModule

}               // CUB namespace
CUB_NS_POSTFIX  // Optional outer namespace(s)