/*
 * Copyright (C) 2012 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */
27
#ifndef FREEDRENO_UTIL_H_
28
#define FREEDRENO_UTIL_H_
30
#include "drm/freedreno_drmif.h"
31
#include "drm/freedreno_ringbuffer.h"
33
#include "pipe/p_format.h"
34
#include "pipe/p_state.h"
35
#include "util/compiler.h"
36
#include "util/half_float.h"
38
#include "util/u_debug.h"
39
#include "util/u_dynarray.h"
40
#include "util/u_math.h"
41
#include "util/u_pack_color.h"
43
#include "adreno_common.xml.h"
44
#include "adreno_pm4.xml.h"
51
/* Translation helpers from gallium "pipe" enums/values to adreno hardware
 * enums.  Implemented out-of-line elsewhere in the driver — presumably
 * freedreno_util.c; confirm.
 */
enum adreno_rb_depth_format fd_pipe2depth(enum pipe_format format);
52
enum pc_di_index_size fd_pipe2index(enum pipe_format format);
53
enum pipe_format fd_gmem_restore_format(enum pipe_format format);
54
enum adreno_rb_blend_factor fd_blend_factor(unsigned factor);
55
enum adreno_pa_su_sc_draw fd_polygon_mode(unsigned mode);
56
enum adreno_stencil_op fd_stencil_op(unsigned op);
58
/* Per-generation hardware limits. */
#define A3XX_MAX_MIP_LEVELS 14
60
#define A2XX_MAX_RENDER_TARGETS 1
61
#define A3XX_MAX_RENDER_TARGETS 4
62
#define A4XX_MAX_RENDER_TARGETS 8
63
#define A5XX_MAX_RENDER_TARGETS 8
64
#define A6XX_MAX_RENDER_TARGETS 8
66
/* Maximum across all supported generations (a6xx has the most). */
#define MAX_RENDER_TARGETS A6XX_MAX_RENDER_TARGETS
68
/* clang-format off */
70
FD_DBG_MSGS = BITFIELD_BIT(0),
71
FD_DBG_DISASM = BITFIELD_BIT(1),
72
FD_DBG_DCLEAR = BITFIELD_BIT(2),
73
FD_DBG_DDRAW = BITFIELD_BIT(3),
74
FD_DBG_NOSCIS = BITFIELD_BIT(4),
75
FD_DBG_DIRECT = BITFIELD_BIT(5),
76
FD_DBG_GMEM = BITFIELD_BIT(6),
77
FD_DBG_PERF = BITFIELD_BIT(7),
78
FD_DBG_NOBIN = BITFIELD_BIT(8),
79
FD_DBG_SYSMEM = BITFIELD_BIT(9),
80
FD_DBG_SERIALC = BITFIELD_BIT(10),
81
FD_DBG_SHADERDB = BITFIELD_BIT(11),
82
FD_DBG_FLUSH = BITFIELD_BIT(12),
83
FD_DBG_DEQP = BITFIELD_BIT(13),
84
FD_DBG_INORDER = BITFIELD_BIT(14),
85
FD_DBG_BSTAT = BITFIELD_BIT(15),
86
FD_DBG_NOGROW = BITFIELD_BIT(16),
87
FD_DBG_LRZ = BITFIELD_BIT(17),
88
FD_DBG_NOINDR = BITFIELD_BIT(18),
89
FD_DBG_NOBLIT = BITFIELD_BIT(19),
90
FD_DBG_HIPRIO = BITFIELD_BIT(20),
91
FD_DBG_TTILE = BITFIELD_BIT(21),
92
FD_DBG_PERFC = BITFIELD_BIT(22),
93
FD_DBG_NOUBWC = BITFIELD_BIT(23),
94
FD_DBG_NOLRZ = BITFIELD_BIT(24),
95
FD_DBG_NOTILE = BITFIELD_BIT(25),
96
FD_DBG_LAYOUT = BITFIELD_BIT(26),
97
FD_DBG_NOFP16 = BITFIELD_BIT(27),
98
FD_DBG_NOHW = BITFIELD_BIT(28),
100
/* clang-format on */
102
/* Bitmask of FD_DBG_* flags currently enabled — presumably populated from
 * an environment variable at startup; confirm where it is initialized. */
extern int fd_mesa_debug;
103
extern bool fd_binning_enabled;
105
/* Test whether a debug category is enabled, eg. FD_DBG(MSGS). */
#define FD_DBG(category) unlikely(fd_mesa_debug &FD_DBG_##category)
108
#include <sys/types.h>
109
#include <sys/syscall.h>
111
/* Debug logging helper: prefixes each message with the thread id, function
 * name and line number.
 *
 * NOTE(review): the do/while wrapper, the FD_DBG(MSGS) gate and the
 * ##__VA_ARGS__ tail of this macro were lost in extraction and have been
 * reconstructed — confirm against upstream.
 */
#define DBG(fmt, ...)                                                          \
   do {                                                                        \
      if (FD_DBG(MSGS))                                                        \
         mesa_logi("%5d: %s:%d: " fmt, ((pid_t)syscall(SYS_gettid)),           \
                   __FUNCTION__, __LINE__, ##__VA_ARGS__);                     \
   } while (0)
119
/* Emit a performance warning both to the log (when the PERF debug flag is
 * set) and to the gallium debug callback, if one is installed.
 *
 * NOTE(review): the do/while wrapper and the FD_DBG(PERF)/NULL-check gates
 * were lost in extraction and have been reconstructed — confirm against
 * upstream.
 */
#define perf_debug_message(debug, type, ...)                                   \
   do {                                                                        \
      if (FD_DBG(PERF))                                                        \
         mesa_logw(__VA_ARGS__);                                               \
      struct util_debug_callback *__d = (debug);                               \
      if (__d)                                                                 \
         util_debug_message(__d, type, __VA_ARGS__);                           \
   } while (0)
128
/* Convenience wrappers around perf_debug_message() for a (possibly NULL)
 * fd_context.
 *
 * NOTE(review): the do/while wrapper was lost in extraction and has been
 * reconstructed — confirm against upstream.
 */
#define perf_debug_ctx(ctx, ...)                                               \
   do {                                                                        \
      struct fd_context *__c = (ctx);                                          \
      perf_debug_message(__c ? &__c->debug : NULL, PERF_INFO, __VA_ARGS__);    \
   } while (0)

/* perf_debug() with no context: logs only (no debug callback). */
#define perf_debug(...) perf_debug_ctx(NULL, __VA_ARGS__)
136
/* perf_time_ctx(ctx, limit_ns, fmt, ...): times the statement/block that
 * follows (note the for-loop trick) and emits a perf warning via
 * perf_debug_ctx() if it took longer than limit_ns nanoseconds.
 *
 * NOTE(review): several continuation lines of this macro were lost in
 * extraction (the loop condition/increment plumbing is incomplete here) —
 * restore from upstream before relying on this text.
 */
#define perf_time_ctx(ctx, limit_ns, fmt, ...) \
137
for (struct __perf_time_state __s = \
139
.t = -__perf_get_time(ctx), \
142
__s.t += __perf_get_time(ctx); \
144
if (__s.t > (limit_ns)) { \
145
perf_debug_ctx(ctx, fmt " (%.03f ms)", ##__VA_ARGS__, \
146
(double)__s.t / 1000000.0); \
150
/* Context-less variant of perf_time_ctx(). */
#define perf_time(limit_ns, fmt, ...) \
151
perf_time_ctx(NULL, limit_ns, fmt, ##__VA_ARGS__)
153
/* Loop-state carrier for the perf_time macros.
 * NOTE(review): the struct body was lost in extraction — restore from
 * upstream (it at least holds the signed time accumulator 't'). */
struct __perf_time_state {
158
/* Returns the current time in ns when perf reporting could fire (PERF debug
 * flag set, or a debug callback installed on the context); intended to skip
 * the os_time_get_nano() call otherwise.
 *
 * NOTE(review): the tail of this macro (the "else" value closing the ?:)
 * was lost in extraction — restore from upstream.
 */
/* static inline would be nice here, except 'struct fd_context' is not
161
#define __perf_get_time(ctx) \
162
((FD_DBG(PERF) || ({ \
163
struct fd_context *__c = (ctx); \
164
unlikely(__c && __c->debug.debug_message); \
166
? os_time_get_nano() \
172
/**
 * A pseudo-variable for defining where various parts of the fd_context
 * can be safely accessed.
 *
 * With threaded_context, certain pctx funcs are called from gallium
 * front-end/state-tracker (eg. CSO creation), while others are called
 * from the driver thread. Things called from driver thread can safely
 * access anything in the ctx, while things called from the fe/st thread
 * must limit themselves to "safe" things (ie. ctx->screen is safe as it
 * is immutable, but the blitter_context is not).
 */
182
/* Capability token used with the annotations below — appears to drive
 * clang thread-safety analysis; confirm the lock_cap_t machinery. */
extern lock_cap_t fd_context_access_cap;
185
* Make the annotation a bit less verbose.. mark fields which should only
186
* be accessed by driver-thread with 'dt'
188
#define dt guarded_by(fd_context_access_cap)
191
* Annotation for entry-point functions only called in driver thread.
193
* For static functions, apply the annotation to the function declaration.
194
* Otherwise apply to the function prototype.
196
#define in_dt assert_cap(fd_context_access_cap)
199
* Annotation for internal functions which are only called from entry-
200
* point functions (with 'in_dt' annotation) or other internal functions
201
* with the 'assert_dt' annotation.
203
* For static functions, apply the annotation to the function declaration.
204
* Otherwise apply to the function prototype.
206
#define assert_dt requires_cap(fd_context_access_cap)
209
/* NOTE(review): the return types and (empty) bodies of these two helpers
 * were lost in extraction — restore from upstream.  They acquire/release
 * the fd_context_access_cap capability for code running outside the
 * driver thread. */
* Special helpers for context access outside of driver thread. For ex,
210
* pctx->get_query_result() is not called on driver thread, but the
211
* query is guaranteed to be flushed, or the driver thread queue is
212
* guaranteed to be flushed.
217
fd_context_access_begin(struct fd_context *ctx)
218
acquire_cap(fd_context_access_cap)
223
fd_context_access_end(struct fd_context *ctx) release_cap(fd_context_access_cap)
227
/* for conditionally setting boolean flag(s): */
228
#define COND(bool, val) ((bool) ? (val) : 0)
230
/* Pack a type-4 (register write) packet header for register 'reg',
 * expressed relative to base 0x2000. */
#define CP_REG(reg) ((0x4 << 16) | ((unsigned int)((reg) - (0x2000))))
232
static inline uint32_t
233
DRAW(enum pc_di_primtype prim_type, enum pc_di_src_sel source_select,
234
enum pc_di_index_size index_size, enum pc_di_vis_cull_mode vis_cull_mode,
237
return (prim_type << 0) | (source_select << 6) | ((index_size & 1) << 11) |
238
((index_size >> 1) << 13) | (vis_cull_mode << 9) | (1 << 14) |
242
static inline uint32_t
243
DRAW_A20X(enum pc_di_primtype prim_type,
244
enum pc_di_face_cull_sel faceness_cull_select,
245
enum pc_di_src_sel source_select, enum pc_di_index_size index_size,
246
bool pre_fetch_cull_enable, bool grp_cull_enable, uint16_t count)
248
return (prim_type << 0) | (source_select << 6) |
249
(faceness_cull_select << 8) | ((index_size & 1) << 11) |
250
((index_size >> 1) << 13) | (pre_fetch_cull_enable << 14) |
251
(grp_cull_enable << 15) | (count << 16);
254
/* for tracking cmdstream positions that need to be patched: */
259
/* NOTE(review): the definition of struct fd_cs_patch appears to have been
 * lost in extraction (it is referenced by both macros below and by
 * OUT_RINGP) — restore from upstream. */
/* Number of patch entries currently stored in dynarray 'buf'. */
#define fd_patch_num_elements(buf) ((buf)->size / sizeof(struct fd_cs_patch))
260
/* i'th patch entry in dynarray 'buf'. */
#define fd_patch_element(buf, i) \
261
util_dynarray_element(buf, struct fd_cs_patch, i)
263
static inline enum pipe_format
264
pipe_surface_format(struct pipe_surface *psurf)
267
return PIPE_FORMAT_NONE;
268
return psurf->format;
272
fd_surface_half_precision(const struct pipe_surface *psurf)
274
enum pipe_format format;
279
format = psurf->format;
281
/* colors are provided in consts, which go through cov.f32f16, which will
284
if (util_format_is_pure_integer(format))
287
/* avoid losing precision on 32-bit float formats */
288
if (util_format_is_float(format) &&
289
util_format_get_component_bits(format, UTIL_FORMAT_COLORSPACE_RGB, 0) ==
296
static inline unsigned
297
fd_sampler_first_level(const struct pipe_sampler_view *view)
299
if (view->target == PIPE_BUFFER)
301
return view->u.tex.first_level;
304
static inline unsigned
305
fd_sampler_last_level(const struct pipe_sampler_view *view)
307
if (view->target == PIPE_BUFFER)
309
return view->u.tex.last_level;
313
fd_half_precision(struct pipe_framebuffer_state *pfb)
317
for (i = 0; i < pfb->nr_cbufs; i++)
318
if (!fd_surface_half_precision(pfb->cbufs[i]))
324
/* Forward declaration — emit_marker() is defined further below but used
 * by __OUT_IB(). */
static inline void emit_marker(struct fd_ringbuffer *ring, int scratch_idx);
326
/* NOTE(review): the function header/braces and the fd_cs_patch initializer
 * fields were lost in extraction — restore from upstream.  The surviving
 * lines show it logs the ring offset and appends a struct fd_cs_patch
 * entry to 'buf' via util_dynarray_append(). */
/* like OUT_RING() but appends a cmdstream patch point to 'buf' */
328
OUT_RINGP(struct fd_ringbuffer *ring, uint32_t data, struct util_dynarray *buf)
331
DBG("ring[%p]: OUT_RINGP %04x: %08x", ring,
332
(uint32_t)(ring->cur - ring->start), data);
334
util_dynarray_append(buf, struct fd_cs_patch,
335
((struct fd_cs_patch){
342
/* Emit CP_INDIRECT_BUFFER packets referencing each chunk of 'target' into
 * 'ring' (legacy type-3 packet form); 'prefetch' selects PFE vs PFD.
 * Emits scratch-reg markers around the IBs for post-hang debugging.
 *
 * NOTE(review): the return type, braces, early-return body for an empty
 * target, and the dwords declaration were lost in extraction — restore
 * from upstream. */
__OUT_IB(struct fd_ringbuffer *ring, bool prefetch,
343
struct fd_ringbuffer *target)
345
if (target->cur == target->start)
348
unsigned count = fd_ringbuffer_cmd_count(target);
350
/* for debug after a lock up, write a unique counter value
351
* to scratch6 for each IB, to make it easier to match up
352
* register dumps to cmdstream. The combination of IB and
353
* DRAW (scratch7) is enough to "triangulate" the particular
354
* draw that caused lockup.
356
emit_marker(ring, 6);
358
for (unsigned i = 0; i < count; i++) {
360
OUT_PKT3(ring, prefetch ? CP_INDIRECT_BUFFER_PFE : CP_INDIRECT_BUFFER_PFD,
362
dwords = fd_ringbuffer_emit_reloc_ring_full(ring, target, i) / 4;
364
OUT_RING(ring, dwords);
368
emit_marker(ring, 6);
372
/* a5xx+ variant of __OUT_IB(): emits type-7 CP_INDIRECT_BUFFER packets for
 * each chunk of 'target' (no prefetch flavor, no scratch markers).
 *
 * NOTE(review): the return type, braces, empty-target early return and the
 * dwords declaration were lost in extraction — restore from upstream. */
__OUT_IB5(struct fd_ringbuffer *ring, struct fd_ringbuffer *target)
374
if (target->cur == target->start)
377
unsigned count = fd_ringbuffer_cmd_count(target);
379
for (unsigned i = 0; i < count; i++) {
381
OUT_PKT7(ring, CP_INDIRECT_BUFFER, 3);
382
dwords = fd_ringbuffer_emit_reloc_ring_full(ring, target, i) / 4;
384
OUT_RING(ring, dwords);
388
/* CP_SCRATCH_REG4 is used to hold base address for query results: */
389
// XXX annoyingly scratch regs move on a5xx.. and additionally different
390
// packet types.. so freedreno_query_hw is going to need a bit of
392
#define HW_QUERY_BASE_REG REG_AXXX_CP_SCRATCH_REG4
395
/* NOTE(review): the #ifdef/#else/#endif lines selecting between the two
 * __EMIT_MARKER definitions, and parts of emit_marker()'s body (signature
 * line, braces, the early return and the __EMIT_MARKER gate), were lost in
 * extraction — restore from upstream. */
#define __EMIT_MARKER 1
397
#define __EMIT_MARKER 0
401
/* Write an incrementing counter to a CP scratch register, to help match
 * register dumps to cmdstream after a GPU hang. */
emit_marker(struct fd_ringbuffer *ring, int scratch_idx)
403
extern int32_t marker_cnt;
404
unsigned reg = REG_AXXX_CP_SCRATCH_REG0 + scratch_idx;
405
/* must not clobber the query-results base register: */
assert(reg != HW_QUERY_BASE_REG);
406
if (reg == HW_QUERY_BASE_REG)
410
OUT_PKT0(ring, reg, 1);
411
OUT_RING(ring, p_atomic_inc_return(&marker_cnt));
415
static inline uint32_t
416
pack_rgba(enum pipe_format format, const float *rgba)
419
util_pack_color(rgba, format, &uc);
424
/* NOTE(review): the "#define swap(a, b)" header line and the rest of the
 * macro body (the a/b assignments closing the statement-expression) were
 * lost in extraction — restore from upstream. */
* swap - swap value of @a and @b
428
__typeof(a) __tmp = (a); \
433
/* Single-bit mask.  The argument is parenthesized so that expressions with
 * operators of lower precedence than '<<' (eg. BIT(x ? 1 : 2)) expand
 * correctly — the previous definition left 'bit' bare. */
#define BIT(bit) (1u << (bit))
439
/* Translate a gallium sample count to the a3xx MSAA enum.
 * NOTE(review): the entire function body (presumably a switch over
 * 'samples') was lost in extraction — restore from upstream.  The
 * preprocessor line below also looks truncated from a debug-only
 * assertion block; verify its original form. */
static inline enum a3xx_msaa_samples
440
fd_msaa_samples(unsigned samples)
445
#if defined(NDEBUG) || defined(DEBUG)
464
static inline enum a4xx_state_block
465
fd4_stage2shadersb(gl_shader_stage type)
468
case MESA_SHADER_VERTEX:
469
return SB4_VS_SHADER;
470
case MESA_SHADER_FRAGMENT:
471
return SB4_FS_SHADER;
472
case MESA_SHADER_COMPUTE:
473
case MESA_SHADER_KERNEL:
474
return SB4_CS_SHADER;
476
unreachable("bad shader type");
477
return (enum a4xx_state_block) ~0;
481
static inline enum a4xx_index_size
482
fd4_size2indextype(unsigned index_size)
484
switch (index_size) {
486
return INDEX4_SIZE_8_BIT;
488
return INDEX4_SIZE_16_BIT;
490
return INDEX4_SIZE_32_BIT;
492
DBG("unsupported index size: %d", index_size);
494
return INDEX4_SIZE_32_BIT;
501
#endif /* FREEDRENO_UTIL_H_ */