/*
 * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
 * Copyright 2018 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include "si_build_pm4.h"
#include "util/os_time.h"
#include "util/u_log.h"
#include "util/u_upload_mgr.h"

void si_flush_gfx_cs(struct si_context *ctx, unsigned flags, struct pipe_fence_handle **fence)
{
   struct radeon_cmdbuf *cs = &ctx->gfx_cs;
   struct radeon_winsys *ws = ctx->ws;
   struct si_screen *sscreen = ctx->screen;
   const unsigned wait_ps_cs = SI_CONTEXT_PS_PARTIAL_FLUSH | SI_CONTEXT_CS_PARTIAL_FLUSH;
   unsigned wait_flags = 0;

   if (ctx->gfx_flush_in_progress)
      return;

   /* The amdgpu kernel driver synchronizes execution for shared DMABUFs between
    * processes on DRM >= 3.39.0, so we don't have to wait at the end of IBs to
    * make sure everything is idle.
    *
    * The amdgpu winsys synchronizes execution for buffers shared by different
    * contexts within the same process.
    *
    * Interop with AMDVLK, RADV, or OpenCL within the same process requires
    * explicit fences or glFinish.
    */
   if (sscreen->info.is_amdgpu && sscreen->info.drm_minor >= 39)
      flags |= RADEON_FLUSH_START_NEXT_GFX_IB_NOW;

   if (!sscreen->info.kernel_flushes_tc_l2_after_ib) {
      wait_flags |= wait_ps_cs | SI_CONTEXT_INV_L2;
   } else if (ctx->chip_class == GFX6) {
      /* The kernel flushes L2 before shaders are finished. */
      wait_flags |= wait_ps_cs;
   } else if (!(flags & RADEON_FLUSH_START_NEXT_GFX_IB_NOW) ||
              ((flags & RADEON_FLUSH_TOGGLE_SECURE_SUBMISSION) &&
               !ws->cs_is_secure(cs))) {
      /* TODO: this workaround fixes subtitles rendering with mpv -vo=vaapi and
       * tmz but shouldn't be necessary.
       */
      wait_flags |= wait_ps_cs;
   }

   /* Drop this flush if it's a no-op. */
   if (!radeon_emitted(cs, ctx->initial_gfx_cs_size) &&
       (!wait_flags || !ctx->gfx_last_ib_is_busy) &&
       !(flags & RADEON_FLUSH_TOGGLE_SECURE_SUBMISSION)) {
      tc_driver_internal_flush_notify(ctx->tc);
      return;
   }

   /* Non-aux contexts must set up no-op API dispatch on GPU resets. This is
    * similar to si_get_reset_status but here we can ignore soft-recoveries,
    * while si_get_reset_status can't. */
   if (!(ctx->context_flags & SI_CONTEXT_FLAG_AUX) &&
       ctx->device_reset_callback.reset) {
      enum pipe_reset_status status = ctx->ws->ctx_query_reset_status(ctx->ctx, true, NULL);
      if (status != PIPE_NO_RESET)
         ctx->device_reset_callback.reset(ctx->device_reset_callback.data, status);
   }

   if (sscreen->debug_flags & DBG(CHECK_VM))
      flags &= ~PIPE_FLUSH_ASYNC;

   ctx->gfx_flush_in_progress = true;

   if (ctx->has_graphics) {
      if (!list_is_empty(&ctx->active_queries))
         si_suspend_queries(ctx);

      ctx->streamout.suspended = false;
      if (ctx->streamout.begin_emitted) {
         si_emit_streamout_end(ctx);
         ctx->streamout.suspended = true;

         /* Since NGG streamout uses GDS, we need to make GDS
          * idle when we leave the IB, otherwise another process
          * might overwrite it while our shaders are busy.
          */
         if (sscreen->use_ngg_streamout)
            wait_flags |= SI_CONTEXT_PS_PARTIAL_FLUSH;
      }
   }

   /* Make sure CP DMA is idle at the end of IBs after L2 prefetches
    * because the kernel doesn't wait for it. */
   if (ctx->chip_class >= GFX7)
      si_cp_dma_wait_for_idle(ctx, &ctx->gfx_cs);

   /* Wait for draw calls to finish if needed. */
   if (wait_flags) {
      ctx->flags |= wait_flags;
      ctx->emit_cache_flush(ctx, &ctx->gfx_cs);
   }
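
   /* If we didn't wait for both PS and CS above, the work in this IB may
    * still be executing when the next IB starts, so remember that for the
    * no-op-flush check in the next si_flush_gfx_cs call. */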
   ctx->gfx_last_ib_is_busy = (wait_flags & wait_ps_cs) != wait_ps_cs;

   if (ctx->current_saved_cs) {
      /* Save the IB for debug contexts. */
      si_save_cs(ws, cs, &ctx->current_saved_cs->gfx, true);
      ctx->current_saved_cs->flushed = true;
      ctx->current_saved_cs->time_flush = os_time_get_nano();

      si_log_hw_flush(ctx);
   }

   if (sscreen->debug_flags & DBG(IB))
      si_print_current_ib(ctx, stderr);

   if (ctx->is_noop)
      flags |= RADEON_FLUSH_NOOP;

   /* Flush the CS. */
   ws->cs_flush(cs, flags, &ctx->last_gfx_fence);

   tc_driver_internal_flush_notify(ctx->tc);
   if (fence)
      ws->fence_reference(fence, ctx->last_gfx_fence);

   ctx->num_gfx_cs_flushes++;

   /* Check VM faults if needed. */
   if (sscreen->debug_flags & DBG(CHECK_VM)) {
      /* Use conservative timeout 800ms, after which we won't wait any
       * longer and assume the GPU is hung.
       */
      ctx->ws->fence_wait(ctx->ws, ctx->last_gfx_fence, 800 * 1000 * 1000);

      si_check_vm_faults(ctx, &ctx->current_saved_cs->gfx, RING_GFX);
   }

   if (unlikely(ctx->thread_trace &&
                (flags & PIPE_FLUSH_END_OF_FRAME))) {
      si_handle_thread_trace(ctx, &ctx->gfx_cs);
   }

   if (ctx->current_saved_cs)
      si_saved_cs_reference(&ctx->current_saved_cs, NULL);

   si_begin_new_gfx_cs(ctx, false);
   ctx->gfx_flush_in_progress = false;
}
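
/* Debug contexts carry a small trace buffer: si_trace_emit() makes the CP
 * write an incrementing trace ID into it, so after a hang the last value
 * that reached memory shows roughly how far the IB got. */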
static void si_begin_gfx_cs_debug(struct si_context *ctx)
{
   static const uint32_t zeros[1];
   assert(!ctx->current_saved_cs);

   ctx->current_saved_cs = calloc(1, sizeof(*ctx->current_saved_cs));
   if (!ctx->current_saved_cs)
      return;

   pipe_reference_init(&ctx->current_saved_cs->reference, 1);

   ctx->current_saved_cs->trace_buf =
      si_resource(pipe_buffer_create(ctx->b.screen, 0, PIPE_USAGE_STAGING, 4));
   if (!ctx->current_saved_cs->trace_buf) {
      free(ctx->current_saved_cs);
      ctx->current_saved_cs = NULL;
      return;
   }

   pipe_buffer_write_nooverlap(&ctx->b, &ctx->current_saved_cs->trace_buf->b.b, 0, sizeof(zeros),
                               zeros);
   ctx->current_saved_cs->trace_id = 0;

   radeon_add_to_buffer_list(ctx, &ctx->gfx_cs, ctx->current_saved_cs->trace_buf,
                             RADEON_USAGE_READWRITE | RADEON_PRIO_FENCE_TRACE);
}
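
/* The GDS and GDS OA allocations are global to the context (see
 * si_allocate_gds), so they have to be re-added to the buffer list of
 * every new gfx IB that may use NGG streamout. */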
static void si_add_gds_to_buffer_list(struct si_context *sctx)
{
   if (sctx->gds) {
      sctx->ws->cs_add_buffer(&sctx->gfx_cs, sctx->gds, RADEON_USAGE_READWRITE, 0);
      if (sctx->gds_oa) {
         sctx->ws->cs_add_buffer(&sctx->gfx_cs, sctx->gds_oa, RADEON_USAGE_READWRITE, 0);
      }
   }
}

void si_allocate_gds(struct si_context *sctx)
{
   struct radeon_winsys *ws = sctx->ws;

   if (sctx->gds)
      return;

   assert(sctx->screen->use_ngg_streamout);

   /* 4 streamout GDS counters.
    * We need 256B (64 dw) of GDS, otherwise streamout hangs.
    */
   sctx->gds = ws->buffer_create(ws, 256, 4, RADEON_DOMAIN_GDS, RADEON_FLAG_DRIVER_INTERNAL);
   sctx->gds_oa = ws->buffer_create(ws, 4, 1, RADEON_DOMAIN_OA, RADEON_FLAG_DRIVER_INTERNAL);

   assert(sctx->gds && sctx->gds_oa);
   si_add_gds_to_buffer_list(sctx);
}
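
/* Seed the register tracker with the values that CLEAR_STATE programs, so
 * redundant context-register writes can be skipped right after CLEAR_STATE. */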
void si_set_tracked_regs_to_clear_state(struct si_context *ctx)
{
   STATIC_ASSERT(SI_NUM_TRACKED_REGS <= sizeof(ctx->tracked_regs.reg_saved) * 8);

   ctx->tracked_regs.reg_value[SI_TRACKED_DB_RENDER_CONTROL] = 0x00000000;
   ctx->tracked_regs.reg_value[SI_TRACKED_DB_COUNT_CONTROL] = 0x00000000;
   ctx->tracked_regs.reg_value[SI_TRACKED_DB_RENDER_OVERRIDE2] = 0x00000000;
   ctx->tracked_regs.reg_value[SI_TRACKED_DB_SHADER_CONTROL] = 0x00000000;
   ctx->tracked_regs.reg_value[SI_TRACKED_CB_TARGET_MASK] = 0xffffffff;
   ctx->tracked_regs.reg_value[SI_TRACKED_CB_DCC_CONTROL] = 0x00000000;
   ctx->tracked_regs.reg_value[SI_TRACKED_SX_PS_DOWNCONVERT] = 0x00000000;
   ctx->tracked_regs.reg_value[SI_TRACKED_SX_BLEND_OPT_EPSILON] = 0x00000000;
   ctx->tracked_regs.reg_value[SI_TRACKED_SX_BLEND_OPT_CONTROL] = 0x00000000;
   ctx->tracked_regs.reg_value[SI_TRACKED_PA_SC_LINE_CNTL] = 0x00001000;
   ctx->tracked_regs.reg_value[SI_TRACKED_PA_SC_AA_CONFIG] = 0x00000000;
   ctx->tracked_regs.reg_value[SI_TRACKED_DB_EQAA] = 0x00000000;
   ctx->tracked_regs.reg_value[SI_TRACKED_PA_SC_MODE_CNTL_1] = 0x00000000;
   ctx->tracked_regs.reg_value[SI_TRACKED_PA_SU_PRIM_FILTER_CNTL] = 0;
   ctx->tracked_regs.reg_value[SI_TRACKED_PA_SU_SMALL_PRIM_FILTER_CNTL] = 0x00000000;
   ctx->tracked_regs.reg_value[SI_TRACKED_PA_CL_VS_OUT_CNTL] = 0x00000000;
   ctx->tracked_regs.reg_value[SI_TRACKED_PA_CL_CLIP_CNTL] = 0x00090000;
   ctx->tracked_regs.reg_value[SI_TRACKED_PA_SC_BINNER_CNTL_0] = 0x00000003;
   ctx->tracked_regs.reg_value[SI_TRACKED_DB_VRS_OVERRIDE_CNTL] = 0x00000000;
   ctx->tracked_regs.reg_value[SI_TRACKED_PA_CL_GB_VERT_CLIP_ADJ] = 0x3f800000;
   ctx->tracked_regs.reg_value[SI_TRACKED_PA_CL_GB_VERT_DISC_ADJ] = 0x3f800000;
   ctx->tracked_regs.reg_value[SI_TRACKED_PA_CL_GB_HORZ_CLIP_ADJ] = 0x3f800000;
   ctx->tracked_regs.reg_value[SI_TRACKED_PA_CL_GB_HORZ_DISC_ADJ] = 0x3f800000;
   ctx->tracked_regs.reg_value[SI_TRACKED_PA_SU_HARDWARE_SCREEN_OFFSET] = 0;
   ctx->tracked_regs.reg_value[SI_TRACKED_PA_SU_VTX_CNTL] = 0x00000005;
   ctx->tracked_regs.reg_value[SI_TRACKED_PA_SC_CLIPRECT_RULE] = 0xffff;
   ctx->tracked_regs.reg_value[SI_TRACKED_PA_SC_LINE_STIPPLE] = 0;
   ctx->tracked_regs.reg_value[SI_TRACKED_VGT_ESGS_RING_ITEMSIZE] = 0x00000000;
   ctx->tracked_regs.reg_value[SI_TRACKED_VGT_GSVS_RING_OFFSET_1] = 0x00000000;
   ctx->tracked_regs.reg_value[SI_TRACKED_VGT_GSVS_RING_OFFSET_2] = 0x00000000;
   ctx->tracked_regs.reg_value[SI_TRACKED_VGT_GSVS_RING_OFFSET_3] = 0x00000000;
   ctx->tracked_regs.reg_value[SI_TRACKED_VGT_GSVS_RING_ITEMSIZE] = 0x00000000;
   ctx->tracked_regs.reg_value[SI_TRACKED_VGT_GS_MAX_VERT_OUT] = 0x00000000;
   ctx->tracked_regs.reg_value[SI_TRACKED_VGT_GS_VERT_ITEMSIZE] = 0x00000000;
   ctx->tracked_regs.reg_value[SI_TRACKED_VGT_GS_VERT_ITEMSIZE_1] = 0x00000000;
   ctx->tracked_regs.reg_value[SI_TRACKED_VGT_GS_VERT_ITEMSIZE_2] = 0x00000000;
   ctx->tracked_regs.reg_value[SI_TRACKED_VGT_GS_VERT_ITEMSIZE_3] = 0x00000000;
   ctx->tracked_regs.reg_value[SI_TRACKED_VGT_GS_INSTANCE_CNT] = 0x00000000;
   ctx->tracked_regs.reg_value[SI_TRACKED_VGT_GS_ONCHIP_CNTL] = 0x00000000;
   ctx->tracked_regs.reg_value[SI_TRACKED_VGT_GS_MAX_PRIMS_PER_SUBGROUP] = 0x00000000;
   ctx->tracked_regs.reg_value[SI_TRACKED_VGT_GS_MODE] = 0x00000000;
   ctx->tracked_regs.reg_value[SI_TRACKED_VGT_PRIMITIVEID_EN] = 0x00000000;
   ctx->tracked_regs.reg_value[SI_TRACKED_VGT_REUSE_OFF] = 0x00000000;
   ctx->tracked_regs.reg_value[SI_TRACKED_SPI_VS_OUT_CONFIG] = 0x00000000;
   ctx->tracked_regs.reg_value[SI_TRACKED_GE_MAX_OUTPUT_PER_SUBGROUP] = 0x00000000;
   ctx->tracked_regs.reg_value[SI_TRACKED_GE_NGG_SUBGRP_CNTL] = 0x00000000;
   ctx->tracked_regs.reg_value[SI_TRACKED_SPI_SHADER_IDX_FORMAT] = 0x00000000;
   ctx->tracked_regs.reg_value[SI_TRACKED_SPI_SHADER_POS_FORMAT] = 0x00000000;
   ctx->tracked_regs.reg_value[SI_TRACKED_PA_CL_VTE_CNTL] = 0x00000000;
   ctx->tracked_regs.reg_value[SI_TRACKED_PA_CL_NGG_CNTL] = 0x00000000;
   ctx->tracked_regs.reg_value[SI_TRACKED_SPI_PS_INPUT_ENA] = 0x00000000;
   ctx->tracked_regs.reg_value[SI_TRACKED_SPI_PS_INPUT_ADDR] = 0x00000000;
   ctx->tracked_regs.reg_value[SI_TRACKED_SPI_BARYC_CNTL] = 0x00000000;
   ctx->tracked_regs.reg_value[SI_TRACKED_SPI_PS_IN_CONTROL] = 0x00000002;
   ctx->tracked_regs.reg_value[SI_TRACKED_SPI_SHADER_Z_FORMAT] = 0x00000000;
   ctx->tracked_regs.reg_value[SI_TRACKED_SPI_SHADER_COL_FORMAT] = 0x00000000;
   ctx->tracked_regs.reg_value[SI_TRACKED_CB_SHADER_MASK] = 0xffffffff;
   ctx->tracked_regs.reg_value[SI_TRACKED_VGT_TF_PARAM] = 0x00000000;
   ctx->tracked_regs.reg_value[SI_TRACKED_VGT_VERTEX_REUSE_BLOCK_CNTL] = 0x0000001e; /* From GFX8 */

   /* Set all cleared context registers to saved. */
   ctx->tracked_regs.reg_saved = BITFIELD64_MASK(SI_TRACKED_GE_PC_ALLOC);
   ctx->last_gs_out_prim = 0; /* cleared by CLEAR_STATE */
}
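
/* Install (or, with a NULL wrapper, remove) a wrapper around the context's
 * draw entry points so that a preamble like si_tmz_preamble runs before the
 * real draw code. */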
void si_install_draw_wrapper(struct si_context *sctx, pipe_draw_vbo_func wrapper,
                             pipe_draw_vertex_state_func vstate_wrapper)
{
   if (wrapper) {
      if (wrapper != sctx->b.draw_vbo) {
         assert(!sctx->real_draw_vbo);
         assert(!sctx->real_draw_vertex_state);
         sctx->real_draw_vbo = sctx->b.draw_vbo;
         sctx->real_draw_vertex_state = sctx->b.draw_vertex_state;
         sctx->b.draw_vbo = wrapper;
         sctx->b.draw_vertex_state = vstate_wrapper;
      }
   } else if (sctx->real_draw_vbo) {
      sctx->real_draw_vbo = NULL;
      sctx->real_draw_vertex_state = NULL;
      si_select_draw_vbo(sctx);
   }
}
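
/* TMZ: an IB is submitted either entirely secure or entirely non-secure, so
 * when the encryption status of the bound resources no longer matches the
 * current IB, flush it and toggle the secure flag for the next one. */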
static void si_tmz_preamble(struct si_context *sctx)
{
   bool secure = si_gfx_resources_check_encrypted(sctx);
   if (secure != sctx->ws->cs_is_secure(&sctx->gfx_cs)) {
      si_flush_gfx_cs(sctx, RADEON_FLUSH_ASYNC_START_NEXT_GFX_IB_NOW |
                            RADEON_FLUSH_TOGGLE_SECURE_SUBMISSION, NULL);
   }
}

static void si_draw_vbo_tmz_preamble(struct pipe_context *ctx,
                                     const struct pipe_draw_info *info,
                                     unsigned drawid_offset,
                                     const struct pipe_draw_indirect_info *indirect,
                                     const struct pipe_draw_start_count_bias *draws,
                                     unsigned num_draws) {
   struct si_context *sctx = (struct si_context *)ctx;

   si_tmz_preamble(sctx);
   sctx->real_draw_vbo(ctx, info, drawid_offset, indirect, draws, num_draws);
}

static void si_draw_vstate_tmz_preamble(struct pipe_context *ctx,
                                        struct pipe_vertex_state *state,
                                        uint32_t partial_velem_mask,
                                        struct pipe_draw_vertex_state_info info,
                                        const struct pipe_draw_start_count_bias *draws,
                                        unsigned num_draws) {
   struct si_context *sctx = (struct si_context *)ctx;

   si_tmz_preamble(sctx);
   sctx->real_draw_vertex_state(ctx, state, partial_velem_mask, info, draws, num_draws);
}
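
/* Start a new gfx IB: re-add required buffers to the buffer list, request the
 * initial cache invalidations, and mark all state dirty that the new IB has
 * to re-emit, since nothing from the previous IB carries over. */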
void si_begin_new_gfx_cs(struct si_context *ctx, bool first_cs)
{
   bool is_secure = false;

   if (unlikely(radeon_uses_secure_bos(ctx->ws))) {
      is_secure = ctx->ws->cs_is_secure(&ctx->gfx_cs);

      si_install_draw_wrapper(ctx, si_draw_vbo_tmz_preamble,
                              si_draw_vstate_tmz_preamble);
   }

   if (ctx->is_debug)
      si_begin_gfx_cs_debug(ctx);

   si_add_gds_to_buffer_list(ctx);

   /* Always invalidate caches at the beginning of IBs, because external
    * users (e.g. BO evictions and SDMA/UVD/VCE IBs) can modify our
    * buffers.
    *
    * Note that the cache flush done by the kernel at the end of GFX IBs
    * isn't useful here, because that flush can finish after the following
    * IB starts drawing.
    *
    * TODO: Do we also need to invalidate CB & DB caches?
    */
   ctx->flags |= SI_CONTEXT_INV_ICACHE | SI_CONTEXT_INV_SCACHE | SI_CONTEXT_INV_VCACHE |
                 SI_CONTEXT_INV_L2 | SI_CONTEXT_START_PIPELINE_STATS;
   ctx->pipeline_stats_enabled = -1;

   /* We don't know if the last draw used NGG because it can be a different process.
    * When switching NGG->legacy, we need to flush VGT for certain hw generations.
    */
   if (ctx->screen->info.has_vgt_flush_ngg_legacy_bug && !ctx->ngg)
      ctx->flags |= SI_CONTEXT_VGT_FLUSH;

   if (ctx->border_color_buffer) {
      radeon_add_to_buffer_list(ctx, &ctx->gfx_cs, ctx->border_color_buffer,
                                RADEON_USAGE_READ | RADEON_PRIO_BORDER_COLORS);
   }
   if (ctx->shadowed_regs) {
      radeon_add_to_buffer_list(ctx, &ctx->gfx_cs, ctx->shadowed_regs,
                                RADEON_USAGE_READWRITE | RADEON_PRIO_DESCRIPTORS);
   }

   si_add_all_descriptors_to_bo_list(ctx);

   if (first_cs || !ctx->shadowed_regs) {
      si_shader_pointers_mark_dirty(ctx);
      ctx->cs_shader_state.initialized = false;
   }

   if (!ctx->has_graphics) {
      ctx->initial_gfx_cs_size = ctx->gfx_cs.current.cdw;
      return;
   }
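
   /* Everything below only applies to contexts with graphics support. */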
   if (ctx->tess_rings) {
      radeon_add_to_buffer_list(ctx, &ctx->gfx_cs,
                                unlikely(is_secure) ? si_resource(ctx->tess_rings_tmz) : si_resource(ctx->tess_rings),
                                RADEON_USAGE_READWRITE | RADEON_PRIO_SHADER_RINGS);
   }

   /* Set all valid groups as dirty so they get re-emitted on
    * the next draw command.
    */
   si_pm4_reset_emitted(ctx, first_cs);

   /* The CS initialization should be emitted before everything else. */
   if (ctx->cs_preamble_state)
      si_pm4_emit(ctx, ctx->cs_preamble_state);
   if (ctx->cs_preamble_tess_rings)
      si_pm4_emit(ctx, unlikely(is_secure) ? ctx->cs_preamble_tess_rings_tmz :
                                             ctx->cs_preamble_tess_rings);
   if (ctx->cs_preamble_gs_rings)
      si_pm4_emit(ctx, ctx->cs_preamble_gs_rings);

   if (ctx->queued.named.ls)
      ctx->prefetch_L2_mask |= SI_PREFETCH_LS;
   if (ctx->queued.named.hs)
      ctx->prefetch_L2_mask |= SI_PREFETCH_HS;
   if (ctx->queued.named.es)
      ctx->prefetch_L2_mask |= SI_PREFETCH_ES;
   if (ctx->queued.named.gs)
      ctx->prefetch_L2_mask |= SI_PREFETCH_GS;
   if (ctx->queued.named.vs)
      ctx->prefetch_L2_mask |= SI_PREFETCH_VS;
   if (ctx->queued.named.ps)
      ctx->prefetch_L2_mask |= SI_PREFETCH_PS;

   /* CLEAR_STATE disables all colorbuffers, so only enable bound ones. */
   bool has_clear_state = ctx->screen->info.has_clear_state;
   if (has_clear_state || ctx->shadowed_regs) {
      ctx->framebuffer.dirty_cbufs =
         u_bit_consecutive(0, ctx->framebuffer.state.nr_cbufs);
      /* CLEAR_STATE disables the zbuffer, so only enable it if it's bound. */
      ctx->framebuffer.dirty_zsbuf = ctx->framebuffer.state.zsbuf != NULL;
   } else {
      ctx->framebuffer.dirty_cbufs = u_bit_consecutive(0, 8);
      ctx->framebuffer.dirty_zsbuf = true;
   }

   /* Even with shadowed registers, we have to add buffers to the buffer list.
    * These atoms are the only ones that add buffers.
    */
   si_mark_atom_dirty(ctx, &ctx->atoms.s.framebuffer);
   si_mark_atom_dirty(ctx, &ctx->atoms.s.render_cond);
   if (ctx->screen->use_ngg_culling)
      si_mark_atom_dirty(ctx, &ctx->atoms.s.ngg_cull_state);

   if (first_cs || !ctx->shadowed_regs) {
      /* These don't add any buffers, so skip them with shadowing. */
      si_mark_atom_dirty(ctx, &ctx->atoms.s.clip_regs);
      /* CLEAR_STATE sets zeros. */
      if (!has_clear_state || ctx->clip_state_any_nonzeros)
         si_mark_atom_dirty(ctx, &ctx->atoms.s.clip_state);
      ctx->sample_locs_num_samples = 0;
      si_mark_atom_dirty(ctx, &ctx->atoms.s.msaa_sample_locs);
      si_mark_atom_dirty(ctx, &ctx->atoms.s.msaa_config);
      /* CLEAR_STATE sets 0xffff. */
      if (!has_clear_state || ctx->sample_mask != 0xffff)
         si_mark_atom_dirty(ctx, &ctx->atoms.s.sample_mask);
      si_mark_atom_dirty(ctx, &ctx->atoms.s.cb_render_state);
      /* CLEAR_STATE sets zeros. */
      if (!has_clear_state || ctx->blend_color_any_nonzeros)
         si_mark_atom_dirty(ctx, &ctx->atoms.s.blend_color);
      si_mark_atom_dirty(ctx, &ctx->atoms.s.db_render_state);
      if (ctx->chip_class >= GFX9)
         si_mark_atom_dirty(ctx, &ctx->atoms.s.dpbb_state);
      si_mark_atom_dirty(ctx, &ctx->atoms.s.stencil_ref);
      si_mark_atom_dirty(ctx, &ctx->atoms.s.spi_map);
      if (!ctx->screen->use_ngg_streamout)
         si_mark_atom_dirty(ctx, &ctx->atoms.s.streamout_enable);
      /* CLEAR_STATE disables all window rectangles. */
      if (!has_clear_state || ctx->num_window_rectangles > 0)
         si_mark_atom_dirty(ctx, &ctx->atoms.s.window_rectangles);
      si_mark_atom_dirty(ctx, &ctx->atoms.s.guardband);
      si_mark_atom_dirty(ctx, &ctx->atoms.s.scissors);
      si_mark_atom_dirty(ctx, &ctx->atoms.s.viewports);

      /* Invalidate various draw states so that they are emitted before
       * the first draw call. */
      si_invalidate_draw_constants(ctx);
      ctx->last_index_size = -1;
      ctx->last_primitive_restart_en = -1;
      ctx->last_restart_index = SI_RESTART_INDEX_UNKNOWN;
      ctx->last_multi_vgt_param = -1;
      ctx->last_vs_state = ~0;
      ctx->last_tcs = NULL;
      ctx->last_tes_sh_base = -1;
      ctx->last_num_tcs_input_cp = -1;
      ctx->last_ls_hs_config = -1; /* impossible value */

      if (has_clear_state) {
         si_set_tracked_regs_to_clear_state(ctx);
      } else {
         /* Set all register values to unknown. */
         ctx->tracked_regs.reg_saved = 0;
         ctx->last_gs_out_prim = -1; /* unknown */
      }

      /* 0xffffffff is an impossible value for SPI_PS_INPUT_CNTL_n registers. */
      memset(ctx->tracked_regs.spi_ps_input_cntl, 0xff, sizeof(uint32_t) * 32);
   }

   if (ctx->scratch_buffer) {
      si_context_add_resource_size(ctx, &ctx->scratch_buffer->b.b);
      si_mark_atom_dirty(ctx, &ctx->atoms.s.scratch_state);
   }

   if (ctx->streamout.suspended) {
      ctx->streamout.append_bitmask = ctx->streamout.enabled_mask;
      si_streamout_buffers_dirty(ctx);
   }

   if (!list_is_empty(&ctx->active_queries))
      si_resume_queries(ctx);

   assert(!ctx->gfx_cs.prev_dw);
   ctx->initial_gfx_cs_size = ctx->gfx_cs.current.cdw;

   /* All buffer references are removed on a flush, so si_check_needs_implicit_sync
    * cannot determine if si_make_CB_shader_coherent() needs to be called.
    * ctx->force_cb_shader_coherent will be cleared by the first call to
    * si_make_CB_shader_coherent.
    */
   ctx->force_cb_shader_coherent = true;
}
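
/* Emit a trace point: the trace ID is written both to memory (CP WRITE_DATA)
 * and into the IB itself as a NOP payload, so a hang report can be matched
 * against the last trace point that actually executed. */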
void si_trace_emit(struct si_context *sctx)
{
   struct radeon_cmdbuf *cs = &sctx->gfx_cs;
   uint32_t trace_id = ++sctx->current_saved_cs->trace_id;

   si_cp_write_data(sctx, sctx->current_saved_cs->trace_buf, 0, 4, V_370_MEM, V_370_ME, &trace_id);

   radeon_begin(cs);
   radeon_emit(PKT3(PKT3_NOP, 0, 0));
   radeon_emit(AC_ENCODE_TRACE_POINT(trace_id));
   radeon_end();

   if (sctx->log)
      u_log_flush(sctx->log);
}
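
/* GFX6-GFX9 cache synchronization via CP_COHER_CNTL: GFX9 and compute rings
 * use ACQUIRE_MEM, older graphics rings use SURFACE_SYNC. */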
void si_emit_surface_sync(struct si_context *sctx, struct radeon_cmdbuf *cs, unsigned cp_coher_cntl)
{
   bool compute_ib = !sctx->has_graphics;

   assert(sctx->chip_class <= GFX9);

   /* This seems problematic with GFX7 (see #4764) */
   if (sctx->chip_class != GFX7)
      cp_coher_cntl |= 1u << 31; /* don't sync PFP, i.e. execute the sync in ME */

   radeon_begin(cs);

   if (sctx->chip_class == GFX9 || compute_ib) {
      /* Flush caches and wait for the caches to assert idle. */
      radeon_emit(PKT3(PKT3_ACQUIRE_MEM, 5, 0));
      radeon_emit(cp_coher_cntl); /* CP_COHER_CNTL */
      radeon_emit(0xffffffff);    /* CP_COHER_SIZE */
      radeon_emit(0xffffff);      /* CP_COHER_SIZE_HI */
      radeon_emit(0);             /* CP_COHER_BASE */
      radeon_emit(0);             /* CP_COHER_BASE_HI */
      radeon_emit(0x0000000A);    /* POLL_INTERVAL */
   } else {
      /* ACQUIRE_MEM is only required on a compute ring. */
      radeon_emit(PKT3(PKT3_SURFACE_SYNC, 3, 0));
      radeon_emit(cp_coher_cntl); /* CP_COHER_CNTL */
      radeon_emit(0xffffffff);    /* CP_COHER_SIZE */
      radeon_emit(0);             /* CP_COHER_BASE */
      radeon_emit(0x0000000A);    /* POLL_INTERVAL */
   }
   radeon_end();

   /* ACQUIRE_MEM has an implicit context roll if the current context
    * is busy. */
   sctx->context_roll = true;
}
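
/* Return the scratch BO that EOP release-mem events write their fence value
 * to; secure (TMZ) submissions need an encrypted BO, allocated on first use. */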
static struct si_resource* si_get_wait_mem_scratch_bo(struct si_context *ctx, bool is_secure)
{
   struct si_screen *sscreen = ctx->screen;

   if (likely(!is_secure)) {
      return ctx->wait_mem_scratch;
   }

   assert(sscreen->info.has_tmz_support);
   if (!ctx->wait_mem_scratch_tmz)
      ctx->wait_mem_scratch_tmz =
         si_aligned_buffer_create(&sscreen->b,
                                  PIPE_RESOURCE_FLAG_UNMAPPABLE |
                                  SI_RESOURCE_FLAG_DRIVER_INTERNAL |
                                  PIPE_RESOURCE_FLAG_ENCRYPTED,
                                  PIPE_USAGE_DEFAULT, 8,
                                  sscreen->info.tcc_cache_line_size);

   return ctx->wait_mem_scratch_tmz;
}
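
/* GFX10+ cache-flush path: cache operations are expressed as GCR_CNTL fields
 * and are either folded into a RELEASE_MEM event (when a CB/DB flush has to
 * be waited on) or emitted as a standalone ACQUIRE_MEM. */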
void gfx10_emit_cache_flush(struct si_context *ctx, struct radeon_cmdbuf *cs)
{
   uint32_t gcr_cntl = 0;
   unsigned cb_db_event = 0;
   unsigned flags = ctx->flags;

   if (!ctx->has_graphics) {
      /* Only process compute flags. */
      flags &= SI_CONTEXT_INV_ICACHE | SI_CONTEXT_INV_SCACHE | SI_CONTEXT_INV_VCACHE |
               SI_CONTEXT_INV_L2 | SI_CONTEXT_WB_L2 | SI_CONTEXT_INV_L2_METADATA |
               SI_CONTEXT_CS_PARTIAL_FLUSH;
   }

   /* We don't need these. */
   assert(!(flags & (SI_CONTEXT_VGT_STREAMOUT_SYNC | SI_CONTEXT_FLUSH_AND_INV_DB_META)));

   radeon_begin(cs);

   if (flags & SI_CONTEXT_VGT_FLUSH) {
      radeon_emit(PKT3(PKT3_EVENT_WRITE, 0, 0));
      radeon_emit(EVENT_TYPE(V_028A90_VGT_FLUSH) | EVENT_INDEX(0));
   }

   if (flags & SI_CONTEXT_FLUSH_AND_INV_CB)
      ctx->num_cb_cache_flushes++;
   if (flags & SI_CONTEXT_FLUSH_AND_INV_DB)
      ctx->num_db_cache_flushes++;

   if (flags & SI_CONTEXT_INV_ICACHE)
      gcr_cntl |= S_586_GLI_INV(V_586_GLI_ALL);
   if (flags & SI_CONTEXT_INV_SCACHE) {
      /* TODO: When writing to the SMEM L1 cache, we need to set SEQ
       * to FORWARD when both L1 and L2 are written out (WB or INV).
       */
      gcr_cntl |= S_586_GL1_INV(1) | S_586_GLK_INV(1);
   }
   if (flags & SI_CONTEXT_INV_VCACHE)
      gcr_cntl |= S_586_GL1_INV(1) | S_586_GLV_INV(1);

   /* The L2 cache ops are:
    * - INV: - invalidate lines that reflect memory (were loaded from memory)
    *        - don't touch lines that were overwritten (were stored by gfx clients)
    * - WB: - don't touch lines that reflect memory
    *       - write back lines that were overwritten
    * - WB | INV: - invalidate lines that reflect memory
    *             - write back lines that were overwritten
    *
    * GLM doesn't support WB alone. If WB is set, INV must be set too.
    */
   if (flags & SI_CONTEXT_INV_L2) {
      /* Writeback and invalidate everything in L2. */
      gcr_cntl |= S_586_GL2_INV(1) | S_586_GL2_WB(1) | S_586_GLM_INV(1) | S_586_GLM_WB(1);
      ctx->num_L2_invalidates++;
   } else if (flags & SI_CONTEXT_WB_L2) {
      gcr_cntl |= S_586_GL2_WB(1) | S_586_GLM_WB(1) | S_586_GLM_INV(1);
   } else if (flags & SI_CONTEXT_INV_L2_METADATA) {
      gcr_cntl |= S_586_GLM_INV(1) | S_586_GLM_WB(1);
   }

   if (flags & (SI_CONTEXT_FLUSH_AND_INV_CB | SI_CONTEXT_FLUSH_AND_INV_DB)) {
      if (flags & SI_CONTEXT_FLUSH_AND_INV_CB) {
         /* Flush CMASK/FMASK/DCC. Will wait for idle later. */
         radeon_emit(PKT3(PKT3_EVENT_WRITE, 0, 0));
         radeon_emit(EVENT_TYPE(V_028A90_FLUSH_AND_INV_CB_META) | EVENT_INDEX(0));
      }
      if (flags & SI_CONTEXT_FLUSH_AND_INV_DB) {
         /* Flush HTILE. Will wait for idle later. */
         radeon_emit(PKT3(PKT3_EVENT_WRITE, 0, 0));
         radeon_emit(EVENT_TYPE(V_028A90_FLUSH_AND_INV_DB_META) | EVENT_INDEX(0));
      }

      /* First flush CB/DB, then L1/L2. */
      gcr_cntl |= S_586_SEQ(V_586_SEQ_FORWARD);

      if ((flags & (SI_CONTEXT_FLUSH_AND_INV_CB | SI_CONTEXT_FLUSH_AND_INV_DB)) ==
          (SI_CONTEXT_FLUSH_AND_INV_CB | SI_CONTEXT_FLUSH_AND_INV_DB)) {
         cb_db_event = V_028A90_CACHE_FLUSH_AND_INV_TS_EVENT;
      } else if (flags & SI_CONTEXT_FLUSH_AND_INV_CB) {
         cb_db_event = V_028A90_FLUSH_AND_INV_CB_DATA_TS;
      } else if (flags & SI_CONTEXT_FLUSH_AND_INV_DB) {
         cb_db_event = V_028A90_FLUSH_AND_INV_DB_DATA_TS;
      } else {
         assert(0);
      }
   } else {
      /* Wait for graphics shaders to go idle if requested. */
      if (flags & SI_CONTEXT_PS_PARTIAL_FLUSH) {
         radeon_emit(PKT3(PKT3_EVENT_WRITE, 0, 0));
         radeon_emit(EVENT_TYPE(V_028A90_PS_PARTIAL_FLUSH) | EVENT_INDEX(4));
         /* Only count explicit shader flushes, not implicit ones. */
         ctx->num_vs_flushes++;
         ctx->num_ps_flushes++;
      } else if (flags & SI_CONTEXT_VS_PARTIAL_FLUSH) {
         radeon_emit(PKT3(PKT3_EVENT_WRITE, 0, 0));
         radeon_emit(EVENT_TYPE(V_028A90_VS_PARTIAL_FLUSH) | EVENT_INDEX(4));
         ctx->num_vs_flushes++;
      }
   }

   if (flags & SI_CONTEXT_CS_PARTIAL_FLUSH && ctx->compute_is_busy) {
      radeon_emit(PKT3(PKT3_EVENT_WRITE, 0, 0));
      radeon_emit(EVENT_TYPE(V_028A90_CS_PARTIAL_FLUSH) | EVENT_INDEX(4));
      ctx->num_cs_flushes++;
      ctx->compute_is_busy = false;
   }

   if (cb_db_event) {
      struct si_resource* wait_mem_scratch =
         si_get_wait_mem_scratch_bo(ctx, ctx->ws->cs_is_secure(cs));
      uint64_t va;

      /* CB/DB flush and invalidate (or possibly just a wait for a
       * meta flush) via RELEASE_MEM.
       *
       * Combine this with other cache flushes when possible; this
       * requires affected shaders to be idle, so do it after the
       * CS_PARTIAL_FLUSH before (VS/PS partial flushes are always
       * executed first).
       */

      /* Do the flush (enqueue the event and wait for it). */
      va = wait_mem_scratch->gpu_address;
      ctx->wait_mem_number++;

      /* Get GCR_CNTL fields, because the encoding is different in RELEASE_MEM. */
      unsigned glm_wb = G_586_GLM_WB(gcr_cntl);
      unsigned glm_inv = G_586_GLM_INV(gcr_cntl);
      unsigned glv_inv = G_586_GLV_INV(gcr_cntl);
      unsigned gl1_inv = G_586_GL1_INV(gcr_cntl);
      assert(G_586_GL2_US(gcr_cntl) == 0);
      assert(G_586_GL2_RANGE(gcr_cntl) == 0);
      assert(G_586_GL2_DISCARD(gcr_cntl) == 0);
      unsigned gl2_inv = G_586_GL2_INV(gcr_cntl);
      unsigned gl2_wb = G_586_GL2_WB(gcr_cntl);
      unsigned gcr_seq = G_586_SEQ(gcr_cntl);

      gcr_cntl &= C_586_GLM_WB & C_586_GLM_INV & C_586_GLV_INV & C_586_GL1_INV & C_586_GL2_INV &
                  C_586_GL2_WB; /* keep SEQ */

      si_cp_release_mem(ctx, cs, cb_db_event,
                        S_490_GLM_WB(glm_wb) | S_490_GLM_INV(glm_inv) | S_490_GLV_INV(glv_inv) |
                           S_490_GL1_INV(gl1_inv) | S_490_GL2_INV(gl2_inv) | S_490_GL2_WB(gl2_wb) |
                           S_490_SEQ(gcr_seq),
                        EOP_DST_SEL_MEM, EOP_INT_SEL_SEND_DATA_AFTER_WR_CONFIRM,
                        EOP_DATA_SEL_VALUE_32BIT, wait_mem_scratch, va, ctx->wait_mem_number,
                        SI_NOT_QUERY);

      if (unlikely(ctx->thread_trace_enabled)) {
         si_sqtt_describe_barrier_start(ctx, &ctx->gfx_cs);
      }

      si_cp_wait_mem(ctx, cs, va, ctx->wait_mem_number, 0xffffffff, WAIT_REG_MEM_EQUAL);

      if (unlikely(ctx->thread_trace_enabled)) {
         si_sqtt_describe_barrier_end(ctx, &ctx->gfx_cs, flags);
      }
   }

   radeon_begin_again(cs);

   /* Ignore fields that only modify the behavior of other fields. */
   if (gcr_cntl & C_586_GL1_RANGE & C_586_GL2_RANGE & C_586_SEQ) {
      unsigned dont_sync_pfp = (!(flags & SI_CONTEXT_PFP_SYNC_ME)) << 31;

      /* Flush caches and wait for the caches to assert idle.
       * The cache flush is executed in the ME, but the PFP waits
       * for completion.
       */
      radeon_emit(PKT3(PKT3_ACQUIRE_MEM, 6, 0));
      radeon_emit(dont_sync_pfp); /* CP_COHER_CNTL */
      radeon_emit(0xffffffff);    /* CP_COHER_SIZE */
      radeon_emit(0xffffff);      /* CP_COHER_SIZE_HI */
      radeon_emit(0);             /* CP_COHER_BASE */
      radeon_emit(0);             /* CP_COHER_BASE_HI */
      radeon_emit(0x0000000A);    /* POLL_INTERVAL */
      radeon_emit(gcr_cntl);      /* GCR_CNTL */
   } else if (flags & SI_CONTEXT_PFP_SYNC_ME) {
      /* Synchronize PFP with ME. (this stalls PFP) */
      radeon_emit(PKT3(PKT3_PFP_SYNC_ME, 0, 0));
      radeon_emit(0);
   }

   if (flags & SI_CONTEXT_START_PIPELINE_STATS && ctx->pipeline_stats_enabled != 1) {
      radeon_emit(PKT3(PKT3_EVENT_WRITE, 0, 0));
      radeon_emit(EVENT_TYPE(V_028A90_PIPELINESTAT_START) | EVENT_INDEX(0));
      ctx->pipeline_stats_enabled = 1;
   } else if (flags & SI_CONTEXT_STOP_PIPELINE_STATS && ctx->pipeline_stats_enabled != 0) {
      radeon_emit(PKT3(PKT3_EVENT_WRITE, 0, 0));
      radeon_emit(EVENT_TYPE(V_028A90_PIPELINESTAT_STOP) | EVENT_INDEX(0));
      ctx->pipeline_stats_enabled = 0;
   }
   radeon_end();

   ctx->flags = 0;
}
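
/* GFX6-GFX9 cache-flush path: EVENT_WRITE packets flush CB/DB and wait for
 * shaders, L1/L2 are handled through SURFACE_SYNC/ACQUIRE_MEM, and GFX9
 * additionally uses a TS event + WAIT_REG_MEM to wait for CB/DB flushes. */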
void si_emit_cache_flush(struct si_context *sctx, struct radeon_cmdbuf *cs)
{
   uint32_t flags = sctx->flags;

   if (!sctx->has_graphics) {
      /* Only process compute flags. */
      flags &= SI_CONTEXT_INV_ICACHE | SI_CONTEXT_INV_SCACHE | SI_CONTEXT_INV_VCACHE |
               SI_CONTEXT_INV_L2 | SI_CONTEXT_WB_L2 | SI_CONTEXT_INV_L2_METADATA |
               SI_CONTEXT_CS_PARTIAL_FLUSH;
   }

   uint32_t cp_coher_cntl = 0;
   const uint32_t flush_cb_db = flags & (SI_CONTEXT_FLUSH_AND_INV_CB | SI_CONTEXT_FLUSH_AND_INV_DB);

   assert(sctx->chip_class <= GFX9);

   if (flags & SI_CONTEXT_FLUSH_AND_INV_CB)
      sctx->num_cb_cache_flushes++;
   if (flags & SI_CONTEXT_FLUSH_AND_INV_DB)
      sctx->num_db_cache_flushes++;

   /* GFX6 has a bug that it always flushes ICACHE and KCACHE if either
    * bit is set. An alternative way is to write SQC_CACHES, but that
    * doesn't seem to work reliably. Since the bug doesn't affect
    * correctness (it only does more work than necessary) and
    * the performance impact is likely negligible, there is no plan
    * to add a workaround for it.
    */
   if (flags & SI_CONTEXT_INV_ICACHE)
      cp_coher_cntl |= S_0085F0_SH_ICACHE_ACTION_ENA(1);
   if (flags & SI_CONTEXT_INV_SCACHE)
      cp_coher_cntl |= S_0085F0_SH_KCACHE_ACTION_ENA(1);

   if (sctx->chip_class <= GFX8) {
      if (flags & SI_CONTEXT_FLUSH_AND_INV_CB) {
         cp_coher_cntl |= S_0085F0_CB_ACTION_ENA(1) | S_0085F0_CB0_DEST_BASE_ENA(1) |
                          S_0085F0_CB1_DEST_BASE_ENA(1) | S_0085F0_CB2_DEST_BASE_ENA(1) |
                          S_0085F0_CB3_DEST_BASE_ENA(1) | S_0085F0_CB4_DEST_BASE_ENA(1) |
                          S_0085F0_CB5_DEST_BASE_ENA(1) | S_0085F0_CB6_DEST_BASE_ENA(1) |
                          S_0085F0_CB7_DEST_BASE_ENA(1);

         /* Necessary for DCC */
         if (sctx->chip_class == GFX8)
            si_cp_release_mem(sctx, cs, V_028A90_FLUSH_AND_INV_CB_DATA_TS, 0, EOP_DST_SEL_MEM,
                              EOP_INT_SEL_NONE, EOP_DATA_SEL_DISCARD, NULL, 0, 0, SI_NOT_QUERY);
      }
      if (flags & SI_CONTEXT_FLUSH_AND_INV_DB)
         cp_coher_cntl |= S_0085F0_DB_ACTION_ENA(1) | S_0085F0_DB_DEST_BASE_ENA(1);
   }

   radeon_begin(cs);

   if (flags & SI_CONTEXT_FLUSH_AND_INV_CB) {
      /* Flush CMASK/FMASK/DCC. SURFACE_SYNC will wait for idle. */
      radeon_emit(PKT3(PKT3_EVENT_WRITE, 0, 0));
      radeon_emit(EVENT_TYPE(V_028A90_FLUSH_AND_INV_CB_META) | EVENT_INDEX(0));
   }
   if (flags & (SI_CONTEXT_FLUSH_AND_INV_DB | SI_CONTEXT_FLUSH_AND_INV_DB_META)) {
      /* Flush HTILE. SURFACE_SYNC will wait for idle. */
      radeon_emit(PKT3(PKT3_EVENT_WRITE, 0, 0));
      radeon_emit(EVENT_TYPE(V_028A90_FLUSH_AND_INV_DB_META) | EVENT_INDEX(0));
   }

   /* Wait for shader engines to go idle.
    * VS and PS waits are unnecessary if SURFACE_SYNC is going to wait
    * for everything including CB/DB cache flushes.
    */
   if (!flush_cb_db) {
      if (flags & SI_CONTEXT_PS_PARTIAL_FLUSH) {
         radeon_emit(PKT3(PKT3_EVENT_WRITE, 0, 0));
         radeon_emit(EVENT_TYPE(V_028A90_PS_PARTIAL_FLUSH) | EVENT_INDEX(4));
         /* Only count explicit shader flushes, not implicit ones
          * done by SURFACE_SYNC.
          */
         sctx->num_vs_flushes++;
         sctx->num_ps_flushes++;
      } else if (flags & SI_CONTEXT_VS_PARTIAL_FLUSH) {
         radeon_emit(PKT3(PKT3_EVENT_WRITE, 0, 0));
         radeon_emit(EVENT_TYPE(V_028A90_VS_PARTIAL_FLUSH) | EVENT_INDEX(4));
         sctx->num_vs_flushes++;
      }
   }

   if (flags & SI_CONTEXT_CS_PARTIAL_FLUSH && sctx->compute_is_busy) {
      radeon_emit(PKT3(PKT3_EVENT_WRITE, 0, 0));
      radeon_emit(EVENT_TYPE(V_028A90_CS_PARTIAL_FLUSH) | EVENT_INDEX(4));
      sctx->num_cs_flushes++;
      sctx->compute_is_busy = false;
   }

   /* VGT state synchronization. */
   if (flags & SI_CONTEXT_VGT_FLUSH) {
      radeon_emit(PKT3(PKT3_EVENT_WRITE, 0, 0));
      radeon_emit(EVENT_TYPE(V_028A90_VGT_FLUSH) | EVENT_INDEX(0));
   }
   if (flags & SI_CONTEXT_VGT_STREAMOUT_SYNC) {
      radeon_emit(PKT3(PKT3_EVENT_WRITE, 0, 0));
      radeon_emit(EVENT_TYPE(V_028A90_VGT_STREAMOUT_SYNC) | EVENT_INDEX(0));
   }
   radeon_end();

   /* GFX9: Wait for idle if we're flushing CB or DB. ACQUIRE_MEM doesn't
    * wait for idle on GFX9. We have to use a TS event.
    */
   if (sctx->chip_class == GFX9 && flush_cb_db) {
      unsigned tc_flags, cb_db_event;

      /* Set the CB/DB flush event. */
      switch (flush_cb_db) {
      case SI_CONTEXT_FLUSH_AND_INV_CB:
         cb_db_event = V_028A90_FLUSH_AND_INV_CB_DATA_TS;
         break;
      case SI_CONTEXT_FLUSH_AND_INV_DB:
         cb_db_event = V_028A90_FLUSH_AND_INV_DB_DATA_TS;
         break;
      default:
         /* both CB & DB */
         cb_db_event = V_028A90_CACHE_FLUSH_AND_INV_TS_EVENT;
      }

      /* These are the only allowed combinations. If you need to
       * do multiple operations at once, do them separately.
       * All operations that invalidate L2 also seem to invalidate
       * metadata. Volatile (VOL) and WC flushes are not listed here.
       *
       * TC | TC_WB = writeback & invalidate L2 & L1
       * TC | TC_WB | TC_NC = writeback & invalidate L2 for MTYPE == NC
       * TC_WB | TC_NC = writeback L2 for MTYPE == NC
       * TC | TC_NC = invalidate L2 for MTYPE == NC
       * TC | TC_MD = writeback & invalidate L2 metadata (DCC, etc.)
       * TCL1 = invalidate L1
       */
      tc_flags = 0;

      if (flags & SI_CONTEXT_INV_L2_METADATA) {
         tc_flags = EVENT_TC_ACTION_ENA | EVENT_TC_MD_ACTION_ENA;
      }

      /* Ideally flush TC together with CB/DB. */
      if (flags & SI_CONTEXT_INV_L2) {
         /* Writeback and invalidate everything in L2 & L1. */
         tc_flags = EVENT_TC_ACTION_ENA | EVENT_TC_WB_ACTION_ENA;

         /* Clear the flags. */
         flags &= ~(SI_CONTEXT_INV_L2 | SI_CONTEXT_WB_L2 | SI_CONTEXT_INV_VCACHE);
         sctx->num_L2_invalidates++;
      }

      /* Do the flush (enqueue the event and wait for it). */
      struct si_resource* wait_mem_scratch =
         si_get_wait_mem_scratch_bo(sctx, sctx->ws->cs_is_secure(cs));
      uint64_t va;

      va = wait_mem_scratch->gpu_address;
      sctx->wait_mem_number++;

      si_cp_release_mem(sctx, cs, cb_db_event, tc_flags, EOP_DST_SEL_MEM,
                        EOP_INT_SEL_SEND_DATA_AFTER_WR_CONFIRM, EOP_DATA_SEL_VALUE_32BIT,
                        wait_mem_scratch, va, sctx->wait_mem_number, SI_NOT_QUERY);

      if (unlikely(sctx->thread_trace_enabled)) {
         si_sqtt_describe_barrier_start(sctx, &sctx->gfx_cs);
      }

      si_cp_wait_mem(sctx, cs, va, sctx->wait_mem_number, 0xffffffff, WAIT_REG_MEM_EQUAL);

      if (unlikely(sctx->thread_trace_enabled)) {
         si_sqtt_describe_barrier_end(sctx, &sctx->gfx_cs, sctx->flags);
      }
   }

   /* When one of the CP_COHER_CNTL.DEST_BASE flags is set, SURFACE_SYNC
    * waits for idle, so it should be last. SURFACE_SYNC is done in PFP.
    *
    * cp_coher_cntl should contain all necessary flags except TC and PFP flags
    * at this point.
    *
    * GFX6-GFX7 don't support L2 write-back.
    */
   if (flags & SI_CONTEXT_INV_L2 || (sctx->chip_class <= GFX7 && (flags & SI_CONTEXT_WB_L2))) {
      /* Invalidate L1 & L2. (L1 is always invalidated on GFX6)
       * WB must be set on GFX8+ when TC_ACTION is set.
       */
      si_emit_surface_sync(sctx, cs,
                           cp_coher_cntl | S_0085F0_TC_ACTION_ENA(1) | S_0085F0_TCL1_ACTION_ENA(1) |
                              S_0301F0_TC_WB_ACTION_ENA(sctx->chip_class >= GFX8));
      cp_coher_cntl = 0;
      sctx->num_L2_invalidates++;
   } else {
      /* L1 invalidation and L2 writeback must be done separately,
       * because both operations can't be done together.
       */
      if (flags & SI_CONTEXT_WB_L2) {
         /* WB = write-back
          * NC = apply to non-coherent MTYPEs
          *      (i.e. MTYPE <= 1, which is what we use everywhere)
          *
          * WB doesn't work without NC.
          */
         si_emit_surface_sync(
            sctx, cs,
            cp_coher_cntl | S_0301F0_TC_WB_ACTION_ENA(1) | S_0301F0_TC_NC_ACTION_ENA(1));
         cp_coher_cntl = 0;
         sctx->num_L2_writebacks++;
      }
      if (flags & SI_CONTEXT_INV_VCACHE) {
         /* Invalidate per-CU VMEM L1. */
         si_emit_surface_sync(sctx, cs, cp_coher_cntl | S_0085F0_TCL1_ACTION_ENA(1));
         cp_coher_cntl = 0;
      }
   }

   /* If TC flushes haven't cleared this... */
   if (cp_coher_cntl)
      si_emit_surface_sync(sctx, cs, cp_coher_cntl);

   if (flags & SI_CONTEXT_PFP_SYNC_ME) {
      radeon_begin(cs);
      radeon_emit(PKT3(PKT3_PFP_SYNC_ME, 0, 0));
      radeon_emit(0);
      radeon_end();
   }

   if (flags & SI_CONTEXT_START_PIPELINE_STATS && sctx->pipeline_stats_enabled != 1) {
      radeon_begin(cs);
      radeon_emit(PKT3(PKT3_EVENT_WRITE, 0, 0));
      radeon_emit(EVENT_TYPE(V_028A90_PIPELINESTAT_START) | EVENT_INDEX(0));
      radeon_end();
      sctx->pipeline_stats_enabled = 1;
   } else if (flags & SI_CONTEXT_STOP_PIPELINE_STATS && sctx->pipeline_stats_enabled != 0) {
      radeon_begin(cs);
      radeon_emit(PKT3(PKT3_EVENT_WRITE, 0, 0));
      radeon_emit(EVENT_TYPE(V_028A90_PIPELINESTAT_STOP) | EVENT_INDEX(0));
      radeon_end();
      sctx->pipeline_stats_enabled = 0;
   }

   sctx->flags = 0;
}