/*
 * Copyright © 2016 Red Hat.
 * Copyright © 2016 Bas Nieuwenhuizen
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <amdgpu.h>
#include <assert.h>
#include <libsync.h>
#include <stdlib.h>

#include "drm-uapi/amdgpu_drm.h"

#include "util/u_memory.h"

#include "ac_debug.h"
#include "radv_amdgpu_bo.h"
#include "radv_amdgpu_cs.h"
#include "radv_amdgpu_winsys.h"
#include "radv_debug.h"
#include "radv_radeon_winsys.h"
#include "sid.h"

#include "vk_drm_syncobj.h"
#include "vk_sync_dummy.h"

#define GFX6_MAX_CS_SIZE 0xffff8 /* in dwords */

enum { VIRTUAL_BUFFER_HASH_TABLE_SIZE = 1024 };
struct radv_amdgpu_ib {
   struct radeon_winsys_bo *bo;
   unsigned cdw;
};

struct radv_amdgpu_cs {
   struct radeon_cmdbuf base;
   struct radv_amdgpu_winsys *ws;

   struct amdgpu_cs_ib_info ib;

   struct radeon_winsys_bo *ib_buffer;
   uint8_t *ib_mapped;
   unsigned max_num_buffers;
   unsigned num_buffers;
   struct drm_amdgpu_bo_list_entry *handles;

   struct radv_amdgpu_ib *old_ib_buffers;
   unsigned num_old_ib_buffers;
   unsigned max_num_old_ib_buffers;
   unsigned *ib_size_ptr;
   VkResult status;
   bool is_chained;
   bool use_ib;
   unsigned hw_ip;

   int buffer_hash_table[1024];

   unsigned num_virtual_buffers;
   unsigned max_num_virtual_buffers;
   struct radeon_winsys_bo **virtual_buffers;
   int *virtual_buffer_hash_table;

   /* For chips that don't support chaining. */
   struct radeon_cmdbuf *old_cs_buffers;
   unsigned num_old_cs_buffers;
};

struct radv_winsys_sem_counts {
   uint32_t syncobj_count;
   uint32_t timeline_syncobj_count;
   uint32_t *syncobj;
   uint64_t *points;
};

struct radv_winsys_sem_info {
   bool cs_emit_signal;
   bool cs_emit_wait;
   struct radv_winsys_sem_counts wait;
   struct radv_winsys_sem_counts signal;
};

static uint32_t radv_amdgpu_ctx_queue_syncobj(struct radv_amdgpu_ctx *ctx, unsigned ip,
                                              unsigned ring);

static inline struct radv_amdgpu_cs *
radv_amdgpu_cs(struct radeon_cmdbuf *base)
{
   return (struct radv_amdgpu_cs *)base;
}
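
/* Illustrative note: the cast above is only valid because struct
 * radv_amdgpu_cs embeds its struct radeon_cmdbuf as the first member, so
 * every radeon_cmdbuf handed out by this winsys is the first field of a
 * radv_amdgpu_cs. A minimal sketch of the invariant:
 *
 *    struct radv_amdgpu_cs cs;
 *    assert((void *)radv_amdgpu_cs(&cs.base) == (void *)&cs);
 */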

static bool
ring_can_use_ib_bos(const struct radv_amdgpu_winsys *ws,
                    enum ring_type ring_type)
{
   if (ring_type == RING_UVD ||
       ring_type == RING_VCE ||
       ring_type == RING_UVD_ENC ||
       ring_type == RING_VCN_DEC ||
       ring_type == RING_VCN_ENC)
      return false;
   return ws->use_ib_bos;
}

static int
ring_to_hw_ip(enum ring_type ring)
{
   switch (ring) {
   case RING_GFX:
      return AMDGPU_HW_IP_GFX;
   case RING_COMPUTE:
      return AMDGPU_HW_IP_COMPUTE;
   case RING_DMA:
      return AMDGPU_HW_IP_DMA;
   case RING_UVD:
      return AMDGPU_HW_IP_UVD;
   case RING_VCE:
      return AMDGPU_HW_IP_VCE;
   case RING_UVD_ENC:
      return AMDGPU_HW_IP_UVD_ENC;
   case RING_VCN_DEC:
      return AMDGPU_HW_IP_VCN_DEC;
   case RING_VCN_ENC:
      return AMDGPU_HW_IP_VCN_ENC;
   case RING_VCN_JPEG:
      return AMDGPU_HW_IP_VCN_JPEG;
   default:
      unreachable("unsupported ring");
   }
}

static enum ring_type
hw_ip_to_ring(int hw_ip)
{
   switch (hw_ip) {
   case AMDGPU_HW_IP_GFX:
      return RING_GFX;
   case AMDGPU_HW_IP_COMPUTE:
      return RING_COMPUTE;
   case AMDGPU_HW_IP_DMA:
      return RING_DMA;
   case AMDGPU_HW_IP_UVD:
      return RING_UVD;
   case AMDGPU_HW_IP_VCE:
      return RING_VCE;
   case AMDGPU_HW_IP_UVD_ENC:
      return RING_UVD_ENC;
   case AMDGPU_HW_IP_VCN_DEC:
      return RING_VCN_DEC;
   case AMDGPU_HW_IP_VCN_ENC:
      return RING_VCN_ENC;
   case AMDGPU_HW_IP_VCN_JPEG:
      return RING_VCN_JPEG;
   default:
      unreachable("unsupported hw ip");
   }
}

struct radv_amdgpu_cs_request {
   /** Specify HW IP block type to which to send the IB. */
   unsigned ip_type;

   /** IP instance index if there are several IPs of the same type. */
   unsigned ip_instance;

   /**
    * Specify ring index of the IP. We could have several rings
    * in the same IP. E.g. 0 for SDMA0 and 1 for SDMA1.
    */
   uint32_t ring;

   /**
    * BO list handles used by this request.
    */
   struct drm_amdgpu_bo_list_entry *handles;
   uint32_t num_handles;

   /** Number of IBs to submit in the field ibs. */
   uint32_t number_of_ibs;

   /**
    * IBs to submit. Those IBs will be submitted together as a single entity.
    */
   struct amdgpu_cs_ib_info *ibs;

   /**
    * The returned sequence number for the command submission.
    */
   uint64_t seq_no;
};

static VkResult radv_amdgpu_cs_submit(struct radv_amdgpu_ctx *ctx,
                                      struct radv_amdgpu_cs_request *request,
                                      struct radv_winsys_sem_info *sem_info);

static void
radv_amdgpu_request_to_fence(struct radv_amdgpu_ctx *ctx, struct radv_amdgpu_fence *fence,
                             struct radv_amdgpu_cs_request *req)
{
   fence->fence.context = ctx->ctx;
   fence->fence.ip_type = req->ip_type;
   fence->fence.ip_instance = req->ip_instance;
   fence->fence.ring = req->ring;
   fence->fence.fence = req->seq_no;
}

static void
radv_amdgpu_cs_destroy(struct radeon_cmdbuf *rcs)
{
   struct radv_amdgpu_cs *cs = radv_amdgpu_cs(rcs);

   if (cs->ib_buffer)
      cs->ws->base.buffer_destroy(&cs->ws->base, cs->ib_buffer);
   else
      free(cs->base.buf);

   for (unsigned i = 0; i < cs->num_old_ib_buffers; ++i)
      cs->ws->base.buffer_destroy(&cs->ws->base, cs->old_ib_buffers[i].bo);

   for (unsigned i = 0; i < cs->num_old_cs_buffers; ++i) {
      free(cs->old_cs_buffers[i].buf);
   }

   free(cs->old_cs_buffers);
   free(cs->old_ib_buffers);
   free(cs->virtual_buffers);
   free(cs->virtual_buffer_hash_table);
   free(cs->handles);
   free(cs);
}

static void
radv_amdgpu_init_cs(struct radv_amdgpu_cs *cs, enum ring_type ring_type)
{
   for (int i = 0; i < ARRAY_SIZE(cs->buffer_hash_table); ++i)
      cs->buffer_hash_table[i] = -1;

   cs->hw_ip = ring_to_hw_ip(ring_type);
}

static enum radeon_bo_domain
radv_amdgpu_cs_domain(const struct radeon_winsys *_ws)
{
   const struct radv_amdgpu_winsys *ws = (const struct radv_amdgpu_winsys *)_ws;

   bool enough_vram = ws->info.all_vram_visible ||
                      p_atomic_read_relaxed(&ws->allocated_vram_vis) * 2 <= ws->info.vram_vis_size;
   bool use_sam =
      (enough_vram && ws->info.has_dedicated_vram && !(ws->perftest & RADV_PERFTEST_NO_SAM)) ||
      (ws->perftest & RADV_PERFTEST_SAM);
   return use_sam ? RADEON_DOMAIN_VRAM : RADEON_DOMAIN_GTT;
}

static struct radeon_cmdbuf *
radv_amdgpu_cs_create(struct radeon_winsys *ws, enum ring_type ring_type)
{
   struct radv_amdgpu_cs *cs;
   uint32_t ib_pad_dw_mask = MAX2(3, radv_amdgpu_winsys(ws)->info.ib_pad_dw_mask[ring_type]);
   uint32_t ib_size = align(20 * 1024 * 4, ib_pad_dw_mask + 1);
   cs = calloc(1, sizeof(struct radv_amdgpu_cs));
   if (!cs)
      return NULL;

   cs->ws = radv_amdgpu_winsys(ws);
   radv_amdgpu_init_cs(cs, ring_type);

   cs->use_ib = ring_can_use_ib_bos(cs->ws, ring_type);

   if (cs->use_ib) {
      VkResult result =
         ws->buffer_create(ws, ib_size, 0, radv_amdgpu_cs_domain(ws),
                           RADEON_FLAG_CPU_ACCESS | RADEON_FLAG_NO_INTERPROCESS_SHARING |
                              RADEON_FLAG_READ_ONLY | RADEON_FLAG_GTT_WC,
                           RADV_BO_PRIORITY_CS, 0, &cs->ib_buffer);
      if (result != VK_SUCCESS) {
         free(cs);
         return NULL;
      }

      cs->ib_mapped = ws->buffer_map(cs->ib_buffer);
      if (!cs->ib_mapped) {
         ws->buffer_destroy(ws, cs->ib_buffer);
         free(cs);
         return NULL;
      }

      cs->ib.ib_mc_address = radv_amdgpu_winsys_bo(cs->ib_buffer)->base.va;
      cs->base.buf = (uint32_t *)cs->ib_mapped;
      cs->base.max_dw = ib_size / 4 - 4;
      cs->ib_size_ptr = &cs->ib.size;
      cs->ib.size = 0;

      ws->cs_add_buffer(&cs->base, cs->ib_buffer);
   } else {
      uint32_t *buf = malloc(16384);
      if (!buf) {
         free(cs);
         return NULL;
      }
      cs->base.buf = buf;
      cs->base.max_dw = 4096;
   }

   return &cs->base;
}

static bool hw_can_chain(unsigned hw_ip)
{
   return hw_ip == AMDGPU_HW_IP_GFX || hw_ip == AMDGPU_HW_IP_COMPUTE;
}

static uint32_t get_nop_packet(struct radv_amdgpu_cs *cs)
{
   switch (cs->hw_ip) {
   case AMDGPU_HW_IP_GFX:
   case AMDGPU_HW_IP_COMPUTE:
      return cs->ws->info.gfx_ib_pad_with_type2 ? PKT2_NOP_PAD : PKT3_NOP_PAD;
   case AMDGPU_HW_IP_DMA:
      return cs->ws->info.chip_class <= GFX6 ? 0xF0000000 : SDMA_NOP_PAD;
   case AMDGPU_HW_IP_UVD:
   case AMDGPU_HW_IP_UVD_ENC:
      return PKT2_NOP_PAD;
   case AMDGPU_HW_IP_VCN_DEC:
      return 0x81FF;
   default:
      unreachable("Unknown ring type");
   }
}
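
/* Explanatory sketch (not from the original source): each IP block requires
 * IBs padded to an IP-specific alignment with an IP-specific NOP encoding,
 * and info.ib_pad_dw_mask[ring] holds (alignment_in_dwords - 1). For example,
 * with an 8-dword alignment a 13-dword IB is padded out with 3 NOPs:
 *
 *    while (cs->base.cdw & ib_pad_dw_mask)           // 13 -> 16 dwords
 *       radeon_emit(&cs->base, get_nop_packet(cs));
 */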

static void
radv_amdgpu_cs_grow(struct radeon_cmdbuf *_cs, size_t min_size)
{
   struct radv_amdgpu_cs *cs = radv_amdgpu_cs(_cs);

   if (cs->status != VK_SUCCESS) {
      cs->base.cdw = 0;
      return;
   }

   if (!cs->use_ib) {
      const uint64_t limit_dws = GFX6_MAX_CS_SIZE;
      uint64_t ib_dws = MAX2(cs->base.cdw + min_size, MIN2(cs->base.max_dw * 2, limit_dws));

      /* The total ib size cannot exceed limit_dws dwords. */
      if (ib_dws > limit_dws) {
         /* The maximum size in dwords has been reached,
          * try to allocate a new one.
          */
         struct radeon_cmdbuf *old_cs_buffers =
            realloc(cs->old_cs_buffers, (cs->num_old_cs_buffers + 1) * sizeof(*cs->old_cs_buffers));
         if (!old_cs_buffers) {
            cs->status = VK_ERROR_OUT_OF_HOST_MEMORY;
            cs->base.cdw = 0;
            return;
         }
         cs->old_cs_buffers = old_cs_buffers;

         /* Store the current one for submitting it later. */
         cs->old_cs_buffers[cs->num_old_cs_buffers].cdw = cs->base.cdw;
         cs->old_cs_buffers[cs->num_old_cs_buffers].max_dw = cs->base.max_dw;
         cs->old_cs_buffers[cs->num_old_cs_buffers].buf = cs->base.buf;
         cs->num_old_cs_buffers++;

         /* Reset the cs, it will be re-allocated below. */
         cs->base.cdw = 0;
         cs->base.buf = NULL;
         cs->base.max_dw = 0;

         /* Re-compute the number of dwords to allocate. */
         ib_dws = MAX2(cs->base.cdw + min_size, MIN2(cs->base.max_dw * 2, limit_dws));
         if (ib_dws > limit_dws) {
            fprintf(stderr, "radv/amdgpu: Too high number of "
                            "dwords to allocate\n");
            cs->status = VK_ERROR_OUT_OF_HOST_MEMORY;
            return;
         }
      }

      uint32_t *new_buf = realloc(cs->base.buf, ib_dws * 4);
      if (new_buf) {
         cs->base.buf = new_buf;
         cs->base.max_dw = ib_dws;
      } else {
         cs->status = VK_ERROR_OUT_OF_HOST_MEMORY;
         cs->base.cdw = 0;
      }
      return;
   }

   enum ring_type ring_type = hw_ip_to_ring(cs->hw_ip);
   uint32_t ib_pad_dw_mask = MAX2(3, cs->ws->info.ib_pad_dw_mask[ring_type]);
   uint32_t nop_packet = get_nop_packet(cs);
   while (!cs->base.cdw || (cs->base.cdw & ib_pad_dw_mask) != ib_pad_dw_mask - 3)
      radeon_emit(&cs->base, nop_packet);

   *cs->ib_size_ptr |= cs->base.cdw + 4;

   if (cs->num_old_ib_buffers == cs->max_num_old_ib_buffers) {
      unsigned max_num_old_ib_buffers = MAX2(1, cs->max_num_old_ib_buffers * 2);
      struct radv_amdgpu_ib *old_ib_buffers =
         realloc(cs->old_ib_buffers, max_num_old_ib_buffers * sizeof(*old_ib_buffers));
      if (!old_ib_buffers) {
         cs->status = VK_ERROR_OUT_OF_HOST_MEMORY;
         return;
      }
      cs->max_num_old_ib_buffers = max_num_old_ib_buffers;
      cs->old_ib_buffers = old_ib_buffers;
   }

   cs->old_ib_buffers[cs->num_old_ib_buffers].bo = cs->ib_buffer;
   cs->old_ib_buffers[cs->num_old_ib_buffers++].cdw = cs->base.cdw;

   uint64_t ib_size = MAX2(min_size * 4 + 16, cs->base.max_dw * 4 * 2);

   /* max that fits in the chain size field. */
   ib_size = align(MIN2(ib_size, 0xfffff), ib_pad_dw_mask + 1);

   VkResult result =
      cs->ws->base.buffer_create(&cs->ws->base, ib_size, 0, radv_amdgpu_cs_domain(&cs->ws->base),
                                 RADEON_FLAG_CPU_ACCESS | RADEON_FLAG_NO_INTERPROCESS_SHARING |
                                    RADEON_FLAG_READ_ONLY | RADEON_FLAG_GTT_WC,
                                 RADV_BO_PRIORITY_CS, 0, &cs->ib_buffer);

   if (result != VK_SUCCESS) {
      cs->base.cdw = 0;
      cs->status = VK_ERROR_OUT_OF_DEVICE_MEMORY;
      cs->ib_buffer = cs->old_ib_buffers[--cs->num_old_ib_buffers].bo;
      return;
   }

   cs->ib_mapped = cs->ws->base.buffer_map(cs->ib_buffer);
   if (!cs->ib_mapped) {
      cs->ws->base.buffer_destroy(&cs->ws->base, cs->ib_buffer);
      cs->base.cdw = 0;

      /* VK_ERROR_MEMORY_MAP_FAILED is not valid for vkEndCommandBuffer. */
      cs->status = VK_ERROR_OUT_OF_DEVICE_MEMORY;
      cs->ib_buffer = cs->old_ib_buffers[--cs->num_old_ib_buffers].bo;
      return;
   }

   cs->ws->base.cs_add_buffer(&cs->base, cs->ib_buffer);

   assert(hw_can_chain(cs->hw_ip)); /* TODO: Implement growing other queues if needed. */

   radeon_emit(&cs->base, PKT3(PKT3_INDIRECT_BUFFER_CIK, 2, 0));
   radeon_emit(&cs->base, radv_amdgpu_winsys_bo(cs->ib_buffer)->base.va);
   radeon_emit(&cs->base, radv_amdgpu_winsys_bo(cs->ib_buffer)->base.va >> 32);
   radeon_emit(&cs->base, S_3F2_CHAIN(1) | S_3F2_VALID(1));

   cs->ib_size_ptr = cs->base.buf + cs->base.cdw - 1;

   cs->base.buf = (uint32_t *)cs->ib_mapped;
   cs->base.cdw = 0;
   cs->base.max_dw = ib_size / 4 - 4;
}
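
/* Sketch of the chain just emitted (GFX/compute only): the old IB ends with
 * an INDIRECT_BUFFER packet pointing at the new IB, and cs->ib_size_ptr is
 * left aimed at that packet's size/flags dword so it can be patched once the
 * new IB's final size is known:
 *
 *    old IB: ... nops ... | PKT3_INDIRECT_BUFFER | va_lo | va_hi | CHAIN|VALID|size
 *                                                                  ^ cs->ib_size_ptr
 *    new IB: [ subsequent commands land here ]
 */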

static VkResult
radv_amdgpu_cs_finalize(struct radeon_cmdbuf *_cs)
{
   struct radv_amdgpu_cs *cs = radv_amdgpu_cs(_cs);
   enum ring_type ring_type = hw_ip_to_ring(cs->hw_ip);

   if (cs->use_ib) {
      uint32_t ib_pad_dw_mask = MAX2(3, cs->ws->info.ib_pad_dw_mask[ring_type]);
      uint32_t nop_packet = get_nop_packet(cs);

      if (hw_can_chain(cs->hw_ip)) {
         /* Ensure that with the 4 dword reservation we subtract from max_dw we always
          * have 4 nops at the end for chaining. */
         while (!cs->base.cdw || (cs->base.cdw & ib_pad_dw_mask) != ib_pad_dw_mask - 3)
            radeon_emit(&cs->base, nop_packet);

         radeon_emit(&cs->base, nop_packet);
         radeon_emit(&cs->base, nop_packet);
         radeon_emit(&cs->base, nop_packet);
         radeon_emit(&cs->base, nop_packet);
      } else {
         while (!cs->base.cdw || (cs->base.cdw & ib_pad_dw_mask))
            radeon_emit(&cs->base, nop_packet);
      }

      *cs->ib_size_ptr |= cs->base.cdw;

      cs->is_chained = false;

      assert(cs->base.cdw <= cs->base.max_dw + 4);
   }

   return cs->status;
}

static void
radv_amdgpu_cs_reset(struct radeon_cmdbuf *_cs)
{
   struct radv_amdgpu_cs *cs = radv_amdgpu_cs(_cs);
   cs->base.cdw = 0;
   cs->status = VK_SUCCESS;

   for (unsigned i = 0; i < cs->num_buffers; ++i) {
      unsigned hash = cs->handles[i].bo_handle & (ARRAY_SIZE(cs->buffer_hash_table) - 1);
      cs->buffer_hash_table[hash] = -1;
   }

   for (unsigned i = 0; i < cs->num_virtual_buffers; ++i) {
      unsigned hash =
         ((uintptr_t)cs->virtual_buffers[i] >> 6) & (VIRTUAL_BUFFER_HASH_TABLE_SIZE - 1);
      cs->virtual_buffer_hash_table[hash] = -1;
   }

   cs->num_buffers = 0;
   cs->num_virtual_buffers = 0;

   if (cs->use_ib) {
      cs->ws->base.cs_add_buffer(&cs->base, cs->ib_buffer);

      for (unsigned i = 0; i < cs->num_old_ib_buffers; ++i)
         cs->ws->base.buffer_destroy(&cs->ws->base, cs->old_ib_buffers[i].bo);

      cs->num_old_ib_buffers = 0;
      cs->ib.ib_mc_address = radv_amdgpu_winsys_bo(cs->ib_buffer)->base.va;
      cs->ib_size_ptr = &cs->ib.size;
      cs->ib.size = 0;
   } else {
      for (unsigned i = 0; i < cs->num_old_cs_buffers; ++i) {
         struct radeon_cmdbuf *rcs = &cs->old_cs_buffers[i];
         free(rcs->buf);
      }

      free(cs->old_cs_buffers);
      cs->old_cs_buffers = NULL;
      cs->num_old_cs_buffers = 0;
   }
}

static int
radv_amdgpu_cs_find_buffer(struct radv_amdgpu_cs *cs, uint32_t bo)
{
   unsigned hash = bo & (ARRAY_SIZE(cs->buffer_hash_table) - 1);
   int index = cs->buffer_hash_table[hash];

   if (index == -1)
      return -1;

   if (cs->handles[index].bo_handle == bo)
      return index;

   for (unsigned i = 0; i < cs->num_buffers; ++i) {
      if (cs->handles[i].bo_handle == bo) {
         cs->buffer_hash_table[hash] = i;
         return i;
      }
   }

   return -1;
}
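
/* The buffer "hash table" above is a single-entry cache per bucket: on a miss
 * it falls back to a linear scan of cs->handles and then re-primes the bucket,
 * so repeated lookups of a hot BO handle stay O(1). Illustrative sequence:
 *
 *    radv_amdgpu_cs_find_buffer(cs, h);  // miss: linear scan, caches index
 *    radv_amdgpu_cs_find_buffer(cs, h);  // hit: one table load
 */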

static void
radv_amdgpu_cs_add_buffer_internal(struct radv_amdgpu_cs *cs, uint32_t bo, uint8_t priority)
{
   unsigned hash;
   int index = radv_amdgpu_cs_find_buffer(cs, bo);

   if (index != -1)
      return;

   if (cs->num_buffers == cs->max_num_buffers) {
      unsigned new_count = MAX2(1, cs->max_num_buffers * 2);
      struct drm_amdgpu_bo_list_entry *new_entries =
         realloc(cs->handles, new_count * sizeof(struct drm_amdgpu_bo_list_entry));
      if (new_entries) {
         cs->max_num_buffers = new_count;
         cs->handles = new_entries;
      } else {
         cs->status = VK_ERROR_OUT_OF_HOST_MEMORY;
         return;
      }
   }

   cs->handles[cs->num_buffers].bo_handle = bo;
   cs->handles[cs->num_buffers].bo_priority = priority;

   hash = bo & (ARRAY_SIZE(cs->buffer_hash_table) - 1);
   cs->buffer_hash_table[hash] = cs->num_buffers;

   ++cs->num_buffers;
}

static void
radv_amdgpu_cs_add_virtual_buffer(struct radeon_cmdbuf *_cs, struct radeon_winsys_bo *bo)
{
   struct radv_amdgpu_cs *cs = radv_amdgpu_cs(_cs);
   unsigned hash = ((uintptr_t)bo >> 6) & (VIRTUAL_BUFFER_HASH_TABLE_SIZE - 1);

   if (!cs->virtual_buffer_hash_table) {
      int *virtual_buffer_hash_table = malloc(VIRTUAL_BUFFER_HASH_TABLE_SIZE * sizeof(int));
      if (!virtual_buffer_hash_table) {
         cs->status = VK_ERROR_OUT_OF_HOST_MEMORY;
         return;
      }
      cs->virtual_buffer_hash_table = virtual_buffer_hash_table;

      for (int i = 0; i < VIRTUAL_BUFFER_HASH_TABLE_SIZE; ++i)
         cs->virtual_buffer_hash_table[i] = -1;
   }

   if (cs->virtual_buffer_hash_table[hash] >= 0) {
      int idx = cs->virtual_buffer_hash_table[hash];
      if (cs->virtual_buffers[idx] == bo) {
         return;
      }
      for (unsigned i = 0; i < cs->num_virtual_buffers; ++i) {
         if (cs->virtual_buffers[i] == bo) {
            cs->virtual_buffer_hash_table[hash] = i;
            return;
         }
      }
   }

   if (cs->max_num_virtual_buffers <= cs->num_virtual_buffers) {
      unsigned max_num_virtual_buffers = MAX2(2, cs->max_num_virtual_buffers * 2);
      struct radeon_winsys_bo **virtual_buffers =
         realloc(cs->virtual_buffers, sizeof(struct radeon_winsys_bo *) * max_num_virtual_buffers);
      if (!virtual_buffers) {
         cs->status = VK_ERROR_OUT_OF_HOST_MEMORY;
         return;
      }
      cs->max_num_virtual_buffers = max_num_virtual_buffers;
      cs->virtual_buffers = virtual_buffers;
   }

   cs->virtual_buffers[cs->num_virtual_buffers] = bo;

   cs->virtual_buffer_hash_table[hash] = cs->num_virtual_buffers;
   ++cs->num_virtual_buffers;
}

static void
radv_amdgpu_cs_add_buffer(struct radeon_cmdbuf *_cs, struct radeon_winsys_bo *_bo)
{
   struct radv_amdgpu_cs *cs = radv_amdgpu_cs(_cs);
   struct radv_amdgpu_winsys_bo *bo = radv_amdgpu_winsys_bo(_bo);

   if (cs->status != VK_SUCCESS)
      return;

   if (bo->is_virtual) {
      radv_amdgpu_cs_add_virtual_buffer(_cs, _bo);
      return;
   }

   radv_amdgpu_cs_add_buffer_internal(cs, bo->bo_handle, bo->priority);
}

static void
radv_amdgpu_cs_execute_secondary(struct radeon_cmdbuf *_parent, struct radeon_cmdbuf *_child,
                                 bool allow_ib2)
{
   struct radv_amdgpu_cs *parent = radv_amdgpu_cs(_parent);
   struct radv_amdgpu_cs *child = radv_amdgpu_cs(_child);
   struct radv_amdgpu_winsys *ws = parent->ws;
   bool use_ib2 = parent->use_ib && allow_ib2;

   if (parent->status != VK_SUCCESS || child->status != VK_SUCCESS)
      return;

   for (unsigned i = 0; i < child->num_buffers; ++i) {
      radv_amdgpu_cs_add_buffer_internal(parent, child->handles[i].bo_handle,
                                         child->handles[i].bo_priority);
   }

   for (unsigned i = 0; i < child->num_virtual_buffers; ++i) {
      radv_amdgpu_cs_add_buffer(&parent->base, child->virtual_buffers[i]);
   }

   if (use_ib2) {
      if (parent->base.cdw + 4 > parent->base.max_dw)
         radv_amdgpu_cs_grow(&parent->base, 4);

      /* Not setting the CHAIN bit will launch an IB2. */
      radeon_emit(&parent->base, PKT3(PKT3_INDIRECT_BUFFER_CIK, 2, 0));
      radeon_emit(&parent->base, child->ib.ib_mc_address);
      radeon_emit(&parent->base, child->ib.ib_mc_address >> 32);
      radeon_emit(&parent->base, child->ib.size);
   } else {
      if (parent->use_ib) {
         /* Copy and chain old IB buffers from the child to the parent IB. */
         for (unsigned i = 0; i < child->num_old_ib_buffers; i++) {
            struct radv_amdgpu_ib *ib = &child->old_ib_buffers[i];
            uint32_t *mapped;

            if (parent->base.cdw + ib->cdw > parent->base.max_dw)
               radv_amdgpu_cs_grow(&parent->base, ib->cdw);

            mapped = ws->base.buffer_map(ib->bo);
            if (!mapped) {
               parent->status = VK_ERROR_OUT_OF_HOST_MEMORY;
               return;
            }

            /* Copy the IB data without the original chain link. */
            memcpy(parent->base.buf + parent->base.cdw, mapped, 4 * ib->cdw);
            parent->base.cdw += ib->cdw;
         }
      } else {
         /* When the secondary command buffer is huge we have to copy the list of CS buffers to the
          * parent to submit multiple IBs.
          */
         if (child->num_old_cs_buffers > 0) {
            unsigned num_cs_buffers;
            uint32_t *new_buf;

            /* Compute the total number of CS buffers needed. */
            num_cs_buffers = parent->num_old_cs_buffers + child->num_old_cs_buffers + 1;

            struct radeon_cmdbuf *old_cs_buffers =
               realloc(parent->old_cs_buffers, num_cs_buffers * sizeof(*parent->old_cs_buffers));
            if (!old_cs_buffers) {
               parent->status = VK_ERROR_OUT_OF_HOST_MEMORY;
               parent->base.cdw = 0;
               return;
            }
            parent->old_cs_buffers = old_cs_buffers;

            /* Copy the parent CS to its list of CS buffers, so submission ordering is maintained. */
            new_buf = malloc(parent->base.max_dw * 4);
            if (!new_buf) {
               parent->status = VK_ERROR_OUT_OF_HOST_MEMORY;
               parent->base.cdw = 0;
               return;
            }
            memcpy(new_buf, parent->base.buf, parent->base.max_dw * 4);

            parent->old_cs_buffers[parent->num_old_cs_buffers].cdw = parent->base.cdw;
            parent->old_cs_buffers[parent->num_old_cs_buffers].max_dw = parent->base.max_dw;
            parent->old_cs_buffers[parent->num_old_cs_buffers].buf = new_buf;
            parent->num_old_cs_buffers++;

            /* Then, copy all child CS buffers to the parent list. */
            for (unsigned i = 0; i < child->num_old_cs_buffers; i++) {
               new_buf = malloc(child->old_cs_buffers[i].max_dw * 4);
               if (!new_buf) {
                  parent->status = VK_ERROR_OUT_OF_HOST_MEMORY;
                  parent->base.cdw = 0;
                  return;
               }
               memcpy(new_buf, child->old_cs_buffers[i].buf, child->old_cs_buffers[i].max_dw * 4);

               parent->old_cs_buffers[parent->num_old_cs_buffers].cdw = child->old_cs_buffers[i].cdw;
               parent->old_cs_buffers[parent->num_old_cs_buffers].max_dw = child->old_cs_buffers[i].max_dw;
               parent->old_cs_buffers[parent->num_old_cs_buffers].buf = new_buf;
               parent->num_old_cs_buffers++;
            }

            /* Reset the parent CS before copying the child CS into it. */
            parent->base.cdw = 0;
         }
      }

      if (parent->base.cdw + child->base.cdw > parent->base.max_dw)
         radv_amdgpu_cs_grow(&parent->base, child->base.cdw);

      memcpy(parent->base.buf + parent->base.cdw, child->base.buf, 4 * child->base.cdw);
      parent->base.cdw += child->base.cdw;
   }
}
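
/* Summary of the three execution strategies above:
 *
 *    use_ib2                -> emit INDIRECT_BUFFER without the CHAIN bit, so
 *                              the firmware launches the child as a nested IB2
 *    parent->use_ib         -> memcpy the child's old IBs inline, minus each
 *                              trailing chain link (ib->cdw excludes it)
 *    neither (no IB BOs)    -> move whole CS buffers onto old_cs_buffers so
 *                              the submit path sends them as separate IBs
 */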

static VkResult
radv_amdgpu_get_bo_list(struct radv_amdgpu_winsys *ws, struct radeon_cmdbuf **cs_array,
                        unsigned count, struct radv_amdgpu_winsys_bo **extra_bo_array,
                        unsigned num_extra_bo, struct radeon_cmdbuf *extra_cs,
                        unsigned *rnum_handles, struct drm_amdgpu_bo_list_entry **rhandles)
{
   struct drm_amdgpu_bo_list_entry *handles = NULL;
   unsigned num_handles = 0;

   if (ws->debug_all_bos) {
      handles = malloc(sizeof(handles[0]) * ws->global_bo_list.count);
      if (!handles)
         return VK_ERROR_OUT_OF_HOST_MEMORY;

      for (uint32_t i = 0; i < ws->global_bo_list.count; i++) {
         handles[i].bo_handle = ws->global_bo_list.bos[i]->bo_handle;
         handles[i].bo_priority = ws->global_bo_list.bos[i]->priority;
         num_handles++;
      }
   } else if (count == 1 && !num_extra_bo && !extra_cs &&
              !radv_amdgpu_cs(cs_array[0])->num_virtual_buffers && !ws->global_bo_list.count) {
      struct radv_amdgpu_cs *cs = (struct radv_amdgpu_cs *)cs_array[0];
      if (cs->num_buffers == 0)
         return VK_SUCCESS;

      handles = malloc(sizeof(handles[0]) * cs->num_buffers);
      if (!handles)
         return VK_ERROR_OUT_OF_HOST_MEMORY;

      memcpy(handles, cs->handles, sizeof(handles[0]) * cs->num_buffers);
      num_handles = cs->num_buffers;
   } else {
      unsigned total_buffer_count = num_extra_bo;
      num_handles = num_extra_bo;
      for (unsigned i = 0; i < count; ++i) {
         struct radv_amdgpu_cs *cs = (struct radv_amdgpu_cs *)cs_array[i];
         total_buffer_count += cs->num_buffers;
         for (unsigned j = 0; j < cs->num_virtual_buffers; ++j)
            total_buffer_count += radv_amdgpu_winsys_bo(cs->virtual_buffers[j])->bo_count;
      }

      if (extra_cs) {
         total_buffer_count += ((struct radv_amdgpu_cs *)extra_cs)->num_buffers;
      }

      total_buffer_count += ws->global_bo_list.count;

      if (total_buffer_count == 0)
         return VK_SUCCESS;

      handles = malloc(sizeof(handles[0]) * total_buffer_count);
      if (!handles)
         return VK_ERROR_OUT_OF_HOST_MEMORY;

      for (unsigned i = 0; i < num_extra_bo; i++) {
         handles[i].bo_handle = extra_bo_array[i]->bo_handle;
         handles[i].bo_priority = extra_bo_array[i]->priority;
      }

      for (unsigned i = 0; i < count + !!extra_cs; ++i) {
         struct radv_amdgpu_cs *cs;

         if (i == count)
            cs = (struct radv_amdgpu_cs *)extra_cs;
         else
            cs = (struct radv_amdgpu_cs *)cs_array[i];

         if (!cs->num_buffers)
            continue;

         if (num_handles == 0 && !cs->num_virtual_buffers) {
            memcpy(handles, cs->handles, cs->num_buffers * sizeof(struct drm_amdgpu_bo_list_entry));
            num_handles = cs->num_buffers;
            continue;
         }

         int unique_bo_so_far = num_handles;
         for (unsigned j = 0; j < cs->num_buffers; ++j) {
            bool found = false;
            for (unsigned k = 0; k < unique_bo_so_far; ++k) {
               if (handles[k].bo_handle == cs->handles[j].bo_handle) {
                  found = true;
                  break;
               }
            }
            if (!found) {
               handles[num_handles] = cs->handles[j];
               ++num_handles;
            }
         }
         for (unsigned j = 0; j < cs->num_virtual_buffers; ++j) {
            struct radv_amdgpu_winsys_bo *virtual_bo =
               radv_amdgpu_winsys_bo(cs->virtual_buffers[j]);
            for (unsigned k = 0; k < virtual_bo->bo_count; ++k) {
               struct radv_amdgpu_winsys_bo *bo = virtual_bo->bos[k];
               bool found = false;
               for (unsigned m = 0; m < num_handles; ++m) {
                  if (handles[m].bo_handle == bo->bo_handle) {
                     found = true;
                     break;
                  }
               }
               if (!found) {
                  handles[num_handles].bo_handle = bo->bo_handle;
                  handles[num_handles].bo_priority = bo->priority;
                  ++num_handles;
               }
            }
         }
      }

      unsigned unique_bo_so_far = num_handles;
      for (unsigned i = 0; i < ws->global_bo_list.count; ++i) {
         struct radv_amdgpu_winsys_bo *bo = ws->global_bo_list.bos[i];
         bool found = false;
         for (unsigned j = 0; j < unique_bo_so_far; ++j) {
            if (bo->bo_handle == handles[j].bo_handle) {
               found = true;
               break;
            }
         }
         if (!found) {
            handles[num_handles].bo_handle = bo->bo_handle;
            handles[num_handles].bo_priority = bo->priority;
            ++num_handles;
         }
      }
   }

   *rhandles = handles;
   *rnum_handles = num_handles;

   return VK_SUCCESS;
}
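
/* The deduplication loops above are O(n^2) in the number of unique BOs; that
 * stays cheap in practice because the debug_all_bos path and the single-CS
 * fast path above bypass them entirely, and BO counts on the slow path tend
 * to be small. */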

static void
radv_assign_last_submit(struct radv_amdgpu_ctx *ctx, struct radv_amdgpu_cs_request *request)
{
   radv_amdgpu_request_to_fence(ctx, &ctx->last_submission[request->ip_type][request->ring],
                                request);
}

static VkResult
radv_amdgpu_winsys_cs_submit_chained(struct radeon_winsys_ctx *_ctx, int queue_idx,
                                     struct radv_winsys_sem_info *sem_info,
                                     struct radeon_cmdbuf **cs_array, unsigned cs_count,
                                     struct radeon_cmdbuf *initial_preamble_cs)
{
   struct radv_amdgpu_ctx *ctx = radv_amdgpu_ctx(_ctx);
   struct radv_amdgpu_cs *cs0 = radv_amdgpu_cs(cs_array[0]);
   struct radv_amdgpu_winsys *aws = cs0->ws;
   struct drm_amdgpu_bo_list_entry *handles = NULL;
   struct radv_amdgpu_cs_request request;
   struct amdgpu_cs_ib_info ibs[2];
   unsigned number_of_ibs = 1;
   unsigned num_handles = 0;
   VkResult result;

   for (unsigned i = cs_count; i--;) {
      struct radv_amdgpu_cs *cs = radv_amdgpu_cs(cs_array[i]);

      if (cs->is_chained) {
         assert(cs->base.cdw <= cs->base.max_dw + 4);
         assert(get_nop_packet(cs) == PKT3_NOP_PAD); /* Other IPs shouldn't chain. */

         cs->is_chained = false;
         cs->base.buf[cs->base.cdw - 4] = PKT3_NOP_PAD;
         cs->base.buf[cs->base.cdw - 3] = PKT3_NOP_PAD;
         cs->base.buf[cs->base.cdw - 2] = PKT3_NOP_PAD;
         cs->base.buf[cs->base.cdw - 1] = PKT3_NOP_PAD;
      }

      if (i + 1 < cs_count) {
         struct radv_amdgpu_cs *next = radv_amdgpu_cs(cs_array[i + 1]);
         assert(cs->base.cdw <= cs->base.max_dw + 4);
         assert(get_nop_packet(cs) == PKT3_NOP_PAD); /* Other IPs shouldn't chain. */

         cs->is_chained = true;

         cs->base.buf[cs->base.cdw - 4] = PKT3(PKT3_INDIRECT_BUFFER_CIK, 2, 0);
         cs->base.buf[cs->base.cdw - 3] = next->ib.ib_mc_address;
         cs->base.buf[cs->base.cdw - 2] = next->ib.ib_mc_address >> 32;
         cs->base.buf[cs->base.cdw - 1] = S_3F2_CHAIN(1) | S_3F2_VALID(1) | next->ib.size;
      }
   }

   u_rwlock_rdlock(&aws->global_bo_list.lock);

   /* Get the BO list. */
   result = radv_amdgpu_get_bo_list(cs0->ws, cs_array, cs_count, NULL, 0, initial_preamble_cs,
                                    &num_handles, &handles);
   if (result != VK_SUCCESS)
      goto fail;

   /* Configure the CS request. */
   if (initial_preamble_cs) {
      ibs[0] = radv_amdgpu_cs(initial_preamble_cs)->ib;
      ibs[1] = cs0->ib;
      number_of_ibs++;
   } else {
      ibs[0] = cs0->ib;
   }

   request.ip_type = cs0->hw_ip;
   request.ip_instance = 0;
   request.ring = queue_idx;
   request.number_of_ibs = number_of_ibs;
   request.ibs = ibs;
   request.handles = handles;
   request.num_handles = num_handles;

   /* Submit the CS. */
   result = radv_amdgpu_cs_submit(ctx, &request, sem_info);

   free(request.handles);

   if (result != VK_SUCCESS)
      goto fail;

   radv_assign_last_submit(ctx, &request);

fail:
   u_rwlock_rdunlock(&aws->global_bo_list.lock);
   return result;
}

static VkResult
radv_amdgpu_winsys_cs_submit_fallback(struct radeon_winsys_ctx *_ctx, int queue_idx,
                                      struct radv_winsys_sem_info *sem_info,
                                      struct radeon_cmdbuf **cs_array, unsigned cs_count,
                                      struct radeon_cmdbuf *initial_preamble_cs)
{
   struct radv_amdgpu_ctx *ctx = radv_amdgpu_ctx(_ctx);
   struct drm_amdgpu_bo_list_entry *handles = NULL;
   struct radv_amdgpu_cs_request request;
   struct amdgpu_cs_ib_info *ibs;
   struct radv_amdgpu_cs *cs0;
   struct radv_amdgpu_winsys *aws;
   unsigned num_handles = 0;
   unsigned number_of_ibs;
   VkResult result;

   assert(cs_count);
   cs0 = radv_amdgpu_cs(cs_array[0]);
   aws = cs0->ws;

   /* Compute the number of IBs for this submit. */
   number_of_ibs = cs_count + !!initial_preamble_cs;

   u_rwlock_rdlock(&aws->global_bo_list.lock);

   /* Get the BO list. */
   result = radv_amdgpu_get_bo_list(cs0->ws, &cs_array[0], cs_count, NULL, 0, initial_preamble_cs,
                                    &num_handles, &handles);
   if (result != VK_SUCCESS) {
      goto fail;
   }

   ibs = malloc(number_of_ibs * sizeof(*ibs));
   if (!ibs) {
      free(handles);
      result = VK_ERROR_OUT_OF_HOST_MEMORY;
      goto fail;
   }

   /* Configure the CS request. */
   if (initial_preamble_cs)
      ibs[0] = radv_amdgpu_cs(initial_preamble_cs)->ib;

   for (unsigned i = 0; i < cs_count; i++) {
      struct radv_amdgpu_cs *cs = radv_amdgpu_cs(cs_array[i]);

      ibs[i + !!initial_preamble_cs] = cs->ib;

      if (cs->is_chained) {
         assert(get_nop_packet(cs) == PKT3_NOP_PAD); /* Other IPs shouldn't chain. */

         cs->base.buf[cs->base.cdw - 4] = PKT3_NOP_PAD;
         cs->base.buf[cs->base.cdw - 3] = PKT3_NOP_PAD;
         cs->base.buf[cs->base.cdw - 2] = PKT3_NOP_PAD;
         cs->base.buf[cs->base.cdw - 1] = PKT3_NOP_PAD;
         cs->is_chained = false;
      }
   }

   request.ip_type = cs0->hw_ip;
   request.ip_instance = 0;
   request.ring = queue_idx;
   request.handles = handles;
   request.num_handles = num_handles;
   request.number_of_ibs = number_of_ibs;
   request.ibs = ibs;

   /* Submit the CS. */
   result = radv_amdgpu_cs_submit(ctx, &request, sem_info);

   free(request.ibs);
   free(request.handles);

   if (result != VK_SUCCESS)
      goto fail;

   radv_assign_last_submit(ctx, &request);

fail:
   u_rwlock_rdunlock(&aws->global_bo_list.lock);
   return result;
}

static VkResult
radv_amdgpu_winsys_cs_submit_sysmem(struct radeon_winsys_ctx *_ctx, int queue_idx,
                                    struct radv_winsys_sem_info *sem_info,
                                    struct radeon_cmdbuf **cs_array, unsigned cs_count,
                                    struct radeon_cmdbuf *initial_preamble_cs,
                                    struct radeon_cmdbuf *continue_preamble_cs)
{
   struct radv_amdgpu_ctx *ctx = radv_amdgpu_ctx(_ctx);
   struct radv_amdgpu_cs *cs0 = radv_amdgpu_cs(cs_array[0]);
   struct radeon_winsys *ws = (struct radeon_winsys *)cs0->ws;
   struct radv_amdgpu_winsys *aws = cs0->ws;
   struct radv_amdgpu_cs_request request;
   uint32_t pad_word = get_nop_packet(cs0);
   enum ring_type ring_type = hw_ip_to_ring(cs0->hw_ip);
   uint32_t ib_pad_dw_mask = cs0->ws->info.ib_pad_dw_mask[ring_type];
   bool emit_signal_sem = sem_info->cs_emit_signal;
   VkResult result;

   assert(cs_count);

   for (unsigned i = 0; i < cs_count;) {
      struct amdgpu_cs_ib_info *ibs;
      struct radeon_winsys_bo **bos;
      struct radeon_cmdbuf *preamble_cs = i ? continue_preamble_cs : initial_preamble_cs;
      struct radv_amdgpu_cs *cs = radv_amdgpu_cs(cs_array[i]);
      struct drm_amdgpu_bo_list_entry *handles = NULL;
      unsigned num_handles = 0;
      unsigned number_of_ibs;
      uint32_t *ptr;
      unsigned cnt = 0;

      /* Compute the number of IBs for this submit. */
      number_of_ibs = cs->num_old_cs_buffers + 1;

      ibs = malloc(number_of_ibs * sizeof(*ibs));
      if (!ibs)
         return VK_ERROR_OUT_OF_HOST_MEMORY;

      bos = malloc(number_of_ibs * sizeof(*bos));
      if (!bos) {
         free(ibs);
         return VK_ERROR_OUT_OF_HOST_MEMORY;
      }

      if (number_of_ibs > 1) {
         /* Special path when the maximum size in dwords has
          * been reached because we need to handle more than one
          * IB per submit.
          */
         struct radeon_cmdbuf **new_cs_array;
         unsigned idx = 0;

         new_cs_array = malloc(number_of_ibs * sizeof(*new_cs_array));
         assert(new_cs_array);

         for (unsigned j = 0; j < cs->num_old_cs_buffers; j++)
            new_cs_array[idx++] = &cs->old_cs_buffers[j];
         new_cs_array[idx++] = cs_array[i];

         for (unsigned j = 0; j < number_of_ibs; j++) {
            struct radeon_cmdbuf *rcs = new_cs_array[j];
            bool needs_preamble = preamble_cs && j == 0;
            unsigned pad_words = 0;
            unsigned size = 0;

            if (needs_preamble)
               size += preamble_cs->cdw;
            size += rcs->cdw;

            assert(size < GFX6_MAX_CS_SIZE);

            while (!size || (size & ib_pad_dw_mask)) {
               size++;
               pad_words++;
            }

            ws->buffer_create(
               ws, 4 * size, 4096, radv_amdgpu_cs_domain(ws),
               RADEON_FLAG_CPU_ACCESS | RADEON_FLAG_NO_INTERPROCESS_SHARING | RADEON_FLAG_READ_ONLY |
                  RADEON_FLAG_GTT_WC, RADV_BO_PRIORITY_CS, 0, &bos[j]);

            ptr = ws->buffer_map(bos[j]);

            if (needs_preamble) {
               memcpy(ptr, preamble_cs->buf, preamble_cs->cdw * 4);
               ptr += preamble_cs->cdw;
            }

            memcpy(ptr, rcs->buf, 4 * rcs->cdw);
            ptr += rcs->cdw;

            for (unsigned k = 0; k < pad_words; ++k)
               *ptr++ = pad_word;

            ibs[j].size = size;
            ibs[j].ib_mc_address = radv_buffer_get_va(bos[j]);
            ibs[j].flags = 0;
         }

         cnt++;
         free(new_cs_array);
      } else {
         unsigned pad_words = 0;
         unsigned size = 0;

         if (preamble_cs)
            size += preamble_cs->cdw;

         while (i + cnt < cs_count &&
                GFX6_MAX_CS_SIZE - size >= radv_amdgpu_cs(cs_array[i + cnt])->base.cdw) {
            size += radv_amdgpu_cs(cs_array[i + cnt])->base.cdw;
            ++cnt;
         }

         while (!size || (size & ib_pad_dw_mask)) {
            size++;
            pad_words++;
         }
         assert(cnt);

         ws->buffer_create(
            ws, 4 * size, 4096, radv_amdgpu_cs_domain(ws),
            RADEON_FLAG_CPU_ACCESS | RADEON_FLAG_NO_INTERPROCESS_SHARING | RADEON_FLAG_READ_ONLY |
               RADEON_FLAG_GTT_WC, RADV_BO_PRIORITY_CS, 0, &bos[0]);

         ptr = ws->buffer_map(bos[0]);

         if (preamble_cs) {
            memcpy(ptr, preamble_cs->buf, preamble_cs->cdw * 4);
            ptr += preamble_cs->cdw;
         }

         for (unsigned j = 0; j < cnt; ++j) {
            struct radv_amdgpu_cs *cs2 = radv_amdgpu_cs(cs_array[i + j]);
            memcpy(ptr, cs2->base.buf, 4 * cs2->base.cdw);
            ptr += cs2->base.cdw;
         }

         for (unsigned j = 0; j < pad_words; ++j)
            *ptr++ = pad_word;

         ibs[0].size = size;
         ibs[0].ib_mc_address = radv_buffer_get_va(bos[0]);
         ibs[0].flags = 0;
      }

      u_rwlock_rdlock(&aws->global_bo_list.lock);

      result =
         radv_amdgpu_get_bo_list(cs0->ws, &cs_array[i], cnt, (struct radv_amdgpu_winsys_bo **)bos,
                                 number_of_ibs, preamble_cs, &num_handles, &handles);
      if (result != VK_SUCCESS) {
         free(ibs);
         free(bos);
         u_rwlock_rdunlock(&aws->global_bo_list.lock);
         return result;
      }

      request.ip_type = cs0->hw_ip;
      request.ip_instance = 0;
      request.ring = queue_idx;
      request.handles = handles;
      request.num_handles = num_handles;
      request.number_of_ibs = number_of_ibs;
      request.ibs = ibs;

      sem_info->cs_emit_signal = (i == cs_count - cnt) ? emit_signal_sem : false;
      result = radv_amdgpu_cs_submit(ctx, &request, sem_info);

      free(request.handles);
      u_rwlock_rdunlock(&aws->global_bo_list.lock);

      for (unsigned j = 0; j < number_of_ibs; j++) {
         ws->buffer_destroy(ws, bos[j]);
      }

      free(ibs);
      free(bos);

      if (result != VK_SUCCESS)
         return result;

      i += cnt;
   }

   radv_assign_last_submit(ctx, &request);

   return VK_SUCCESS;
}

static VkResult
radv_amdgpu_cs_submit_zero(struct radv_amdgpu_ctx *ctx, enum ring_type ring_type, int queue_idx,
                           struct radv_winsys_sem_info *sem_info)
{
   unsigned hw_ip = ring_to_hw_ip(ring_type);
   unsigned queue_syncobj = radv_amdgpu_ctx_queue_syncobj(ctx, hw_ip, queue_idx);
   int ret;

   if (!queue_syncobj)
      return VK_ERROR_OUT_OF_HOST_MEMORY;

   if (sem_info->wait.syncobj_count || sem_info->wait.timeline_syncobj_count) {
      int fd;
      ret = amdgpu_cs_syncobj_export_sync_file(ctx->ws->dev, queue_syncobj, &fd);
      if (ret < 0)
         return VK_ERROR_DEVICE_LOST;

      for (unsigned i = 0; i < sem_info->wait.syncobj_count; ++i) {
         int fd2;
         ret = amdgpu_cs_syncobj_export_sync_file(ctx->ws->dev, sem_info->wait.syncobj[i], &fd2);
         if (ret < 0) {
            close(fd);
            return VK_ERROR_DEVICE_LOST;
         }

         sync_accumulate("radv", &fd, fd2);
         close(fd2);
      }

      for (unsigned i = 0; i < sem_info->wait.timeline_syncobj_count; ++i) {
         int fd2;
         ret = amdgpu_cs_syncobj_export_sync_file2(
            ctx->ws->dev, sem_info->wait.syncobj[i + sem_info->wait.syncobj_count],
            sem_info->wait.points[i], 0, &fd2);
         if (ret < 0) {
            /* This works around a kernel bug where the fence isn't copied if it is already
             * signalled. Since it is already signalled it is totally fine to not wait on it.
             *
             * kernel patch: https://patchwork.freedesktop.org/patch/465583/ */
            uint64_t point;
            ret = amdgpu_cs_syncobj_query2(
               ctx->ws->dev, &sem_info->wait.syncobj[i + sem_info->wait.syncobj_count], &point, 1,
               0);
            if (!ret && point >= sem_info->wait.points[i])
               continue;

            close(fd);
            return VK_ERROR_DEVICE_LOST;
         }

         sync_accumulate("radv", &fd, fd2);
         close(fd2);
      }

      ret = amdgpu_cs_syncobj_import_sync_file(ctx->ws->dev, queue_syncobj, fd);
      close(fd);
      if (ret < 0)
         return VK_ERROR_DEVICE_LOST;

      ctx->queue_syncobj_wait[hw_ip][queue_idx] = true;
   }

   for (unsigned i = 0; i < sem_info->signal.syncobj_count; ++i) {
      uint32_t dst_handle = sem_info->signal.syncobj[i];
      uint32_t src_handle = queue_syncobj;

      if (ctx->ws->info.has_timeline_syncobj) {
         ret = amdgpu_cs_syncobj_transfer(ctx->ws->dev, dst_handle, 0, src_handle, 0, 0);
         if (ret < 0)
            return VK_ERROR_DEVICE_LOST;
      } else {
         int fd;
         ret = amdgpu_cs_syncobj_export_sync_file(ctx->ws->dev, src_handle, &fd);
         if (ret < 0)
            return VK_ERROR_DEVICE_LOST;

         ret = amdgpu_cs_syncobj_import_sync_file(ctx->ws->dev, dst_handle, fd);
         close(fd);
         if (ret < 0)
            return VK_ERROR_DEVICE_LOST;
      }
   }

   for (unsigned i = 0; i < sem_info->signal.timeline_syncobj_count; ++i) {
      ret = amdgpu_cs_syncobj_transfer(ctx->ws->dev,
                                       sem_info->signal.syncobj[i + sem_info->signal.syncobj_count],
                                       sem_info->signal.points[i], queue_syncobj, 0, 0);
      if (ret < 0)
         return VK_ERROR_DEVICE_LOST;
   }

   return VK_SUCCESS;
}
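
/* The wait path above funnels every wait into the per-queue binary syncobj:
 * each wait is exported as a sync_file fd, merged with sync_accumulate()
 * (SYNC_IOC_MERGE under the hood), and the merged fd is imported back. A
 * minimal sketch of that idiom, error handling omitted:
 *
 *    int acc, fd;
 *    amdgpu_cs_syncobj_export_sync_file(dev, queue_syncobj, &acc);
 *    amdgpu_cs_syncobj_export_sync_file(dev, wait_syncobj, &fd);
 *    sync_accumulate("radv", &acc, fd);   // acc := merge(acc, fd)
 *    close(fd);
 *    amdgpu_cs_syncobj_import_sync_file(dev, queue_syncobj, acc);
 *    close(acc);
 */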

static VkResult
radv_amdgpu_winsys_cs_submit_internal(struct radeon_winsys_ctx *_ctx, enum ring_type ring_type,
                                      int queue_idx, struct radeon_cmdbuf **cs_array,
                                      unsigned cs_count, struct radeon_cmdbuf *initial_preamble_cs,
                                      struct radeon_cmdbuf *continue_preamble_cs,
                                      struct radv_winsys_sem_info *sem_info, bool can_patch)
{
   struct radv_amdgpu_ctx *ctx = radv_amdgpu_ctx(_ctx);
   VkResult result;

   assert(sem_info);
   if (!cs_count) {
      result = radv_amdgpu_cs_submit_zero(ctx, ring_type, queue_idx, sem_info);
   } else if (!ring_can_use_ib_bos(ctx->ws, ring_type)) {
      result = radv_amdgpu_winsys_cs_submit_sysmem(_ctx, queue_idx, sem_info, cs_array, cs_count,
                                                   initial_preamble_cs, continue_preamble_cs);
   } else if (can_patch) {
      result = radv_amdgpu_winsys_cs_submit_chained(_ctx, queue_idx, sem_info, cs_array, cs_count,
                                                    initial_preamble_cs);
   } else {
      result = radv_amdgpu_winsys_cs_submit_fallback(_ctx, queue_idx, sem_info, cs_array, cs_count,
                                                     initial_preamble_cs);
   }

   return result;
}

static VkResult
radv_amdgpu_winsys_cs_submit(struct radeon_winsys_ctx *_ctx, enum ring_type ring_type,
                             int queue_idx, struct radeon_cmdbuf **cs_array, unsigned cs_count,
                             struct radeon_cmdbuf *initial_preamble_cs,
                             struct radeon_cmdbuf *continue_preamble_cs, uint32_t wait_count,
                             const struct vk_sync_wait *waits, uint32_t signal_count,
                             const struct vk_sync_signal *signals, bool can_patch)
{
   struct radv_amdgpu_winsys *ws = radv_amdgpu_ctx(_ctx)->ws;
   struct radv_winsys_sem_info sem_info;
   VkResult result;
   memset(&sem_info, 0, sizeof(sem_info));

   unsigned wait_idx = 0, signal_idx = 0;

   STACK_ARRAY(uint64_t, wait_points, wait_count);
   STACK_ARRAY(uint32_t, wait_syncobj, wait_count);
   STACK_ARRAY(uint64_t, signal_points, signal_count);
   STACK_ARRAY(uint32_t, signal_syncobj, signal_count);

   if (!wait_points || !wait_syncobj || !signal_points || !signal_syncobj) {
      result = VK_ERROR_OUT_OF_HOST_MEMORY;
      goto out;
   }

   sem_info.wait.points = wait_points;
   sem_info.wait.syncobj = wait_syncobj;
   sem_info.signal.points = signal_points;
   sem_info.signal.syncobj = signal_syncobj;

   for (uint32_t i = 0; i < wait_count; ++i) {
      if (waits[i].sync->type == &vk_sync_dummy_type)
         continue;

      assert(waits[i].sync->type == &ws->syncobj_sync_type);
      sem_info.wait.syncobj[wait_idx] = ((struct vk_drm_syncobj *)waits[i].sync)->syncobj;
      sem_info.wait.points[wait_idx] = waits[i].wait_value;
      ++wait_idx;
   }

   for (uint32_t i = 0; i < signal_count; ++i) {
      if (signals[i].sync->type == &vk_sync_dummy_type)
         continue;

      assert(signals[i].sync->type == &ws->syncobj_sync_type);
      sem_info.signal.syncobj[signal_idx] = ((struct vk_drm_syncobj *)signals[i].sync)->syncobj;
      sem_info.signal.points[signal_idx] = signals[i].signal_value;
      ++signal_idx;
   }

   assert(signal_idx <= signal_count);
   assert(wait_idx <= wait_count);

   sem_info.wait.timeline_syncobj_count =
      (ws->syncobj_sync_type.features & VK_SYNC_FEATURE_TIMELINE) ? wait_idx : 0;
   sem_info.wait.syncobj_count = wait_idx - sem_info.wait.timeline_syncobj_count;
   sem_info.cs_emit_wait = true;

   sem_info.signal.timeline_syncobj_count =
      (ws->syncobj_sync_type.features & VK_SYNC_FEATURE_TIMELINE) ? signal_idx : 0;
   sem_info.signal.syncobj_count = signal_idx - sem_info.signal.timeline_syncobj_count;
   sem_info.cs_emit_signal = true;

   result = radv_amdgpu_winsys_cs_submit_internal(_ctx, ring_type, queue_idx, cs_array, cs_count,
                                                  initial_preamble_cs, continue_preamble_cs,
                                                  &sem_info, can_patch);

out:
   STACK_ARRAY_FINISH(wait_points);
   STACK_ARRAY_FINISH(wait_syncobj);
   STACK_ARRAY_FINISH(signal_points);
   STACK_ARRAY_FINISH(signal_syncobj);

   return result;
}

static void *
radv_amdgpu_winsys_get_cpu_addr(void *_cs, uint64_t addr)
{
   struct radv_amdgpu_cs *cs = (struct radv_amdgpu_cs *)_cs;
   void *ret = NULL;

   if (!cs->ib_buffer)
      return NULL;
   for (unsigned i = 0; i <= cs->num_old_ib_buffers; ++i) {
      struct radv_amdgpu_winsys_bo *bo;

      bo = (struct radv_amdgpu_winsys_bo *)(i == cs->num_old_ib_buffers ? cs->ib_buffer
                                                                        : cs->old_ib_buffers[i].bo);
      if (addr >= bo->base.va && addr - bo->base.va < bo->size) {
         if (amdgpu_bo_cpu_map(bo->bo, &ret) == 0)
            return (char *)ret + (addr - bo->base.va);
      }
   }
   u_rwlock_rdlock(&cs->ws->global_bo_list.lock);
   for (uint32_t i = 0; i < cs->ws->global_bo_list.count; i++) {
      struct radv_amdgpu_winsys_bo *bo = cs->ws->global_bo_list.bos[i];
      if (addr >= bo->base.va && addr - bo->base.va < bo->size) {
         if (amdgpu_bo_cpu_map(bo->bo, &ret) == 0) {
            u_rwlock_rdunlock(&cs->ws->global_bo_list.lock);
            return (char *)ret + (addr - bo->base.va);
         }
      }
   }
   u_rwlock_rdunlock(&cs->ws->global_bo_list.lock);

   return ret;
}

static void
radv_amdgpu_winsys_cs_dump(struct radeon_cmdbuf *_cs, FILE *file, const int *trace_ids,
                           int trace_id_count)
{
   struct radv_amdgpu_cs *cs = (struct radv_amdgpu_cs *)_cs;
   void *ib = cs->base.buf;
   int num_dw = cs->base.cdw;

   if (cs->use_ib) {
      ib = radv_amdgpu_winsys_get_cpu_addr(cs, cs->ib.ib_mc_address);
      num_dw = cs->ib.size;
   }
   assert(ib);
   ac_parse_ib(file, ib, num_dw, trace_ids, trace_id_count, "main IB", cs->ws->info.chip_class,
               radv_amdgpu_winsys_get_cpu_addr, cs);
}

static uint32_t
radv_to_amdgpu_priority(enum radeon_ctx_priority radv_priority)
{
   switch (radv_priority) {
   case RADEON_CTX_PRIORITY_REALTIME:
      return AMDGPU_CTX_PRIORITY_VERY_HIGH;
   case RADEON_CTX_PRIORITY_HIGH:
      return AMDGPU_CTX_PRIORITY_HIGH;
   case RADEON_CTX_PRIORITY_MEDIUM:
      return AMDGPU_CTX_PRIORITY_NORMAL;
   case RADEON_CTX_PRIORITY_LOW:
      return AMDGPU_CTX_PRIORITY_LOW;
   default:
      unreachable("Invalid context priority");
   }
}
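
/* Note: RADEON_CTX_PRIORITY_REALTIME maps to AMDGPU_CTX_PRIORITY_VERY_HIGH
 * because the amdgpu context uapi exposes no separate realtime level here;
 * VERY_HIGH is the closest available mapping (priorities above NORMAL are
 * privilege-checked by the kernel). */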

static VkResult
radv_amdgpu_ctx_create(struct radeon_winsys *_ws, enum radeon_ctx_priority priority,
                       struct radeon_winsys_ctx **rctx)
{
   struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);
   struct radv_amdgpu_ctx *ctx = CALLOC_STRUCT(radv_amdgpu_ctx);
   uint32_t amdgpu_priority = radv_to_amdgpu_priority(priority);
   VkResult result;
   int r;

   if (!ctx)
      return VK_ERROR_OUT_OF_HOST_MEMORY;

   r = amdgpu_cs_ctx_create2(ws->dev, amdgpu_priority, &ctx->ctx);
   if (r && r == -EACCES) {
      result = VK_ERROR_NOT_PERMITTED_EXT;
      goto fail_create;
   } else if (r) {
      fprintf(stderr, "radv/amdgpu: radv_amdgpu_cs_ctx_create2 failed. (%i)\n", r);
      result = VK_ERROR_OUT_OF_HOST_MEMORY;
      goto fail_create;
   }
   ctx->ws = ws;

   assert(AMDGPU_HW_IP_NUM * MAX_RINGS_PER_TYPE * sizeof(uint64_t) <= 4096);
   result = ws->base.buffer_create(&ws->base, 4096, 8, RADEON_DOMAIN_GTT,
                                   RADEON_FLAG_CPU_ACCESS | RADEON_FLAG_NO_INTERPROCESS_SHARING,
                                   RADV_BO_PRIORITY_CS, 0, &ctx->fence_bo);
   if (result != VK_SUCCESS) {
      goto fail_alloc;
   }

   *rctx = (struct radeon_winsys_ctx *)ctx;
   return VK_SUCCESS;

fail_alloc:
   amdgpu_cs_ctx_free(ctx->ctx);
fail_create:
   FREE(ctx);
   return result;
}

static void
radv_amdgpu_ctx_destroy(struct radeon_winsys_ctx *rwctx)
{
   struct radv_amdgpu_ctx *ctx = (struct radv_amdgpu_ctx *)rwctx;

   for (unsigned ip = 0; ip <= AMDGPU_HW_IP_NUM; ++ip) {
      for (unsigned ring = 0; ring < MAX_RINGS_PER_TYPE; ++ring) {
         if (ctx->queue_syncobj[ip][ring])
            amdgpu_cs_destroy_syncobj(ctx->ws->dev, ctx->queue_syncobj[ip][ring]);
      }
   }

   ctx->ws->base.buffer_destroy(&ctx->ws->base, ctx->fence_bo);
   amdgpu_cs_ctx_free(ctx->ctx);
   FREE(ctx);
}

static uint32_t
radv_amdgpu_ctx_queue_syncobj(struct radv_amdgpu_ctx *ctx, unsigned ip, unsigned ring)
{
   uint32_t *syncobj = &ctx->queue_syncobj[ip][ring];
   if (!*syncobj) {
      amdgpu_cs_create_syncobj2(ctx->ws->dev, DRM_SYNCOBJ_CREATE_SIGNALED, syncobj);
   }
   return *syncobj;
}

static bool
radv_amdgpu_ctx_wait_idle(struct radeon_winsys_ctx *rwctx, enum ring_type ring_type, int ring_index)
{
   struct radv_amdgpu_ctx *ctx = (struct radv_amdgpu_ctx *)rwctx;
   int ip_type = ring_to_hw_ip(ring_type);

   if (ctx->last_submission[ip_type][ring_index].fence.fence) {
      uint32_t expired;
      int ret = amdgpu_cs_query_fence_status(&ctx->last_submission[ip_type][ring_index].fence,
                                             1000000000ull, 0, &expired);

      if (ret || !expired)
         return false;
   }

   return true;
}

static uint32_t
radv_to_amdgpu_pstate(enum radeon_ctx_pstate radv_pstate)
{
   switch (radv_pstate) {
   case RADEON_CTX_PSTATE_NONE:
      return AMDGPU_CTX_STABLE_PSTATE_NONE;
   case RADEON_CTX_PSTATE_STANDARD:
      return AMDGPU_CTX_STABLE_PSTATE_STANDARD;
   case RADEON_CTX_PSTATE_MIN_SCLK:
      return AMDGPU_CTX_STABLE_PSTATE_MIN_SCLK;
   case RADEON_CTX_PSTATE_MIN_MCLK:
      return AMDGPU_CTX_STABLE_PSTATE_MIN_MCLK;
   case RADEON_CTX_PSTATE_PEAK:
      return AMDGPU_CTX_STABLE_PSTATE_PEAK;
   default:
      unreachable("Invalid pstate");
   }
}

static int
radv_amdgpu_ctx_set_pstate(struct radeon_winsys_ctx *rwctx, enum radeon_ctx_pstate pstate)
{
   struct radv_amdgpu_ctx *ctx = (struct radv_amdgpu_ctx *)rwctx;
   uint32_t amdgpu_pstate = radv_to_amdgpu_pstate(pstate);
   return amdgpu_cs_ctx_stable_pstate(ctx->ctx, AMDGPU_CTX_OP_SET_STABLE_PSTATE, amdgpu_pstate, NULL);
}

static void *
radv_amdgpu_cs_alloc_syncobj_chunk(struct radv_winsys_sem_counts *counts, uint32_t queue_syncobj,
                                   struct drm_amdgpu_cs_chunk *chunk, int chunk_id)
{
   unsigned count = counts->syncobj_count + (queue_syncobj ? 1 : 0);
   struct drm_amdgpu_cs_chunk_sem *syncobj =
      malloc(sizeof(struct drm_amdgpu_cs_chunk_sem) * count);
   if (!syncobj)
      return NULL;

   for (unsigned i = 0; i < counts->syncobj_count; i++) {
      struct drm_amdgpu_cs_chunk_sem *sem = &syncobj[i];
      sem->handle = counts->syncobj[i];
   }

   if (queue_syncobj)
      syncobj[counts->syncobj_count].handle = queue_syncobj;

   chunk->chunk_id = chunk_id;
   chunk->length_dw = sizeof(struct drm_amdgpu_cs_chunk_sem) / 4 * count;
   chunk->chunk_data = (uint64_t)(uintptr_t)syncobj;
   return syncobj;
}

static void *
radv_amdgpu_cs_alloc_timeline_syncobj_chunk(struct radv_winsys_sem_counts *counts,
                                            uint32_t queue_syncobj,
                                            struct drm_amdgpu_cs_chunk *chunk, int chunk_id)
{
   unsigned count =
      counts->syncobj_count + counts->timeline_syncobj_count + (queue_syncobj ? 1 : 0);
   struct drm_amdgpu_cs_chunk_syncobj *syncobj =
      malloc(sizeof(struct drm_amdgpu_cs_chunk_syncobj) * count);
   if (!syncobj)
      return NULL;

   for (unsigned i = 0; i < counts->syncobj_count; i++) {
      struct drm_amdgpu_cs_chunk_syncobj *sem = &syncobj[i];
      sem->handle = counts->syncobj[i];
      sem->flags = 0;
      sem->point = 0;
   }

   for (unsigned i = 0; i < counts->timeline_syncobj_count; i++) {
      struct drm_amdgpu_cs_chunk_syncobj *sem = &syncobj[i + counts->syncobj_count];
      sem->handle = counts->syncobj[i + counts->syncobj_count];
      sem->flags = DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT;
      sem->point = counts->points[i];
   }

   if (queue_syncobj) {
      syncobj[count - 1].handle = queue_syncobj;
      syncobj[count - 1].flags = 0;
      syncobj[count - 1].point = 0;
   }

   chunk->chunk_id = chunk_id;
   chunk->length_dw = sizeof(struct drm_amdgpu_cs_chunk_syncobj) / 4 * count;
   chunk->chunk_data = (uint64_t)(uintptr_t)syncobj;
   return syncobj;
}
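
/* Layout produced above for N binary + M timeline syncobjs plus the queue
 * syncobj (illustrative):
 *
 *    syncobj[0 .. N-1]     binary handles, point = 0
 *    syncobj[N .. N+M-1]   timeline handles, point = counts->points[i], with
 *                          WAIT_FOR_SUBMIT so waits tolerate unsubmitted points
 *    syncobj[N+M]          queue syncobj, point = 0
 *
 * length_dw is the entry size in dwords times the entry count, which is what
 * the CS ioctl expects for these chunk IDs. */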

static bool
radv_amdgpu_cs_has_user_fence(struct radv_amdgpu_cs_request *request)
{
   return request->ip_type != AMDGPU_HW_IP_UVD &&
          request->ip_type != AMDGPU_HW_IP_VCE &&
          request->ip_type != AMDGPU_HW_IP_UVD_ENC &&
          request->ip_type != AMDGPU_HW_IP_VCN_DEC &&
          request->ip_type != AMDGPU_HW_IP_VCN_ENC &&
          request->ip_type != AMDGPU_HW_IP_VCN_JPEG;
}

static VkResult
radv_amdgpu_cs_submit(struct radv_amdgpu_ctx *ctx, struct radv_amdgpu_cs_request *request,
                      struct radv_winsys_sem_info *sem_info)
{
   int r;
   int num_chunks;
   int size;
   struct drm_amdgpu_cs_chunk *chunks;
   struct drm_amdgpu_cs_chunk_data *chunk_data;
   bool use_bo_list_create = ctx->ws->info.drm_minor < 27;
   struct drm_amdgpu_bo_list_in bo_list_in;
   void *wait_syncobj = NULL, *signal_syncobj = NULL;
   int i;
   uint32_t bo_list = 0;
   VkResult result = VK_SUCCESS;
   bool has_user_fence = radv_amdgpu_cs_has_user_fence(request);
   uint32_t queue_syncobj = radv_amdgpu_ctx_queue_syncobj(ctx, request->ip_type, request->ring);
   bool *queue_syncobj_wait = &ctx->queue_syncobj_wait[request->ip_type][request->ring];

   if (!queue_syncobj)
      return VK_ERROR_OUT_OF_HOST_MEMORY;

   size = request->number_of_ibs + 1 + (has_user_fence ? 1 : 0) + (!use_bo_list_create ? 1 : 0) + 3;

   chunks = malloc(sizeof(chunks[0]) * size);
   if (!chunks)
      return VK_ERROR_OUT_OF_HOST_MEMORY;

   size = request->number_of_ibs + (has_user_fence ? 1 : 0);

   chunk_data = malloc(sizeof(chunk_data[0]) * size);
   if (!chunk_data) {
      result = VK_ERROR_OUT_OF_HOST_MEMORY;
      goto error_out;
   }

   num_chunks = request->number_of_ibs;
   for (i = 0; i < request->number_of_ibs; i++) {
      struct amdgpu_cs_ib_info *ib;
      chunks[i].chunk_id = AMDGPU_CHUNK_ID_IB;
      chunks[i].length_dw = sizeof(struct drm_amdgpu_cs_chunk_ib) / 4;
      chunks[i].chunk_data = (uint64_t)(uintptr_t)&chunk_data[i];

      ib = &request->ibs[i];

      chunk_data[i].ib_data._pad = 0;
      chunk_data[i].ib_data.va_start = ib->ib_mc_address;
      chunk_data[i].ib_data.ib_bytes = ib->size * 4;
      chunk_data[i].ib_data.ip_type = request->ip_type;
      chunk_data[i].ib_data.ip_instance = request->ip_instance;
      chunk_data[i].ib_data.ring = request->ring;
      chunk_data[i].ib_data.flags = ib->flags;
   }

   if (has_user_fence) {
      i = num_chunks++;
      chunks[i].chunk_id = AMDGPU_CHUNK_ID_FENCE;
      chunks[i].length_dw = sizeof(struct drm_amdgpu_cs_chunk_fence) / 4;
      chunks[i].chunk_data = (uint64_t)(uintptr_t)&chunk_data[i];

      struct amdgpu_cs_fence_info fence_info;
      fence_info.handle = radv_amdgpu_winsys_bo(ctx->fence_bo)->bo;
      fence_info.offset = (request->ip_type * MAX_RINGS_PER_TYPE + request->ring) * sizeof(uint64_t);
      amdgpu_cs_chunk_fence_info_to_data(&fence_info, &chunk_data[i]);
   }

   if (sem_info->cs_emit_wait && (sem_info->wait.timeline_syncobj_count ||
                                  sem_info->wait.syncobj_count || *queue_syncobj_wait)) {

      if (ctx->ws->info.has_timeline_syncobj) {
         wait_syncobj = radv_amdgpu_cs_alloc_timeline_syncobj_chunk(
            &sem_info->wait, queue_syncobj, &chunks[num_chunks],
            AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_WAIT);
      } else {
         wait_syncobj = radv_amdgpu_cs_alloc_syncobj_chunk(
            &sem_info->wait, queue_syncobj, &chunks[num_chunks], AMDGPU_CHUNK_ID_SYNCOBJ_IN);
      }
      if (!wait_syncobj) {
         result = VK_ERROR_OUT_OF_HOST_MEMORY;
         goto error_out;
      }
      num_chunks++;

      sem_info->cs_emit_wait = false;
      *queue_syncobj_wait = false;
   }

   if (sem_info->cs_emit_signal) {
      if (ctx->ws->info.has_timeline_syncobj) {
         signal_syncobj = radv_amdgpu_cs_alloc_timeline_syncobj_chunk(
            &sem_info->signal, queue_syncobj, &chunks[num_chunks],
            AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_SIGNAL);
      } else {
         signal_syncobj = radv_amdgpu_cs_alloc_syncobj_chunk(
            &sem_info->signal, queue_syncobj, &chunks[num_chunks], AMDGPU_CHUNK_ID_SYNCOBJ_OUT);
      }
      if (!signal_syncobj) {
         result = VK_ERROR_OUT_OF_HOST_MEMORY;
         goto error_out;
      }
      num_chunks++;
   }

   if (use_bo_list_create) {
      /* Legacy path creating the buffer list handle and passing it
       * to the CS ioctl.
       */
      r = amdgpu_bo_list_create_raw(ctx->ws->dev, request->num_handles,
                                    request->handles, &bo_list);
      if (r) {
         if (r == -ENOMEM) {
            fprintf(stderr, "radv/amdgpu: Not enough memory for buffer list creation.\n");
            result = VK_ERROR_OUT_OF_HOST_MEMORY;
         } else {
            fprintf(stderr, "radv/amdgpu: buffer list creation failed (%d).\n", r);
            result = VK_ERROR_UNKNOWN;
         }
         goto error_out;
      }
   } else {
      /* Standard path passing the buffer list via the CS ioctl. */
      bo_list_in.operation = ~0;
      bo_list_in.list_handle = ~0;
      bo_list_in.bo_number = request->num_handles;
      bo_list_in.bo_info_size = sizeof(struct drm_amdgpu_bo_list_entry);
      bo_list_in.bo_info_ptr = (uint64_t)(uintptr_t)request->handles;

      chunks[num_chunks].chunk_id = AMDGPU_CHUNK_ID_BO_HANDLES;
      chunks[num_chunks].length_dw = sizeof(struct drm_amdgpu_bo_list_in) / 4;
      chunks[num_chunks].chunk_data = (uintptr_t)&bo_list_in;
      num_chunks++;
   }

   r = amdgpu_cs_submit_raw2(ctx->ws->dev, ctx->ctx, bo_list, num_chunks, chunks, &request->seq_no);

   if (r) {
      if (r == -ENOMEM) {
         fprintf(stderr, "radv/amdgpu: Not enough memory for command submission.\n");
         result = VK_ERROR_OUT_OF_HOST_MEMORY;
      } else if (r == -ECANCELED) {
         fprintf(stderr, "radv/amdgpu: The CS has been cancelled because the context is lost.\n");
         result = VK_ERROR_DEVICE_LOST;
      } else {
         fprintf(stderr,
                 "amdgpu: The CS has been rejected, "
                 "see dmesg for more information (%i).\n",
                 r);
         result = VK_ERROR_UNKNOWN;
      }
   }

   if (bo_list)
      amdgpu_bo_list_destroy_raw(ctx->ws->dev, bo_list);

error_out:
   free(chunks);
   free(chunk_data);
   free(wait_syncobj);
   free(signal_syncobj);
   return result;
}

void
radv_amdgpu_cs_init_functions(struct radv_amdgpu_winsys *ws)
{
   ws->base.ctx_create = radv_amdgpu_ctx_create;
   ws->base.ctx_destroy = radv_amdgpu_ctx_destroy;
   ws->base.ctx_wait_idle = radv_amdgpu_ctx_wait_idle;
   ws->base.ctx_set_pstate = radv_amdgpu_ctx_set_pstate;
   ws->base.cs_domain = radv_amdgpu_cs_domain;
   ws->base.cs_create = radv_amdgpu_cs_create;
   ws->base.cs_destroy = radv_amdgpu_cs_destroy;
   ws->base.cs_grow = radv_amdgpu_cs_grow;
   ws->base.cs_finalize = radv_amdgpu_cs_finalize;
   ws->base.cs_reset = radv_amdgpu_cs_reset;
   ws->base.cs_add_buffer = radv_amdgpu_cs_add_buffer;
   ws->base.cs_execute_secondary = radv_amdgpu_cs_execute_secondary;
   ws->base.cs_submit = radv_amdgpu_winsys_cs_submit;
   ws->base.cs_dump = radv_amdgpu_winsys_cs_dump;
}
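
/* Typical flow through this vtable from the driver side, as an illustrative
 * sketch (identifiers other than the winsys entry points are hypothetical,
 * error handling omitted):
 *
 *    struct radeon_cmdbuf *cs = ws->base.cs_create(&ws->base, RING_GFX);
 *    ws->base.cs_add_buffer(cs, bo);          // track every referenced BO
 *    radeon_emit(cs, ...);                    // cs_grow() chains new IBs as needed
 *    ws->base.cs_finalize(cs);                // pad + reserve 4 dwords for chaining
 *    ws->base.cs_submit(rwctx, RING_GFX, 0, &cs, 1, NULL, NULL,
 *                       0, NULL, 0, NULL, true);
 *    ws->base.cs_destroy(cs);
 */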