/*
 * Copyright © 2016 Red Hat.
 * Copyright © 2016 Bas Nieuwenhuizen
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <assert.h>
#include <errno.h>
#include <pthread.h>

#include <amdgpu.h>
#include "drm-uapi/amdgpu_drm.h"

#include "util/u_memory.h"
#include "ac_debug.h"
#include "radv_radeon_winsys.h"
#include "radv_amdgpu_cs.h"
#include "radv_amdgpu_bo.h"
#include "sid.h"
enum {
   VIRTUAL_BUFFER_HASH_TABLE_SIZE = 1024
};

struct radv_amdgpu_cs {
   struct radeon_cmdbuf base;
   struct radv_amdgpu_winsys *ws;

   struct amdgpu_cs_ib_info ib;

   struct radeon_winsys_bo *ib_buffer;
   uint8_t *ib_mapped;
   unsigned max_num_buffers;
   unsigned num_buffers;
   struct drm_amdgpu_bo_list_entry *handles;

   struct radeon_winsys_bo **old_ib_buffers;
   unsigned num_old_ib_buffers;
   unsigned max_num_old_ib_buffers;
   unsigned *ib_size_ptr;
   VkResult status;
   bool is_chained;

   int buffer_hash_table[1024];
   unsigned hw_ip;

   unsigned num_virtual_buffers;
   unsigned max_num_virtual_buffers;
   struct radeon_winsys_bo **virtual_buffers;
   int *virtual_buffer_hash_table;

   /* For chips that don't support chaining. */
   struct radeon_cmdbuf *old_cs_buffers;
   unsigned num_old_cs_buffers;
};
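/* A CS is backed in one of two ways, selected by ws->use_ib_bos: either the
 * commands live in GPU-visible IB buffer objects that are chained together
 * with INDIRECT_BUFFER packets, or (for chips that cannot chain) they live
 * in plain malloc'ed memory and get copied into freshly allocated BOs at
 * submit time. The old_ib_buffers/old_cs_buffers arrays keep earlier pieces
 * alive until the CS is reset or destroyed.
 */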
static inline struct radv_amdgpu_cs *
radv_amdgpu_cs(struct radeon_cmdbuf *base)
{
   return (struct radv_amdgpu_cs*)base;
}

static int ring_to_hw_ip(enum ring_type ring)
{
   switch (ring) {
   case RING_GFX:
      return AMDGPU_HW_IP_GFX;
   case RING_DMA:
      return AMDGPU_HW_IP_DMA;
   case RING_COMPUTE:
      return AMDGPU_HW_IP_COMPUTE;
   default:
      unreachable("unsupported ring");
   }
}
struct radv_amdgpu_cs_request {
   /** Specify flags with additional information */
   uint64_t flags;

   /** Specify HW IP block type to which to send the IB. */
   unsigned ip_type;

   /** IP instance index if there are several IPs of the same type. */
   unsigned ip_instance;

   /**
    * Specify ring index of the IP. We could have several rings
    * in the same IP. E.g. 0 for SDMA0 and 1 for SDMA1.
    */
   uint32_t ring;

   /**
    * BO list handles used by this request.
    */
   struct drm_amdgpu_bo_list_entry *handles;
   uint32_t num_handles;

   /**
    * Number of dependencies this Command submission needs to
    * wait for before starting execution.
    */
   uint32_t number_of_dependencies;

   /**
    * Array of dependencies which need to be met before
    * execution can start.
    */
   struct amdgpu_cs_fence *dependencies;

   /** Number of IBs to submit in the field ibs. */
   uint32_t number_of_ibs;

   /**
    * IBs to submit. Those IBs will be submitted together as a single entity.
    */
   struct amdgpu_cs_ib_info *ibs;

   /**
    * The returned sequence number for the command submission.
    */
   uint64_t seq_no;

   /**
    * The fence information.
    */
   struct amdgpu_cs_fence_info fence_info;
};
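/* This request struct closely mirrors the chunks of the amdgpu CS ioctl:
 * radv_amdgpu_cs_submit() below translates the ibs array into
 * AMDGPU_CHUNK_ID_IB chunks, fence_info into an AMDGPU_CHUNK_ID_FENCE chunk
 * and the handles array into the buffer list, then reads the kernel's
 * sequence number back into seq_no.
 */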
static int radv_amdgpu_signal_sems(struct radv_amdgpu_ctx *ctx,
                                   uint32_t ip_type,
                                   uint32_t ring,
                                   struct radv_winsys_sem_info *sem_info);
static VkResult radv_amdgpu_cs_submit(struct radv_amdgpu_ctx *ctx,
                                      struct radv_amdgpu_cs_request *request,
                                      struct radv_winsys_sem_info *sem_info);

static void radv_amdgpu_request_to_fence(struct radv_amdgpu_ctx *ctx,
                                         struct radv_amdgpu_fence *fence,
                                         struct radv_amdgpu_cs_request *req)
{
   fence->fence.context = ctx->ctx;
   fence->fence.ip_type = req->ip_type;
   fence->fence.ip_instance = req->ip_instance;
   fence->fence.ring = req->ring;
   fence->fence.fence = req->seq_no;
   fence->user_ptr = (volatile uint64_t*)(ctx->fence_map + req->ip_type * MAX_RINGS_PER_TYPE + req->ring);
}
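/* ctx->fence_map points at a BO the kernel writes the completed sequence
 * number into at the end of each submission (see radv_set_cs_fence()),
 * one uint64_t slot per (ip_type, ring) pair. Caching a pointer to that
 * slot in fence->user_ptr lets radv_amdgpu_fence_wait() poll completion
 * from the CPU and only fall back to the fence-status ioctl when it
 * actually has to block.
 */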
static struct radeon_winsys_fence *radv_amdgpu_create_fence()
{
   struct radv_amdgpu_fence *fence = calloc(1, sizeof(struct radv_amdgpu_fence));
   if (!fence)
      return NULL;

   fence->fence.fence = UINT64_MAX;
   return (struct radeon_winsys_fence*)fence;
}

static void radv_amdgpu_destroy_fence(struct radeon_winsys_fence *_fence)
{
   struct radv_amdgpu_fence *fence = (struct radv_amdgpu_fence *)_fence;
   free(fence);
}

static void radv_amdgpu_reset_fence(struct radeon_winsys_fence *_fence)
{
   struct radv_amdgpu_fence *fence = (struct radv_amdgpu_fence *)_fence;
   fence->fence.fence = UINT64_MAX;
}

static void radv_amdgpu_signal_fence(struct radeon_winsys_fence *_fence)
{
   struct radv_amdgpu_fence *fence = (struct radv_amdgpu_fence *)_fence;
   fence->fence.fence = 0;
}

static bool radv_amdgpu_is_fence_waitable(struct radeon_winsys_fence *_fence)
{
   struct radv_amdgpu_fence *fence = (struct radv_amdgpu_fence *)_fence;
   return fence->fence.fence < UINT64_MAX;
}
static bool radv_amdgpu_fence_wait(struct radeon_winsys *_ws,
                                   struct radeon_winsys_fence *_fence,
                                   bool absolute,
                                   uint64_t timeout)
{
   struct radv_amdgpu_fence *fence = (struct radv_amdgpu_fence *)_fence;
   unsigned flags = absolute ? AMDGPU_QUERY_FENCE_TIMEOUT_IS_ABSOLUTE : 0;
   int r;
   uint32_t expired = 0;

   /* Special casing 0 and UINT64_MAX so that they work without user_ptr/fence.ctx */
   if (fence->fence.fence == UINT64_MAX)
      return false;

   if (fence->fence.fence == 0)
      return true;

   if (fence->user_ptr) {
      if (*fence->user_ptr >= fence->fence.fence)
         return true;
      if (!absolute && !timeout)
         return false;
   }

   /* Now use the libdrm query. */
   r = amdgpu_cs_query_fence_status(&fence->fence,
                                    timeout,
                                    flags,
                                    &expired);

   if (r) {
      fprintf(stderr, "amdgpu: radv_amdgpu_cs_query_fence_status failed.\n");
      return false;
   }

   if (expired)
      return true;

   return false;
}

static bool radv_amdgpu_fences_wait(struct radeon_winsys *_ws,
                                    struct radeon_winsys_fence *const *_fences,
                                    uint32_t fence_count,
                                    bool wait_all,
                                    uint64_t timeout)
{
   struct amdgpu_cs_fence *fences = malloc(sizeof(struct amdgpu_cs_fence) * fence_count);
   int r;
   uint32_t expired = 0, first = 0;

   if (!fences)
      return false;

   for (uint32_t i = 0; i < fence_count; ++i)
      fences[i] = ((struct radv_amdgpu_fence *)_fences[i])->fence;

   /* Now use the libdrm query. */
   r = amdgpu_cs_wait_fences(fences, fence_count, wait_all,
                             timeout, &expired, &first);

   free(fences);

   if (r) {
      fprintf(stderr, "amdgpu: amdgpu_cs_wait_fences failed.\n");
      return false;
   }

   if (expired)
      return true;

   return false;
}
static void radv_amdgpu_cs_destroy(struct radeon_cmdbuf *rcs)
{
   struct radv_amdgpu_cs *cs = radv_amdgpu_cs(rcs);

   if (cs->ib_buffer)
      cs->ws->base.buffer_destroy(cs->ib_buffer);
   else
      free(cs->base.buf);

   for (unsigned i = 0; i < cs->num_old_ib_buffers; ++i)
      cs->ws->base.buffer_destroy(cs->old_ib_buffers[i]);

   for (unsigned i = 0; i < cs->num_old_cs_buffers; ++i) {
      free(cs->old_cs_buffers[i].buf);
   }

   free(cs->old_cs_buffers);
   free(cs->old_ib_buffers);
   free(cs->virtual_buffers);
   free(cs->virtual_buffer_hash_table);
   free(cs->handles);
   free(cs);
}

static void radv_amdgpu_init_cs(struct radv_amdgpu_cs *cs,
                                enum ring_type ring_type)
{
   for (int i = 0; i < ARRAY_SIZE(cs->buffer_hash_table); ++i)
      cs->buffer_hash_table[i] = -1;

   cs->hw_ip = ring_to_hw_ip(ring_type);
}
static struct radeon_cmdbuf *
radv_amdgpu_cs_create(struct radeon_winsys *ws,
                      enum ring_type ring_type)
{
   struct radv_amdgpu_cs *cs;
   uint32_t ib_size = 20 * 1024 * 4;
   cs = calloc(1, sizeof(struct radv_amdgpu_cs));
   if (!cs)
      return NULL;

   cs->ws = radv_amdgpu_winsys(ws);
   radv_amdgpu_init_cs(cs, ring_type);

   if (cs->ws->use_ib_bos) {
      cs->ib_buffer = ws->buffer_create(ws, ib_size, 0,
                                        cs->ws->cs_bo_domain,
                                        RADEON_FLAG_CPU_ACCESS |
                                        RADEON_FLAG_NO_INTERPROCESS_SHARING |
                                        RADEON_FLAG_READ_ONLY |
                                        RADEON_FLAG_GTT_WC,
                                        RADV_BO_PRIORITY_CS);
      if (!cs->ib_buffer) {
         free(cs);
         return NULL;
      }

      cs->ib_mapped = ws->buffer_map(cs->ib_buffer);
      if (!cs->ib_mapped) {
         ws->buffer_destroy(cs->ib_buffer);
         free(cs);
         return NULL;
      }

      cs->ib.ib_mc_address = radv_amdgpu_winsys_bo(cs->ib_buffer)->base.va;
      cs->base.buf = (uint32_t *)cs->ib_mapped;
      cs->base.max_dw = ib_size / 4 - 4;
      cs->ib_size_ptr = &cs->ib.size;
      cs->ib.size = 0;

      ws->cs_add_buffer(&cs->base, cs->ib_buffer);
   } else {
      uint32_t *buf = malloc(16384);
      if (!buf) {
         free(cs);
         return NULL;
      }
      cs->base.buf = buf;
      cs->base.max_dw = 4096;
   }

   return &cs->base;
}
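/* Typical winsys usage, as wired up in radv_amdgpu_cs_init_functions() at
 * the bottom of this file (a sketch only; argument lists abbreviated and
 * error handling omitted):
 *
 *    struct radeon_cmdbuf *cs = ws->cs_create(ws, RING_GFX);
 *    ... radeon_emit(cs, ...) and ws->cs_add_buffer(cs, bo) ...
 *    ws->cs_finalize(cs);
 *    ws->cs_submit(ctx, queue_idx, &cs, 1, ...);
 *    ws->cs_reset(cs);    // reuse the CS for the next frame
 *    ws->cs_destroy(cs);
 */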
static void radv_amdgpu_cs_grow(struct radeon_cmdbuf *_cs, size_t min_size)
{
   struct radv_amdgpu_cs *cs = radv_amdgpu_cs(_cs);

   if (cs->status != VK_SUCCESS) {
      cs->base.cdw = 0;
      return;
   }

   if (!cs->ws->use_ib_bos) {
      const uint64_t limit_dws = 0xffff8;
      uint64_t ib_dws = MAX2(cs->base.cdw + min_size,
                             MIN2(cs->base.max_dw * 2, limit_dws));

      /* The total ib size cannot exceed limit_dws dwords. */
      if (ib_dws > limit_dws)
      {
         /* The maximum size in dwords has been reached,
          * try to allocate a new one.
          */
         struct radeon_cmdbuf *old_cs_buffers =
            realloc(cs->old_cs_buffers,
                    (cs->num_old_cs_buffers + 1) * sizeof(*cs->old_cs_buffers));
         if (!old_cs_buffers) {
            cs->status = VK_ERROR_OUT_OF_HOST_MEMORY;
            cs->base.cdw = 0;
            return;
         }
         cs->old_cs_buffers = old_cs_buffers;

         /* Store the current one for submitting it later. */
         cs->old_cs_buffers[cs->num_old_cs_buffers].cdw = cs->base.cdw;
         cs->old_cs_buffers[cs->num_old_cs_buffers].max_dw = cs->base.max_dw;
         cs->old_cs_buffers[cs->num_old_cs_buffers].buf = cs->base.buf;
         cs->num_old_cs_buffers++;

         /* Reset the cs, it will be re-allocated below. */
         cs->base.cdw = 0;
         cs->base.buf = NULL;
         cs->base.max_dw = 0;

         /* Re-compute the number of dwords to allocate. */
         ib_dws = MAX2(cs->base.cdw + min_size,
                       MIN2(cs->base.max_dw * 2, limit_dws));
         if (ib_dws > limit_dws) {
            fprintf(stderr, "amdgpu: Too high number of "
                            "dwords to allocate\n");
            cs->status = VK_ERROR_OUT_OF_HOST_MEMORY;
            return;
         }
      }

      uint32_t *new_buf = realloc(cs->base.buf, ib_dws * 4);
      if (new_buf) {
         cs->base.buf = new_buf;
         cs->base.max_dw = ib_dws;
      } else {
         cs->status = VK_ERROR_OUT_OF_HOST_MEMORY;
         cs->base.cdw = 0;
      }

      return;
   }

   uint64_t ib_size = MAX2(min_size * 4 + 16, cs->base.max_dw * 4 * 2);

   /* max that fits in the chain size field. */
   ib_size = MIN2(ib_size, 0xfffff);

   while (!cs->base.cdw || (cs->base.cdw & 7) != 4)
      radeon_emit(&cs->base, PKT3_NOP_PAD);

   *cs->ib_size_ptr |= cs->base.cdw + 4;

   if (cs->num_old_ib_buffers == cs->max_num_old_ib_buffers) {
      unsigned max_num_old_ib_buffers =
         MAX2(1, cs->max_num_old_ib_buffers * 2);
      struct radeon_winsys_bo **old_ib_buffers =
         realloc(cs->old_ib_buffers,
                 max_num_old_ib_buffers * sizeof(void*));
      if (!old_ib_buffers) {
         cs->status = VK_ERROR_OUT_OF_HOST_MEMORY;
         return;
      }
      cs->max_num_old_ib_buffers = max_num_old_ib_buffers;
      cs->old_ib_buffers = old_ib_buffers;
   }

   cs->old_ib_buffers[cs->num_old_ib_buffers++] = cs->ib_buffer;

   cs->ib_buffer = cs->ws->base.buffer_create(&cs->ws->base, ib_size, 0,
                                              cs->ws->cs_bo_domain,
                                              RADEON_FLAG_CPU_ACCESS |
                                              RADEON_FLAG_NO_INTERPROCESS_SHARING |
                                              RADEON_FLAG_READ_ONLY |
                                              RADEON_FLAG_GTT_WC,
                                              RADV_BO_PRIORITY_CS);

   if (!cs->ib_buffer) {
      cs->base.cdw = 0;
      cs->status = VK_ERROR_OUT_OF_DEVICE_MEMORY;
      cs->ib_buffer = cs->old_ib_buffers[--cs->num_old_ib_buffers];
      return;
   }

   cs->ib_mapped = cs->ws->base.buffer_map(cs->ib_buffer);
   if (!cs->ib_mapped) {
      cs->ws->base.buffer_destroy(cs->ib_buffer);
      cs->base.cdw = 0;

      /* VK_ERROR_MEMORY_MAP_FAILED is not valid for vkEndCommandBuffer. */
      cs->status = VK_ERROR_OUT_OF_DEVICE_MEMORY;
      cs->ib_buffer = cs->old_ib_buffers[--cs->num_old_ib_buffers];
      return;
   }

   cs->ws->base.cs_add_buffer(&cs->base, cs->ib_buffer);

   radeon_emit(&cs->base, PKT3(PKT3_INDIRECT_BUFFER_CIK, 2, 0));
   radeon_emit(&cs->base, radv_amdgpu_winsys_bo(cs->ib_buffer)->base.va);
   radeon_emit(&cs->base, radv_amdgpu_winsys_bo(cs->ib_buffer)->base.va >> 32);
   radeon_emit(&cs->base, S_3F2_CHAIN(1) | S_3F2_VALID(1));

   cs->ib_size_ptr = cs->base.buf + cs->base.cdw - 1;

   cs->base.buf = (uint32_t *)cs->ib_mapped;
   cs->base.cdw = 0;
   cs->base.max_dw = ib_size / 4 - 4;
}
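/* Note on the padding above: an IB is chained at an 8-dword boundary and the
 * INDIRECT_BUFFER packet is 4 dwords, so the loop pads with NOPs until
 * cdw % 8 == 4 and the chain packet ends exactly on the boundary. The last
 * trailer dword (where ib_size_ptr now points) carries the next IB's size in
 * its low bits, which is also why ib_size is clamped to 0xfffff dwords.
 */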
static VkResult radv_amdgpu_cs_finalize(struct radeon_cmdbuf *_cs)
{
   struct radv_amdgpu_cs *cs = radv_amdgpu_cs(_cs);

   if (cs->ws->use_ib_bos) {
      while (!cs->base.cdw || (cs->base.cdw & 7) != 0)
         radeon_emit(&cs->base, PKT3_NOP_PAD);

      *cs->ib_size_ptr |= cs->base.cdw;

      cs->is_chained = false;
   }

   return cs->status;
}

static void radv_amdgpu_cs_reset(struct radeon_cmdbuf *_cs)
{
   struct radv_amdgpu_cs *cs = radv_amdgpu_cs(_cs);
   cs->base.cdw = 0;
   cs->status = VK_SUCCESS;

   for (unsigned i = 0; i < cs->num_buffers; ++i) {
      unsigned hash = cs->handles[i].bo_handle &
                      (ARRAY_SIZE(cs->buffer_hash_table) - 1);
      cs->buffer_hash_table[hash] = -1;
   }

   for (unsigned i = 0; i < cs->num_virtual_buffers; ++i) {
      unsigned hash = ((uintptr_t)cs->virtual_buffers[i] >> 6) & (VIRTUAL_BUFFER_HASH_TABLE_SIZE - 1);
      cs->virtual_buffer_hash_table[hash] = -1;
   }

   cs->num_buffers = 0;
   cs->num_virtual_buffers = 0;

   if (cs->ws->use_ib_bos) {
      cs->ws->base.cs_add_buffer(&cs->base, cs->ib_buffer);

      for (unsigned i = 0; i < cs->num_old_ib_buffers; ++i)
         cs->ws->base.buffer_destroy(cs->old_ib_buffers[i]);

      cs->num_old_ib_buffers = 0;
      cs->ib.ib_mc_address = radv_amdgpu_winsys_bo(cs->ib_buffer)->base.va;
      cs->ib_size_ptr = &cs->ib.size;
      cs->ib.size = 0;
   } else {
      for (unsigned i = 0; i < cs->num_old_cs_buffers; ++i) {
         struct radeon_cmdbuf *rcs = &cs->old_cs_buffers[i];
         free(rcs->buf);
      }

      free(cs->old_cs_buffers);
      cs->old_cs_buffers = NULL;
      cs->num_old_cs_buffers = 0;
   }
}

static int radv_amdgpu_cs_find_buffer(struct radv_amdgpu_cs *cs,
                                      uint32_t bo)
{
   unsigned hash = bo & (ARRAY_SIZE(cs->buffer_hash_table) - 1);
   int index = cs->buffer_hash_table[hash];

   if (index == -1)
      return -1;

   if (cs->handles[index].bo_handle == bo)
      return index;

   for (unsigned i = 0; i < cs->num_buffers; ++i) {
      if (cs->handles[i].bo_handle == bo) {
         cs->buffer_hash_table[hash] = i;
         return i;
      }
   }

   return -1;
}

static void radv_amdgpu_cs_add_buffer_internal(struct radv_amdgpu_cs *cs,
                                               uint32_t bo, uint8_t priority)
{
   unsigned hash;
   int index = radv_amdgpu_cs_find_buffer(cs, bo);

   if (index != -1)
      return;

   if (cs->num_buffers == cs->max_num_buffers) {
      unsigned new_count = MAX2(1, cs->max_num_buffers * 2);
      struct drm_amdgpu_bo_list_entry *new_entries =
         realloc(cs->handles, new_count * sizeof(struct drm_amdgpu_bo_list_entry));
      if (new_entries) {
         cs->max_num_buffers = new_count;
         cs->handles = new_entries;
      } else {
         cs->status = VK_ERROR_OUT_OF_HOST_MEMORY;
         return;
      }
   }

   cs->handles[cs->num_buffers].bo_handle = bo;
   cs->handles[cs->num_buffers].bo_priority = priority;

   hash = bo & (ARRAY_SIZE(cs->buffer_hash_table) - 1);
   cs->buffer_hash_table[hash] = cs->num_buffers;

   ++cs->num_buffers;
}
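/* The buffer tracking above is a small direct-mapped cache: the 1024-entry
 * buffer_hash_table maps a BO handle to its index in handles[], so adding a
 * BO that is already tracked is O(1) in the common case, with a linear scan
 * of handles[] as the fallback on a hash collision.
 */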
static void radv_amdgpu_cs_add_virtual_buffer(struct radeon_cmdbuf *_cs,
                                              struct radeon_winsys_bo *bo)
{
   struct radv_amdgpu_cs *cs = radv_amdgpu_cs(_cs);
   unsigned hash = ((uintptr_t)bo >> 6) & (VIRTUAL_BUFFER_HASH_TABLE_SIZE - 1);

   if (!cs->virtual_buffer_hash_table) {
      int *virtual_buffer_hash_table =
         malloc(VIRTUAL_BUFFER_HASH_TABLE_SIZE * sizeof(int));
      if (!virtual_buffer_hash_table) {
         cs->status = VK_ERROR_OUT_OF_HOST_MEMORY;
         return;
      }
      cs->virtual_buffer_hash_table = virtual_buffer_hash_table;

      for (int i = 0; i < VIRTUAL_BUFFER_HASH_TABLE_SIZE; ++i)
         cs->virtual_buffer_hash_table[i] = -1;
   }

   if (cs->virtual_buffer_hash_table[hash] >= 0) {
      int idx = cs->virtual_buffer_hash_table[hash];
      if (cs->virtual_buffers[idx] == bo) {
         return;
      }
      for (unsigned i = 0; i < cs->num_virtual_buffers; ++i) {
         if (cs->virtual_buffers[i] == bo) {
            cs->virtual_buffer_hash_table[hash] = i;
            return;
         }
      }
   }

   if (cs->max_num_virtual_buffers <= cs->num_virtual_buffers) {
      unsigned max_num_virtual_buffers =
         MAX2(2, cs->max_num_virtual_buffers * 2);
      struct radeon_winsys_bo **virtual_buffers =
         realloc(cs->virtual_buffers,
                 sizeof(struct radeon_winsys_bo*) * max_num_virtual_buffers);
      if (!virtual_buffers) {
         cs->status = VK_ERROR_OUT_OF_HOST_MEMORY;
         return;
      }
      cs->max_num_virtual_buffers = max_num_virtual_buffers;
      cs->virtual_buffers = virtual_buffers;
   }

   cs->virtual_buffers[cs->num_virtual_buffers] = bo;

   cs->virtual_buffer_hash_table[hash] = cs->num_virtual_buffers;
   ++cs->num_virtual_buffers;
}

static void radv_amdgpu_cs_add_buffer(struct radeon_cmdbuf *_cs,
                                      struct radeon_winsys_bo *_bo)
{
   struct radv_amdgpu_cs *cs = radv_amdgpu_cs(_cs);
   struct radv_amdgpu_winsys_bo *bo = radv_amdgpu_winsys_bo(_bo);

   if (cs->status != VK_SUCCESS)
      return;

   if (bo->is_virtual) {
      radv_amdgpu_cs_add_virtual_buffer(_cs, _bo);
      return;
   }

   if (bo->base.is_local)
      return;

   radv_amdgpu_cs_add_buffer_internal(cs, bo->bo_handle, bo->priority);
}
static void radv_amdgpu_cs_execute_secondary(struct radeon_cmdbuf *_parent,
                                             struct radeon_cmdbuf *_child)
{
   struct radv_amdgpu_cs *parent = radv_amdgpu_cs(_parent);
   struct radv_amdgpu_cs *child = radv_amdgpu_cs(_child);

   if (parent->status != VK_SUCCESS || child->status != VK_SUCCESS)
      return;

   for (unsigned i = 0; i < child->num_buffers; ++i) {
      radv_amdgpu_cs_add_buffer_internal(parent,
                                         child->handles[i].bo_handle,
                                         child->handles[i].bo_priority);
   }

   for (unsigned i = 0; i < child->num_virtual_buffers; ++i) {
      radv_amdgpu_cs_add_buffer(&parent->base, child->virtual_buffers[i]);
   }

   if (parent->ws->use_ib_bos) {
      if (parent->base.cdw + 4 > parent->base.max_dw)
         radv_amdgpu_cs_grow(&parent->base, 4);

      radeon_emit(&parent->base, PKT3(PKT3_INDIRECT_BUFFER_CIK, 2, 0));
      radeon_emit(&parent->base, child->ib.ib_mc_address);
      radeon_emit(&parent->base, child->ib.ib_mc_address >> 32);
      radeon_emit(&parent->base, child->ib.size);
   } else {
      if (parent->base.cdw + child->base.cdw > parent->base.max_dw)
         radv_amdgpu_cs_grow(&parent->base, child->base.cdw);

      memcpy(parent->base.buf + parent->base.cdw, child->base.buf, 4 * child->base.cdw);
      parent->base.cdw += child->base.cdw;
   }
}
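/* With IB BOs the secondary is executed in place via an INDIRECT_BUFFER
 * packet referencing its GPU address, so the parent only grows by the
 * 4-dword packet; on the sysmem path the child's command words have to be
 * memcpy'd into the parent instead.
 */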
static VkResult
radv_amdgpu_get_bo_list(struct radv_amdgpu_winsys *ws,
                        struct radeon_cmdbuf **cs_array,
                        unsigned count,
                        struct radv_amdgpu_winsys_bo **extra_bo_array,
                        unsigned num_extra_bo,
                        struct radeon_cmdbuf *extra_cs,
                        const struct radv_winsys_bo_list *radv_bo_list,
                        unsigned *rnum_handles,
                        struct drm_amdgpu_bo_list_entry **rhandles)
{
   struct drm_amdgpu_bo_list_entry *handles = NULL;
   unsigned num_handles = 0;

   if (ws->debug_all_bos) {
      struct radv_amdgpu_winsys_bo *bo;

      handles = malloc(sizeof(handles[0]) * ws->num_buffers);
      if (!handles)
         return VK_ERROR_OUT_OF_HOST_MEMORY;

      LIST_FOR_EACH_ENTRY(bo, &ws->global_bo_list, global_list_item) {
         assert(num_handles < ws->num_buffers);
         handles[num_handles].bo_handle = bo->bo_handle;
         handles[num_handles].bo_priority = bo->priority;
         num_handles++;
      }
   } else if (count == 1 && !num_extra_bo && !extra_cs && !radv_bo_list &&
              !radv_amdgpu_cs(cs_array[0])->num_virtual_buffers) {
      struct radv_amdgpu_cs *cs = (struct radv_amdgpu_cs*)cs_array[0];
      if (cs->num_buffers == 0)
         return VK_SUCCESS;

      handles = malloc(sizeof(handles[0]) * cs->num_buffers);
      if (!handles)
         return VK_ERROR_OUT_OF_HOST_MEMORY;

      memcpy(handles, cs->handles,
             sizeof(handles[0]) * cs->num_buffers);
      num_handles = cs->num_buffers;
   } else {
      unsigned total_buffer_count = num_extra_bo;
      num_handles = num_extra_bo;
      for (unsigned i = 0; i < count; ++i) {
         struct radv_amdgpu_cs *cs = (struct radv_amdgpu_cs*)cs_array[i];
         total_buffer_count += cs->num_buffers;
         for (unsigned j = 0; j < cs->num_virtual_buffers; ++j)
            total_buffer_count += radv_amdgpu_winsys_bo(cs->virtual_buffers[j])->bo_count;
      }

      if (extra_cs) {
         total_buffer_count += ((struct radv_amdgpu_cs*)extra_cs)->num_buffers;
      }

      if (radv_bo_list) {
         total_buffer_count += radv_bo_list->count;
      }

      if (total_buffer_count == 0)
         return VK_SUCCESS;

      handles = malloc(sizeof(handles[0]) * total_buffer_count);
      if (!handles)
         return VK_ERROR_OUT_OF_HOST_MEMORY;

      for (unsigned i = 0; i < num_extra_bo; i++) {
         handles[i].bo_handle = extra_bo_array[i]->bo_handle;
         handles[i].bo_priority = extra_bo_array[i]->priority;
      }

      for (unsigned i = 0; i < count + !!extra_cs; ++i) {
         struct radv_amdgpu_cs *cs;

         if (i == count)
            cs = (struct radv_amdgpu_cs*)extra_cs;
         else
            cs = (struct radv_amdgpu_cs*)cs_array[i];

         if (!cs->num_buffers)
            continue;

         if (num_handles == 0 && !cs->num_virtual_buffers) {
            memcpy(handles, cs->handles, cs->num_buffers * sizeof(struct drm_amdgpu_bo_list_entry));
            num_handles = cs->num_buffers;
            continue;
         }
         int unique_bo_so_far = num_handles;
         for (unsigned j = 0; j < cs->num_buffers; ++j) {
            bool found = false;
            for (unsigned k = 0; k < unique_bo_so_far; ++k) {
               if (handles[k].bo_handle == cs->handles[j].bo_handle) {
                  found = true;
                  break;
               }
            }
            if (!found) {
               handles[num_handles] = cs->handles[j];
               ++num_handles;
            }
         }
         for (unsigned j = 0; j < cs->num_virtual_buffers; ++j) {
            struct radv_amdgpu_winsys_bo *virtual_bo = radv_amdgpu_winsys_bo(cs->virtual_buffers[j]);
            for (unsigned k = 0; k < virtual_bo->bo_count; ++k) {
               struct radv_amdgpu_winsys_bo *bo = virtual_bo->bos[k];
               bool found = false;
               for (unsigned m = 0; m < num_handles; ++m) {
                  if (handles[m].bo_handle == bo->bo_handle) {
                     found = true;
                     break;
                  }
               }
               if (!found) {
                  handles[num_handles].bo_handle = bo->bo_handle;
                  handles[num_handles].bo_priority = bo->priority;
                  ++num_handles;
               }
            }
         }
      }

      if (radv_bo_list) {
         unsigned unique_bo_so_far = num_handles;
         for (unsigned i = 0; i < radv_bo_list->count; ++i) {
            struct radv_amdgpu_winsys_bo *bo = radv_amdgpu_winsys_bo(radv_bo_list->bos[i]);
            bool found = false;
            for (unsigned j = 0; j < unique_bo_so_far; ++j) {
               if (bo->bo_handle == handles[j].bo_handle) {
                  found = true;
                  break;
               }
            }
            if (!found) {
               handles[num_handles].bo_handle = bo->bo_handle;
               handles[num_handles].bo_priority = bo->priority;
               ++num_handles;
            }
         }
      }
   }

   *rnum_handles = num_handles;
   *rhandles = handles;

   return VK_SUCCESS;
}
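/* The merge path above deduplicates handles with nested linear scans, which
 * is quadratic in the number of unique BOs; presumably acceptable because
 * per-submit buffer lists are small, and the common cases (a single CS, or
 * debug_all_bos taking the whole global list) never reach it. Virtual
 * (sparse) buffers are expanded here into the real BOs backing them.
 */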
static struct amdgpu_cs_fence_info radv_set_cs_fence(struct radv_amdgpu_ctx *ctx, int ip_type, int ring)
{
   struct amdgpu_cs_fence_info ret = {0};
   if (ctx->fence_map) {
      ret.handle = radv_amdgpu_winsys_bo(ctx->fence_bo)->bo;
      ret.offset = (ip_type * MAX_RINGS_PER_TYPE + ring) * sizeof(uint64_t);
   }
   return ret;
}

static void radv_assign_last_submit(struct radv_amdgpu_ctx *ctx,
                                    struct radv_amdgpu_cs_request *request)
{
   radv_amdgpu_request_to_fence(ctx,
                                &ctx->last_submission[request->ip_type][request->ring],
                                request);
}
static VkResult
radv_amdgpu_winsys_cs_submit_chained(struct radeon_winsys_ctx *_ctx,
                                     int queue_idx,
                                     struct radv_winsys_sem_info *sem_info,
                                     const struct radv_winsys_bo_list *radv_bo_list,
                                     struct radeon_cmdbuf **cs_array,
                                     unsigned cs_count,
                                     struct radeon_cmdbuf *initial_preamble_cs,
                                     struct radeon_cmdbuf *continue_preamble_cs,
                                     struct radeon_winsys_fence *_fence)
{
   struct radv_amdgpu_ctx *ctx = radv_amdgpu_ctx(_ctx);
   struct radv_amdgpu_fence *fence = (struct radv_amdgpu_fence *)_fence;
   struct radv_amdgpu_cs *cs0 = radv_amdgpu_cs(cs_array[0]);
   struct radv_amdgpu_winsys *aws = cs0->ws;
   struct drm_amdgpu_bo_list_entry *handles = NULL;
   struct radv_amdgpu_cs_request request = {0};
   struct amdgpu_cs_ib_info ibs[2];
   unsigned number_of_ibs = 1;
   unsigned num_handles = 0;
   VkResult result;

   for (unsigned i = cs_count; i--;) {
      struct radv_amdgpu_cs *cs = radv_amdgpu_cs(cs_array[i]);

      if (cs->is_chained) {
         *cs->ib_size_ptr -= 4;
         cs->is_chained = false;
      }

      if (i + 1 < cs_count) {
         struct radv_amdgpu_cs *next = radv_amdgpu_cs(cs_array[i + 1]);
         assert(cs->base.cdw + 4 <= cs->base.max_dw);

         cs->is_chained = true;
         *cs->ib_size_ptr += 4;

         cs->base.buf[cs->base.cdw + 0] = PKT3(PKT3_INDIRECT_BUFFER_CIK, 2, 0);
         cs->base.buf[cs->base.cdw + 1] = next->ib.ib_mc_address;
         cs->base.buf[cs->base.cdw + 2] = next->ib.ib_mc_address >> 32;
         cs->base.buf[cs->base.cdw + 3] = S_3F2_CHAIN(1) | S_3F2_VALID(1) | next->ib.size;
      }
   }

   if (aws->debug_all_bos)
      u_rwlock_rdlock(&aws->global_bo_list_lock);

   /* Get the BO list. */
   result = radv_amdgpu_get_bo_list(cs0->ws, cs_array, cs_count, NULL, 0,
                                    initial_preamble_cs, radv_bo_list,
                                    &num_handles, &handles);
   if (result != VK_SUCCESS)
      goto fail;

   /* Configure the CS request. */
   if (initial_preamble_cs) {
      ibs[0] = radv_amdgpu_cs(initial_preamble_cs)->ib;
      ibs[1] = cs0->ib;
      number_of_ibs++;
   } else {
      ibs[0] = cs0->ib;
   }

   request.ip_type = cs0->hw_ip;
   request.ring = queue_idx;
   request.number_of_ibs = number_of_ibs;
   request.ibs = ibs;
   request.handles = handles;
   request.num_handles = num_handles;
   request.fence_info = radv_set_cs_fence(ctx, cs0->hw_ip, queue_idx);

   /* Submit the CS. */
   result = radv_amdgpu_cs_submit(ctx, &request, sem_info);

   free(request.handles);

   if (result != VK_SUCCESS)
      goto fail;

   if (fence)
      radv_amdgpu_request_to_fence(ctx, fence, &request);

   radv_assign_last_submit(ctx, &request);

fail:
   if (aws->debug_all_bos)
      u_rwlock_rdunlock(&aws->global_bo_list_lock);

   return result;
}
static VkResult
radv_amdgpu_winsys_cs_submit_fallback(struct radeon_winsys_ctx *_ctx,
                                      int queue_idx,
                                      struct radv_winsys_sem_info *sem_info,
                                      const struct radv_winsys_bo_list *radv_bo_list,
                                      struct radeon_cmdbuf **cs_array,
                                      unsigned cs_count,
                                      struct radeon_cmdbuf *initial_preamble_cs,
                                      struct radeon_cmdbuf *continue_preamble_cs,
                                      struct radeon_winsys_fence *_fence)
{
   struct radv_amdgpu_ctx *ctx = radv_amdgpu_ctx(_ctx);
   struct radv_amdgpu_fence *fence = (struct radv_amdgpu_fence *)_fence;
   struct drm_amdgpu_bo_list_entry *handles = NULL;
   struct radv_amdgpu_cs_request request = {0};
   struct amdgpu_cs_ib_info *ibs;
   struct radv_amdgpu_cs *cs0;
   struct radv_amdgpu_winsys *aws;
   unsigned num_handles = 0;
   unsigned number_of_ibs;
   VkResult result;

   assert(cs_count);
   cs0 = radv_amdgpu_cs(cs_array[0]);
   aws = cs0->ws;

   /* Compute the number of IBs for this submit. */
   number_of_ibs = cs_count + !!initial_preamble_cs;

   if (aws->debug_all_bos)
      u_rwlock_rdlock(&aws->global_bo_list_lock);

   /* Get the BO list. */
   result = radv_amdgpu_get_bo_list(cs0->ws, &cs_array[0], cs_count, NULL, 0,
                                    initial_preamble_cs, radv_bo_list,
                                    &num_handles, &handles);
   if (result != VK_SUCCESS) {
      goto fail;
   }

   ibs = malloc(number_of_ibs * sizeof(*ibs));
   if (!ibs) {
      free(handles);
      result = VK_ERROR_OUT_OF_HOST_MEMORY;
      goto fail;
   }

   /* Configure the CS request. */
   if (initial_preamble_cs)
      ibs[0] = radv_amdgpu_cs(initial_preamble_cs)->ib;

   for (unsigned i = 0; i < cs_count; i++) {
      struct radv_amdgpu_cs *cs = radv_amdgpu_cs(cs_array[i]);

      ibs[i + !!initial_preamble_cs] = cs->ib;

      if (cs->is_chained) {
         *cs->ib_size_ptr -= 4;
         cs->is_chained = false;
      }
   }

   request.ip_type = cs0->hw_ip;
   request.ring = queue_idx;
   request.handles = handles;
   request.num_handles = num_handles;
   request.number_of_ibs = number_of_ibs;
   request.ibs = ibs;
   request.fence_info = radv_set_cs_fence(ctx, cs0->hw_ip, queue_idx);

   /* Submit the CS. */
   result = radv_amdgpu_cs_submit(ctx, &request, sem_info);

   free(request.handles);
   free(ibs);

   if (result != VK_SUCCESS)
      goto fail;

   if (fence)
      radv_amdgpu_request_to_fence(ctx, fence, &request);

   radv_assign_last_submit(ctx, &request);

fail:
   if (aws->debug_all_bos)
      u_rwlock_rdunlock(&aws->global_bo_list_lock);

   return result;
}
static VkResult
radv_amdgpu_winsys_cs_submit_sysmem(struct radeon_winsys_ctx *_ctx,
                                    int queue_idx,
                                    struct radv_winsys_sem_info *sem_info,
                                    const struct radv_winsys_bo_list *radv_bo_list,
                                    struct radeon_cmdbuf **cs_array,
                                    unsigned cs_count,
                                    struct radeon_cmdbuf *initial_preamble_cs,
                                    struct radeon_cmdbuf *continue_preamble_cs,
                                    struct radeon_winsys_fence *_fence)
{
   struct radv_amdgpu_ctx *ctx = radv_amdgpu_ctx(_ctx);
   struct radv_amdgpu_fence *fence = (struct radv_amdgpu_fence *)_fence;
   struct radv_amdgpu_cs *cs0 = radv_amdgpu_cs(cs_array[0]);
   struct radeon_winsys *ws = (struct radeon_winsys*)cs0->ws;
   struct radv_amdgpu_winsys *aws = cs0->ws;
   struct radv_amdgpu_cs_request request;
   uint32_t pad_word = PKT3_NOP_PAD;
   bool emit_signal_sem = sem_info->cs_emit_signal;
   VkResult result;

   if (radv_amdgpu_winsys(ws)->info.chip_class == GFX6)
      pad_word = 0x80000000;

   assert(cs_count);

   for (unsigned i = 0; i < cs_count;) {
      struct amdgpu_cs_ib_info *ibs;
      struct radeon_winsys_bo **bos;
      struct radeon_cmdbuf *preamble_cs = i ? continue_preamble_cs : initial_preamble_cs;
      struct radv_amdgpu_cs *cs = radv_amdgpu_cs(cs_array[i]);
      struct drm_amdgpu_bo_list_entry *handles = NULL;
      unsigned num_handles = 0;
      unsigned number_of_ibs;
      uint32_t *ptr;
      unsigned cnt = 0;
      unsigned pad_words = 0;

      /* Compute the number of IBs for this submit. */
      number_of_ibs = cs->num_old_cs_buffers + 1;

      ibs = malloc(number_of_ibs * sizeof(*ibs));
      if (!ibs)
         return VK_ERROR_OUT_OF_HOST_MEMORY;

      bos = malloc(number_of_ibs * sizeof(*bos));
      if (!bos) {
         free(ibs);
         return VK_ERROR_OUT_OF_HOST_MEMORY;
      }

      if (number_of_ibs > 1) {
         /* Special path when the maximum size in dwords has
          * been reached because we need to handle more than one
          * IB per submit.
          */
         struct radeon_cmdbuf **new_cs_array;
         unsigned idx = 0;

         new_cs_array = malloc(cs->num_old_cs_buffers *
                               sizeof(*new_cs_array));
         assert(new_cs_array);

         for (unsigned j = 0; j < cs->num_old_cs_buffers; j++)
            new_cs_array[idx++] = &cs->old_cs_buffers[j];
         new_cs_array[idx++] = cs_array[i];

         for (unsigned j = 0; j < number_of_ibs; j++) {
            struct radeon_cmdbuf *rcs = new_cs_array[j];
            bool needs_preamble = preamble_cs && j == 0;
            unsigned size = 0;

            if (needs_preamble)
               size += preamble_cs->cdw;
            size += rcs->cdw;

            assert(size < 0xffff8);

            while (!size || (size & 7)) {
               size++;
               pad_words++;
            }

            bos[j] = ws->buffer_create(ws, 4 * size, 4096,
                                       aws->cs_bo_domain,
                                       RADEON_FLAG_CPU_ACCESS |
                                       RADEON_FLAG_NO_INTERPROCESS_SHARING |
                                       RADEON_FLAG_READ_ONLY,
                                       RADV_BO_PRIORITY_CS);
            ptr = ws->buffer_map(bos[j]);

            if (needs_preamble) {
               memcpy(ptr, preamble_cs->buf, preamble_cs->cdw * 4);
               ptr += preamble_cs->cdw;
            }

            memcpy(ptr, rcs->buf, 4 * rcs->cdw);
            ptr += rcs->cdw;

            for (unsigned k = 0; k < pad_words; ++k)
               *ptr++ = pad_word;

            ibs[j].size = size;
            ibs[j].ib_mc_address = radv_buffer_get_va(bos[j]);
            ibs[j].flags = 0;
         }

         cnt++;
         free(new_cs_array);
      } else {
         unsigned size = 0;

         if (preamble_cs)
            size += preamble_cs->cdw;

         while (i + cnt < cs_count && 0xffff8 - size >= radv_amdgpu_cs(cs_array[i + cnt])->base.cdw) {
            size += radv_amdgpu_cs(cs_array[i + cnt])->base.cdw;
            ++cnt;
         }

         while (!size || (size & 7)) {
            size++;
            pad_words++;
         }
         assert(cnt);

         bos[0] = ws->buffer_create(ws, 4 * size, 4096,
                                    aws->cs_bo_domain,
                                    RADEON_FLAG_CPU_ACCESS |
                                    RADEON_FLAG_NO_INTERPROCESS_SHARING |
                                    RADEON_FLAG_READ_ONLY,
                                    RADV_BO_PRIORITY_CS);
         ptr = ws->buffer_map(bos[0]);

         if (preamble_cs) {
            memcpy(ptr, preamble_cs->buf, preamble_cs->cdw * 4);
            ptr += preamble_cs->cdw;
         }

         for (unsigned j = 0; j < cnt; ++j) {
            struct radv_amdgpu_cs *cs2 = radv_amdgpu_cs(cs_array[i + j]);
            memcpy(ptr, cs2->base.buf, 4 * cs2->base.cdw);
            ptr += cs2->base.cdw;
         }

         for (unsigned j = 0; j < pad_words; ++j)
            *ptr++ = pad_word;

         ibs[0].size = size;
         ibs[0].ib_mc_address = radv_buffer_get_va(bos[0]);
         ibs[0].flags = 0;
      }

      if (aws->debug_all_bos)
         u_rwlock_rdlock(&aws->global_bo_list_lock);

      result = radv_amdgpu_get_bo_list(cs0->ws, &cs_array[i], cnt,
                                       (struct radv_amdgpu_winsys_bo **)bos,
                                       number_of_ibs, preamble_cs,
                                       radv_bo_list,
                                       &num_handles, &handles);
      if (result != VK_SUCCESS) {
         free(ibs);
         free(bos);
         if (aws->debug_all_bos)
            u_rwlock_rdunlock(&aws->global_bo_list_lock);
         return result;
      }

      memset(&request, 0, sizeof(request));

      request.ip_type = cs0->hw_ip;
      request.ring = queue_idx;
      request.handles = handles;
      request.num_handles = num_handles;
      request.number_of_ibs = number_of_ibs;
      request.ibs = ibs;
      request.fence_info = radv_set_cs_fence(ctx, cs0->hw_ip, queue_idx);

      sem_info->cs_emit_signal = (i == cs_count - cnt) ? emit_signal_sem : false;
      result = radv_amdgpu_cs_submit(ctx, &request, sem_info);

      free(request.handles);
      if (aws->debug_all_bos)
         u_rwlock_rdunlock(&aws->global_bo_list_lock);

      for (unsigned j = 0; j < number_of_ibs; j++) {
         ws->buffer_destroy(bos[j]);
      }

      free(ibs);
      free(bos);

      if (result != VK_SUCCESS)
         return result;

      i += cnt;
   }

   if (fence)
      radv_amdgpu_request_to_fence(ctx, fence, &request);

   radv_assign_last_submit(ctx, &request);

   return VK_SUCCESS;
}
static VkResult radv_amdgpu_winsys_cs_submit(struct radeon_winsys_ctx *_ctx,
                                             int queue_idx,
                                             struct radeon_cmdbuf **cs_array,
                                             unsigned cs_count,
                                             struct radeon_cmdbuf *initial_preamble_cs,
                                             struct radeon_cmdbuf *continue_preamble_cs,
                                             struct radv_winsys_sem_info *sem_info,
                                             const struct radv_winsys_bo_list *bo_list,
                                             bool can_patch,
                                             struct radeon_winsys_fence *_fence)
{
   struct radv_amdgpu_cs *cs = radv_amdgpu_cs(cs_array[0]);
   struct radv_amdgpu_ctx *ctx = radv_amdgpu_ctx(_ctx);
   VkResult result;

   assert(sem_info);
   if (!cs->ws->use_ib_bos) {
      result = radv_amdgpu_winsys_cs_submit_sysmem(_ctx, queue_idx, sem_info, bo_list, cs_array,
                                                   cs_count, initial_preamble_cs, continue_preamble_cs, _fence);
   } else if (can_patch) {
      result = radv_amdgpu_winsys_cs_submit_chained(_ctx, queue_idx, sem_info, bo_list, cs_array,
                                                    cs_count, initial_preamble_cs, continue_preamble_cs, _fence);
   } else {
      result = radv_amdgpu_winsys_cs_submit_fallback(_ctx, queue_idx, sem_info, bo_list, cs_array,
                                                     cs_count, initial_preamble_cs, continue_preamble_cs, _fence);
   }

   radv_amdgpu_signal_sems(ctx, cs->hw_ip, queue_idx, sem_info);
   return result;
}
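/* Submission strategy summary: the sysmem path (no IB BOs) copies command
 * words into freshly allocated GPU buffers; the chained path patches each
 * CS tail to jump to the next one so the whole array goes out as a single
 * kernel submission; the fallback path, used when the CSes must not be
 * patched (can_patch == false), passes them as separate IBs of one request
 * instead.
 */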
static void *radv_amdgpu_winsys_get_cpu_addr(void *_cs, uint64_t addr)
{
   struct radv_amdgpu_cs *cs = (struct radv_amdgpu_cs *)_cs;
   void *ret = NULL;

   if (!cs->ib_buffer)
      return NULL;
   for (unsigned i = 0; i <= cs->num_old_ib_buffers; ++i) {
      struct radv_amdgpu_winsys_bo *bo;

      bo = (struct radv_amdgpu_winsys_bo*)
           (i == cs->num_old_ib_buffers ? cs->ib_buffer : cs->old_ib_buffers[i]);
      if (addr >= bo->base.va && addr - bo->base.va < bo->size) {
         if (amdgpu_bo_cpu_map(bo->bo, &ret) == 0)
            return (char *)ret + (addr - bo->base.va);
      }
   }
   if (cs->ws->debug_all_bos) {
      u_rwlock_rdlock(&cs->ws->global_bo_list_lock);
      list_for_each_entry(struct radv_amdgpu_winsys_bo, bo,
                          &cs->ws->global_bo_list, global_list_item) {
         if (addr >= bo->base.va && addr - bo->base.va < bo->size) {
            if (amdgpu_bo_cpu_map(bo->bo, &ret) == 0) {
               u_rwlock_rdunlock(&cs->ws->global_bo_list_lock);
               return (char *)ret + (addr - bo->base.va);
            }
         }
      }
      u_rwlock_rdunlock(&cs->ws->global_bo_list_lock);
   }
   return ret;
}

static void radv_amdgpu_winsys_cs_dump(struct radeon_cmdbuf *_cs,
                                       FILE* file,
                                       const int *trace_ids, int trace_id_count)
{
   struct radv_amdgpu_cs *cs = (struct radv_amdgpu_cs *)_cs;
   void *ib = cs->base.buf;
   int num_dw = cs->base.cdw;

   if (cs->ws->use_ib_bos) {
      ib = radv_amdgpu_winsys_get_cpu_addr(cs, cs->ib.ib_mc_address);
      num_dw = cs->ib.size;
   }

   ac_parse_ib(file, ib, num_dw, trace_ids, trace_id_count, "main IB",
               cs->ws->info.chip_class, radv_amdgpu_winsys_get_cpu_addr, cs);
}

static uint32_t radv_to_amdgpu_priority(enum radeon_ctx_priority radv_priority)
{
   switch (radv_priority) {
   case RADEON_CTX_PRIORITY_REALTIME:
      return AMDGPU_CTX_PRIORITY_VERY_HIGH;
   case RADEON_CTX_PRIORITY_HIGH:
      return AMDGPU_CTX_PRIORITY_HIGH;
   case RADEON_CTX_PRIORITY_MEDIUM:
      return AMDGPU_CTX_PRIORITY_NORMAL;
   case RADEON_CTX_PRIORITY_LOW:
      return AMDGPU_CTX_PRIORITY_LOW;
   default:
      unreachable("Invalid context priority");
   }
}
static VkResult radv_amdgpu_ctx_create(struct radeon_winsys *_ws,
                                       enum radeon_ctx_priority priority,
                                       struct radeon_winsys_ctx **rctx)
{
   struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);
   struct radv_amdgpu_ctx *ctx = CALLOC_STRUCT(radv_amdgpu_ctx);
   uint32_t amdgpu_priority = radv_to_amdgpu_priority(priority);
   VkResult result;
   int r;

   if (!ctx)
      return VK_ERROR_OUT_OF_HOST_MEMORY;

   r = amdgpu_cs_ctx_create2(ws->dev, amdgpu_priority, &ctx->ctx);
   if (r && r == -EACCES) {
      result = VK_ERROR_NOT_PERMITTED_EXT;
      goto fail_create;
   } else if (r) {
      fprintf(stderr, "amdgpu: radv_amdgpu_cs_ctx_create2 failed. (%i)\n", r);
      result = VK_ERROR_OUT_OF_HOST_MEMORY;
      goto fail_create;
   }
   ctx->ws = ws;

   assert(AMDGPU_HW_IP_NUM * MAX_RINGS_PER_TYPE * sizeof(uint64_t) <= 4096);
   ctx->fence_bo = ws->base.buffer_create(&ws->base, 4096, 8,
                                          RADEON_DOMAIN_GTT,
                                          RADEON_FLAG_CPU_ACCESS |
                                          RADEON_FLAG_NO_INTERPROCESS_SHARING,
                                          RADV_BO_PRIORITY_CS);
   if (!ctx->fence_bo) {
      result = VK_ERROR_OUT_OF_DEVICE_MEMORY;
      goto fail_alloc;
   }

   ctx->fence_map = (uint64_t *)ws->base.buffer_map(ctx->fence_bo);
   if (!ctx->fence_map) {
      result = VK_ERROR_OUT_OF_DEVICE_MEMORY;
      goto fail_map;
   }

   memset(ctx->fence_map, 0, 4096);

   *rctx = (struct radeon_winsys_ctx *)ctx;
   return VK_SUCCESS;

fail_map:
   ws->base.buffer_destroy(ctx->fence_bo);
fail_alloc:
   amdgpu_cs_ctx_free(ctx->ctx);
fail_create:
   FREE(ctx);
   return result;
}

static void radv_amdgpu_ctx_destroy(struct radeon_winsys_ctx *rwctx)
{
   struct radv_amdgpu_ctx *ctx = (struct radv_amdgpu_ctx *)rwctx;
   ctx->ws->base.buffer_destroy(ctx->fence_bo);
   amdgpu_cs_ctx_free(ctx->ctx);
   FREE(ctx);
}

static bool radv_amdgpu_ctx_wait_idle(struct radeon_winsys_ctx *rwctx,
                                      enum ring_type ring_type, int ring_index)
{
   struct radv_amdgpu_ctx *ctx = (struct radv_amdgpu_ctx *)rwctx;
   int ip_type = ring_to_hw_ip(ring_type);

   if (ctx->last_submission[ip_type][ring_index].fence.fence) {
      uint32_t expired;
      int ret = amdgpu_cs_query_fence_status(&ctx->last_submission[ip_type][ring_index].fence,
                                             1000000000ull, 0, &expired);

      if (ret || !expired)
         return false;
   }

   return true;
}
static struct radeon_winsys_sem *radv_amdgpu_create_sem(struct radeon_winsys *_ws)
{
   struct amdgpu_cs_fence *sem = CALLOC_STRUCT(amdgpu_cs_fence);
   if (!sem)
      return NULL;

   return (struct radeon_winsys_sem *)sem;
}

static void radv_amdgpu_destroy_sem(struct radeon_winsys_sem *_sem)
{
   struct amdgpu_cs_fence *sem = (struct amdgpu_cs_fence *)_sem;
   FREE(sem);
}

static int radv_amdgpu_signal_sems(struct radv_amdgpu_ctx *ctx,
                                   uint32_t ip_type,
                                   uint32_t ring,
                                   struct radv_winsys_sem_info *sem_info)
{
   for (unsigned i = 0; i < sem_info->signal.sem_count; i++) {
      struct amdgpu_cs_fence *sem = (struct amdgpu_cs_fence *)(sem_info->signal.sem)[i];

      *sem = ctx->last_submission[ip_type][ring].fence;
   }
   return 0;
}
static void *radv_amdgpu_cs_alloc_syncobj_chunk(struct radv_winsys_sem_counts *counts,
                                                const uint32_t *syncobj_override,
                                                struct drm_amdgpu_cs_chunk *chunk, int chunk_id)
{
   const uint32_t *src = syncobj_override ? syncobj_override : counts->syncobj;
   struct drm_amdgpu_cs_chunk_sem *syncobj = malloc(sizeof(struct drm_amdgpu_cs_chunk_sem) * counts->syncobj_count);
   if (!syncobj)
      return NULL;

   for (unsigned i = 0; i < counts->syncobj_count; i++) {
      struct drm_amdgpu_cs_chunk_sem *sem = &syncobj[i];
      sem->handle = src[i];
   }

   chunk->chunk_id = chunk_id;
   chunk->length_dw = sizeof(struct drm_amdgpu_cs_chunk_sem) / 4 * counts->syncobj_count;
   chunk->chunk_data = (uint64_t)(uintptr_t)syncobj;
   return syncobj;
}

static void *
radv_amdgpu_cs_alloc_timeline_syncobj_chunk(struct radv_winsys_sem_counts *counts,
                                            const uint32_t *syncobj_override,
                                            struct drm_amdgpu_cs_chunk *chunk, int chunk_id)
{
   const uint32_t *src = syncobj_override ? syncobj_override : counts->syncobj;
   struct drm_amdgpu_cs_chunk_syncobj *syncobj = malloc(sizeof(struct drm_amdgpu_cs_chunk_syncobj) *
                                                        (counts->syncobj_count + counts->timeline_syncobj_count));
   if (!syncobj)
      return NULL;

   for (unsigned i = 0; i < counts->syncobj_count; i++) {
      struct drm_amdgpu_cs_chunk_syncobj *sem = &syncobj[i];
      sem->handle = src[i];
      sem->flags = 0;
      sem->point = 0;
   }

   for (unsigned i = 0; i < counts->timeline_syncobj_count; i++) {
      struct drm_amdgpu_cs_chunk_syncobj *sem = &syncobj[i + counts->syncobj_count];
      sem->handle = counts->syncobj[i + counts->syncobj_count];
      sem->flags = DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT;
      sem->point = counts->points[i];
   }

   chunk->chunk_id = chunk_id;
   chunk->length_dw = sizeof(struct drm_amdgpu_cs_chunk_syncobj) / 4 *
                      (counts->syncobj_count + counts->timeline_syncobj_count);
   chunk->chunk_data = (uint64_t)(uintptr_t)syncobj;
   return syncobj;
}
static int radv_amdgpu_cache_alloc_syncobjs(struct radv_amdgpu_winsys *ws, unsigned count, uint32_t *dst)
{
   pthread_mutex_lock(&ws->syncobj_lock);
   if (count > ws->syncobj_capacity) {
      if (ws->syncobj_capacity > UINT32_MAX / 2)
         goto fail;

      unsigned new_capacity = MAX2(count, ws->syncobj_capacity * 2);
      uint32_t *n = realloc(ws->syncobj, new_capacity * sizeof(*ws->syncobj));
      if (!n)
         goto fail;
      ws->syncobj = n;
      ws->syncobj_capacity = new_capacity;
   }

   while (ws->syncobj_count < count) {
      int r = amdgpu_cs_create_syncobj(ws->dev, ws->syncobj + ws->syncobj_count);
      if (r)
         goto fail;
      ++ws->syncobj_count;
   }

   for (unsigned i = 0; i < count; ++i)
      dst[i] = ws->syncobj[--ws->syncobj_count];

   pthread_mutex_unlock(&ws->syncobj_lock);
   return 0;

fail:
   pthread_mutex_unlock(&ws->syncobj_lock);
   return -ENOMEM;
}

static void radv_amdgpu_cache_free_syncobjs(struct radv_amdgpu_winsys *ws, unsigned count, uint32_t *src)
{
   pthread_mutex_lock(&ws->syncobj_lock);

   uint32_t cache_count = MIN2(count, UINT32_MAX - ws->syncobj_count);
   if (cache_count + ws->syncobj_count > ws->syncobj_capacity) {
      unsigned new_capacity = MAX2(ws->syncobj_count + cache_count, ws->syncobj_capacity * 2);
      uint32_t* n = realloc(ws->syncobj, new_capacity * sizeof(*ws->syncobj));
      if (n) {
         ws->syncobj = n;
         ws->syncobj_capacity = new_capacity;
      }
   }

   for (unsigned i = 0; i < count; ++i) {
      if (ws->syncobj_count < ws->syncobj_capacity)
         ws->syncobj[ws->syncobj_count++] = src[i];
      else
         amdgpu_cs_destroy_syncobj(ws->dev, src[i]);
   }

   pthread_mutex_unlock(&ws->syncobj_lock);
}

static int radv_amdgpu_cs_prepare_syncobjs(struct radv_amdgpu_winsys *ws,
                                           struct radv_winsys_sem_counts *counts,
                                           uint32_t **out_syncobjs)
{
   int r = 0;

   if (!ws->info.has_timeline_syncobj || !counts->syncobj_count) {
      *out_syncobjs = NULL;
      return 0;
   }

   *out_syncobjs = malloc(counts->syncobj_count * sizeof(**out_syncobjs));
   if (!*out_syncobjs)
      return -ENOMEM;

   r = radv_amdgpu_cache_alloc_syncobjs(ws, counts->syncobj_count, *out_syncobjs);
   if (r)
      return r;

   for (unsigned i = 0; i < counts->syncobj_count; ++i) {
      r = amdgpu_cs_syncobj_transfer(ws->dev, (*out_syncobjs)[i], 0, counts->syncobj[i], 0, DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT);
      if (r)
         goto fail;
   }

   r = amdgpu_cs_syncobj_reset(ws->dev, counts->syncobj, counts->syncobj_reset_count);
   if (r)
      goto fail;

   return 0;
fail:
   radv_amdgpu_cache_free_syncobjs(ws, counts->syncobj_count, *out_syncobjs);
   free(*out_syncobjs);
   *out_syncobjs = NULL;
   return r;
}
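/* The transfer-then-reset dance above appears to exist so that binary
 * syncobjs handed to a wait can be reset for reuse right away: the actual
 * wait is performed on private copies taken from the syncobj cache
 * (transferred with WAIT_FOR_SUBMIT semantics), so resetting the caller's
 * syncobjs cannot race with the kernel still waiting on them.
 */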
static VkResult
radv_amdgpu_cs_submit(struct radv_amdgpu_ctx *ctx,
                      struct radv_amdgpu_cs_request *request,
                      struct radv_winsys_sem_info *sem_info)
{
   int r;
   int num_chunks;
   int size;
   bool user_fence;
   struct drm_amdgpu_cs_chunk *chunks;
   struct drm_amdgpu_cs_chunk_data *chunk_data;
   struct drm_amdgpu_cs_chunk_dep *sem_dependencies = NULL;
   bool use_bo_list_create = ctx->ws->info.drm_minor < 27;
   struct drm_amdgpu_bo_list_in bo_list_in;
   void *wait_syncobj = NULL, *signal_syncobj = NULL;
   uint32_t *in_syncobjs = NULL;
   int i;
   struct amdgpu_cs_fence *sem;
   uint32_t bo_list = 0;
   VkResult result = VK_SUCCESS;

   user_fence = (request->fence_info.handle != NULL);
   size = request->number_of_ibs + (user_fence ? 2 : 1) + (!use_bo_list_create ? 1 : 0) + 3;

   chunks = malloc(sizeof(chunks[0]) * size);
   if (!chunks)
      return VK_ERROR_OUT_OF_HOST_MEMORY;

   size = request->number_of_ibs + (user_fence ? 1 : 0);

   chunk_data = malloc(sizeof(chunk_data[0]) * size);
   if (!chunk_data) {
      result = VK_ERROR_OUT_OF_HOST_MEMORY;
      goto error_out;
   }

   num_chunks = request->number_of_ibs;
   for (i = 0; i < request->number_of_ibs; i++) {
      struct amdgpu_cs_ib_info *ib;
      chunks[i].chunk_id = AMDGPU_CHUNK_ID_IB;
      chunks[i].length_dw = sizeof(struct drm_amdgpu_cs_chunk_ib) / 4;
      chunks[i].chunk_data = (uint64_t)(uintptr_t)&chunk_data[i];

      ib = &request->ibs[i];

      chunk_data[i].ib_data._pad = 0;
      chunk_data[i].ib_data.va_start = ib->ib_mc_address;
      chunk_data[i].ib_data.ib_bytes = ib->size * 4;
      chunk_data[i].ib_data.ip_type = request->ip_type;
      chunk_data[i].ib_data.ip_instance = request->ip_instance;
      chunk_data[i].ib_data.ring = request->ring;
      chunk_data[i].ib_data.flags = ib->flags;
   }

   if (user_fence) {
      i = num_chunks++;

      chunks[i].chunk_id = AMDGPU_CHUNK_ID_FENCE;
      chunks[i].length_dw = sizeof(struct drm_amdgpu_cs_chunk_fence) / 4;
      chunks[i].chunk_data = (uint64_t)(uintptr_t)&chunk_data[i];

      amdgpu_cs_chunk_fence_info_to_data(&request->fence_info,
                                         &chunk_data[i]);
   }

   if ((sem_info->wait.syncobj_count || sem_info->wait.timeline_syncobj_count) && sem_info->cs_emit_wait) {
      r = radv_amdgpu_cs_prepare_syncobjs(ctx->ws, &sem_info->wait, &in_syncobjs);
      if (r)
         goto error_out;

      if (ctx->ws->info.has_timeline_syncobj) {
         wait_syncobj = radv_amdgpu_cs_alloc_timeline_syncobj_chunk(&sem_info->wait,
                                                                    in_syncobjs,
                                                                    &chunks[num_chunks],
                                                                    AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_WAIT);
      } else {
         wait_syncobj = radv_amdgpu_cs_alloc_syncobj_chunk(&sem_info->wait,
                                                           in_syncobjs,
                                                           &chunks[num_chunks],
                                                           AMDGPU_CHUNK_ID_SYNCOBJ_IN);
      }
      if (!wait_syncobj) {
         result = VK_ERROR_OUT_OF_HOST_MEMORY;
         goto error_out;
      }
      num_chunks++;

      if (sem_info->wait.sem_count == 0)
         sem_info->cs_emit_wait = false;
   }

   if (sem_info->wait.sem_count && sem_info->cs_emit_wait) {
      sem_dependencies = malloc(sizeof(sem_dependencies[0]) * sem_info->wait.sem_count);
      if (!sem_dependencies) {
         result = VK_ERROR_OUT_OF_HOST_MEMORY;
         goto error_out;
      }

      int sem_count = 0;

      for (unsigned j = 0; j < sem_info->wait.sem_count; j++) {
         sem = (struct amdgpu_cs_fence *)sem_info->wait.sem[j];
         if (!sem->context)
            continue;

         struct drm_amdgpu_cs_chunk_dep *dep = &sem_dependencies[sem_count++];

         amdgpu_cs_chunk_fence_to_dep(sem, dep);

         sem->context = NULL;
      }
      i = num_chunks++;

      /* dependencies chunk */
      chunks[i].chunk_id = AMDGPU_CHUNK_ID_DEPENDENCIES;
      chunks[i].length_dw = sizeof(struct drm_amdgpu_cs_chunk_dep) / 4 * sem_count;
      chunks[i].chunk_data = (uint64_t)(uintptr_t)sem_dependencies;

      sem_info->cs_emit_wait = false;
   }

   if ((sem_info->signal.syncobj_count || sem_info->signal.timeline_syncobj_count) && sem_info->cs_emit_signal) {
      if (ctx->ws->info.has_timeline_syncobj) {
         signal_syncobj = radv_amdgpu_cs_alloc_timeline_syncobj_chunk(&sem_info->signal,
                                                                      NULL,
                                                                      &chunks[num_chunks],
                                                                      AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_SIGNAL);
      } else {
         signal_syncobj = radv_amdgpu_cs_alloc_syncobj_chunk(&sem_info->signal,
                                                             NULL,
                                                             &chunks[num_chunks],
                                                             AMDGPU_CHUNK_ID_SYNCOBJ_OUT);
      }
      if (!signal_syncobj) {
         result = VK_ERROR_OUT_OF_HOST_MEMORY;
         goto error_out;
      }
      num_chunks++;
   }

   if (use_bo_list_create) {
      /* Legacy path creating the buffer list handle and passing it
       * to the CS ioctl.
       */
      r = amdgpu_bo_list_create_raw(ctx->ws->dev, request->num_handles,
                                    request->handles, &bo_list);
      if (r) {
         if (r == -ENOMEM) {
            fprintf(stderr, "amdgpu: Not enough memory for buffer list creation.\n");
            result = VK_ERROR_OUT_OF_HOST_MEMORY;
         } else {
            fprintf(stderr, "amdgpu: buffer list creation failed (%d).\n", r);
            result = VK_ERROR_UNKNOWN;
         }
         goto error_out;
      }
   } else {
      /* Standard path passing the buffer list via the CS ioctl. */
      bo_list_in.operation = ~0;
      bo_list_in.list_handle = ~0;
      bo_list_in.bo_number = request->num_handles;
      bo_list_in.bo_info_size = sizeof(struct drm_amdgpu_bo_list_entry);
      bo_list_in.bo_info_ptr = (uint64_t)(uintptr_t)request->handles;

      chunks[num_chunks].chunk_id = AMDGPU_CHUNK_ID_BO_HANDLES;
      chunks[num_chunks].length_dw = sizeof(struct drm_amdgpu_bo_list_in) / 4;
      chunks[num_chunks].chunk_data = (uintptr_t)&bo_list_in;
      num_chunks++;
   }

   r = amdgpu_cs_submit_raw2(ctx->ws->dev,
                             ctx->ctx,
                             bo_list,
                             num_chunks,
                             chunks,
                             &request->seq_no);
   if (r) {
      if (r == -ENOMEM) {
         fprintf(stderr, "amdgpu: Not enough memory for command submission.\n");
         result = VK_ERROR_OUT_OF_HOST_MEMORY;
      } else if (r == -ECANCELED) {
         fprintf(stderr, "amdgpu: The CS has been cancelled because the context is lost.\n");
         result = VK_ERROR_DEVICE_LOST;
      } else {
         fprintf(stderr, "amdgpu: The CS has been rejected, "
                         "see dmesg for more information (%i).\n", r);
         result = VK_ERROR_UNKNOWN;
      }
   }

   if (bo_list)
      amdgpu_bo_list_destroy_raw(ctx->ws->dev, bo_list);

error_out:
   if (in_syncobjs) {
      radv_amdgpu_cache_free_syncobjs(ctx->ws, sem_info->wait.syncobj_count, in_syncobjs);
      free(in_syncobjs);
   }
   free(chunks);
   free(chunk_data);
   free(sem_dependencies);
   free(wait_syncobj);
   free(signal_syncobj);
   return result;
}
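/* For reference, a typical chunk array built above for one IB with a user
 * fence and binary syncobjs on both sides looks like (in construction
 * order):
 *
 *    [0] AMDGPU_CHUNK_ID_IB           -> drm_amdgpu_cs_chunk_ib
 *    [1] AMDGPU_CHUNK_ID_FENCE        -> drm_amdgpu_cs_chunk_fence
 *    [2] AMDGPU_CHUNK_ID_SYNCOBJ_IN   -> wait syncobj handles
 *    [3] AMDGPU_CHUNK_ID_SYNCOBJ_OUT  -> signal syncobj handles
 *    [4] AMDGPU_CHUNK_ID_BO_HANDLES   -> drm_amdgpu_bo_list_in (DRM 3.27+)
 *
 * all passed to the kernel in a single amdgpu_cs_submit_raw2() call.
 */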
static int radv_amdgpu_create_syncobj(struct radeon_winsys *_ws,
                                      bool create_signaled,
                                      uint32_t *handle)
{
   struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);
   uint32_t flags = 0;

   if (create_signaled)
      flags |= DRM_SYNCOBJ_CREATE_SIGNALED;

   return amdgpu_cs_create_syncobj2(ws->dev, flags, handle);
}

static void radv_amdgpu_destroy_syncobj(struct radeon_winsys *_ws,
                                        uint32_t handle)
{
   struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);
   amdgpu_cs_destroy_syncobj(ws->dev, handle);
}

static void radv_amdgpu_reset_syncobj(struct radeon_winsys *_ws,
                                      uint32_t handle)
{
   struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);
   amdgpu_cs_syncobj_reset(ws->dev, &handle, 1);
}

static void radv_amdgpu_signal_syncobj(struct radeon_winsys *_ws,
                                       uint32_t handle, uint64_t point)
{
   struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);
   if (point)
      amdgpu_cs_syncobj_timeline_signal(ws->dev, &handle, &point, 1);
   else
      amdgpu_cs_syncobj_signal(ws->dev, &handle, 1);
}

static VkResult radv_amdgpu_query_syncobj(struct radeon_winsys *_ws,
                                          uint32_t handle, uint64_t *point)
{
   struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);
   int ret = amdgpu_cs_syncobj_query(ws->dev, &handle, point, 1);
   if (ret == 0)
      return VK_SUCCESS;
   else if (ret == -ENOMEM)
      return VK_ERROR_OUT_OF_HOST_MEMORY;
   else {
      /* Remaining error are driver internal issues: EFAULT for
       * dangling pointers and ENOENT for non-existing syncobj. */
      fprintf(stderr, "amdgpu: internal error in radv_amdgpu_query_syncobj. (%d)\n", ret);
      return VK_ERROR_UNKNOWN;
   }
}

static bool radv_amdgpu_wait_syncobj(struct radeon_winsys *_ws, const uint32_t *handles,
                                     uint32_t handle_count, bool wait_all, uint64_t timeout)
{
   struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);
   uint32_t tmp;

   /* The timeouts are signed, while vulkan timeouts are unsigned. */
   timeout = MIN2(timeout, INT64_MAX);

   int ret = amdgpu_cs_syncobj_wait(ws->dev, (uint32_t*)handles, handle_count, timeout,
                                    DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT |
                                    (wait_all ? DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL : 0),
                                    &tmp);
   if (ret == 0) {
      return true;
   } else if (ret == -ETIME) {
      return false;
   } else {
      fprintf(stderr, "amdgpu: radv_amdgpu_wait_syncobj failed!\nerrno: %d\n", errno);
      return false;
   }
}

static bool radv_amdgpu_wait_timeline_syncobj(struct radeon_winsys *_ws, const uint32_t *handles,
                                              const uint64_t *points, uint32_t handle_count,
                                              bool wait_all, bool available, uint64_t timeout)
{
   struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);

   /* The timeouts are signed, while vulkan timeouts are unsigned. */
   timeout = MIN2(timeout, INT64_MAX);

   int ret = amdgpu_cs_syncobj_timeline_wait(ws->dev, (uint32_t*)handles, (uint64_t*)points,
                                             handle_count, timeout,
                                             DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT |
                                             (wait_all ? DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL : 0) |
                                             (available ? DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE : 0),
                                             NULL);
   if (ret == 0) {
      return true;
   } else if (ret == -ETIME) {
      return false;
   } else {
      fprintf(stderr, "amdgpu: radv_amdgpu_wait_syncobj failed! (%d)\n", errno);
      return false;
   }
}

static int radv_amdgpu_export_syncobj(struct radeon_winsys *_ws,
                                      uint32_t syncobj,
                                      int *fd)
{
   struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);

   return amdgpu_cs_export_syncobj(ws->dev, syncobj, fd);
}

static int radv_amdgpu_import_syncobj(struct radeon_winsys *_ws,
                                      int fd,
                                      uint32_t *syncobj)
{
   struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);

   return amdgpu_cs_import_syncobj(ws->dev, fd, syncobj);
}

static int radv_amdgpu_export_syncobj_to_sync_file(struct radeon_winsys *_ws,
                                                   uint32_t syncobj,
                                                   int *fd)
{
   struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);

   return amdgpu_cs_syncobj_export_sync_file(ws->dev, syncobj, fd);
}

static int radv_amdgpu_import_syncobj_from_sync_file(struct radeon_winsys *_ws,
                                                     uint32_t syncobj,
                                                     int fd)
{
   struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);

   return amdgpu_cs_syncobj_import_sync_file(ws->dev, syncobj, fd);
}
void radv_amdgpu_cs_init_functions(struct radv_amdgpu_winsys *ws)
{
   ws->base.ctx_create = radv_amdgpu_ctx_create;
   ws->base.ctx_destroy = radv_amdgpu_ctx_destroy;
   ws->base.ctx_wait_idle = radv_amdgpu_ctx_wait_idle;
   ws->base.cs_create = radv_amdgpu_cs_create;
   ws->base.cs_destroy = radv_amdgpu_cs_destroy;
   ws->base.cs_grow = radv_amdgpu_cs_grow;
   ws->base.cs_finalize = radv_amdgpu_cs_finalize;
   ws->base.cs_reset = radv_amdgpu_cs_reset;
   ws->base.cs_add_buffer = radv_amdgpu_cs_add_buffer;
   ws->base.cs_execute_secondary = radv_amdgpu_cs_execute_secondary;
   ws->base.cs_submit = radv_amdgpu_winsys_cs_submit;
   ws->base.cs_dump = radv_amdgpu_winsys_cs_dump;
   ws->base.create_fence = radv_amdgpu_create_fence;
   ws->base.destroy_fence = radv_amdgpu_destroy_fence;
   ws->base.reset_fence = radv_amdgpu_reset_fence;
   ws->base.signal_fence = radv_amdgpu_signal_fence;
   ws->base.is_fence_waitable = radv_amdgpu_is_fence_waitable;
   ws->base.create_sem = radv_amdgpu_create_sem;
   ws->base.destroy_sem = radv_amdgpu_destroy_sem;
   ws->base.create_syncobj = radv_amdgpu_create_syncobj;
   ws->base.destroy_syncobj = radv_amdgpu_destroy_syncobj;
   ws->base.reset_syncobj = radv_amdgpu_reset_syncobj;
   ws->base.signal_syncobj = radv_amdgpu_signal_syncobj;
   ws->base.query_syncobj = radv_amdgpu_query_syncobj;
   ws->base.wait_syncobj = radv_amdgpu_wait_syncobj;
   ws->base.wait_timeline_syncobj = radv_amdgpu_wait_timeline_syncobj;
   ws->base.export_syncobj = radv_amdgpu_export_syncobj;
   ws->base.import_syncobj = radv_amdgpu_import_syncobj;
   ws->base.export_syncobj_to_sync_file = radv_amdgpu_export_syncobj_to_sync_file;
   ws->base.import_syncobj_from_sync_file = radv_amdgpu_import_syncobj_from_sync_file;
   ws->base.fence_wait = radv_amdgpu_fence_wait;
   ws->base.fences_wait = radv_amdgpu_fences_wait;
}