~mmach/netext73/mesa-haswell

Viewing changes to src/amd/vulkan/winsys/amdgpu/radv_amdgpu_cs.c

  • Committer: mmach
  • Date: 2021-04-17 06:19:36 UTC
  • Revision ID: netbit73@gmail.com-20210417061936-peb5vc5ysl5zeoad
/*
 * Copyright © 2016 Red Hat.
 * Copyright © 2016 Bas Nieuwenhuizen
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <stdlib.h>
#include <amdgpu.h>
#include "drm-uapi/amdgpu_drm.h"
#include <assert.h>
#include <pthread.h>
#include <errno.h>

#include "util/u_memory.h"
#include "ac_debug.h"
#include "radv_radeon_winsys.h"
#include "radv_amdgpu_cs.h"
#include "radv_amdgpu_bo.h"
#include "sid.h"


enum {
        VIRTUAL_BUFFER_HASH_TABLE_SIZE = 1024
};
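
/* Command stream on top of libdrm amdgpu. Wraps the generic radeon_cmdbuf
 * with the current IB info, its GPU buffer and CPU mapping, the BO list
 * (plus a small hash table used for deduplication), the previous IB buffers
 * kept alive while chaining, and CPU-side command buffers used as a
 * fallback on chips that cannot chain IBs.
 */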
 
struct radv_amdgpu_cs {
        struct radeon_cmdbuf base;
        struct radv_amdgpu_winsys *ws;

        struct amdgpu_cs_ib_info    ib;

        struct radeon_winsys_bo     *ib_buffer;
        uint8_t                 *ib_mapped;
        unsigned                    max_num_buffers;
        unsigned                    num_buffers;
        struct drm_amdgpu_bo_list_entry *handles;

        struct radeon_winsys_bo     **old_ib_buffers;
        unsigned                    num_old_ib_buffers;
        unsigned                    max_num_old_ib_buffers;
        unsigned                    *ib_size_ptr;
        VkResult                    status;
        bool                        is_chained;

        int                         buffer_hash_table[1024];
        unsigned                    hw_ip;

        unsigned                    num_virtual_buffers;
        unsigned                    max_num_virtual_buffers;
        struct radeon_winsys_bo     **virtual_buffers;
        int                         *virtual_buffer_hash_table;

        /* For chips that don't support chaining. */
        struct radeon_cmdbuf     *old_cs_buffers;
        unsigned                    num_old_cs_buffers;
};

static inline struct radv_amdgpu_cs *
radv_amdgpu_cs(struct radeon_cmdbuf *base)
{
        return (struct radv_amdgpu_cs*)base;
}
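
/* Translate a RADV ring type to the HW IP type understood by the amdgpu
 * kernel submission interface.
 */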
 
static int ring_to_hw_ip(enum ring_type ring)
{
        switch (ring) {
        case RING_GFX:
                return AMDGPU_HW_IP_GFX;
        case RING_DMA:
                return AMDGPU_HW_IP_DMA;
        case RING_COMPUTE:
                return AMDGPU_HW_IP_COMPUTE;
        default:
                unreachable("unsupported ring");
        }
}

struct radv_amdgpu_cs_request {
        /** Specify flags with additional information */
        uint64_t flags;

        /** Specify HW IP block type to which to send the IB. */
        unsigned ip_type;

        /** IP instance index if there are several IPs of the same type. */
        unsigned ip_instance;

        /**
         * Specify ring index of the IP. We could have several rings
         * in the same IP. E.g. 0 for SDMA0 and 1 for SDMA1.
         */
        uint32_t ring;

        /**
         * BO list handles used by this request.
         */
        struct drm_amdgpu_bo_list_entry *handles;
        uint32_t num_handles;

        /**
         * Number of dependencies this Command submission needs to
         * wait for before starting execution.
         */
        uint32_t number_of_dependencies;

        /**
         * Array of dependencies which need to be met before
         * execution can start.
         */
        struct amdgpu_cs_fence *dependencies;

        /** Number of IBs to submit in the field ibs. */
        uint32_t number_of_ibs;

        /**
         * IBs to submit. Those IBs will be submit together as single entity
         */
        struct amdgpu_cs_ib_info *ibs;

        /**
         * The returned sequence number for the command submission
         */
        uint64_t seq_no;

        /**
         * The fence information
         */
        struct amdgpu_cs_fence_info fence_info;
};


static int radv_amdgpu_signal_sems(struct radv_amdgpu_ctx *ctx,
                                   uint32_t ip_type,
                                   uint32_t ring,
                                   struct radv_winsys_sem_info *sem_info);
static int radv_amdgpu_cs_submit(struct radv_amdgpu_ctx *ctx,
                                 struct radv_amdgpu_cs_request *request,
                                 struct radv_winsys_sem_info *sem_info);

static void radv_amdgpu_request_to_fence(struct radv_amdgpu_ctx *ctx,
                                         struct radv_amdgpu_fence *fence,
                                         struct radv_amdgpu_cs_request *req)
{
        fence->fence.context = ctx->ctx;
        fence->fence.ip_type = req->ip_type;
        fence->fence.ip_instance = req->ip_instance;
        fence->fence.ring = req->ring;
        fence->fence.fence = req->seq_no;
        fence->user_ptr = (volatile uint64_t*)(ctx->fence_map + req->ip_type * MAX_RINGS_PER_TYPE + req->ring);
}

static struct radeon_winsys_fence *radv_amdgpu_create_fence()
{
        struct radv_amdgpu_fence *fence = calloc(1, sizeof(struct radv_amdgpu_fence));
        if (!fence)
                return NULL;

        fence->fence.fence = UINT64_MAX;
        return (struct radeon_winsys_fence*)fence;
}

static void radv_amdgpu_destroy_fence(struct radeon_winsys_fence *_fence)
{
        struct radv_amdgpu_fence *fence = (struct radv_amdgpu_fence *)_fence;
        free(fence);
}

static void radv_amdgpu_reset_fence(struct radeon_winsys_fence *_fence)
{
        struct radv_amdgpu_fence *fence = (struct radv_amdgpu_fence *)_fence;
        fence->fence.fence = UINT64_MAX;
}

static void radv_amdgpu_signal_fence(struct radeon_winsys_fence *_fence)
{
        struct radv_amdgpu_fence *fence = (struct radv_amdgpu_fence *)_fence;
        fence->fence.fence = 0;
}

static bool radv_amdgpu_is_fence_waitable(struct radeon_winsys_fence *_fence)
{
        struct radv_amdgpu_fence *fence = (struct radv_amdgpu_fence *)_fence;
        return fence->fence.fence < UINT64_MAX;
}
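
/* Wait for a single fence. The CPU-visible user fence value is checked
 * first so that already-signaled fences can be detected without an ioctl;
 * otherwise the wait goes through amdgpu_cs_query_fence_status().
 */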
 
static bool radv_amdgpu_fence_wait(struct radeon_winsys *_ws,
                              struct radeon_winsys_fence *_fence,
                              bool absolute,
                              uint64_t timeout)
{
        struct radv_amdgpu_fence *fence = (struct radv_amdgpu_fence *)_fence;
        unsigned flags = absolute ? AMDGPU_QUERY_FENCE_TIMEOUT_IS_ABSOLUTE : 0;
        int r;
        uint32_t expired = 0;

        /* Special casing 0 and UINT64_MAX so that they work without user_ptr/fence.ctx */
        if (fence->fence.fence == UINT64_MAX)
                return false;

        if (fence->fence.fence == 0)
                return true;

        if (fence->user_ptr) {
                if (*fence->user_ptr >= fence->fence.fence)
                        return true;
                if (!absolute && !timeout)
                        return false;
        }

        /* Now use the libdrm query. */
        r = amdgpu_cs_query_fence_status(&fence->fence,
                                         timeout,
                                         flags,
                                         &expired);

        if (r) {
                fprintf(stderr, "amdgpu: radv_amdgpu_cs_query_fence_status failed.\n");
                return false;
        }

        if (expired)
                return true;

        return false;
}


static bool radv_amdgpu_fences_wait(struct radeon_winsys *_ws,
                              struct radeon_winsys_fence *const *_fences,
                              uint32_t fence_count,
                              bool wait_all,
                              uint64_t timeout)
{
        struct amdgpu_cs_fence *fences = malloc(sizeof(struct amdgpu_cs_fence) * fence_count);
        int r;
        uint32_t expired = 0, first = 0;

        if (!fences)
                return false;

        for (uint32_t i = 0; i < fence_count; ++i)
                fences[i] = ((struct radv_amdgpu_fence *)_fences[i])->fence;

        /* Now use the libdrm query. */
        r = amdgpu_cs_wait_fences(fences, fence_count, wait_all,
                                  timeout, &expired, &first);

        free(fences);
        if (r) {
                fprintf(stderr, "amdgpu: amdgpu_cs_wait_fences failed.\n");
                return false;
        }

        if (expired)
                return true;

        return false;
}

static void radv_amdgpu_cs_destroy(struct radeon_cmdbuf *rcs)
{
        struct radv_amdgpu_cs *cs = radv_amdgpu_cs(rcs);

        if (cs->ib_buffer)
                cs->ws->base.buffer_destroy(cs->ib_buffer);
        else
                free(cs->base.buf);

        for (unsigned i = 0; i < cs->num_old_ib_buffers; ++i)
                cs->ws->base.buffer_destroy(cs->old_ib_buffers[i]);

        for (unsigned i = 0; i < cs->num_old_cs_buffers; ++i) {
                free(cs->old_cs_buffers[i].buf);
        }

        free(cs->old_cs_buffers);
        free(cs->old_ib_buffers);
        free(cs->virtual_buffers);
        free(cs->virtual_buffer_hash_table);
        free(cs->handles);
        free(cs);
}

static void radv_amdgpu_init_cs(struct radv_amdgpu_cs *cs,
                                enum ring_type ring_type)
{
        for (int i = 0; i < ARRAY_SIZE(cs->buffer_hash_table); ++i)
                cs->buffer_hash_table[i] = -1;

        cs->hw_ip = ring_to_hw_ip(ring_type);
}
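
/* Create a command stream. When IB BOs are supported, commands are written
 * directly into a CPU-mapped GPU buffer that is later submitted as an
 * indirect buffer; otherwise they go into a malloc'ed array and are copied
 * into GPU memory at submit time (see the sysmem submit path below).
 */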
 
static struct radeon_cmdbuf *
radv_amdgpu_cs_create(struct radeon_winsys *ws,
                      enum ring_type ring_type)
{
        struct radv_amdgpu_cs *cs;
        uint32_t ib_size = 20 * 1024 * 4;
        cs = calloc(1, sizeof(struct radv_amdgpu_cs));
        if (!cs)
                return NULL;

        cs->ws = radv_amdgpu_winsys(ws);
        radv_amdgpu_init_cs(cs, ring_type);

        if (cs->ws->use_ib_bos) {
                cs->ib_buffer = ws->buffer_create(ws, ib_size, 0,
                                                  cs->ws->cs_bo_domain,
                                                  RADEON_FLAG_CPU_ACCESS |
                                                  RADEON_FLAG_NO_INTERPROCESS_SHARING |
                                                  RADEON_FLAG_READ_ONLY |
                                                  RADEON_FLAG_GTT_WC,
                                                  RADV_BO_PRIORITY_CS);
                if (!cs->ib_buffer) {
                        free(cs);
                        return NULL;
                }

                cs->ib_mapped = ws->buffer_map(cs->ib_buffer);
                if (!cs->ib_mapped) {
                        ws->buffer_destroy(cs->ib_buffer);
                        free(cs);
                        return NULL;
                }

                cs->ib.ib_mc_address = radv_amdgpu_winsys_bo(cs->ib_buffer)->base.va;
                cs->base.buf = (uint32_t *)cs->ib_mapped;
                cs->base.max_dw = ib_size / 4 - 4;
                cs->ib_size_ptr = &cs->ib.size;
                cs->ib.size = 0;

                ws->cs_add_buffer(&cs->base, cs->ib_buffer);
        } else {
                uint32_t *buf = malloc(16384);
                if (!buf) {
                        free(cs);
                        return NULL;
                }
                cs->base.buf = buf;
                cs->base.max_dw = 4096;
        }

        return &cs->base;
}
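
/* Make room for at least min_size additional dwords. Without IB BOs the
 * CPU buffer is reallocated, or stashed in old_cs_buffers once the per-IB
 * dword limit is reached. With IB BOs the current IB is padded, a new IB
 * buffer is allocated and the old one is chained to it with an
 * INDIRECT_BUFFER packet.
 */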
 
static void radv_amdgpu_cs_grow(struct radeon_cmdbuf *_cs, size_t min_size)
{
        struct radv_amdgpu_cs *cs = radv_amdgpu_cs(_cs);

        if (cs->status != VK_SUCCESS) {
                cs->base.cdw = 0;
                return;
        }

        if (!cs->ws->use_ib_bos) {
                const uint64_t limit_dws = 0xffff8;
                uint64_t ib_dws = MAX2(cs->base.cdw + min_size,
                                       MIN2(cs->base.max_dw * 2, limit_dws));

                /* The total ib size cannot exceed limit_dws dwords. */
                if (ib_dws > limit_dws)
                {
                        /* The maximum size in dwords has been reached,
                         * try to allocate a new one.
                         */
                        struct radeon_cmdbuf *old_cs_buffers =
                                realloc(cs->old_cs_buffers,
                                        (cs->num_old_cs_buffers + 1) * sizeof(*cs->old_cs_buffers));
                        if (!old_cs_buffers) {
                                cs->status = VK_ERROR_OUT_OF_HOST_MEMORY;
                                cs->base.cdw = 0;
                                return;
                        }
                        cs->old_cs_buffers = old_cs_buffers;

                        /* Store the current one for submitting it later. */
                        cs->old_cs_buffers[cs->num_old_cs_buffers].cdw = cs->base.cdw;
                        cs->old_cs_buffers[cs->num_old_cs_buffers].max_dw = cs->base.max_dw;
                        cs->old_cs_buffers[cs->num_old_cs_buffers].buf = cs->base.buf;
                        cs->num_old_cs_buffers++;

                        /* Reset the cs, it will be re-allocated below. */
                        cs->base.cdw = 0;
                        cs->base.buf = NULL;

                        /* Re-compute the number of dwords to allocate. */
                        ib_dws = MAX2(cs->base.cdw + min_size,
                                      MIN2(cs->base.max_dw * 2, limit_dws));
                        if (ib_dws > limit_dws) {
                                fprintf(stderr, "amdgpu: Too high number of "
                                                "dwords to allocate\n");
                                cs->status = VK_ERROR_OUT_OF_HOST_MEMORY;
                                return;
                        }
                }

                uint32_t *new_buf = realloc(cs->base.buf, ib_dws * 4);
                if (new_buf) {
                        cs->base.buf = new_buf;
                        cs->base.max_dw = ib_dws;
                } else {
                        cs->status = VK_ERROR_OUT_OF_HOST_MEMORY;
                        cs->base.cdw = 0;
                }
                return;
        }

        uint64_t ib_size = MAX2(min_size * 4 + 16, cs->base.max_dw * 4 * 2);

        /* max that fits in the chain size field. */
        ib_size = MIN2(ib_size, 0xfffff);

        while (!cs->base.cdw || (cs->base.cdw & 7) != 4)
                radeon_emit(&cs->base, PKT3_NOP_PAD);

        *cs->ib_size_ptr |= cs->base.cdw + 4;

        if (cs->num_old_ib_buffers == cs->max_num_old_ib_buffers) {
                unsigned max_num_old_ib_buffers =
                        MAX2(1, cs->max_num_old_ib_buffers * 2);
                struct radeon_winsys_bo **old_ib_buffers =
                        realloc(cs->old_ib_buffers,
                                max_num_old_ib_buffers * sizeof(void*));
                if (!old_ib_buffers) {
                        cs->status = VK_ERROR_OUT_OF_HOST_MEMORY;
                        return;
                }
                cs->max_num_old_ib_buffers = max_num_old_ib_buffers;
                cs->old_ib_buffers = old_ib_buffers;
        }

        cs->old_ib_buffers[cs->num_old_ib_buffers++] = cs->ib_buffer;

        cs->ib_buffer = cs->ws->base.buffer_create(&cs->ws->base, ib_size, 0,
                                                   cs->ws->cs_bo_domain,
                                                   RADEON_FLAG_CPU_ACCESS |
                                                   RADEON_FLAG_NO_INTERPROCESS_SHARING |
                                                   RADEON_FLAG_READ_ONLY |
                                                   RADEON_FLAG_GTT_WC,
                                                   RADV_BO_PRIORITY_CS);

        if (!cs->ib_buffer) {
                cs->base.cdw = 0;
                cs->status = VK_ERROR_OUT_OF_DEVICE_MEMORY;
                cs->ib_buffer = cs->old_ib_buffers[--cs->num_old_ib_buffers];
        }

        cs->ib_mapped = cs->ws->base.buffer_map(cs->ib_buffer);
        if (!cs->ib_mapped) {
                cs->ws->base.buffer_destroy(cs->ib_buffer);
                cs->base.cdw = 0;

                /* VK_ERROR_MEMORY_MAP_FAILED is not valid for vkEndCommandBuffer. */
                cs->status = VK_ERROR_OUT_OF_DEVICE_MEMORY;
                cs->ib_buffer = cs->old_ib_buffers[--cs->num_old_ib_buffers];
        }

        cs->ws->base.cs_add_buffer(&cs->base, cs->ib_buffer);

        radeon_emit(&cs->base, PKT3(PKT3_INDIRECT_BUFFER_CIK, 2, 0));
        radeon_emit(&cs->base, radv_amdgpu_winsys_bo(cs->ib_buffer)->base.va);
        radeon_emit(&cs->base, radv_amdgpu_winsys_bo(cs->ib_buffer)->base.va >> 32);
        radeon_emit(&cs->base, S_3F2_CHAIN(1) | S_3F2_VALID(1));

        cs->ib_size_ptr = cs->base.buf + cs->base.cdw - 1;

        cs->base.buf = (uint32_t *)cs->ib_mapped;
        cs->base.cdw = 0;
        cs->base.max_dw = ib_size / 4 - 4;

}

static VkResult radv_amdgpu_cs_finalize(struct radeon_cmdbuf *_cs)
{
        struct radv_amdgpu_cs *cs = radv_amdgpu_cs(_cs);

        if (cs->ws->use_ib_bos) {
                while (!cs->base.cdw || (cs->base.cdw & 7) != 0)
                        radeon_emit(&cs->base, PKT3_NOP_PAD);

                *cs->ib_size_ptr |= cs->base.cdw;

                cs->is_chained = false;
        }

        return cs->status;
}

static void radv_amdgpu_cs_reset(struct radeon_cmdbuf *_cs)
{
        struct radv_amdgpu_cs *cs = radv_amdgpu_cs(_cs);
        cs->base.cdw = 0;
        cs->status = VK_SUCCESS;

        for (unsigned i = 0; i < cs->num_buffers; ++i) {
                unsigned hash = cs->handles[i].bo_handle &
                                (ARRAY_SIZE(cs->buffer_hash_table) - 1);
                cs->buffer_hash_table[hash] = -1;
        }

        for (unsigned i = 0; i < cs->num_virtual_buffers; ++i) {
                unsigned hash = ((uintptr_t)cs->virtual_buffers[i] >> 6) & (VIRTUAL_BUFFER_HASH_TABLE_SIZE - 1);
                cs->virtual_buffer_hash_table[hash] = -1;
        }

        cs->num_buffers = 0;
        cs->num_virtual_buffers = 0;

        if (cs->ws->use_ib_bos) {
                cs->ws->base.cs_add_buffer(&cs->base, cs->ib_buffer);

                for (unsigned i = 0; i < cs->num_old_ib_buffers; ++i)
                        cs->ws->base.buffer_destroy(cs->old_ib_buffers[i]);

                cs->num_old_ib_buffers = 0;
                cs->ib.ib_mc_address = radv_amdgpu_winsys_bo(cs->ib_buffer)->base.va;
                cs->ib_size_ptr = &cs->ib.size;
                cs->ib.size = 0;
        } else {
                for (unsigned i = 0; i < cs->num_old_cs_buffers; ++i) {
                        struct radeon_cmdbuf *rcs = &cs->old_cs_buffers[i];
                        free(rcs->buf);
                }

                free(cs->old_cs_buffers);
                cs->old_cs_buffers = NULL;
                cs->num_old_cs_buffers = 0;
        }
}
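
/* Look up a BO handle in the CS buffer list, using the hash table as a
 * cache before falling back to a linear search. Returns the index into
 * cs->handles, or -1 if the BO is not in the list.
 */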
 
static int radv_amdgpu_cs_find_buffer(struct radv_amdgpu_cs *cs,
                                      uint32_t bo)
{
        unsigned hash = bo & (ARRAY_SIZE(cs->buffer_hash_table) - 1);
        int index = cs->buffer_hash_table[hash];

        if (index == -1)
                return -1;

        if (cs->handles[index].bo_handle == bo)
                return index;

        for (unsigned i = 0; i < cs->num_buffers; ++i) {
                if (cs->handles[i].bo_handle == bo) {
                        cs->buffer_hash_table[hash] = i;
                        return i;
                }
        }

        return -1;
}

static void radv_amdgpu_cs_add_buffer_internal(struct radv_amdgpu_cs *cs,
                                               uint32_t bo, uint8_t priority)
{
        unsigned hash;
        int index = radv_amdgpu_cs_find_buffer(cs, bo);

        if (index != -1)
                return;

        if (cs->num_buffers == cs->max_num_buffers) {
                unsigned new_count = MAX2(1, cs->max_num_buffers * 2);
                struct drm_amdgpu_bo_list_entry *new_entries =
                        realloc(cs->handles, new_count * sizeof(struct drm_amdgpu_bo_list_entry));
                if (new_entries) {
                        cs->max_num_buffers = new_count;
                        cs->handles = new_entries;
                } else {
                        cs->status = VK_ERROR_OUT_OF_HOST_MEMORY;
                        return;
                }
        }

        cs->handles[cs->num_buffers].bo_handle = bo;
        cs->handles[cs->num_buffers].bo_priority = priority;

        hash = bo & (ARRAY_SIZE(cs->buffer_hash_table) - 1);
        cs->buffer_hash_table[hash] = cs->num_buffers;

        ++cs->num_buffers;
}

static void radv_amdgpu_cs_add_virtual_buffer(struct radeon_cmdbuf *_cs,
                                              struct radeon_winsys_bo *bo)
{
        struct radv_amdgpu_cs *cs = radv_amdgpu_cs(_cs);
        unsigned hash = ((uintptr_t)bo >> 6) & (VIRTUAL_BUFFER_HASH_TABLE_SIZE - 1);


        if (!cs->virtual_buffer_hash_table) {
                int *virtual_buffer_hash_table =
                        malloc(VIRTUAL_BUFFER_HASH_TABLE_SIZE * sizeof(int));
                if (!virtual_buffer_hash_table) {
                        cs->status = VK_ERROR_OUT_OF_HOST_MEMORY;
                        return;
                }
                cs->virtual_buffer_hash_table = virtual_buffer_hash_table;

                for (int i = 0; i < VIRTUAL_BUFFER_HASH_TABLE_SIZE; ++i)
                        cs->virtual_buffer_hash_table[i] = -1;
        }

        if (cs->virtual_buffer_hash_table[hash] >= 0) {
                int idx = cs->virtual_buffer_hash_table[hash];
                if (cs->virtual_buffers[idx] == bo) {
                        return;
                }
                for (unsigned i = 0; i < cs->num_virtual_buffers; ++i) {
                        if (cs->virtual_buffers[i] == bo) {
                                cs->virtual_buffer_hash_table[hash] = i;
                                return;
                        }
                }
        }

        if(cs->max_num_virtual_buffers <= cs->num_virtual_buffers) {
                unsigned max_num_virtual_buffers =
                        MAX2(2, cs->max_num_virtual_buffers * 2);
                struct radeon_winsys_bo **virtual_buffers =
                        realloc(cs->virtual_buffers,
                                sizeof(struct radeon_winsys_bo*) * max_num_virtual_buffers);
                if (!virtual_buffers) {
                        cs->status = VK_ERROR_OUT_OF_HOST_MEMORY;
                        return;
                }
                cs->max_num_virtual_buffers = max_num_virtual_buffers;
                cs->virtual_buffers = virtual_buffers;
        }

        cs->virtual_buffers[cs->num_virtual_buffers] = bo;

        cs->virtual_buffer_hash_table[hash] = cs->num_virtual_buffers;
        ++cs->num_virtual_buffers;

}

static void radv_amdgpu_cs_add_buffer(struct radeon_cmdbuf *_cs,
                                      struct radeon_winsys_bo *_bo)
{
        struct radv_amdgpu_cs *cs = radv_amdgpu_cs(_cs);
        struct radv_amdgpu_winsys_bo *bo = radv_amdgpu_winsys_bo(_bo);

        if (cs->status != VK_SUCCESS)
                return;

        if (bo->is_virtual)  {
                radv_amdgpu_cs_add_virtual_buffer(_cs, _bo);
                return;
        }

        if (bo->base.is_local)
                return;

        radv_amdgpu_cs_add_buffer_internal(cs, bo->bo_handle, bo->priority);
}
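
/* Execute a secondary command buffer from a primary: the child's buffers
 * are merged into the parent's list, then the child is either called via an
 * INDIRECT_BUFFER packet (IB BO mode) or memcpy'ed into the parent (sysmem
 * mode).
 */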
 
static void radv_amdgpu_cs_execute_secondary(struct radeon_cmdbuf *_parent,
                                             struct radeon_cmdbuf *_child)
{
        struct radv_amdgpu_cs *parent = radv_amdgpu_cs(_parent);
        struct radv_amdgpu_cs *child = radv_amdgpu_cs(_child);

        if (parent->status != VK_SUCCESS || child->status != VK_SUCCESS)
                return;

        for (unsigned i = 0; i < child->num_buffers; ++i) {
                radv_amdgpu_cs_add_buffer_internal(parent,
                                                   child->handles[i].bo_handle,
                                                   child->handles[i].bo_priority);
        }

        for (unsigned i = 0; i < child->num_virtual_buffers; ++i) {
                radv_amdgpu_cs_add_buffer(&parent->base, child->virtual_buffers[i]);
        }

        if (parent->ws->use_ib_bos) {
                if (parent->base.cdw + 4 > parent->base.max_dw)
                        radv_amdgpu_cs_grow(&parent->base, 4);

                radeon_emit(&parent->base, PKT3(PKT3_INDIRECT_BUFFER_CIK, 2, 0));
                radeon_emit(&parent->base, child->ib.ib_mc_address);
                radeon_emit(&parent->base, child->ib.ib_mc_address >> 32);
                radeon_emit(&parent->base, child->ib.size);
        } else {
                if (parent->base.cdw + child->base.cdw > parent->base.max_dw)
                        radv_amdgpu_cs_grow(&parent->base, child->base.cdw);

                memcpy(parent->base.buf + parent->base.cdw, child->base.buf, 4 * child->base.cdw);
                parent->base.cdw += child->base.cdw;
        }
}
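
/* Build the flat BO list for a submission. With debug_all_bos every buffer
 * known to the winsys is included; with a single CS and no extras that CS's
 * list is copied directly; otherwise the lists of all CSes, the extra BOs,
 * the preamble CS, the virtual buffers and the caller-provided radv_bo_list
 * are merged with duplicates removed.
 */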
 
static VkResult
radv_amdgpu_get_bo_list(struct radv_amdgpu_winsys *ws,
                        struct radeon_cmdbuf **cs_array,
                        unsigned count,
                        struct radv_amdgpu_winsys_bo **extra_bo_array,
                        unsigned num_extra_bo,
                        struct radeon_cmdbuf *extra_cs,
                        const struct radv_winsys_bo_list *radv_bo_list,
                        unsigned *rnum_handles,
                        struct drm_amdgpu_bo_list_entry **rhandles)
{
        struct drm_amdgpu_bo_list_entry *handles = NULL;
        unsigned num_handles = 0;

        if (ws->debug_all_bos) {
                struct radv_amdgpu_winsys_bo *bo;

                handles = malloc(sizeof(handles[0]) * ws->num_buffers);
                if (!handles) {
                        return VK_ERROR_OUT_OF_HOST_MEMORY;
                }

                LIST_FOR_EACH_ENTRY(bo, &ws->global_bo_list, global_list_item) {
                        assert(num_handles < ws->num_buffers);
                        handles[num_handles].bo_handle = bo->bo_handle;
                        handles[num_handles].bo_priority = bo->priority;
                        num_handles++;
                }
        } else if (count == 1 && !num_extra_bo && !extra_cs && !radv_bo_list &&
                   !radv_amdgpu_cs(cs_array[0])->num_virtual_buffers) {
                struct radv_amdgpu_cs *cs = (struct radv_amdgpu_cs*)cs_array[0];
                if (cs->num_buffers == 0)
                        return VK_SUCCESS;

                handles = malloc(sizeof(handles[0]) * cs->num_buffers);
                if (!handles)
                        return VK_ERROR_OUT_OF_HOST_MEMORY;

                memcpy(handles, cs->handles,
                       sizeof(handles[0]) * cs->num_buffers);
                num_handles = cs->num_buffers;
        } else {
                unsigned total_buffer_count = num_extra_bo;
                num_handles = num_extra_bo;
                for (unsigned i = 0; i < count; ++i) {
                        struct radv_amdgpu_cs *cs = (struct radv_amdgpu_cs*)cs_array[i];
                        total_buffer_count += cs->num_buffers;
                        for (unsigned j = 0; j < cs->num_virtual_buffers; ++j)
                                total_buffer_count += radv_amdgpu_winsys_bo(cs->virtual_buffers[j])->bo_count;
                }

                if (extra_cs) {
                        total_buffer_count += ((struct radv_amdgpu_cs*)extra_cs)->num_buffers;
                }

                if (radv_bo_list) {
                        total_buffer_count += radv_bo_list->count;
                }

                if (total_buffer_count == 0)
                        return VK_SUCCESS;

                handles = malloc(sizeof(handles[0]) * total_buffer_count);
                if (!handles)
                        return VK_ERROR_OUT_OF_HOST_MEMORY;

                for (unsigned i = 0; i < num_extra_bo; i++) {
                        handles[i].bo_handle = extra_bo_array[i]->bo_handle;
                        handles[i].bo_priority = extra_bo_array[i]->priority;
                }

                for (unsigned i = 0; i < count + !!extra_cs; ++i) {
                        struct radv_amdgpu_cs *cs;

                        if (i == count)
                                cs = (struct radv_amdgpu_cs*)extra_cs;
                        else
                                cs = (struct radv_amdgpu_cs*)cs_array[i];

                        if (!cs->num_buffers)
                                continue;

                        if (num_handles == 0 && !cs->num_virtual_buffers) {
                                memcpy(handles, cs->handles, cs->num_buffers * sizeof(struct drm_amdgpu_bo_list_entry));
                                num_handles = cs->num_buffers;
                                continue;
                        }
                        int unique_bo_so_far = num_handles;
                        for (unsigned j = 0; j < cs->num_buffers; ++j) {
                                bool found = false;
                                for (unsigned k = 0; k < unique_bo_so_far; ++k) {
                                        if (handles[k].bo_handle == cs->handles[j].bo_handle) {
                                                found = true;
                                                break;
                                        }
                                }
                                if (!found) {
                                        handles[num_handles] = cs->handles[j];
                                        ++num_handles;
                                }
                        }
                        for (unsigned j = 0; j < cs->num_virtual_buffers; ++j) {
                                struct radv_amdgpu_winsys_bo *virtual_bo = radv_amdgpu_winsys_bo(cs->virtual_buffers[j]);
                                for(unsigned k = 0; k < virtual_bo->bo_count; ++k) {
                                        struct radv_amdgpu_winsys_bo *bo = virtual_bo->bos[k];
                                        bool found = false;
                                        for (unsigned m = 0; m < num_handles; ++m) {
                                                if (handles[m].bo_handle == bo->bo_handle) {
                                                        found = true;
                                                        break;
                                                }
                                        }
                                        if (!found) {
                                                handles[num_handles].bo_handle = bo->bo_handle;
                                                handles[num_handles].bo_priority = bo->priority;
                                                ++num_handles;
                                        }
                                }
                        }
                }

                if (radv_bo_list) {
                        unsigned unique_bo_so_far = num_handles;
                        for (unsigned i = 0; i < radv_bo_list->count; ++i) {
                                struct radv_amdgpu_winsys_bo *bo = radv_amdgpu_winsys_bo(radv_bo_list->bos[i]);
                                bool found = false;
                                for (unsigned j = 0; j < unique_bo_so_far; ++j) {
                                        if (bo->bo_handle == handles[j].bo_handle) {
                                                found = true;
                                                break;
                                        }
                                }
                                if (!found) {
                                        handles[num_handles].bo_handle = bo->bo_handle;
                                        handles[num_handles].bo_priority = bo->priority;
                                        ++num_handles;
                                }
                        }
                }
        }

        *rhandles = handles;
        *rnum_handles = num_handles;

        return VK_SUCCESS;
}

static struct amdgpu_cs_fence_info radv_set_cs_fence(struct radv_amdgpu_ctx *ctx, int ip_type, int ring)
{
        struct amdgpu_cs_fence_info ret = {0};
        if (ctx->fence_map) {
                ret.handle = radv_amdgpu_winsys_bo(ctx->fence_bo)->bo;
                ret.offset = (ip_type * MAX_RINGS_PER_TYPE + ring) * sizeof(uint64_t);
        }
        return ret;
}

static void radv_assign_last_submit(struct radv_amdgpu_ctx *ctx,
                                    struct radv_amdgpu_cs_request *request)
{
        radv_amdgpu_request_to_fence(ctx,
                                     &ctx->last_submission[request->ip_type][request->ring],
                                     request);
}
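
/* Submit path used when all command buffers can be patched: each CS is
 * chained to the next by appending an INDIRECT_BUFFER packet at its tail,
 * so the kernel only receives the first IB (plus an optional preamble).
 */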
 
static VkResult
radv_amdgpu_winsys_cs_submit_chained(struct radeon_winsys_ctx *_ctx,
                                     int queue_idx,
                                     struct radv_winsys_sem_info *sem_info,
                                     const struct radv_winsys_bo_list *radv_bo_list,
                                     struct radeon_cmdbuf **cs_array,
                                     unsigned cs_count,
                                     struct radeon_cmdbuf *initial_preamble_cs,
                                     struct radeon_cmdbuf *continue_preamble_cs,
                                     struct radeon_winsys_fence *_fence)
{
        struct radv_amdgpu_ctx *ctx = radv_amdgpu_ctx(_ctx);
        struct radv_amdgpu_fence *fence = (struct radv_amdgpu_fence *)_fence;
        struct radv_amdgpu_cs *cs0 = radv_amdgpu_cs(cs_array[0]);
        struct radv_amdgpu_winsys *aws = cs0->ws;
        struct drm_amdgpu_bo_list_entry *handles = NULL;
        struct radv_amdgpu_cs_request request = {0};
        struct amdgpu_cs_ib_info ibs[2];
        unsigned number_of_ibs = 1;
        unsigned num_handles = 0;
        VkResult result;

        for (unsigned i = cs_count; i--;) {
                struct radv_amdgpu_cs *cs = radv_amdgpu_cs(cs_array[i]);

                if (cs->is_chained) {
                        *cs->ib_size_ptr -= 4;
                        cs->is_chained = false;
                }

                if (i + 1 < cs_count) {
                        struct radv_amdgpu_cs *next = radv_amdgpu_cs(cs_array[i + 1]);
                        assert(cs->base.cdw + 4 <= cs->base.max_dw);

                        cs->is_chained = true;
                        *cs->ib_size_ptr += 4;

                        cs->base.buf[cs->base.cdw + 0] = PKT3(PKT3_INDIRECT_BUFFER_CIK, 2, 0);
                        cs->base.buf[cs->base.cdw + 1] = next->ib.ib_mc_address;
                        cs->base.buf[cs->base.cdw + 2] = next->ib.ib_mc_address >> 32;
                        cs->base.buf[cs->base.cdw + 3] = S_3F2_CHAIN(1) | S_3F2_VALID(1) | next->ib.size;
                }
        }

        if (aws->debug_all_bos)
                u_rwlock_rdlock(&aws->global_bo_list_lock);

        /* Get the BO list. */
        result = radv_amdgpu_get_bo_list(cs0->ws, cs_array, cs_count, NULL, 0,
                                         initial_preamble_cs, radv_bo_list,
                                         &num_handles, &handles);
        if (result != VK_SUCCESS)
                goto fail;

        /* Configure the CS request. */
        if (initial_preamble_cs) {
                ibs[0] = radv_amdgpu_cs(initial_preamble_cs)->ib;
                ibs[1] = cs0->ib;
                number_of_ibs++;
        } else {
                ibs[0] = cs0->ib;
        }

        request.ip_type = cs0->hw_ip;
        request.ring = queue_idx;
        request.number_of_ibs = number_of_ibs;
        request.ibs = ibs;
        request.handles = handles;
        request.num_handles = num_handles;
        request.fence_info = radv_set_cs_fence(ctx, cs0->hw_ip, queue_idx);

        /* Submit the CS. */
        result = radv_amdgpu_cs_submit(ctx, &request, sem_info);

        free(request.handles);

        if (result != VK_SUCCESS)
                goto fail;

        if (fence)
                radv_amdgpu_request_to_fence(ctx, fence, &request);

        radv_assign_last_submit(ctx, &request);

fail:
        if (aws->debug_all_bos)
                u_rwlock_rdunlock(&aws->global_bo_list_lock);
        return result;
}
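
/* Submit path used when chaining is not possible: every CS (plus an
 * optional preamble) is passed to the kernel as its own IB in a single
 * submission.
 */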
 
static VkResult
radv_amdgpu_winsys_cs_submit_fallback(struct radeon_winsys_ctx *_ctx,
                                      int queue_idx,
                                      struct radv_winsys_sem_info *sem_info,
                                      const struct radv_winsys_bo_list *radv_bo_list,
                                      struct radeon_cmdbuf **cs_array,
                                      unsigned cs_count,
                                      struct radeon_cmdbuf *initial_preamble_cs,
                                      struct radeon_cmdbuf *continue_preamble_cs,
                                      struct radeon_winsys_fence *_fence)
{
        struct radv_amdgpu_ctx *ctx = radv_amdgpu_ctx(_ctx);
        struct radv_amdgpu_fence *fence = (struct radv_amdgpu_fence *)_fence;
        struct drm_amdgpu_bo_list_entry *handles = NULL;
        struct radv_amdgpu_cs_request request = {0};
        struct amdgpu_cs_ib_info *ibs;
        struct radv_amdgpu_cs *cs0;
        struct radv_amdgpu_winsys *aws;
        unsigned num_handles = 0;
        unsigned number_of_ibs;
        VkResult result;

        assert(cs_count);
        cs0 = radv_amdgpu_cs(cs_array[0]);
        aws = cs0->ws;

        /* Compute the number of IBs for this submit. */
        number_of_ibs = cs_count + !!initial_preamble_cs;

        if (aws->debug_all_bos)
                u_rwlock_rdlock(&aws->global_bo_list_lock);

        /* Get the BO list. */
        result = radv_amdgpu_get_bo_list(cs0->ws, &cs_array[0], cs_count, NULL, 0,
                                         initial_preamble_cs, radv_bo_list,
                                         &num_handles, &handles);
        if (result != VK_SUCCESS) {
                goto fail;
        }

        ibs = malloc(number_of_ibs * sizeof(*ibs));
        if (!ibs) {
                free(handles);
                result = VK_ERROR_OUT_OF_HOST_MEMORY;
                goto fail;
        }

        /* Configure the CS request. */
        if (initial_preamble_cs)
                ibs[0] = radv_amdgpu_cs(initial_preamble_cs)->ib;

        for (unsigned i = 0; i < cs_count; i++) {
                struct radv_amdgpu_cs *cs = radv_amdgpu_cs(cs_array[i]);

                ibs[i + !!initial_preamble_cs] = cs->ib;

                if (cs->is_chained) {
                        *cs->ib_size_ptr -= 4;
                        cs->is_chained = false;
                }
        }

        request.ip_type = cs0->hw_ip;
        request.ring = queue_idx;
        request.handles = handles;
        request.num_handles = num_handles;
        request.number_of_ibs = number_of_ibs;
        request.ibs = ibs;
        request.fence_info = radv_set_cs_fence(ctx, cs0->hw_ip, queue_idx);

        /* Submit the CS. */
        result = radv_amdgpu_cs_submit(ctx, &request, sem_info);

        free(request.handles);
        free(ibs);

        if (result != VK_SUCCESS)
                goto fail;

        if (fence)
                radv_amdgpu_request_to_fence(ctx, fence, &request);

        radv_assign_last_submit(ctx, &request);

fail:
        if (aws->debug_all_bos)
                u_rwlock_rdunlock(&aws->global_bo_list_lock);
        return result;
}
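
/* Submit path for chips without IB BO support: command stream contents are
 * copied into freshly allocated GPU buffers, padded to an 8-dword boundary,
 * and submitted in batches that fit within the per-IB dword limit.
 */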
 
static VkResult
radv_amdgpu_winsys_cs_submit_sysmem(struct radeon_winsys_ctx *_ctx,
                                    int queue_idx,
                                    struct radv_winsys_sem_info *sem_info,
                                    const struct radv_winsys_bo_list *radv_bo_list,
                                    struct radeon_cmdbuf **cs_array,
                                    unsigned cs_count,
                                    struct radeon_cmdbuf *initial_preamble_cs,
                                    struct radeon_cmdbuf *continue_preamble_cs,
                                    struct radeon_winsys_fence *_fence)
{
        struct radv_amdgpu_ctx *ctx = radv_amdgpu_ctx(_ctx);
        struct radv_amdgpu_fence *fence = (struct radv_amdgpu_fence *)_fence;
        struct radv_amdgpu_cs *cs0 = radv_amdgpu_cs(cs_array[0]);
        struct radeon_winsys *ws = (struct radeon_winsys*)cs0->ws;
        struct radv_amdgpu_winsys *aws = cs0->ws;
        struct radv_amdgpu_cs_request request;
        uint32_t pad_word = PKT3_NOP_PAD;
        bool emit_signal_sem = sem_info->cs_emit_signal;
        VkResult result;

        if (radv_amdgpu_winsys(ws)->info.chip_class == GFX6)
                pad_word = 0x80000000;

        assert(cs_count);

        for (unsigned i = 0; i < cs_count;) {
                struct amdgpu_cs_ib_info *ibs;
                struct radeon_winsys_bo **bos;
                struct radeon_cmdbuf *preamble_cs = i ? continue_preamble_cs : initial_preamble_cs;
                struct radv_amdgpu_cs *cs = radv_amdgpu_cs(cs_array[i]);
                struct drm_amdgpu_bo_list_entry *handles = NULL;
                unsigned num_handles = 0;
                unsigned number_of_ibs;
                uint32_t *ptr;
                unsigned cnt = 0;
                unsigned pad_words = 0;

                /* Compute the number of IBs for this submit. */
                number_of_ibs = cs->num_old_cs_buffers + 1;

                ibs = malloc(number_of_ibs * sizeof(*ibs));
                if (!ibs)
                        return VK_ERROR_OUT_OF_HOST_MEMORY;

                bos = malloc(number_of_ibs * sizeof(*bos));
                if (!bos) {
                        free(ibs);
                        return VK_ERROR_OUT_OF_HOST_MEMORY;
                }

                if (number_of_ibs > 1) {
                        /* Special path when the maximum size in dwords has
                         * been reached because we need to handle more than one
                         * IB per submit.
                         */
                        struct radeon_cmdbuf **new_cs_array;
                        unsigned idx = 0;

                        new_cs_array = malloc(cs->num_old_cs_buffers *
                                              sizeof(*new_cs_array));
                        assert(new_cs_array);

                        for (unsigned j = 0; j < cs->num_old_cs_buffers; j++)
                                new_cs_array[idx++] = &cs->old_cs_buffers[j];
                        new_cs_array[idx++] = cs_array[i];

                        for (unsigned j = 0; j < number_of_ibs; j++) {
                                struct radeon_cmdbuf *rcs = new_cs_array[j];
                                bool needs_preamble = preamble_cs && j == 0;
                                unsigned size = 0;

                                if (needs_preamble)
                                        size += preamble_cs->cdw;
                                size += rcs->cdw;

                                assert(size < 0xffff8);

                                while (!size || (size & 7)) {
                                        size++;
                                        pad_words++;
                                }

                                bos[j] = ws->buffer_create(ws, 4 * size, 4096,
                                                           aws->cs_bo_domain,
                                                           RADEON_FLAG_CPU_ACCESS |
                                                           RADEON_FLAG_NO_INTERPROCESS_SHARING |
                                                           RADEON_FLAG_READ_ONLY,
                                                           RADV_BO_PRIORITY_CS);
                                ptr = ws->buffer_map(bos[j]);

                                if (needs_preamble) {
                                        memcpy(ptr, preamble_cs->buf, preamble_cs->cdw * 4);
                                        ptr += preamble_cs->cdw;
                                }

                                memcpy(ptr, rcs->buf, 4 * rcs->cdw);
                                ptr += rcs->cdw;

                                for (unsigned k = 0; k < pad_words; ++k)
                                        *ptr++ = pad_word;

                                ibs[j].size = size;
                                ibs[j].ib_mc_address = radv_buffer_get_va(bos[j]);
                                ibs[j].flags = 0;
                        }

                        cnt++;
                        free(new_cs_array);
                } else {
                        unsigned size = 0;

                        if (preamble_cs)
                                size += preamble_cs->cdw;

                        while (i + cnt < cs_count && 0xffff8 - size >= radv_amdgpu_cs(cs_array[i + cnt])->base.cdw) {
                                size += radv_amdgpu_cs(cs_array[i + cnt])->base.cdw;
                                ++cnt;
                        }

                        while (!size || (size & 7)) {
                                size++;
                                pad_words++;
                        }
                        assert(cnt);

                        bos[0] = ws->buffer_create(ws, 4 * size, 4096,
                                                   aws->cs_bo_domain,
                                                   RADEON_FLAG_CPU_ACCESS |
                                                   RADEON_FLAG_NO_INTERPROCESS_SHARING |
                                                   RADEON_FLAG_READ_ONLY,
                                                   RADV_BO_PRIORITY_CS);
                        ptr = ws->buffer_map(bos[0]);

                        if (preamble_cs) {
                                memcpy(ptr, preamble_cs->buf, preamble_cs->cdw * 4);
                                ptr += preamble_cs->cdw;
                        }

                        for (unsigned j = 0; j < cnt; ++j) {
                                struct radv_amdgpu_cs *cs2 = radv_amdgpu_cs(cs_array[i + j]);
                                memcpy(ptr, cs2->base.buf, 4 * cs2->base.cdw);
                                ptr += cs2->base.cdw;

                        }

                        for (unsigned j = 0; j < pad_words; ++j)
                                *ptr++ = pad_word;

                        ibs[0].size = size;
                        ibs[0].ib_mc_address = radv_buffer_get_va(bos[0]);
                        ibs[0].flags = 0;
                }

                if (aws->debug_all_bos)
                        u_rwlock_rdlock(&aws->global_bo_list_lock);

                result = radv_amdgpu_get_bo_list(cs0->ws, &cs_array[i], cnt,
                                                 (struct radv_amdgpu_winsys_bo **)bos,
                                                 number_of_ibs, preamble_cs,
                                                 radv_bo_list,
                                                 &num_handles, &handles);
                if (result != VK_SUCCESS) {
                        free(ibs);
                        free(bos);
                        if (aws->debug_all_bos)
                                u_rwlock_rdunlock(&aws->global_bo_list_lock);
                        return result;
                }

                memset(&request, 0, sizeof(request));

                request.ip_type = cs0->hw_ip;
                request.ring = queue_idx;
                request.handles = handles;
                request.num_handles = num_handles;
                request.number_of_ibs = number_of_ibs;
                request.ibs = ibs;
                request.fence_info = radv_set_cs_fence(ctx, cs0->hw_ip, queue_idx);

                sem_info->cs_emit_signal = (i == cs_count - cnt) ? emit_signal_sem : false;
                result = radv_amdgpu_cs_submit(ctx, &request, sem_info);

                free(request.handles);
                if (aws->debug_all_bos)
                        u_rwlock_rdunlock(&aws->global_bo_list_lock);

                for (unsigned j = 0; j < number_of_ibs; j++) {
                        ws->buffer_destroy(bos[j]);
                }

                free(ibs);
                free(bos);

                if (result != VK_SUCCESS)
                        return result;

                i += cnt;
        }
        if (fence)
                radv_amdgpu_request_to_fence(ctx, fence, &request);

        radv_assign_last_submit(ctx, &request);

        return VK_SUCCESS;
}
 
1263
 
 
1264
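/* Top-level submit entry point: it takes the sysmem-copy path when IB BOs
 * are not usable, the chained path when the command streams can be patched,
 * and the fallback path otherwise, then propagates the resulting submission
 * fence to any semaphores that have to be signalled. */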
static VkResult radv_amdgpu_winsys_cs_submit(struct radeon_winsys_ctx *_ctx,
                                             int queue_idx,
                                             struct radeon_cmdbuf **cs_array,
                                             unsigned cs_count,
                                             struct radeon_cmdbuf *initial_preamble_cs,
                                             struct radeon_cmdbuf *continue_preamble_cs,
                                             struct radv_winsys_sem_info *sem_info,
                                             const struct radv_winsys_bo_list *bo_list,
                                             bool can_patch,
                                             struct radeon_winsys_fence *_fence)
{
        struct radv_amdgpu_cs *cs = radv_amdgpu_cs(cs_array[0]);
        struct radv_amdgpu_ctx *ctx = radv_amdgpu_ctx(_ctx);
        VkResult result;

        assert(sem_info);
        if (!cs->ws->use_ib_bos) {
                result = radv_amdgpu_winsys_cs_submit_sysmem(_ctx, queue_idx, sem_info, bo_list, cs_array,
                                                             cs_count, initial_preamble_cs, continue_preamble_cs, _fence);
        } else if (can_patch) {
                result = radv_amdgpu_winsys_cs_submit_chained(_ctx, queue_idx, sem_info, bo_list, cs_array,
                                                              cs_count, initial_preamble_cs, continue_preamble_cs, _fence);
        } else {
                result = radv_amdgpu_winsys_cs_submit_fallback(_ctx, queue_idx, sem_info, bo_list, cs_array,
                                                               cs_count, initial_preamble_cs, continue_preamble_cs, _fence);
        }

        radv_amdgpu_signal_sems(ctx, cs->hw_ip, queue_idx, sem_info);
        return result;
}

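/* Translates a GPU virtual address back into a CPU pointer by scanning the
 * current IB buffer and any retired IB buffers of this CS; when debug_all_bos
 * is enabled it also searches the winsys-wide BO list. Used as the
 * address-resolution callback for the IB parser below. */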
static void *radv_amdgpu_winsys_get_cpu_addr(void *_cs, uint64_t addr)
{
        struct radv_amdgpu_cs *cs = (struct radv_amdgpu_cs *)_cs;
        void *ret = NULL;

        if (!cs->ib_buffer)
                return NULL;
        for (unsigned i = 0; i <= cs->num_old_ib_buffers; ++i) {
                struct radv_amdgpu_winsys_bo *bo;

                bo = (struct radv_amdgpu_winsys_bo*)
                       (i == cs->num_old_ib_buffers ? cs->ib_buffer : cs->old_ib_buffers[i]);
                if (addr >= bo->base.va && addr - bo->base.va < bo->size) {
                        if (amdgpu_bo_cpu_map(bo->bo, &ret) == 0)
                                return (char *)ret + (addr - bo->base.va);
                }
        }
        if (cs->ws->debug_all_bos) {
                u_rwlock_rdlock(&cs->ws->global_bo_list_lock);
                list_for_each_entry(struct radv_amdgpu_winsys_bo, bo,
                                    &cs->ws->global_bo_list, global_list_item) {
                        if (addr >= bo->base.va && addr - bo->base.va < bo->size) {
                                if (amdgpu_bo_cpu_map(bo->bo, &ret) == 0) {
                                        u_rwlock_rdunlock(&cs->ws->global_bo_list_lock);
                                        return (char *)ret + (addr - bo->base.va);
                                }
                        }
                }
                u_rwlock_rdunlock(&cs->ws->global_bo_list_lock);
        }
        return ret;
}

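/* Dumps the command stream to the given file through ac_parse_ib(). When IB
 * BOs are in use, the IB contents are read back via
 * radv_amdgpu_winsys_get_cpu_addr() rather than from the CPU-side copy. */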
static void radv_amdgpu_winsys_cs_dump(struct radeon_cmdbuf *_cs,
                                       FILE* file,
                                       const int *trace_ids, int trace_id_count)
{
        struct radv_amdgpu_cs *cs = (struct radv_amdgpu_cs *)_cs;
        void *ib = cs->base.buf;
        int num_dw = cs->base.cdw;

        if (cs->ws->use_ib_bos) {
                ib = radv_amdgpu_winsys_get_cpu_addr(cs, cs->ib.ib_mc_address);
                num_dw = cs->ib.size;
        }
        assert(ib);
        ac_parse_ib(file, ib, num_dw, trace_ids, trace_id_count, "main IB",
                    cs->ws->info.chip_class, radv_amdgpu_winsys_get_cpu_addr, cs);
}

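/* Maps the winsys context priority onto the corresponding AMDGPU context
 * priority (REALTIME is submitted as VERY_HIGH). */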
static uint32_t radv_to_amdgpu_priority(enum radeon_ctx_priority radv_priority)
{
        switch (radv_priority) {
                case RADEON_CTX_PRIORITY_REALTIME:
                        return AMDGPU_CTX_PRIORITY_VERY_HIGH;
                case RADEON_CTX_PRIORITY_HIGH:
                        return AMDGPU_CTX_PRIORITY_HIGH;
                case RADEON_CTX_PRIORITY_MEDIUM:
                        return AMDGPU_CTX_PRIORITY_NORMAL;
                case RADEON_CTX_PRIORITY_LOW:
                        return AMDGPU_CTX_PRIORITY_LOW;
                default:
                        unreachable("Invalid context priority");
        }
}

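/* Creates an amdgpu context with the requested priority and allocates a
 * CPU-mappable 4 KiB GTT buffer that holds one fence value per
 * (IP type, ring) pair, as guaranteed by the assert below. */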
static VkResult radv_amdgpu_ctx_create(struct radeon_winsys *_ws,
                                       enum radeon_ctx_priority priority,
                                       struct radeon_winsys_ctx **rctx)
{
        struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);
        struct radv_amdgpu_ctx *ctx = CALLOC_STRUCT(radv_amdgpu_ctx);
        uint32_t amdgpu_priority = radv_to_amdgpu_priority(priority);
        VkResult result;
        int r;

        if (!ctx)
                return VK_ERROR_OUT_OF_HOST_MEMORY;

        r = amdgpu_cs_ctx_create2(ws->dev, amdgpu_priority, &ctx->ctx);
        if (r && r == -EACCES) {
                result = VK_ERROR_NOT_PERMITTED_EXT;
                goto fail_create;
        } else if (r) {
                fprintf(stderr, "amdgpu: radv_amdgpu_cs_ctx_create2 failed. (%i)\n", r);
                result = VK_ERROR_OUT_OF_HOST_MEMORY;
                goto fail_create;
        }
        ctx->ws = ws;

        assert(AMDGPU_HW_IP_NUM * MAX_RINGS_PER_TYPE * sizeof(uint64_t) <= 4096);
        ctx->fence_bo = ws->base.buffer_create(&ws->base, 4096, 8,
                                              RADEON_DOMAIN_GTT,
                                              RADEON_FLAG_CPU_ACCESS |
                                              RADEON_FLAG_NO_INTERPROCESS_SHARING,
                                              RADV_BO_PRIORITY_CS);
        if (!ctx->fence_bo) {
                result = VK_ERROR_OUT_OF_DEVICE_MEMORY;
                goto fail_alloc;
        }

        ctx->fence_map = (uint64_t *)ws->base.buffer_map(ctx->fence_bo);
        if (!ctx->fence_map) {
                result = VK_ERROR_OUT_OF_DEVICE_MEMORY;
                goto fail_map;
        }

        memset(ctx->fence_map, 0, 4096);

        *rctx = (struct radeon_winsys_ctx *)ctx;
        return VK_SUCCESS;

fail_map:
        ws->base.buffer_destroy(ctx->fence_bo);
fail_alloc:
        amdgpu_cs_ctx_free(ctx->ctx);
fail_create:
        FREE(ctx);
        return result;
}

static void radv_amdgpu_ctx_destroy(struct radeon_winsys_ctx *rwctx)
{
        struct radv_amdgpu_ctx *ctx = (struct radv_amdgpu_ctx *)rwctx;
        ctx->ws->base.buffer_destroy(ctx->fence_bo);
        amdgpu_cs_ctx_free(ctx->ctx);
        FREE(ctx);
}

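/* Returns true when the last submission on the given ring has completed (or
 * when nothing has been submitted on it yet), waiting for up to one second. */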
static bool radv_amdgpu_ctx_wait_idle(struct radeon_winsys_ctx *rwctx,
                                      enum ring_type ring_type, int ring_index)
{
        struct radv_amdgpu_ctx *ctx = (struct radv_amdgpu_ctx *)rwctx;
        int ip_type = ring_to_hw_ip(ring_type);

        if (ctx->last_submission[ip_type][ring_index].fence.fence) {
                uint32_t expired;
                int ret = amdgpu_cs_query_fence_status(&ctx->last_submission[ip_type][ring_index].fence,
                                                       1000000000ull, 0, &expired);

                if (ret || !expired)
                        return false;
        }

        return true;
}

static struct radeon_winsys_sem *radv_amdgpu_create_sem(struct radeon_winsys *_ws)
{
        struct amdgpu_cs_fence *sem = CALLOC_STRUCT(amdgpu_cs_fence);
        if (!sem)
                return NULL;

        return (struct radeon_winsys_sem *)sem;
}

static void radv_amdgpu_destroy_sem(struct radeon_winsys_sem *_sem)
{
        struct amdgpu_cs_fence *sem = (struct amdgpu_cs_fence *)_sem;
        FREE(sem);
}

static int radv_amdgpu_signal_sems(struct radv_amdgpu_ctx *ctx,
                                   uint32_t ip_type,
                                   uint32_t ring,
                                   struct radv_winsys_sem_info *sem_info)
{
        for (unsigned i = 0; i < sem_info->signal.sem_count; i++) {
                struct amdgpu_cs_fence *sem = (struct amdgpu_cs_fence *)(sem_info->signal.sem)[i];

                if (sem->context)
                        return -EINVAL;

                *sem = ctx->last_submission[ip_type][ring].fence;
        }
        return 0;
}

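/* Helpers that build the syncobj wait/signal chunks passed to the CS ioctl.
 * The timeline variant emits drm_amdgpu_cs_chunk_syncobj entries (handle,
 * flags, point); the plain variant emits drm_amdgpu_cs_chunk_sem entries. */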
static void *radv_amdgpu_cs_alloc_syncobj_chunk(struct radv_winsys_sem_counts *counts,
                                                const uint32_t *syncobj_override,
                                                struct drm_amdgpu_cs_chunk *chunk, int chunk_id)
{
        const uint32_t *src = syncobj_override ? syncobj_override : counts->syncobj;
        struct drm_amdgpu_cs_chunk_sem *syncobj = malloc(sizeof(struct drm_amdgpu_cs_chunk_sem) * counts->syncobj_count);
        if (!syncobj)
                return NULL;

        for (unsigned i = 0; i < counts->syncobj_count; i++) {
                struct drm_amdgpu_cs_chunk_sem *sem = &syncobj[i];
                sem->handle = src[i];
        }

        chunk->chunk_id = chunk_id;
        chunk->length_dw = sizeof(struct drm_amdgpu_cs_chunk_sem) / 4 * counts->syncobj_count;
        chunk->chunk_data = (uint64_t)(uintptr_t)syncobj;
        return syncobj;
}

static void *
radv_amdgpu_cs_alloc_timeline_syncobj_chunk(struct radv_winsys_sem_counts *counts,
                                            const uint32_t *syncobj_override,
                                            struct drm_amdgpu_cs_chunk *chunk, int chunk_id)
{
        const uint32_t *src = syncobj_override ? syncobj_override : counts->syncobj;
        struct drm_amdgpu_cs_chunk_syncobj *syncobj = malloc(sizeof(struct drm_amdgpu_cs_chunk_syncobj) *
                                                             (counts->syncobj_count + counts->timeline_syncobj_count));
        if (!syncobj)
                return NULL;

        for (unsigned i = 0; i < counts->syncobj_count; i++) {
                struct drm_amdgpu_cs_chunk_syncobj *sem = &syncobj[i];
                sem->handle = src[i];
                sem->flags = 0;
                sem->point = 0;
        }

        for (unsigned i = 0; i < counts->timeline_syncobj_count; i++) {
                struct drm_amdgpu_cs_chunk_syncobj *sem = &syncobj[i + counts->syncobj_count];
                sem->handle = counts->syncobj[i + counts->syncobj_count];
                sem->flags = DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT;
                sem->point = counts->points[i];
        }

        chunk->chunk_id = chunk_id;
        chunk->length_dw = sizeof(struct drm_amdgpu_cs_chunk_syncobj) / 4 *
                (counts->syncobj_count + counts->timeline_syncobj_count);
        chunk->chunk_data = (uint64_t)(uintptr_t)syncobj;
        return syncobj;
}

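/* Small cache of DRM syncobjs protected by syncobj_lock: allocation pops
 * handles from the cache (creating new ones on demand), and freeing pushes
 * them back, destroying handles that no longer fit. */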
static int radv_amdgpu_cache_alloc_syncobjs(struct radv_amdgpu_winsys *ws, unsigned count, uint32_t *dst)
{
        pthread_mutex_lock(&ws->syncobj_lock);
        if (count > ws->syncobj_capacity) {
                if (ws->syncobj_capacity > UINT32_MAX / 2)
                        goto fail;

                unsigned new_capacity = MAX2(count, ws->syncobj_capacity * 2);
                uint32_t *n = realloc(ws->syncobj, new_capacity * sizeof(*ws->syncobj));
                if (!n)
                        goto fail;
                ws->syncobj_capacity = new_capacity;
                ws->syncobj = n;
        }

        while (ws->syncobj_count < count) {
                int r = amdgpu_cs_create_syncobj(ws->dev, ws->syncobj + ws->syncobj_count);
                if (r)
                        goto fail;
                ++ws->syncobj_count;
        }

        for (unsigned i = 0; i < count; ++i)
                dst[i] = ws->syncobj[--ws->syncobj_count];

        pthread_mutex_unlock(&ws->syncobj_lock);
        return 0;

fail:
        pthread_mutex_unlock(&ws->syncobj_lock);
        return -ENOMEM;
}

static void radv_amdgpu_cache_free_syncobjs(struct radv_amdgpu_winsys *ws, unsigned count, uint32_t *src)
{
        pthread_mutex_lock(&ws->syncobj_lock);

        uint32_t cache_count = MIN2(count, UINT32_MAX - ws->syncobj_count);
        if (cache_count + ws->syncobj_count > ws->syncobj_capacity) {
                unsigned new_capacity = MAX2(ws->syncobj_count + cache_count, ws->syncobj_capacity * 2);
                uint32_t* n = realloc(ws->syncobj, new_capacity * sizeof(*ws->syncobj));
                if (n) {
                        ws->syncobj_capacity = new_capacity;
                        ws->syncobj = n;
                }
        }

        for (unsigned i = 0; i < count; ++i) {
                if (ws->syncobj_count < ws->syncobj_capacity)
                        ws->syncobj[ws->syncobj_count++] = src[i];
                else
                        amdgpu_cs_destroy_syncobj(ws->dev, src[i]);
        }

        pthread_mutex_unlock(&ws->syncobj_lock);
}

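/* When timeline syncobjs are supported, the binary wait syncobjs are copied
 * into temporary syncobjs from the cache (with WAIT_FOR_SUBMIT semantics) and
 * the originals are reset, so the caller waits on the temporaries instead. */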
static int radv_amdgpu_cs_prepare_syncobjs(struct radv_amdgpu_winsys *ws,
                                           struct radv_winsys_sem_counts *counts,
                                           uint32_t **out_syncobjs)
{
        int r = 0;

        if (!ws->info.has_timeline_syncobj || !counts->syncobj_count) {
                *out_syncobjs = NULL;
                return 0;
        }

        *out_syncobjs = malloc(counts->syncobj_count * sizeof(**out_syncobjs));
        if (!*out_syncobjs)
                return -ENOMEM;

        r = radv_amdgpu_cache_alloc_syncobjs(ws, counts->syncobj_count, *out_syncobjs);
        if (r)
                return r;

        for (unsigned i = 0; i < counts->syncobj_count; ++i) {
                r = amdgpu_cs_syncobj_transfer(ws->dev, (*out_syncobjs)[i], 0, counts->syncobj[i], 0, DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT);
                if (r)
                        goto fail;
        }

        r = amdgpu_cs_syncobj_reset(ws->dev, counts->syncobj, counts->syncobj_reset_count);
        if (r)
                goto fail;

        return 0;
fail:
        radv_amdgpu_cache_free_syncobjs(ws, counts->syncobj_count, *out_syncobjs);
        free(*out_syncobjs);
        *out_syncobjs = NULL;
        return r;
}

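/* Assembles the chunk array for the raw CS ioctl: one IB chunk per IB, an
 * optional user-fence chunk, syncobj/semaphore wait and signal chunks, and
 * either a legacy pre-created BO list (DRM minor < 27) or an inline
 * BO-handles chunk, then submits through amdgpu_cs_submit_raw2(). */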
static VkResult
radv_amdgpu_cs_submit(struct radv_amdgpu_ctx *ctx,
                      struct radv_amdgpu_cs_request *request,
                      struct radv_winsys_sem_info *sem_info)
{
        int r;
        int num_chunks;
        int size;
        bool user_fence;
        struct drm_amdgpu_cs_chunk *chunks;
        struct drm_amdgpu_cs_chunk_data *chunk_data;
        struct drm_amdgpu_cs_chunk_dep *sem_dependencies = NULL;
        bool use_bo_list_create = ctx->ws->info.drm_minor < 27;
        struct drm_amdgpu_bo_list_in bo_list_in;
        void *wait_syncobj = NULL, *signal_syncobj = NULL;
        uint32_t *in_syncobjs = NULL;
        int i;
        struct amdgpu_cs_fence *sem;
        uint32_t bo_list = 0;
        VkResult result = VK_SUCCESS;

        user_fence = (request->fence_info.handle != NULL);
        size = request->number_of_ibs + (user_fence ? 2 : 1) + (!use_bo_list_create ? 1 : 0) + 3;

        chunks = malloc(sizeof(chunks[0]) * size);
        if (!chunks)
                return VK_ERROR_OUT_OF_HOST_MEMORY;

        size = request->number_of_ibs + (user_fence ? 1 : 0);

        chunk_data = malloc(sizeof(chunk_data[0]) * size);
        if (!chunk_data) {
                result = VK_ERROR_OUT_OF_HOST_MEMORY;
                goto error_out;
        }

        num_chunks = request->number_of_ibs;
        for (i = 0; i < request->number_of_ibs; i++) {
                struct amdgpu_cs_ib_info *ib;
                chunks[i].chunk_id = AMDGPU_CHUNK_ID_IB;
                chunks[i].length_dw = sizeof(struct drm_amdgpu_cs_chunk_ib) / 4;
                chunks[i].chunk_data = (uint64_t)(uintptr_t)&chunk_data[i];

                ib = &request->ibs[i];

                chunk_data[i].ib_data._pad = 0;
                chunk_data[i].ib_data.va_start = ib->ib_mc_address;
                chunk_data[i].ib_data.ib_bytes = ib->size * 4;
                chunk_data[i].ib_data.ip_type = request->ip_type;
                chunk_data[i].ib_data.ip_instance = request->ip_instance;
                chunk_data[i].ib_data.ring = request->ring;
                chunk_data[i].ib_data.flags = ib->flags;
        }

        if (user_fence) {
                i = num_chunks++;

                chunks[i].chunk_id = AMDGPU_CHUNK_ID_FENCE;
                chunks[i].length_dw = sizeof(struct drm_amdgpu_cs_chunk_fence) / 4;
                chunks[i].chunk_data = (uint64_t)(uintptr_t)&chunk_data[i];

                amdgpu_cs_chunk_fence_info_to_data(&request->fence_info,
                                                   &chunk_data[i]);
        }

        if ((sem_info->wait.syncobj_count || sem_info->wait.timeline_syncobj_count) && sem_info->cs_emit_wait) {
                r = radv_amdgpu_cs_prepare_syncobjs(ctx->ws, &sem_info->wait, &in_syncobjs);
                if (r)
                        goto error_out;

                if (ctx->ws->info.has_timeline_syncobj) {
                        wait_syncobj = radv_amdgpu_cs_alloc_timeline_syncobj_chunk(&sem_info->wait,
                                                                                   in_syncobjs,
                                                                                   &chunks[num_chunks],
                                                                                   AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_WAIT);
                } else {
                        wait_syncobj = radv_amdgpu_cs_alloc_syncobj_chunk(&sem_info->wait,
                                                                          in_syncobjs,
                                                                          &chunks[num_chunks],
                                                                          AMDGPU_CHUNK_ID_SYNCOBJ_IN);
                }
                if (!wait_syncobj) {
                        result = VK_ERROR_OUT_OF_HOST_MEMORY;
                        goto error_out;
                }
                num_chunks++;

                if (sem_info->wait.sem_count == 0)
                        sem_info->cs_emit_wait = false;
        }

        if (sem_info->wait.sem_count && sem_info->cs_emit_wait) {
                sem_dependencies = malloc(sizeof(sem_dependencies[0]) * sem_info->wait.sem_count);
                if (!sem_dependencies) {
                        result = VK_ERROR_OUT_OF_HOST_MEMORY;
                        goto error_out;
                }

                int sem_count = 0;

                for (unsigned j = 0; j < sem_info->wait.sem_count; j++) {
                        sem = (struct amdgpu_cs_fence *)sem_info->wait.sem[j];
                        if (!sem->context)
                                continue;
                        struct drm_amdgpu_cs_chunk_dep *dep = &sem_dependencies[sem_count++];

                        amdgpu_cs_chunk_fence_to_dep(sem, dep);

                        sem->context = NULL;
                }
                i = num_chunks++;

                /* dependencies chunk */
                chunks[i].chunk_id = AMDGPU_CHUNK_ID_DEPENDENCIES;
                chunks[i].length_dw = sizeof(struct drm_amdgpu_cs_chunk_dep) / 4 * sem_count;
                chunks[i].chunk_data = (uint64_t)(uintptr_t)sem_dependencies;

                sem_info->cs_emit_wait = false;
        }

        if ((sem_info->signal.syncobj_count || sem_info->signal.timeline_syncobj_count) && sem_info->cs_emit_signal) {
                if (ctx->ws->info.has_timeline_syncobj) {
                        signal_syncobj = radv_amdgpu_cs_alloc_timeline_syncobj_chunk(&sem_info->signal,
                                                                                     NULL,
                                                                                     &chunks[num_chunks],
                                                                                     AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_SIGNAL);
                } else {
                        signal_syncobj = radv_amdgpu_cs_alloc_syncobj_chunk(&sem_info->signal,
                                                                            NULL,
                                                                            &chunks[num_chunks],
                                                                            AMDGPU_CHUNK_ID_SYNCOBJ_OUT);
                }
                if (!signal_syncobj) {
                        result = VK_ERROR_OUT_OF_HOST_MEMORY;
                        goto error_out;
                }
                num_chunks++;
        }

        if (use_bo_list_create) {
                /* Legacy path creating the buffer list handle and passing it
                 * to the CS ioctl.
                 */
                r = amdgpu_bo_list_create_raw(ctx->ws->dev, request->num_handles,
                                              request->handles, &bo_list);
                if (r) {
                        if (r == -ENOMEM) {
                                fprintf(stderr, "amdgpu: Not enough memory for buffer list creation.\n");
                                result = VK_ERROR_OUT_OF_HOST_MEMORY;
                        } else {
                                fprintf(stderr, "amdgpu: buffer list creation failed (%d).\n", r);
                                result = VK_ERROR_UNKNOWN;
                        }
                        goto error_out;
                }
        } else {
                /* Standard path passing the buffer list via the CS ioctl. */
                bo_list_in.operation = ~0;
                bo_list_in.list_handle = ~0;
                bo_list_in.bo_number = request->num_handles;
                bo_list_in.bo_info_size = sizeof(struct drm_amdgpu_bo_list_entry);
                bo_list_in.bo_info_ptr = (uint64_t)(uintptr_t)request->handles;

                chunks[num_chunks].chunk_id = AMDGPU_CHUNK_ID_BO_HANDLES;
                chunks[num_chunks].length_dw = sizeof(struct drm_amdgpu_bo_list_in) / 4;
                chunks[num_chunks].chunk_data = (uintptr_t)&bo_list_in;
                num_chunks++;
        }

        r = amdgpu_cs_submit_raw2(ctx->ws->dev,
                                 ctx->ctx,
                                 bo_list,
                                 num_chunks,
                                 chunks,
                                 &request->seq_no);

        if (r) {
                if (r == -ENOMEM) {
                        fprintf(stderr, "amdgpu: Not enough memory for command submission.\n");
                        result = VK_ERROR_OUT_OF_HOST_MEMORY;
                } else if (r == -ECANCELED) {
                        fprintf(stderr, "amdgpu: The CS has been cancelled because the context is lost.\n");
                        result = VK_ERROR_DEVICE_LOST;
                } else {
                        fprintf(stderr, "amdgpu: The CS has been rejected, "
                                        "see dmesg for more information (%i).\n", r);
                        result = VK_ERROR_UNKNOWN;
                }
        }

        if (bo_list)
                amdgpu_bo_list_destroy_raw(ctx->ws->dev, bo_list);

error_out:
        if (in_syncobjs) {
                radv_amdgpu_cache_free_syncobjs(ctx->ws, sem_info->wait.syncobj_count, in_syncobjs);
                free(in_syncobjs);
        }
        free(chunks);
        free(chunk_data);
        free(sem_dependencies);
        free(wait_syncobj);
        free(signal_syncobj);
        return result;
}

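/* Thin wrappers around the libdrm_amdgpu syncobj API (create, destroy, reset,
 * signal, query, wait, and fd import/export), exposed through the winsys
 * function table filled in at the bottom of this file. */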
static int radv_amdgpu_create_syncobj(struct radeon_winsys *_ws,
                                      bool create_signaled,
                                      uint32_t *handle)
{
        struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);
        uint32_t flags = 0;

        if (create_signaled)
                flags |= DRM_SYNCOBJ_CREATE_SIGNALED;

        return amdgpu_cs_create_syncobj2(ws->dev, flags, handle);
}

static void radv_amdgpu_destroy_syncobj(struct radeon_winsys *_ws,
                                        uint32_t handle)
{
        struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);
        amdgpu_cs_destroy_syncobj(ws->dev, handle);
}

static void radv_amdgpu_reset_syncobj(struct radeon_winsys *_ws,
                                      uint32_t handle)
{
        struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);
        amdgpu_cs_syncobj_reset(ws->dev, &handle, 1);
}

static void radv_amdgpu_signal_syncobj(struct radeon_winsys *_ws,
                                       uint32_t handle, uint64_t point)
{
        struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);
        if (point)
                amdgpu_cs_syncobj_timeline_signal(ws->dev, &handle, &point, 1);
        else
                amdgpu_cs_syncobj_signal(ws->dev, &handle, 1);
}

static VkResult radv_amdgpu_query_syncobj(struct radeon_winsys *_ws,
                                          uint32_t handle, uint64_t *point)
{
        struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);
        int ret = amdgpu_cs_syncobj_query(ws->dev, &handle, point, 1);
        if (ret == 0)
                return VK_SUCCESS;
        else if (ret == -ENOMEM)
                return VK_ERROR_OUT_OF_HOST_MEMORY;
        else {
                /* The remaining errors are driver-internal issues: EFAULT for
                 * dangling pointers and ENOENT for a non-existing syncobj. */
                fprintf(stderr, "amdgpu: internal error in radv_amdgpu_query_syncobj. (%d)\n", ret);
                return VK_ERROR_UNKNOWN;
        }
}

static bool radv_amdgpu_wait_syncobj(struct radeon_winsys *_ws, const uint32_t *handles,
                                     uint32_t handle_count, bool wait_all, uint64_t timeout)
{
        struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);
        uint32_t tmp;

        /* The timeouts are signed, while Vulkan timeouts are unsigned. */
        timeout = MIN2(timeout, INT64_MAX);

        int ret = amdgpu_cs_syncobj_wait(ws->dev, (uint32_t*)handles, handle_count, timeout,
                                         DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT |
                                         (wait_all ? DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL : 0),
                                         &tmp);
        if (ret == 0) {
                return true;
        } else if (ret == -ETIME) {
                return false;
        } else {
                fprintf(stderr, "amdgpu: radv_amdgpu_wait_syncobj failed!\nerrno: %d\n", errno);
                return false;
        }
}

static bool radv_amdgpu_wait_timeline_syncobj(struct radeon_winsys *_ws, const uint32_t *handles,
                                              const uint64_t *points, uint32_t handle_count,
                                              bool wait_all, bool available, uint64_t timeout)
{
        struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);

        /* The timeouts are signed, while Vulkan timeouts are unsigned. */
        timeout = MIN2(timeout, INT64_MAX);

        int ret = amdgpu_cs_syncobj_timeline_wait(ws->dev, (uint32_t*)handles, (uint64_t*)points,
                                                  handle_count, timeout,
                                                  DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT |
                                                  (wait_all ? DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL : 0) |
                                                  (available ? DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE : 0),
                                                  NULL);
        if (ret == 0) {
                return true;
        } else if (ret == -ETIME) {
                return false;
        } else {
                fprintf(stderr, "amdgpu: radv_amdgpu_wait_timeline_syncobj failed! (%d)\n", errno);
                return false;
        }
}

static int radv_amdgpu_export_syncobj(struct radeon_winsys *_ws,
                                      uint32_t syncobj,
                                      int *fd)
{
        struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);

        return amdgpu_cs_export_syncobj(ws->dev, syncobj, fd);
}

static int radv_amdgpu_import_syncobj(struct radeon_winsys *_ws,
                                      int fd,
                                      uint32_t *syncobj)
{
        struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);

        return amdgpu_cs_import_syncobj(ws->dev, fd, syncobj);
}

static int radv_amdgpu_export_syncobj_to_sync_file(struct radeon_winsys *_ws,
                                                   uint32_t syncobj,
                                                   int *fd)
{
        struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);

        return amdgpu_cs_syncobj_export_sync_file(ws->dev, syncobj, fd);
}

static int radv_amdgpu_import_syncobj_from_sync_file(struct radeon_winsys *_ws,
                                                     uint32_t syncobj,
                                                     int fd)
{
        struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);

        return amdgpu_cs_syncobj_import_sync_file(ws->dev, syncobj, fd);
}

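/* Fills in the command-stream related entry points of the winsys vtable. */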
void radv_amdgpu_cs_init_functions(struct radv_amdgpu_winsys *ws)
{
        ws->base.ctx_create = radv_amdgpu_ctx_create;
        ws->base.ctx_destroy = radv_amdgpu_ctx_destroy;
        ws->base.ctx_wait_idle = radv_amdgpu_ctx_wait_idle;
        ws->base.cs_create = radv_amdgpu_cs_create;
        ws->base.cs_destroy = radv_amdgpu_cs_destroy;
        ws->base.cs_grow = radv_amdgpu_cs_grow;
        ws->base.cs_finalize = radv_amdgpu_cs_finalize;
        ws->base.cs_reset = radv_amdgpu_cs_reset;
        ws->base.cs_add_buffer = radv_amdgpu_cs_add_buffer;
        ws->base.cs_execute_secondary = radv_amdgpu_cs_execute_secondary;
        ws->base.cs_submit = radv_amdgpu_winsys_cs_submit;
        ws->base.cs_dump = radv_amdgpu_winsys_cs_dump;
        ws->base.create_fence = radv_amdgpu_create_fence;
        ws->base.destroy_fence = radv_amdgpu_destroy_fence;
        ws->base.reset_fence = radv_amdgpu_reset_fence;
        ws->base.signal_fence = radv_amdgpu_signal_fence;
        ws->base.is_fence_waitable = radv_amdgpu_is_fence_waitable;
        ws->base.create_sem = radv_amdgpu_create_sem;
        ws->base.destroy_sem = radv_amdgpu_destroy_sem;
        ws->base.create_syncobj = radv_amdgpu_create_syncobj;
        ws->base.destroy_syncobj = radv_amdgpu_destroy_syncobj;
        ws->base.reset_syncobj = radv_amdgpu_reset_syncobj;
        ws->base.signal_syncobj = radv_amdgpu_signal_syncobj;
        ws->base.query_syncobj = radv_amdgpu_query_syncobj;
        ws->base.wait_syncobj = radv_amdgpu_wait_syncobj;
        ws->base.wait_timeline_syncobj = radv_amdgpu_wait_timeline_syncobj;
        ws->base.export_syncobj = radv_amdgpu_export_syncobj;
        ws->base.import_syncobj = radv_amdgpu_import_syncobj;
        ws->base.export_syncobj_to_sync_file = radv_amdgpu_export_syncobj_to_sync_file;
        ws->base.import_syncobj_from_sync_file = radv_amdgpu_import_syncobj_from_sync_file;
        ws->base.fence_wait = radv_amdgpu_fence_wait;
        ws->base.fences_wait = radv_amdgpu_fences_wait;
}