/*
 * Copyright 2014, 2015 Red Hat.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
28
#include <sys/ioctl.h>
31
#include "os/os_mman.h"
32
#include "util/os_file.h"
33
#include "util/os_time.h"
34
#include "util/u_memory.h"
35
#include "util/format/u_format.h"
36
#include "util/u_hash_table.h"
37
#include "util/u_inlines.h"
38
#include "util/u_pointer.h"
39
#include "frontend/drm_driver.h"
40
#include "virgl/virgl_screen.h"
41
#include "virgl/virgl_public.h"
42
#include "virtio-gpu/virgl_protocol.h"
46
#include "drm-uapi/virtgpu_drm.h"
48
#include "virgl_drm_winsys.h"
49
#include "virgl_drm_public.h"
51
// Delete local definitions when virglrenderer_hw.h becomes public
52
#define VIRGL_DRM_CAPSET_VIRGL 1
53
#define VIRGL_DRM_CAPSET_VIRGL2 2
55
#define VIRGL_DRM_VERSION(major, minor) ((major) << 16 | (minor))
56
#define VIRGL_DRM_VERSION_FENCE_FD VIRGL_DRM_VERSION(0, 1)
58
/* Gets a pointer to the virgl_hw_res containing the pointed to cache entry. */
59
#define cache_entry_container_res(ptr) \
60
(struct virgl_hw_res*)((char*)ptr - offsetof(struct virgl_hw_res, cache_entry))
62
static inline boolean can_cache_resource(uint32_t bind)
64
return bind == VIRGL_BIND_CONSTANT_BUFFER ||
65
bind == VIRGL_BIND_INDEX_BUFFER ||
66
bind == VIRGL_BIND_VERTEX_BUFFER ||
67
bind == VIRGL_BIND_CUSTOM ||
68
bind == VIRGL_BIND_STAGING ||
69
bind == VIRGL_BIND_DEPTH_STENCIL ||
70
bind == VIRGL_BIND_RENDER_TARGET ||
74
static void virgl_hw_res_destroy(struct virgl_drm_winsys *qdws,
75
struct virgl_hw_res *res)
77
struct drm_gem_close args;
79
mtx_lock(&qdws->bo_handles_mutex);
81
/* We intentionally avoid taking the lock in
82
* virgl_drm_resource_reference. Now that the
83
* lock is taken, we need to check the refcount
85
if (pipe_is_referenced(&res->reference)) {
86
mtx_unlock(&qdws->bo_handles_mutex);
90
_mesa_hash_table_remove_key(qdws->bo_handles,
91
(void *)(uintptr_t)res->bo_handle);
93
_mesa_hash_table_remove_key(qdws->bo_names,
94
(void *)(uintptr_t)res->flink_name);
95
mtx_unlock(&qdws->bo_handles_mutex);
97
os_munmap(res->ptr, res->size);
99
memset(&args, 0, sizeof(args));
100
args.handle = res->bo_handle;
101
drmIoctl(qdws->fd, DRM_IOCTL_GEM_CLOSE, &args);
105
static boolean virgl_drm_resource_is_busy(struct virgl_winsys *vws,
106
struct virgl_hw_res *res)
108
struct virgl_drm_winsys *vdws = virgl_drm_winsys(vws);
109
struct drm_virtgpu_3d_wait waitcmd;
112
if (!p_atomic_read(&res->maybe_busy) && !p_atomic_read(&res->external))
115
memset(&waitcmd, 0, sizeof(waitcmd));
116
waitcmd.handle = res->bo_handle;
117
waitcmd.flags = VIRTGPU_WAIT_NOWAIT;
119
ret = drmIoctl(vdws->fd, DRM_IOCTL_VIRTGPU_WAIT, &waitcmd);
120
if (ret && errno == EBUSY)
123
p_atomic_set(&res->maybe_busy, false);
129
virgl_drm_winsys_destroy(struct virgl_winsys *qws)
131
struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
133
virgl_resource_cache_flush(&qdws->cache);
135
_mesa_hash_table_destroy(qdws->bo_handles, NULL);
136
_mesa_hash_table_destroy(qdws->bo_names, NULL);
137
mtx_destroy(&qdws->bo_handles_mutex);
138
mtx_destroy(&qdws->mutex);
143
static void virgl_drm_resource_reference(struct virgl_winsys *qws,
144
struct virgl_hw_res **dres,
145
struct virgl_hw_res *sres)
147
struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
148
struct virgl_hw_res *old = *dres;
150
if (pipe_reference(&(*dres)->reference, &sres->reference)) {
152
if (!can_cache_resource(old->bind) ||
153
p_atomic_read(&old->external)) {
154
virgl_hw_res_destroy(qdws, old);
156
mtx_lock(&qdws->mutex);
157
virgl_resource_cache_add(&qdws->cache, &old->cache_entry);
158
mtx_unlock(&qdws->mutex);
164
static struct virgl_hw_res *
165
virgl_drm_winsys_resource_create_blob(struct virgl_winsys *qws,
166
enum pipe_texture_target target,
180
uint32_t cmd[VIRGL_PIPE_RES_CREATE_SIZE + 1] = { 0 };
181
struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
182
struct drm_virtgpu_resource_create_blob drm_rc_blob = { 0 };
183
struct virgl_hw_res *res;
184
struct virgl_resource_params params = { .size = size,
188
.nr_samples = nr_samples,
192
.array_size = array_size,
193
.last_level = last_level,
196
res = CALLOC_STRUCT(virgl_hw_res);
200
/* Make sure blob is page aligned. */
201
if (flags & (VIRGL_RESOURCE_FLAG_MAP_PERSISTENT |
202
VIRGL_RESOURCE_FLAG_MAP_COHERENT)) {
203
width = ALIGN(width, getpagesize());
204
size = ALIGN(size, getpagesize());
207
blob_id = p_atomic_inc_return(&qdws->blob_id);
208
cmd[0] = VIRGL_CMD0(VIRGL_CCMD_PIPE_RESOURCE_CREATE, 0, VIRGL_PIPE_RES_CREATE_SIZE);
209
cmd[VIRGL_PIPE_RES_CREATE_FORMAT] = format;
210
cmd[VIRGL_PIPE_RES_CREATE_BIND] = bind;
211
cmd[VIRGL_PIPE_RES_CREATE_TARGET] = target;
212
cmd[VIRGL_PIPE_RES_CREATE_WIDTH] = width;
213
cmd[VIRGL_PIPE_RES_CREATE_HEIGHT] = height;
214
cmd[VIRGL_PIPE_RES_CREATE_DEPTH] = depth;
215
cmd[VIRGL_PIPE_RES_CREATE_ARRAY_SIZE] = array_size;
216
cmd[VIRGL_PIPE_RES_CREATE_LAST_LEVEL] = last_level;
217
cmd[VIRGL_PIPE_RES_CREATE_NR_SAMPLES] = nr_samples;
218
cmd[VIRGL_PIPE_RES_CREATE_FLAGS] = flags;
219
cmd[VIRGL_PIPE_RES_CREATE_BLOB_ID] = blob_id;
221
drm_rc_blob.cmd = (unsigned long)(void *)&cmd;
222
drm_rc_blob.cmd_size = 4 * (VIRGL_PIPE_RES_CREATE_SIZE + 1);
223
drm_rc_blob.size = size;
224
drm_rc_blob.blob_mem = VIRTGPU_BLOB_MEM_HOST3D;
225
drm_rc_blob.blob_flags = VIRTGPU_BLOB_FLAG_USE_MAPPABLE;
226
drm_rc_blob.blob_id = (uint64_t) blob_id;
228
ret = drmIoctl(qdws->fd, DRM_IOCTL_VIRTGPU_RESOURCE_CREATE_BLOB, &drm_rc_blob);
235
res->res_handle = drm_rc_blob.res_handle;
236
res->bo_handle = drm_rc_blob.bo_handle;
239
res->maybe_untyped = false;
240
pipe_reference_init(&res->reference, 1);
241
p_atomic_set(&res->external, false);
242
p_atomic_set(&res->num_cs_references, 0);
243
virgl_resource_cache_entry_init(&res->cache_entry, params);
247
static struct virgl_hw_res *
248
virgl_drm_winsys_resource_create(struct virgl_winsys *qws,
249
enum pipe_texture_target target,
261
struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
262
struct drm_virtgpu_resource_create createcmd;
264
struct virgl_hw_res *res;
265
uint32_t stride = width * util_format_get_blocksize(format);
266
struct virgl_resource_params params = { .size = size,
270
.nr_samples = nr_samples,
274
.array_size = array_size,
275
.last_level = last_level,
278
res = CALLOC_STRUCT(virgl_hw_res);
282
memset(&createcmd, 0, sizeof(createcmd));
283
createcmd.target = target;
284
createcmd.format = pipe_to_virgl_format(format);
285
createcmd.bind = bind;
286
createcmd.width = width;
287
createcmd.height = height;
288
createcmd.depth = depth;
289
createcmd.array_size = array_size;
290
createcmd.last_level = last_level;
291
createcmd.nr_samples = nr_samples;
292
createcmd.stride = stride;
293
createcmd.size = size;
295
ret = drmIoctl(qdws->fd, DRM_IOCTL_VIRTGPU_RESOURCE_CREATE, &createcmd);
303
res->res_handle = createcmd.res_handle;
304
res->bo_handle = createcmd.bo_handle;
306
res->target = target;
307
res->maybe_untyped = false;
308
pipe_reference_init(&res->reference, 1);
309
p_atomic_set(&res->external, false);
310
p_atomic_set(&res->num_cs_references, 0);
312
/* A newly created resource is considered busy by the kernel until the
313
* command is retired. But for our purposes, we can consider it idle
314
* unless it is used for fencing.
316
p_atomic_set(&res->maybe_busy, for_fencing);
318
virgl_resource_cache_entry_init(&res->cache_entry, params);
324
* Previously, with DRM_IOCTL_VIRTGPU_RESOURCE_CREATE, all host resources had
325
* a guest memory shadow resource with size = stride * bpp. Virglrenderer
326
* would guess the stride implicitly when performing transfer operations, if
327
* the stride wasn't specified. Interestingly, vtest would specify the stride.
329
* Guessing the stride breaks down with YUV images, which may be imported into
330
* Mesa as 3R8 images. It also doesn't work if an external allocator
331
* (i.e, minigbm) decides to use a stride not equal to stride * bpp. With blob
332
* resources, the size = stride * bpp restriction no longer holds, so use
333
* explicit strides passed into Mesa.
335
static inline bool use_explicit_stride(struct virgl_hw_res *res, uint32_t level,
338
return (params[param_resource_blob].value &&
339
res->blob_mem == VIRTGPU_BLOB_MEM_HOST3D_GUEST &&
340
res->target == PIPE_TEXTURE_2D &&
341
level == 0 && depth == 1);
345
virgl_bo_transfer_put(struct virgl_winsys *vws,
346
struct virgl_hw_res *res,
347
const struct pipe_box *box,
348
uint32_t stride, uint32_t layer_stride,
349
uint32_t buf_offset, uint32_t level)
351
struct virgl_drm_winsys *vdws = virgl_drm_winsys(vws);
352
struct drm_virtgpu_3d_transfer_to_host tohostcmd;
354
p_atomic_set(&res->maybe_busy, true);
356
memset(&tohostcmd, 0, sizeof(tohostcmd));
357
tohostcmd.bo_handle = res->bo_handle;
358
tohostcmd.box.x = box->x;
359
tohostcmd.box.y = box->y;
360
tohostcmd.box.z = box->z;
361
tohostcmd.box.w = box->width;
362
tohostcmd.box.h = box->height;
363
tohostcmd.box.d = box->depth;
364
tohostcmd.offset = buf_offset;
365
tohostcmd.level = level;
367
if (use_explicit_stride(res, level, box->depth))
368
tohostcmd.stride = stride;
370
return drmIoctl(vdws->fd, DRM_IOCTL_VIRTGPU_TRANSFER_TO_HOST, &tohostcmd);
374
virgl_bo_transfer_get(struct virgl_winsys *vws,
375
struct virgl_hw_res *res,
376
const struct pipe_box *box,
377
uint32_t stride, uint32_t layer_stride,
378
uint32_t buf_offset, uint32_t level)
380
struct virgl_drm_winsys *vdws = virgl_drm_winsys(vws);
381
struct drm_virtgpu_3d_transfer_from_host fromhostcmd;
383
p_atomic_set(&res->maybe_busy, true);
385
memset(&fromhostcmd, 0, sizeof(fromhostcmd));
386
fromhostcmd.bo_handle = res->bo_handle;
387
fromhostcmd.level = level;
388
fromhostcmd.offset = buf_offset;
389
fromhostcmd.box.x = box->x;
390
fromhostcmd.box.y = box->y;
391
fromhostcmd.box.z = box->z;
392
fromhostcmd.box.w = box->width;
393
fromhostcmd.box.h = box->height;
394
fromhostcmd.box.d = box->depth;
396
if (use_explicit_stride(res, level, box->depth))
397
fromhostcmd.stride = stride;
399
return drmIoctl(vdws->fd, DRM_IOCTL_VIRTGPU_TRANSFER_FROM_HOST, &fromhostcmd);
402
static struct virgl_hw_res *
403
virgl_drm_winsys_resource_cache_create(struct virgl_winsys *qws,
404
enum pipe_texture_target target,
405
const void *map_front_private,
417
struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
418
struct virgl_hw_res *res;
419
struct virgl_resource_cache_entry *entry;
420
struct virgl_resource_params params = { .size = size,
424
.nr_samples = nr_samples,
428
.array_size = array_size,
429
.last_level = last_level,
432
if (!can_cache_resource(bind))
435
mtx_lock(&qdws->mutex);
437
entry = virgl_resource_cache_remove_compatible(&qdws->cache, params);
439
res = cache_entry_container_res(entry);
440
mtx_unlock(&qdws->mutex);
441
pipe_reference_init(&res->reference, 1);
445
mtx_unlock(&qdws->mutex);
448
if (flags & (VIRGL_RESOURCE_FLAG_MAP_PERSISTENT |
449
VIRGL_RESOURCE_FLAG_MAP_COHERENT))
450
res = virgl_drm_winsys_resource_create_blob(qws, target, format, bind,
451
width, height, depth,
452
array_size, last_level,
453
nr_samples, flags, size);
455
res = virgl_drm_winsys_resource_create(qws, target, format, bind, width,
456
height, depth, array_size,
457
last_level, nr_samples, size,
462
static struct virgl_hw_res *
463
virgl_drm_winsys_resource_create_handle(struct virgl_winsys *qws,
464
struct winsys_handle *whandle,
467
uint32_t *plane_offset,
471
struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
472
struct drm_gem_open open_arg = {};
473
struct drm_virtgpu_resource_info info_arg = {};
474
struct virgl_hw_res *res = NULL;
475
uint32_t handle = whandle->handle;
477
if (whandle->plane >= VIRGL_MAX_PLANE_COUNT) {
481
if (whandle->offset != 0 && whandle->type == WINSYS_HANDLE_TYPE_SHARED) {
482
_debug_printf("attempt to import unsupported winsys offset %u\n",
485
} else if (whandle->type == WINSYS_HANDLE_TYPE_FD) {
486
*plane = whandle->plane;
487
*stride = whandle->stride;
488
*plane_offset = whandle->offset;
489
*modifier = whandle->modifier;
492
mtx_lock(&qdws->bo_handles_mutex);
494
/* We must maintain a list of pairs <handle, bo>, so that we always return
495
* the same BO for one particular handle. If we didn't do that and created
496
* more than one BO for the same handle and then relocated them in a CS,
497
* we would hit a deadlock in the kernel.
499
* The list of pairs is guarded by a mutex, of course. */
500
if (whandle->type == WINSYS_HANDLE_TYPE_SHARED) {
501
res = util_hash_table_get(qdws->bo_names, (void*)(uintptr_t)handle);
502
} else if (whandle->type == WINSYS_HANDLE_TYPE_FD) {
504
r = drmPrimeFDToHandle(qdws->fd, whandle->handle, &handle);
507
res = util_hash_table_get(qdws->bo_handles, (void*)(uintptr_t)handle);
509
/* Unknown handle type */
514
/* qdws->bo_{names,handles} hold weak pointers to virgl_hw_res. Because
515
* virgl_drm_resource_reference does not take qdws->bo_handles_mutex
516
* until it enters virgl_hw_res_destroy, there is a small window that
517
* the refcount can drop to zero. Call p_atomic_inc directly instead of
518
* virgl_drm_resource_reference to avoid hitting assert failures.
520
p_atomic_inc(&res->reference.count);
524
res = CALLOC_STRUCT(virgl_hw_res);
528
if (whandle->type == WINSYS_HANDLE_TYPE_FD) {
529
res->bo_handle = handle;
531
memset(&open_arg, 0, sizeof(open_arg));
532
open_arg.name = whandle->handle;
533
if (drmIoctl(qdws->fd, DRM_IOCTL_GEM_OPEN, &open_arg)) {
538
res->bo_handle = open_arg.handle;
539
res->flink_name = whandle->handle;
542
memset(&info_arg, 0, sizeof(info_arg));
543
info_arg.bo_handle = res->bo_handle;
545
if (drmIoctl(qdws->fd, DRM_IOCTL_VIRTGPU_RESOURCE_INFO, &info_arg)) {
552
res->res_handle = info_arg.res_handle;
553
res->blob_mem = info_arg.blob_mem;
554
*blob_mem = info_arg.blob_mem;
556
res->size = info_arg.size;
557
res->maybe_untyped = info_arg.blob_mem ? true : false;
558
pipe_reference_init(&res->reference, 1);
559
p_atomic_set(&res->external, true);
560
res->num_cs_references = 0;
563
_mesa_hash_table_insert(qdws->bo_names, (void *)(uintptr_t)res->flink_name, res);
564
_mesa_hash_table_insert(qdws->bo_handles, (void *)(uintptr_t)res->bo_handle, res);
567
mtx_unlock(&qdws->bo_handles_mutex);
572
virgl_drm_winsys_resource_set_type(struct virgl_winsys *qws,
573
struct virgl_hw_res *res,
574
uint32_t format, uint32_t bind,
575
uint32_t width, uint32_t height,
576
uint32_t usage, uint64_t modifier,
577
uint32_t plane_count,
578
const uint32_t *plane_strides,
579
const uint32_t *plane_offsets)
581
struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
582
uint32_t cmd[VIRGL_PIPE_RES_SET_TYPE_SIZE(VIRGL_MAX_PLANE_COUNT)];
583
struct drm_virtgpu_execbuffer eb;
586
mtx_lock(&qdws->bo_handles_mutex);
588
if (!res->maybe_untyped) {
589
mtx_unlock(&qdws->bo_handles_mutex);
592
res->maybe_untyped = false;
594
assert(plane_count && plane_count <= VIRGL_MAX_PLANE_COUNT);
596
cmd[0] = VIRGL_CMD0(VIRGL_CCMD_PIPE_RESOURCE_SET_TYPE, 0, VIRGL_PIPE_RES_SET_TYPE_SIZE(plane_count));
597
cmd[VIRGL_PIPE_RES_SET_TYPE_RES_HANDLE] = res->res_handle,
598
cmd[VIRGL_PIPE_RES_SET_TYPE_FORMAT] = format;
599
cmd[VIRGL_PIPE_RES_SET_TYPE_BIND] = bind;
600
cmd[VIRGL_PIPE_RES_SET_TYPE_WIDTH] = width;
601
cmd[VIRGL_PIPE_RES_SET_TYPE_HEIGHT] = height;
602
cmd[VIRGL_PIPE_RES_SET_TYPE_USAGE] = usage;
603
cmd[VIRGL_PIPE_RES_SET_TYPE_MODIFIER_LO] = (uint32_t)modifier;
604
cmd[VIRGL_PIPE_RES_SET_TYPE_MODIFIER_HI] = (uint32_t)(modifier >> 32);
605
for (uint32_t i = 0; i < plane_count; i++) {
606
cmd[VIRGL_PIPE_RES_SET_TYPE_PLANE_STRIDE(i)] = plane_strides[i];
607
cmd[VIRGL_PIPE_RES_SET_TYPE_PLANE_OFFSET(i)] = plane_offsets[i];
610
memset(&eb, 0, sizeof(eb));
611
eb.command = (uintptr_t)cmd;
612
eb.size = (1 + VIRGL_PIPE_RES_SET_TYPE_SIZE(plane_count)) * 4;
613
eb.num_bo_handles = 1;
614
eb.bo_handles = (uintptr_t)&res->bo_handle;
616
ret = drmIoctl(qdws->fd, DRM_IOCTL_VIRTGPU_EXECBUFFER, &eb);
618
_debug_printf("failed to set resource type: %s", strerror(errno));
620
mtx_unlock(&qdws->bo_handles_mutex);
623
static boolean virgl_drm_winsys_resource_get_handle(struct virgl_winsys *qws,
624
struct virgl_hw_res *res,
626
struct winsys_handle *whandle)
628
struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
629
struct drm_gem_flink flink;
634
if (whandle->type == WINSYS_HANDLE_TYPE_SHARED) {
635
if (!res->flink_name) {
636
memset(&flink, 0, sizeof(flink));
637
flink.handle = res->bo_handle;
639
if (drmIoctl(qdws->fd, DRM_IOCTL_GEM_FLINK, &flink)) {
642
res->flink_name = flink.name;
644
mtx_lock(&qdws->bo_handles_mutex);
645
_mesa_hash_table_insert(qdws->bo_names, (void *)(uintptr_t)res->flink_name, res);
646
mtx_unlock(&qdws->bo_handles_mutex);
648
whandle->handle = res->flink_name;
649
} else if (whandle->type == WINSYS_HANDLE_TYPE_KMS) {
650
whandle->handle = res->bo_handle;
651
} else if (whandle->type == WINSYS_HANDLE_TYPE_FD) {
652
if (drmPrimeHandleToFD(qdws->fd, res->bo_handle, DRM_CLOEXEC, (int*)&whandle->handle))
654
mtx_lock(&qdws->bo_handles_mutex);
655
_mesa_hash_table_insert(qdws->bo_handles, (void *)(uintptr_t)res->bo_handle, res);
656
mtx_unlock(&qdws->bo_handles_mutex);
659
p_atomic_set(&res->external, true);
661
whandle->stride = stride;
665
static void *virgl_drm_resource_map(struct virgl_winsys *qws,
666
struct virgl_hw_res *res)
668
struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
669
struct drm_virtgpu_map mmap_arg;
675
memset(&mmap_arg, 0, sizeof(mmap_arg));
676
mmap_arg.handle = res->bo_handle;
677
if (drmIoctl(qdws->fd, DRM_IOCTL_VIRTGPU_MAP, &mmap_arg))
680
ptr = os_mmap(0, res->size, PROT_READ|PROT_WRITE, MAP_SHARED,
681
qdws->fd, mmap_arg.offset);
682
if (ptr == MAP_FAILED)
690
static void virgl_drm_resource_wait(struct virgl_winsys *qws,
691
struct virgl_hw_res *res)
693
struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
694
struct drm_virtgpu_3d_wait waitcmd;
697
if (!p_atomic_read(&res->maybe_busy) && !p_atomic_read(&res->external))
700
memset(&waitcmd, 0, sizeof(waitcmd));
701
waitcmd.handle = res->bo_handle;
703
ret = drmIoctl(qdws->fd, DRM_IOCTL_VIRTGPU_WAIT, &waitcmd);
705
_debug_printf("waiting got error - %d, slow gpu or hang?\n", errno);
707
p_atomic_set(&res->maybe_busy, false);
710
static bool virgl_drm_alloc_res_list(struct virgl_drm_cmd_buf *cbuf,
713
cbuf->nres = initial_size;
716
cbuf->res_bo = CALLOC(cbuf->nres, sizeof(struct virgl_hw_buf*));
720
cbuf->res_hlist = MALLOC(cbuf->nres * sizeof(uint32_t));
721
if (!cbuf->res_hlist) {
729
static void virgl_drm_free_res_list(struct virgl_drm_cmd_buf *cbuf)
733
for (i = 0; i < cbuf->cres; i++) {
734
p_atomic_dec(&cbuf->res_bo[i]->num_cs_references);
735
virgl_drm_resource_reference(cbuf->ws, &cbuf->res_bo[i], NULL);
737
FREE(cbuf->res_hlist);
741
static boolean virgl_drm_lookup_res(struct virgl_drm_cmd_buf *cbuf,
742
struct virgl_hw_res *res)
744
unsigned hash = res->res_handle & (sizeof(cbuf->is_handle_added)-1);
747
if (cbuf->is_handle_added[hash]) {
748
i = cbuf->reloc_indices_hashlist[hash];
749
if (cbuf->res_bo[i] == res)
752
for (i = 0; i < cbuf->cres; i++) {
753
if (cbuf->res_bo[i] == res) {
754
cbuf->reloc_indices_hashlist[hash] = i;
762
static void virgl_drm_add_res(struct virgl_drm_winsys *qdws,
763
struct virgl_drm_cmd_buf *cbuf,
764
struct virgl_hw_res *res)
766
unsigned hash = res->res_handle & (sizeof(cbuf->is_handle_added)-1);
768
if (cbuf->cres >= cbuf->nres) {
769
unsigned new_nres = cbuf->nres + 256;
770
void *new_ptr = REALLOC(cbuf->res_bo,
771
cbuf->nres * sizeof(struct virgl_hw_buf*),
772
new_nres * sizeof(struct virgl_hw_buf*));
774
_debug_printf("failure to add relocation %d, %d\n", cbuf->cres, new_nres);
777
cbuf->res_bo = new_ptr;
779
new_ptr = REALLOC(cbuf->res_hlist,
780
cbuf->nres * sizeof(uint32_t),
781
new_nres * sizeof(uint32_t));
783
_debug_printf("failure to add hlist relocation %d, %d\n", cbuf->cres, cbuf->nres);
786
cbuf->res_hlist = new_ptr;
787
cbuf->nres = new_nres;
790
cbuf->res_bo[cbuf->cres] = NULL;
791
virgl_drm_resource_reference(&qdws->base, &cbuf->res_bo[cbuf->cres], res);
792
cbuf->res_hlist[cbuf->cres] = res->bo_handle;
793
cbuf->is_handle_added[hash] = TRUE;
795
cbuf->reloc_indices_hashlist[hash] = cbuf->cres;
796
p_atomic_inc(&res->num_cs_references);
800
/* This is called after the cbuf is submitted. */
801
static void virgl_drm_clear_res_list(struct virgl_drm_cmd_buf *cbuf)
805
for (i = 0; i < cbuf->cres; i++) {
806
/* mark all BOs busy after submission */
807
p_atomic_set(&cbuf->res_bo[i]->maybe_busy, true);
809
p_atomic_dec(&cbuf->res_bo[i]->num_cs_references);
810
virgl_drm_resource_reference(cbuf->ws, &cbuf->res_bo[i], NULL);
815
memset(cbuf->is_handle_added, 0, sizeof(cbuf->is_handle_added));
818
/* Emit a resource handle into the command stream (when write_buf is set)
 * and make sure the resource is tracked in the cbuf's resource list. */
static void virgl_drm_emit_res(struct virgl_winsys *qws,
                               struct virgl_cmd_buf *_cbuf,
                               struct virgl_hw_res *res, boolean write_buf)
{
   struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
   struct virgl_drm_cmd_buf *cbuf = virgl_drm_cmd_buf(_cbuf);
   boolean already_in_list = virgl_drm_lookup_res(cbuf, res);

   if (write_buf)
      cbuf->base.buf[cbuf->base.cdw++] = res->res_handle;

   if (!already_in_list)
      virgl_drm_add_res(qdws, cbuf, res);
}
833
static boolean virgl_drm_res_is_ref(struct virgl_winsys *qws,
834
struct virgl_cmd_buf *_cbuf,
835
struct virgl_hw_res *res)
837
if (!p_atomic_read(&res->num_cs_references))
843
static struct virgl_cmd_buf *virgl_drm_cmd_buf_create(struct virgl_winsys *qws,
846
struct virgl_drm_cmd_buf *cbuf;
848
cbuf = CALLOC_STRUCT(virgl_drm_cmd_buf);
854
if (!virgl_drm_alloc_res_list(cbuf, 512)) {
859
cbuf->buf = CALLOC(size, sizeof(uint32_t));
861
FREE(cbuf->res_hlist);
867
cbuf->in_fence_fd = -1;
868
cbuf->base.buf = cbuf->buf;
872
static void virgl_drm_cmd_buf_destroy(struct virgl_cmd_buf *_cbuf)
874
struct virgl_drm_cmd_buf *cbuf = virgl_drm_cmd_buf(_cbuf);
876
virgl_drm_free_res_list(cbuf);
882
static struct pipe_fence_handle *
883
virgl_drm_fence_create(struct virgl_winsys *vws, int fd, bool external)
885
struct virgl_drm_fence *fence;
887
assert(vws->supports_fences);
890
fd = os_dupfd_cloexec(fd);
895
fence = CALLOC_STRUCT(virgl_drm_fence);
902
fence->external = external;
904
pipe_reference_init(&fence->reference, 1);
906
return (struct pipe_fence_handle *)fence;
909
static struct pipe_fence_handle *
910
virgl_drm_fence_create_legacy(struct virgl_winsys *vws)
912
struct virgl_drm_fence *fence;
914
assert(!vws->supports_fences);
916
fence = CALLOC_STRUCT(virgl_drm_fence);
921
/* Resources for fences should not be from the cache, since we are basing
922
* the fence status on the resource creation busy status.
924
fence->hw_res = virgl_drm_winsys_resource_create(vws, PIPE_BUFFER,
925
PIPE_FORMAT_R8_UNORM, VIRGL_BIND_CUSTOM, 8, 1, 1, 0, 0, 0, 8, true);
926
if (!fence->hw_res) {
931
pipe_reference_init(&fence->reference, 1);
933
return (struct pipe_fence_handle *)fence;
936
static int virgl_drm_winsys_submit_cmd(struct virgl_winsys *qws,
937
struct virgl_cmd_buf *_cbuf,
938
struct pipe_fence_handle **fence)
940
struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
941
struct virgl_drm_cmd_buf *cbuf = virgl_drm_cmd_buf(_cbuf);
942
struct drm_virtgpu_execbuffer eb;
945
if (cbuf->base.cdw == 0)
948
memset(&eb, 0, sizeof(struct drm_virtgpu_execbuffer));
949
eb.command = (unsigned long)(void*)cbuf->buf;
950
eb.size = cbuf->base.cdw * 4;
951
eb.num_bo_handles = cbuf->cres;
952
eb.bo_handles = (unsigned long)(void *)cbuf->res_hlist;
955
if (qws->supports_fences) {
956
if (cbuf->in_fence_fd >= 0) {
957
eb.flags |= VIRTGPU_EXECBUF_FENCE_FD_IN;
958
eb.fence_fd = cbuf->in_fence_fd;
962
eb.flags |= VIRTGPU_EXECBUF_FENCE_FD_OUT;
964
assert(cbuf->in_fence_fd < 0);
967
ret = drmIoctl(qdws->fd, DRM_IOCTL_VIRTGPU_EXECBUFFER, &eb);
969
_debug_printf("got error from kernel - expect bad rendering %d\n", errno);
972
if (qws->supports_fences) {
973
if (cbuf->in_fence_fd >= 0) {
974
close(cbuf->in_fence_fd);
975
cbuf->in_fence_fd = -1;
978
if (fence != NULL && ret == 0)
979
*fence = virgl_drm_fence_create(qws, eb.fence_fd, false);
981
if (fence != NULL && ret == 0)
982
*fence = virgl_drm_fence_create_legacy(qws);
985
virgl_drm_clear_res_list(cbuf);
990
static int virgl_drm_get_caps(struct virgl_winsys *vws,
991
struct virgl_drm_caps *caps)
993
struct virgl_drm_winsys *vdws = virgl_drm_winsys(vws);
994
struct drm_virtgpu_get_caps args;
997
virgl_ws_fill_new_caps_defaults(caps);
999
memset(&args, 0, sizeof(args));
1000
if (params[param_capset_fix].value) {
1001
/* if we have the query fix - try and get cap set id 2 first */
1002
args.cap_set_id = 2;
1003
args.size = sizeof(union virgl_caps);
1005
args.cap_set_id = 1;
1006
args.size = sizeof(struct virgl_caps_v1);
1008
args.addr = (unsigned long)&caps->caps;
1010
ret = drmIoctl(vdws->fd, DRM_IOCTL_VIRTGPU_GET_CAPS, &args);
1011
if (ret == -1 && errno == EINVAL) {
1012
/* Fallback to v1 */
1013
args.cap_set_id = 1;
1014
args.size = sizeof(struct virgl_caps_v1);
1015
ret = drmIoctl(vdws->fd, DRM_IOCTL_VIRTGPU_GET_CAPS, &args);
1022
static struct pipe_fence_handle *
1023
virgl_cs_create_fence(struct virgl_winsys *vws, int fd)
1025
if (!vws->supports_fences)
1028
return virgl_drm_fence_create(vws, fd, true);
1031
static bool virgl_fence_wait(struct virgl_winsys *vws,
1032
struct pipe_fence_handle *_fence,
1035
struct virgl_drm_fence *fence = virgl_drm_fence(_fence);
1037
if (vws->supports_fences) {
1038
uint64_t timeout_ms;
1042
return sync_wait(fence->fd, 0) == 0;
1044
timeout_ms = timeout / 1000000;
1046
if (timeout_ms * 1000000 < timeout)
1049
timeout_poll = timeout_ms <= INT_MAX ? (int) timeout_ms : -1;
1051
return sync_wait(fence->fd, timeout_poll) == 0;
1055
return !virgl_drm_resource_is_busy(vws, fence->hw_res);
1057
if (timeout != PIPE_TIMEOUT_INFINITE) {
1058
int64_t start_time = os_time_get();
1060
while (virgl_drm_resource_is_busy(vws, fence->hw_res)) {
1061
if (os_time_get() - start_time >= timeout)
1067
virgl_drm_resource_wait(vws, fence->hw_res);
1072
static void virgl_fence_reference(struct virgl_winsys *vws,
1073
struct pipe_fence_handle **dst,
1074
struct pipe_fence_handle *src)
1076
struct virgl_drm_fence *dfence = virgl_drm_fence(*dst);
1077
struct virgl_drm_fence *sfence = virgl_drm_fence(src);
1079
if (pipe_reference(&dfence->reference, &sfence->reference)) {
1080
if (vws->supports_fences) {
1083
virgl_drm_resource_reference(vws, &dfence->hw_res, NULL);
1091
static void virgl_fence_server_sync(struct virgl_winsys *vws,
1092
struct virgl_cmd_buf *_cbuf,
1093
struct pipe_fence_handle *_fence)
1095
struct virgl_drm_cmd_buf *cbuf = virgl_drm_cmd_buf(_cbuf);
1096
struct virgl_drm_fence *fence = virgl_drm_fence(_fence);
1098
if (!vws->supports_fences)
1101
/* if not an external fence, then nothing more to do without preemption: */
1102
if (!fence->external)
1105
sync_accumulate("virgl", &cbuf->in_fence_fd, fence->fd);
1108
static int virgl_fence_get_fd(struct virgl_winsys *vws,
1109
struct pipe_fence_handle *_fence)
1111
struct virgl_drm_fence *fence = virgl_drm_fence(_fence);
1113
if (!vws->supports_fences)
1116
return os_dupfd_cloexec(fence->fd);
1119
static int virgl_drm_get_version(int fd)
1122
drmVersionPtr version;
1124
version = drmGetVersion(fd);
1128
else if (version->version_major != 0)
1131
ret = VIRGL_DRM_VERSION(0, version->version_minor);
1133
drmFreeVersion(version);
1139
virgl_drm_resource_cache_entry_is_busy(struct virgl_resource_cache_entry *entry,
1142
struct virgl_drm_winsys *qdws = user_data;
1143
struct virgl_hw_res *res = cache_entry_container_res(entry);
1145
return virgl_drm_resource_is_busy(&qdws->base, res);
1149
/* Resource-cache callback: destroy the resource owning this cache entry
 * when it is evicted. user_data is the winsys. */
static void
virgl_drm_resource_cache_entry_release(struct virgl_resource_cache_entry *entry,
                                       void *user_data)
{
   struct virgl_drm_winsys *qdws = user_data;
   struct virgl_hw_res *res = cache_entry_container_res(entry);

   virgl_hw_res_destroy(qdws, res);
}
1158
static int virgl_init_context(int drmFD)
1161
struct drm_virtgpu_context_init init = { 0 };
1162
struct drm_virtgpu_context_set_param ctx_set_param = { 0 };
1163
uint64_t supports_capset_virgl, supports_capset_virgl2;
1164
supports_capset_virgl = supports_capset_virgl2 = 0;
1166
supports_capset_virgl = ((1 << VIRGL_DRM_CAPSET_VIRGL) &
1167
params[param_supported_capset_ids].value);
1169
supports_capset_virgl2 = ((1 << VIRGL_DRM_CAPSET_VIRGL2) &
1170
params[param_supported_capset_ids].value);
1172
if (!supports_capset_virgl && !supports_capset_virgl2) {
1173
_debug_printf("No virgl contexts available on host");
1177
ctx_set_param.param = VIRTGPU_CONTEXT_PARAM_CAPSET_ID;
1178
ctx_set_param.value = (supports_capset_virgl2) ?
1179
VIRGL_DRM_CAPSET_VIRGL2 :
1180
VIRGL_DRM_CAPSET_VIRGL;
1182
init.ctx_set_params = (unsigned long)(void *)&ctx_set_param;
1183
init.num_params = 1;
1185
ret = drmIoctl(drmFD, DRM_IOCTL_VIRTGPU_CONTEXT_INIT, &init);
1187
* EEXIST happens when a compositor does DUMB_CREATE before initializing
1190
if (ret && errno != EEXIST) {
1191
_debug_printf("DRM_IOCTL_VIRTGPU_CONTEXT_INIT failed with %s\n",
1199
static struct virgl_winsys *
1200
virgl_drm_winsys_create(int drmFD)
1202
static const unsigned CACHE_TIMEOUT_USEC = 1000000;
1203
struct virgl_drm_winsys *qdws;
1207
for (uint32_t i = 0; i < ARRAY_SIZE(params); i++) {
1208
struct drm_virtgpu_getparam getparam = { 0 };
1210
getparam.param = params[i].param;
1211
getparam.value = (uint64_t)(uintptr_t)&value;
1212
ret = drmIoctl(drmFD, DRM_IOCTL_VIRTGPU_GETPARAM, &getparam);
1213
params[i].value = (ret == 0) ? value : 0;
1216
if (!params[param_3d_features].value)
1219
drm_version = virgl_drm_get_version(drmFD);
1220
if (drm_version < 0)
1223
if (params[param_context_init].value) {
1224
ret = virgl_init_context(drmFD);
1229
qdws = CALLOC_STRUCT(virgl_drm_winsys);
1234
virgl_resource_cache_init(&qdws->cache, CACHE_TIMEOUT_USEC,
1235
virgl_drm_resource_cache_entry_is_busy,
1236
virgl_drm_resource_cache_entry_release,
1238
(void) mtx_init(&qdws->mutex, mtx_plain);
1239
(void) mtx_init(&qdws->bo_handles_mutex, mtx_plain);
1240
p_atomic_set(&qdws->blob_id, 0);
1242
qdws->bo_handles = util_hash_table_create_ptr_keys();
1243
qdws->bo_names = util_hash_table_create_ptr_keys();
1244
qdws->base.destroy = virgl_drm_winsys_destroy;
1246
qdws->base.transfer_put = virgl_bo_transfer_put;
1247
qdws->base.transfer_get = virgl_bo_transfer_get;
1248
qdws->base.resource_create = virgl_drm_winsys_resource_cache_create;
1249
qdws->base.resource_reference = virgl_drm_resource_reference;
1250
qdws->base.resource_create_from_handle = virgl_drm_winsys_resource_create_handle;
1251
qdws->base.resource_set_type = virgl_drm_winsys_resource_set_type;
1252
qdws->base.resource_get_handle = virgl_drm_winsys_resource_get_handle;
1253
qdws->base.resource_map = virgl_drm_resource_map;
1254
qdws->base.resource_wait = virgl_drm_resource_wait;
1255
qdws->base.resource_is_busy = virgl_drm_resource_is_busy;
1256
qdws->base.cmd_buf_create = virgl_drm_cmd_buf_create;
1257
qdws->base.cmd_buf_destroy = virgl_drm_cmd_buf_destroy;
1258
qdws->base.submit_cmd = virgl_drm_winsys_submit_cmd;
1259
qdws->base.emit_res = virgl_drm_emit_res;
1260
qdws->base.res_is_referenced = virgl_drm_res_is_ref;
1262
qdws->base.cs_create_fence = virgl_cs_create_fence;
1263
qdws->base.fence_wait = virgl_fence_wait;
1264
qdws->base.fence_reference = virgl_fence_reference;
1265
qdws->base.fence_server_sync = virgl_fence_server_sync;
1266
qdws->base.fence_get_fd = virgl_fence_get_fd;
1267
qdws->base.get_caps = virgl_drm_get_caps;
1268
qdws->base.supports_fences = drm_version >= VIRGL_DRM_VERSION_FENCE_FD;
1269
qdws->base.supports_encoded_transfers = 1;
1271
qdws->base.supports_coherent = params[param_resource_blob].value &&
1272
params[param_host_visible].value;
1277
static struct hash_table *fd_tab = NULL;
1278
static mtx_t virgl_screen_mutex = _MTX_INITIALIZER_NP;
1281
virgl_drm_screen_destroy(struct pipe_screen *pscreen)
1283
struct virgl_screen *screen = virgl_screen(pscreen);
1286
mtx_lock(&virgl_screen_mutex);
1287
destroy = --screen->refcnt == 0;
1289
int fd = virgl_drm_winsys(screen->vws)->fd;
1290
_mesa_hash_table_remove_key(fd_tab, intptr_to_pointer(fd));
1293
mtx_unlock(&virgl_screen_mutex);
1296
pscreen->destroy = screen->winsys_priv;
1297
pscreen->destroy(pscreen);
1302
/* Hash-table hash callback for the fd_tab screen table: hash the fd value
 * stored in the pointer key. */
static uint32_t
hash_fd(const void *key)
{
   int fd = pointer_to_intptr(key);

   return _mesa_hash_int(&fd);
}
1310
/* Hash-table equality callback for fd_tab: two keys are equal when their
 * fds reference the same underlying file description. */
static bool
equal_fd(const void *key1, const void *key2)
{
   int fd1 = pointer_to_intptr(key1);
   int fd2 = pointer_to_intptr(key2);
   int ret;

   /* Since the scope of prime handle is limited to drm_file,
    * virgl_screen is only shared at the drm_file level,
    * not at the device (/dev/dri/cardX) level.
    */
   ret = os_same_file_description(fd1, fd2);
   if (ret == 0) {
      return true;
   } else if (ret < 0) {
      /* Comparison could not be performed; warn, because sharing state
       * across distinct file descriptions is unsafe. */
      _debug_printf("virgl: os_same_file_description couldn't "
                    "determine if two DRM fds reference the same "
                    "file description.\n"
                    "If they do, bad things may happen!\n");
   }

   return false;
}
1338
struct pipe_screen *
1339
virgl_drm_screen_create(int fd, const struct pipe_screen_config *config)
1341
struct pipe_screen *pscreen = NULL;
1343
mtx_lock(&virgl_screen_mutex);
1345
fd_tab = _mesa_hash_table_create(NULL, hash_fd, equal_fd);
1350
pscreen = util_hash_table_get(fd_tab, intptr_to_pointer(fd));
1352
virgl_screen(pscreen)->refcnt++;
1354
struct virgl_winsys *vws;
1355
int dup_fd = os_dupfd_cloexec(fd);
1357
vws = virgl_drm_winsys_create(dup_fd);
1363
pscreen = virgl_create_screen(vws, config);
1365
_mesa_hash_table_insert(fd_tab, intptr_to_pointer(dup_fd), pscreen);
1367
/* Bit of a hack, to avoid circular linkage dependency,
1368
* ie. pipe driver having to call in to winsys, we
1369
* override the pipe drivers screen->destroy():
1371
virgl_screen(pscreen)->winsys_priv = pscreen->destroy;
1372
pscreen->destroy = virgl_drm_screen_destroy;
1377
mtx_unlock(&virgl_screen_mutex);