/*
 * Copyright 2019 Google LLC
 * SPDX-License-Identifier: MIT
 *
 * based in part on virgl which is:
 * Copyright 2014, 2015 Red Hat.
 */

#include <errno.h>
#include <netinet/in.h>
#include <poll.h>
#include <sys/mman.h>
#include <sys/socket.h>
#include <sys/types.h>
#include <sys/un.h>
#include <unistd.h>

#include "util/os_file.h"
#include "util/sparse_array.h"
#include "util/u_process.h"
#define VIRGL_RENDERER_UNSTABLE_APIS
#include "virtio-gpu/virglrenderer_hw.h"
#include "vtest/vtest_protocol.h"

#include "vn_renderer_internal.h"

#define VTEST_PCI_VENDOR_ID 0x1af4
#define VTEST_PCI_DEVICE_ID 0x1050

struct vtest_shmem {
   struct vn_renderer_shmem base;
};

struct vtest_bo {
   struct vn_renderer_bo base;

   uint32_t blob_flags;
   /* might be closed after mmap */
   int res_fd;
};

struct vtest_sync {
   struct vn_renderer_sync base;
};

struct vtest {
   struct vn_renderer base;

   struct vn_instance *instance;

   mtx_t sock_mutex;
   int sock_fd;

   uint32_t protocol_version;
   uint32_t max_sync_queue_count;

   struct {
      enum virgl_renderer_capset id;
      uint32_t version;
      struct virgl_renderer_capset_venus data;
   } capset;

   uint32_t shmem_blob_mem;

   struct util_sparse_array shmem_array;
   struct util_sparse_array bo_array;

   struct vn_renderer_shmem_cache shmem_cache;
};

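/* The helpers below implement the transport: a single AF_UNIX stream socket
 * to the vtest server.  Reads and writes are blocking, and a failed transfer
 * is treated as a lost connection; file descriptors (for blobs and wait
 * fences) come back as SCM_RIGHTS ancillary data.
 */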
static int
vtest_connect_socket(struct vn_instance *instance, const char *path)
{
   struct sockaddr_un un;
   int sock;

   sock = socket(AF_UNIX, SOCK_STREAM | SOCK_CLOEXEC, 0);
   if (sock < 0) {
      vn_log(instance, "failed to create a socket");
      return -1;
   }

   memset(&un, 0, sizeof(un));
   un.sun_family = AF_UNIX;
   memcpy(un.sun_path, path, strlen(path));

   if (connect(sock, (struct sockaddr *)&un, sizeof(un)) == -1) {
      vn_log(instance, "failed to connect to %s: %s", path, strerror(errno));
      close(sock);
      return -1;
   }

   return sock;
}

static void
vtest_read(struct vtest *vtest, void *buf, size_t size)
{
   do {
      const ssize_t ret = read(vtest->sock_fd, buf, size);
      if (unlikely(ret < 0)) {
         vn_log(vtest->instance,
                "lost connection to rendering server on %zu read %zi %d",
                size, ret, errno);
         abort();
      }

      buf += ret;
      size -= ret;
   } while (size);
}

static int
vtest_receive_fd(struct vtest *vtest)
{
   char cmsg_buf[CMSG_SPACE(sizeof(int))];
   char dummy;
   struct msghdr msg = {
      .msg_iov =
         &(struct iovec){
            .iov_base = &dummy,
            .iov_len = sizeof(dummy),
         },
      .msg_iovlen = 1,
      .msg_control = cmsg_buf,
      .msg_controllen = sizeof(cmsg_buf),
   };

   if (recvmsg(vtest->sock_fd, &msg, 0) < 0) {
      vn_log(vtest->instance, "recvmsg failed: %s", strerror(errno));
      abort();
   }

   struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);
   if (!cmsg || cmsg->cmsg_level != SOL_SOCKET ||
       cmsg->cmsg_type != SCM_RIGHTS) {
      vn_log(vtest->instance, "invalid cmsghdr");
      abort();
   }

   return *((int *)CMSG_DATA(cmsg));
}

static void
vtest_write(struct vtest *vtest, const void *buf, size_t size)
{
   do {
      const ssize_t ret = write(vtest->sock_fd, buf, size);
      if (unlikely(ret < 0)) {
         vn_log(vtest->instance,
                "lost connection to rendering server on %zu write %zi %d",
                size, ret, errno);
         abort();
      }

      buf += ret;
      size -= ret;
   } while (size);
}

static void
vtest_vcmd_create_renderer(struct vtest *vtest, const char *name)
{
   const size_t size = strlen(name) + 1;

   uint32_t vtest_hdr[VTEST_HDR_SIZE];
   vtest_hdr[VTEST_CMD_LEN] = size;
   vtest_hdr[VTEST_CMD_ID] = VCMD_CREATE_RENDERER;

   vtest_write(vtest, vtest_hdr, sizeof(vtest_hdr));
   vtest_write(vtest, name, size);
}

static bool
vtest_vcmd_ping_protocol_version(struct vtest *vtest)
{
   uint32_t vtest_hdr[VTEST_HDR_SIZE];
   vtest_hdr[VTEST_CMD_LEN] = VCMD_PING_PROTOCOL_VERSION_SIZE;
   vtest_hdr[VTEST_CMD_ID] = VCMD_PING_PROTOCOL_VERSION;

   vtest_write(vtest, vtest_hdr, sizeof(vtest_hdr));

   /* send a dummy busy wait to avoid blocking in vtest_read in case ping
    * protocol version is not supported
    */
   uint32_t vcmd_busy_wait[VCMD_BUSY_WAIT_SIZE];
   vtest_hdr[VTEST_CMD_LEN] = VCMD_BUSY_WAIT_SIZE;
   vtest_hdr[VTEST_CMD_ID] = VCMD_RESOURCE_BUSY_WAIT;
   vcmd_busy_wait[VCMD_BUSY_WAIT_HANDLE] = 0;
   vcmd_busy_wait[VCMD_BUSY_WAIT_FLAGS] = 0;

   vtest_write(vtest, vtest_hdr, sizeof(vtest_hdr));
   vtest_write(vtest, vcmd_busy_wait, sizeof(vcmd_busy_wait));

   uint32_t dummy;
   vtest_read(vtest, vtest_hdr, sizeof(vtest_hdr));
   if (vtest_hdr[VTEST_CMD_ID] == VCMD_PING_PROTOCOL_VERSION) {
      /* consume the dummy busy wait result */
      vtest_read(vtest, vtest_hdr, sizeof(vtest_hdr));
      assert(vtest_hdr[VTEST_CMD_ID] == VCMD_RESOURCE_BUSY_WAIT);
      vtest_read(vtest, &dummy, sizeof(dummy));
      return true;
   } else {
      /* no ping protocol version support */
      assert(vtest_hdr[VTEST_CMD_ID] == VCMD_RESOURCE_BUSY_WAIT);
      vtest_read(vtest, &dummy, sizeof(dummy));
      return false;
   }
}

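/* Plain version negotiation: the client sends VTEST_PROTOCOL_VERSION and the
 * server replies with the version it actually implements.
 */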
static uint32_t
vtest_vcmd_protocol_version(struct vtest *vtest)
{
   uint32_t vtest_hdr[VTEST_HDR_SIZE];
   uint32_t vcmd_protocol_version[VCMD_PROTOCOL_VERSION_SIZE];
   vtest_hdr[VTEST_CMD_LEN] = VCMD_PROTOCOL_VERSION_SIZE;
   vtest_hdr[VTEST_CMD_ID] = VCMD_PROTOCOL_VERSION;
   vcmd_protocol_version[VCMD_PROTOCOL_VERSION_VERSION] =
      VTEST_PROTOCOL_VERSION;

   vtest_write(vtest, vtest_hdr, sizeof(vtest_hdr));
   vtest_write(vtest, vcmd_protocol_version, sizeof(vcmd_protocol_version));

   vtest_read(vtest, vtest_hdr, sizeof(vtest_hdr));
   assert(vtest_hdr[VTEST_CMD_LEN] == VCMD_PROTOCOL_VERSION_SIZE);
   assert(vtest_hdr[VTEST_CMD_ID] == VCMD_PROTOCOL_VERSION);
   vtest_read(vtest, vcmd_protocol_version, sizeof(vcmd_protocol_version));

   return vcmd_protocol_version[VCMD_PROTOCOL_VERSION_VERSION];
}

static uint32_t
vtest_vcmd_get_param(struct vtest *vtest, enum vcmd_param param)
{
   uint32_t vtest_hdr[VTEST_HDR_SIZE];
   uint32_t vcmd_get_param[VCMD_GET_PARAM_SIZE];
   vtest_hdr[VTEST_CMD_LEN] = VCMD_GET_PARAM_SIZE;
   vtest_hdr[VTEST_CMD_ID] = VCMD_GET_PARAM;
   vcmd_get_param[VCMD_GET_PARAM_PARAM] = param;

   vtest_write(vtest, vtest_hdr, sizeof(vtest_hdr));
   vtest_write(vtest, vcmd_get_param, sizeof(vcmd_get_param));

   vtest_read(vtest, vtest_hdr, sizeof(vtest_hdr));
   assert(vtest_hdr[VTEST_CMD_LEN] == 2);
   assert(vtest_hdr[VTEST_CMD_ID] == VCMD_GET_PARAM);

   uint32_t resp[2];
   vtest_read(vtest, resp, sizeof(resp));

   return resp[0] ? resp[1] : 0;
}

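/* Reads a capset from the server.  The reply is a validity flag followed by
 * the capset payload; any trailing bytes beyond what the caller asked for
 * are drained so the socket stays in sync for the next command.
 */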
static bool
vtest_vcmd_get_capset(struct vtest *vtest,
                      enum virgl_renderer_capset id,
                      uint32_t version,
                      void *capset,
                      size_t capset_size)
{
   uint32_t vtest_hdr[VTEST_HDR_SIZE];
   uint32_t vcmd_get_capset[VCMD_GET_CAPSET_SIZE];
   vtest_hdr[VTEST_CMD_LEN] = VCMD_GET_CAPSET_SIZE;
   vtest_hdr[VTEST_CMD_ID] = VCMD_GET_CAPSET;
   vcmd_get_capset[VCMD_GET_CAPSET_ID] = id;
   vcmd_get_capset[VCMD_GET_CAPSET_VERSION] = version;

   vtest_write(vtest, vtest_hdr, sizeof(vtest_hdr));
   vtest_write(vtest, vcmd_get_capset, sizeof(vcmd_get_capset));

   vtest_read(vtest, vtest_hdr, sizeof(vtest_hdr));
   assert(vtest_hdr[VTEST_CMD_ID] == VCMD_GET_CAPSET);

   uint32_t valid;
   vtest_read(vtest, &valid, sizeof(valid));
   if (!valid)
      return false;

   size_t read_size = (vtest_hdr[VTEST_CMD_LEN] - 1) * 4;
   if (capset_size >= read_size) {
      vtest_read(vtest, capset, read_size);
      memset(capset + read_size, 0, capset_size - read_size);
   } else {
      vtest_read(vtest, capset, capset_size);
      read_size -= capset_size;

      while (read_size) {
         char temp[256];
         const size_t temp_size = MIN2(read_size, ARRAY_SIZE(temp));
         vtest_read(vtest, temp, temp_size);
         read_size -= temp_size;
      }
   }

   return true;
}

static void
vtest_vcmd_context_init(struct vtest *vtest,
                        enum virgl_renderer_capset capset_id)
{
   uint32_t vtest_hdr[VTEST_HDR_SIZE];
   uint32_t vcmd_context_init[VCMD_CONTEXT_INIT_SIZE];
   vtest_hdr[VTEST_CMD_LEN] = VCMD_CONTEXT_INIT_SIZE;
   vtest_hdr[VTEST_CMD_ID] = VCMD_CONTEXT_INIT;
   vcmd_context_init[VCMD_CONTEXT_INIT_CAPSET_ID] = capset_id;

   vtest_write(vtest, vtest_hdr, sizeof(vtest_hdr));
   vtest_write(vtest, vcmd_context_init, sizeof(vcmd_context_init));
}

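/* Creates a blob resource.  The 64-bit size and blob id are split into
 * lo/hi dwords on the wire; the server replies with a resource id and sends
 * the blob's file descriptor via SCM_RIGHTS.
 */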
static uint32_t
vtest_vcmd_resource_create_blob(struct vtest *vtest,
                                enum vcmd_blob_type type,
                                uint32_t flags,
                                size_t size,
                                vn_object_id blob_id,
                                int *res_fd)
{
   uint32_t vtest_hdr[VTEST_HDR_SIZE];
   uint32_t vcmd_res_create_blob[VCMD_RES_CREATE_BLOB_SIZE];

   vtest_hdr[VTEST_CMD_LEN] = VCMD_RES_CREATE_BLOB_SIZE;
   vtest_hdr[VTEST_CMD_ID] = VCMD_RESOURCE_CREATE_BLOB;

   vcmd_res_create_blob[VCMD_RES_CREATE_BLOB_TYPE] = type;
   vcmd_res_create_blob[VCMD_RES_CREATE_BLOB_FLAGS] = flags;
   vcmd_res_create_blob[VCMD_RES_CREATE_BLOB_SIZE_LO] = (uint32_t)size;
   vcmd_res_create_blob[VCMD_RES_CREATE_BLOB_SIZE_HI] =
      (uint32_t)(size >> 32);
   vcmd_res_create_blob[VCMD_RES_CREATE_BLOB_ID_LO] = (uint32_t)blob_id;
   vcmd_res_create_blob[VCMD_RES_CREATE_BLOB_ID_HI] =
      (uint32_t)(blob_id >> 32);

   vtest_write(vtest, vtest_hdr, sizeof(vtest_hdr));
   vtest_write(vtest, vcmd_res_create_blob, sizeof(vcmd_res_create_blob));

   vtest_read(vtest, vtest_hdr, sizeof(vtest_hdr));
   assert(vtest_hdr[VTEST_CMD_LEN] == 1);
   assert(vtest_hdr[VTEST_CMD_ID] == VCMD_RESOURCE_CREATE_BLOB);

   uint32_t res_id;
   vtest_read(vtest, &res_id, sizeof(res_id));

   *res_fd = vtest_receive_fd(vtest);

   return res_id;
}

static void
vtest_vcmd_resource_unref(struct vtest *vtest, uint32_t res_id)
{
   uint32_t vtest_hdr[VTEST_HDR_SIZE];
   uint32_t vcmd_res_unref[VCMD_RES_UNREF_SIZE];

   vtest_hdr[VTEST_CMD_LEN] = VCMD_RES_UNREF_SIZE;
   vtest_hdr[VTEST_CMD_ID] = VCMD_RESOURCE_UNREF;
   vcmd_res_unref[VCMD_RES_UNREF_RES_HANDLE] = res_id;

   vtest_write(vtest, vtest_hdr, sizeof(vtest_hdr));
   vtest_write(vtest, vcmd_res_unref, sizeof(vcmd_res_unref));
}

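/* Sync commands operate on server-side 64-bit counters: they can be created
 * with an initial value, written, read back, and waited on.
 */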
static uint32_t
vtest_vcmd_sync_create(struct vtest *vtest, uint64_t initial_val)
{
   uint32_t vtest_hdr[VTEST_HDR_SIZE];
   uint32_t vcmd_sync_create[VCMD_SYNC_CREATE_SIZE];

   vtest_hdr[VTEST_CMD_LEN] = VCMD_SYNC_CREATE_SIZE;
   vtest_hdr[VTEST_CMD_ID] = VCMD_SYNC_CREATE;

   vcmd_sync_create[VCMD_SYNC_CREATE_VALUE_LO] = (uint32_t)initial_val;
   vcmd_sync_create[VCMD_SYNC_CREATE_VALUE_HI] =
      (uint32_t)(initial_val >> 32);

   vtest_write(vtest, vtest_hdr, sizeof(vtest_hdr));
   vtest_write(vtest, vcmd_sync_create, sizeof(vcmd_sync_create));

   vtest_read(vtest, vtest_hdr, sizeof(vtest_hdr));
   assert(vtest_hdr[VTEST_CMD_LEN] == 1);
   assert(vtest_hdr[VTEST_CMD_ID] == VCMD_SYNC_CREATE);

   uint32_t sync_id;
   vtest_read(vtest, &sync_id, sizeof(sync_id));

   return sync_id;
}

static void
vtest_vcmd_sync_unref(struct vtest *vtest, uint32_t sync_id)
{
   uint32_t vtest_hdr[VTEST_HDR_SIZE];
   uint32_t vcmd_sync_unref[VCMD_SYNC_UNREF_SIZE];

   vtest_hdr[VTEST_CMD_LEN] = VCMD_SYNC_UNREF_SIZE;
   vtest_hdr[VTEST_CMD_ID] = VCMD_SYNC_UNREF;
   vcmd_sync_unref[VCMD_SYNC_UNREF_ID] = sync_id;

   vtest_write(vtest, vtest_hdr, sizeof(vtest_hdr));
   vtest_write(vtest, vcmd_sync_unref, sizeof(vcmd_sync_unref));
}

static uint64_t
vtest_vcmd_sync_read(struct vtest *vtest, uint32_t sync_id)
{
   uint32_t vtest_hdr[VTEST_HDR_SIZE];
   uint32_t vcmd_sync_read[VCMD_SYNC_READ_SIZE];

   vtest_hdr[VTEST_CMD_LEN] = VCMD_SYNC_READ_SIZE;
   vtest_hdr[VTEST_CMD_ID] = VCMD_SYNC_READ;

   vcmd_sync_read[VCMD_SYNC_READ_ID] = sync_id;

   vtest_write(vtest, vtest_hdr, sizeof(vtest_hdr));
   vtest_write(vtest, vcmd_sync_read, sizeof(vcmd_sync_read));

   vtest_read(vtest, vtest_hdr, sizeof(vtest_hdr));
   assert(vtest_hdr[VTEST_CMD_LEN] == 2);
   assert(vtest_hdr[VTEST_CMD_ID] == VCMD_SYNC_READ);

   uint64_t val;
   vtest_read(vtest, &val, sizeof(val));

   return val;
}

static void
vtest_vcmd_sync_write(struct vtest *vtest, uint32_t sync_id, uint64_t val)
{
   uint32_t vtest_hdr[VTEST_HDR_SIZE];
   uint32_t vcmd_sync_write[VCMD_SYNC_WRITE_SIZE];

   vtest_hdr[VTEST_CMD_LEN] = VCMD_SYNC_WRITE_SIZE;
   vtest_hdr[VTEST_CMD_ID] = VCMD_SYNC_WRITE;

   vcmd_sync_write[VCMD_SYNC_WRITE_ID] = sync_id;
   vcmd_sync_write[VCMD_SYNC_WRITE_VALUE_LO] = (uint32_t)val;
   vcmd_sync_write[VCMD_SYNC_WRITE_VALUE_HI] = (uint32_t)(val >> 32);

   vtest_write(vtest, vtest_hdr, sizeof(vtest_hdr));
   vtest_write(vtest, vcmd_sync_write, sizeof(vcmd_sync_write));
}

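/* Waits on a set of syncs.  Each sync is encoded as three dwords (id, value
 * lo, value hi); the server replies with a file descriptor that becomes
 * readable (POLLIN) once the wait condition is satisfied.
 */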
static int
vtest_vcmd_sync_wait(struct vtest *vtest,
                     uint32_t flags,
                     int poll_timeout,
                     struct vn_renderer_sync *const *syncs,
                     const uint64_t *vals,
                     uint32_t count)
{
   const uint32_t timeout = poll_timeout >= 0 && poll_timeout <= INT32_MAX
                               ? poll_timeout
                               : UINT32_MAX;

   uint32_t vtest_hdr[VTEST_HDR_SIZE];
   vtest_hdr[VTEST_CMD_LEN] = VCMD_SYNC_WAIT_SIZE(count);
   vtest_hdr[VTEST_CMD_ID] = VCMD_SYNC_WAIT;

   vtest_write(vtest, vtest_hdr, sizeof(vtest_hdr));
   vtest_write(vtest, &flags, sizeof(flags));
   vtest_write(vtest, &timeout, sizeof(timeout));
   for (uint32_t i = 0; i < count; i++) {
      const uint64_t val = vals[i];
      const uint32_t sync[3] = {
         syncs[i]->sync_id,
         (uint32_t)val,
         (uint32_t)(val >> 32),
      };
      vtest_write(vtest, sync, sizeof(sync));
   }

   vtest_read(vtest, vtest_hdr, sizeof(vtest_hdr));
   assert(vtest_hdr[VTEST_CMD_LEN] == 0);
   assert(vtest_hdr[VTEST_CMD_ID] == VCMD_SYNC_WAIT);

   return vtest_receive_fd(vtest);
}

static void
submit_cmd2_sizes(const struct vn_renderer_submit *submit,
                  size_t *header_size,
                  size_t *cs_size,
                  size_t *sync_size)
{
   if (!submit->batch_count) {
      *header_size = 0;
      *cs_size = 0;
      *sync_size = 0;
      return;
   }

   *header_size = sizeof(uint32_t) +
                  sizeof(struct vcmd_submit_cmd2_batch) * submit->batch_count;

   *cs_size = 0;
   *sync_size = 0;
   for (uint32_t i = 0; i < submit->batch_count; i++) {
      const struct vn_renderer_submit_batch *batch = &submit->batches[i];

      assert(batch->cs_size % sizeof(uint32_t) == 0);
      *cs_size += batch->cs_size;
      *sync_size += (sizeof(uint32_t) + sizeof(uint64_t)) * batch->sync_count;
   }

   assert(*header_size % sizeof(uint32_t) == 0);
   assert(*cs_size % sizeof(uint32_t) == 0);
   assert(*sync_size % sizeof(uint32_t) == 0);
}

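/* Encodes a submission as a single VCMD_SUBMIT_CMD2: a batch count and an
 * array of vcmd_submit_cmd2_batch headers, followed by all command streams
 * and then all (sync id, value lo, value hi) triplets, with offsets
 * expressed in dwords.
 */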
static void
vtest_vcmd_submit_cmd2(struct vtest *vtest,
                       const struct vn_renderer_submit *submit)
{
   size_t header_size;
   size_t cs_size;
   size_t sync_size;
   submit_cmd2_sizes(submit, &header_size, &cs_size, &sync_size);
   const size_t total_size = header_size + cs_size + sync_size;
   if (!total_size)
      return;

   uint32_t vtest_hdr[VTEST_HDR_SIZE];
   vtest_hdr[VTEST_CMD_LEN] = total_size / sizeof(uint32_t);
   vtest_hdr[VTEST_CMD_ID] = VCMD_SUBMIT_CMD2;
   vtest_write(vtest, vtest_hdr, sizeof(vtest_hdr));

   /* write batch count and batch headers */
   const uint32_t batch_count = submit->batch_count;
   size_t cs_offset = header_size;
   size_t sync_offset = cs_offset + cs_size;
   vtest_write(vtest, &batch_count, sizeof(batch_count));
   for (uint32_t i = 0; i < submit->batch_count; i++) {
      const struct vn_renderer_submit_batch *batch = &submit->batches[i];
      struct vcmd_submit_cmd2_batch dst = {
         .cmd_offset = cs_offset / sizeof(uint32_t),
         .cmd_size = batch->cs_size / sizeof(uint32_t),
         .sync_offset = sync_offset / sizeof(uint32_t),
         .sync_count = batch->sync_count,
      };
      if (!batch->sync_queue_cpu) {
         dst.flags = VCMD_SUBMIT_CMD2_FLAG_SYNC_QUEUE;
         dst.sync_queue_index = batch->sync_queue_index;
         dst.sync_queue_id = batch->vk_queue_id;
      }
      vtest_write(vtest, &dst, sizeof(dst));

      cs_offset += batch->cs_size;
      sync_offset +=
         (sizeof(uint32_t) + sizeof(uint64_t)) * batch->sync_count;
   }

   /* write cs */
   for (uint32_t i = 0; i < submit->batch_count; i++) {
      const struct vn_renderer_submit_batch *batch = &submit->batches[i];
      vtest_write(vtest, batch->cs_data, batch->cs_size);
   }

   /* write syncs */
   for (uint32_t i = 0; i < submit->batch_count; i++) {
      const struct vn_renderer_submit_batch *batch = &submit->batches[i];

      for (uint32_t j = 0; j < batch->sync_count; j++) {
         const uint64_t val = batch->sync_values[j];
         const uint32_t sync[3] = {
            batch->syncs[j]->sync_id,
            (uint32_t)val,
            (uint32_t)(val >> 32),
         };
         vtest_write(vtest, sync, sizeof(sync));
      }
   }
}

static VkResult
vtest_sync_write(struct vn_renderer *renderer,
                 struct vn_renderer_sync *_sync,
                 uint64_t val)
{
   struct vtest *vtest = (struct vtest *)renderer;
   struct vtest_sync *sync = (struct vtest_sync *)_sync;

   mtx_lock(&vtest->sock_mutex);
   vtest_vcmd_sync_write(vtest, sync->base.sync_id, val);
   mtx_unlock(&vtest->sock_mutex);

   return VK_SUCCESS;
}

static VkResult
vtest_sync_read(struct vn_renderer *renderer,
                struct vn_renderer_sync *_sync,
                uint64_t *val)
{
   struct vtest *vtest = (struct vtest *)renderer;
   struct vtest_sync *sync = (struct vtest_sync *)_sync;

   mtx_lock(&vtest->sock_mutex);
   *val = vtest_vcmd_sync_read(vtest, sync->base.sync_id);
   mtx_unlock(&vtest->sock_mutex);

   return VK_SUCCESS;
}

static VkResult
vtest_sync_reset(struct vn_renderer *renderer,
                 struct vn_renderer_sync *sync,
                 uint64_t initial_val)
{
   return vtest_sync_write(renderer, sync, initial_val);
}

static void
vtest_sync_destroy(struct vn_renderer *renderer,
                   struct vn_renderer_sync *_sync)
{
   struct vtest *vtest = (struct vtest *)renderer;
   struct vtest_sync *sync = (struct vtest_sync *)_sync;

   mtx_lock(&vtest->sock_mutex);
   vtest_vcmd_sync_unref(vtest, sync->base.sync_id);
   mtx_unlock(&vtest->sock_mutex);

   free(sync);
}

static VkResult
vtest_sync_create(struct vn_renderer *renderer,
                  uint64_t initial_val,
                  uint32_t flags,
                  struct vn_renderer_sync **out_sync)
{
   struct vtest *vtest = (struct vtest *)renderer;

   struct vtest_sync *sync = calloc(1, sizeof(*sync));
   if (!sync)
      return VK_ERROR_OUT_OF_HOST_MEMORY;

   mtx_lock(&vtest->sock_mutex);
   sync->base.sync_id = vtest_vcmd_sync_create(vtest, initial_val);
   mtx_unlock(&vtest->sock_mutex);

   *out_sync = &sync->base;

   return VK_SUCCESS;
}

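/* The bo_* ops below manage blob resources backing VkDeviceMemory objects:
 * mapping goes through mmap() on the descriptor returned by the server, and
 * export hands out a dup of that descriptor when the blob is shareable.
 */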
static void
vtest_bo_invalidate(struct vn_renderer *renderer,
                    struct vn_renderer_bo *bo,
                    VkDeviceSize offset,
                    VkDeviceSize size)
{
   /* nop */
}

static void
vtest_bo_flush(struct vn_renderer *renderer,
               struct vn_renderer_bo *bo,
               VkDeviceSize offset,
               VkDeviceSize size)
{
   /* nop */
}

static void *
vtest_bo_map(struct vn_renderer *renderer, struct vn_renderer_bo *_bo)
{
   struct vtest *vtest = (struct vtest *)renderer;
   struct vtest_bo *bo = (struct vtest_bo *)_bo;
   const bool mappable = bo->blob_flags & VCMD_BLOB_FLAG_MAPPABLE;
   const bool shareable = bo->blob_flags & VCMD_BLOB_FLAG_SHAREABLE;

   /* not thread-safe but is fine */
   if (!bo->base.mmap_ptr && mappable) {
      /* We wrongly assume that mmap(dma_buf) and vkMapMemory(VkDeviceMemory)
       * are equivalent when the blob type is VCMD_BLOB_TYPE_HOST3D.  While we
       * check for VCMD_PARAM_HOST_COHERENT_DMABUF_BLOB, we know vtest can
       * lie.
       */
      void *ptr = mmap(NULL, bo->base.mmap_size, PROT_READ | PROT_WRITE,
                       MAP_SHARED, bo->res_fd, 0);
      if (ptr == MAP_FAILED) {
         vn_log(vtest->instance, "failed to mmap %d of size %zu rw: %s",
                bo->res_fd, bo->base.mmap_size, strerror(errno));
      } else {
         bo->base.mmap_ptr = ptr;
         /* we don't need the fd anymore */
         if (!shareable) {
            close(bo->res_fd);
            bo->res_fd = -1;
         }
      }
   }

   return bo->base.mmap_ptr;
}

static int
vtest_bo_export_dma_buf(struct vn_renderer *renderer,
                        struct vn_renderer_bo *_bo)
{
   const struct vtest_bo *bo = (const struct vtest_bo *)_bo;
   const bool shareable = bo->blob_flags & VCMD_BLOB_FLAG_SHAREABLE;
   return shareable ? os_dupfd_cloexec(bo->res_fd) : -1;
}

static bool
vtest_bo_destroy(struct vn_renderer *renderer, struct vn_renderer_bo *_bo)
{
   struct vtest *vtest = (struct vtest *)renderer;
   struct vtest_bo *bo = (struct vtest_bo *)_bo;

   if (bo->base.mmap_ptr)
      munmap(bo->base.mmap_ptr, bo->base.mmap_size);
   if (bo->res_fd >= 0)
      close(bo->res_fd);

   mtx_lock(&vtest->sock_mutex);
   vtest_vcmd_resource_unref(vtest, bo->base.res_id);
   mtx_unlock(&vtest->sock_mutex);

   return true;
}

static uint32_t
vtest_bo_blob_flags(VkMemoryPropertyFlags flags,
                    VkExternalMemoryHandleTypeFlags external_handles)
{
   uint32_t blob_flags = 0;
   if (flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT)
      blob_flags |= VCMD_BLOB_FLAG_MAPPABLE;
   if (external_handles)
      blob_flags |= VCMD_BLOB_FLAG_SHAREABLE;
   if (external_handles & VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT)
      blob_flags |= VCMD_BLOB_FLAG_CROSS_DEVICE;

   return blob_flags;
}

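/* Creates a HOST3D blob for the VkDeviceMemory identified by mem_id and
 * tracks it in bo_array, keyed by the resource id returned by the server.
 */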
static VkResult
vtest_bo_create_from_device_memory(
   struct vn_renderer *renderer,
   VkDeviceSize size,
   vn_object_id mem_id,
   VkMemoryPropertyFlags flags,
   VkExternalMemoryHandleTypeFlags external_handles,
   struct vn_renderer_bo **out_bo)
{
   struct vtest *vtest = (struct vtest *)renderer;
   const uint32_t blob_flags = vtest_bo_blob_flags(flags, external_handles);

   mtx_lock(&vtest->sock_mutex);
   int res_fd;
   uint32_t res_id = vtest_vcmd_resource_create_blob(
      vtest, VCMD_BLOB_TYPE_HOST3D, blob_flags, size, mem_id, &res_fd);
   assert(res_id > 0 && res_fd >= 0);
   mtx_unlock(&vtest->sock_mutex);

   struct vtest_bo *bo = util_sparse_array_get(&vtest->bo_array, res_id);
   *bo = (struct vtest_bo){
      .base = {
         .refcount = VN_REFCOUNT_INIT(1),
         .res_id = res_id,
         .mmap_size = size,
      },
      .res_fd = res_fd,
      .blob_flags = blob_flags,
   };

   *out_bo = &bo->base;

   return VK_SUCCESS;
}

static void
vtest_shmem_destroy_now(struct vn_renderer *renderer,
                        struct vn_renderer_shmem *_shmem)
{
   struct vtest *vtest = (struct vtest *)renderer;
   struct vtest_shmem *shmem = (struct vtest_shmem *)_shmem;

   munmap(shmem->base.mmap_ptr, shmem->base.mmap_size);

   mtx_lock(&vtest->sock_mutex);
   vtest_vcmd_resource_unref(vtest, shmem->base.res_id);
   mtx_unlock(&vtest->sock_mutex);
}

static void
vtest_shmem_destroy(struct vn_renderer *renderer,
                    struct vn_renderer_shmem *shmem)
{
   struct vtest *vtest = (struct vtest *)renderer;

   if (vn_renderer_shmem_cache_add(&vtest->shmem_cache, shmem))
      return;

   vtest_shmem_destroy_now(&vtest->base, shmem);
}

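/* Shmem creation first checks the shmem cache; on a miss it creates a
 * mappable blob (HOST3D or GUEST depending on shmem_blob_mem), maps it, and
 * tracks it in shmem_array keyed by the resource id.
 */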
static struct vn_renderer_shmem *
vtest_shmem_create(struct vn_renderer *renderer, size_t size)
{
   struct vtest *vtest = (struct vtest *)renderer;

   struct vn_renderer_shmem *cached_shmem =
      vn_renderer_shmem_cache_get(&vtest->shmem_cache, size);
   if (cached_shmem) {
      cached_shmem->refcount = VN_REFCOUNT_INIT(1);
      return cached_shmem;
   }

   mtx_lock(&vtest->sock_mutex);
   int res_fd;
   uint32_t res_id = vtest_vcmd_resource_create_blob(
      vtest, vtest->shmem_blob_mem, VCMD_BLOB_FLAG_MAPPABLE, size, 0,
      &res_fd);
   assert(res_id > 0 && res_fd >= 0);
   mtx_unlock(&vtest->sock_mutex);

   void *ptr =
      mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, res_fd, 0);
   close(res_fd);
   if (ptr == MAP_FAILED) {
      mtx_lock(&vtest->sock_mutex);
      vtest_vcmd_resource_unref(vtest, res_id);
      mtx_unlock(&vtest->sock_mutex);
      return NULL;
   }

   struct vtest_shmem *shmem =
      util_sparse_array_get(&vtest->shmem_array, res_id);
   *shmem = (struct vtest_shmem){
      .base = {
         .refcount = VN_REFCOUNT_INIT(1),
         .res_id = res_id,
         .mmap_size = size,
         .mmap_ptr = ptr,
      },
   };

   return &shmem->base;
}

static VkResult
sync_wait_poll(int fd, int poll_timeout)
{
   struct pollfd pollfd = {
      .fd = fd,
      .events = POLLIN,
   };
   int ret;
   do {
      ret = poll(&pollfd, 1, poll_timeout);
   } while (ret == -1 && (errno == EINTR || errno == EAGAIN));

   if (ret < 0 || (ret > 0 && !(pollfd.revents & POLLIN))) {
      return (ret < 0 && errno == ENOMEM) ? VK_ERROR_OUT_OF_HOST_MEMORY
                                          : VK_ERROR_DEVICE_LOST;
   }

   return ret ? VK_SUCCESS : VK_TIMEOUT;
}

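/* Converts a Vulkan timeout in nanoseconds to a poll(2) timeout in
 * milliseconds, rounding up and falling back to -1 (wait forever) when the
 * value does not fit in an int.
 */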
static int
timeout_to_poll_timeout(uint64_t timeout)
{
   const uint64_t ns_per_ms = 1000000;
   const uint64_t ms = (timeout + ns_per_ms - 1) / ns_per_ms;
   if (!ms && timeout)
      return -1;
   return ms <= INT_MAX ? ms : -1;
}

static VkResult
vtest_wait(struct vn_renderer *renderer, const struct vn_renderer_wait *wait)
{
   struct vtest *vtest = (struct vtest *)renderer;
   const uint32_t flags = wait->wait_any ? VCMD_SYNC_WAIT_FLAG_ANY : 0;
   const int poll_timeout = timeout_to_poll_timeout(wait->timeout);

   /*
    * vtest_vcmd_sync_wait (and some other sync commands) is executed after
    * all prior commands are dispatched.  That is far from ideal.
    *
    * In virtio-gpu, a drm_syncobj wait ioctl is executed immediately.  It
    * works because it uses virtio-gpu interrupts as a side channel.  vtest
    * needs a side channel to perform well.
    *
    * Whether we are on virtio-gpu or vtest, we should also set up a 1-byte
    * coherent memory that is set to non-zero by GPU after the syncs signal.
    * That would allow us to do a quick check (or spin a bit) before waiting.
    */
   mtx_lock(&vtest->sock_mutex);
   const int fd =
      vtest_vcmd_sync_wait(vtest, flags, poll_timeout, wait->syncs,
                           wait->sync_values, wait->sync_count);
   mtx_unlock(&vtest->sock_mutex);

   VkResult result = sync_wait_poll(fd, poll_timeout);
   close(fd);

   return result;
}

static VkResult
vtest_submit(struct vn_renderer *renderer,
             const struct vn_renderer_submit *submit)
{
   struct vtest *vtest = (struct vtest *)renderer;

   mtx_lock(&vtest->sock_mutex);
   vtest_vcmd_submit_cmd2(vtest, submit);
   mtx_unlock(&vtest->sock_mutex);

   return VK_SUCCESS;
}

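/* Fills vn_renderer_info from the venus capset.  There is no real PCI device
 * behind vtest, so the fixed virtio IDs defined at the top of the file are
 * reported instead.
 */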
static void
vtest_init_renderer_info(struct vtest *vtest)
{
   struct vn_renderer_info *info = &vtest->base.info;

   info->pci.vendor_id = VTEST_PCI_VENDOR_ID;
   info->pci.device_id = VTEST_PCI_DEVICE_ID;

   info->has_dma_buf_import = false;
   info->has_cache_management = false;
   info->has_external_sync = false;
   info->has_implicit_fencing = false;

   info->max_sync_queue_count = vtest->max_sync_queue_count;

   const struct virgl_renderer_capset_venus *capset = &vtest->capset.data;
   info->wire_format_version = capset->wire_format_version;
   info->vk_xml_version = capset->vk_xml_version;
   info->vk_ext_command_serialization_spec_version =
      capset->vk_ext_command_serialization_spec_version;
   info->vk_mesa_venus_protocol_spec_version =
      capset->vk_mesa_venus_protocol_spec_version;
   info->supports_blob_id_0 = capset->supports_blob_id_0;

   /* ensure vk_extension_mask is large enough to hold all capset masks */
   STATIC_ASSERT(sizeof(info->vk_extension_mask) >=
                 sizeof(capset->vk_extension_mask1));
   memcpy(info->vk_extension_mask, capset->vk_extension_mask1,
          sizeof(capset->vk_extension_mask1));
}

static void
vtest_destroy(struct vn_renderer *renderer,
              const VkAllocationCallbacks *alloc)
{
   struct vtest *vtest = (struct vtest *)renderer;

   vn_renderer_shmem_cache_fini(&vtest->shmem_cache);

   if (vtest->sock_fd >= 0) {
      shutdown(vtest->sock_fd, SHUT_RDWR);
      close(vtest->sock_fd);
   }

   mtx_destroy(&vtest->sock_mutex);
   util_sparse_array_finish(&vtest->shmem_array);
   util_sparse_array_finish(&vtest->bo_array);

   vk_free(alloc, vtest);
}

static VkResult
vtest_init_capset(struct vtest *vtest)
{
   vtest->capset.id = VIRGL_RENDERER_CAPSET_VENUS;
   vtest->capset.version = 0;

   if (!vtest_vcmd_get_capset(vtest, vtest->capset.id, vtest->capset.version,
                              &vtest->capset.data,
                              sizeof(vtest->capset.data))) {
      vn_log(vtest->instance, "no venus capset");
      return VK_ERROR_INITIALIZATION_FAILED;
   }

   return VK_SUCCESS;
}

static VkResult
vtest_init_params(struct vtest *vtest)
{
   const uint32_t val =
      vtest_vcmd_get_param(vtest, VCMD_PARAM_MAX_SYNC_QUEUE_COUNT);
   if (!val) {
      vn_log(vtest->instance, "no sync queue support");
      return VK_ERROR_INITIALIZATION_FAILED;
   }
   vtest->max_sync_queue_count = val;

   return VK_SUCCESS;
}

static VkResult
vtest_init_protocol_version(struct vtest *vtest)
{
   const uint32_t min_protocol_version = 3;

   const uint32_t ver = vtest_vcmd_ping_protocol_version(vtest)
                           ? vtest_vcmd_protocol_version(vtest)
                           : 0;
   if (ver < min_protocol_version) {
      vn_log(vtest->instance, "vtest protocol version (%d) too old", ver);
      return VK_ERROR_INITIALIZATION_FAILED;
   }

   vtest->protocol_version = ver;

   return VK_SUCCESS;
}

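/* One-time setup: connect to the vtest server, create the renderer context,
 * negotiate the protocol version, query params and the venus capset, init
 * the context with the capset id, and hook up the renderer ops.
 */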
static VkResult
vtest_init(struct vtest *vtest)
{
   util_sparse_array_init(&vtest->shmem_array, sizeof(struct vtest_shmem),
                          1024);
   util_sparse_array_init(&vtest->bo_array, sizeof(struct vtest_bo), 1024);

   mtx_init(&vtest->sock_mutex, mtx_plain);
   vtest->sock_fd =
      vtest_connect_socket(vtest->instance, VTEST_DEFAULT_SOCKET_NAME);
   if (vtest->sock_fd < 0)
      return VK_ERROR_INITIALIZATION_FAILED;

   const char *renderer_name = util_get_process_name();
   if (!renderer_name)
      renderer_name = "venus";
   vtest_vcmd_create_renderer(vtest, renderer_name);

   VkResult result = vtest_init_protocol_version(vtest);
   if (result == VK_SUCCESS)
      result = vtest_init_params(vtest);
   if (result == VK_SUCCESS)
      result = vtest_init_capset(vtest);
   if (result != VK_SUCCESS)
      return result;

   /* see virtgpu_init_shmem_blob_mem */
   vtest->shmem_blob_mem = vtest->capset.data.supports_blob_id_0
                              ? VCMD_BLOB_TYPE_HOST3D
                              : VCMD_BLOB_TYPE_GUEST;

   vn_renderer_shmem_cache_init(&vtest->shmem_cache, &vtest->base,
                                vtest_shmem_destroy_now);

   vtest_vcmd_context_init(vtest, vtest->capset.id);

   vtest_init_renderer_info(vtest);

   vtest->base.ops.destroy = vtest_destroy;
   vtest->base.ops.submit = vtest_submit;
   vtest->base.ops.wait = vtest_wait;

   vtest->base.shmem_ops.create = vtest_shmem_create;
   vtest->base.shmem_ops.destroy = vtest_shmem_destroy;

   vtest->base.bo_ops.create_from_device_memory =
      vtest_bo_create_from_device_memory;
   vtest->base.bo_ops.create_from_dma_buf = NULL;
   vtest->base.bo_ops.destroy = vtest_bo_destroy;
   vtest->base.bo_ops.export_dma_buf = vtest_bo_export_dma_buf;
   vtest->base.bo_ops.map = vtest_bo_map;
   vtest->base.bo_ops.flush = vtest_bo_flush;
   vtest->base.bo_ops.invalidate = vtest_bo_invalidate;

   vtest->base.sync_ops.create = vtest_sync_create;
   vtest->base.sync_ops.create_from_syncobj = NULL;
   vtest->base.sync_ops.destroy = vtest_sync_destroy;
   vtest->base.sync_ops.export_syncobj = NULL;
   vtest->base.sync_ops.reset = vtest_sync_reset;
   vtest->base.sync_ops.read = vtest_sync_read;
   vtest->base.sync_ops.write = vtest_sync_write;

   return VK_SUCCESS;
}

VkResult
vn_renderer_create_vtest(struct vn_instance *instance,
                         const VkAllocationCallbacks *alloc,
                         struct vn_renderer **renderer)
{
   struct vtest *vtest = vk_zalloc(alloc, sizeof(*vtest), VN_DEFAULT_ALIGN,
                                   VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
   if (!vtest)
      return VK_ERROR_OUT_OF_HOST_MEMORY;

   vtest->instance = instance;
   vtest->sock_fd = -1;

   VkResult result = vtest_init(vtest);
   if (result != VK_SUCCESS) {
      vtest_destroy(&vtest->base, alloc);
      return result;
   }

   *renderer = &vtest->base;

   return VK_SUCCESS;
}