     VirtIOHandleOutput handle_output;
-    VirtIOHandleOutput handle_aio_output;
+    VirtIOHandleAIOOutput handle_aio_output;
     VirtIODevice *vdev;
     EventNotifier guest_notifier;
     EventNotifier host_notifier;
     QLIST_ENTRY(VirtQueue) node;

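The hunks below repeatedly reference a VRingMemoryRegionCaches bundle that this patch introduces but that is not shown in this excerpt. As a reading aid, a sketch of what it plausibly contains (reconstructed, not quoted from the patch):

typedef struct VRingMemoryRegionCaches {
    struct rcu_head rcu;      /* lets virtio_free_region_cache() run via call_rcu() */
    MemoryRegionCache desc;   /* maps the descriptor table */
    MemoryRegionCache avail;  /* maps the avail ring */
    MemoryRegionCache used;   /* maps the used ring */
} VRingMemoryRegionCaches;
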
+static void virtio_free_region_cache(VRingMemoryRegionCaches *caches)
+{
+    if (!caches) {
+        return;
+    }
+
+    address_space_cache_destroy(&caches->desc);
+    address_space_cache_destroy(&caches->avail);
+    address_space_cache_destroy(&caches->used);
+    g_free(caches);
+}
+
+static void virtio_init_region_cache(VirtIODevice *vdev, int n)
+{
+    VirtQueue *vq = &vdev->vq[n];
+    VRingMemoryRegionCaches *old = vq->vring.caches;
+    VRingMemoryRegionCaches *new;
+    hwaddr addr, size;
+    int64_t len;
+    int event_size;
+
+    event_size = virtio_vdev_has_feature(vq->vdev, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;
+
+    addr = vq->vring.desc;
+    if (!addr) {
+        return;
+    }
+    new = g_new0(VRingMemoryRegionCaches, 1);
+    size = virtio_queue_get_desc_size(vdev, n);
+    len = address_space_cache_init(&new->desc, vdev->dma_as,
+                                   addr, size, false);
+    if (len < size) {
+        virtio_error(vdev, "Cannot map desc");
+        goto err_desc;
+    }
+
+    size = virtio_queue_get_used_size(vdev, n) + event_size;
+    len = address_space_cache_init(&new->used, vdev->dma_as,
+                                   vq->vring.used, size, true);
+    if (len < size) {
+        virtio_error(vdev, "Cannot map used");
+        goto err_used;
+    }
+
+    size = virtio_queue_get_avail_size(vdev, n) + event_size;
+    len = address_space_cache_init(&new->avail, vdev->dma_as,
+                                   vq->vring.avail, size, false);
+    if (len < size) {
+        virtio_error(vdev, "Cannot map avail");
+        goto err_avail;
+    }
+
+    atomic_rcu_set(&vq->vring.caches, new);
+    if (old) {
+        call_rcu(old, virtio_free_region_cache, rcu);
+    }
+    return;
+
+err_avail:
+    address_space_cache_destroy(&new->used);
+err_used:
+    address_space_cache_destroy(&new->desc);
+err_desc:
+    g_free(new);
+}

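address_space_cache_init() returns how many contiguous bytes it could actually map, which is why each call above is paired with a len < size check. A minimal sketch of the full cache lifecycle; as, base and size are hypothetical stand-ins:

MemoryRegionCache cache = MEMORY_REGION_CACHE_INVALID;
int64_t len = address_space_cache_init(&cache, as, base, size, false /* read-only */);
if (len < size) {
    /* region is not fully mappable: report the error and fall back */
} else {
    uint16_t v;
    address_space_read_cached(&cache, 0, &v, sizeof(v));  /* fast repeated access */
}
address_space_cache_destroy(&cache);  /* safe even if init never succeeded */
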
 /* virt queue functions */
 void virtio_queue_update_rings(VirtIODevice *vdev, int n)
 {
     ...
     vring->used = vring_align(vring->avail +
                               offsetof(VRingAvail, ring[vring->num]),
                               vring->align);
+    virtio_init_region_cache(vdev, n);
 }

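vring_align() rounds the used ring up to the queue's alignment. Worked numbers for the legacy layout, assuming a 256-entry queue, 16-byte descriptors and 4096-byte alignment (illustrative arithmetic only, not part of the patch):

const unsigned num = 256, align = 4096;
size_t desc_bytes  = num * 16;            /* descriptor table: 4096 bytes */
size_t avail_bytes = 2 + 2 + num * 2;     /* flags + idx + ring[num]: 518 */
/* used ring offset, i.e. what vring_align() computes above: */
size_t used_off = (desc_bytes + avail_bytes + align - 1) & ~(size_t)(align - 1);
/* -> 4614 rounded up to 8192 */
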
+/* Called within rcu_read_lock().  */
 static void vring_desc_read(VirtIODevice *vdev, VRingDesc *desc,
-                            hwaddr desc_pa, int i)
+                            MemoryRegionCache *cache, int i)
 {
-    address_space_read(&address_space_memory, desc_pa + i * sizeof(VRingDesc),
-                       MEMTXATTRS_UNSPECIFIED, (void *)desc, sizeof(VRingDesc));
+    address_space_read_cached(cache, i * sizeof(VRingDesc),
+                              desc, sizeof(VRingDesc));
     virtio_tswap64s(vdev, &desc->addr);
     virtio_tswap32s(vdev, &desc->len);
     virtio_tswap16s(vdev, &desc->flags);
     virtio_tswap16s(vdev, &desc->next);
 }

+static VRingMemoryRegionCaches *vring_get_region_caches(struct VirtQueue *vq)
+{
+    VRingMemoryRegionCaches *caches = atomic_rcu_read(&vq->vring.caches);
+    assert(caches != NULL);
+    return caches;
+}

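atomic_rcu_read() pairs with the atomic_rcu_set() in virtio_init_region_cache(): readers must hold the RCU read lock so the old caches cannot be freed underneath them. A sketch of the expected calling pattern (illustrative, not from the patch):

rcu_read_lock();
{
    VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
    uint16_t flags = virtio_lduw_phys_cached(vq->vdev, &caches->avail,
                                             offsetof(VRingAvail, flags));
    /* ... use flags; caches stay valid until rcu_read_unlock() ... */
}
rcu_read_unlock();
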
+/* Called within rcu_read_lock().  */
 static inline uint16_t vring_avail_flags(VirtQueue *vq)
 {
-    pa = vq->vring.avail + offsetof(VRingAvail, flags);
-    return virtio_lduw_phys(vq->vdev, pa);
+    VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
+    hwaddr pa = offsetof(VRingAvail, flags);
+    return virtio_lduw_phys_cached(vq->vdev, &caches->avail, pa);
 }

+/* Called within rcu_read_lock().  */
 static inline uint16_t vring_avail_idx(VirtQueue *vq)
 {
-    pa = vq->vring.avail + offsetof(VRingAvail, idx);
-    vq->shadow_avail_idx = virtio_lduw_phys(vq->vdev, pa);
+    VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
+    hwaddr pa = offsetof(VRingAvail, idx);
+    vq->shadow_avail_idx = virtio_lduw_phys_cached(vq->vdev, &caches->avail, pa);
     return vq->shadow_avail_idx;
 }

+/* Called within rcu_read_lock().  */
 static inline uint16_t vring_avail_ring(VirtQueue *vq, int i)
 {
-    pa = vq->vring.avail + offsetof(VRingAvail, ring[i]);
-    return virtio_lduw_phys(vq->vdev, pa);
+    VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
+    hwaddr pa = offsetof(VRingAvail, ring[i]);
+    return virtio_lduw_phys_cached(vq->vdev, &caches->avail, pa);
 }

+/* Called within rcu_read_lock().  */
 static inline uint16_t vring_get_used_event(VirtQueue *vq)
 {
     return vring_avail_ring(vq, vq->vring.num);
 }

+/* Called within rcu_read_lock().  */
 static inline void vring_used_write(VirtQueue *vq, VRingUsedElem *uelem,
                                     int i)
 {
+    VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
+    hwaddr pa = offsetof(VRingUsed, ring[i]);
     virtio_tswap32s(vq->vdev, &uelem->id);
     virtio_tswap32s(vq->vdev, &uelem->len);
-    pa = vq->vring.used + offsetof(VRingUsed, ring[i]);
-    address_space_write(&address_space_memory, pa, MEMTXATTRS_UNSPECIFIED,
-                        (void *)uelem, sizeof(VRingUsedElem));
+    address_space_write_cached(&caches->used, pa, uelem, sizeof(VRingUsedElem));
+    address_space_cache_invalidate(&caches->used, pa, sizeof(VRingUsedElem));
 }

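Every cached write to guest-visible memory in this patch is followed by address_space_cache_invalidate(), which updates dirty-memory tracking so migration and the guest observe the new bytes. The recurring pairing could be captured in a helper like this (hypothetical, for illustration only):

static inline void vring_store16(VirtIODevice *vdev, MemoryRegionCache *cache,
                                 hwaddr off, uint16_t val)
{
    virtio_stw_phys_cached(vdev, cache, off, val);            /* store */
    address_space_cache_invalidate(cache, off, sizeof(val));  /* mark dirty */
}
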
+/* Called within rcu_read_lock().  */
 static uint16_t vring_used_idx(VirtQueue *vq)
 {
-    pa = vq->vring.used + offsetof(VRingUsed, idx);
-    return virtio_lduw_phys(vq->vdev, pa);
+    VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
+    hwaddr pa = offsetof(VRingUsed, idx);
+    return virtio_lduw_phys_cached(vq->vdev, &caches->used, pa);
 }

+/* Called within rcu_read_lock().  */
 static inline void vring_used_idx_set(VirtQueue *vq, uint16_t val)
 {
-    pa = vq->vring.used + offsetof(VRingUsed, idx);
-    virtio_stw_phys(vq->vdev, pa, val);
+    VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
+    hwaddr pa = offsetof(VRingUsed, idx);
+    virtio_stw_phys_cached(vq->vdev, &caches->used, pa, val);
+    address_space_cache_invalidate(&caches->used, pa, sizeof(val));
     vq->used_idx = val;
 }

+/* Called within rcu_read_lock().  */
 static inline void vring_used_flags_set_bit(VirtQueue *vq, int mask)
 {
+    VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
     VirtIODevice *vdev = vq->vdev;
-    pa = vq->vring.used + offsetof(VRingUsed, flags);
-    virtio_stw_phys(vdev, pa, virtio_lduw_phys(vdev, pa) | mask);
+    hwaddr pa = offsetof(VRingUsed, flags);
+    uint16_t flags = virtio_lduw_phys_cached(vq->vdev, &caches->used, pa);
+
+    virtio_stw_phys_cached(vdev, &caches->used, pa, flags | mask);
+    address_space_cache_invalidate(&caches->used, pa, sizeof(flags));
 }

+/* Called within rcu_read_lock().  */
 static inline void vring_used_flags_unset_bit(VirtQueue *vq, int mask)
 {
+    VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
     VirtIODevice *vdev = vq->vdev;
-    pa = vq->vring.used + offsetof(VRingUsed, flags);
-    virtio_stw_phys(vdev, pa, virtio_lduw_phys(vdev, pa) & ~mask);
+    hwaddr pa = offsetof(VRingUsed, flags);
+    uint16_t flags = virtio_lduw_phys_cached(vq->vdev, &caches->used, pa);
+
+    virtio_stw_phys_cached(vdev, &caches->used, pa, flags & ~mask);
+    address_space_cache_invalidate(&caches->used, pa, sizeof(flags));
 }

+/* Called within rcu_read_lock().  */
 static inline void vring_set_avail_event(VirtQueue *vq, uint16_t val)
 {
+    VRingMemoryRegionCaches *caches;
     hwaddr pa;
     if (!vq->notification) {
         return;
     }
-    pa = vq->vring.used + offsetof(VRingUsed, ring[vq->vring.num]);
-    virtio_stw_phys(vq->vdev, pa, val);
+
+    caches = vring_get_region_caches(vq);
+    pa = offsetof(VRingUsed, ring[vq->vring.num]);
+    virtio_stw_phys_cached(vq->vdev, &caches->used, pa, val);
+    address_space_cache_invalidate(&caches->used, pa, sizeof(val));
 }

 void virtio_queue_set_notification(VirtQueue *vq, int enable)
 {
     vq->notification = enable;
+
+    if (!vq->vring.desc) {
+        return;
+    }
+
+    rcu_read_lock();
     if (virtio_vdev_has_feature(vq->vdev, VIRTIO_RING_F_EVENT_IDX)) {
         vring_set_avail_event(vq, vring_avail_idx(vq));
     } else if (enable) {
     ...

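With VIRTIO_RING_F_EVENT_IDX, writing the avail-event field tells the guest which avail index should trigger the next kick. For background, the suppression test on the notifying side is the virtio specification's vring_need_event(), reproduced here for reference (not part of this patch):

static inline int vring_need_event(uint16_t event_idx, uint16_t new_idx, uint16_t old)
{
    /* Deliberate unsigned 16-bit wraparound: notify only if new_idx has
     * moved past event_idx since the last notification at 'old'. */
    return (uint16_t)(new_idx - event_idx - 1) < (uint16_t)(new_idx - old);
}
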
 void virtqueue_get_avail_bytes(VirtQueue *vq, unsigned int *in_bytes,
                                unsigned int *out_bytes,
                                unsigned max_in_bytes, unsigned max_out_bytes)
 {
+    VirtIODevice *vdev = vq->vdev;
+    unsigned int max, idx;
     unsigned int total_bufs, in_total, out_total;
+    VRingMemoryRegionCaches *caches;
+    MemoryRegionCache indirect_desc_cache = MEMORY_REGION_CACHE_INVALID;
     ...
+    if (unlikely(!vq->vring.desc)) {
+        goto done;
+    }
     ...
     idx = vq->last_avail_idx;
     total_bufs = in_total = out_total = 0;
     ...
+    max = vq->vring.num;
+    caches = vring_get_region_caches(vq);
+    if (caches->desc.len < max * sizeof(VRingDesc)) {
+        virtio_error(vdev, "Cannot map descriptor ring");
+        goto err;
+    }

     while ((rc = virtqueue_num_heads(vq, idx)) > 0) {
-        VirtIODevice *vdev = vq->vdev;
-        unsigned int max, num_bufs, indirect = 0;
+        MemoryRegionCache *desc_cache = &caches->desc;
+        unsigned int num_bufs;
         ...
         num_bufs = total_bufs;

         if (!virtqueue_get_head(vq, idx++, &i)) {
         ...
-        desc_pa = vq->vring.desc;
-        vring_desc_read(vdev, &desc, desc_pa, i);
+        vring_desc_read(vdev, &desc, desc_cache, i);

         if (desc.flags & VRING_DESC_F_INDIRECT) {
             if (desc.len % sizeof(VRingDesc)) {
             ...

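When a descriptor has VRING_DESC_F_INDIRECT set, the walk continues inside an indirect table, so the patch points desc_cache at the separate indirect_desc_cache. A sketch of that step under the names used above (reconstructed, not the verbatim hunk):

/* Sketch: map the indirect table through its own one-shot cache. */
int64_t len = address_space_cache_init(&indirect_desc_cache, vdev->dma_as,
                                       desc.addr, desc.len, false);
if (len < desc.len) {
    virtio_error(vdev, "Cannot map indirect buffer");
    /* error handling elided */
}
desc_cache = &indirect_desc_cache;             /* later reads hit the table */
vring_desc_read(vdev, &desc, desc_cache, 0);   /* first indirect entry */
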
     trace_virtqueue_pop(vq, elem, elem->in_num, elem->out_num);
     ...
+    address_space_cache_destroy(&indirect_desc_cache);
     ...
     virtqueue_undo_map_desc(out_num, in_num, iov);

+/* virtqueue_drop_all:
+ * @vq: The #VirtQueue
+ * Drops all queued buffers and indicates them to the guest
+ * as if they are done. Useful when buffers cannot be
+ * processed but must be returned to the guest.
+ */
+unsigned int virtqueue_drop_all(VirtQueue *vq)
+{
+    unsigned int dropped = 0;
+    VirtQueueElement elem = {};
+    VirtIODevice *vdev = vq->vdev;
+    bool fEventIdx = virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);
+
+    if (unlikely(vdev->broken)) {
+        return 0;
+    }
+
+    while (!virtio_queue_empty(vq) && vq->inuse < vq->vring.num) {
+        /* works similarly to virtqueue_pop but does not map buffers
+         * or allocate any memory */
+        if (!virtqueue_get_head(vq, vq->last_avail_idx, &elem.index)) {
+            break;
+        }
+        vq->inuse++;
+        vq->last_avail_idx++;
+        if (fEventIdx) {
+            vring_set_avail_event(vq, vq->last_avail_idx);
+        }
+        /* immediately push the element, nothing to unmap
+         * as both in_num and out_num are set to 0 */
+        virtqueue_push(vq, &elem, 0);
+        dropped++;
+    }
+
+    return dropped;
+}

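A plausible call site, illustrating how a device whose backend can no longer service requests might use this (hypothetical usage, not from the patch):

/* Return all pending buffers to the guest and signal completion. */
if (virtqueue_drop_all(vq) > 0) {
    virtio_notify(vdev, vq);
}
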
 /* Reading and writing a structure directly to QEMUFile is *awful*, but
 ...

+static void virtio_queue_host_notifier_aio_poll_begin(EventNotifier *n)
+{
+    VirtQueue *vq = container_of(n, VirtQueue, host_notifier);
+
+    virtio_queue_set_notification(vq, 0);
+}
+
+static bool virtio_queue_host_notifier_aio_poll(void *opaque)
+{
+    EventNotifier *n = opaque;
+    VirtQueue *vq = container_of(n, VirtQueue, host_notifier);
+    bool progress;
+
+    if (!vq->vring.desc || virtio_queue_empty(vq)) {
+        return false;
+    }
+
+    progress = virtio_queue_notify_aio_vq(vq);
+
+    /* In case the handler function re-enabled notifications */
+    virtio_queue_set_notification(vq, 0);
+    return progress;
+}
+
+static void virtio_queue_host_notifier_aio_poll_end(EventNotifier *n)
+{
+    VirtQueue *vq = container_of(n, VirtQueue, host_notifier);
+
+    /* Caller polls once more after this to catch requests that race with us */
+    virtio_queue_set_notification(vq, 1);
+}

 void virtio_queue_aio_set_host_notifier_handler(VirtQueue *vq, AioContext *ctx,
-                                                VirtIOHandleOutput handle_output)
+                                                VirtIOHandleAIOOutput handle_output)
 {
     if (handle_output) {
         vq->handle_aio_output = handle_output;
         aio_set_event_notifier(ctx, &vq->host_notifier, true,
-                               virtio_queue_host_notifier_aio_read);
+                               virtio_queue_host_notifier_aio_read,
+                               virtio_queue_host_notifier_aio_poll);
+        aio_set_event_notifier_poll(ctx, &vq->host_notifier,
+                                    virtio_queue_host_notifier_aio_poll_begin,
+                                    virtio_queue_host_notifier_aio_poll_end);
     } else {
-        aio_set_event_notifier(ctx, &vq->host_notifier, true, NULL);
+        aio_set_event_notifier(ctx, &vq->host_notifier, true, NULL, NULL);
         /* Test and clear notifier after disabling event,
          * in case poll callback didn't have time to run. */
         virtio_queue_host_notifier_aio_read(&vq->host_notifier);

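For context, a sketch of how a device might use this entry point to run a queue in an IOThread's AioContext. handle_tx_aio and s->ctx are hypothetical; the callback signature follows VirtIOHandleAIOOutput, i.e. bool (*)(VirtIODevice *, VirtQueue *), returning whether progress was made:

static bool handle_tx_aio(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtQueueElement *elem = virtqueue_pop(vq, sizeof(VirtQueueElement));
    if (!elem) {
        return false;                 /* no progress; polling will retry */
    }
    /* ... process elem->out_sg / elem->in_sg ... */
    virtqueue_push(vq, elem, 0);
    virtio_notify(vdev, vq);
    g_free(elem);
    return true;                      /* progress was made */
}

/* attach the queue to the IOThread's context ... */
virtio_queue_aio_set_host_notifier_handler(vq, s->ctx, handle_tx_aio);
/* ... and later detach, falling back to the main loop */
virtio_queue_aio_set_host_notifier_handler(vq, s->ctx, NULL);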