/*
 * Copyright IBM, Corp. 2007
 *
 * Anthony Liguori <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 */
14
#ifndef _QEMU_VIRTIO_H
15
#define _QEMU_VIRTIO_H
20
#include "sysemu/sysemu.h"
21
#include "qemu/event_notifier.h"
23
#include "hw/virtio/virtio-9p.h"
/* from Linux's linux/virtio_config.h */

/* Status byte for guest to report progress, and synchronize features. */
/* We have seen device and processed generic fields (VIRTIO_CONFIG_F_VIRTIO) */
#define VIRTIO_CONFIG_S_ACKNOWLEDGE     1
/* We have found a driver for the device. */
#define VIRTIO_CONFIG_S_DRIVER          2
/* Driver has used its parts of the config, and is happy */
#define VIRTIO_CONFIG_S_DRIVER_OK       4
/* We've given up on this device. */
#define VIRTIO_CONFIG_S_FAILED          0x80

/* Some virtio feature bits (currently bits 28 through 31) are reserved for the
 * transport being used (eg. virtio_ring), the rest are per-device feature
 * bits. */
#define VIRTIO_TRANSPORT_F_START        28
#define VIRTIO_TRANSPORT_F_END          32

/* We notify when the ring is completely used, even if the guest is suppressing
 * callbacks */
#define VIRTIO_F_NOTIFY_ON_EMPTY        24
/* Can the device handle any descriptor layout? */
#define VIRTIO_F_ANY_LAYOUT             27
/* We support indirect buffer descriptors */
#define VIRTIO_RING_F_INDIRECT_DESC     28
/* The Guest publishes the used index for which it expects an interrupt
 * at the end of the avail ring. Host should ignore the avail->flags field. */
/* The Host publishes the avail index for which it expects a kick
 * at the end of the used ring. Guest should ignore the used->flags field. */
#define VIRTIO_RING_F_EVENT_IDX         29
/* A guest should never accept this. It implies negotiation is broken. */
#define VIRTIO_F_BAD_FEATURE            30

/* from Linux's linux/virtio_ring.h */

/* This marks a buffer as continuing via the next field. */
#define VRING_DESC_F_NEXT       1
/* This marks a buffer as write-only (otherwise read-only). */
#define VRING_DESC_F_WRITE      2
/* This means the buffer contains a list of buffer descriptors. */
#define VRING_DESC_F_INDIRECT   4

/* This means don't notify other side when buffer added. */
#define VRING_USED_F_NO_NOTIFY  1
/* This means don't interrupt guest when buffer consumed. */
#define VRING_AVAIL_F_NO_INTERRUPT      1
/* Round @addr up to the next multiple of @align.
 * @align must be a power of two (the mask trick below relies on it).
 * NOTE(review): the second parameter line was lost in extraction and is
 * restored here as "unsigned long align" — verify against the original. */
static inline hwaddr vring_align(hwaddr addr,
                                 unsigned long align)
{
    return (addr + align - 1) & ~(align - 1);
}
typedef struct VirtQueue VirtQueue;
82
#define VIRTQUEUE_MAX_SIZE 1024
84
typedef struct VirtQueueElement
89
hwaddr in_addr[VIRTQUEUE_MAX_SIZE];
90
hwaddr out_addr[VIRTQUEUE_MAX_SIZE];
91
struct iovec in_sg[VIRTQUEUE_MAX_SIZE];
92
struct iovec out_sg[VIRTQUEUE_MAX_SIZE];
95
/* Maximum number of virtqueues a device may expose. */
#define VIRTIO_PCI_QUEUE_MAX 64

/* Sentinel MSI-X vector value meaning "no vector assigned". */
#define VIRTIO_NO_VECTOR 0xffff

/* QOM type boilerplate for the virtio device base class. */
#define TYPE_VIRTIO_DEVICE "virtio-device"
#define VIRTIO_DEVICE_GET_CLASS(obj) \
        OBJECT_GET_CLASS(VirtioDeviceClass, obj, TYPE_VIRTIO_DEVICE)
#define VIRTIO_DEVICE_CLASS(klass) \
        OBJECT_CLASS_CHECK(VirtioDeviceClass, klass, TYPE_VIRTIO_DEVICE)
#define VIRTIO_DEVICE(obj) \
        OBJECT_CHECK(VirtIODevice, (obj), TYPE_VIRTIO_DEVICE)
109
DeviceState parent_obj;
114
uint32_t guest_features;
117
uint16_t config_vector;
122
VMChangeStateEntry *vmstate;
126
typedef struct VirtioDeviceClass {
127
/* This is what a VirtioDevice must implement */
129
int (*init)(VirtIODevice *vdev);
130
uint32_t (*get_features)(VirtIODevice *vdev, uint32_t requested_features);
131
uint32_t (*bad_features)(VirtIODevice *vdev);
132
void (*set_features)(VirtIODevice *vdev, uint32_t val);
133
void (*get_config)(VirtIODevice *vdev, uint8_t *config);
134
void (*set_config)(VirtIODevice *vdev, const uint8_t *config);
135
void (*reset)(VirtIODevice *vdev);
136
void (*set_status)(VirtIODevice *vdev, uint8_t val);
137
/* Test and clear event pending status.
138
* Should be called after unmask to avoid losing events.
139
* If backend does not support masking,
140
* must check in frontend instead.
142
bool (*guest_notifier_pending)(VirtIODevice *vdev, int n);
143
/* Mask/unmask events from this vq. Any events reported
144
* while masked will become pending.
145
* If backend does not support masking,
146
* must mask in frontend instead.
148
void (*guest_notifier_mask)(VirtIODevice *vdev, int n, bool mask);
151
void virtio_init(VirtIODevice *vdev, const char *name,
152
uint16_t device_id, size_t config_size);
153
void virtio_cleanup(VirtIODevice *vdev);
155
/* Set the child bus name. */
156
void virtio_device_set_child_bus_name(VirtIODevice *vdev, char *bus_name);
158
VirtQueue *virtio_add_queue(VirtIODevice *vdev, int queue_size,
159
void (*handle_output)(VirtIODevice *,
162
void virtio_del_queue(VirtIODevice *vdev, int n);
164
void virtqueue_push(VirtQueue *vq, const VirtQueueElement *elem,
166
void virtqueue_flush(VirtQueue *vq, unsigned int count);
167
void virtqueue_fill(VirtQueue *vq, const VirtQueueElement *elem,
168
unsigned int len, unsigned int idx);
170
void virtqueue_map_sg(struct iovec *sg, hwaddr *addr,
171
size_t num_sg, int is_write);
172
int virtqueue_pop(VirtQueue *vq, VirtQueueElement *elem);
173
int virtqueue_avail_bytes(VirtQueue *vq, unsigned int in_bytes,
174
unsigned int out_bytes);
175
void virtqueue_get_avail_bytes(VirtQueue *vq, unsigned int *in_bytes,
176
unsigned int *out_bytes,
177
unsigned max_in_bytes, unsigned max_out_bytes);
179
void virtio_notify(VirtIODevice *vdev, VirtQueue *vq);
181
void virtio_save(VirtIODevice *vdev, QEMUFile *f);
183
int virtio_load(VirtIODevice *vdev, QEMUFile *f);
185
void virtio_notify_config(VirtIODevice *vdev);
187
void virtio_queue_set_notification(VirtQueue *vq, int enable);
189
int virtio_queue_ready(VirtQueue *vq);
191
int virtio_queue_empty(VirtQueue *vq);
193
/* Host binding interface. */
195
uint32_t virtio_config_readb(VirtIODevice *vdev, uint32_t addr);
196
uint32_t virtio_config_readw(VirtIODevice *vdev, uint32_t addr);
197
uint32_t virtio_config_readl(VirtIODevice *vdev, uint32_t addr);
198
void virtio_config_writeb(VirtIODevice *vdev, uint32_t addr, uint32_t data);
199
void virtio_config_writew(VirtIODevice *vdev, uint32_t addr, uint32_t data);
200
void virtio_config_writel(VirtIODevice *vdev, uint32_t addr, uint32_t data);
201
void virtio_queue_set_addr(VirtIODevice *vdev, int n, hwaddr addr);
202
hwaddr virtio_queue_get_addr(VirtIODevice *vdev, int n);
203
void virtio_queue_set_num(VirtIODevice *vdev, int n, int num);
204
int virtio_queue_get_num(VirtIODevice *vdev, int n);
205
void virtio_queue_set_align(VirtIODevice *vdev, int n, int align);
206
void virtio_queue_notify(VirtIODevice *vdev, int n);
207
uint16_t virtio_queue_vector(VirtIODevice *vdev, int n);
208
void virtio_queue_set_vector(VirtIODevice *vdev, int n, uint16_t vector);
209
void virtio_set_status(VirtIODevice *vdev, uint8_t val);
210
void virtio_reset(void *opaque);
211
void virtio_update_irq(VirtIODevice *vdev);
212
int virtio_set_features(VirtIODevice *vdev, uint32_t val);
215
typedef struct VirtIOBlkConf VirtIOBlkConf;
216
struct virtio_net_conf;
217
VirtIODevice *virtio_net_init(DeviceState *dev, NICConf *conf,
218
struct virtio_net_conf *net,
219
uint32_t host_features);
220
typedef struct virtio_serial_conf virtio_serial_conf;
221
typedef struct VirtIOSCSIConf VirtIOSCSIConf;
222
typedef struct VirtIORNGConf VirtIORNGConf;
224
#define DEFINE_VIRTIO_COMMON_FEATURES(_state, _field) \
225
DEFINE_PROP_BIT("indirect_desc", _state, _field, \
226
VIRTIO_RING_F_INDIRECT_DESC, true), \
227
DEFINE_PROP_BIT("event_idx", _state, _field, \
228
VIRTIO_RING_F_EVENT_IDX, true)
230
hwaddr virtio_queue_get_desc_addr(VirtIODevice *vdev, int n);
231
hwaddr virtio_queue_get_avail_addr(VirtIODevice *vdev, int n);
232
hwaddr virtio_queue_get_used_addr(VirtIODevice *vdev, int n);
233
hwaddr virtio_queue_get_ring_addr(VirtIODevice *vdev, int n);
234
hwaddr virtio_queue_get_desc_size(VirtIODevice *vdev, int n);
235
hwaddr virtio_queue_get_avail_size(VirtIODevice *vdev, int n);
236
hwaddr virtio_queue_get_used_size(VirtIODevice *vdev, int n);
237
hwaddr virtio_queue_get_ring_size(VirtIODevice *vdev, int n);
238
uint16_t virtio_queue_get_last_avail_idx(VirtIODevice *vdev, int n);
239
void virtio_queue_set_last_avail_idx(VirtIODevice *vdev, int n, uint16_t idx);
240
void virtio_queue_invalidate_signalled_used(VirtIODevice *vdev, int n);
241
VirtQueue *virtio_get_queue(VirtIODevice *vdev, int n);
242
uint16_t virtio_get_queue_index(VirtQueue *vq);
243
int virtio_queue_get_id(VirtQueue *vq);
244
EventNotifier *virtio_queue_get_guest_notifier(VirtQueue *vq);
245
void virtio_queue_set_guest_notifier_fd_handler(VirtQueue *vq, bool assign,
247
EventNotifier *virtio_queue_get_host_notifier(VirtQueue *vq);
248
void virtio_queue_set_host_notifier_fd_handler(VirtQueue *vq, bool assign,
250
void virtio_queue_notify_vq(VirtQueue *vq);
251
void virtio_irq(VirtQueue *vq);