 static VirtIOBlock *to_virtio_blk(VirtIODevice *vdev)
 {
     return (VirtIOBlock *)vdev;
 }
 
+/* store identify data in little endian format
+ */
+static inline void put_le16(uint16_t *p, unsigned int v)
+{
+    *p = cpu_to_le16(v);
+}
+
+/* copy to *dst from *src, nul pad dst tail as needed to len bytes
+ */
+static inline void padstr(char *dst, const char *src, int len)
+{
+    while (len--)
+        *dst++ = *src ? *src++ : '\0';
+}
+
+/* setup simulated identify data as appropriate for virtio block device
+ *
+ * ref: AT Attachment 8 - ATA/ATAPI Command Set (ATA8-ACS)
+ */
+static inline void virtio_identify_template(struct virtio_blk_config *bc)
+{
+    uint16_t *p = &bc->identify[0];
+    uint64_t lba_sectors = bc->capacity;
+
+    memset(p, 0, sizeof(bc->identify));
+    put_le16(p + 0, 0x0);                            /* ATA device */
+    padstr((char *)(p + 23), QEMU_VERSION, 8);       /* firmware revision */
+    padstr((char *)(p + 27), "QEMU VIRT_BLK", 40);   /* model# */
+    put_le16(p + 47, 0x80ff);                        /* max xfer 255 sectors */
+    put_le16(p + 49, 0x0b00);                        /* support IORDY/LBA/DMA */
+    put_le16(p + 59, 0x1ff);                         /* cur xfer 255 sectors */
+    put_le16(p + 80, 0x1f0);                         /* support ATA8/7/6/5/4 */
+    put_le16(p + 81, 0x16);
+    put_le16(p + 82, 0x400);
+    put_le16(p + 83, 0x400);
+    put_le16(p + 100, lba_sectors);
+    put_le16(p + 101, lba_sectors >> 16);
+    put_le16(p + 102, lba_sectors >> 32);
+    put_le16(p + 103, lba_sectors >> 48);
+}
+
 typedef struct VirtIOBlockReq
 {
     VirtIOBlock *dev;
     VirtQueueElement elem;
     struct virtio_blk_inhdr *in;
     struct virtio_blk_outhdr *out;
-    size_t size;
-    uint8_t *buffer;
+    struct virtio_scsi_inhdr *scsi;
+    QEMUIOVector qiov;
     struct VirtIOBlockReq *next;
 } VirtIOBlockReq;
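The identify template above fabricates just enough of an ATA IDENTIFY page for
the guest to see a model string and a capacity. Its word layout can be checked
outside QEMU; the sketch below is a standalone program (helpers re-implemented
here, little-endian host assumed) showing the model string in words 27..46 and
the 48-bit LBA sector count in words 100..103 round-tripping:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* standalone re-implementations for illustration; on a little-endian
 * host the le16 store is a plain store */
static void put_le16(uint16_t *p, unsigned int v) { *p = (uint16_t)v; }

static void padstr(char *dst, const char *src, int len)
{
    while (len--)
        *dst++ = *src ? *src++ : '\0';
}

int main(void)
{
    uint16_t id[256];
    uint64_t lba_sectors = 0x12345678ULL;   /* arbitrary sector count */
    uint64_t decoded;

    memset(id, 0, sizeof(id));
    padstr((char *)(id + 27), "QEMU VIRT_BLK", 40);   /* words 27..46 */
    put_le16(id + 100, lba_sectors);        /* words 100..103 hold the */
    put_le16(id + 101, lba_sectors >> 16);  /* 48-bit LBA sector count, */
    put_le16(id + 102, lba_sectors >> 32);  /* low word first */
    put_le16(id + 103, lba_sectors >> 48);

    decoded = (uint64_t)id[100] | (uint64_t)id[101] << 16 |
              (uint64_t)id[102] << 32 | (uint64_t)id[103] << 48;
    printf("model=%.40s sectors=%#llx\n",
           (char *)(id + 27), (unsigned long long)decoded);
    return 0;
}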
-static int virtio_blk_handle_write(VirtIOBlockReq *req)
-{
-    if (!req->buffer) {
-        size_t offset = 0;
-        int i;
-
-        for (i = 1; i < req->elem.out_num; i++)
-            req->size += req->elem.out_sg[i].iov_len;
-
-        req->buffer = qemu_memalign(512, req->size);
-        if (req->buffer == NULL) {
-            qemu_free(req);
-            return -1;
-        }
-
-        /* We copy the data from the SG list to avoid splitting up the request.
-           This helps performance a lot until we can pass full sg lists as AIO
-           operations */
-        for (i = 1; i < req->elem.out_num; i++) {
-            size_t len;
-
-            len = MIN(req->elem.out_sg[i].iov_len,
-                      req->size - offset);
-            memcpy(req->buffer + offset,
-                   req->elem.out_sg[i].iov_base,
-                   len);
-            offset += len;
-        }
-    }
-
-    bdrv_aio_write(req->dev->bs, req->out->sector, req->buffer, req->size / 512,
-                   virtio_blk_rw_complete, req);
-    return 0;
-}
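This deleted write path gathered the guest's scatter list into a single
512-byte-aligned bounce buffer so the block layer saw one contiguous request.
The same gather pattern, reduced to plain POSIX iovecs (a hypothetical helper,
error handling minimal):

#include <stdlib.h>
#include <string.h>
#include <sys/uio.h>

/* Gather a scatter list into one contiguous, 512-byte-aligned buffer.
 * Returns NULL on allocation failure; the caller frees the buffer. */
static void *gather_iov(const struct iovec *iov, int iovcnt, size_t *total)
{
    size_t size = 0, offset = 0;
    void *buf;
    int i;

    for (i = 0; i < iovcnt; i++)
        size += iov[i].iov_len;

    if (posix_memalign(&buf, 512, size))
        return NULL;

    for (i = 0; i < iovcnt; i++) {
        memcpy((char *)buf + offset, iov[i].iov_base, iov[i].iov_len);
        offset += iov[i].iov_len;
    }

    *total = size;
    return buf;
}

That per-request copy is exactly what the vectored replacement further below
eliminates.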
+#ifdef __linux__
+static void virtio_blk_handle_scsi(VirtIOBlockReq *req)
+{
+    struct sg_io_hdr hdr;
+    int ret;
+    int status;
+    int size = 0;
+    int i;
+
+    /*
+     * We require at least one output segment each for the virtio_blk_outhdr
+     * and the SCSI command block.
+     *
+     * We also at least require the virtio_blk_inhdr, the virtio_scsi_inhdr
+     * and the sense buffer pointer in the input segments.
+     */
+    if (req->elem.out_num < 2 || req->elem.in_num < 3) {
+        virtio_blk_req_complete(req, VIRTIO_BLK_S_IOERR);
+        return;
+    }
+
+    /*
+     * No support for bidirection commands yet.
+     */
+    if (req->elem.out_num > 2 && req->elem.in_num > 3) {
+        virtio_blk_req_complete(req, VIRTIO_BLK_S_UNSUPP);
+        return;
+    }
+
+    /*
+     * The scsi inhdr is placed in the second-to-last input segment, just
+     * before the regular inhdr.
+     */
+    req->scsi = (void *)req->elem.in_sg[req->elem.in_num - 2].iov_base;
+    size = sizeof(*req->in) + sizeof(*req->scsi);
+
+    memset(&hdr, 0, sizeof(struct sg_io_hdr));
+    hdr.interface_id = 'S';
+    hdr.cmd_len = req->elem.out_sg[1].iov_len;
+    hdr.cmdp = req->elem.out_sg[1].iov_base;
+
+    if (req->elem.out_num > 2) {
+        /*
+         * If there are more than the minimally required 2 output segments
+         * there is write payload starting from the third iovec.
+         */
+        hdr.dxfer_direction = SG_DXFER_TO_DEV;
+        hdr.iovec_count = req->elem.out_num - 2;
+
+        for (i = 0; i < hdr.iovec_count; i++)
+            hdr.dxfer_len += req->elem.out_sg[i + 2].iov_len;
+
+        hdr.dxferp = req->elem.out_sg + 2;
+
+    } else if (req->elem.in_num > 3) {
+        /*
+         * If we have more than 3 input segments the guest wants to actually
+         * read data.
+         */
+        hdr.dxfer_direction = SG_DXFER_FROM_DEV;
+        hdr.iovec_count = req->elem.in_num - 3;
+        for (i = 0; i < hdr.iovec_count; i++)
+            hdr.dxfer_len += req->elem.in_sg[i].iov_len;
+
+        hdr.dxferp = req->elem.in_sg;
+        size += hdr.dxfer_len;
+    } else {
+        /*
+         * Some SCSI commands don't actually transfer any data.
+         */
+        hdr.dxfer_direction = SG_DXFER_NONE;
+    }
+
+    hdr.sbp = req->elem.in_sg[req->elem.in_num - 3].iov_base;
+    hdr.mx_sb_len = req->elem.in_sg[req->elem.in_num - 3].iov_len;
+    size += hdr.mx_sb_len;
+
+    ret = bdrv_ioctl(req->dev->bs, SG_IO, &hdr);
+    if (ret) {
+        status = VIRTIO_BLK_S_UNSUPP;
+        hdr.status = ret;
+        hdr.resid = hdr.dxfer_len;
+    } else if (hdr.status) {
+        status = VIRTIO_BLK_S_IOERR;
+    } else {
+        status = VIRTIO_BLK_S_OK;
+    }
+
+    req->scsi->errors = hdr.status;
+    req->scsi->residual = hdr.resid;
+    req->scsi->sense_len = hdr.sb_len_wr;
+    req->scsi->data_len = hdr.dxfer_len;
+
+    virtio_blk_req_complete(req, status);
+}
+#else
+static void virtio_blk_handle_scsi(VirtIOBlockReq *req)
+{
+    virtio_blk_req_complete(req, VIRTIO_BLK_S_UNSUPP);
+}
+#endif /* __linux__ */
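The Linux branch above fills a struct sg_io_hdr the same way a userspace
program would and forwards it with the SG_IO ioctl. For comparison, a minimal
host-side INQUIRY through the same interface (a sketch: /dev/sdX is a
placeholder and error handling is trimmed):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <scsi/sg.h>
#include <sys/ioctl.h>

int main(void)
{
    unsigned char cdb[6] = { 0x12, 0, 0, 0, 96, 0 };  /* INQUIRY, 96 bytes */
    unsigned char data[96], sense[32];
    struct sg_io_hdr hdr;
    int fd = open("/dev/sdX", O_RDONLY);              /* placeholder device */

    memset(&hdr, 0, sizeof(hdr));
    hdr.interface_id = 'S';
    hdr.cmdp = cdb;
    hdr.cmd_len = sizeof(cdb);
    hdr.dxfer_direction = SG_DXFER_FROM_DEV;
    hdr.dxferp = data;
    hdr.dxfer_len = sizeof(data);
    hdr.sbp = sense;
    hdr.mx_sb_len = sizeof(sense);

    if (fd < 0 || ioctl(fd, SG_IO, &hdr) < 0 || hdr.status)
        fprintf(stderr, "INQUIRY failed\n");
    else
        printf("vendor/product: %.24s\n", (char *)data + 8);
    return 0;
}

The one real difference: QEMU sets iovec_count and passes the guest's iovec
array itself as dxferp, while this sketch transfers a single flat buffer.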
+static void virtio_blk_handle_write(VirtIOBlockReq *req)
+{
+    bdrv_aio_writev(req->dev->bs, req->out->sector, &req->qiov,
+                    req->qiov.size / 512, virtio_blk_rw_complete, req);
+}
+
+static void virtio_blk_handle_read(VirtIOBlockReq *req)
+{
+    bdrv_aio_readv(req->dev->bs, req->out->sector, &req->qiov,
+                   req->qiov.size / 512, virtio_blk_rw_complete, req);
+}
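With a QEMUIOVector the request now travels as a scatter/gather list end to
end and the bounce copy disappears. The same idea at the syscall level is a
positional vectored read (Linux preadv; disk.img is a placeholder):

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <sys/uio.h>

int main(void)
{
    char hdr_buf[512], data_buf[4096];
    struct iovec iov[2] = {
        { .iov_base = hdr_buf,  .iov_len = sizeof(hdr_buf)  },
        { .iov_base = data_buf, .iov_len = sizeof(data_buf) },
    };
    int fd = open("disk.img", O_RDONLY);    /* placeholder image */
    ssize_t n;

    if (fd < 0)
        return 1;

    /* one call fills both buffers in order; no intermediate copy */
    n = preadv(fd, iov, 2, 0);
    printf("read %zd bytes across 2 iovecs\n", n);
    return 0;
}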
 static void virtio_blk_handle_output(VirtIODevice *vdev, VirtQueue *vq)
 ...
         req->in = (void *)req->elem.in_sg[req->elem.in_num - 1].iov_base;
 
         if (req->out->type & VIRTIO_BLK_T_SCSI_CMD) {
-            unsigned int len = sizeof(*req->in);
-
-            req->in->status = VIRTIO_BLK_S_UNSUPP;
-            virtqueue_push(vq, &req->elem, len);
-            virtio_notify(vdev, vq);
+            virtio_blk_handle_scsi(req);
         } else if (req->out->type & VIRTIO_BLK_T_OUT) {
-            if (virtio_blk_handle_write(req) < 0)
-                break;
+            qemu_iovec_init_external(&req->qiov, &req->elem.out_sg[1],
+                                     req->elem.out_num - 1);
+            virtio_blk_handle_write(req);
         } else {
-            for (i = 0; i < req->elem.in_num - 1; i++)
-                req->size += req->elem.in_sg[i].iov_len;
-
-            req->buffer = qemu_memalign(512, req->size);
-            if (req->buffer == NULL) {
-                qemu_free(req);
-                break;
-            }
-
-            bdrv_aio_read(s->bs, req->out->sector,
-                          req->buffer,
-                          req->size / 512,
-                          virtio_blk_rw_complete,
-                          req);
+            qemu_iovec_init_external(&req->qiov, &req->elem.in_sg[0],
+                                     req->elem.in_num - 1);
+            virtio_blk_handle_read(req);
         }
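The dispatch depends on the virtio-blk request framing: out_sg[0] always holds
the request header, the last in_sg element holds the one-byte status footer,
and everything in between is payload. That is why the qiov is built from
out_sg[1] onward for writes and from in_sg[0] for reads. The two headers, as
defined by the virtio-blk ABI:

#include <stdint.h>

/* device-readable header, first out descriptor */
struct virtio_blk_outhdr {
    uint32_t type;      /* VIRTIO_BLK_T_IN/T_OUT, may be OR'd w/ T_SCSI_CMD */
    uint32_t ioprio;
    uint64_t sector;    /* offset in 512-byte sectors */
};

/* device-writable footer, last in descriptor */
struct virtio_blk_inhdr {
    uint8_t status;     /* VIRTIO_BLK_S_OK / S_IOERR / S_UNSUPP */
};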
 static void virtio_blk_update_config(VirtIODevice *vdev, uint8_t *config)
 ...
     bdrv_get_geometry(s->bs, &capacity);
     bdrv_get_geometry_hint(s->bs, &cylinders, &heads, &secs);
+    memset(&blkcfg, 0, sizeof(blkcfg));
     stq_raw(&blkcfg.capacity, capacity);
     stl_raw(&blkcfg.seg_max, 128 - 2);
     stw_raw(&blkcfg.cylinders, cylinders);
     blkcfg.heads = heads;
     blkcfg.sectors = secs;
+    virtio_identify_template(&blkcfg);
+    memcpy(&blkcfg.identify[VIRTIO_BLK_ID_SN], s->serial_str,
+        VIRTIO_BLK_ID_SN_BYTES);
     memcpy(config, &blkcfg, sizeof(blkcfg));
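Two details worth noting here: zeroing blkcfg first keeps uninitialized config
bytes from leaking to the guest, and seg_max is 128 - 2 because each request
in the 128-entry virtqueue spends one descriptor on the outhdr and one on the
inhdr, leaving 126 for data. On the guest side the capacity comes back out of
the raw config bytes; a sketch of the decode (legacy virtio config space is
guest-endian, a little-endian guest is assumed):

#include <stdint.h>

/* capacity is the first field of struct virtio_blk_config; reassemble
 * its 8 bytes, least-significant byte first */
static uint64_t blk_capacity_sectors(const uint8_t *config)
{
    uint64_t v = 0;
    int i;

    for (i = 7; i >= 0; i--)
        v = (v << 8) | config[i];
    return v;
}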
 static uint32_t virtio_blk_get_features(VirtIODevice *vdev)
 {
-    return (1 << VIRTIO_BLK_F_SEG_MAX | 1 << VIRTIO_BLK_F_GEOMETRY);
+    VirtIOBlock *s = to_virtio_blk(vdev);
+    uint32_t features = 0;
+
+    features |= (1 << VIRTIO_BLK_F_SEG_MAX);
+    features |= (1 << VIRTIO_BLK_F_GEOMETRY);
+#ifdef __linux__
+    features |= (1 << VIRTIO_BLK_F_SCSI);
+#endif
+    if (strcmp(s->serial_str, "0"))
+        features |= 1 << VIRTIO_BLK_F_IDENTIFY;
+
+    return features;
 }
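Feature bits are an offer, not a command: the guest masks the host's set
against what it understands before acknowledging. A minimal sketch of the
guest's side of that negotiation (bit positions per the virtio-blk spec):

#include <stdint.h>

enum {
    VIRTIO_BLK_F_SEG_MAX  = 2,
    VIRTIO_BLK_F_GEOMETRY = 4,
    VIRTIO_BLK_F_SCSI     = 7,
};

/* keep only the feature bits both sides support */
static uint32_t negotiate(uint32_t host_features)
{
    uint32_t guest_supported = (1u << VIRTIO_BLK_F_SEG_MAX) |
                               (1u << VIRTIO_BLK_F_GEOMETRY);

    return host_features & guest_supported;
}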
 
 static void virtio_blk_save(QEMUFile *f, void *opaque)
-void *virtio_blk_init(PCIBus *bus, BlockDriverState *bs)
+VirtIODevice *virtio_blk_init(DeviceState *dev)
 {
     VirtIOBlock *s;
     int cylinders, heads, secs;
     static int virtio_blk_id;
+    BlockDriverState *bs;
+    char *ps;
 
-    s = (VirtIOBlock *)virtio_init_pci(bus, "virtio-blk",
-                                       PCI_VENDOR_ID_REDHAT_QUMRANET,
-                                       PCI_DEVICE_ID_VIRTIO_BLOCK,
-                                       PCI_VENDOR_ID_REDHAT_QUMRANET,
-                                       VIRTIO_ID_BLOCK,
-                                       PCI_CLASS_STORAGE_OTHER, 0x00,
-                                       sizeof(struct virtio_blk_config), sizeof(VirtIOBlock));
+    s = (VirtIOBlock *)virtio_common_init("virtio-blk", VIRTIO_ID_BLOCK,
+                                          sizeof(struct virtio_blk_config),
+                                          sizeof(VirtIOBlock));
 
+    bs = qdev_init_bdrv(dev, IF_VIRTIO);
     s->vdev.get_config = virtio_blk_update_config;
     s->vdev.get_features = virtio_blk_get_features;
     s->vdev.reset = virtio_blk_reset;
     s->bs = bs;
-    bs->private = &s->vdev.pci_dev;
+    if (strlen(ps = (char *)drive_get_serial(bs)))
+        strncpy(s->serial_str, ps, sizeof(s->serial_str));
+    else
+        snprintf(s->serial_str, sizeof(s->serial_str), "0");
+    bs->private = dev;
     bdrv_guess_geometry(s->bs, &cylinders, &heads, &secs);
     bdrv_set_geometry_hint(s->bs, cylinders, heads, secs);
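One last note on the serial path: the "0" placeholder stored here is exactly
what virtio_blk_get_features tests with strcmp before offering
VIRTIO_BLK_F_IDENTIFY, and bare strncpy does not NUL-terminate when the
source fills the buffer. Where termination matters, a bounded copy like this
hypothetical helper is the safer pattern:

#include <stdio.h>

/* bounded copy that always NUL-terminates, unlike bare strncpy */
static void copy_serial(char *dst, size_t dst_len, const char *src)
{
    snprintf(dst, dst_len, "%s", src);
}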