/******************************************************************************
 * Copyright (c) 2011 IBM Corporation
 * All rights reserved.
 * This program and the accompanying materials
 * are made available under the terms of the BSD License
 * which accompanies this distribution, and is available at
 * http://www.opensource.org/licenses/bsd-license.php
 *
 * Contributors:
 *     IBM Corporation - initial implementation
 *****************************************************************************/

/*
 * This is the implementation for the Virtio network device driver. Details
 * about the virtio-net interface can be found in Rusty Russel's "Virtio PCI
 * Card Specification v0.8.10", appendix C, which can be found here:
 *
 *        http://ozlabs.org/~rusty/virtio-spec/virtio-spec.pdf
 */
26
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <byteorder.h>
#include "virtio-net.h"
#include "virtio-internal.h"
34
# define dprintf(fmt...) do { printf(fmt); } while(0)
36
# define dprintf(fmt...)
39
#define sync() asm volatile (" sync \n" ::: "memory")
41
#define DRIVER_FEATURE_SUPPORT (VIRTIO_NET_F_MAC | VIRTIO_F_VERSION_1)
43
struct virtio_device virtiodev;
44
static struct vqs vq_rx; /* Information about receive virtqueues */
45
static struct vqs vq_tx; /* Information about transmit virtqueues */
47
/* See Virtio Spec, appendix C, "Device Operation" */
48
struct virtio_net_hdr {
55
// uint16_t num_buffers; /* Only if VIRTIO_NET_F_MRG_RXBUF */
58
static unsigned int net_hdr_size;
60
struct virtio_net_hdr_v1 {
70
static uint16_t last_rx_idx; /* Last index in RX "used" ring */
73
/**
 * Module init for virtio via PCI.
 * Checks whether we're responsible for the given device and set up
 * the virtqueue configuration.
 */
77
static int virtionet_init_pci(struct virtio_device *dev)
79
dprintf("virtionet: doing virtionet_init_pci!\n");
84
/* make a copy of the device structure */
85
memcpy(&virtiodev, dev, sizeof(struct virtio_device));
88
virtio_reset_device(&virtiodev);
90
/* The queue information can be retrieved via the virtio header that
91
* can be found in the I/O BAR. First queue is the receive queue,
92
* second the transmit queue, and the forth is the control queue for
94
* We are only interested in the receive and transmit queue here. */
95
if (virtio_queue_init_vq(dev, &vq_rx, VQ_RX) ||
96
virtio_queue_init_vq(dev, &vq_tx, VQ_TX)) {
97
virtio_set_status(dev, VIRTIO_STAT_ACKNOWLEDGE|VIRTIO_STAT_DRIVER
102
/* Acknowledge device. */
103
virtio_set_status(&virtiodev, VIRTIO_STAT_ACKNOWLEDGE);
109
/**
 * Initialize the virtio-net device.
 * See the Virtio Spec, chapter 2.2.1 and Appendix C "Device Initialization"
 * for details.
 */
113
static int virtionet_init(net_driver_t *driver)
{
	int i;
	int status = VIRTIO_STAT_ACKNOWLEDGE | VIRTIO_STAT_DRIVER;

	dprintf("virtionet_init(%02x:%02x:%02x:%02x:%02x:%02x)\n",
		driver->mac_addr[0], driver->mac_addr[1],
		driver->mac_addr[2], driver->mac_addr[3],
		driver->mac_addr[4], driver->mac_addr[5]);

	/* Nothing to do if the device is already up */
	if (driver->running != 0)
		return 0;

	/* Tell HV that we know how to drive the device. */
	virtio_set_status(&virtiodev, status);

	/* Device specific setup - we support legacy and version 1.0 */
	if (virtiodev.is_modern) {
		if (virtio_negotiate_guest_features(&virtiodev, DRIVER_FEATURE_SUPPORT))
			goto dev_error;
		net_hdr_size = sizeof(struct virtio_net_hdr_v1);
		virtio_get_status(&virtiodev, &status);
	} else {
		net_hdr_size = sizeof(struct virtio_net_hdr);
		virtio_set_guest_features(&virtiodev, 0);
	}

	/* Allocate memory for one transmit an multiple receive buffers */
	vq_rx.buf_mem = SLOF_alloc_mem((BUFFER_ENTRY_SIZE+net_hdr_size)
				       * RX_QUEUE_SIZE);
	if (!vq_rx.buf_mem) {
		printf("virtionet: Failed to allocate buffers!\n");
		goto dev_error;
	}

	/* Prepare receive buffer queue: each slot uses a descriptor pair,
	 * one for the net header and a chained one for the packet data. */
	for (i = 0; i < RX_QUEUE_SIZE; i++) {
		uint64_t addr = (uint64_t)vq_rx.buf_mem
			+ i * (BUFFER_ENTRY_SIZE+net_hdr_size);
		uint32_t id = i*2;
		/* Descriptor for net_hdr: */
		virtio_fill_desc(&vq_rx.desc[id], virtiodev.is_modern, addr, net_hdr_size,
				 VRING_DESC_F_NEXT | VRING_DESC_F_WRITE, id + 1);

		/* Descriptor for data: */
		virtio_fill_desc(&vq_rx.desc[id+1], virtiodev.is_modern, addr + net_hdr_size,
				 BUFFER_ENTRY_SIZE, VRING_DESC_F_WRITE, 0);

		vq_rx.avail->ring[i] = virtio_cpu_to_modern16(&virtiodev, id);
	}
	sync();
	vq_rx.avail->flags = virtio_cpu_to_modern16(&virtiodev, VRING_AVAIL_F_NO_INTERRUPT);
	vq_rx.avail->idx = virtio_cpu_to_modern16(&virtiodev, RX_QUEUE_SIZE);

	last_rx_idx = virtio_modern16_to_cpu(&virtiodev, vq_rx.used->idx);

	vq_tx.avail->flags = virtio_cpu_to_modern16(&virtiodev, VRING_AVAIL_F_NO_INTERRUPT);
	vq_tx.avail->idx = 0;

	/* Tell HV that setup succeeded */
	status |= VIRTIO_STAT_DRIVER_OK;
	virtio_set_status(&virtiodev, status);

	/* Tell HV that RX queues are ready */
	virtio_queue_notify(&virtiodev, VQ_RX);

	driver->running = 1;
	/* Read the MAC address from the device config space, byte by byte */
	for(i = 0; i < (int)sizeof(driver->mac_addr); i++) {
		driver->mac_addr[i] = virtio_get_config(&virtiodev, i, 1);
	}
	return 0;

dev_error:
	/* Initialization failed - tell the device and bail out */
	status |= VIRTIO_STAT_FAILED;
	virtio_set_status(&virtiodev, status);
	return -1;
}
195
/**
 * Halt the virtio-net device.
 * We've got to make sure that the host stops all transfers since the buffers
 * in our main memory will become invalid after this module has been terminated.
 */
198
static int virtionet_term(net_driver_t *driver)
{
	dprintf("virtionet_term()\n");

	/* Nothing to do if the device was never started */
	if (driver->running == 0)
		return 0;

	/* Quiesce device */
	virtio_set_status(&virtiodev, VIRTIO_STAT_FAILED);

	/* Reset device */
	virtio_reset_device(&virtiodev);

	driver->running = 0;

	return 0;
}
220
static int virtionet_xmit(char *buf, int len)
223
static struct virtio_net_hdr_v1 nethdr_v1;
224
static struct virtio_net_hdr nethdr_legacy;
225
void *nethdr = &nethdr_legacy;
227
if (len > BUFFER_ENTRY_SIZE) {
228
printf("virtionet: Packet too big!\n");
232
dprintf("\nvirtionet_xmit(packet at %p, %d bytes)\n", buf, len);
234
if (virtiodev.is_modern)
237
memset(nethdr, 0, net_hdr_size);
239
/* Determine descriptor index */
240
idx = virtio_modern16_to_cpu(&virtiodev, vq_tx.avail->idx);
241
id = (idx * 2) % vq_tx.size;
243
/* Set up virtqueue descriptor for header */
244
virtio_fill_desc(&vq_tx.desc[id], virtiodev.is_modern, (uint64_t)nethdr,
245
net_hdr_size, VRING_DESC_F_NEXT, id + 1);
247
/* Set up virtqueue descriptor for data */
248
virtio_fill_desc(&vq_tx.desc[id+1], virtiodev.is_modern, (uint64_t)buf, len, 0, 0);
250
vq_tx.avail->ring[idx % vq_tx.size] = virtio_cpu_to_modern16(&virtiodev, id);
252
vq_tx.avail->idx = virtio_cpu_to_modern16(&virtiodev, idx + 1);
255
/* Tell HV that TX queue is ready */
256
virtio_queue_notify(&virtiodev, VQ_TX);
265
static int virtionet_receive(char *buf, int maxlen)
271
idx = virtio_modern16_to_cpu(&virtiodev, vq_rx.used->idx);
273
if (last_rx_idx == idx) {
274
/* Nothing received yet */
278
id = (virtio_modern32_to_cpu(&virtiodev, vq_rx.used->ring[last_rx_idx % vq_rx.size].id) + 1)
280
len = virtio_modern32_to_cpu(&virtiodev, vq_rx.used->ring[last_rx_idx % vq_rx.size].len)
282
dprintf("virtionet_receive() last_rx_idx=%i, vq_rx.used->idx=%i,"
283
" id=%i len=%i\n", last_rx_idx, vq_rx.used->idx, id, len);
285
if (len > (uint32_t)maxlen) {
286
printf("virtio-net: Receive buffer not big enough!\n");
294
for (i=0; i<64; i++) {
295
printf(" %02x", *(uint8_t*)(vq_rx.desc[id].addr+i));
302
/* Copy data to destination buffer */
303
memcpy(buf, (void *)virtio_modern64_to_cpu(&virtiodev, vq_rx.desc[id].addr), len);
305
/* Move indices to next entries */
306
last_rx_idx = last_rx_idx + 1;
308
avail_idx = virtio_modern16_to_cpu(&virtiodev, vq_rx.avail->idx);
309
vq_rx.avail->ring[avail_idx % vq_rx.size] = virtio_cpu_to_modern16(&virtiodev, id - 1);
311
vq_rx.avail->idx = virtio_cpu_to_modern16(&virtiodev, avail_idx + 1);
313
/* Tell HV that RX queue entry is ready */
314
virtio_queue_notify(&virtiodev, VQ_RX);
319
net_driver_t *virtionet_open(struct virtio_device *dev)
321
net_driver_t *driver;
323
driver = SLOF_alloc_mem(sizeof(*driver));
325
printf("Unable to allocate virtio-net driver\n");
331
if (virtionet_init_pci(dev))
334
if (virtionet_init(driver))
339
FAIL: SLOF_free_mem(driver, sizeof(*driver));
343
/**
 * Shut down the device and release the driver instance.
 */
void virtionet_close(net_driver_t *driver)
{
	if (driver) {
		virtionet_term(driver);
		SLOF_free_mem(driver, sizeof(*driver));
	}
}
351
/**
 * Read entry point: receive one packet into buf (at most len bytes).
 * Returns bytes received, 0 if none pending, -1 on bad arguments.
 */
int virtionet_read(char *buf, int len)
{
	if (buf)
		return virtionet_receive(buf, len);
	return -1;
}
358
/**
 * Write entry point: transmit len bytes from buf.
 * Returns bytes queued, 0 on transmit error, -1 on bad arguments.
 */
int virtionet_write(char *buf, int len)
{
	if (buf)
		return virtionet_xmit(buf, len);
	return -1;
}