192
194
virtio_net_set_status(vdev, vdev->status);
197
/*
 * Emit the NIC_RX_FILTER_CHANGED monitor event for this NIC, then mask
 * further notifications until the next rx-filter query re-arms them, so
 * a burst of guest filter changes cannot flood the monitor.
 *
 * NOTE(review): this extract interleaves original line numbers and
 * elides lines (opening/closing braces, the 'event_data' declaration,
 * the n->netclient_name argument of the first jsonf call), so the text
 * below is not contiguous source.
 */
static void rxfilter_notify(NetClientState *nc)
200
VirtIONet *n = qemu_get_nic_opaque(nc);
202
if (nc->rxfilter_notify_enabled) {
203
/* Include the client name in the event payload when the NIC has one. */
if (n->netclient_name) {
204
event_data = qobject_from_jsonf("{ 'name': %s, 'path': %s }",
206
/* NOTE(review): object_get_canonical_path() returns an allocated
 * string and no g_free() is visible here — confirm it is not leaked. */
object_get_canonical_path(OBJECT(n->qdev)));
208
event_data = qobject_from_jsonf("{ 'path': %s }",
209
object_get_canonical_path(OBJECT(n->qdev)));
211
monitor_protocol_event(QEVENT_NIC_RX_FILTER_CHANGED, event_data);
212
qobject_decref(event_data);
214
/* disable event notification to avoid events flooding */
215
nc->rxfilter_notify_enabled = 0;
219
static char *mac_strdup_printf(const uint8_t *mac)
{
    /* Render a 6-byte MAC address as a newly allocated
     * "aa:bb:cc:dd:ee:ff" string; the caller owns the result and must
     * release it with g_free(). */
    return g_strdup_printf("%.2x:%.2x:%.2x:%.2x:%.2x:%.2x",
                           mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
}
225
/*
 * Build an RxFilterInfo snapshot of this NIC's rx filter state
 * (promiscuous/unicast/multicast modes, MAC tables, VLAN table) for the
 * query-rx-filter QMP command, and re-enable rx-filter change events.
 *
 * NOTE(review): this extract elides lines (declarations of 'info', 'i',
 * 'j', the str_list/int_list NULL initialisations, 'if (n->nouni)' /
 * 'if (n->nomulti)' heads, closing braces and the final 'return info');
 * interleaved numbers are original line numbers.
 */
static RxFilterInfo *virtio_net_query_rxfilter(NetClientState *nc)
227
VirtIONet *n = qemu_get_nic_opaque(nc);
229
strList *str_list, *entry;
230
intList *int_list, *int_entry;
233
info = g_malloc0(sizeof(*info));
234
info->name = g_strdup(nc->name);
235
info->promiscuous = n->promisc;
238
/* Unicast mode: none / all / normal, chosen from the nouni/alluni flags. */
info->unicast = RX_STATE_NONE;
239
} else if (n->alluni) {
240
info->unicast = RX_STATE_ALL;
242
info->unicast = RX_STATE_NORMAL;
246
/* Multicast mode mirrors the unicast selection. */
info->multicast = RX_STATE_NONE;
247
} else if (n->allmulti) {
248
info->multicast = RX_STATE_ALL;
250
info->multicast = RX_STATE_NORMAL;
253
/* NOTE(review): 'broadcast_allowed = n->nobcast' reads inverted
 * (nobcast set would suggest broadcast is NOT allowed) — confirm
 * against the QMP schema semantics before changing anything. */
info->broadcast_allowed = n->nobcast;
254
info->multicast_overflow = n->mac_table.multi_overflow;
255
info->unicast_overflow = n->mac_table.uni_overflow;
257
info->main_mac = mac_strdup_printf(n->mac);
260
/* Unicast MACs occupy macs[0 .. first_multi); collect them as a strList. */
for (i = 0; i < n->mac_table.first_multi; i++) {
261
entry = g_malloc0(sizeof(*entry));
262
entry->value = mac_strdup_printf(n->mac_table.macs + i * ETH_ALEN);
263
entry->next = str_list;
266
info->unicast_table = str_list;
269
/* Multicast MACs follow, from first_multi up to in_use. */
for (i = n->mac_table.first_multi; i < n->mac_table.in_use; i++) {
270
entry = g_malloc0(sizeof(*entry));
271
entry->value = mac_strdup_printf(n->mac_table.macs + i * ETH_ALEN);
272
entry->next = str_list;
275
info->multicast_table = str_list;
278
/* Walk the VLAN bitmap: 32 VLAN ids per 32-bit word. */
for (i = 0; i < MAX_VLAN >> 5; i++) {
279
/* NOTE(review): 'j < 0x1f' stops at bit 30 and skips bit 31 of each
 * word; upstream uses 'j <= 0x1f' — confirm this is a transcription
 * artifact and not a real off-by-one. */
for (j = 0; n->vlans[i] && j < 0x1f; j++) {
280
if (n->vlans[i] & (1U << j)) {
281
int_entry = g_malloc0(sizeof(*int_entry));
282
int_entry->value = (i << 5) + j;
283
int_entry->next = int_list;
284
int_list = int_entry;
288
info->vlan_table = int_list;
290
/* enable event notification after query */
291
nc->rxfilter_notify_enabled = 1;
195
296
/*
 * VirtIODevice reset hook for virtio-net.
 * NOTE(review): the function body (original lines ~299 onward) is
 * elided from this extract; only the cast of vdev to VirtIONet is
 * visible here.
 */
static void virtio_net_reset(VirtIODevice *vdev)
197
298
VirtIONet *n = VIRTIO_NET(vdev);
463
static void virtio_net_apply_guest_offloads(VirtIONet *n)
{
    /* Push the currently negotiated guest rx offload flags down to the
     * tap backend of queue 0. */
    uint64_t offloads = n->curr_guest_offloads;

    tap_set_offload(qemu_get_subqueue(n->nic, 0)->peer,
                    !!(offloads & (1ULL << VIRTIO_NET_F_GUEST_CSUM)),
                    !!(offloads & (1ULL << VIRTIO_NET_F_GUEST_TSO4)),
                    !!(offloads & (1ULL << VIRTIO_NET_F_GUEST_TSO6)),
                    !!(offloads & (1ULL << VIRTIO_NET_F_GUEST_ECN)),
                    !!(offloads & (1ULL << VIRTIO_NET_F_GUEST_UFO)));
}
473
static uint64_t virtio_net_guest_offloads_by_features(uint32_t features)
475
static const uint64_t guest_offloads_mask =
476
(1ULL << VIRTIO_NET_F_GUEST_CSUM) |
477
(1ULL << VIRTIO_NET_F_GUEST_TSO4) |
478
(1ULL << VIRTIO_NET_F_GUEST_TSO6) |
479
(1ULL << VIRTIO_NET_F_GUEST_ECN) |
480
(1ULL << VIRTIO_NET_F_GUEST_UFO);
482
return guest_offloads_mask & features;
485
static inline uint64_t virtio_net_supported_guest_offloads(VirtIONet *n)
{
    /* The offloads the guest acked during feature negotiation. */
    return virtio_net_guest_offloads_by_features(
        VIRTIO_DEVICE(n)->guest_features);
}
362
491
/*
 * VirtIODevice 'set_features' hook: apply the feature bits the guest
 * acked — configure mergeable rx buffers and, when the backend has a
 * vnet header, push the guest rx offload bits to the backend.
 *
 * NOTE(review): this extract carries two line numberings per statement
 * and appears to contain BOTH the pre-refactor path (direct
 * tap_set_offload call, old lines 372-377) and the post-refactor path
 * (curr_guest_offloads + virtio_net_apply_guest_offloads, new lines
 * 501-503).  Only one of the two should exist in the real file —
 * confirm against the repository before relying on this text.
 */
static void virtio_net_set_features(VirtIODevice *vdev, uint32_t features)
364
493
VirtIONet *n = VIRTIO_NET(vdev);
369
498
virtio_net_set_mrg_rx_bufs(n, !!(features & (1 << VIRTIO_NET_F_MRG_RXBUF)));
371
500
if (n->has_vnet_hdr) {
372
tap_set_offload(qemu_get_subqueue(n->nic, 0)->peer,
373
(features >> VIRTIO_NET_F_GUEST_CSUM) & 1,
374
(features >> VIRTIO_NET_F_GUEST_TSO4) & 1,
375
(features >> VIRTIO_NET_F_GUEST_TSO6) & 1,
376
(features >> VIRTIO_NET_F_GUEST_ECN) & 1,
377
(features >> VIRTIO_NET_F_GUEST_UFO) & 1);
501
n->curr_guest_offloads =
502
virtio_net_guest_offloads_by_features(features);
503
virtio_net_apply_guest_offloads(n);
380
506
/* Per-queue propagation follows; loop body is elided in this extract. */
for (i = 0; i < n->max_queues; i++) {
417
544
return VIRTIO_NET_ERR;
420
549
return VIRTIO_NET_OK;
552
/*
 * Handle VIRTIO_NET_CTRL_GUEST_OFFLOADS control-queue commands.  For
 * the SET sub-command, validate the guest-requested offload bitmap
 * against what the device offered, then apply it.  Returns
 * VIRTIO_NET_OK on success, VIRTIO_NET_ERR otherwise.
 *
 * NOTE(review): elided lines include the declarations of 'offloads'
 * (the value read from the request) and 's', plus closing braces.
 */
static int virtio_net_handle_offloads(VirtIONet *n, uint8_t cmd,
553
struct iovec *iov, unsigned int iov_cnt)
555
VirtIODevice *vdev = VIRTIO_DEVICE(n);
559
/* Reject the command unless the guest negotiated CTRL_GUEST_OFFLOADS. */
if (!((1 << VIRTIO_NET_F_CTRL_GUEST_OFFLOADS) & vdev->guest_features)) {
560
return VIRTIO_NET_ERR;
563
s = iov_to_buf(iov, iov_cnt, 0, &offloads, sizeof(offloads));
564
/* Short read from the control request is an error. */
if (s != sizeof(offloads)) {
565
return VIRTIO_NET_ERR;
568
if (cmd == VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET) {
569
uint64_t supported_offloads;
571
/* Offloads are only meaningful when the backend has a vnet header. */
if (!n->has_vnet_hdr) {
572
return VIRTIO_NET_ERR;
575
supported_offloads = virtio_net_supported_guest_offloads(n);
576
/* Refuse any bit the device did not offer. */
if (offloads & ~supported_offloads) {
577
return VIRTIO_NET_ERR;
580
n->curr_guest_offloads = offloads;
581
virtio_net_apply_guest_offloads(n);
583
return VIRTIO_NET_OK;
585
/* Unknown sub-command. */
return VIRTIO_NET_ERR;
423
589
/*
 * Handle VIRTIO_NET_CTRL_MAC control-queue commands: MAC_ADDR_SET (set
 * the primary MAC address) and MAC_TABLE_SET (replace the unicast and
 * multicast MAC filter tables from the request iovec).
 *
 * NOTE(review): this extract shows two line numberings and elides many
 * lines (the declaration of 's', the table reset, the uni_overflow
 * path, else-arms and closing braces), so control flow below is not
 * contiguous source.
 */
static int virtio_net_handle_mac(VirtIONet *n, uint8_t cmd,
424
590
struct iovec *iov, unsigned int iov_cnt)
426
592
struct virtio_net_ctrl_mac mac_data;
594
NetClientState *nc = qemu_get_queue(n->nic);
429
596
if (cmd == VIRTIO_NET_CTRL_MAC_ADDR_SET) {
430
597
/* The payload must be exactly one MAC address. */
if (iov_size(iov, iov_cnt) != sizeof(n->mac)) {
450
619
sizeof(mac_data.entries));
451
620
/* The entry count arrives little-endian per the virtio spec. */
mac_data.entries = ldl_p(&mac_data.entries);
452
621
if (s != sizeof(mac_data.entries)) {
453
return VIRTIO_NET_ERR;
455
624
iov_discard_front(&iov, &iov_cnt, s);
457
626
/* NOTE(review): 'mac_data.entries * ETH_ALEN' is 32-bit arithmetic on
 * a guest-controlled count — confirm it cannot wrap before this
 * size comparison. */
if (mac_data.entries * ETH_ALEN > iov_size(iov, iov_cnt)) {
458
return VIRTIO_NET_ERR;
461
630
/* Unicast table: copy only when it fits in the device table. */
if (mac_data.entries <= MAC_TABLE_ENTRIES) {
462
631
s = iov_to_buf(iov, iov_cnt, 0, n->mac_table.macs,
463
632
mac_data.entries * ETH_ALEN);
464
633
if (s != mac_data.entries * ETH_ALEN) {
465
return VIRTIO_NET_ERR;
467
636
n->mac_table.in_use += mac_data.entries;
477
646
sizeof(mac_data.entries));
478
647
mac_data.entries = ldl_p(&mac_data.entries);
479
648
if (s != sizeof(mac_data.entries)) {
480
return VIRTIO_NET_ERR;
483
652
iov_discard_front(&iov, &iov_cnt, s);
485
654
/* The multicast table must consume the rest of the payload exactly. */
if (mac_data.entries * ETH_ALEN != iov_size(iov, iov_cnt)) {
486
return VIRTIO_NET_ERR;
489
658
if (n->mac_table.in_use + mac_data.entries <= MAC_TABLE_ENTRIES) {
490
659
/* NOTE(review): this copy targets the table base rather than
 * macs + in_use * ETH_ALEN, which would clobber the unicast
 * entries stored above — confirm the elided lines do not adjust
 * the destination. */
s = iov_to_buf(iov, iov_cnt, 0, n->mac_table.macs,
491
660
mac_data.entries * ETH_ALEN);
492
661
if (s != mac_data.entries * ETH_ALEN) {
493
return VIRTIO_NET_ERR;
495
664
n->mac_table.in_use += mac_data.entries;
497
666
/* Too many multicast entries: record the overflow in the table state. */
n->mac_table.multi_overflow = 1;
500
671
return VIRTIO_NET_OK;
675
return VIRTIO_NET_ERR;
503
678
static int virtio_net_handle_vlan_table(VirtIONet *n, uint8_t cmd,
590
768
status = virtio_net_handle_vlan_table(n, ctrl.cmd, iov, iov_cnt);
591
769
} else if (ctrl.class == VIRTIO_NET_CTRL_MQ) {
592
770
status = virtio_net_handle_mq(n, ctrl.cmd, iov, iov_cnt);
771
} else if (ctrl.class == VIRTIO_NET_CTRL_GUEST_OFFLOADS) {
772
status = virtio_net_handle_offloads(n, ctrl.cmd, iov, iov_cnt);
595
775
s = iov_from_buf(elem.in_sg, elem.in_num, 0, &status, sizeof(status));
1167
1351
error_report("virtio-net: saved image requires vnet_hdr=on");
1171
if (n->has_vnet_hdr) {
1172
tap_set_offload(qemu_get_queue(n->nic)->peer,
1173
(vdev->guest_features >> VIRTIO_NET_F_GUEST_CSUM) & 1,
1174
(vdev->guest_features >> VIRTIO_NET_F_GUEST_TSO4) & 1,
1175
(vdev->guest_features >> VIRTIO_NET_F_GUEST_TSO6) & 1,
1176
(vdev->guest_features >> VIRTIO_NET_F_GUEST_ECN) & 1,
1177
(vdev->guest_features >> VIRTIO_NET_F_GUEST_UFO) & 1);
1181
1356
if (version_id >= 9) {