1 /* 2 * Virtio Network Device 3 * 4 * Copyright IBM, Corp. 2007 5 * 6 * Authors: 7 * Anthony Liguori <aliguori@us.ibm.com> 8 * 9 * This work is licensed under the terms of the GNU GPL, version 2. See 10 * the COPYING file in the top-level directory. 11 * 12 */ 13 14 #include "qemu/osdep.h" 15 #include "qemu/iov.h" 16 #include "hw/virtio/virtio.h" 17 #include "net/net.h" 18 #include "net/checksum.h" 19 #include "net/tap.h" 20 #include "qemu/error-report.h" 21 #include "qemu/timer.h" 22 #include "hw/virtio/virtio-net.h" 23 #include "net/vhost_net.h" 24 #include "hw/virtio/virtio-bus.h" 25 #include "qapi/qmp/qjson.h" 26 #include "qapi-event.h" 27 #include "hw/virtio/virtio-access.h" 28 #include "migration/misc.h" 29 30 #define VIRTIO_NET_VM_VERSION 11 31 32 #define MAC_TABLE_ENTRIES 64 33 #define MAX_VLAN (1 << 12) /* Per 802.1Q definition */ 34 35 /* previously fixed value */ 36 #define VIRTIO_NET_RX_QUEUE_DEFAULT_SIZE 256 37 #define VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE 256 38 39 /* for now, only allow larger queues; with virtio-1, guest can downsize */ 40 #define VIRTIO_NET_RX_QUEUE_MIN_SIZE VIRTIO_NET_RX_QUEUE_DEFAULT_SIZE 41 #define VIRTIO_NET_TX_QUEUE_MIN_SIZE VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE 42 43 /* 44 * Calculate the number of bytes up to and including the given 'field' of 45 * 'container'. 46 */ 47 #define endof(container, field) \ 48 (offsetof(container, field) + sizeof(((container *)0)->field)) 49 50 typedef struct VirtIOFeature { 51 uint32_t flags; 52 size_t end; 53 } VirtIOFeature; 54 55 static VirtIOFeature feature_sizes[] = { 56 {.flags = 1 << VIRTIO_NET_F_MAC, 57 .end = endof(struct virtio_net_config, mac)}, 58 {.flags = 1 << VIRTIO_NET_F_STATUS, 59 .end = endof(struct virtio_net_config, status)}, 60 {.flags = 1 << VIRTIO_NET_F_MQ, 61 .end = endof(struct virtio_net_config, max_virtqueue_pairs)}, 62 {.flags = 1 << VIRTIO_NET_F_MTU, 63 .end = endof(struct virtio_net_config, mtu)}, 64 {} 65 }; 66 67 static VirtIONetQueue *virtio_net_get_subqueue(NetClientState *nc) 68 { 69 VirtIONet *n = qemu_get_nic_opaque(nc); 70 71 return &n->vqs[nc->queue_index]; 72 } 73 74 static int vq2q(int queue_index) 75 { 76 return queue_index / 2; 77 } 78 79 /* TODO 80 * - we could suppress RX interrupt if we were so inclined. 81 */ 82 83 static void virtio_net_get_config(VirtIODevice *vdev, uint8_t *config) 84 { 85 VirtIONet *n = VIRTIO_NET(vdev); 86 struct virtio_net_config netcfg; 87 88 virtio_stw_p(vdev, &netcfg.status, n->status); 89 virtio_stw_p(vdev, &netcfg.max_virtqueue_pairs, n->max_queues); 90 virtio_stw_p(vdev, &netcfg.mtu, n->net_conf.mtu); 91 memcpy(netcfg.mac, n->mac, ETH_ALEN); 92 memcpy(config, &netcfg, n->config_size); 93 } 94 95 static void virtio_net_set_config(VirtIODevice *vdev, const uint8_t *config) 96 { 97 VirtIONet *n = VIRTIO_NET(vdev); 98 struct virtio_net_config netcfg = {}; 99 100 memcpy(&netcfg, config, n->config_size); 101 102 if (!virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR) && 103 !virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1) && 104 memcmp(netcfg.mac, n->mac, ETH_ALEN)) { 105 memcpy(n->mac, netcfg.mac, ETH_ALEN); 106 qemu_format_nic_info_str(qemu_get_queue(n->nic), n->mac); 107 } 108 } 109 110 static bool virtio_net_started(VirtIONet *n, uint8_t status) 111 { 112 VirtIODevice *vdev = VIRTIO_DEVICE(n); 113 return (status & VIRTIO_CONFIG_S_DRIVER_OK) && 114 (n->status & VIRTIO_NET_S_LINK_UP) && vdev->vm_running; 115 } 116 117 static void virtio_net_announce_timer(void *opaque) 118 { 119 VirtIONet *n = opaque; 120 VirtIODevice *vdev = VIRTIO_DEVICE(n); 121 122 n->announce_counter--; 123 n->status |= VIRTIO_NET_S_ANNOUNCE; 124 virtio_notify_config(vdev); 125 } 126 127 static void virtio_net_vhost_status(VirtIONet *n, uint8_t status) 128 { 129 VirtIODevice *vdev = VIRTIO_DEVICE(n); 130 NetClientState *nc = qemu_get_queue(n->nic); 131 int queues = n->multiqueue ? n->max_queues : 1; 132 133 if (!get_vhost_net(nc->peer)) { 134 return; 135 } 136 137 if ((virtio_net_started(n, status) && !nc->peer->link_down) == 138 !!n->vhost_started) { 139 return; 140 } 141 if (!n->vhost_started) { 142 int r, i; 143 144 if (n->needs_vnet_hdr_swap) { 145 error_report("backend does not support %s vnet headers; " 146 "falling back on userspace virtio", 147 virtio_is_big_endian(vdev) ? "BE" : "LE"); 148 return; 149 } 150 151 /* Any packets outstanding? Purge them to avoid touching rings 152 * when vhost is running. 153 */ 154 for (i = 0; i < queues; i++) { 155 NetClientState *qnc = qemu_get_subqueue(n->nic, i); 156 157 /* Purge both directions: TX and RX. */ 158 qemu_net_queue_purge(qnc->peer->incoming_queue, qnc); 159 qemu_net_queue_purge(qnc->incoming_queue, qnc->peer); 160 } 161 162 if (virtio_has_feature(vdev->guest_features, VIRTIO_NET_F_MTU)) { 163 r = vhost_net_set_mtu(get_vhost_net(nc->peer), n->net_conf.mtu); 164 if (r < 0) { 165 error_report("%uBytes MTU not supported by the backend", 166 n->net_conf.mtu); 167 168 return; 169 } 170 } 171 172 n->vhost_started = 1; 173 r = vhost_net_start(vdev, n->nic->ncs, queues); 174 if (r < 0) { 175 error_report("unable to start vhost net: %d: " 176 "falling back on userspace virtio", -r); 177 n->vhost_started = 0; 178 } 179 } else { 180 vhost_net_stop(vdev, n->nic->ncs, queues); 181 n->vhost_started = 0; 182 } 183 } 184 185 static int virtio_net_set_vnet_endian_one(VirtIODevice *vdev, 186 NetClientState *peer, 187 bool enable) 188 { 189 if (virtio_is_big_endian(vdev)) { 190 return qemu_set_vnet_be(peer, enable); 191 } else { 192 return qemu_set_vnet_le(peer, enable); 193 } 194 } 195 196 static bool virtio_net_set_vnet_endian(VirtIODevice *vdev, NetClientState *ncs, 197 int queues, bool enable) 198 { 199 int i; 200 201 for (i = 0; i < queues; i++) { 202 if (virtio_net_set_vnet_endian_one(vdev, ncs[i].peer, enable) < 0 && 203 enable) { 204 while (--i >= 0) { 205 virtio_net_set_vnet_endian_one(vdev, ncs[i].peer, false); 206 } 207 208 return true; 209 } 210 } 211 212 return false; 213 } 214 215 static void virtio_net_vnet_endian_status(VirtIONet *n, uint8_t status) 216 { 217 VirtIODevice *vdev = VIRTIO_DEVICE(n); 218 int queues = n->multiqueue ? n->max_queues : 1; 219 220 if (virtio_net_started(n, status)) { 221 /* Before using the device, we tell the network backend about the 222 * endianness to use when parsing vnet headers. If the backend 223 * can't do it, we fallback onto fixing the headers in the core 224 * virtio-net code. 225 */ 226 n->needs_vnet_hdr_swap = virtio_net_set_vnet_endian(vdev, n->nic->ncs, 227 queues, true); 228 } else if (virtio_net_started(n, vdev->status)) { 229 /* After using the device, we need to reset the network backend to 230 * the default (guest native endianness), otherwise the guest may 231 * lose network connectivity if it is rebooted into a different 232 * endianness. 233 */ 234 virtio_net_set_vnet_endian(vdev, n->nic->ncs, queues, false); 235 } 236 } 237 238 static void virtio_net_drop_tx_queue_data(VirtIODevice *vdev, VirtQueue *vq) 239 { 240 unsigned int dropped = virtqueue_drop_all(vq); 241 if (dropped) { 242 virtio_notify(vdev, vq); 243 } 244 } 245 246 static void virtio_net_set_status(struct VirtIODevice *vdev, uint8_t status) 247 { 248 VirtIONet *n = VIRTIO_NET(vdev); 249 VirtIONetQueue *q; 250 int i; 251 uint8_t queue_status; 252 253 virtio_net_vnet_endian_status(n, status); 254 virtio_net_vhost_status(n, status); 255 256 for (i = 0; i < n->max_queues; i++) { 257 NetClientState *ncs = qemu_get_subqueue(n->nic, i); 258 bool queue_started; 259 q = &n->vqs[i]; 260 261 if ((!n->multiqueue && i != 0) || i >= n->curr_queues) { 262 queue_status = 0; 263 } else { 264 queue_status = status; 265 } 266 queue_started = 267 virtio_net_started(n, queue_status) && !n->vhost_started; 268 269 if (queue_started) { 270 qemu_flush_queued_packets(ncs); 271 } 272 273 if (!q->tx_waiting) { 274 continue; 275 } 276 277 if (queue_started) { 278 if (q->tx_timer) { 279 timer_mod(q->tx_timer, 280 qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + n->tx_timeout); 281 } else { 282 qemu_bh_schedule(q->tx_bh); 283 } 284 } else { 285 if (q->tx_timer) { 286 timer_del(q->tx_timer); 287 } else { 288 qemu_bh_cancel(q->tx_bh); 289 } 290 if ((n->status & VIRTIO_NET_S_LINK_UP) == 0 && 291 (queue_status & VIRTIO_CONFIG_S_DRIVER_OK)) { 292 /* if tx is waiting we are likely have some packets in tx queue 293 * and disabled notification */ 294 q->tx_waiting = 0; 295 virtio_queue_set_notification(q->tx_vq, 1); 296 virtio_net_drop_tx_queue_data(vdev, q->tx_vq); 297 } 298 } 299 } 300 } 301 302 static void virtio_net_set_link_status(NetClientState *nc) 303 { 304 VirtIONet *n = qemu_get_nic_opaque(nc); 305 VirtIODevice *vdev = VIRTIO_DEVICE(n); 306 uint16_t old_status = n->status; 307 308 if (nc->link_down) 309 n->status &= ~VIRTIO_NET_S_LINK_UP; 310 else 311 n->status |= VIRTIO_NET_S_LINK_UP; 312 313 if (n->status != old_status) 314 virtio_notify_config(vdev); 315 316 virtio_net_set_status(vdev, vdev->status); 317 } 318 319 static void rxfilter_notify(NetClientState *nc) 320 { 321 VirtIONet *n = qemu_get_nic_opaque(nc); 322 323 if (nc->rxfilter_notify_enabled) { 324 gchar *path = object_get_canonical_path(OBJECT(n->qdev)); 325 qapi_event_send_nic_rx_filter_changed(!!n->netclient_name, 326 n->netclient_name, path, &error_abort); 327 g_free(path); 328 329 /* disable event notification to avoid events flooding */ 330 nc->rxfilter_notify_enabled = 0; 331 } 332 } 333 334 static intList *get_vlan_table(VirtIONet *n) 335 { 336 intList *list, *entry; 337 int i, j; 338 339 list = NULL; 340 for (i = 0; i < MAX_VLAN >> 5; i++) { 341 for (j = 0; n->vlans[i] && j <= 0x1f; j++) { 342 if (n->vlans[i] & (1U << j)) { 343 entry = g_malloc0(sizeof(*entry)); 344 entry->value = (i << 5) + j; 345 entry->next = list; 346 list = entry; 347 } 348 } 349 } 350 351 return list; 352 } 353 354 static RxFilterInfo *virtio_net_query_rxfilter(NetClientState *nc) 355 { 356 VirtIONet *n = qemu_get_nic_opaque(nc); 357 VirtIODevice *vdev = VIRTIO_DEVICE(n); 358 RxFilterInfo *info; 359 strList *str_list, *entry; 360 int i; 361 362 info = g_malloc0(sizeof(*info)); 363 info->name = g_strdup(nc->name); 364 info->promiscuous = n->promisc; 365 366 if (n->nouni) { 367 info->unicast = RX_STATE_NONE; 368 } else if (n->alluni) { 369 info->unicast = RX_STATE_ALL; 370 } else { 371 info->unicast = RX_STATE_NORMAL; 372 } 373 374 if (n->nomulti) { 375 info->multicast = RX_STATE_NONE; 376 } else if (n->allmulti) { 377 info->multicast = RX_STATE_ALL; 378 } else { 379 info->multicast = RX_STATE_NORMAL; 380 } 381 382 info->broadcast_allowed = n->nobcast; 383 info->multicast_overflow = n->mac_table.multi_overflow; 384 info->unicast_overflow = n->mac_table.uni_overflow; 385 386 info->main_mac = qemu_mac_strdup_printf(n->mac); 387 388 str_list = NULL; 389 for (i = 0; i < n->mac_table.first_multi; i++) { 390 entry = g_malloc0(sizeof(*entry)); 391 entry->value = qemu_mac_strdup_printf(n->mac_table.macs + i * ETH_ALEN); 392 entry->next = str_list; 393 str_list = entry; 394 } 395 info->unicast_table = str_list; 396 397 str_list = NULL; 398 for (i = n->mac_table.first_multi; i < n->mac_table.in_use; i++) { 399 entry = g_malloc0(sizeof(*entry)); 400 entry->value = qemu_mac_strdup_printf(n->mac_table.macs + i * ETH_ALEN); 401 entry->next = str_list; 402 str_list = entry; 403 } 404 info->multicast_table = str_list; 405 info->vlan_table = get_vlan_table(n); 406 407 if (!virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_VLAN)) { 408 info->vlan = RX_STATE_ALL; 409 } else if (!info->vlan_table) { 410 info->vlan = RX_STATE_NONE; 411 } else { 412 info->vlan = RX_STATE_NORMAL; 413 } 414 415 /* enable event notification after query */ 416 nc->rxfilter_notify_enabled = 1; 417 418 return info; 419 } 420 421 static void virtio_net_reset(VirtIODevice *vdev) 422 { 423 VirtIONet *n = VIRTIO_NET(vdev); 424 425 /* Reset back to compatibility mode */ 426 n->promisc = 1; 427 n->allmulti = 0; 428 n->alluni = 0; 429 n->nomulti = 0; 430 n->nouni = 0; 431 n->nobcast = 0; 432 /* multiqueue is disabled by default */ 433 n->curr_queues = 1; 434 timer_del(n->announce_timer); 435 n->announce_counter = 0; 436 n->status &= ~VIRTIO_NET_S_ANNOUNCE; 437 438 /* Flush any MAC and VLAN filter table state */ 439 n->mac_table.in_use = 0; 440 n->mac_table.first_multi = 0; 441 n->mac_table.multi_overflow = 0; 442 n->mac_table.uni_overflow = 0; 443 memset(n->mac_table.macs, 0, MAC_TABLE_ENTRIES * ETH_ALEN); 444 memcpy(&n->mac[0], &n->nic->conf->macaddr, sizeof(n->mac)); 445 qemu_format_nic_info_str(qemu_get_queue(n->nic), n->mac); 446 memset(n->vlans, 0, MAX_VLAN >> 3); 447 } 448 449 static void peer_test_vnet_hdr(VirtIONet *n) 450 { 451 NetClientState *nc = qemu_get_queue(n->nic); 452 if (!nc->peer) { 453 return; 454 } 455 456 n->has_vnet_hdr = qemu_has_vnet_hdr(nc->peer); 457 } 458 459 static int peer_has_vnet_hdr(VirtIONet *n) 460 { 461 return n->has_vnet_hdr; 462 } 463 464 static int peer_has_ufo(VirtIONet *n) 465 { 466 if (!peer_has_vnet_hdr(n)) 467 return 0; 468 469 n->has_ufo = qemu_has_ufo(qemu_get_queue(n->nic)->peer); 470 471 return n->has_ufo; 472 } 473 474 static void virtio_net_set_mrg_rx_bufs(VirtIONet *n, int mergeable_rx_bufs, 475 int version_1) 476 { 477 int i; 478 NetClientState *nc; 479 480 n->mergeable_rx_bufs = mergeable_rx_bufs; 481 482 if (version_1) { 483 n->guest_hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf); 484 } else { 485 n->guest_hdr_len = n->mergeable_rx_bufs ? 486 sizeof(struct virtio_net_hdr_mrg_rxbuf) : 487 sizeof(struct virtio_net_hdr); 488 } 489 490 for (i = 0; i < n->max_queues; i++) { 491 nc = qemu_get_subqueue(n->nic, i); 492 493 if (peer_has_vnet_hdr(n) && 494 qemu_has_vnet_hdr_len(nc->peer, n->guest_hdr_len)) { 495 qemu_set_vnet_hdr_len(nc->peer, n->guest_hdr_len); 496 n->host_hdr_len = n->guest_hdr_len; 497 } 498 } 499 } 500 501 static int virtio_net_max_tx_queue_size(VirtIONet *n) 502 { 503 NetClientState *peer = n->nic_conf.peers.ncs[0]; 504 505 /* 506 * Backends other than vhost-user don't support max queue size. 507 */ 508 if (!peer) { 509 return VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE; 510 } 511 512 if (peer->info->type != NET_CLIENT_DRIVER_VHOST_USER) { 513 return VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE; 514 } 515 516 return VIRTQUEUE_MAX_SIZE; 517 } 518 519 static int peer_attach(VirtIONet *n, int index) 520 { 521 NetClientState *nc = qemu_get_subqueue(n->nic, index); 522 523 if (!nc->peer) { 524 return 0; 525 } 526 527 if (nc->peer->info->type == NET_CLIENT_DRIVER_VHOST_USER) { 528 vhost_set_vring_enable(nc->peer, 1); 529 } 530 531 if (nc->peer->info->type != NET_CLIENT_DRIVER_TAP) { 532 return 0; 533 } 534 535 if (n->max_queues == 1) { 536 return 0; 537 } 538 539 return tap_enable(nc->peer); 540 } 541 542 static int peer_detach(VirtIONet *n, int index) 543 { 544 NetClientState *nc = qemu_get_subqueue(n->nic, index); 545 546 if (!nc->peer) { 547 return 0; 548 } 549 550 if (nc->peer->info->type == NET_CLIENT_DRIVER_VHOST_USER) { 551 vhost_set_vring_enable(nc->peer, 0); 552 } 553 554 if (nc->peer->info->type != NET_CLIENT_DRIVER_TAP) { 555 return 0; 556 } 557 558 return tap_disable(nc->peer); 559 } 560 561 static void virtio_net_set_queues(VirtIONet *n) 562 { 563 int i; 564 int r; 565 566 if (n->nic->peer_deleted) { 567 return; 568 } 569 570 for (i = 0; i < n->max_queues; i++) { 571 if (i < n->curr_queues) { 572 r = peer_attach(n, i); 573 assert(!r); 574 } else { 575 r = peer_detach(n, i); 576 assert(!r); 577 } 578 } 579 } 580 581 static void virtio_net_set_multiqueue(VirtIONet *n, int multiqueue); 582 583 static uint64_t virtio_net_get_features(VirtIODevice *vdev, uint64_t features, 584 Error **errp) 585 { 586 VirtIONet *n = VIRTIO_NET(vdev); 587 NetClientState *nc = qemu_get_queue(n->nic); 588 589 /* Firstly sync all virtio-net possible supported features */ 590 features |= n->host_features; 591 592 virtio_add_feature(&features, VIRTIO_NET_F_MAC); 593 594 if (!peer_has_vnet_hdr(n)) { 595 virtio_clear_feature(&features, VIRTIO_NET_F_CSUM); 596 virtio_clear_feature(&features, VIRTIO_NET_F_HOST_TSO4); 597 virtio_clear_feature(&features, VIRTIO_NET_F_HOST_TSO6); 598 virtio_clear_feature(&features, VIRTIO_NET_F_HOST_ECN); 599 600 virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_CSUM); 601 virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_TSO4); 602 virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_TSO6); 603 virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_ECN); 604 } 605 606 if (!peer_has_vnet_hdr(n) || !peer_has_ufo(n)) { 607 virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_UFO); 608 virtio_clear_feature(&features, VIRTIO_NET_F_HOST_UFO); 609 } 610 611 if (!get_vhost_net(nc->peer)) { 612 return features; 613 } 614 features = vhost_net_get_features(get_vhost_net(nc->peer), features); 615 vdev->backend_features = features; 616 617 if (n->mtu_bypass_backend && 618 (n->host_features & 1ULL << VIRTIO_NET_F_MTU)) { 619 features |= (1ULL << VIRTIO_NET_F_MTU); 620 } 621 622 return features; 623 } 624 625 static uint64_t virtio_net_bad_features(VirtIODevice *vdev) 626 { 627 uint64_t features = 0; 628 629 /* Linux kernel 2.6.25. It understood MAC (as everyone must), 630 * but also these: */ 631 virtio_add_feature(&features, VIRTIO_NET_F_MAC); 632 virtio_add_feature(&features, VIRTIO_NET_F_CSUM); 633 virtio_add_feature(&features, VIRTIO_NET_F_HOST_TSO4); 634 virtio_add_feature(&features, VIRTIO_NET_F_HOST_TSO6); 635 virtio_add_feature(&features, VIRTIO_NET_F_HOST_ECN); 636 637 return features; 638 } 639 640 static void virtio_net_apply_guest_offloads(VirtIONet *n) 641 { 642 qemu_set_offload(qemu_get_queue(n->nic)->peer, 643 !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_CSUM)), 644 !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_TSO4)), 645 !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_TSO6)), 646 !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_ECN)), 647 !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_UFO))); 648 } 649 650 static uint64_t virtio_net_guest_offloads_by_features(uint32_t features) 651 { 652 static const uint64_t guest_offloads_mask = 653 (1ULL << VIRTIO_NET_F_GUEST_CSUM) | 654 (1ULL << VIRTIO_NET_F_GUEST_TSO4) | 655 (1ULL << VIRTIO_NET_F_GUEST_TSO6) | 656 (1ULL << VIRTIO_NET_F_GUEST_ECN) | 657 (1ULL << VIRTIO_NET_F_GUEST_UFO); 658 659 return guest_offloads_mask & features; 660 } 661 662 static inline uint64_t virtio_net_supported_guest_offloads(VirtIONet *n) 663 { 664 VirtIODevice *vdev = VIRTIO_DEVICE(n); 665 return virtio_net_guest_offloads_by_features(vdev->guest_features); 666 } 667 668 static void virtio_net_set_features(VirtIODevice *vdev, uint64_t features) 669 { 670 VirtIONet *n = VIRTIO_NET(vdev); 671 int i; 672 673 if (n->mtu_bypass_backend && 674 !virtio_has_feature(vdev->backend_features, VIRTIO_NET_F_MTU)) { 675 features &= ~(1ULL << VIRTIO_NET_F_MTU); 676 } 677 678 virtio_net_set_multiqueue(n, 679 virtio_has_feature(features, VIRTIO_NET_F_MQ)); 680 681 virtio_net_set_mrg_rx_bufs(n, 682 virtio_has_feature(features, 683 VIRTIO_NET_F_MRG_RXBUF), 684 virtio_has_feature(features, 685 VIRTIO_F_VERSION_1)); 686 687 if (n->has_vnet_hdr) { 688 n->curr_guest_offloads = 689 virtio_net_guest_offloads_by_features(features); 690 virtio_net_apply_guest_offloads(n); 691 } 692 693 for (i = 0; i < n->max_queues; i++) { 694 NetClientState *nc = qemu_get_subqueue(n->nic, i); 695 696 if (!get_vhost_net(nc->peer)) { 697 continue; 698 } 699 vhost_net_ack_features(get_vhost_net(nc->peer), features); 700 } 701 702 if (virtio_has_feature(features, VIRTIO_NET_F_CTRL_VLAN)) { 703 memset(n->vlans, 0, MAX_VLAN >> 3); 704 } else { 705 memset(n->vlans, 0xff, MAX_VLAN >> 3); 706 } 707 } 708 709 static int virtio_net_handle_rx_mode(VirtIONet *n, uint8_t cmd, 710 struct iovec *iov, unsigned int iov_cnt) 711 { 712 uint8_t on; 713 size_t s; 714 NetClientState *nc = qemu_get_queue(n->nic); 715 716 s = iov_to_buf(iov, iov_cnt, 0, &on, sizeof(on)); 717 if (s != sizeof(on)) { 718 return VIRTIO_NET_ERR; 719 } 720 721 if (cmd == VIRTIO_NET_CTRL_RX_PROMISC) { 722 n->promisc = on; 723 } else if (cmd == VIRTIO_NET_CTRL_RX_ALLMULTI) { 724 n->allmulti = on; 725 } else if (cmd == VIRTIO_NET_CTRL_RX_ALLUNI) { 726 n->alluni = on; 727 } else if (cmd == VIRTIO_NET_CTRL_RX_NOMULTI) { 728 n->nomulti = on; 729 } else if (cmd == VIRTIO_NET_CTRL_RX_NOUNI) { 730 n->nouni = on; 731 } else if (cmd == VIRTIO_NET_CTRL_RX_NOBCAST) { 732 n->nobcast = on; 733 } else { 734 return VIRTIO_NET_ERR; 735 } 736 737 rxfilter_notify(nc); 738 739 return VIRTIO_NET_OK; 740 } 741 742 static int virtio_net_handle_offloads(VirtIONet *n, uint8_t cmd, 743 struct iovec *iov, unsigned int iov_cnt) 744 { 745 VirtIODevice *vdev = VIRTIO_DEVICE(n); 746 uint64_t offloads; 747 size_t s; 748 749 if (!virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS)) { 750 return VIRTIO_NET_ERR; 751 } 752 753 s = iov_to_buf(iov, iov_cnt, 0, &offloads, sizeof(offloads)); 754 if (s != sizeof(offloads)) { 755 return VIRTIO_NET_ERR; 756 } 757 758 if (cmd == VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET) { 759 uint64_t supported_offloads; 760 761 offloads = virtio_ldq_p(vdev, &offloads); 762 763 if (!n->has_vnet_hdr) { 764 return VIRTIO_NET_ERR; 765 } 766 767 supported_offloads = virtio_net_supported_guest_offloads(n); 768 if (offloads & ~supported_offloads) { 769 return VIRTIO_NET_ERR; 770 } 771 772 n->curr_guest_offloads = offloads; 773 virtio_net_apply_guest_offloads(n); 774 775 return VIRTIO_NET_OK; 776 } else { 777 return VIRTIO_NET_ERR; 778 } 779 } 780 781 static int virtio_net_handle_mac(VirtIONet *n, uint8_t cmd, 782 struct iovec *iov, unsigned int iov_cnt) 783 { 784 VirtIODevice *vdev = VIRTIO_DEVICE(n); 785 struct virtio_net_ctrl_mac mac_data; 786 size_t s; 787 NetClientState *nc = qemu_get_queue(n->nic); 788 789 if (cmd == VIRTIO_NET_CTRL_MAC_ADDR_SET) { 790 if (iov_size(iov, iov_cnt) != sizeof(n->mac)) { 791 return VIRTIO_NET_ERR; 792 } 793 s = iov_to_buf(iov, iov_cnt, 0, &n->mac, sizeof(n->mac)); 794 assert(s == sizeof(n->mac)); 795 qemu_format_nic_info_str(qemu_get_queue(n->nic), n->mac); 796 rxfilter_notify(nc); 797 798 return VIRTIO_NET_OK; 799 } 800 801 if (cmd != VIRTIO_NET_CTRL_MAC_TABLE_SET) { 802 return VIRTIO_NET_ERR; 803 } 804 805 int in_use = 0; 806 int first_multi = 0; 807 uint8_t uni_overflow = 0; 808 uint8_t multi_overflow = 0; 809 uint8_t *macs = g_malloc0(MAC_TABLE_ENTRIES * ETH_ALEN); 810 811 s = iov_to_buf(iov, iov_cnt, 0, &mac_data.entries, 812 sizeof(mac_data.entries)); 813 mac_data.entries = virtio_ldl_p(vdev, &mac_data.entries); 814 if (s != sizeof(mac_data.entries)) { 815 goto error; 816 } 817 iov_discard_front(&iov, &iov_cnt, s); 818 819 if (mac_data.entries * ETH_ALEN > iov_size(iov, iov_cnt)) { 820 goto error; 821 } 822 823 if (mac_data.entries <= MAC_TABLE_ENTRIES) { 824 s = iov_to_buf(iov, iov_cnt, 0, macs, 825 mac_data.entries * ETH_ALEN); 826 if (s != mac_data.entries * ETH_ALEN) { 827 goto error; 828 } 829 in_use += mac_data.entries; 830 } else { 831 uni_overflow = 1; 832 } 833 834 iov_discard_front(&iov, &iov_cnt, mac_data.entries * ETH_ALEN); 835 836 first_multi = in_use; 837 838 s = iov_to_buf(iov, iov_cnt, 0, &mac_data.entries, 839 sizeof(mac_data.entries)); 840 mac_data.entries = virtio_ldl_p(vdev, &mac_data.entries); 841 if (s != sizeof(mac_data.entries)) { 842 goto error; 843 } 844 845 iov_discard_front(&iov, &iov_cnt, s); 846 847 if (mac_data.entries * ETH_ALEN != iov_size(iov, iov_cnt)) { 848 goto error; 849 } 850 851 if (mac_data.entries <= MAC_TABLE_ENTRIES - in_use) { 852 s = iov_to_buf(iov, iov_cnt, 0, &macs[in_use * ETH_ALEN], 853 mac_data.entries * ETH_ALEN); 854 if (s != mac_data.entries * ETH_ALEN) { 855 goto error; 856 } 857 in_use += mac_data.entries; 858 } else { 859 multi_overflow = 1; 860 } 861 862 n->mac_table.in_use = in_use; 863 n->mac_table.first_multi = first_multi; 864 n->mac_table.uni_overflow = uni_overflow; 865 n->mac_table.multi_overflow = multi_overflow; 866 memcpy(n->mac_table.macs, macs, MAC_TABLE_ENTRIES * ETH_ALEN); 867 g_free(macs); 868 rxfilter_notify(nc); 869 870 return VIRTIO_NET_OK; 871 872 error: 873 g_free(macs); 874 return VIRTIO_NET_ERR; 875 } 876 877 static int virtio_net_handle_vlan_table(VirtIONet *n, uint8_t cmd, 878 struct iovec *iov, unsigned int iov_cnt) 879 { 880 VirtIODevice *vdev = VIRTIO_DEVICE(n); 881 uint16_t vid; 882 size_t s; 883 NetClientState *nc = qemu_get_queue(n->nic); 884 885 s = iov_to_buf(iov, iov_cnt, 0, &vid, sizeof(vid)); 886 vid = virtio_lduw_p(vdev, &vid); 887 if (s != sizeof(vid)) { 888 return VIRTIO_NET_ERR; 889 } 890 891 if (vid >= MAX_VLAN) 892 return VIRTIO_NET_ERR; 893 894 if (cmd == VIRTIO_NET_CTRL_VLAN_ADD) 895 n->vlans[vid >> 5] |= (1U << (vid & 0x1f)); 896 else if (cmd == VIRTIO_NET_CTRL_VLAN_DEL) 897 n->vlans[vid >> 5] &= ~(1U << (vid & 0x1f)); 898 else 899 return VIRTIO_NET_ERR; 900 901 rxfilter_notify(nc); 902 903 return VIRTIO_NET_OK; 904 } 905 906 static int virtio_net_handle_announce(VirtIONet *n, uint8_t cmd, 907 struct iovec *iov, unsigned int iov_cnt) 908 { 909 if (cmd == VIRTIO_NET_CTRL_ANNOUNCE_ACK && 910 n->status & VIRTIO_NET_S_ANNOUNCE) { 911 n->status &= ~VIRTIO_NET_S_ANNOUNCE; 912 if (n->announce_counter) { 913 timer_mod(n->announce_timer, 914 qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 915 self_announce_delay(n->announce_counter)); 916 } 917 return VIRTIO_NET_OK; 918 } else { 919 return VIRTIO_NET_ERR; 920 } 921 } 922 923 static int virtio_net_handle_mq(VirtIONet *n, uint8_t cmd, 924 struct iovec *iov, unsigned int iov_cnt) 925 { 926 VirtIODevice *vdev = VIRTIO_DEVICE(n); 927 struct virtio_net_ctrl_mq mq; 928 size_t s; 929 uint16_t queues; 930 931 s = iov_to_buf(iov, iov_cnt, 0, &mq, sizeof(mq)); 932 if (s != sizeof(mq)) { 933 return VIRTIO_NET_ERR; 934 } 935 936 if (cmd != VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET) { 937 return VIRTIO_NET_ERR; 938 } 939 940 queues = virtio_lduw_p(vdev, &mq.virtqueue_pairs); 941 942 if (queues < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN || 943 queues > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX || 944 queues > n->max_queues || 945 !n->multiqueue) { 946 return VIRTIO_NET_ERR; 947 } 948 949 n->curr_queues = queues; 950 /* stop the backend before changing the number of queues to avoid handling a 951 * disabled queue */ 952 virtio_net_set_status(vdev, vdev->status); 953 virtio_net_set_queues(n); 954 955 return VIRTIO_NET_OK; 956 } 957 958 static void virtio_net_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq) 959 { 960 VirtIONet *n = VIRTIO_NET(vdev); 961 struct virtio_net_ctrl_hdr ctrl; 962 virtio_net_ctrl_ack status = VIRTIO_NET_ERR; 963 VirtQueueElement *elem; 964 size_t s; 965 struct iovec *iov, *iov2; 966 unsigned int iov_cnt; 967 968 for (;;) { 969 elem = virtqueue_pop(vq, sizeof(VirtQueueElement)); 970 if (!elem) { 971 break; 972 } 973 if (iov_size(elem->in_sg, elem->in_num) < sizeof(status) || 974 iov_size(elem->out_sg, elem->out_num) < sizeof(ctrl)) { 975 virtio_error(vdev, "virtio-net ctrl missing headers"); 976 virtqueue_detach_element(vq, elem, 0); 977 g_free(elem); 978 break; 979 } 980 981 iov_cnt = elem->out_num; 982 iov2 = iov = g_memdup(elem->out_sg, sizeof(struct iovec) * elem->out_num); 983 s = iov_to_buf(iov, iov_cnt, 0, &ctrl, sizeof(ctrl)); 984 iov_discard_front(&iov, &iov_cnt, sizeof(ctrl)); 985 if (s != sizeof(ctrl)) { 986 status = VIRTIO_NET_ERR; 987 } else if (ctrl.class == VIRTIO_NET_CTRL_RX) { 988 status = virtio_net_handle_rx_mode(n, ctrl.cmd, iov, iov_cnt); 989 } else if (ctrl.class == VIRTIO_NET_CTRL_MAC) { 990 status = virtio_net_handle_mac(n, ctrl.cmd, iov, iov_cnt); 991 } else if (ctrl.class == VIRTIO_NET_CTRL_VLAN) { 992 status = virtio_net_handle_vlan_table(n, ctrl.cmd, iov, iov_cnt); 993 } else if (ctrl.class == VIRTIO_NET_CTRL_ANNOUNCE) { 994 status = virtio_net_handle_announce(n, ctrl.cmd, iov, iov_cnt); 995 } else if (ctrl.class == VIRTIO_NET_CTRL_MQ) { 996 status = virtio_net_handle_mq(n, ctrl.cmd, iov, iov_cnt); 997 } else if (ctrl.class == VIRTIO_NET_CTRL_GUEST_OFFLOADS) { 998 status = virtio_net_handle_offloads(n, ctrl.cmd, iov, iov_cnt); 999 } 1000 1001 s = iov_from_buf(elem->in_sg, elem->in_num, 0, &status, sizeof(status)); 1002 assert(s == sizeof(status)); 1003 1004 virtqueue_push(vq, elem, sizeof(status)); 1005 virtio_notify(vdev, vq); 1006 g_free(iov2); 1007 g_free(elem); 1008 } 1009 } 1010 1011 /* RX */ 1012 1013 static void virtio_net_handle_rx(VirtIODevice *vdev, VirtQueue *vq) 1014 { 1015 VirtIONet *n = VIRTIO_NET(vdev); 1016 int queue_index = vq2q(virtio_get_queue_index(vq)); 1017 1018 qemu_flush_queued_packets(qemu_get_subqueue(n->nic, queue_index)); 1019 } 1020 1021 static int virtio_net_can_receive(NetClientState *nc) 1022 { 1023 VirtIONet *n = qemu_get_nic_opaque(nc); 1024 VirtIODevice *vdev = VIRTIO_DEVICE(n); 1025 VirtIONetQueue *q = virtio_net_get_subqueue(nc); 1026 1027 if (!vdev->vm_running) { 1028 return 0; 1029 } 1030 1031 if (nc->queue_index >= n->curr_queues) { 1032 return 0; 1033 } 1034 1035 if (!virtio_queue_ready(q->rx_vq) || 1036 !(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK)) { 1037 return 0; 1038 } 1039 1040 return 1; 1041 } 1042 1043 static int virtio_net_has_buffers(VirtIONetQueue *q, int bufsize) 1044 { 1045 VirtIONet *n = q->n; 1046 if (virtio_queue_empty(q->rx_vq) || 1047 (n->mergeable_rx_bufs && 1048 !virtqueue_avail_bytes(q->rx_vq, bufsize, 0))) { 1049 virtio_queue_set_notification(q->rx_vq, 1); 1050 1051 /* To avoid a race condition where the guest has made some buffers 1052 * available after the above check but before notification was 1053 * enabled, check for available buffers again. 1054 */ 1055 if (virtio_queue_empty(q->rx_vq) || 1056 (n->mergeable_rx_bufs && 1057 !virtqueue_avail_bytes(q->rx_vq, bufsize, 0))) { 1058 return 0; 1059 } 1060 } 1061 1062 virtio_queue_set_notification(q->rx_vq, 0); 1063 return 1; 1064 } 1065 1066 static void virtio_net_hdr_swap(VirtIODevice *vdev, struct virtio_net_hdr *hdr) 1067 { 1068 virtio_tswap16s(vdev, &hdr->hdr_len); 1069 virtio_tswap16s(vdev, &hdr->gso_size); 1070 virtio_tswap16s(vdev, &hdr->csum_start); 1071 virtio_tswap16s(vdev, &hdr->csum_offset); 1072 } 1073 1074 /* dhclient uses AF_PACKET but doesn't pass auxdata to the kernel so 1075 * it never finds out that the packets don't have valid checksums. This 1076 * causes dhclient to get upset. Fedora's carried a patch for ages to 1077 * fix this with Xen but it hasn't appeared in an upstream release of 1078 * dhclient yet. 1079 * 1080 * To avoid breaking existing guests, we catch udp packets and add 1081 * checksums. This is terrible but it's better than hacking the guest 1082 * kernels. 1083 * 1084 * N.B. if we introduce a zero-copy API, this operation is no longer free so 1085 * we should provide a mechanism to disable it to avoid polluting the host 1086 * cache. 1087 */ 1088 static void work_around_broken_dhclient(struct virtio_net_hdr *hdr, 1089 uint8_t *buf, size_t size) 1090 { 1091 if ((hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) && /* missing csum */ 1092 (size > 27 && size < 1500) && /* normal sized MTU */ 1093 (buf[12] == 0x08 && buf[13] == 0x00) && /* ethertype == IPv4 */ 1094 (buf[23] == 17) && /* ip.protocol == UDP */ 1095 (buf[34] == 0 && buf[35] == 67)) { /* udp.srcport == bootps */ 1096 net_checksum_calculate(buf, size); 1097 hdr->flags &= ~VIRTIO_NET_HDR_F_NEEDS_CSUM; 1098 } 1099 } 1100 1101 static void receive_header(VirtIONet *n, const struct iovec *iov, int iov_cnt, 1102 const void *buf, size_t size) 1103 { 1104 if (n->has_vnet_hdr) { 1105 /* FIXME this cast is evil */ 1106 void *wbuf = (void *)buf; 1107 work_around_broken_dhclient(wbuf, wbuf + n->host_hdr_len, 1108 size - n->host_hdr_len); 1109 1110 if (n->needs_vnet_hdr_swap) { 1111 virtio_net_hdr_swap(VIRTIO_DEVICE(n), wbuf); 1112 } 1113 iov_from_buf(iov, iov_cnt, 0, buf, sizeof(struct virtio_net_hdr)); 1114 } else { 1115 struct virtio_net_hdr hdr = { 1116 .flags = 0, 1117 .gso_type = VIRTIO_NET_HDR_GSO_NONE 1118 }; 1119 iov_from_buf(iov, iov_cnt, 0, &hdr, sizeof hdr); 1120 } 1121 } 1122 1123 static int receive_filter(VirtIONet *n, const uint8_t *buf, int size) 1124 { 1125 static const uint8_t bcast[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; 1126 static const uint8_t vlan[] = {0x81, 0x00}; 1127 uint8_t *ptr = (uint8_t *)buf; 1128 int i; 1129 1130 if (n->promisc) 1131 return 1; 1132 1133 ptr += n->host_hdr_len; 1134 1135 if (!memcmp(&ptr[12], vlan, sizeof(vlan))) { 1136 int vid = lduw_be_p(ptr + 14) & 0xfff; 1137 if (!(n->vlans[vid >> 5] & (1U << (vid & 0x1f)))) 1138 return 0; 1139 } 1140 1141 if (ptr[0] & 1) { // multicast 1142 if (!memcmp(ptr, bcast, sizeof(bcast))) { 1143 return !n->nobcast; 1144 } else if (n->nomulti) { 1145 return 0; 1146 } else if (n->allmulti || n->mac_table.multi_overflow) { 1147 return 1; 1148 } 1149 1150 for (i = n->mac_table.first_multi; i < n->mac_table.in_use; i++) { 1151 if (!memcmp(ptr, &n->mac_table.macs[i * ETH_ALEN], ETH_ALEN)) { 1152 return 1; 1153 } 1154 } 1155 } else { // unicast 1156 if (n->nouni) { 1157 return 0; 1158 } else if (n->alluni || n->mac_table.uni_overflow) { 1159 return 1; 1160 } else if (!memcmp(ptr, n->mac, ETH_ALEN)) { 1161 return 1; 1162 } 1163 1164 for (i = 0; i < n->mac_table.first_multi; i++) { 1165 if (!memcmp(ptr, &n->mac_table.macs[i * ETH_ALEN], ETH_ALEN)) { 1166 return 1; 1167 } 1168 } 1169 } 1170 1171 return 0; 1172 } 1173 1174 static ssize_t virtio_net_receive_rcu(NetClientState *nc, const uint8_t *buf, 1175 size_t size) 1176 { 1177 VirtIONet *n = qemu_get_nic_opaque(nc); 1178 VirtIONetQueue *q = virtio_net_get_subqueue(nc); 1179 VirtIODevice *vdev = VIRTIO_DEVICE(n); 1180 struct iovec mhdr_sg[VIRTQUEUE_MAX_SIZE]; 1181 struct virtio_net_hdr_mrg_rxbuf mhdr; 1182 unsigned mhdr_cnt = 0; 1183 size_t offset, i, guest_offset; 1184 1185 if (!virtio_net_can_receive(nc)) { 1186 return -1; 1187 } 1188 1189 /* hdr_len refers to the header we supply to the guest */ 1190 if (!virtio_net_has_buffers(q, size + n->guest_hdr_len - n->host_hdr_len)) { 1191 return 0; 1192 } 1193 1194 if (!receive_filter(n, buf, size)) 1195 return size; 1196 1197 offset = i = 0; 1198 1199 while (offset < size) { 1200 VirtQueueElement *elem; 1201 int len, total; 1202 const struct iovec *sg; 1203 1204 total = 0; 1205 1206 elem = virtqueue_pop(q->rx_vq, sizeof(VirtQueueElement)); 1207 if (!elem) { 1208 if (i) { 1209 virtio_error(vdev, "virtio-net unexpected empty queue: " 1210 "i %zd mergeable %d offset %zd, size %zd, " 1211 "guest hdr len %zd, host hdr len %zd " 1212 "guest features 0x%" PRIx64, 1213 i, n->mergeable_rx_bufs, offset, size, 1214 n->guest_hdr_len, n->host_hdr_len, 1215 vdev->guest_features); 1216 } 1217 return -1; 1218 } 1219 1220 if (elem->in_num < 1) { 1221 virtio_error(vdev, 1222 "virtio-net receive queue contains no in buffers"); 1223 virtqueue_detach_element(q->rx_vq, elem, 0); 1224 g_free(elem); 1225 return -1; 1226 } 1227 1228 sg = elem->in_sg; 1229 if (i == 0) { 1230 assert(offset == 0); 1231 if (n->mergeable_rx_bufs) { 1232 mhdr_cnt = iov_copy(mhdr_sg, ARRAY_SIZE(mhdr_sg), 1233 sg, elem->in_num, 1234 offsetof(typeof(mhdr), num_buffers), 1235 sizeof(mhdr.num_buffers)); 1236 } 1237 1238 receive_header(n, sg, elem->in_num, buf, size); 1239 offset = n->host_hdr_len; 1240 total += n->guest_hdr_len; 1241 guest_offset = n->guest_hdr_len; 1242 } else { 1243 guest_offset = 0; 1244 } 1245 1246 /* copy in packet. ugh */ 1247 len = iov_from_buf(sg, elem->in_num, guest_offset, 1248 buf + offset, size - offset); 1249 total += len; 1250 offset += len; 1251 /* If buffers can't be merged, at this point we 1252 * must have consumed the complete packet. 1253 * Otherwise, drop it. */ 1254 if (!n->mergeable_rx_bufs && offset < size) { 1255 virtqueue_unpop(q->rx_vq, elem, total); 1256 g_free(elem); 1257 return size; 1258 } 1259 1260 /* signal other side */ 1261 virtqueue_fill(q->rx_vq, elem, total, i++); 1262 g_free(elem); 1263 } 1264 1265 if (mhdr_cnt) { 1266 virtio_stw_p(vdev, &mhdr.num_buffers, i); 1267 iov_from_buf(mhdr_sg, mhdr_cnt, 1268 0, 1269 &mhdr.num_buffers, sizeof mhdr.num_buffers); 1270 } 1271 1272 virtqueue_flush(q->rx_vq, i); 1273 virtio_notify(vdev, q->rx_vq); 1274 1275 return size; 1276 } 1277 1278 static ssize_t virtio_net_receive(NetClientState *nc, const uint8_t *buf, 1279 size_t size) 1280 { 1281 ssize_t r; 1282 1283 rcu_read_lock(); 1284 r = virtio_net_receive_rcu(nc, buf, size); 1285 rcu_read_unlock(); 1286 return r; 1287 } 1288 1289 static int32_t virtio_net_flush_tx(VirtIONetQueue *q); 1290 1291 static void virtio_net_tx_complete(NetClientState *nc, ssize_t len) 1292 { 1293 VirtIONet *n = qemu_get_nic_opaque(nc); 1294 VirtIONetQueue *q = virtio_net_get_subqueue(nc); 1295 VirtIODevice *vdev = VIRTIO_DEVICE(n); 1296 1297 virtqueue_push(q->tx_vq, q->async_tx.elem, 0); 1298 virtio_notify(vdev, q->tx_vq); 1299 1300 g_free(q->async_tx.elem); 1301 q->async_tx.elem = NULL; 1302 1303 virtio_queue_set_notification(q->tx_vq, 1); 1304 virtio_net_flush_tx(q); 1305 } 1306 1307 /* TX */ 1308 static int32_t virtio_net_flush_tx(VirtIONetQueue *q) 1309 { 1310 VirtIONet *n = q->n; 1311 VirtIODevice *vdev = VIRTIO_DEVICE(n); 1312 VirtQueueElement *elem; 1313 int32_t num_packets = 0; 1314 int queue_index = vq2q(virtio_get_queue_index(q->tx_vq)); 1315 if (!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK)) { 1316 return num_packets; 1317 } 1318 1319 if (q->async_tx.elem) { 1320 virtio_queue_set_notification(q->tx_vq, 0); 1321 return num_packets; 1322 } 1323 1324 for (;;) { 1325 ssize_t ret; 1326 unsigned int out_num; 1327 struct iovec sg[VIRTQUEUE_MAX_SIZE], sg2[VIRTQUEUE_MAX_SIZE + 1], *out_sg; 1328 struct virtio_net_hdr_mrg_rxbuf mhdr; 1329 1330 elem = virtqueue_pop(q->tx_vq, sizeof(VirtQueueElement)); 1331 if (!elem) { 1332 break; 1333 } 1334 1335 out_num = elem->out_num; 1336 out_sg = elem->out_sg; 1337 if (out_num < 1) { 1338 virtio_error(vdev, "virtio-net header not in first element"); 1339 virtqueue_detach_element(q->tx_vq, elem, 0); 1340 g_free(elem); 1341 return -EINVAL; 1342 } 1343 1344 if (n->has_vnet_hdr) { 1345 if (iov_to_buf(out_sg, out_num, 0, &mhdr, n->guest_hdr_len) < 1346 n->guest_hdr_len) { 1347 virtio_error(vdev, "virtio-net header incorrect"); 1348 virtqueue_detach_element(q->tx_vq, elem, 0); 1349 g_free(elem); 1350 return -EINVAL; 1351 } 1352 if (n->needs_vnet_hdr_swap) { 1353 virtio_net_hdr_swap(vdev, (void *) &mhdr); 1354 sg2[0].iov_base = &mhdr; 1355 sg2[0].iov_len = n->guest_hdr_len; 1356 out_num = iov_copy(&sg2[1], ARRAY_SIZE(sg2) - 1, 1357 out_sg, out_num, 1358 n->guest_hdr_len, -1); 1359 if (out_num == VIRTQUEUE_MAX_SIZE) { 1360 goto drop; 1361 } 1362 out_num += 1; 1363 out_sg = sg2; 1364 } 1365 } 1366 /* 1367 * If host wants to see the guest header as is, we can 1368 * pass it on unchanged. Otherwise, copy just the parts 1369 * that host is interested in. 1370 */ 1371 assert(n->host_hdr_len <= n->guest_hdr_len); 1372 if (n->host_hdr_len != n->guest_hdr_len) { 1373 unsigned sg_num = iov_copy(sg, ARRAY_SIZE(sg), 1374 out_sg, out_num, 1375 0, n->host_hdr_len); 1376 sg_num += iov_copy(sg + sg_num, ARRAY_SIZE(sg) - sg_num, 1377 out_sg, out_num, 1378 n->guest_hdr_len, -1); 1379 out_num = sg_num; 1380 out_sg = sg; 1381 } 1382 1383 ret = qemu_sendv_packet_async(qemu_get_subqueue(n->nic, queue_index), 1384 out_sg, out_num, virtio_net_tx_complete); 1385 if (ret == 0) { 1386 virtio_queue_set_notification(q->tx_vq, 0); 1387 q->async_tx.elem = elem; 1388 return -EBUSY; 1389 } 1390 1391 drop: 1392 virtqueue_push(q->tx_vq, elem, 0); 1393 virtio_notify(vdev, q->tx_vq); 1394 g_free(elem); 1395 1396 if (++num_packets >= n->tx_burst) { 1397 break; 1398 } 1399 } 1400 return num_packets; 1401 } 1402 1403 static void virtio_net_handle_tx_timer(VirtIODevice *vdev, VirtQueue *vq) 1404 { 1405 VirtIONet *n = VIRTIO_NET(vdev); 1406 VirtIONetQueue *q = &n->vqs[vq2q(virtio_get_queue_index(vq))]; 1407 1408 if (unlikely((n->status & VIRTIO_NET_S_LINK_UP) == 0)) { 1409 virtio_net_drop_tx_queue_data(vdev, vq); 1410 return; 1411 } 1412 1413 /* This happens when device was stopped but VCPU wasn't. */ 1414 if (!vdev->vm_running) { 1415 q->tx_waiting = 1; 1416 return; 1417 } 1418 1419 if (q->tx_waiting) { 1420 virtio_queue_set_notification(vq, 1); 1421 timer_del(q->tx_timer); 1422 q->tx_waiting = 0; 1423 if (virtio_net_flush_tx(q) == -EINVAL) { 1424 return; 1425 } 1426 } else { 1427 timer_mod(q->tx_timer, 1428 qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + n->tx_timeout); 1429 q->tx_waiting = 1; 1430 virtio_queue_set_notification(vq, 0); 1431 } 1432 } 1433 1434 static void virtio_net_handle_tx_bh(VirtIODevice *vdev, VirtQueue *vq) 1435 { 1436 VirtIONet *n = VIRTIO_NET(vdev); 1437 VirtIONetQueue *q = &n->vqs[vq2q(virtio_get_queue_index(vq))]; 1438 1439 if (unlikely((n->status & VIRTIO_NET_S_LINK_UP) == 0)) { 1440 virtio_net_drop_tx_queue_data(vdev, vq); 1441 return; 1442 } 1443 1444 if (unlikely(q->tx_waiting)) { 1445 return; 1446 } 1447 q->tx_waiting = 1; 1448 /* This happens when device was stopped but VCPU wasn't. */ 1449 if (!vdev->vm_running) { 1450 return; 1451 } 1452 virtio_queue_set_notification(vq, 0); 1453 qemu_bh_schedule(q->tx_bh); 1454 } 1455 1456 static void virtio_net_tx_timer(void *opaque) 1457 { 1458 VirtIONetQueue *q = opaque; 1459 VirtIONet *n = q->n; 1460 VirtIODevice *vdev = VIRTIO_DEVICE(n); 1461 /* This happens when device was stopped but BH wasn't. */ 1462 if (!vdev->vm_running) { 1463 /* Make sure tx waiting is set, so we'll run when restarted. */ 1464 assert(q->tx_waiting); 1465 return; 1466 } 1467 1468 q->tx_waiting = 0; 1469 1470 /* Just in case the driver is not ready on more */ 1471 if (!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK)) { 1472 return; 1473 } 1474 1475 virtio_queue_set_notification(q->tx_vq, 1); 1476 virtio_net_flush_tx(q); 1477 } 1478 1479 static void virtio_net_tx_bh(void *opaque) 1480 { 1481 VirtIONetQueue *q = opaque; 1482 VirtIONet *n = q->n; 1483 VirtIODevice *vdev = VIRTIO_DEVICE(n); 1484 int32_t ret; 1485 1486 /* This happens when device was stopped but BH wasn't. */ 1487 if (!vdev->vm_running) { 1488 /* Make sure tx waiting is set, so we'll run when restarted. */ 1489 assert(q->tx_waiting); 1490 return; 1491 } 1492 1493 q->tx_waiting = 0; 1494 1495 /* Just in case the driver is not ready on more */ 1496 if (unlikely(!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK))) { 1497 return; 1498 } 1499 1500 ret = virtio_net_flush_tx(q); 1501 if (ret == -EBUSY || ret == -EINVAL) { 1502 return; /* Notification re-enable handled by tx_complete or device 1503 * broken */ 1504 } 1505 1506 /* If we flush a full burst of packets, assume there are 1507 * more coming and immediately reschedule */ 1508 if (ret >= n->tx_burst) { 1509 qemu_bh_schedule(q->tx_bh); 1510 q->tx_waiting = 1; 1511 return; 1512 } 1513 1514 /* If less than a full burst, re-enable notification and flush 1515 * anything that may have come in while we weren't looking. If 1516 * we find something, assume the guest is still active and reschedule */ 1517 virtio_queue_set_notification(q->tx_vq, 1); 1518 ret = virtio_net_flush_tx(q); 1519 if (ret == -EINVAL) { 1520 return; 1521 } else if (ret > 0) { 1522 virtio_queue_set_notification(q->tx_vq, 0); 1523 qemu_bh_schedule(q->tx_bh); 1524 q->tx_waiting = 1; 1525 } 1526 } 1527 1528 static void virtio_net_add_queue(VirtIONet *n, int index) 1529 { 1530 VirtIODevice *vdev = VIRTIO_DEVICE(n); 1531 1532 n->vqs[index].rx_vq = virtio_add_queue(vdev, n->net_conf.rx_queue_size, 1533 virtio_net_handle_rx); 1534 1535 if (n->net_conf.tx && !strcmp(n->net_conf.tx, "timer")) { 1536 n->vqs[index].tx_vq = 1537 virtio_add_queue(vdev, n->net_conf.tx_queue_size, 1538 virtio_net_handle_tx_timer); 1539 n->vqs[index].tx_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, 1540 virtio_net_tx_timer, 1541 &n->vqs[index]); 1542 } else { 1543 n->vqs[index].tx_vq = 1544 virtio_add_queue(vdev, n->net_conf.tx_queue_size, 1545 virtio_net_handle_tx_bh); 1546 n->vqs[index].tx_bh = qemu_bh_new(virtio_net_tx_bh, &n->vqs[index]); 1547 } 1548 1549 n->vqs[index].tx_waiting = 0; 1550 n->vqs[index].n = n; 1551 } 1552 1553 static void virtio_net_del_queue(VirtIONet *n, int index) 1554 { 1555 VirtIODevice *vdev = VIRTIO_DEVICE(n); 1556 VirtIONetQueue *q = &n->vqs[index]; 1557 NetClientState *nc = qemu_get_subqueue(n->nic, index); 1558 1559 qemu_purge_queued_packets(nc); 1560 1561 virtio_del_queue(vdev, index * 2); 1562 if (q->tx_timer) { 1563 timer_del(q->tx_timer); 1564 timer_free(q->tx_timer); 1565 q->tx_timer = NULL; 1566 } else { 1567 qemu_bh_delete(q->tx_bh); 1568 q->tx_bh = NULL; 1569 } 1570 q->tx_waiting = 0; 1571 virtio_del_queue(vdev, index * 2 + 1); 1572 } 1573 1574 static void virtio_net_change_num_queues(VirtIONet *n, int new_max_queues) 1575 { 1576 VirtIODevice *vdev = VIRTIO_DEVICE(n); 1577 int old_num_queues = virtio_get_num_queues(vdev); 1578 int new_num_queues = new_max_queues * 2 + 1; 1579 int i; 1580 1581 assert(old_num_queues >= 3); 1582 assert(old_num_queues % 2 == 1); 1583 1584 if (old_num_queues == new_num_queues) { 1585 return; 1586 } 1587 1588 /* 1589 * We always need to remove and add ctrl vq if 1590 * old_num_queues != new_num_queues. Remove ctrl_vq first, 1591 * and then we only enter one of the following too loops. 1592 */ 1593 virtio_del_queue(vdev, old_num_queues - 1); 1594 1595 for (i = new_num_queues - 1; i < old_num_queues - 1; i += 2) { 1596 /* new_num_queues < old_num_queues */ 1597 virtio_net_del_queue(n, i / 2); 1598 } 1599 1600 for (i = old_num_queues - 1; i < new_num_queues - 1; i += 2) { 1601 /* new_num_queues > old_num_queues */ 1602 virtio_net_add_queue(n, i / 2); 1603 } 1604 1605 /* add ctrl_vq last */ 1606 n->ctrl_vq = virtio_add_queue(vdev, 64, virtio_net_handle_ctrl); 1607 } 1608 1609 static void virtio_net_set_multiqueue(VirtIONet *n, int multiqueue) 1610 { 1611 int max = multiqueue ? n->max_queues : 1; 1612 1613 n->multiqueue = multiqueue; 1614 virtio_net_change_num_queues(n, max); 1615 1616 virtio_net_set_queues(n); 1617 } 1618 1619 static int virtio_net_post_load_device(void *opaque, int version_id) 1620 { 1621 VirtIONet *n = opaque; 1622 VirtIODevice *vdev = VIRTIO_DEVICE(n); 1623 int i, link_down; 1624 1625 virtio_net_set_mrg_rx_bufs(n, n->mergeable_rx_bufs, 1626 virtio_vdev_has_feature(vdev, 1627 VIRTIO_F_VERSION_1)); 1628 1629 /* MAC_TABLE_ENTRIES may be different from the saved image */ 1630 if (n->mac_table.in_use > MAC_TABLE_ENTRIES) { 1631 n->mac_table.in_use = 0; 1632 } 1633 1634 if (!virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS)) { 1635 n->curr_guest_offloads = virtio_net_supported_guest_offloads(n); 1636 } 1637 1638 if (peer_has_vnet_hdr(n)) { 1639 virtio_net_apply_guest_offloads(n); 1640 } 1641 1642 virtio_net_set_queues(n); 1643 1644 /* Find the first multicast entry in the saved MAC filter */ 1645 for (i = 0; i < n->mac_table.in_use; i++) { 1646 if (n->mac_table.macs[i * ETH_ALEN] & 1) { 1647 break; 1648 } 1649 } 1650 n->mac_table.first_multi = i; 1651 1652 /* nc.link_down can't be migrated, so infer link_down according 1653 * to link status bit in n->status */ 1654 link_down = (n->status & VIRTIO_NET_S_LINK_UP) == 0; 1655 for (i = 0; i < n->max_queues; i++) { 1656 qemu_get_subqueue(n->nic, i)->link_down = link_down; 1657 } 1658 1659 if (virtio_vdev_has_feature(vdev, VIRTIO_NET_F_GUEST_ANNOUNCE) && 1660 virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ)) { 1661 n->announce_counter = SELF_ANNOUNCE_ROUNDS; 1662 timer_mod(n->announce_timer, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL)); 1663 } 1664 1665 return 0; 1666 } 1667 1668 /* tx_waiting field of a VirtIONetQueue */ 1669 static const VMStateDescription vmstate_virtio_net_queue_tx_waiting = { 1670 .name = "virtio-net-queue-tx_waiting", 1671 .fields = (VMStateField[]) { 1672 VMSTATE_UINT32(tx_waiting, VirtIONetQueue), 1673 VMSTATE_END_OF_LIST() 1674 }, 1675 }; 1676 1677 static bool max_queues_gt_1(void *opaque, int version_id) 1678 { 1679 return VIRTIO_NET(opaque)->max_queues > 1; 1680 } 1681 1682 static bool has_ctrl_guest_offloads(void *opaque, int version_id) 1683 { 1684 return virtio_vdev_has_feature(VIRTIO_DEVICE(opaque), 1685 VIRTIO_NET_F_CTRL_GUEST_OFFLOADS); 1686 } 1687 1688 static bool mac_table_fits(void *opaque, int version_id) 1689 { 1690 return VIRTIO_NET(opaque)->mac_table.in_use <= MAC_TABLE_ENTRIES; 1691 } 1692 1693 static bool mac_table_doesnt_fit(void *opaque, int version_id) 1694 { 1695 return !mac_table_fits(opaque, version_id); 1696 } 1697 1698 /* This temporary type is shared by all the WITH_TMP methods 1699 * although only some fields are used by each. 1700 */ 1701 struct VirtIONetMigTmp { 1702 VirtIONet *parent; 1703 VirtIONetQueue *vqs_1; 1704 uint16_t curr_queues_1; 1705 uint8_t has_ufo; 1706 uint32_t has_vnet_hdr; 1707 }; 1708 1709 /* The 2nd and subsequent tx_waiting flags are loaded later than 1710 * the 1st entry in the queues and only if there's more than one 1711 * entry. We use the tmp mechanism to calculate a temporary 1712 * pointer and count and also validate the count. 1713 */ 1714 1715 static void virtio_net_tx_waiting_pre_save(void *opaque) 1716 { 1717 struct VirtIONetMigTmp *tmp = opaque; 1718 1719 tmp->vqs_1 = tmp->parent->vqs + 1; 1720 tmp->curr_queues_1 = tmp->parent->curr_queues - 1; 1721 if (tmp->parent->curr_queues == 0) { 1722 tmp->curr_queues_1 = 0; 1723 } 1724 } 1725 1726 static int virtio_net_tx_waiting_pre_load(void *opaque) 1727 { 1728 struct VirtIONetMigTmp *tmp = opaque; 1729 1730 /* Reuse the pointer setup from save */ 1731 virtio_net_tx_waiting_pre_save(opaque); 1732 1733 if (tmp->parent->curr_queues > tmp->parent->max_queues) { 1734 error_report("virtio-net: curr_queues %x > max_queues %x", 1735 tmp->parent->curr_queues, tmp->parent->max_queues); 1736 1737 return -EINVAL; 1738 } 1739 1740 return 0; /* all good */ 1741 } 1742 1743 static const VMStateDescription vmstate_virtio_net_tx_waiting = { 1744 .name = "virtio-net-tx_waiting", 1745 .pre_load = virtio_net_tx_waiting_pre_load, 1746 .pre_save = virtio_net_tx_waiting_pre_save, 1747 .fields = (VMStateField[]) { 1748 VMSTATE_STRUCT_VARRAY_POINTER_UINT16(vqs_1, struct VirtIONetMigTmp, 1749 curr_queues_1, 1750 vmstate_virtio_net_queue_tx_waiting, 1751 struct VirtIONetQueue), 1752 VMSTATE_END_OF_LIST() 1753 }, 1754 }; 1755 1756 /* the 'has_ufo' flag is just tested; if the incoming stream has the 1757 * flag set we need to check that we have it 1758 */ 1759 static int virtio_net_ufo_post_load(void *opaque, int version_id) 1760 { 1761 struct VirtIONetMigTmp *tmp = opaque; 1762 1763 if (tmp->has_ufo && !peer_has_ufo(tmp->parent)) { 1764 error_report("virtio-net: saved image requires TUN_F_UFO support"); 1765 return -EINVAL; 1766 } 1767 1768 return 0; 1769 } 1770 1771 static void virtio_net_ufo_pre_save(void *opaque) 1772 { 1773 struct VirtIONetMigTmp *tmp = opaque; 1774 1775 tmp->has_ufo = tmp->parent->has_ufo; 1776 } 1777 1778 static const VMStateDescription vmstate_virtio_net_has_ufo = { 1779 .name = "virtio-net-ufo", 1780 .post_load = virtio_net_ufo_post_load, 1781 .pre_save = virtio_net_ufo_pre_save, 1782 .fields = (VMStateField[]) { 1783 VMSTATE_UINT8(has_ufo, struct VirtIONetMigTmp), 1784 VMSTATE_END_OF_LIST() 1785 }, 1786 }; 1787 1788 /* the 'has_vnet_hdr' flag is just tested; if the incoming stream has the 1789 * flag set we need to check that we have it 1790 */ 1791 static int virtio_net_vnet_post_load(void *opaque, int version_id) 1792 { 1793 struct VirtIONetMigTmp *tmp = opaque; 1794 1795 if (tmp->has_vnet_hdr && !peer_has_vnet_hdr(tmp->parent)) { 1796 error_report("virtio-net: saved image requires vnet_hdr=on"); 1797 return -EINVAL; 1798 } 1799 1800 return 0; 1801 } 1802 1803 static void virtio_net_vnet_pre_save(void *opaque) 1804 { 1805 struct VirtIONetMigTmp *tmp = opaque; 1806 1807 tmp->has_vnet_hdr = tmp->parent->has_vnet_hdr; 1808 } 1809 1810 static const VMStateDescription vmstate_virtio_net_has_vnet = { 1811 .name = "virtio-net-vnet", 1812 .post_load = virtio_net_vnet_post_load, 1813 .pre_save = virtio_net_vnet_pre_save, 1814 .fields = (VMStateField[]) { 1815 VMSTATE_UINT32(has_vnet_hdr, struct VirtIONetMigTmp), 1816 VMSTATE_END_OF_LIST() 1817 }, 1818 }; 1819 1820 static const VMStateDescription vmstate_virtio_net_device = { 1821 .name = "virtio-net-device", 1822 .version_id = VIRTIO_NET_VM_VERSION, 1823 .minimum_version_id = VIRTIO_NET_VM_VERSION, 1824 .post_load = virtio_net_post_load_device, 1825 .fields = (VMStateField[]) { 1826 VMSTATE_UINT8_ARRAY(mac, VirtIONet, ETH_ALEN), 1827 VMSTATE_STRUCT_POINTER(vqs, VirtIONet, 1828 vmstate_virtio_net_queue_tx_waiting, 1829 VirtIONetQueue), 1830 VMSTATE_UINT32(mergeable_rx_bufs, VirtIONet), 1831 VMSTATE_UINT16(status, VirtIONet), 1832 VMSTATE_UINT8(promisc, VirtIONet), 1833 VMSTATE_UINT8(allmulti, VirtIONet), 1834 VMSTATE_UINT32(mac_table.in_use, VirtIONet), 1835 1836 /* Guarded pair: If it fits we load it, else we throw it away 1837 * - can happen if source has a larger MAC table.; post-load 1838 * sets flags in this case. 1839 */ 1840 VMSTATE_VBUFFER_MULTIPLY(mac_table.macs, VirtIONet, 1841 0, mac_table_fits, mac_table.in_use, 1842 ETH_ALEN), 1843 VMSTATE_UNUSED_VARRAY_UINT32(VirtIONet, mac_table_doesnt_fit, 0, 1844 mac_table.in_use, ETH_ALEN), 1845 1846 /* Note: This is an array of uint32's that's always been saved as a 1847 * buffer; hold onto your endiannesses; it's actually used as a bitmap 1848 * but based on the uint. 1849 */ 1850 VMSTATE_BUFFER_POINTER_UNSAFE(vlans, VirtIONet, 0, MAX_VLAN >> 3), 1851 VMSTATE_WITH_TMP(VirtIONet, struct VirtIONetMigTmp, 1852 vmstate_virtio_net_has_vnet), 1853 VMSTATE_UINT8(mac_table.multi_overflow, VirtIONet), 1854 VMSTATE_UINT8(mac_table.uni_overflow, VirtIONet), 1855 VMSTATE_UINT8(alluni, VirtIONet), 1856 VMSTATE_UINT8(nomulti, VirtIONet), 1857 VMSTATE_UINT8(nouni, VirtIONet), 1858 VMSTATE_UINT8(nobcast, VirtIONet), 1859 VMSTATE_WITH_TMP(VirtIONet, struct VirtIONetMigTmp, 1860 vmstate_virtio_net_has_ufo), 1861 VMSTATE_SINGLE_TEST(max_queues, VirtIONet, max_queues_gt_1, 0, 1862 vmstate_info_uint16_equal, uint16_t), 1863 VMSTATE_UINT16_TEST(curr_queues, VirtIONet, max_queues_gt_1), 1864 VMSTATE_WITH_TMP(VirtIONet, struct VirtIONetMigTmp, 1865 vmstate_virtio_net_tx_waiting), 1866 VMSTATE_UINT64_TEST(curr_guest_offloads, VirtIONet, 1867 has_ctrl_guest_offloads), 1868 VMSTATE_END_OF_LIST() 1869 }, 1870 }; 1871 1872 static NetClientInfo net_virtio_info = { 1873 .type = NET_CLIENT_DRIVER_NIC, 1874 .size = sizeof(NICState), 1875 .can_receive = virtio_net_can_receive, 1876 .receive = virtio_net_receive, 1877 .link_status_changed = virtio_net_set_link_status, 1878 .query_rx_filter = virtio_net_query_rxfilter, 1879 }; 1880 1881 static bool virtio_net_guest_notifier_pending(VirtIODevice *vdev, int idx) 1882 { 1883 VirtIONet *n = VIRTIO_NET(vdev); 1884 NetClientState *nc = qemu_get_subqueue(n->nic, vq2q(idx)); 1885 assert(n->vhost_started); 1886 return vhost_net_virtqueue_pending(get_vhost_net(nc->peer), idx); 1887 } 1888 1889 static void virtio_net_guest_notifier_mask(VirtIODevice *vdev, int idx, 1890 bool mask) 1891 { 1892 VirtIONet *n = VIRTIO_NET(vdev); 1893 NetClientState *nc = qemu_get_subqueue(n->nic, vq2q(idx)); 1894 assert(n->vhost_started); 1895 vhost_net_virtqueue_mask(get_vhost_net(nc->peer), 1896 vdev, idx, mask); 1897 } 1898 1899 static void virtio_net_set_config_size(VirtIONet *n, uint64_t host_features) 1900 { 1901 int i, config_size = 0; 1902 virtio_add_feature(&host_features, VIRTIO_NET_F_MAC); 1903 1904 for (i = 0; feature_sizes[i].flags != 0; i++) { 1905 if (host_features & feature_sizes[i].flags) { 1906 config_size = MAX(feature_sizes[i].end, config_size); 1907 } 1908 } 1909 n->config_size = config_size; 1910 } 1911 1912 void virtio_net_set_netclient_name(VirtIONet *n, const char *name, 1913 const char *type) 1914 { 1915 /* 1916 * The name can be NULL, the netclient name will be type.x. 1917 */ 1918 assert(type != NULL); 1919 1920 g_free(n->netclient_name); 1921 g_free(n->netclient_type); 1922 n->netclient_name = g_strdup(name); 1923 n->netclient_type = g_strdup(type); 1924 } 1925 1926 static void virtio_net_device_realize(DeviceState *dev, Error **errp) 1927 { 1928 VirtIODevice *vdev = VIRTIO_DEVICE(dev); 1929 VirtIONet *n = VIRTIO_NET(dev); 1930 NetClientState *nc; 1931 int i; 1932 1933 if (n->net_conf.mtu) { 1934 n->host_features |= (0x1 << VIRTIO_NET_F_MTU); 1935 } 1936 1937 virtio_net_set_config_size(n, n->host_features); 1938 virtio_init(vdev, "virtio-net", VIRTIO_ID_NET, n->config_size); 1939 1940 /* 1941 * We set a lower limit on RX queue size to what it always was. 1942 * Guests that want a smaller ring can always resize it without 1943 * help from us (using virtio 1 and up). 1944 */ 1945 if (n->net_conf.rx_queue_size < VIRTIO_NET_RX_QUEUE_MIN_SIZE || 1946 n->net_conf.rx_queue_size > VIRTQUEUE_MAX_SIZE || 1947 !is_power_of_2(n->net_conf.rx_queue_size)) { 1948 error_setg(errp, "Invalid rx_queue_size (= %" PRIu16 "), " 1949 "must be a power of 2 between %d and %d.", 1950 n->net_conf.rx_queue_size, VIRTIO_NET_RX_QUEUE_MIN_SIZE, 1951 VIRTQUEUE_MAX_SIZE); 1952 virtio_cleanup(vdev); 1953 return; 1954 } 1955 1956 if (n->net_conf.tx_queue_size < VIRTIO_NET_TX_QUEUE_MIN_SIZE || 1957 n->net_conf.tx_queue_size > VIRTQUEUE_MAX_SIZE || 1958 !is_power_of_2(n->net_conf.tx_queue_size)) { 1959 error_setg(errp, "Invalid tx_queue_size (= %" PRIu16 "), " 1960 "must be a power of 2 between %d and %d", 1961 n->net_conf.tx_queue_size, VIRTIO_NET_TX_QUEUE_MIN_SIZE, 1962 VIRTQUEUE_MAX_SIZE); 1963 virtio_cleanup(vdev); 1964 return; 1965 } 1966 1967 n->max_queues = MAX(n->nic_conf.peers.queues, 1); 1968 if (n->max_queues * 2 + 1 > VIRTIO_QUEUE_MAX) { 1969 error_setg(errp, "Invalid number of queues (= %" PRIu32 "), " 1970 "must be a positive integer less than %d.", 1971 n->max_queues, (VIRTIO_QUEUE_MAX - 1) / 2); 1972 virtio_cleanup(vdev); 1973 return; 1974 } 1975 n->vqs = g_malloc0(sizeof(VirtIONetQueue) * n->max_queues); 1976 n->curr_queues = 1; 1977 n->tx_timeout = n->net_conf.txtimer; 1978 1979 if (n->net_conf.tx && strcmp(n->net_conf.tx, "timer") 1980 && strcmp(n->net_conf.tx, "bh")) { 1981 error_report("virtio-net: " 1982 "Unknown option tx=%s, valid options: \"timer\" \"bh\"", 1983 n->net_conf.tx); 1984 error_report("Defaulting to \"bh\""); 1985 } 1986 1987 n->net_conf.tx_queue_size = MIN(virtio_net_max_tx_queue_size(n), 1988 n->net_conf.tx_queue_size); 1989 1990 for (i = 0; i < n->max_queues; i++) { 1991 virtio_net_add_queue(n, i); 1992 } 1993 1994 n->ctrl_vq = virtio_add_queue(vdev, 64, virtio_net_handle_ctrl); 1995 qemu_macaddr_default_if_unset(&n->nic_conf.macaddr); 1996 memcpy(&n->mac[0], &n->nic_conf.macaddr, sizeof(n->mac)); 1997 n->status = VIRTIO_NET_S_LINK_UP; 1998 n->announce_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL, 1999 virtio_net_announce_timer, n); 2000 2001 if (n->netclient_type) { 2002 /* 2003 * Happen when virtio_net_set_netclient_name has been called. 2004 */ 2005 n->nic = qemu_new_nic(&net_virtio_info, &n->nic_conf, 2006 n->netclient_type, n->netclient_name, n); 2007 } else { 2008 n->nic = qemu_new_nic(&net_virtio_info, &n->nic_conf, 2009 object_get_typename(OBJECT(dev)), dev->id, n); 2010 } 2011 2012 peer_test_vnet_hdr(n); 2013 if (peer_has_vnet_hdr(n)) { 2014 for (i = 0; i < n->max_queues; i++) { 2015 qemu_using_vnet_hdr(qemu_get_subqueue(n->nic, i)->peer, true); 2016 } 2017 n->host_hdr_len = sizeof(struct virtio_net_hdr); 2018 } else { 2019 n->host_hdr_len = 0; 2020 } 2021 2022 qemu_format_nic_info_str(qemu_get_queue(n->nic), n->nic_conf.macaddr.a); 2023 2024 n->vqs[0].tx_waiting = 0; 2025 n->tx_burst = n->net_conf.txburst; 2026 virtio_net_set_mrg_rx_bufs(n, 0, 0); 2027 n->promisc = 1; /* for compatibility */ 2028 2029 n->mac_table.macs = g_malloc0(MAC_TABLE_ENTRIES * ETH_ALEN); 2030 2031 n->vlans = g_malloc0(MAX_VLAN >> 3); 2032 2033 nc = qemu_get_queue(n->nic); 2034 nc->rxfilter_notify_enabled = 1; 2035 2036 n->qdev = dev; 2037 } 2038 2039 static void virtio_net_device_unrealize(DeviceState *dev, Error **errp) 2040 { 2041 VirtIODevice *vdev = VIRTIO_DEVICE(dev); 2042 VirtIONet *n = VIRTIO_NET(dev); 2043 int i, max_queues; 2044 2045 /* This will stop vhost backend if appropriate. */ 2046 virtio_net_set_status(vdev, 0); 2047 2048 g_free(n->netclient_name); 2049 n->netclient_name = NULL; 2050 g_free(n->netclient_type); 2051 n->netclient_type = NULL; 2052 2053 g_free(n->mac_table.macs); 2054 g_free(n->vlans); 2055 2056 max_queues = n->multiqueue ? n->max_queues : 1; 2057 for (i = 0; i < max_queues; i++) { 2058 virtio_net_del_queue(n, i); 2059 } 2060 2061 timer_del(n->announce_timer); 2062 timer_free(n->announce_timer); 2063 g_free(n->vqs); 2064 qemu_del_nic(n->nic); 2065 virtio_cleanup(vdev); 2066 } 2067 2068 static void virtio_net_instance_init(Object *obj) 2069 { 2070 VirtIONet *n = VIRTIO_NET(obj); 2071 2072 /* 2073 * The default config_size is sizeof(struct virtio_net_config). 2074 * Can be overriden with virtio_net_set_config_size. 2075 */ 2076 n->config_size = sizeof(struct virtio_net_config); 2077 device_add_bootindex_property(obj, &n->nic_conf.bootindex, 2078 "bootindex", "/ethernet-phy@0", 2079 DEVICE(n), NULL); 2080 } 2081 2082 static void virtio_net_pre_save(void *opaque) 2083 { 2084 VirtIONet *n = opaque; 2085 2086 /* At this point, backend must be stopped, otherwise 2087 * it might keep writing to memory. */ 2088 assert(!n->vhost_started); 2089 } 2090 2091 static const VMStateDescription vmstate_virtio_net = { 2092 .name = "virtio-net", 2093 .minimum_version_id = VIRTIO_NET_VM_VERSION, 2094 .version_id = VIRTIO_NET_VM_VERSION, 2095 .fields = (VMStateField[]) { 2096 VMSTATE_VIRTIO_DEVICE, 2097 VMSTATE_END_OF_LIST() 2098 }, 2099 .pre_save = virtio_net_pre_save, 2100 }; 2101 2102 static Property virtio_net_properties[] = { 2103 DEFINE_PROP_BIT("csum", VirtIONet, host_features, VIRTIO_NET_F_CSUM, true), 2104 DEFINE_PROP_BIT("guest_csum", VirtIONet, host_features, 2105 VIRTIO_NET_F_GUEST_CSUM, true), 2106 DEFINE_PROP_BIT("gso", VirtIONet, host_features, VIRTIO_NET_F_GSO, true), 2107 DEFINE_PROP_BIT("guest_tso4", VirtIONet, host_features, 2108 VIRTIO_NET_F_GUEST_TSO4, true), 2109 DEFINE_PROP_BIT("guest_tso6", VirtIONet, host_features, 2110 VIRTIO_NET_F_GUEST_TSO6, true), 2111 DEFINE_PROP_BIT("guest_ecn", VirtIONet, host_features, 2112 VIRTIO_NET_F_GUEST_ECN, true), 2113 DEFINE_PROP_BIT("guest_ufo", VirtIONet, host_features, 2114 VIRTIO_NET_F_GUEST_UFO, true), 2115 DEFINE_PROP_BIT("guest_announce", VirtIONet, host_features, 2116 VIRTIO_NET_F_GUEST_ANNOUNCE, true), 2117 DEFINE_PROP_BIT("host_tso4", VirtIONet, host_features, 2118 VIRTIO_NET_F_HOST_TSO4, true), 2119 DEFINE_PROP_BIT("host_tso6", VirtIONet, host_features, 2120 VIRTIO_NET_F_HOST_TSO6, true), 2121 DEFINE_PROP_BIT("host_ecn", VirtIONet, host_features, 2122 VIRTIO_NET_F_HOST_ECN, true), 2123 DEFINE_PROP_BIT("host_ufo", VirtIONet, host_features, 2124 VIRTIO_NET_F_HOST_UFO, true), 2125 DEFINE_PROP_BIT("mrg_rxbuf", VirtIONet, host_features, 2126 VIRTIO_NET_F_MRG_RXBUF, true), 2127 DEFINE_PROP_BIT("status", VirtIONet, host_features, 2128 VIRTIO_NET_F_STATUS, true), 2129 DEFINE_PROP_BIT("ctrl_vq", VirtIONet, host_features, 2130 VIRTIO_NET_F_CTRL_VQ, true), 2131 DEFINE_PROP_BIT("ctrl_rx", VirtIONet, host_features, 2132 VIRTIO_NET_F_CTRL_RX, true), 2133 DEFINE_PROP_BIT("ctrl_vlan", VirtIONet, host_features, 2134 VIRTIO_NET_F_CTRL_VLAN, true), 2135 DEFINE_PROP_BIT("ctrl_rx_extra", VirtIONet, host_features, 2136 VIRTIO_NET_F_CTRL_RX_EXTRA, true), 2137 DEFINE_PROP_BIT("ctrl_mac_addr", VirtIONet, host_features, 2138 VIRTIO_NET_F_CTRL_MAC_ADDR, true), 2139 DEFINE_PROP_BIT("ctrl_guest_offloads", VirtIONet, host_features, 2140 VIRTIO_NET_F_CTRL_GUEST_OFFLOADS, true), 2141 DEFINE_PROP_BIT("mq", VirtIONet, host_features, VIRTIO_NET_F_MQ, false), 2142 DEFINE_NIC_PROPERTIES(VirtIONet, nic_conf), 2143 DEFINE_PROP_UINT32("x-txtimer", VirtIONet, net_conf.txtimer, 2144 TX_TIMER_INTERVAL), 2145 DEFINE_PROP_INT32("x-txburst", VirtIONet, net_conf.txburst, TX_BURST), 2146 DEFINE_PROP_STRING("tx", VirtIONet, net_conf.tx), 2147 DEFINE_PROP_UINT16("rx_queue_size", VirtIONet, net_conf.rx_queue_size, 2148 VIRTIO_NET_RX_QUEUE_DEFAULT_SIZE), 2149 DEFINE_PROP_UINT16("tx_queue_size", VirtIONet, net_conf.tx_queue_size, 2150 VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE), 2151 DEFINE_PROP_UINT16("host_mtu", VirtIONet, net_conf.mtu, 0), 2152 DEFINE_PROP_BOOL("x-mtu-bypass-backend", VirtIONet, mtu_bypass_backend, 2153 true), 2154 DEFINE_PROP_END_OF_LIST(), 2155 }; 2156 2157 static void virtio_net_class_init(ObjectClass *klass, void *data) 2158 { 2159 DeviceClass *dc = DEVICE_CLASS(klass); 2160 VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass); 2161 2162 dc->props = virtio_net_properties; 2163 dc->vmsd = &vmstate_virtio_net; 2164 set_bit(DEVICE_CATEGORY_NETWORK, dc->categories); 2165 vdc->realize = virtio_net_device_realize; 2166 vdc->unrealize = virtio_net_device_unrealize; 2167 vdc->get_config = virtio_net_get_config; 2168 vdc->set_config = virtio_net_set_config; 2169 vdc->get_features = virtio_net_get_features; 2170 vdc->set_features = virtio_net_set_features; 2171 vdc->bad_features = virtio_net_bad_features; 2172 vdc->reset = virtio_net_reset; 2173 vdc->set_status = virtio_net_set_status; 2174 vdc->guest_notifier_mask = virtio_net_guest_notifier_mask; 2175 vdc->guest_notifier_pending = virtio_net_guest_notifier_pending; 2176 vdc->legacy_features |= (0x1 << VIRTIO_NET_F_GSO); 2177 vdc->vmsd = &vmstate_virtio_net_device; 2178 } 2179 2180 static const TypeInfo virtio_net_info = { 2181 .name = TYPE_VIRTIO_NET, 2182 .parent = TYPE_VIRTIO_DEVICE, 2183 .instance_size = sizeof(VirtIONet), 2184 .instance_init = virtio_net_instance_init, 2185 .class_init = virtio_net_class_init, 2186 }; 2187 2188 static void virtio_register_types(void) 2189 { 2190 type_register_static(&virtio_net_info); 2191 } 2192 2193 type_init(virtio_register_types) 2194