/*
 * Virtio Network Device
 *
 * Copyright IBM, Corp. 2007
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "qemu/atomic.h"
#include "qemu/iov.h"
#include "qemu/main-loop.h"
#include "qemu/module.h"
#include "hw/virtio/virtio.h"
#include "net/net.h"
#include "net/checksum.h"
#include "net/tap.h"
#include "qemu/error-report.h"
#include "qemu/timer.h"
#include "qemu/option.h"
#include "qemu/option_int.h"
#include "qemu/config-file.h"
#include "qapi/qmp/qdict.h"
#include "hw/virtio/virtio-net.h"
#include "net/vhost_net.h"
#include "net/announce.h"
#include "hw/virtio/virtio-bus.h"
#include "qapi/error.h"
#include "qapi/qapi-events-net.h"
#include "hw/qdev-properties.h"
#include "qapi/qapi-types-migration.h"
#include "qapi/qapi-events-migration.h"
#include "hw/virtio/virtio-access.h"
#include "migration/misc.h"
#include "standard-headers/linux/ethtool.h"
#include "sysemu/sysemu.h"
#include "trace.h"
#include "monitor/qdev.h"
#include "hw/pci/pci.h"
#include "net_rx_pkt.h"
#include "hw/virtio/vhost.h"

#define VIRTIO_NET_VM_VERSION    11

#define MAC_TABLE_ENTRIES    64
#define MAX_VLAN    (1 << 12)   /* Per 802.1Q definition */

/* previously fixed value */
#define VIRTIO_NET_RX_QUEUE_DEFAULT_SIZE 256
#define VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE 256

/* for now, only allow larger queues; with virtio-1, guest can downsize */
#define VIRTIO_NET_RX_QUEUE_MIN_SIZE VIRTIO_NET_RX_QUEUE_DEFAULT_SIZE
#define VIRTIO_NET_TX_QUEUE_MIN_SIZE VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE

#define VIRTIO_NET_IP4_ADDR_SIZE   8        /* ipv4 saddr + daddr */

#define VIRTIO_NET_TCP_FLAG         0x3F
#define VIRTIO_NET_TCP_HDR_LENGTH   0xF000

/* IPv4 max payload, 16 bits in the header */
#define VIRTIO_NET_MAX_IP4_PAYLOAD (65535 - sizeof(struct ip_header))
#define VIRTIO_NET_MAX_TCP_PAYLOAD 65535

/* header length value in ip header without option */
#define VIRTIO_NET_IP4_HEADER_LENGTH 5

#define VIRTIO_NET_IP6_ADDR_SIZE   32      /* ipv6 saddr + daddr */
#define VIRTIO_NET_MAX_IP6_PAYLOAD VIRTIO_NET_MAX_TCP_PAYLOAD

/* Purge coalesced packets timer interval.  This value affects performance
   significantly and should be tuned carefully: '300000' (300us) is the
   recommended value for passing the WHQL test, while '50000' can gain 2x
   netperf throughput with tso/gso/gro 'off'.
*/ 80 #define VIRTIO_NET_RSC_DEFAULT_INTERVAL 300000 81 82 #define VIRTIO_NET_RSS_SUPPORTED_HASHES (VIRTIO_NET_RSS_HASH_TYPE_IPv4 | \ 83 VIRTIO_NET_RSS_HASH_TYPE_TCPv4 | \ 84 VIRTIO_NET_RSS_HASH_TYPE_UDPv4 | \ 85 VIRTIO_NET_RSS_HASH_TYPE_IPv6 | \ 86 VIRTIO_NET_RSS_HASH_TYPE_TCPv6 | \ 87 VIRTIO_NET_RSS_HASH_TYPE_UDPv6 | \ 88 VIRTIO_NET_RSS_HASH_TYPE_IP_EX | \ 89 VIRTIO_NET_RSS_HASH_TYPE_TCP_EX | \ 90 VIRTIO_NET_RSS_HASH_TYPE_UDP_EX) 91 92 static VirtIOFeature feature_sizes[] = { 93 {.flags = 1ULL << VIRTIO_NET_F_MAC, 94 .end = endof(struct virtio_net_config, mac)}, 95 {.flags = 1ULL << VIRTIO_NET_F_STATUS, 96 .end = endof(struct virtio_net_config, status)}, 97 {.flags = 1ULL << VIRTIO_NET_F_MQ, 98 .end = endof(struct virtio_net_config, max_virtqueue_pairs)}, 99 {.flags = 1ULL << VIRTIO_NET_F_MTU, 100 .end = endof(struct virtio_net_config, mtu)}, 101 {.flags = 1ULL << VIRTIO_NET_F_SPEED_DUPLEX, 102 .end = endof(struct virtio_net_config, duplex)}, 103 {.flags = (1ULL << VIRTIO_NET_F_RSS) | (1ULL << VIRTIO_NET_F_HASH_REPORT), 104 .end = endof(struct virtio_net_config, supported_hash_types)}, 105 {} 106 }; 107 108 static VirtIONetQueue *virtio_net_get_subqueue(NetClientState *nc) 109 { 110 VirtIONet *n = qemu_get_nic_opaque(nc); 111 112 return &n->vqs[nc->queue_index]; 113 } 114 115 static int vq2q(int queue_index) 116 { 117 return queue_index / 2; 118 } 119 120 /* TODO 121 * - we could suppress RX interrupt if we were so inclined. 122 */ 123 124 static void virtio_net_get_config(VirtIODevice *vdev, uint8_t *config) 125 { 126 VirtIONet *n = VIRTIO_NET(vdev); 127 struct virtio_net_config netcfg; 128 NetClientState *nc = qemu_get_queue(n->nic); 129 130 int ret = 0; 131 memset(&netcfg, 0 , sizeof(struct virtio_net_config)); 132 virtio_stw_p(vdev, &netcfg.status, n->status); 133 virtio_stw_p(vdev, &netcfg.max_virtqueue_pairs, n->max_queues); 134 virtio_stw_p(vdev, &netcfg.mtu, n->net_conf.mtu); 135 memcpy(netcfg.mac, n->mac, ETH_ALEN); 136 virtio_stl_p(vdev, &netcfg.speed, n->net_conf.speed); 137 netcfg.duplex = n->net_conf.duplex; 138 netcfg.rss_max_key_size = VIRTIO_NET_RSS_MAX_KEY_SIZE; 139 virtio_stw_p(vdev, &netcfg.rss_max_indirection_table_length, 140 virtio_host_has_feature(vdev, VIRTIO_NET_F_RSS) ? 141 VIRTIO_NET_RSS_MAX_TABLE_LEN : 1); 142 virtio_stl_p(vdev, &netcfg.supported_hash_types, 143 VIRTIO_NET_RSS_SUPPORTED_HASHES); 144 memcpy(config, &netcfg, n->config_size); 145 146 /* 147 * Is this VDPA? No peer means not VDPA: there's no way to 148 * disconnect/reconnect a VDPA peer. 149 */ 150 if (nc->peer && nc->peer->info->type == NET_CLIENT_DRIVER_VHOST_VDPA) { 151 ret = vhost_net_get_config(get_vhost_net(nc->peer), (uint8_t *)&netcfg, 152 n->config_size); 153 if (ret != -1) { 154 memcpy(config, &netcfg, n->config_size); 155 } 156 } 157 } 158 159 static void virtio_net_set_config(VirtIODevice *vdev, const uint8_t *config) 160 { 161 VirtIONet *n = VIRTIO_NET(vdev); 162 struct virtio_net_config netcfg = {}; 163 NetClientState *nc = qemu_get_queue(n->nic); 164 165 memcpy(&netcfg, config, n->config_size); 166 167 if (!virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR) && 168 !virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1) && 169 memcmp(netcfg.mac, n->mac, ETH_ALEN)) { 170 memcpy(n->mac, netcfg.mac, ETH_ALEN); 171 qemu_format_nic_info_str(qemu_get_queue(n->nic), n->mac); 172 } 173 174 /* 175 * Is this VDPA? No peer means not VDPA: there's no way to 176 * disconnect/reconnect a VDPA peer. 
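     * For a vhost-vdpa peer the backend maintains its own view of the config
     * space, so the update is also forwarded to it below via
     * vhost_net_set_config().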
177 */ 178 if (nc->peer && nc->peer->info->type == NET_CLIENT_DRIVER_VHOST_VDPA) { 179 vhost_net_set_config(get_vhost_net(nc->peer), 180 (uint8_t *)&netcfg, 0, n->config_size, 181 VHOST_SET_CONFIG_TYPE_MASTER); 182 } 183 } 184 185 static bool virtio_net_started(VirtIONet *n, uint8_t status) 186 { 187 VirtIODevice *vdev = VIRTIO_DEVICE(n); 188 return (status & VIRTIO_CONFIG_S_DRIVER_OK) && 189 (n->status & VIRTIO_NET_S_LINK_UP) && vdev->vm_running; 190 } 191 192 static void virtio_net_announce_notify(VirtIONet *net) 193 { 194 VirtIODevice *vdev = VIRTIO_DEVICE(net); 195 trace_virtio_net_announce_notify(); 196 197 net->status |= VIRTIO_NET_S_ANNOUNCE; 198 virtio_notify_config(vdev); 199 } 200 201 static void virtio_net_announce_timer(void *opaque) 202 { 203 VirtIONet *n = opaque; 204 trace_virtio_net_announce_timer(n->announce_timer.round); 205 206 n->announce_timer.round--; 207 virtio_net_announce_notify(n); 208 } 209 210 static void virtio_net_announce(NetClientState *nc) 211 { 212 VirtIONet *n = qemu_get_nic_opaque(nc); 213 VirtIODevice *vdev = VIRTIO_DEVICE(n); 214 215 /* 216 * Make sure the virtio migration announcement timer isn't running 217 * If it is, let it trigger announcement so that we do not cause 218 * confusion. 219 */ 220 if (n->announce_timer.round) { 221 return; 222 } 223 224 if (virtio_vdev_has_feature(vdev, VIRTIO_NET_F_GUEST_ANNOUNCE) && 225 virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ)) { 226 virtio_net_announce_notify(n); 227 } 228 } 229 230 static void virtio_net_vhost_status(VirtIONet *n, uint8_t status) 231 { 232 VirtIODevice *vdev = VIRTIO_DEVICE(n); 233 NetClientState *nc = qemu_get_queue(n->nic); 234 int queues = n->multiqueue ? n->max_queues : 1; 235 236 if (!get_vhost_net(nc->peer)) { 237 return; 238 } 239 240 if ((virtio_net_started(n, status) && !nc->peer->link_down) == 241 !!n->vhost_started) { 242 return; 243 } 244 if (!n->vhost_started) { 245 int r, i; 246 247 if (n->needs_vnet_hdr_swap) { 248 error_report("backend does not support %s vnet headers; " 249 "falling back on userspace virtio", 250 virtio_is_big_endian(vdev) ? "BE" : "LE"); 251 return; 252 } 253 254 /* Any packets outstanding? Purge them to avoid touching rings 255 * when vhost is running. 256 */ 257 for (i = 0; i < queues; i++) { 258 NetClientState *qnc = qemu_get_subqueue(n->nic, i); 259 260 /* Purge both directions: TX and RX. 
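             * The peer's incoming queue holds packets the guest queued
             * towards the backend (TX); our own incoming queue holds packets
             * the backend queued towards the guest (RX).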
*/ 261 qemu_net_queue_purge(qnc->peer->incoming_queue, qnc); 262 qemu_net_queue_purge(qnc->incoming_queue, qnc->peer); 263 } 264 265 if (virtio_has_feature(vdev->guest_features, VIRTIO_NET_F_MTU)) { 266 r = vhost_net_set_mtu(get_vhost_net(nc->peer), n->net_conf.mtu); 267 if (r < 0) { 268 error_report("%uBytes MTU not supported by the backend", 269 n->net_conf.mtu); 270 271 return; 272 } 273 } 274 275 n->vhost_started = 1; 276 r = vhost_net_start(vdev, n->nic->ncs, queues); 277 if (r < 0) { 278 error_report("unable to start vhost net: %d: " 279 "falling back on userspace virtio", -r); 280 n->vhost_started = 0; 281 } 282 } else { 283 vhost_net_stop(vdev, n->nic->ncs, queues); 284 n->vhost_started = 0; 285 } 286 } 287 288 static int virtio_net_set_vnet_endian_one(VirtIODevice *vdev, 289 NetClientState *peer, 290 bool enable) 291 { 292 if (virtio_is_big_endian(vdev)) { 293 return qemu_set_vnet_be(peer, enable); 294 } else { 295 return qemu_set_vnet_le(peer, enable); 296 } 297 } 298 299 static bool virtio_net_set_vnet_endian(VirtIODevice *vdev, NetClientState *ncs, 300 int queues, bool enable) 301 { 302 int i; 303 304 for (i = 0; i < queues; i++) { 305 if (virtio_net_set_vnet_endian_one(vdev, ncs[i].peer, enable) < 0 && 306 enable) { 307 while (--i >= 0) { 308 virtio_net_set_vnet_endian_one(vdev, ncs[i].peer, false); 309 } 310 311 return true; 312 } 313 } 314 315 return false; 316 } 317 318 static void virtio_net_vnet_endian_status(VirtIONet *n, uint8_t status) 319 { 320 VirtIODevice *vdev = VIRTIO_DEVICE(n); 321 int queues = n->multiqueue ? n->max_queues : 1; 322 323 if (virtio_net_started(n, status)) { 324 /* Before using the device, we tell the network backend about the 325 * endianness to use when parsing vnet headers. If the backend 326 * can't do it, we fallback onto fixing the headers in the core 327 * virtio-net code. 328 */ 329 n->needs_vnet_hdr_swap = virtio_net_set_vnet_endian(vdev, n->nic->ncs, 330 queues, true); 331 } else if (virtio_net_started(n, vdev->status)) { 332 /* After using the device, we need to reset the network backend to 333 * the default (guest native endianness), otherwise the guest may 334 * lose network connectivity if it is rebooted into a different 335 * endianness. 
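         * (vdev->status still holds the previous value at this point, which
         * is how the started -> stopped transition is detected.)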
336 */ 337 virtio_net_set_vnet_endian(vdev, n->nic->ncs, queues, false); 338 } 339 } 340 341 static void virtio_net_drop_tx_queue_data(VirtIODevice *vdev, VirtQueue *vq) 342 { 343 unsigned int dropped = virtqueue_drop_all(vq); 344 if (dropped) { 345 virtio_notify(vdev, vq); 346 } 347 } 348 349 static void virtio_net_set_status(struct VirtIODevice *vdev, uint8_t status) 350 { 351 VirtIONet *n = VIRTIO_NET(vdev); 352 VirtIONetQueue *q; 353 int i; 354 uint8_t queue_status; 355 356 virtio_net_vnet_endian_status(n, status); 357 virtio_net_vhost_status(n, status); 358 359 for (i = 0; i < n->max_queues; i++) { 360 NetClientState *ncs = qemu_get_subqueue(n->nic, i); 361 bool queue_started; 362 q = &n->vqs[i]; 363 364 if ((!n->multiqueue && i != 0) || i >= n->curr_queues) { 365 queue_status = 0; 366 } else { 367 queue_status = status; 368 } 369 queue_started = 370 virtio_net_started(n, queue_status) && !n->vhost_started; 371 372 if (queue_started) { 373 qemu_flush_queued_packets(ncs); 374 } 375 376 if (!q->tx_waiting) { 377 continue; 378 } 379 380 if (queue_started) { 381 if (q->tx_timer) { 382 timer_mod(q->tx_timer, 383 qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + n->tx_timeout); 384 } else { 385 qemu_bh_schedule(q->tx_bh); 386 } 387 } else { 388 if (q->tx_timer) { 389 timer_del(q->tx_timer); 390 } else { 391 qemu_bh_cancel(q->tx_bh); 392 } 393 if ((n->status & VIRTIO_NET_S_LINK_UP) == 0 && 394 (queue_status & VIRTIO_CONFIG_S_DRIVER_OK) && 395 vdev->vm_running) { 396 /* if tx is waiting we are likely have some packets in tx queue 397 * and disabled notification */ 398 q->tx_waiting = 0; 399 virtio_queue_set_notification(q->tx_vq, 1); 400 virtio_net_drop_tx_queue_data(vdev, q->tx_vq); 401 } 402 } 403 } 404 } 405 406 static void virtio_net_set_link_status(NetClientState *nc) 407 { 408 VirtIONet *n = qemu_get_nic_opaque(nc); 409 VirtIODevice *vdev = VIRTIO_DEVICE(n); 410 uint16_t old_status = n->status; 411 412 if (nc->link_down) 413 n->status &= ~VIRTIO_NET_S_LINK_UP; 414 else 415 n->status |= VIRTIO_NET_S_LINK_UP; 416 417 if (n->status != old_status) 418 virtio_notify_config(vdev); 419 420 virtio_net_set_status(vdev, vdev->status); 421 } 422 423 static void rxfilter_notify(NetClientState *nc) 424 { 425 VirtIONet *n = qemu_get_nic_opaque(nc); 426 427 if (nc->rxfilter_notify_enabled) { 428 char *path = object_get_canonical_path(OBJECT(n->qdev)); 429 qapi_event_send_nic_rx_filter_changed(!!n->netclient_name, 430 n->netclient_name, path); 431 g_free(path); 432 433 /* disable event notification to avoid events flooding */ 434 nc->rxfilter_notify_enabled = 0; 435 } 436 } 437 438 static intList *get_vlan_table(VirtIONet *n) 439 { 440 intList *list; 441 int i, j; 442 443 list = NULL; 444 for (i = 0; i < MAX_VLAN >> 5; i++) { 445 for (j = 0; n->vlans[i] && j <= 0x1f; j++) { 446 if (n->vlans[i] & (1U << j)) { 447 QAPI_LIST_PREPEND(list, (i << 5) + j); 448 } 449 } 450 } 451 452 return list; 453 } 454 455 static RxFilterInfo *virtio_net_query_rxfilter(NetClientState *nc) 456 { 457 VirtIONet *n = qemu_get_nic_opaque(nc); 458 VirtIODevice *vdev = VIRTIO_DEVICE(n); 459 RxFilterInfo *info; 460 strList *str_list; 461 int i; 462 463 info = g_malloc0(sizeof(*info)); 464 info->name = g_strdup(nc->name); 465 info->promiscuous = n->promisc; 466 467 if (n->nouni) { 468 info->unicast = RX_STATE_NONE; 469 } else if (n->alluni) { 470 info->unicast = RX_STATE_ALL; 471 } else { 472 info->unicast = RX_STATE_NORMAL; 473 } 474 475 if (n->nomulti) { 476 info->multicast = RX_STATE_NONE; 477 } else if (n->allmulti) { 478 info->multicast 
= RX_STATE_ALL; 479 } else { 480 info->multicast = RX_STATE_NORMAL; 481 } 482 483 info->broadcast_allowed = n->nobcast; 484 info->multicast_overflow = n->mac_table.multi_overflow; 485 info->unicast_overflow = n->mac_table.uni_overflow; 486 487 info->main_mac = qemu_mac_strdup_printf(n->mac); 488 489 str_list = NULL; 490 for (i = 0; i < n->mac_table.first_multi; i++) { 491 QAPI_LIST_PREPEND(str_list, 492 qemu_mac_strdup_printf(n->mac_table.macs + i * ETH_ALEN)); 493 } 494 info->unicast_table = str_list; 495 496 str_list = NULL; 497 for (i = n->mac_table.first_multi; i < n->mac_table.in_use; i++) { 498 QAPI_LIST_PREPEND(str_list, 499 qemu_mac_strdup_printf(n->mac_table.macs + i * ETH_ALEN)); 500 } 501 info->multicast_table = str_list; 502 info->vlan_table = get_vlan_table(n); 503 504 if (!virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_VLAN)) { 505 info->vlan = RX_STATE_ALL; 506 } else if (!info->vlan_table) { 507 info->vlan = RX_STATE_NONE; 508 } else { 509 info->vlan = RX_STATE_NORMAL; 510 } 511 512 /* enable event notification after query */ 513 nc->rxfilter_notify_enabled = 1; 514 515 return info; 516 } 517 518 static void virtio_net_reset(VirtIODevice *vdev) 519 { 520 VirtIONet *n = VIRTIO_NET(vdev); 521 int i; 522 523 /* Reset back to compatibility mode */ 524 n->promisc = 1; 525 n->allmulti = 0; 526 n->alluni = 0; 527 n->nomulti = 0; 528 n->nouni = 0; 529 n->nobcast = 0; 530 /* multiqueue is disabled by default */ 531 n->curr_queues = 1; 532 timer_del(n->announce_timer.tm); 533 n->announce_timer.round = 0; 534 n->status &= ~VIRTIO_NET_S_ANNOUNCE; 535 536 /* Flush any MAC and VLAN filter table state */ 537 n->mac_table.in_use = 0; 538 n->mac_table.first_multi = 0; 539 n->mac_table.multi_overflow = 0; 540 n->mac_table.uni_overflow = 0; 541 memset(n->mac_table.macs, 0, MAC_TABLE_ENTRIES * ETH_ALEN); 542 memcpy(&n->mac[0], &n->nic->conf->macaddr, sizeof(n->mac)); 543 qemu_format_nic_info_str(qemu_get_queue(n->nic), n->mac); 544 memset(n->vlans, 0, MAX_VLAN >> 3); 545 546 /* Flush any async TX */ 547 for (i = 0; i < n->max_queues; i++) { 548 NetClientState *nc = qemu_get_subqueue(n->nic, i); 549 550 if (nc->peer) { 551 qemu_flush_or_purge_queued_packets(nc->peer, true); 552 assert(!virtio_net_get_subqueue(nc)->async_tx.elem); 553 } 554 } 555 } 556 557 static void peer_test_vnet_hdr(VirtIONet *n) 558 { 559 NetClientState *nc = qemu_get_queue(n->nic); 560 if (!nc->peer) { 561 return; 562 } 563 564 n->has_vnet_hdr = qemu_has_vnet_hdr(nc->peer); 565 } 566 567 static int peer_has_vnet_hdr(VirtIONet *n) 568 { 569 return n->has_vnet_hdr; 570 } 571 572 static int peer_has_ufo(VirtIONet *n) 573 { 574 if (!peer_has_vnet_hdr(n)) 575 return 0; 576 577 n->has_ufo = qemu_has_ufo(qemu_get_queue(n->nic)->peer); 578 579 return n->has_ufo; 580 } 581 582 static void virtio_net_set_mrg_rx_bufs(VirtIONet *n, int mergeable_rx_bufs, 583 int version_1, int hash_report) 584 { 585 int i; 586 NetClientState *nc; 587 588 n->mergeable_rx_bufs = mergeable_rx_bufs; 589 590 if (version_1) { 591 n->guest_hdr_len = hash_report ? 592 sizeof(struct virtio_net_hdr_v1_hash) : 593 sizeof(struct virtio_net_hdr_mrg_rxbuf); 594 n->rss_data.populate_hash = !!hash_report; 595 } else { 596 n->guest_hdr_len = n->mergeable_rx_bufs ? 
597 sizeof(struct virtio_net_hdr_mrg_rxbuf) : 598 sizeof(struct virtio_net_hdr); 599 } 600 601 for (i = 0; i < n->max_queues; i++) { 602 nc = qemu_get_subqueue(n->nic, i); 603 604 if (peer_has_vnet_hdr(n) && 605 qemu_has_vnet_hdr_len(nc->peer, n->guest_hdr_len)) { 606 qemu_set_vnet_hdr_len(nc->peer, n->guest_hdr_len); 607 n->host_hdr_len = n->guest_hdr_len; 608 } 609 } 610 } 611 612 static int virtio_net_max_tx_queue_size(VirtIONet *n) 613 { 614 NetClientState *peer = n->nic_conf.peers.ncs[0]; 615 616 /* 617 * Backends other than vhost-user don't support max queue size. 618 */ 619 if (!peer) { 620 return VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE; 621 } 622 623 if (peer->info->type != NET_CLIENT_DRIVER_VHOST_USER) { 624 return VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE; 625 } 626 627 return VIRTQUEUE_MAX_SIZE; 628 } 629 630 static int peer_attach(VirtIONet *n, int index) 631 { 632 NetClientState *nc = qemu_get_subqueue(n->nic, index); 633 634 if (!nc->peer) { 635 return 0; 636 } 637 638 if (nc->peer->info->type == NET_CLIENT_DRIVER_VHOST_USER) { 639 vhost_set_vring_enable(nc->peer, 1); 640 } 641 642 if (nc->peer->info->type != NET_CLIENT_DRIVER_TAP) { 643 return 0; 644 } 645 646 if (n->max_queues == 1) { 647 return 0; 648 } 649 650 return tap_enable(nc->peer); 651 } 652 653 static int peer_detach(VirtIONet *n, int index) 654 { 655 NetClientState *nc = qemu_get_subqueue(n->nic, index); 656 657 if (!nc->peer) { 658 return 0; 659 } 660 661 if (nc->peer->info->type == NET_CLIENT_DRIVER_VHOST_USER) { 662 vhost_set_vring_enable(nc->peer, 0); 663 } 664 665 if (nc->peer->info->type != NET_CLIENT_DRIVER_TAP) { 666 return 0; 667 } 668 669 return tap_disable(nc->peer); 670 } 671 672 static void virtio_net_set_queues(VirtIONet *n) 673 { 674 int i; 675 int r; 676 677 if (n->nic->peer_deleted) { 678 return; 679 } 680 681 for (i = 0; i < n->max_queues; i++) { 682 if (i < n->curr_queues) { 683 r = peer_attach(n, i); 684 assert(!r); 685 } else { 686 r = peer_detach(n, i); 687 assert(!r); 688 } 689 } 690 } 691 692 static void virtio_net_set_multiqueue(VirtIONet *n, int multiqueue); 693 694 static uint64_t virtio_net_get_features(VirtIODevice *vdev, uint64_t features, 695 Error **errp) 696 { 697 VirtIONet *n = VIRTIO_NET(vdev); 698 NetClientState *nc = qemu_get_queue(n->nic); 699 700 /* Firstly sync all virtio-net possible supported features */ 701 features |= n->host_features; 702 703 virtio_add_feature(&features, VIRTIO_NET_F_MAC); 704 705 if (!peer_has_vnet_hdr(n)) { 706 virtio_clear_feature(&features, VIRTIO_NET_F_CSUM); 707 virtio_clear_feature(&features, VIRTIO_NET_F_HOST_TSO4); 708 virtio_clear_feature(&features, VIRTIO_NET_F_HOST_TSO6); 709 virtio_clear_feature(&features, VIRTIO_NET_F_HOST_ECN); 710 711 virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_CSUM); 712 virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_TSO4); 713 virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_TSO6); 714 virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_ECN); 715 716 virtio_clear_feature(&features, VIRTIO_NET_F_HASH_REPORT); 717 } 718 719 if (!peer_has_vnet_hdr(n) || !peer_has_ufo(n)) { 720 virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_UFO); 721 virtio_clear_feature(&features, VIRTIO_NET_F_HOST_UFO); 722 } 723 724 if (!get_vhost_net(nc->peer)) { 725 return features; 726 } 727 728 virtio_clear_feature(&features, VIRTIO_NET_F_RSS); 729 virtio_clear_feature(&features, VIRTIO_NET_F_HASH_REPORT); 730 features = vhost_net_get_features(get_vhost_net(nc->peer), features); 731 vdev->backend_features = features; 732 733 if 
(n->mtu_bypass_backend && 734 (n->host_features & 1ULL << VIRTIO_NET_F_MTU)) { 735 features |= (1ULL << VIRTIO_NET_F_MTU); 736 } 737 738 return features; 739 } 740 741 static uint64_t virtio_net_bad_features(VirtIODevice *vdev) 742 { 743 uint64_t features = 0; 744 745 /* Linux kernel 2.6.25. It understood MAC (as everyone must), 746 * but also these: */ 747 virtio_add_feature(&features, VIRTIO_NET_F_MAC); 748 virtio_add_feature(&features, VIRTIO_NET_F_CSUM); 749 virtio_add_feature(&features, VIRTIO_NET_F_HOST_TSO4); 750 virtio_add_feature(&features, VIRTIO_NET_F_HOST_TSO6); 751 virtio_add_feature(&features, VIRTIO_NET_F_HOST_ECN); 752 753 return features; 754 } 755 756 static void virtio_net_apply_guest_offloads(VirtIONet *n) 757 { 758 qemu_set_offload(qemu_get_queue(n->nic)->peer, 759 !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_CSUM)), 760 !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_TSO4)), 761 !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_TSO6)), 762 !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_ECN)), 763 !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_UFO))); 764 } 765 766 static uint64_t virtio_net_guest_offloads_by_features(uint32_t features) 767 { 768 static const uint64_t guest_offloads_mask = 769 (1ULL << VIRTIO_NET_F_GUEST_CSUM) | 770 (1ULL << VIRTIO_NET_F_GUEST_TSO4) | 771 (1ULL << VIRTIO_NET_F_GUEST_TSO6) | 772 (1ULL << VIRTIO_NET_F_GUEST_ECN) | 773 (1ULL << VIRTIO_NET_F_GUEST_UFO); 774 775 return guest_offloads_mask & features; 776 } 777 778 static inline uint64_t virtio_net_supported_guest_offloads(VirtIONet *n) 779 { 780 VirtIODevice *vdev = VIRTIO_DEVICE(n); 781 return virtio_net_guest_offloads_by_features(vdev->guest_features); 782 } 783 784 typedef struct { 785 VirtIONet *n; 786 char *id; 787 } FailoverId; 788 789 /** 790 * Set the id of the failover primary device 791 * 792 * @opaque: FailoverId to setup 793 * @opts: opts for device we are handling 794 * @errp: returns an error if this function fails 795 */ 796 static int failover_set_primary(void *opaque, QemuOpts *opts, Error **errp) 797 { 798 FailoverId *fid = opaque; 799 const char *standby_id = qemu_opt_get(opts, "failover_pair_id"); 800 801 if (g_strcmp0(standby_id, fid->n->netclient_name) == 0) { 802 fid->id = g_strdup(opts->id); 803 return 1; 804 } 805 806 return 0; 807 } 808 809 /** 810 * Find the primary device id for this failover virtio-net 811 * 812 * @n: VirtIONet device 813 * @errp: returns an error if this function fails 814 */ 815 static char *failover_find_primary_device_id(VirtIONet *n) 816 { 817 Error *err = NULL; 818 FailoverId fid; 819 820 fid.n = n; 821 if (!qemu_opts_foreach(qemu_find_opts("device"), 822 failover_set_primary, &fid, &err)) { 823 return NULL; 824 } 825 return fid.id; 826 } 827 828 /** 829 * Find the primary device for this failover virtio-net 830 * 831 * @n: VirtIONet device 832 * @errp: returns an error if this function fails 833 */ 834 static DeviceState *failover_find_primary_device(VirtIONet *n) 835 { 836 char *id = failover_find_primary_device_id(n); 837 838 if (!id) { 839 return NULL; 840 } 841 842 return qdev_find_recursive(sysbus_get_default(), id); 843 } 844 845 static void failover_add_primary(VirtIONet *n, Error **errp) 846 { 847 Error *err = NULL; 848 QemuOpts *opts; 849 char *id; 850 DeviceState *dev = failover_find_primary_device(n); 851 852 if (dev) { 853 return; 854 } 855 856 id = failover_find_primary_device_id(n); 857 if (!id) { 858 return; 859 } 860 opts = qemu_opts_find(qemu_find_opts("device"), id); 861 if 
(opts) { 862 dev = qdev_device_add(opts, &err); 863 if (err) { 864 qemu_opts_del(opts); 865 } else { 866 object_unref(OBJECT(dev)); 867 } 868 } else { 869 error_setg(errp, "Primary device not found"); 870 error_append_hint(errp, "Virtio-net failover will not work. Make " 871 "sure primary device has parameter" 872 " failover_pair_id=<virtio-net-id>\n"); 873 } 874 error_propagate(errp, err); 875 } 876 877 static void virtio_net_set_features(VirtIODevice *vdev, uint64_t features) 878 { 879 VirtIONet *n = VIRTIO_NET(vdev); 880 Error *err = NULL; 881 int i; 882 883 if (n->mtu_bypass_backend && 884 !virtio_has_feature(vdev->backend_features, VIRTIO_NET_F_MTU)) { 885 features &= ~(1ULL << VIRTIO_NET_F_MTU); 886 } 887 888 virtio_net_set_multiqueue(n, 889 virtio_has_feature(features, VIRTIO_NET_F_RSS) || 890 virtio_has_feature(features, VIRTIO_NET_F_MQ)); 891 892 virtio_net_set_mrg_rx_bufs(n, 893 virtio_has_feature(features, 894 VIRTIO_NET_F_MRG_RXBUF), 895 virtio_has_feature(features, 896 VIRTIO_F_VERSION_1), 897 virtio_has_feature(features, 898 VIRTIO_NET_F_HASH_REPORT)); 899 900 n->rsc4_enabled = virtio_has_feature(features, VIRTIO_NET_F_RSC_EXT) && 901 virtio_has_feature(features, VIRTIO_NET_F_GUEST_TSO4); 902 n->rsc6_enabled = virtio_has_feature(features, VIRTIO_NET_F_RSC_EXT) && 903 virtio_has_feature(features, VIRTIO_NET_F_GUEST_TSO6); 904 n->rss_data.redirect = virtio_has_feature(features, VIRTIO_NET_F_RSS); 905 906 if (n->has_vnet_hdr) { 907 n->curr_guest_offloads = 908 virtio_net_guest_offloads_by_features(features); 909 virtio_net_apply_guest_offloads(n); 910 } 911 912 for (i = 0; i < n->max_queues; i++) { 913 NetClientState *nc = qemu_get_subqueue(n->nic, i); 914 915 if (!get_vhost_net(nc->peer)) { 916 continue; 917 } 918 vhost_net_ack_features(get_vhost_net(nc->peer), features); 919 } 920 921 if (virtio_has_feature(features, VIRTIO_NET_F_CTRL_VLAN)) { 922 memset(n->vlans, 0, MAX_VLAN >> 3); 923 } else { 924 memset(n->vlans, 0xff, MAX_VLAN >> 3); 925 } 926 927 if (virtio_has_feature(features, VIRTIO_NET_F_STANDBY)) { 928 qapi_event_send_failover_negotiated(n->netclient_name); 929 qatomic_set(&n->failover_primary_hidden, false); 930 failover_add_primary(n, &err); 931 if (err) { 932 warn_report_err(err); 933 } 934 } 935 } 936 937 static int virtio_net_handle_rx_mode(VirtIONet *n, uint8_t cmd, 938 struct iovec *iov, unsigned int iov_cnt) 939 { 940 uint8_t on; 941 size_t s; 942 NetClientState *nc = qemu_get_queue(n->nic); 943 944 s = iov_to_buf(iov, iov_cnt, 0, &on, sizeof(on)); 945 if (s != sizeof(on)) { 946 return VIRTIO_NET_ERR; 947 } 948 949 if (cmd == VIRTIO_NET_CTRL_RX_PROMISC) { 950 n->promisc = on; 951 } else if (cmd == VIRTIO_NET_CTRL_RX_ALLMULTI) { 952 n->allmulti = on; 953 } else if (cmd == VIRTIO_NET_CTRL_RX_ALLUNI) { 954 n->alluni = on; 955 } else if (cmd == VIRTIO_NET_CTRL_RX_NOMULTI) { 956 n->nomulti = on; 957 } else if (cmd == VIRTIO_NET_CTRL_RX_NOUNI) { 958 n->nouni = on; 959 } else if (cmd == VIRTIO_NET_CTRL_RX_NOBCAST) { 960 n->nobcast = on; 961 } else { 962 return VIRTIO_NET_ERR; 963 } 964 965 rxfilter_notify(nc); 966 967 return VIRTIO_NET_OK; 968 } 969 970 static int virtio_net_handle_offloads(VirtIONet *n, uint8_t cmd, 971 struct iovec *iov, unsigned int iov_cnt) 972 { 973 VirtIODevice *vdev = VIRTIO_DEVICE(n); 974 uint64_t offloads; 975 size_t s; 976 977 if (!virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS)) { 978 return VIRTIO_NET_ERR; 979 } 980 981 s = iov_to_buf(iov, iov_cnt, 0, &offloads, sizeof(offloads)); 982 if (s != sizeof(offloads)) { 983 
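        /* Control buffer too short to hold the 64-bit offloads word. */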
return VIRTIO_NET_ERR; 984 } 985 986 if (cmd == VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET) { 987 uint64_t supported_offloads; 988 989 offloads = virtio_ldq_p(vdev, &offloads); 990 991 if (!n->has_vnet_hdr) { 992 return VIRTIO_NET_ERR; 993 } 994 995 n->rsc4_enabled = virtio_has_feature(offloads, VIRTIO_NET_F_RSC_EXT) && 996 virtio_has_feature(offloads, VIRTIO_NET_F_GUEST_TSO4); 997 n->rsc6_enabled = virtio_has_feature(offloads, VIRTIO_NET_F_RSC_EXT) && 998 virtio_has_feature(offloads, VIRTIO_NET_F_GUEST_TSO6); 999 virtio_clear_feature(&offloads, VIRTIO_NET_F_RSC_EXT); 1000 1001 supported_offloads = virtio_net_supported_guest_offloads(n); 1002 if (offloads & ~supported_offloads) { 1003 return VIRTIO_NET_ERR; 1004 } 1005 1006 n->curr_guest_offloads = offloads; 1007 virtio_net_apply_guest_offloads(n); 1008 1009 return VIRTIO_NET_OK; 1010 } else { 1011 return VIRTIO_NET_ERR; 1012 } 1013 } 1014 1015 static int virtio_net_handle_mac(VirtIONet *n, uint8_t cmd, 1016 struct iovec *iov, unsigned int iov_cnt) 1017 { 1018 VirtIODevice *vdev = VIRTIO_DEVICE(n); 1019 struct virtio_net_ctrl_mac mac_data; 1020 size_t s; 1021 NetClientState *nc = qemu_get_queue(n->nic); 1022 1023 if (cmd == VIRTIO_NET_CTRL_MAC_ADDR_SET) { 1024 if (iov_size(iov, iov_cnt) != sizeof(n->mac)) { 1025 return VIRTIO_NET_ERR; 1026 } 1027 s = iov_to_buf(iov, iov_cnt, 0, &n->mac, sizeof(n->mac)); 1028 assert(s == sizeof(n->mac)); 1029 qemu_format_nic_info_str(qemu_get_queue(n->nic), n->mac); 1030 rxfilter_notify(nc); 1031 1032 return VIRTIO_NET_OK; 1033 } 1034 1035 if (cmd != VIRTIO_NET_CTRL_MAC_TABLE_SET) { 1036 return VIRTIO_NET_ERR; 1037 } 1038 1039 int in_use = 0; 1040 int first_multi = 0; 1041 uint8_t uni_overflow = 0; 1042 uint8_t multi_overflow = 0; 1043 uint8_t *macs = g_malloc0(MAC_TABLE_ENTRIES * ETH_ALEN); 1044 1045 s = iov_to_buf(iov, iov_cnt, 0, &mac_data.entries, 1046 sizeof(mac_data.entries)); 1047 mac_data.entries = virtio_ldl_p(vdev, &mac_data.entries); 1048 if (s != sizeof(mac_data.entries)) { 1049 goto error; 1050 } 1051 iov_discard_front(&iov, &iov_cnt, s); 1052 1053 if (mac_data.entries * ETH_ALEN > iov_size(iov, iov_cnt)) { 1054 goto error; 1055 } 1056 1057 if (mac_data.entries <= MAC_TABLE_ENTRIES) { 1058 s = iov_to_buf(iov, iov_cnt, 0, macs, 1059 mac_data.entries * ETH_ALEN); 1060 if (s != mac_data.entries * ETH_ALEN) { 1061 goto error; 1062 } 1063 in_use += mac_data.entries; 1064 } else { 1065 uni_overflow = 1; 1066 } 1067 1068 iov_discard_front(&iov, &iov_cnt, mac_data.entries * ETH_ALEN); 1069 1070 first_multi = in_use; 1071 1072 s = iov_to_buf(iov, iov_cnt, 0, &mac_data.entries, 1073 sizeof(mac_data.entries)); 1074 mac_data.entries = virtio_ldl_p(vdev, &mac_data.entries); 1075 if (s != sizeof(mac_data.entries)) { 1076 goto error; 1077 } 1078 1079 iov_discard_front(&iov, &iov_cnt, s); 1080 1081 if (mac_data.entries * ETH_ALEN != iov_size(iov, iov_cnt)) { 1082 goto error; 1083 } 1084 1085 if (mac_data.entries <= MAC_TABLE_ENTRIES - in_use) { 1086 s = iov_to_buf(iov, iov_cnt, 0, &macs[in_use * ETH_ALEN], 1087 mac_data.entries * ETH_ALEN); 1088 if (s != mac_data.entries * ETH_ALEN) { 1089 goto error; 1090 } 1091 in_use += mac_data.entries; 1092 } else { 1093 multi_overflow = 1; 1094 } 1095 1096 n->mac_table.in_use = in_use; 1097 n->mac_table.first_multi = first_multi; 1098 n->mac_table.uni_overflow = uni_overflow; 1099 n->mac_table.multi_overflow = multi_overflow; 1100 memcpy(n->mac_table.macs, macs, MAC_TABLE_ENTRIES * ETH_ALEN); 1101 g_free(macs); 1102 rxfilter_notify(nc); 1103 1104 return VIRTIO_NET_OK; 1105 1106 
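    /*
     * On any parse failure the scratch table is freed and the previous MAC
     * filter table is left untouched.
     */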
error: 1107 g_free(macs); 1108 return VIRTIO_NET_ERR; 1109 } 1110 1111 static int virtio_net_handle_vlan_table(VirtIONet *n, uint8_t cmd, 1112 struct iovec *iov, unsigned int iov_cnt) 1113 { 1114 VirtIODevice *vdev = VIRTIO_DEVICE(n); 1115 uint16_t vid; 1116 size_t s; 1117 NetClientState *nc = qemu_get_queue(n->nic); 1118 1119 s = iov_to_buf(iov, iov_cnt, 0, &vid, sizeof(vid)); 1120 vid = virtio_lduw_p(vdev, &vid); 1121 if (s != sizeof(vid)) { 1122 return VIRTIO_NET_ERR; 1123 } 1124 1125 if (vid >= MAX_VLAN) 1126 return VIRTIO_NET_ERR; 1127 1128 if (cmd == VIRTIO_NET_CTRL_VLAN_ADD) 1129 n->vlans[vid >> 5] |= (1U << (vid & 0x1f)); 1130 else if (cmd == VIRTIO_NET_CTRL_VLAN_DEL) 1131 n->vlans[vid >> 5] &= ~(1U << (vid & 0x1f)); 1132 else 1133 return VIRTIO_NET_ERR; 1134 1135 rxfilter_notify(nc); 1136 1137 return VIRTIO_NET_OK; 1138 } 1139 1140 static int virtio_net_handle_announce(VirtIONet *n, uint8_t cmd, 1141 struct iovec *iov, unsigned int iov_cnt) 1142 { 1143 trace_virtio_net_handle_announce(n->announce_timer.round); 1144 if (cmd == VIRTIO_NET_CTRL_ANNOUNCE_ACK && 1145 n->status & VIRTIO_NET_S_ANNOUNCE) { 1146 n->status &= ~VIRTIO_NET_S_ANNOUNCE; 1147 if (n->announce_timer.round) { 1148 qemu_announce_timer_step(&n->announce_timer); 1149 } 1150 return VIRTIO_NET_OK; 1151 } else { 1152 return VIRTIO_NET_ERR; 1153 } 1154 } 1155 1156 static void virtio_net_disable_rss(VirtIONet *n) 1157 { 1158 if (n->rss_data.enabled) { 1159 trace_virtio_net_rss_disable(); 1160 } 1161 n->rss_data.enabled = false; 1162 } 1163 1164 static uint16_t virtio_net_handle_rss(VirtIONet *n, 1165 struct iovec *iov, 1166 unsigned int iov_cnt, 1167 bool do_rss) 1168 { 1169 VirtIODevice *vdev = VIRTIO_DEVICE(n); 1170 struct virtio_net_rss_config cfg; 1171 size_t s, offset = 0, size_get; 1172 uint16_t queues, i; 1173 struct { 1174 uint16_t us; 1175 uint8_t b; 1176 } QEMU_PACKED temp; 1177 const char *err_msg = ""; 1178 uint32_t err_value = 0; 1179 1180 if (do_rss && !virtio_vdev_has_feature(vdev, VIRTIO_NET_F_RSS)) { 1181 err_msg = "RSS is not negotiated"; 1182 goto error; 1183 } 1184 if (!do_rss && !virtio_vdev_has_feature(vdev, VIRTIO_NET_F_HASH_REPORT)) { 1185 err_msg = "Hash report is not negotiated"; 1186 goto error; 1187 } 1188 size_get = offsetof(struct virtio_net_rss_config, indirection_table); 1189 s = iov_to_buf(iov, iov_cnt, offset, &cfg, size_get); 1190 if (s != size_get) { 1191 err_msg = "Short command buffer"; 1192 err_value = (uint32_t)s; 1193 goto error; 1194 } 1195 n->rss_data.hash_types = virtio_ldl_p(vdev, &cfg.hash_types); 1196 n->rss_data.indirections_len = 1197 virtio_lduw_p(vdev, &cfg.indirection_table_mask); 1198 n->rss_data.indirections_len++; 1199 if (!do_rss) { 1200 n->rss_data.indirections_len = 1; 1201 } 1202 if (!is_power_of_2(n->rss_data.indirections_len)) { 1203 err_msg = "Invalid size of indirection table"; 1204 err_value = n->rss_data.indirections_len; 1205 goto error; 1206 } 1207 if (n->rss_data.indirections_len > VIRTIO_NET_RSS_MAX_TABLE_LEN) { 1208 err_msg = "Too large indirection table"; 1209 err_value = n->rss_data.indirections_len; 1210 goto error; 1211 } 1212 n->rss_data.default_queue = do_rss ? 
        virtio_lduw_p(vdev, &cfg.unclassified_queue) : 0;
    if (n->rss_data.default_queue >= n->max_queues) {
        err_msg = "Invalid default queue";
        err_value = n->rss_data.default_queue;
        goto error;
    }
    offset += size_get;
    size_get = sizeof(uint16_t) * n->rss_data.indirections_len;
    g_free(n->rss_data.indirections_table);
    n->rss_data.indirections_table = g_malloc(size_get);
    if (!n->rss_data.indirections_table) {
        err_msg = "Can't allocate indirections table";
        err_value = n->rss_data.indirections_len;
        goto error;
    }
    s = iov_to_buf(iov, iov_cnt, offset,
                   n->rss_data.indirections_table, size_get);
    if (s != size_get) {
        err_msg = "Short indirection table buffer";
        err_value = (uint32_t)s;
        goto error;
    }
    for (i = 0; i < n->rss_data.indirections_len; ++i) {
        uint16_t val = n->rss_data.indirections_table[i];
        n->rss_data.indirections_table[i] = virtio_lduw_p(vdev, &val);
    }
    offset += size_get;
    size_get = sizeof(temp);
    s = iov_to_buf(iov, iov_cnt, offset, &temp, size_get);
    if (s != size_get) {
        err_msg = "Can't get queues";
        err_value = (uint32_t)s;
        goto error;
    }
    queues = do_rss ? virtio_lduw_p(vdev, &temp.us) : n->curr_queues;
    if (queues == 0 || queues > n->max_queues) {
        err_msg = "Invalid number of queues";
        err_value = queues;
        goto error;
    }
    if (temp.b > VIRTIO_NET_RSS_MAX_KEY_SIZE) {
        err_msg = "Invalid key size";
        err_value = temp.b;
        goto error;
    }
    if (!temp.b && n->rss_data.hash_types) {
        err_msg = "No key provided";
        err_value = 0;
        goto error;
    }
    if (!temp.b && !n->rss_data.hash_types) {
        virtio_net_disable_rss(n);
        return queues;
    }
    offset += size_get;
    size_get = temp.b;
    s = iov_to_buf(iov, iov_cnt, offset, n->rss_data.key, size_get);
    if (s != size_get) {
        err_msg = "Can't get key buffer";
        err_value = (uint32_t)s;
        goto error;
    }
    n->rss_data.enabled = true;
    trace_virtio_net_rss_enable(n->rss_data.hash_types,
                                n->rss_data.indirections_len,
                                temp.b);
    return queues;
error:
    trace_virtio_net_rss_error(err_msg, err_value);
    virtio_net_disable_rss(n);
    return 0;
}

static int virtio_net_handle_mq(VirtIONet *n, uint8_t cmd,
                                struct iovec *iov, unsigned int iov_cnt)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    uint16_t queues;

    virtio_net_disable_rss(n);
    if (cmd == VIRTIO_NET_CTRL_MQ_HASH_CONFIG) {
        queues = virtio_net_handle_rss(n, iov, iov_cnt, false);
        return queues ?
VIRTIO_NET_OK : VIRTIO_NET_ERR; 1296 } 1297 if (cmd == VIRTIO_NET_CTRL_MQ_RSS_CONFIG) { 1298 queues = virtio_net_handle_rss(n, iov, iov_cnt, true); 1299 } else if (cmd == VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET) { 1300 struct virtio_net_ctrl_mq mq; 1301 size_t s; 1302 if (!virtio_vdev_has_feature(vdev, VIRTIO_NET_F_MQ)) { 1303 return VIRTIO_NET_ERR; 1304 } 1305 s = iov_to_buf(iov, iov_cnt, 0, &mq, sizeof(mq)); 1306 if (s != sizeof(mq)) { 1307 return VIRTIO_NET_ERR; 1308 } 1309 queues = virtio_lduw_p(vdev, &mq.virtqueue_pairs); 1310 1311 } else { 1312 return VIRTIO_NET_ERR; 1313 } 1314 1315 if (queues < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN || 1316 queues > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX || 1317 queues > n->max_queues || 1318 !n->multiqueue) { 1319 return VIRTIO_NET_ERR; 1320 } 1321 1322 n->curr_queues = queues; 1323 /* stop the backend before changing the number of queues to avoid handling a 1324 * disabled queue */ 1325 virtio_net_set_status(vdev, vdev->status); 1326 virtio_net_set_queues(n); 1327 1328 return VIRTIO_NET_OK; 1329 } 1330 1331 static void virtio_net_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq) 1332 { 1333 VirtIONet *n = VIRTIO_NET(vdev); 1334 struct virtio_net_ctrl_hdr ctrl; 1335 virtio_net_ctrl_ack status = VIRTIO_NET_ERR; 1336 VirtQueueElement *elem; 1337 size_t s; 1338 struct iovec *iov, *iov2; 1339 unsigned int iov_cnt; 1340 1341 for (;;) { 1342 elem = virtqueue_pop(vq, sizeof(VirtQueueElement)); 1343 if (!elem) { 1344 break; 1345 } 1346 if (iov_size(elem->in_sg, elem->in_num) < sizeof(status) || 1347 iov_size(elem->out_sg, elem->out_num) < sizeof(ctrl)) { 1348 virtio_error(vdev, "virtio-net ctrl missing headers"); 1349 virtqueue_detach_element(vq, elem, 0); 1350 g_free(elem); 1351 break; 1352 } 1353 1354 iov_cnt = elem->out_num; 1355 iov2 = iov = g_memdup(elem->out_sg, sizeof(struct iovec) * elem->out_num); 1356 s = iov_to_buf(iov, iov_cnt, 0, &ctrl, sizeof(ctrl)); 1357 iov_discard_front(&iov, &iov_cnt, sizeof(ctrl)); 1358 if (s != sizeof(ctrl)) { 1359 status = VIRTIO_NET_ERR; 1360 } else if (ctrl.class == VIRTIO_NET_CTRL_RX) { 1361 status = virtio_net_handle_rx_mode(n, ctrl.cmd, iov, iov_cnt); 1362 } else if (ctrl.class == VIRTIO_NET_CTRL_MAC) { 1363 status = virtio_net_handle_mac(n, ctrl.cmd, iov, iov_cnt); 1364 } else if (ctrl.class == VIRTIO_NET_CTRL_VLAN) { 1365 status = virtio_net_handle_vlan_table(n, ctrl.cmd, iov, iov_cnt); 1366 } else if (ctrl.class == VIRTIO_NET_CTRL_ANNOUNCE) { 1367 status = virtio_net_handle_announce(n, ctrl.cmd, iov, iov_cnt); 1368 } else if (ctrl.class == VIRTIO_NET_CTRL_MQ) { 1369 status = virtio_net_handle_mq(n, ctrl.cmd, iov, iov_cnt); 1370 } else if (ctrl.class == VIRTIO_NET_CTRL_GUEST_OFFLOADS) { 1371 status = virtio_net_handle_offloads(n, ctrl.cmd, iov, iov_cnt); 1372 } 1373 1374 s = iov_from_buf(elem->in_sg, elem->in_num, 0, &status, sizeof(status)); 1375 assert(s == sizeof(status)); 1376 1377 virtqueue_push(vq, elem, sizeof(status)); 1378 virtio_notify(vdev, vq); 1379 g_free(iov2); 1380 g_free(elem); 1381 } 1382 } 1383 1384 /* RX */ 1385 1386 static void virtio_net_handle_rx(VirtIODevice *vdev, VirtQueue *vq) 1387 { 1388 VirtIONet *n = VIRTIO_NET(vdev); 1389 int queue_index = vq2q(virtio_get_queue_index(vq)); 1390 1391 qemu_flush_queued_packets(qemu_get_subqueue(n->nic, queue_index)); 1392 } 1393 1394 static bool virtio_net_can_receive(NetClientState *nc) 1395 { 1396 VirtIONet *n = qemu_get_nic_opaque(nc); 1397 VirtIODevice *vdev = VIRTIO_DEVICE(n); 1398 VirtIONetQueue *q = virtio_net_get_subqueue(nc); 1399 1400 if (!vdev->vm_running) { 1401 
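        /* Don't touch the virtqueues while the VM is stopped. */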
return false; 1402 } 1403 1404 if (nc->queue_index >= n->curr_queues) { 1405 return false; 1406 } 1407 1408 if (!virtio_queue_ready(q->rx_vq) || 1409 !(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK)) { 1410 return false; 1411 } 1412 1413 return true; 1414 } 1415 1416 static int virtio_net_has_buffers(VirtIONetQueue *q, int bufsize) 1417 { 1418 VirtIONet *n = q->n; 1419 if (virtio_queue_empty(q->rx_vq) || 1420 (n->mergeable_rx_bufs && 1421 !virtqueue_avail_bytes(q->rx_vq, bufsize, 0))) { 1422 virtio_queue_set_notification(q->rx_vq, 1); 1423 1424 /* To avoid a race condition where the guest has made some buffers 1425 * available after the above check but before notification was 1426 * enabled, check for available buffers again. 1427 */ 1428 if (virtio_queue_empty(q->rx_vq) || 1429 (n->mergeable_rx_bufs && 1430 !virtqueue_avail_bytes(q->rx_vq, bufsize, 0))) { 1431 return 0; 1432 } 1433 } 1434 1435 virtio_queue_set_notification(q->rx_vq, 0); 1436 return 1; 1437 } 1438 1439 static void virtio_net_hdr_swap(VirtIODevice *vdev, struct virtio_net_hdr *hdr) 1440 { 1441 virtio_tswap16s(vdev, &hdr->hdr_len); 1442 virtio_tswap16s(vdev, &hdr->gso_size); 1443 virtio_tswap16s(vdev, &hdr->csum_start); 1444 virtio_tswap16s(vdev, &hdr->csum_offset); 1445 } 1446 1447 /* dhclient uses AF_PACKET but doesn't pass auxdata to the kernel so 1448 * it never finds out that the packets don't have valid checksums. This 1449 * causes dhclient to get upset. Fedora's carried a patch for ages to 1450 * fix this with Xen but it hasn't appeared in an upstream release of 1451 * dhclient yet. 1452 * 1453 * To avoid breaking existing guests, we catch udp packets and add 1454 * checksums. This is terrible but it's better than hacking the guest 1455 * kernels. 1456 * 1457 * N.B. if we introduce a zero-copy API, this operation is no longer free so 1458 * we should provide a mechanism to disable it to avoid polluting the host 1459 * cache. 
1460 */ 1461 static void work_around_broken_dhclient(struct virtio_net_hdr *hdr, 1462 uint8_t *buf, size_t size) 1463 { 1464 if ((hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) && /* missing csum */ 1465 (size > 27 && size < 1500) && /* normal sized MTU */ 1466 (buf[12] == 0x08 && buf[13] == 0x00) && /* ethertype == IPv4 */ 1467 (buf[23] == 17) && /* ip.protocol == UDP */ 1468 (buf[34] == 0 && buf[35] == 67)) { /* udp.srcport == bootps */ 1469 net_checksum_calculate(buf, size, CSUM_UDP); 1470 hdr->flags &= ~VIRTIO_NET_HDR_F_NEEDS_CSUM; 1471 } 1472 } 1473 1474 static void receive_header(VirtIONet *n, const struct iovec *iov, int iov_cnt, 1475 const void *buf, size_t size) 1476 { 1477 if (n->has_vnet_hdr) { 1478 /* FIXME this cast is evil */ 1479 void *wbuf = (void *)buf; 1480 work_around_broken_dhclient(wbuf, wbuf + n->host_hdr_len, 1481 size - n->host_hdr_len); 1482 1483 if (n->needs_vnet_hdr_swap) { 1484 virtio_net_hdr_swap(VIRTIO_DEVICE(n), wbuf); 1485 } 1486 iov_from_buf(iov, iov_cnt, 0, buf, sizeof(struct virtio_net_hdr)); 1487 } else { 1488 struct virtio_net_hdr hdr = { 1489 .flags = 0, 1490 .gso_type = VIRTIO_NET_HDR_GSO_NONE 1491 }; 1492 iov_from_buf(iov, iov_cnt, 0, &hdr, sizeof hdr); 1493 } 1494 } 1495 1496 static int receive_filter(VirtIONet *n, const uint8_t *buf, int size) 1497 { 1498 static const uint8_t bcast[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; 1499 static const uint8_t vlan[] = {0x81, 0x00}; 1500 uint8_t *ptr = (uint8_t *)buf; 1501 int i; 1502 1503 if (n->promisc) 1504 return 1; 1505 1506 ptr += n->host_hdr_len; 1507 1508 if (!memcmp(&ptr[12], vlan, sizeof(vlan))) { 1509 int vid = lduw_be_p(ptr + 14) & 0xfff; 1510 if (!(n->vlans[vid >> 5] & (1U << (vid & 0x1f)))) 1511 return 0; 1512 } 1513 1514 if (ptr[0] & 1) { // multicast 1515 if (!memcmp(ptr, bcast, sizeof(bcast))) { 1516 return !n->nobcast; 1517 } else if (n->nomulti) { 1518 return 0; 1519 } else if (n->allmulti || n->mac_table.multi_overflow) { 1520 return 1; 1521 } 1522 1523 for (i = n->mac_table.first_multi; i < n->mac_table.in_use; i++) { 1524 if (!memcmp(ptr, &n->mac_table.macs[i * ETH_ALEN], ETH_ALEN)) { 1525 return 1; 1526 } 1527 } 1528 } else { // unicast 1529 if (n->nouni) { 1530 return 0; 1531 } else if (n->alluni || n->mac_table.uni_overflow) { 1532 return 1; 1533 } else if (!memcmp(ptr, n->mac, ETH_ALEN)) { 1534 return 1; 1535 } 1536 1537 for (i = 0; i < n->mac_table.first_multi; i++) { 1538 if (!memcmp(ptr, &n->mac_table.macs[i * ETH_ALEN], ETH_ALEN)) { 1539 return 1; 1540 } 1541 } 1542 } 1543 1544 return 0; 1545 } 1546 1547 static uint8_t virtio_net_get_hash_type(bool isip4, 1548 bool isip6, 1549 bool isudp, 1550 bool istcp, 1551 uint32_t types) 1552 { 1553 if (isip4) { 1554 if (istcp && (types & VIRTIO_NET_RSS_HASH_TYPE_TCPv4)) { 1555 return NetPktRssIpV4Tcp; 1556 } 1557 if (isudp && (types & VIRTIO_NET_RSS_HASH_TYPE_UDPv4)) { 1558 return NetPktRssIpV4Udp; 1559 } 1560 if (types & VIRTIO_NET_RSS_HASH_TYPE_IPv4) { 1561 return NetPktRssIpV4; 1562 } 1563 } else if (isip6) { 1564 uint32_t mask = VIRTIO_NET_RSS_HASH_TYPE_TCP_EX | 1565 VIRTIO_NET_RSS_HASH_TYPE_TCPv6; 1566 1567 if (istcp && (types & mask)) { 1568 return (types & VIRTIO_NET_RSS_HASH_TYPE_TCP_EX) ? 1569 NetPktRssIpV6TcpEx : NetPktRssIpV6Tcp; 1570 } 1571 mask = VIRTIO_NET_RSS_HASH_TYPE_UDP_EX | VIRTIO_NET_RSS_HASH_TYPE_UDPv6; 1572 if (isudp && (types & mask)) { 1573 return (types & VIRTIO_NET_RSS_HASH_TYPE_UDP_EX) ? 
1574 NetPktRssIpV6UdpEx : NetPktRssIpV6Udp; 1575 } 1576 mask = VIRTIO_NET_RSS_HASH_TYPE_IP_EX | VIRTIO_NET_RSS_HASH_TYPE_IPv6; 1577 if (types & mask) { 1578 return (types & VIRTIO_NET_RSS_HASH_TYPE_IP_EX) ? 1579 NetPktRssIpV6Ex : NetPktRssIpV6; 1580 } 1581 } 1582 return 0xff; 1583 } 1584 1585 static void virtio_set_packet_hash(const uint8_t *buf, uint8_t report, 1586 uint32_t hash) 1587 { 1588 struct virtio_net_hdr_v1_hash *hdr = (void *)buf; 1589 hdr->hash_value = hash; 1590 hdr->hash_report = report; 1591 } 1592 1593 static int virtio_net_process_rss(NetClientState *nc, const uint8_t *buf, 1594 size_t size) 1595 { 1596 VirtIONet *n = qemu_get_nic_opaque(nc); 1597 unsigned int index = nc->queue_index, new_index = index; 1598 struct NetRxPkt *pkt = n->rx_pkt; 1599 uint8_t net_hash_type; 1600 uint32_t hash; 1601 bool isip4, isip6, isudp, istcp; 1602 static const uint8_t reports[NetPktRssIpV6UdpEx + 1] = { 1603 VIRTIO_NET_HASH_REPORT_IPv4, 1604 VIRTIO_NET_HASH_REPORT_TCPv4, 1605 VIRTIO_NET_HASH_REPORT_TCPv6, 1606 VIRTIO_NET_HASH_REPORT_IPv6, 1607 VIRTIO_NET_HASH_REPORT_IPv6_EX, 1608 VIRTIO_NET_HASH_REPORT_TCPv6_EX, 1609 VIRTIO_NET_HASH_REPORT_UDPv4, 1610 VIRTIO_NET_HASH_REPORT_UDPv6, 1611 VIRTIO_NET_HASH_REPORT_UDPv6_EX 1612 }; 1613 1614 net_rx_pkt_set_protocols(pkt, buf + n->host_hdr_len, 1615 size - n->host_hdr_len); 1616 net_rx_pkt_get_protocols(pkt, &isip4, &isip6, &isudp, &istcp); 1617 if (isip4 && (net_rx_pkt_get_ip4_info(pkt)->fragment)) { 1618 istcp = isudp = false; 1619 } 1620 if (isip6 && (net_rx_pkt_get_ip6_info(pkt)->fragment)) { 1621 istcp = isudp = false; 1622 } 1623 net_hash_type = virtio_net_get_hash_type(isip4, isip6, isudp, istcp, 1624 n->rss_data.hash_types); 1625 if (net_hash_type > NetPktRssIpV6UdpEx) { 1626 if (n->rss_data.populate_hash) { 1627 virtio_set_packet_hash(buf, VIRTIO_NET_HASH_REPORT_NONE, 0); 1628 } 1629 return n->rss_data.redirect ? n->rss_data.default_queue : -1; 1630 } 1631 1632 hash = net_rx_pkt_calc_rss_hash(pkt, net_hash_type, n->rss_data.key); 1633 1634 if (n->rss_data.populate_hash) { 1635 virtio_set_packet_hash(buf, reports[net_hash_type], hash); 1636 } 1637 1638 if (n->rss_data.redirect) { 1639 new_index = hash & (n->rss_data.indirections_len - 1); 1640 new_index = n->rss_data.indirections_table[new_index]; 1641 } 1642 1643 return (index == new_index) ? 
-1 : new_index; 1644 } 1645 1646 static ssize_t virtio_net_receive_rcu(NetClientState *nc, const uint8_t *buf, 1647 size_t size, bool no_rss) 1648 { 1649 VirtIONet *n = qemu_get_nic_opaque(nc); 1650 VirtIONetQueue *q = virtio_net_get_subqueue(nc); 1651 VirtIODevice *vdev = VIRTIO_DEVICE(n); 1652 struct iovec mhdr_sg[VIRTQUEUE_MAX_SIZE]; 1653 struct virtio_net_hdr_mrg_rxbuf mhdr; 1654 unsigned mhdr_cnt = 0; 1655 size_t offset, i, guest_offset; 1656 1657 if (!virtio_net_can_receive(nc)) { 1658 return -1; 1659 } 1660 1661 if (!no_rss && n->rss_data.enabled) { 1662 int index = virtio_net_process_rss(nc, buf, size); 1663 if (index >= 0) { 1664 NetClientState *nc2 = qemu_get_subqueue(n->nic, index); 1665 return virtio_net_receive_rcu(nc2, buf, size, true); 1666 } 1667 } 1668 1669 /* hdr_len refers to the header we supply to the guest */ 1670 if (!virtio_net_has_buffers(q, size + n->guest_hdr_len - n->host_hdr_len)) { 1671 return 0; 1672 } 1673 1674 if (!receive_filter(n, buf, size)) 1675 return size; 1676 1677 offset = i = 0; 1678 1679 while (offset < size) { 1680 VirtQueueElement *elem; 1681 int len, total; 1682 const struct iovec *sg; 1683 1684 total = 0; 1685 1686 elem = virtqueue_pop(q->rx_vq, sizeof(VirtQueueElement)); 1687 if (!elem) { 1688 if (i) { 1689 virtio_error(vdev, "virtio-net unexpected empty queue: " 1690 "i %zd mergeable %d offset %zd, size %zd, " 1691 "guest hdr len %zd, host hdr len %zd " 1692 "guest features 0x%" PRIx64, 1693 i, n->mergeable_rx_bufs, offset, size, 1694 n->guest_hdr_len, n->host_hdr_len, 1695 vdev->guest_features); 1696 } 1697 return -1; 1698 } 1699 1700 if (elem->in_num < 1) { 1701 virtio_error(vdev, 1702 "virtio-net receive queue contains no in buffers"); 1703 virtqueue_detach_element(q->rx_vq, elem, 0); 1704 g_free(elem); 1705 return -1; 1706 } 1707 1708 sg = elem->in_sg; 1709 if (i == 0) { 1710 assert(offset == 0); 1711 if (n->mergeable_rx_bufs) { 1712 mhdr_cnt = iov_copy(mhdr_sg, ARRAY_SIZE(mhdr_sg), 1713 sg, elem->in_num, 1714 offsetof(typeof(mhdr), num_buffers), 1715 sizeof(mhdr.num_buffers)); 1716 } 1717 1718 receive_header(n, sg, elem->in_num, buf, size); 1719 if (n->rss_data.populate_hash) { 1720 offset = sizeof(mhdr); 1721 iov_from_buf(sg, elem->in_num, offset, 1722 buf + offset, n->host_hdr_len - sizeof(mhdr)); 1723 } 1724 offset = n->host_hdr_len; 1725 total += n->guest_hdr_len; 1726 guest_offset = n->guest_hdr_len; 1727 } else { 1728 guest_offset = 0; 1729 } 1730 1731 /* copy in packet. ugh */ 1732 len = iov_from_buf(sg, elem->in_num, guest_offset, 1733 buf + offset, size - offset); 1734 total += len; 1735 offset += len; 1736 /* If buffers can't be merged, at this point we 1737 * must have consumed the complete packet. 1738 * Otherwise, drop it. 
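         * virtqueue_unpop() returns the element to the queue so its
         * descriptors are not lost; returning 'size' tells the caller the
         * packet was consumed (i.e. silently dropped).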
         */
        if (!n->mergeable_rx_bufs && offset < size) {
            virtqueue_unpop(q->rx_vq, elem, total);
            g_free(elem);
            return size;
        }

        /* signal other side */
        virtqueue_fill(q->rx_vq, elem, total, i++);
        g_free(elem);
    }

    if (mhdr_cnt) {
        virtio_stw_p(vdev, &mhdr.num_buffers, i);
        iov_from_buf(mhdr_sg, mhdr_cnt,
                     0,
                     &mhdr.num_buffers, sizeof mhdr.num_buffers);
    }

    virtqueue_flush(q->rx_vq, i);
    virtio_notify(vdev, q->rx_vq);

    return size;
}

static ssize_t virtio_net_do_receive(NetClientState *nc, const uint8_t *buf,
                                     size_t size)
{
    RCU_READ_LOCK_GUARD();

    return virtio_net_receive_rcu(nc, buf, size, false);
}

static void virtio_net_rsc_extract_unit4(VirtioNetRscChain *chain,
                                         const uint8_t *buf,
                                         VirtioNetRscUnit *unit)
{
    uint16_t ip_hdrlen;
    struct ip_header *ip;

    ip = (struct ip_header *)(buf + chain->n->guest_hdr_len
                              + sizeof(struct eth_header));
    unit->ip = (void *)ip;
    ip_hdrlen = (ip->ip_ver_len & 0xF) << 2;
    unit->ip_plen = &ip->ip_len;
    unit->tcp = (struct tcp_header *)(((uint8_t *)unit->ip) + ip_hdrlen);
    unit->tcp_hdrlen = (htons(unit->tcp->th_offset_flags) & 0xF000) >> 10;
    unit->payload = htons(*unit->ip_plen) - ip_hdrlen - unit->tcp_hdrlen;
}

static void virtio_net_rsc_extract_unit6(VirtioNetRscChain *chain,
                                         const uint8_t *buf,
                                         VirtioNetRscUnit *unit)
{
    struct ip6_header *ip6;

    ip6 = (struct ip6_header *)(buf + chain->n->guest_hdr_len
                                + sizeof(struct eth_header));
    unit->ip = ip6;
    unit->ip_plen = &(ip6->ip6_ctlun.ip6_un1.ip6_un1_plen);
    unit->tcp = (struct tcp_header *)(((uint8_t *)unit->ip)
                                      + sizeof(struct ip6_header));
    unit->tcp_hdrlen = (htons(unit->tcp->th_offset_flags) & 0xF000) >> 10;

    /* There is a difference between the payload length in ipv4 and v6:
       the ip header is excluded in ipv6 */
    unit->payload = htons(*unit->ip_plen) - unit->tcp_hdrlen;
}

static size_t virtio_net_rsc_drain_seg(VirtioNetRscChain *chain,
                                       VirtioNetRscSeg *seg)
{
    int ret;
    struct virtio_net_hdr_v1 *h;

    h = (struct virtio_net_hdr_v1 *)seg->buf;
    h->flags = 0;
    h->gso_type = VIRTIO_NET_HDR_GSO_NONE;

    if (seg->is_coalesced) {
        h->rsc.segments = seg->packets;
        h->rsc.dup_acks = seg->dup_ack;
        h->flags = VIRTIO_NET_HDR_F_RSC_INFO;
        if (chain->proto == ETH_P_IP) {
            h->gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
        } else {
            h->gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
        }
    }

    ret = virtio_net_do_receive(seg->nc, seg->buf, seg->size);
    QTAILQ_REMOVE(&chain->buffers, seg, next);
    g_free(seg->buf);
    g_free(seg);

    return ret;
}

static void virtio_net_rsc_purge(void *opq)
{
    VirtioNetRscSeg *seg, *rn;
    VirtioNetRscChain *chain = (VirtioNetRscChain *)opq;

    QTAILQ_FOREACH_SAFE(seg, &chain->buffers, next, rn) {
        if (virtio_net_rsc_drain_seg(chain, seg) == 0) {
            chain->stat.purge_failed++;
            continue;
        }
    }

    chain->stat.timer++;
    if (!QTAILQ_EMPTY(&chain->buffers)) {
        timer_mod(chain->drain_timer,
                  qemu_clock_get_ns(QEMU_CLOCK_HOST) + chain->n->rsc_timeout);
    }
}

static void virtio_net_rsc_cleanup(VirtIONet *n)
{
    VirtioNetRscChain *chain, *rn_chain;
    VirtioNetRscSeg *seg, *rn_seg;

    QTAILQ_FOREACH_SAFE(chain,
&n->rsc_chains, next, rn_chain) { 1861 QTAILQ_FOREACH_SAFE(seg, &chain->buffers, next, rn_seg) { 1862 QTAILQ_REMOVE(&chain->buffers, seg, next); 1863 g_free(seg->buf); 1864 g_free(seg); 1865 } 1866 1867 timer_free(chain->drain_timer); 1868 QTAILQ_REMOVE(&n->rsc_chains, chain, next); 1869 g_free(chain); 1870 } 1871 } 1872 1873 static void virtio_net_rsc_cache_buf(VirtioNetRscChain *chain, 1874 NetClientState *nc, 1875 const uint8_t *buf, size_t size) 1876 { 1877 uint16_t hdr_len; 1878 VirtioNetRscSeg *seg; 1879 1880 hdr_len = chain->n->guest_hdr_len; 1881 seg = g_malloc(sizeof(VirtioNetRscSeg)); 1882 seg->buf = g_malloc(hdr_len + sizeof(struct eth_header) 1883 + sizeof(struct ip6_header) + VIRTIO_NET_MAX_TCP_PAYLOAD); 1884 memcpy(seg->buf, buf, size); 1885 seg->size = size; 1886 seg->packets = 1; 1887 seg->dup_ack = 0; 1888 seg->is_coalesced = 0; 1889 seg->nc = nc; 1890 1891 QTAILQ_INSERT_TAIL(&chain->buffers, seg, next); 1892 chain->stat.cache++; 1893 1894 switch (chain->proto) { 1895 case ETH_P_IP: 1896 virtio_net_rsc_extract_unit4(chain, seg->buf, &seg->unit); 1897 break; 1898 case ETH_P_IPV6: 1899 virtio_net_rsc_extract_unit6(chain, seg->buf, &seg->unit); 1900 break; 1901 default: 1902 g_assert_not_reached(); 1903 } 1904 } 1905 1906 static int32_t virtio_net_rsc_handle_ack(VirtioNetRscChain *chain, 1907 VirtioNetRscSeg *seg, 1908 const uint8_t *buf, 1909 struct tcp_header *n_tcp, 1910 struct tcp_header *o_tcp) 1911 { 1912 uint32_t nack, oack; 1913 uint16_t nwin, owin; 1914 1915 nack = htonl(n_tcp->th_ack); 1916 nwin = htons(n_tcp->th_win); 1917 oack = htonl(o_tcp->th_ack); 1918 owin = htons(o_tcp->th_win); 1919 1920 if ((nack - oack) >= VIRTIO_NET_MAX_TCP_PAYLOAD) { 1921 chain->stat.ack_out_of_win++; 1922 return RSC_FINAL; 1923 } else if (nack == oack) { 1924 /* duplicated ack or window probe */ 1925 if (nwin == owin) { 1926 /* duplicated ack, add dup ack count due to whql test up to 1 */ 1927 chain->stat.dup_ack++; 1928 return RSC_FINAL; 1929 } else { 1930 /* Coalesce window update */ 1931 o_tcp->th_win = n_tcp->th_win; 1932 chain->stat.win_update++; 1933 return RSC_COALESCE; 1934 } 1935 } else { 1936 /* pure ack, go to 'C', finalize*/ 1937 chain->stat.pure_ack++; 1938 return RSC_FINAL; 1939 } 1940 } 1941 1942 static int32_t virtio_net_rsc_coalesce_data(VirtioNetRscChain *chain, 1943 VirtioNetRscSeg *seg, 1944 const uint8_t *buf, 1945 VirtioNetRscUnit *n_unit) 1946 { 1947 void *data; 1948 uint16_t o_ip_len; 1949 uint32_t nseq, oseq; 1950 VirtioNetRscUnit *o_unit; 1951 1952 o_unit = &seg->unit; 1953 o_ip_len = htons(*o_unit->ip_plen); 1954 nseq = htonl(n_unit->tcp->th_seq); 1955 oseq = htonl(o_unit->tcp->th_seq); 1956 1957 /* out of order or retransmitted. 
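     * A sequence delta larger than the maximum TCP payload cannot belong
     * to this coalescing window, so finalize the cached segment.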
*/ 1958 if ((nseq - oseq) > VIRTIO_NET_MAX_TCP_PAYLOAD) { 1959 chain->stat.data_out_of_win++; 1960 return RSC_FINAL; 1961 } 1962 1963 data = ((uint8_t *)n_unit->tcp) + n_unit->tcp_hdrlen; 1964 if (nseq == oseq) { 1965 if ((o_unit->payload == 0) && n_unit->payload) { 1966 /* From no payload to payload, normal case, not a dup ack etc. */ 1967 chain->stat.data_after_pure_ack++; 1968 goto coalesce; 1969 } else { 1970 return virtio_net_rsc_handle_ack(chain, seg, buf, 1971 n_unit->tcp, o_unit->tcp); 1972 } 1973 } else if ((nseq - oseq) != o_unit->payload) { 1974 /* Not a consistent packet, out of order */ 1975 chain->stat.data_out_of_order++; 1976 return RSC_FINAL; 1977 } else { 1978 coalesce: 1979 if ((o_ip_len + n_unit->payload) > chain->max_payload) { 1980 chain->stat.over_size++; 1981 return RSC_FINAL; 1982 } 1983 1984 /* The expected data has arrived; the payload length is stored differently 1985 in v4/v6, so use the field value to update and record the new data length */ 1986 o_unit->payload += n_unit->payload; /* update new data len */ 1987 1988 /* update field in ip header */ 1989 *o_unit->ip_plen = htons(o_ip_len + n_unit->payload); 1990 1991 /* Bring the 'PUSH' flag along; the WHQL test guide says 'PUSH' can be coalesced 1992 for a Windows guest, while this may change the behavior for a Linux 1993 guest (only if it uses the RSC feature). */ 1994 o_unit->tcp->th_offset_flags = n_unit->tcp->th_offset_flags; 1995 1996 o_unit->tcp->th_ack = n_unit->tcp->th_ack; 1997 o_unit->tcp->th_win = n_unit->tcp->th_win; 1998 1999 memmove(seg->buf + seg->size, data, n_unit->payload); 2000 seg->size += n_unit->payload; 2001 seg->packets++; 2002 chain->stat.coalesced++; 2003 return RSC_COALESCE; 2004 } 2005 } 2006 2007 static int32_t virtio_net_rsc_coalesce4(VirtioNetRscChain *chain, 2008 VirtioNetRscSeg *seg, 2009 const uint8_t *buf, size_t size, 2010 VirtioNetRscUnit *unit) 2011 { 2012 struct ip_header *ip1, *ip2; 2013 2014 ip1 = (struct ip_header *)(unit->ip); 2015 ip2 = (struct ip_header *)(seg->unit.ip); 2016 if ((ip1->ip_src ^ ip2->ip_src) || (ip1->ip_dst ^ ip2->ip_dst) 2017 || (unit->tcp->th_sport ^ seg->unit.tcp->th_sport) 2018 || (unit->tcp->th_dport ^ seg->unit.tcp->th_dport)) { 2019 chain->stat.no_match++; 2020 return RSC_NO_MATCH; 2021 } 2022 2023 return virtio_net_rsc_coalesce_data(chain, seg, buf, unit); 2024 } 2025 2026 static int32_t virtio_net_rsc_coalesce6(VirtioNetRscChain *chain, 2027 VirtioNetRscSeg *seg, 2028 const uint8_t *buf, size_t size, 2029 VirtioNetRscUnit *unit) 2030 { 2031 struct ip6_header *ip1, *ip2; 2032 2033 ip1 = (struct ip6_header *)(unit->ip); 2034 ip2 = (struct ip6_header *)(seg->unit.ip); 2035 if (memcmp(&ip1->ip6_src, &ip2->ip6_src, sizeof(struct in6_address)) 2036 || memcmp(&ip1->ip6_dst, &ip2->ip6_dst, sizeof(struct in6_address)) 2037 || (unit->tcp->th_sport ^ seg->unit.tcp->th_sport) 2038 || (unit->tcp->th_dport ^ seg->unit.tcp->th_dport)) { 2039 chain->stat.no_match++; 2040 return RSC_NO_MATCH; 2041 } 2042 2043 return virtio_net_rsc_coalesce_data(chain, seg, buf, unit); 2044 } 2045 2046 /* Packets with 'SYN' should bypass; packets with other control flags should be sent after the flow is drained 2047 * to prevent out-of-order delivery */ 2048 static int virtio_net_rsc_tcp_ctrl_check(VirtioNetRscChain *chain, 2049 struct tcp_header *tcp) 2050 { 2051 uint16_t tcp_hdr; 2052 uint16_t tcp_flag; 2053 2054 tcp_flag = htons(tcp->th_offset_flags); 2055 tcp_hdr = (tcp_flag & VIRTIO_NET_TCP_HDR_LENGTH) >> 10; 2056 tcp_flag &= VIRTIO_NET_TCP_FLAG; 2057 if (tcp_flag & TH_SYN) { 2058 chain->stat.tcp_syn++; 2059 return RSC_BYPASS; 2060 } 2061 2062 if
(tcp_flag & (TH_FIN | TH_URG | TH_RST | TH_ECE | TH_CWR)) { 2063 chain->stat.tcp_ctrl_drain++; 2064 return RSC_FINAL; 2065 } 2066 2067 if (tcp_hdr > sizeof(struct tcp_header)) { 2068 chain->stat.tcp_all_opt++; 2069 return RSC_FINAL; 2070 } 2071 2072 return RSC_CANDIDATE; 2073 } 2074 2075 static size_t virtio_net_rsc_do_coalesce(VirtioNetRscChain *chain, 2076 NetClientState *nc, 2077 const uint8_t *buf, size_t size, 2078 VirtioNetRscUnit *unit) 2079 { 2080 int ret; 2081 VirtioNetRscSeg *seg, *nseg; 2082 2083 if (QTAILQ_EMPTY(&chain->buffers)) { 2084 chain->stat.empty_cache++; 2085 virtio_net_rsc_cache_buf(chain, nc, buf, size); 2086 timer_mod(chain->drain_timer, 2087 qemu_clock_get_ns(QEMU_CLOCK_HOST) + chain->n->rsc_timeout); 2088 return size; 2089 } 2090 2091 QTAILQ_FOREACH_SAFE(seg, &chain->buffers, next, nseg) { 2092 if (chain->proto == ETH_P_IP) { 2093 ret = virtio_net_rsc_coalesce4(chain, seg, buf, size, unit); 2094 } else { 2095 ret = virtio_net_rsc_coalesce6(chain, seg, buf, size, unit); 2096 } 2097 2098 if (ret == RSC_FINAL) { 2099 if (virtio_net_rsc_drain_seg(chain, seg) == 0) { 2100 /* Send failed */ 2101 chain->stat.final_failed++; 2102 return 0; 2103 } 2104 2105 /* Send current packet */ 2106 return virtio_net_do_receive(nc, buf, size); 2107 } else if (ret == RSC_NO_MATCH) { 2108 continue; 2109 } else { 2110 /* Coalesced, mark coalesced flag to tell calc cksum for ipv4 */ 2111 seg->is_coalesced = 1; 2112 return size; 2113 } 2114 } 2115 2116 chain->stat.no_match_cache++; 2117 virtio_net_rsc_cache_buf(chain, nc, buf, size); 2118 return size; 2119 } 2120 2121 /* Drain a connection data, this is to avoid out of order segments */ 2122 static size_t virtio_net_rsc_drain_flow(VirtioNetRscChain *chain, 2123 NetClientState *nc, 2124 const uint8_t *buf, size_t size, 2125 uint16_t ip_start, uint16_t ip_size, 2126 uint16_t tcp_port) 2127 { 2128 VirtioNetRscSeg *seg, *nseg; 2129 uint32_t ppair1, ppair2; 2130 2131 ppair1 = *(uint32_t *)(buf + tcp_port); 2132 QTAILQ_FOREACH_SAFE(seg, &chain->buffers, next, nseg) { 2133 ppair2 = *(uint32_t *)(seg->buf + tcp_port); 2134 if (memcmp(buf + ip_start, seg->buf + ip_start, ip_size) 2135 || (ppair1 != ppair2)) { 2136 continue; 2137 } 2138 if (virtio_net_rsc_drain_seg(chain, seg) == 0) { 2139 chain->stat.drain_failed++; 2140 } 2141 2142 break; 2143 } 2144 2145 return virtio_net_do_receive(nc, buf, size); 2146 } 2147 2148 static int32_t virtio_net_rsc_sanity_check4(VirtioNetRscChain *chain, 2149 struct ip_header *ip, 2150 const uint8_t *buf, size_t size) 2151 { 2152 uint16_t ip_len; 2153 2154 /* Not an ipv4 packet */ 2155 if (((ip->ip_ver_len & 0xF0) >> 4) != IP_HEADER_VERSION_4) { 2156 chain->stat.ip_option++; 2157 return RSC_BYPASS; 2158 } 2159 2160 /* Don't handle packets with ip option */ 2161 if ((ip->ip_ver_len & 0xF) != VIRTIO_NET_IP4_HEADER_LENGTH) { 2162 chain->stat.ip_option++; 2163 return RSC_BYPASS; 2164 } 2165 2166 if (ip->ip_p != IPPROTO_TCP) { 2167 chain->stat.bypass_not_tcp++; 2168 return RSC_BYPASS; 2169 } 2170 2171 /* Don't handle packets with ip fragment */ 2172 if (!(htons(ip->ip_off) & IP_DF)) { 2173 chain->stat.ip_frag++; 2174 return RSC_BYPASS; 2175 } 2176 2177 /* Don't handle packets with ecn flag */ 2178 if (IPTOS_ECN(ip->ip_tos)) { 2179 chain->stat.ip_ecn++; 2180 return RSC_BYPASS; 2181 } 2182 2183 ip_len = htons(ip->ip_len); 2184 if (ip_len < (sizeof(struct ip_header) + sizeof(struct tcp_header)) 2185 || ip_len > (size - chain->n->guest_hdr_len - 2186 sizeof(struct eth_header))) { 2187 chain->stat.ip_hacked++; 2188 return 
RSC_BYPASS; 2189 } 2190 2191 return RSC_CANDIDATE; 2192 } 2193 2194 static size_t virtio_net_rsc_receive4(VirtioNetRscChain *chain, 2195 NetClientState *nc, 2196 const uint8_t *buf, size_t size) 2197 { 2198 int32_t ret; 2199 uint16_t hdr_len; 2200 VirtioNetRscUnit unit; 2201 2202 hdr_len = ((VirtIONet *)(chain->n))->guest_hdr_len; 2203 2204 if (size < (hdr_len + sizeof(struct eth_header) + sizeof(struct ip_header) 2205 + sizeof(struct tcp_header))) { 2206 chain->stat.bypass_not_tcp++; 2207 return virtio_net_do_receive(nc, buf, size); 2208 } 2209 2210 virtio_net_rsc_extract_unit4(chain, buf, &unit); 2211 if (virtio_net_rsc_sanity_check4(chain, unit.ip, buf, size) 2212 != RSC_CANDIDATE) { 2213 return virtio_net_do_receive(nc, buf, size); 2214 } 2215 2216 ret = virtio_net_rsc_tcp_ctrl_check(chain, unit.tcp); 2217 if (ret == RSC_BYPASS) { 2218 return virtio_net_do_receive(nc, buf, size); 2219 } else if (ret == RSC_FINAL) { 2220 return virtio_net_rsc_drain_flow(chain, nc, buf, size, 2221 ((hdr_len + sizeof(struct eth_header)) + 12), 2222 VIRTIO_NET_IP4_ADDR_SIZE, 2223 hdr_len + sizeof(struct eth_header) + sizeof(struct ip_header)); 2224 } 2225 2226 return virtio_net_rsc_do_coalesce(chain, nc, buf, size, &unit); 2227 } 2228 2229 static int32_t virtio_net_rsc_sanity_check6(VirtioNetRscChain *chain, 2230 struct ip6_header *ip6, 2231 const uint8_t *buf, size_t size) 2232 { 2233 uint16_t ip_len; 2234 2235 if (((ip6->ip6_ctlun.ip6_un1.ip6_un1_flow & 0xF0) >> 4) 2236 != IP_HEADER_VERSION_6) { 2237 return RSC_BYPASS; 2238 } 2239 2240 /* Both option and protocol is checked in this */ 2241 if (ip6->ip6_ctlun.ip6_un1.ip6_un1_nxt != IPPROTO_TCP) { 2242 chain->stat.bypass_not_tcp++; 2243 return RSC_BYPASS; 2244 } 2245 2246 ip_len = htons(ip6->ip6_ctlun.ip6_un1.ip6_un1_plen); 2247 if (ip_len < sizeof(struct tcp_header) || 2248 ip_len > (size - chain->n->guest_hdr_len - sizeof(struct eth_header) 2249 - sizeof(struct ip6_header))) { 2250 chain->stat.ip_hacked++; 2251 return RSC_BYPASS; 2252 } 2253 2254 /* Don't handle packets with ecn flag */ 2255 if (IP6_ECN(ip6->ip6_ctlun.ip6_un3.ip6_un3_ecn)) { 2256 chain->stat.ip_ecn++; 2257 return RSC_BYPASS; 2258 } 2259 2260 return RSC_CANDIDATE; 2261 } 2262 2263 static size_t virtio_net_rsc_receive6(void *opq, NetClientState *nc, 2264 const uint8_t *buf, size_t size) 2265 { 2266 int32_t ret; 2267 uint16_t hdr_len; 2268 VirtioNetRscChain *chain; 2269 VirtioNetRscUnit unit; 2270 2271 chain = (VirtioNetRscChain *)opq; 2272 hdr_len = ((VirtIONet *)(chain->n))->guest_hdr_len; 2273 2274 if (size < (hdr_len + sizeof(struct eth_header) + sizeof(struct ip6_header) 2275 + sizeof(tcp_header))) { 2276 return virtio_net_do_receive(nc, buf, size); 2277 } 2278 2279 virtio_net_rsc_extract_unit6(chain, buf, &unit); 2280 if (RSC_CANDIDATE != virtio_net_rsc_sanity_check6(chain, 2281 unit.ip, buf, size)) { 2282 return virtio_net_do_receive(nc, buf, size); 2283 } 2284 2285 ret = virtio_net_rsc_tcp_ctrl_check(chain, unit.tcp); 2286 if (ret == RSC_BYPASS) { 2287 return virtio_net_do_receive(nc, buf, size); 2288 } else if (ret == RSC_FINAL) { 2289 return virtio_net_rsc_drain_flow(chain, nc, buf, size, 2290 ((hdr_len + sizeof(struct eth_header)) + 8), 2291 VIRTIO_NET_IP6_ADDR_SIZE, 2292 hdr_len + sizeof(struct eth_header) 2293 + sizeof(struct ip6_header)); 2294 } 2295 2296 return virtio_net_rsc_do_coalesce(chain, nc, buf, size, &unit); 2297 } 2298 2299 static VirtioNetRscChain *virtio_net_rsc_lookup_chain(VirtIONet *n, 2300 NetClientState *nc, 2301 uint16_t proto) 2302 { 2303 VirtioNetRscChain 
*chain; 2304 2305 if ((proto != (uint16_t)ETH_P_IP) && (proto != (uint16_t)ETH_P_IPV6)) { 2306 return NULL; 2307 } 2308 2309 QTAILQ_FOREACH(chain, &n->rsc_chains, next) { 2310 if (chain->proto == proto) { 2311 return chain; 2312 } 2313 } 2314 2315 chain = g_malloc(sizeof(*chain)); 2316 chain->n = n; 2317 chain->proto = proto; 2318 if (proto == (uint16_t)ETH_P_IP) { 2319 chain->max_payload = VIRTIO_NET_MAX_IP4_PAYLOAD; 2320 chain->gso_type = VIRTIO_NET_HDR_GSO_TCPV4; 2321 } else { 2322 chain->max_payload = VIRTIO_NET_MAX_IP6_PAYLOAD; 2323 chain->gso_type = VIRTIO_NET_HDR_GSO_TCPV6; 2324 } 2325 chain->drain_timer = timer_new_ns(QEMU_CLOCK_HOST, 2326 virtio_net_rsc_purge, chain); 2327 memset(&chain->stat, 0, sizeof(chain->stat)); 2328 2329 QTAILQ_INIT(&chain->buffers); 2330 QTAILQ_INSERT_TAIL(&n->rsc_chains, chain, next); 2331 2332 return chain; 2333 } 2334 2335 static ssize_t virtio_net_rsc_receive(NetClientState *nc, 2336 const uint8_t *buf, 2337 size_t size) 2338 { 2339 uint16_t proto; 2340 VirtioNetRscChain *chain; 2341 struct eth_header *eth; 2342 VirtIONet *n; 2343 2344 n = qemu_get_nic_opaque(nc); 2345 if (size < (n->host_hdr_len + sizeof(struct eth_header))) { 2346 return virtio_net_do_receive(nc, buf, size); 2347 } 2348 2349 eth = (struct eth_header *)(buf + n->guest_hdr_len); 2350 proto = htons(eth->h_proto); 2351 2352 chain = virtio_net_rsc_lookup_chain(n, nc, proto); 2353 if (chain) { 2354 chain->stat.received++; 2355 if (proto == (uint16_t)ETH_P_IP && n->rsc4_enabled) { 2356 return virtio_net_rsc_receive4(chain, nc, buf, size); 2357 } else if (proto == (uint16_t)ETH_P_IPV6 && n->rsc6_enabled) { 2358 return virtio_net_rsc_receive6(chain, nc, buf, size); 2359 } 2360 } 2361 return virtio_net_do_receive(nc, buf, size); 2362 } 2363 2364 static ssize_t virtio_net_receive(NetClientState *nc, const uint8_t *buf, 2365 size_t size) 2366 { 2367 VirtIONet *n = qemu_get_nic_opaque(nc); 2368 if ((n->rsc4_enabled || n->rsc6_enabled)) { 2369 return virtio_net_rsc_receive(nc, buf, size); 2370 } else { 2371 return virtio_net_do_receive(nc, buf, size); 2372 } 2373 } 2374 2375 static int32_t virtio_net_flush_tx(VirtIONetQueue *q); 2376 2377 static void virtio_net_tx_complete(NetClientState *nc, ssize_t len) 2378 { 2379 VirtIONet *n = qemu_get_nic_opaque(nc); 2380 VirtIONetQueue *q = virtio_net_get_subqueue(nc); 2381 VirtIODevice *vdev = VIRTIO_DEVICE(n); 2382 2383 virtqueue_push(q->tx_vq, q->async_tx.elem, 0); 2384 virtio_notify(vdev, q->tx_vq); 2385 2386 g_free(q->async_tx.elem); 2387 q->async_tx.elem = NULL; 2388 2389 virtio_queue_set_notification(q->tx_vq, 1); 2390 virtio_net_flush_tx(q); 2391 } 2392 2393 /* TX */ 2394 static int32_t virtio_net_flush_tx(VirtIONetQueue *q) 2395 { 2396 VirtIONet *n = q->n; 2397 VirtIODevice *vdev = VIRTIO_DEVICE(n); 2398 VirtQueueElement *elem; 2399 int32_t num_packets = 0; 2400 int queue_index = vq2q(virtio_get_queue_index(q->tx_vq)); 2401 if (!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK)) { 2402 return num_packets; 2403 } 2404 2405 if (q->async_tx.elem) { 2406 virtio_queue_set_notification(q->tx_vq, 0); 2407 return num_packets; 2408 } 2409 2410 for (;;) { 2411 ssize_t ret; 2412 unsigned int out_num; 2413 struct iovec sg[VIRTQUEUE_MAX_SIZE], sg2[VIRTQUEUE_MAX_SIZE + 1], *out_sg; 2414 struct virtio_net_hdr_mrg_rxbuf mhdr; 2415 2416 elem = virtqueue_pop(q->tx_vq, sizeof(VirtQueueElement)); 2417 if (!elem) { 2418 break; 2419 } 2420 2421 out_num = elem->out_num; 2422 out_sg = elem->out_sg; 2423 if (out_num < 1) { 2424 virtio_error(vdev, "virtio-net header not in first 
element"); 2425 virtqueue_detach_element(q->tx_vq, elem, 0); 2426 g_free(elem); 2427 return -EINVAL; 2428 } 2429 2430 if (n->has_vnet_hdr) { 2431 if (iov_to_buf(out_sg, out_num, 0, &mhdr, n->guest_hdr_len) < 2432 n->guest_hdr_len) { 2433 virtio_error(vdev, "virtio-net header incorrect"); 2434 virtqueue_detach_element(q->tx_vq, elem, 0); 2435 g_free(elem); 2436 return -EINVAL; 2437 } 2438 if (n->needs_vnet_hdr_swap) { 2439 virtio_net_hdr_swap(vdev, (void *) &mhdr); 2440 sg2[0].iov_base = &mhdr; 2441 sg2[0].iov_len = n->guest_hdr_len; 2442 out_num = iov_copy(&sg2[1], ARRAY_SIZE(sg2) - 1, 2443 out_sg, out_num, 2444 n->guest_hdr_len, -1); 2445 if (out_num == VIRTQUEUE_MAX_SIZE) { 2446 goto drop; 2447 } 2448 out_num += 1; 2449 out_sg = sg2; 2450 } 2451 } 2452 /* 2453 * If host wants to see the guest header as is, we can 2454 * pass it on unchanged. Otherwise, copy just the parts 2455 * that host is interested in. 2456 */ 2457 assert(n->host_hdr_len <= n->guest_hdr_len); 2458 if (n->host_hdr_len != n->guest_hdr_len) { 2459 unsigned sg_num = iov_copy(sg, ARRAY_SIZE(sg), 2460 out_sg, out_num, 2461 0, n->host_hdr_len); 2462 sg_num += iov_copy(sg + sg_num, ARRAY_SIZE(sg) - sg_num, 2463 out_sg, out_num, 2464 n->guest_hdr_len, -1); 2465 out_num = sg_num; 2466 out_sg = sg; 2467 } 2468 2469 ret = qemu_sendv_packet_async(qemu_get_subqueue(n->nic, queue_index), 2470 out_sg, out_num, virtio_net_tx_complete); 2471 if (ret == 0) { 2472 virtio_queue_set_notification(q->tx_vq, 0); 2473 q->async_tx.elem = elem; 2474 return -EBUSY; 2475 } 2476 2477 drop: 2478 virtqueue_push(q->tx_vq, elem, 0); 2479 virtio_notify(vdev, q->tx_vq); 2480 g_free(elem); 2481 2482 if (++num_packets >= n->tx_burst) { 2483 break; 2484 } 2485 } 2486 return num_packets; 2487 } 2488 2489 static void virtio_net_handle_tx_timer(VirtIODevice *vdev, VirtQueue *vq) 2490 { 2491 VirtIONet *n = VIRTIO_NET(vdev); 2492 VirtIONetQueue *q = &n->vqs[vq2q(virtio_get_queue_index(vq))]; 2493 2494 if (unlikely((n->status & VIRTIO_NET_S_LINK_UP) == 0)) { 2495 virtio_net_drop_tx_queue_data(vdev, vq); 2496 return; 2497 } 2498 2499 /* This happens when device was stopped but VCPU wasn't. */ 2500 if (!vdev->vm_running) { 2501 q->tx_waiting = 1; 2502 return; 2503 } 2504 2505 if (q->tx_waiting) { 2506 virtio_queue_set_notification(vq, 1); 2507 timer_del(q->tx_timer); 2508 q->tx_waiting = 0; 2509 if (virtio_net_flush_tx(q) == -EINVAL) { 2510 return; 2511 } 2512 } else { 2513 timer_mod(q->tx_timer, 2514 qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + n->tx_timeout); 2515 q->tx_waiting = 1; 2516 virtio_queue_set_notification(vq, 0); 2517 } 2518 } 2519 2520 static void virtio_net_handle_tx_bh(VirtIODevice *vdev, VirtQueue *vq) 2521 { 2522 VirtIONet *n = VIRTIO_NET(vdev); 2523 VirtIONetQueue *q = &n->vqs[vq2q(virtio_get_queue_index(vq))]; 2524 2525 if (unlikely((n->status & VIRTIO_NET_S_LINK_UP) == 0)) { 2526 virtio_net_drop_tx_queue_data(vdev, vq); 2527 return; 2528 } 2529 2530 if (unlikely(q->tx_waiting)) { 2531 return; 2532 } 2533 q->tx_waiting = 1; 2534 /* This happens when device was stopped but VCPU wasn't. */ 2535 if (!vdev->vm_running) { 2536 return; 2537 } 2538 virtio_queue_set_notification(vq, 0); 2539 qemu_bh_schedule(q->tx_bh); 2540 } 2541 2542 static void virtio_net_tx_timer(void *opaque) 2543 { 2544 VirtIONetQueue *q = opaque; 2545 VirtIONet *n = q->n; 2546 VirtIODevice *vdev = VIRTIO_DEVICE(n); 2547 /* This happens when device was stopped but BH wasn't. */ 2548 if (!vdev->vm_running) { 2549 /* Make sure tx waiting is set, so we'll run when restarted. 
*/ 2550 assert(q->tx_waiting); 2551 return; 2552 } 2553 2554 q->tx_waiting = 0; 2555 2556 /* Just in case the driver is not ready any more */ 2557 if (!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK)) { 2558 return; 2559 } 2560 2561 virtio_queue_set_notification(q->tx_vq, 1); 2562 virtio_net_flush_tx(q); 2563 } 2564 2565 static void virtio_net_tx_bh(void *opaque) 2566 { 2567 VirtIONetQueue *q = opaque; 2568 VirtIONet *n = q->n; 2569 VirtIODevice *vdev = VIRTIO_DEVICE(n); 2570 int32_t ret; 2571 2572 /* This happens when device was stopped but BH wasn't. */ 2573 if (!vdev->vm_running) { 2574 /* Make sure tx waiting is set, so we'll run when restarted. */ 2575 assert(q->tx_waiting); 2576 return; 2577 } 2578 2579 q->tx_waiting = 0; 2580 2581 /* Just in case the driver is not ready any more */ 2582 if (unlikely(!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK))) { 2583 return; 2584 } 2585 2586 ret = virtio_net_flush_tx(q); 2587 if (ret == -EBUSY || ret == -EINVAL) { 2588 return; /* Notification re-enable handled by tx_complete or device 2589 * broken */ 2590 } 2591 2592 /* If we flush a full burst of packets, assume there are 2593 * more coming and immediately reschedule */ 2594 if (ret >= n->tx_burst) { 2595 qemu_bh_schedule(q->tx_bh); 2596 q->tx_waiting = 1; 2597 return; 2598 } 2599 2600 /* If less than a full burst, re-enable notification and flush 2601 * anything that may have come in while we weren't looking. If 2602 * we find something, assume the guest is still active and reschedule */ 2603 virtio_queue_set_notification(q->tx_vq, 1); 2604 ret = virtio_net_flush_tx(q); 2605 if (ret == -EINVAL) { 2606 return; 2607 } else if (ret > 0) { 2608 virtio_queue_set_notification(q->tx_vq, 0); 2609 qemu_bh_schedule(q->tx_bh); 2610 q->tx_waiting = 1; 2611 } 2612 } 2613 2614 static void virtio_net_add_queue(VirtIONet *n, int index) 2615 { 2616 VirtIODevice *vdev = VIRTIO_DEVICE(n); 2617 2618 n->vqs[index].rx_vq = virtio_add_queue(vdev, n->net_conf.rx_queue_size, 2619 virtio_net_handle_rx); 2620 2621 if (n->net_conf.tx && !strcmp(n->net_conf.tx, "timer")) { 2622 n->vqs[index].tx_vq = 2623 virtio_add_queue(vdev, n->net_conf.tx_queue_size, 2624 virtio_net_handle_tx_timer); 2625 n->vqs[index].tx_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, 2626 virtio_net_tx_timer, 2627 &n->vqs[index]); 2628 } else { 2629 n->vqs[index].tx_vq = 2630 virtio_add_queue(vdev, n->net_conf.tx_queue_size, 2631 virtio_net_handle_tx_bh); 2632 n->vqs[index].tx_bh = qemu_bh_new(virtio_net_tx_bh, &n->vqs[index]); 2633 } 2634 2635 n->vqs[index].tx_waiting = 0; 2636 n->vqs[index].n = n; 2637 } 2638 2639 static void virtio_net_del_queue(VirtIONet *n, int index) 2640 { 2641 VirtIODevice *vdev = VIRTIO_DEVICE(n); 2642 VirtIONetQueue *q = &n->vqs[index]; 2643 NetClientState *nc = qemu_get_subqueue(n->nic, index); 2644 2645 qemu_purge_queued_packets(nc); 2646 2647 virtio_del_queue(vdev, index * 2); 2648 if (q->tx_timer) { 2649 timer_free(q->tx_timer); 2650 q->tx_timer = NULL; 2651 } else { 2652 qemu_bh_delete(q->tx_bh); 2653 q->tx_bh = NULL; 2654 } 2655 q->tx_waiting = 0; 2656 virtio_del_queue(vdev, index * 2 + 1); 2657 } 2658 2659 static void virtio_net_change_num_queues(VirtIONet *n, int new_max_queues) 2660 { 2661 VirtIODevice *vdev = VIRTIO_DEVICE(n); 2662 int old_num_queues = virtio_get_num_queues(vdev); 2663 int new_num_queues = new_max_queues * 2 + 1; 2664 int i; 2665 2666 assert(old_num_queues >= 3); 2667 assert(old_num_queues % 2 == 1); 2668 2669 if (old_num_queues == new_num_queues) { 2670 return; 2671 } 2672 2673 /* 2674 * We always need to remove
and add ctrl vq if 2675 * old_num_queues != new_num_queues. Remove ctrl_vq first, 2676 * and then we only enter one of the following two loops. 2677 */ 2678 virtio_del_queue(vdev, old_num_queues - 1); 2679 2680 for (i = new_num_queues - 1; i < old_num_queues - 1; i += 2) { 2681 /* new_num_queues < old_num_queues */ 2682 virtio_net_del_queue(n, i / 2); 2683 } 2684 2685 for (i = old_num_queues - 1; i < new_num_queues - 1; i += 2) { 2686 /* new_num_queues > old_num_queues */ 2687 virtio_net_add_queue(n, i / 2); 2688 } 2689 2690 /* add ctrl_vq last */ 2691 n->ctrl_vq = virtio_add_queue(vdev, 64, virtio_net_handle_ctrl); 2692 } 2693 2694 static void virtio_net_set_multiqueue(VirtIONet *n, int multiqueue) 2695 { 2696 int max = multiqueue ? n->max_queues : 1; 2697 2698 n->multiqueue = multiqueue; 2699 virtio_net_change_num_queues(n, max); 2700 2701 virtio_net_set_queues(n); 2702 } 2703 2704 static int virtio_net_post_load_device(void *opaque, int version_id) 2705 { 2706 VirtIONet *n = opaque; 2707 VirtIODevice *vdev = VIRTIO_DEVICE(n); 2708 int i, link_down; 2709 2710 trace_virtio_net_post_load_device(); 2711 virtio_net_set_mrg_rx_bufs(n, n->mergeable_rx_bufs, 2712 virtio_vdev_has_feature(vdev, 2713 VIRTIO_F_VERSION_1), 2714 virtio_vdev_has_feature(vdev, 2715 VIRTIO_NET_F_HASH_REPORT)); 2716 2717 /* MAC_TABLE_ENTRIES may be different from the saved image */ 2718 if (n->mac_table.in_use > MAC_TABLE_ENTRIES) { 2719 n->mac_table.in_use = 0; 2720 } 2721 2722 if (!virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS)) { 2723 n->curr_guest_offloads = virtio_net_supported_guest_offloads(n); 2724 } 2725 2726 /* 2727 * curr_guest_offloads will be later overwritten by the 2728 * virtio_set_features_nocheck call done from the virtio_load. 2729 * Here we make sure it is preserved and restored accordingly 2730 * in the virtio_net_post_load_virtio callback. 2731 */ 2732 n->saved_guest_offloads = n->curr_guest_offloads; 2733 2734 virtio_net_set_queues(n); 2735 2736 /* Find the first multicast entry in the saved MAC filter */ 2737 for (i = 0; i < n->mac_table.in_use; i++) { 2738 if (n->mac_table.macs[i * ETH_ALEN] & 1) { 2739 break; 2740 } 2741 } 2742 n->mac_table.first_multi = i; 2743 2744 /* nc.link_down can't be migrated, so infer link_down according 2745 * to link status bit in n->status */ 2746 link_down = (n->status & VIRTIO_NET_S_LINK_UP) == 0; 2747 for (i = 0; i < n->max_queues; i++) { 2748 qemu_get_subqueue(n->nic, i)->link_down = link_down; 2749 } 2750 2751 if (virtio_vdev_has_feature(vdev, VIRTIO_NET_F_GUEST_ANNOUNCE) && 2752 virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ)) { 2753 qemu_announce_timer_reset(&n->announce_timer, migrate_announce_params(), 2754 QEMU_CLOCK_VIRTUAL, 2755 virtio_net_announce_timer, n); 2756 if (n->announce_timer.round) { 2757 timer_mod(n->announce_timer.tm, 2758 qemu_clock_get_ms(n->announce_timer.type)); 2759 } else { 2760 qemu_announce_timer_del(&n->announce_timer, false); 2761 } 2762 } 2763 2764 if (n->rss_data.enabled) { 2765 trace_virtio_net_rss_enable(n->rss_data.hash_types, 2766 n->rss_data.indirections_len, 2767 sizeof(n->rss_data.key)); 2768 } else { 2769 trace_virtio_net_rss_disable(); 2770 } 2771 return 0; 2772 } 2773 2774 static int virtio_net_post_load_virtio(VirtIODevice *vdev) 2775 { 2776 VirtIONet *n = VIRTIO_NET(vdev); 2777 /* 2778 * The actual needed state is now in saved_guest_offloads, 2779 * see virtio_net_post_load_device for detail. 2780 * Restore it back and apply the desired offloads. 
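 * The offloads are only pushed down to the backend below when the peer
 * provides a vnet header; otherwise there is nothing to apply.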
2781 */ 2782 n->curr_guest_offloads = n->saved_guest_offloads; 2783 if (peer_has_vnet_hdr(n)) { 2784 virtio_net_apply_guest_offloads(n); 2785 } 2786 2787 return 0; 2788 } 2789 2790 /* tx_waiting field of a VirtIONetQueue */ 2791 static const VMStateDescription vmstate_virtio_net_queue_tx_waiting = { 2792 .name = "virtio-net-queue-tx_waiting", 2793 .fields = (VMStateField[]) { 2794 VMSTATE_UINT32(tx_waiting, VirtIONetQueue), 2795 VMSTATE_END_OF_LIST() 2796 }, 2797 }; 2798 2799 static bool max_queues_gt_1(void *opaque, int version_id) 2800 { 2801 return VIRTIO_NET(opaque)->max_queues > 1; 2802 } 2803 2804 static bool has_ctrl_guest_offloads(void *opaque, int version_id) 2805 { 2806 return virtio_vdev_has_feature(VIRTIO_DEVICE(opaque), 2807 VIRTIO_NET_F_CTRL_GUEST_OFFLOADS); 2808 } 2809 2810 static bool mac_table_fits(void *opaque, int version_id) 2811 { 2812 return VIRTIO_NET(opaque)->mac_table.in_use <= MAC_TABLE_ENTRIES; 2813 } 2814 2815 static bool mac_table_doesnt_fit(void *opaque, int version_id) 2816 { 2817 return !mac_table_fits(opaque, version_id); 2818 } 2819 2820 /* This temporary type is shared by all the WITH_TMP methods 2821 * although only some fields are used by each. 2822 */ 2823 struct VirtIONetMigTmp { 2824 VirtIONet *parent; 2825 VirtIONetQueue *vqs_1; 2826 uint16_t curr_queues_1; 2827 uint8_t has_ufo; 2828 uint32_t has_vnet_hdr; 2829 }; 2830 2831 /* The 2nd and subsequent tx_waiting flags are loaded later than 2832 * the 1st entry in the queues and only if there's more than one 2833 * entry. We use the tmp mechanism to calculate a temporary 2834 * pointer and count and also validate the count. 2835 */ 2836 2837 static int virtio_net_tx_waiting_pre_save(void *opaque) 2838 { 2839 struct VirtIONetMigTmp *tmp = opaque; 2840 2841 tmp->vqs_1 = tmp->parent->vqs + 1; 2842 tmp->curr_queues_1 = tmp->parent->curr_queues - 1; 2843 if (tmp->parent->curr_queues == 0) { 2844 tmp->curr_queues_1 = 0; 2845 } 2846 2847 return 0; 2848 } 2849 2850 static int virtio_net_tx_waiting_pre_load(void *opaque) 2851 { 2852 struct VirtIONetMigTmp *tmp = opaque; 2853 2854 /* Reuse the pointer setup from save */ 2855 virtio_net_tx_waiting_pre_save(opaque); 2856 2857 if (tmp->parent->curr_queues > tmp->parent->max_queues) { 2858 error_report("virtio-net: curr_queues %x > max_queues %x", 2859 tmp->parent->curr_queues, tmp->parent->max_queues); 2860 2861 return -EINVAL; 2862 } 2863 2864 return 0; /* all good */ 2865 } 2866 2867 static const VMStateDescription vmstate_virtio_net_tx_waiting = { 2868 .name = "virtio-net-tx_waiting", 2869 .pre_load = virtio_net_tx_waiting_pre_load, 2870 .pre_save = virtio_net_tx_waiting_pre_save, 2871 .fields = (VMStateField[]) { 2872 VMSTATE_STRUCT_VARRAY_POINTER_UINT16(vqs_1, struct VirtIONetMigTmp, 2873 curr_queues_1, 2874 vmstate_virtio_net_queue_tx_waiting, 2875 struct VirtIONetQueue), 2876 VMSTATE_END_OF_LIST() 2877 }, 2878 }; 2879 2880 /* the 'has_ufo' flag is just tested; if the incoming stream has the 2881 * flag set we need to check that we have it 2882 */ 2883 static int virtio_net_ufo_post_load(void *opaque, int version_id) 2884 { 2885 struct VirtIONetMigTmp *tmp = opaque; 2886 2887 if (tmp->has_ufo && !peer_has_ufo(tmp->parent)) { 2888 error_report("virtio-net: saved image requires TUN_F_UFO support"); 2889 return -EINVAL; 2890 } 2891 2892 return 0; 2893 } 2894 2895 static int virtio_net_ufo_pre_save(void *opaque) 2896 { 2897 struct VirtIONetMigTmp *tmp = opaque; 2898 2899 tmp->has_ufo = tmp->parent->has_ufo; 2900 2901 return 0; 2902 } 2903 2904 static const 
VMStateDescription vmstate_virtio_net_has_ufo = { 2905 .name = "virtio-net-ufo", 2906 .post_load = virtio_net_ufo_post_load, 2907 .pre_save = virtio_net_ufo_pre_save, 2908 .fields = (VMStateField[]) { 2909 VMSTATE_UINT8(has_ufo, struct VirtIONetMigTmp), 2910 VMSTATE_END_OF_LIST() 2911 }, 2912 }; 2913 2914 /* the 'has_vnet_hdr' flag is just tested; if the incoming stream has the 2915 * flag set we need to check that we have it 2916 */ 2917 static int virtio_net_vnet_post_load(void *opaque, int version_id) 2918 { 2919 struct VirtIONetMigTmp *tmp = opaque; 2920 2921 if (tmp->has_vnet_hdr && !peer_has_vnet_hdr(tmp->parent)) { 2922 error_report("virtio-net: saved image requires vnet_hdr=on"); 2923 return -EINVAL; 2924 } 2925 2926 return 0; 2927 } 2928 2929 static int virtio_net_vnet_pre_save(void *opaque) 2930 { 2931 struct VirtIONetMigTmp *tmp = opaque; 2932 2933 tmp->has_vnet_hdr = tmp->parent->has_vnet_hdr; 2934 2935 return 0; 2936 } 2937 2938 static const VMStateDescription vmstate_virtio_net_has_vnet = { 2939 .name = "virtio-net-vnet", 2940 .post_load = virtio_net_vnet_post_load, 2941 .pre_save = virtio_net_vnet_pre_save, 2942 .fields = (VMStateField[]) { 2943 VMSTATE_UINT32(has_vnet_hdr, struct VirtIONetMigTmp), 2944 VMSTATE_END_OF_LIST() 2945 }, 2946 }; 2947 2948 static bool virtio_net_rss_needed(void *opaque) 2949 { 2950 return VIRTIO_NET(opaque)->rss_data.enabled; 2951 } 2952 2953 static const VMStateDescription vmstate_virtio_net_rss = { 2954 .name = "virtio-net-device/rss", 2955 .version_id = 1, 2956 .minimum_version_id = 1, 2957 .needed = virtio_net_rss_needed, 2958 .fields = (VMStateField[]) { 2959 VMSTATE_BOOL(rss_data.enabled, VirtIONet), 2960 VMSTATE_BOOL(rss_data.redirect, VirtIONet), 2961 VMSTATE_BOOL(rss_data.populate_hash, VirtIONet), 2962 VMSTATE_UINT32(rss_data.hash_types, VirtIONet), 2963 VMSTATE_UINT16(rss_data.indirections_len, VirtIONet), 2964 VMSTATE_UINT16(rss_data.default_queue, VirtIONet), 2965 VMSTATE_UINT8_ARRAY(rss_data.key, VirtIONet, 2966 VIRTIO_NET_RSS_MAX_KEY_SIZE), 2967 VMSTATE_VARRAY_UINT16_ALLOC(rss_data.indirections_table, VirtIONet, 2968 rss_data.indirections_len, 0, 2969 vmstate_info_uint16, uint16_t), 2970 VMSTATE_END_OF_LIST() 2971 }, 2972 }; 2973 2974 static const VMStateDescription vmstate_virtio_net_device = { 2975 .name = "virtio-net-device", 2976 .version_id = VIRTIO_NET_VM_VERSION, 2977 .minimum_version_id = VIRTIO_NET_VM_VERSION, 2978 .post_load = virtio_net_post_load_device, 2979 .fields = (VMStateField[]) { 2980 VMSTATE_UINT8_ARRAY(mac, VirtIONet, ETH_ALEN), 2981 VMSTATE_STRUCT_POINTER(vqs, VirtIONet, 2982 vmstate_virtio_net_queue_tx_waiting, 2983 VirtIONetQueue), 2984 VMSTATE_UINT32(mergeable_rx_bufs, VirtIONet), 2985 VMSTATE_UINT16(status, VirtIONet), 2986 VMSTATE_UINT8(promisc, VirtIONet), 2987 VMSTATE_UINT8(allmulti, VirtIONet), 2988 VMSTATE_UINT32(mac_table.in_use, VirtIONet), 2989 2990 /* Guarded pair: If it fits we load it, else we throw it away 2991 * - can happen if source has a larger MAC table.; post-load 2992 * sets flags in this case. 2993 */ 2994 VMSTATE_VBUFFER_MULTIPLY(mac_table.macs, VirtIONet, 2995 0, mac_table_fits, mac_table.in_use, 2996 ETH_ALEN), 2997 VMSTATE_UNUSED_VARRAY_UINT32(VirtIONet, mac_table_doesnt_fit, 0, 2998 mac_table.in_use, ETH_ALEN), 2999 3000 /* Note: This is an array of uint32's that's always been saved as a 3001 * buffer; hold onto your endiannesses; it's actually used as a bitmap 3002 * but based on the uint. 
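 * The buffer is MAX_VLAN >> 3 bytes long, i.e. one bit for each of the
 * 4096 possible VLAN ids.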
3003 */ 3004 VMSTATE_BUFFER_POINTER_UNSAFE(vlans, VirtIONet, 0, MAX_VLAN >> 3), 3005 VMSTATE_WITH_TMP(VirtIONet, struct VirtIONetMigTmp, 3006 vmstate_virtio_net_has_vnet), 3007 VMSTATE_UINT8(mac_table.multi_overflow, VirtIONet), 3008 VMSTATE_UINT8(mac_table.uni_overflow, VirtIONet), 3009 VMSTATE_UINT8(alluni, VirtIONet), 3010 VMSTATE_UINT8(nomulti, VirtIONet), 3011 VMSTATE_UINT8(nouni, VirtIONet), 3012 VMSTATE_UINT8(nobcast, VirtIONet), 3013 VMSTATE_WITH_TMP(VirtIONet, struct VirtIONetMigTmp, 3014 vmstate_virtio_net_has_ufo), 3015 VMSTATE_SINGLE_TEST(max_queues, VirtIONet, max_queues_gt_1, 0, 3016 vmstate_info_uint16_equal, uint16_t), 3017 VMSTATE_UINT16_TEST(curr_queues, VirtIONet, max_queues_gt_1), 3018 VMSTATE_WITH_TMP(VirtIONet, struct VirtIONetMigTmp, 3019 vmstate_virtio_net_tx_waiting), 3020 VMSTATE_UINT64_TEST(curr_guest_offloads, VirtIONet, 3021 has_ctrl_guest_offloads), 3022 VMSTATE_END_OF_LIST() 3023 }, 3024 .subsections = (const VMStateDescription * []) { 3025 &vmstate_virtio_net_rss, 3026 NULL 3027 } 3028 }; 3029 3030 static NetClientInfo net_virtio_info = { 3031 .type = NET_CLIENT_DRIVER_NIC, 3032 .size = sizeof(NICState), 3033 .can_receive = virtio_net_can_receive, 3034 .receive = virtio_net_receive, 3035 .link_status_changed = virtio_net_set_link_status, 3036 .query_rx_filter = virtio_net_query_rxfilter, 3037 .announce = virtio_net_announce, 3038 }; 3039 3040 static bool virtio_net_guest_notifier_pending(VirtIODevice *vdev, int idx) 3041 { 3042 VirtIONet *n = VIRTIO_NET(vdev); 3043 NetClientState *nc = qemu_get_subqueue(n->nic, vq2q(idx)); 3044 assert(n->vhost_started); 3045 return vhost_net_virtqueue_pending(get_vhost_net(nc->peer), idx); 3046 } 3047 3048 static void virtio_net_guest_notifier_mask(VirtIODevice *vdev, int idx, 3049 bool mask) 3050 { 3051 VirtIONet *n = VIRTIO_NET(vdev); 3052 NetClientState *nc = qemu_get_subqueue(n->nic, vq2q(idx)); 3053 assert(n->vhost_started); 3054 vhost_net_virtqueue_mask(get_vhost_net(nc->peer), 3055 vdev, idx, mask); 3056 } 3057 3058 static void virtio_net_set_config_size(VirtIONet *n, uint64_t host_features) 3059 { 3060 virtio_add_feature(&host_features, VIRTIO_NET_F_MAC); 3061 3062 n->config_size = virtio_feature_get_config_size(feature_sizes, 3063 host_features); 3064 } 3065 3066 void virtio_net_set_netclient_name(VirtIONet *n, const char *name, 3067 const char *type) 3068 { 3069 /* 3070 * The name can be NULL, the netclient name will be type.x. 
3071 */ 3072 assert(type != NULL); 3073 3074 g_free(n->netclient_name); 3075 g_free(n->netclient_type); 3076 n->netclient_name = g_strdup(name); 3077 n->netclient_type = g_strdup(type); 3078 } 3079 3080 static bool failover_unplug_primary(VirtIONet *n, DeviceState *dev) 3081 { 3082 HotplugHandler *hotplug_ctrl; 3083 PCIDevice *pci_dev; 3084 Error *err = NULL; 3085 3086 hotplug_ctrl = qdev_get_hotplug_handler(dev); 3087 if (hotplug_ctrl) { 3088 pci_dev = PCI_DEVICE(dev); 3089 pci_dev->partially_hotplugged = true; 3090 hotplug_handler_unplug_request(hotplug_ctrl, dev, &err); 3091 if (err) { 3092 error_report_err(err); 3093 return false; 3094 } 3095 } else { 3096 return false; 3097 } 3098 return true; 3099 } 3100 3101 static bool failover_replug_primary(VirtIONet *n, DeviceState *dev, 3102 Error **errp) 3103 { 3104 Error *err = NULL; 3105 HotplugHandler *hotplug_ctrl; 3106 PCIDevice *pdev = PCI_DEVICE(dev); 3107 BusState *primary_bus; 3108 3109 if (!pdev->partially_hotplugged) { 3110 return true; 3111 } 3112 primary_bus = dev->parent_bus; 3113 if (!primary_bus) { 3114 error_setg(errp, "virtio_net: couldn't find primary bus"); 3115 return false; 3116 } 3117 qdev_set_parent_bus(dev, primary_bus, &error_abort); 3118 qatomic_set(&n->failover_primary_hidden, false); 3119 hotplug_ctrl = qdev_get_hotplug_handler(dev); 3120 if (hotplug_ctrl) { 3121 hotplug_handler_pre_plug(hotplug_ctrl, dev, &err); 3122 if (err) { 3123 goto out; 3124 } 3125 hotplug_handler_plug(hotplug_ctrl, dev, &err); 3126 } 3127 3128 out: 3129 error_propagate(errp, err); 3130 return !err; 3131 } 3132 3133 static void virtio_net_handle_migration_primary(VirtIONet *n, MigrationState *s) 3134 { 3135 bool should_be_hidden; 3136 Error *err = NULL; 3137 DeviceState *dev = failover_find_primary_device(n); 3138 3139 if (!dev) { 3140 return; 3141 } 3142 3143 should_be_hidden = qatomic_read(&n->failover_primary_hidden); 3144 3145 if (migration_in_setup(s) && !should_be_hidden) { 3146 if (failover_unplug_primary(n, dev)) { 3147 vmstate_unregister(VMSTATE_IF(dev), qdev_get_vmsd(dev), dev); 3148 qapi_event_send_unplug_primary(dev->id); 3149 qatomic_set(&n->failover_primary_hidden, true); 3150 } else { 3151 warn_report("couldn't unplug primary device"); 3152 } 3153 } else if (migration_has_failed(s)) { 3154 /* We already unplugged the device let's plug it back */ 3155 if (!failover_replug_primary(n, dev, &err)) { 3156 if (err) { 3157 error_report_err(err); 3158 } 3159 } 3160 } 3161 } 3162 3163 static void virtio_net_migration_state_notifier(Notifier *notifier, void *data) 3164 { 3165 MigrationState *s = data; 3166 VirtIONet *n = container_of(notifier, VirtIONet, migration_state); 3167 virtio_net_handle_migration_primary(n, s); 3168 } 3169 3170 static bool failover_hide_primary_device(DeviceListener *listener, 3171 QemuOpts *device_opts) 3172 { 3173 VirtIONet *n = container_of(listener, VirtIONet, primary_listener); 3174 const char *standby_id; 3175 3176 if (!device_opts) { 3177 return false; 3178 } 3179 standby_id = qemu_opt_get(device_opts, "failover_pair_id"); 3180 if (g_strcmp0(standby_id, n->netclient_name) != 0) { 3181 return false; 3182 } 3183 3184 /* failover_primary_hidden is set during feature negotiation */ 3185 return qatomic_read(&n->failover_primary_hidden); 3186 } 3187 3188 static void virtio_net_device_realize(DeviceState *dev, Error **errp) 3189 { 3190 VirtIODevice *vdev = VIRTIO_DEVICE(dev); 3191 VirtIONet *n = VIRTIO_NET(dev); 3192 NetClientState *nc; 3193 int i; 3194 3195 if (n->net_conf.mtu) { 3196 n->host_features |= (1ULL 
<< VIRTIO_NET_F_MTU); 3197 } 3198 3199 if (n->net_conf.duplex_str) { 3200 if (strncmp(n->net_conf.duplex_str, "half", 5) == 0) { 3201 n->net_conf.duplex = DUPLEX_HALF; 3202 } else if (strncmp(n->net_conf.duplex_str, "full", 5) == 0) { 3203 n->net_conf.duplex = DUPLEX_FULL; 3204 } else { 3205 error_setg(errp, "'duplex' must be 'half' or 'full'"); 3206 return; 3207 } 3208 n->host_features |= (1ULL << VIRTIO_NET_F_SPEED_DUPLEX); 3209 } else { 3210 n->net_conf.duplex = DUPLEX_UNKNOWN; 3211 } 3212 3213 if (n->net_conf.speed < SPEED_UNKNOWN) { 3214 error_setg(errp, "'speed' must be between 0 and INT_MAX"); 3215 return; 3216 } 3217 if (n->net_conf.speed >= 0) { 3218 n->host_features |= (1ULL << VIRTIO_NET_F_SPEED_DUPLEX); 3219 } 3220 3221 if (n->failover) { 3222 n->primary_listener.hide_device = failover_hide_primary_device; 3223 qatomic_set(&n->failover_primary_hidden, true); 3224 device_listener_register(&n->primary_listener); 3225 n->migration_state.notify = virtio_net_migration_state_notifier; 3226 add_migration_state_change_notifier(&n->migration_state); 3227 n->host_features |= (1ULL << VIRTIO_NET_F_STANDBY); 3228 } 3229 3230 virtio_net_set_config_size(n, n->host_features); 3231 virtio_init(vdev, "virtio-net", VIRTIO_ID_NET, n->config_size); 3232 3233 /* 3234 * We set a lower limit on RX queue size to what it always was. 3235 * Guests that want a smaller ring can always resize it without 3236 * help from us (using virtio 1 and up). 3237 */ 3238 if (n->net_conf.rx_queue_size < VIRTIO_NET_RX_QUEUE_MIN_SIZE || 3239 n->net_conf.rx_queue_size > VIRTQUEUE_MAX_SIZE || 3240 !is_power_of_2(n->net_conf.rx_queue_size)) { 3241 error_setg(errp, "Invalid rx_queue_size (= %" PRIu16 "), " 3242 "must be a power of 2 between %d and %d.", 3243 n->net_conf.rx_queue_size, VIRTIO_NET_RX_QUEUE_MIN_SIZE, 3244 VIRTQUEUE_MAX_SIZE); 3245 virtio_cleanup(vdev); 3246 return; 3247 } 3248 3249 if (n->net_conf.tx_queue_size < VIRTIO_NET_TX_QUEUE_MIN_SIZE || 3250 n->net_conf.tx_queue_size > VIRTQUEUE_MAX_SIZE || 3251 !is_power_of_2(n->net_conf.tx_queue_size)) { 3252 error_setg(errp, "Invalid tx_queue_size (= %" PRIu16 "), " 3253 "must be a power of 2 between %d and %d", 3254 n->net_conf.tx_queue_size, VIRTIO_NET_TX_QUEUE_MIN_SIZE, 3255 VIRTQUEUE_MAX_SIZE); 3256 virtio_cleanup(vdev); 3257 return; 3258 } 3259 3260 n->max_queues = MAX(n->nic_conf.peers.queues, 1); 3261 if (n->max_queues * 2 + 1 > VIRTIO_QUEUE_MAX) { 3262 error_setg(errp, "Invalid number of queues (= %" PRIu32 "), " 3263 "must be a positive integer less than %d.", 3264 n->max_queues, (VIRTIO_QUEUE_MAX - 1) / 2); 3265 virtio_cleanup(vdev); 3266 return; 3267 } 3268 n->vqs = g_malloc0(sizeof(VirtIONetQueue) * n->max_queues); 3269 n->curr_queues = 1; 3270 n->tx_timeout = n->net_conf.txtimer; 3271 3272 if (n->net_conf.tx && strcmp(n->net_conf.tx, "timer") 3273 && strcmp(n->net_conf.tx, "bh")) { 3274 warn_report("virtio-net: " 3275 "Unknown option tx=%s, valid options: \"timer\" \"bh\"", 3276 n->net_conf.tx); 3277 error_printf("Defaulting to \"bh\""); 3278 } 3279 3280 n->net_conf.tx_queue_size = MIN(virtio_net_max_tx_queue_size(n), 3281 n->net_conf.tx_queue_size); 3282 3283 for (i = 0; i < n->max_queues; i++) { 3284 virtio_net_add_queue(n, i); 3285 } 3286 3287 n->ctrl_vq = virtio_add_queue(vdev, 64, virtio_net_handle_ctrl); 3288 qemu_macaddr_default_if_unset(&n->nic_conf.macaddr); 3289 memcpy(&n->mac[0], &n->nic_conf.macaddr, sizeof(n->mac)); 3290 n->status = VIRTIO_NET_S_LINK_UP; 3291 qemu_announce_timer_reset(&n->announce_timer, migrate_announce_params(), 3292 
QEMU_CLOCK_VIRTUAL, 3293 virtio_net_announce_timer, n); 3294 n->announce_timer.round = 0; 3295 3296 if (n->netclient_type) { 3297 /* 3298 * Happens when virtio_net_set_netclient_name has been called. 3299 */ 3300 n->nic = qemu_new_nic(&net_virtio_info, &n->nic_conf, 3301 n->netclient_type, n->netclient_name, n); 3302 } else { 3303 n->nic = qemu_new_nic(&net_virtio_info, &n->nic_conf, 3304 object_get_typename(OBJECT(dev)), dev->id, n); 3305 } 3306 3307 peer_test_vnet_hdr(n); 3308 if (peer_has_vnet_hdr(n)) { 3309 for (i = 0; i < n->max_queues; i++) { 3310 qemu_using_vnet_hdr(qemu_get_subqueue(n->nic, i)->peer, true); 3311 } 3312 n->host_hdr_len = sizeof(struct virtio_net_hdr); 3313 } else { 3314 n->host_hdr_len = 0; 3315 } 3316 3317 qemu_format_nic_info_str(qemu_get_queue(n->nic), n->nic_conf.macaddr.a); 3318 3319 n->vqs[0].tx_waiting = 0; 3320 n->tx_burst = n->net_conf.txburst; 3321 virtio_net_set_mrg_rx_bufs(n, 0, 0, 0); 3322 n->promisc = 1; /* for compatibility */ 3323 3324 n->mac_table.macs = g_malloc0(MAC_TABLE_ENTRIES * ETH_ALEN); 3325 3326 n->vlans = g_malloc0(MAX_VLAN >> 3); 3327 3328 nc = qemu_get_queue(n->nic); 3329 nc->rxfilter_notify_enabled = 1; 3330 3331 if (nc->peer && nc->peer->info->type == NET_CLIENT_DRIVER_VHOST_VDPA) { 3332 struct virtio_net_config netcfg = {}; 3333 memcpy(&netcfg.mac, &n->nic_conf.macaddr, ETH_ALEN); 3334 vhost_net_set_config(get_vhost_net(nc->peer), 3335 (uint8_t *)&netcfg, 0, ETH_ALEN, VHOST_SET_CONFIG_TYPE_MASTER); 3336 } 3337 QTAILQ_INIT(&n->rsc_chains); 3338 n->qdev = dev; 3339 3340 net_rx_pkt_init(&n->rx_pkt, false); 3341 } 3342 3343 static void virtio_net_device_unrealize(DeviceState *dev) 3344 { 3345 VirtIODevice *vdev = VIRTIO_DEVICE(dev); 3346 VirtIONet *n = VIRTIO_NET(dev); 3347 int i, max_queues; 3348 3349 /* This will stop vhost backend if appropriate. */ 3350 virtio_net_set_status(vdev, 0); 3351 3352 g_free(n->netclient_name); 3353 n->netclient_name = NULL; 3354 g_free(n->netclient_type); 3355 n->netclient_type = NULL; 3356 3357 g_free(n->mac_table.macs); 3358 g_free(n->vlans); 3359 3360 if (n->failover) { 3361 device_listener_unregister(&n->primary_listener); 3362 } 3363 3364 max_queues = n->multiqueue ? n->max_queues : 1; 3365 for (i = 0; i < max_queues; i++) { 3366 virtio_net_del_queue(n, i); 3367 } 3368 /* also delete the control vq */ 3369 virtio_del_queue(vdev, max_queues * 2); 3370 qemu_announce_timer_del(&n->announce_timer, false); 3371 g_free(n->vqs); 3372 qemu_del_nic(n->nic); 3373 virtio_net_rsc_cleanup(n); 3374 g_free(n->rss_data.indirections_table); 3375 net_rx_pkt_uninit(n->rx_pkt); 3376 virtio_cleanup(vdev); 3377 } 3378 3379 static void virtio_net_instance_init(Object *obj) 3380 { 3381 VirtIONet *n = VIRTIO_NET(obj); 3382 3383 /* 3384 * The default config_size is sizeof(struct virtio_net_config). 3385 * Can be overridden with virtio_net_set_config_size. 3386 */ 3387 n->config_size = sizeof(struct virtio_net_config); 3388 device_add_bootindex_property(obj, &n->nic_conf.bootindex, 3389 "bootindex", "/ethernet-phy@0", 3390 DEVICE(n)); 3391 } 3392 3393 static int virtio_net_pre_save(void *opaque) 3394 { 3395 VirtIONet *n = opaque; 3396 3397 /* At this point, the backend must be stopped, otherwise 3398 * it might keep writing to memory.
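 * The assertion below checks exactly that: vhost must already have been
 * stopped (vhost_started cleared) before the device state is saved.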
*/ 3399 assert(!n->vhost_started); 3400 3401 return 0; 3402 } 3403 3404 static bool primary_unplug_pending(void *opaque) 3405 { 3406 DeviceState *dev = opaque; 3407 DeviceState *primary; 3408 VirtIODevice *vdev = VIRTIO_DEVICE(dev); 3409 VirtIONet *n = VIRTIO_NET(vdev); 3410 3411 if (!virtio_vdev_has_feature(vdev, VIRTIO_NET_F_STANDBY)) { 3412 return false; 3413 } 3414 primary = failover_find_primary_device(n); 3415 return primary ? primary->pending_deleted_event : false; 3416 } 3417 3418 static bool dev_unplug_pending(void *opaque) 3419 { 3420 DeviceState *dev = opaque; 3421 VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(dev); 3422 3423 return vdc->primary_unplug_pending(dev); 3424 } 3425 3426 static const VMStateDescription vmstate_virtio_net = { 3427 .name = "virtio-net", 3428 .minimum_version_id = VIRTIO_NET_VM_VERSION, 3429 .version_id = VIRTIO_NET_VM_VERSION, 3430 .fields = (VMStateField[]) { 3431 VMSTATE_VIRTIO_DEVICE, 3432 VMSTATE_END_OF_LIST() 3433 }, 3434 .pre_save = virtio_net_pre_save, 3435 .dev_unplug_pending = dev_unplug_pending, 3436 }; 3437 3438 static Property virtio_net_properties[] = { 3439 DEFINE_PROP_BIT64("csum", VirtIONet, host_features, 3440 VIRTIO_NET_F_CSUM, true), 3441 DEFINE_PROP_BIT64("guest_csum", VirtIONet, host_features, 3442 VIRTIO_NET_F_GUEST_CSUM, true), 3443 DEFINE_PROP_BIT64("gso", VirtIONet, host_features, VIRTIO_NET_F_GSO, true), 3444 DEFINE_PROP_BIT64("guest_tso4", VirtIONet, host_features, 3445 VIRTIO_NET_F_GUEST_TSO4, true), 3446 DEFINE_PROP_BIT64("guest_tso6", VirtIONet, host_features, 3447 VIRTIO_NET_F_GUEST_TSO6, true), 3448 DEFINE_PROP_BIT64("guest_ecn", VirtIONet, host_features, 3449 VIRTIO_NET_F_GUEST_ECN, true), 3450 DEFINE_PROP_BIT64("guest_ufo", VirtIONet, host_features, 3451 VIRTIO_NET_F_GUEST_UFO, true), 3452 DEFINE_PROP_BIT64("guest_announce", VirtIONet, host_features, 3453 VIRTIO_NET_F_GUEST_ANNOUNCE, true), 3454 DEFINE_PROP_BIT64("host_tso4", VirtIONet, host_features, 3455 VIRTIO_NET_F_HOST_TSO4, true), 3456 DEFINE_PROP_BIT64("host_tso6", VirtIONet, host_features, 3457 VIRTIO_NET_F_HOST_TSO6, true), 3458 DEFINE_PROP_BIT64("host_ecn", VirtIONet, host_features, 3459 VIRTIO_NET_F_HOST_ECN, true), 3460 DEFINE_PROP_BIT64("host_ufo", VirtIONet, host_features, 3461 VIRTIO_NET_F_HOST_UFO, true), 3462 DEFINE_PROP_BIT64("mrg_rxbuf", VirtIONet, host_features, 3463 VIRTIO_NET_F_MRG_RXBUF, true), 3464 DEFINE_PROP_BIT64("status", VirtIONet, host_features, 3465 VIRTIO_NET_F_STATUS, true), 3466 DEFINE_PROP_BIT64("ctrl_vq", VirtIONet, host_features, 3467 VIRTIO_NET_F_CTRL_VQ, true), 3468 DEFINE_PROP_BIT64("ctrl_rx", VirtIONet, host_features, 3469 VIRTIO_NET_F_CTRL_RX, true), 3470 DEFINE_PROP_BIT64("ctrl_vlan", VirtIONet, host_features, 3471 VIRTIO_NET_F_CTRL_VLAN, true), 3472 DEFINE_PROP_BIT64("ctrl_rx_extra", VirtIONet, host_features, 3473 VIRTIO_NET_F_CTRL_RX_EXTRA, true), 3474 DEFINE_PROP_BIT64("ctrl_mac_addr", VirtIONet, host_features, 3475 VIRTIO_NET_F_CTRL_MAC_ADDR, true), 3476 DEFINE_PROP_BIT64("ctrl_guest_offloads", VirtIONet, host_features, 3477 VIRTIO_NET_F_CTRL_GUEST_OFFLOADS, true), 3478 DEFINE_PROP_BIT64("mq", VirtIONet, host_features, VIRTIO_NET_F_MQ, false), 3479 DEFINE_PROP_BIT64("rss", VirtIONet, host_features, 3480 VIRTIO_NET_F_RSS, false), 3481 DEFINE_PROP_BIT64("hash", VirtIONet, host_features, 3482 VIRTIO_NET_F_HASH_REPORT, false), 3483 DEFINE_PROP_BIT64("guest_rsc_ext", VirtIONet, host_features, 3484 VIRTIO_NET_F_RSC_EXT, false), 3485 DEFINE_PROP_UINT32("rsc_interval", VirtIONet, rsc_timeout, 3486 
VIRTIO_NET_RSC_DEFAULT_INTERVAL), 3487 DEFINE_NIC_PROPERTIES(VirtIONet, nic_conf), 3488 DEFINE_PROP_UINT32("x-txtimer", VirtIONet, net_conf.txtimer, 3489 TX_TIMER_INTERVAL), 3490 DEFINE_PROP_INT32("x-txburst", VirtIONet, net_conf.txburst, TX_BURST), 3491 DEFINE_PROP_STRING("tx", VirtIONet, net_conf.tx), 3492 DEFINE_PROP_UINT16("rx_queue_size", VirtIONet, net_conf.rx_queue_size, 3493 VIRTIO_NET_RX_QUEUE_DEFAULT_SIZE), 3494 DEFINE_PROP_UINT16("tx_queue_size", VirtIONet, net_conf.tx_queue_size, 3495 VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE), 3496 DEFINE_PROP_UINT16("host_mtu", VirtIONet, net_conf.mtu, 0), 3497 DEFINE_PROP_BOOL("x-mtu-bypass-backend", VirtIONet, mtu_bypass_backend, 3498 true), 3499 DEFINE_PROP_INT32("speed", VirtIONet, net_conf.speed, SPEED_UNKNOWN), 3500 DEFINE_PROP_STRING("duplex", VirtIONet, net_conf.duplex_str), 3501 DEFINE_PROP_BOOL("failover", VirtIONet, failover, false), 3502 DEFINE_PROP_END_OF_LIST(), 3503 }; 3504 3505 static void virtio_net_class_init(ObjectClass *klass, void *data) 3506 { 3507 DeviceClass *dc = DEVICE_CLASS(klass); 3508 VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass); 3509 3510 device_class_set_props(dc, virtio_net_properties); 3511 dc->vmsd = &vmstate_virtio_net; 3512 set_bit(DEVICE_CATEGORY_NETWORK, dc->categories); 3513 vdc->realize = virtio_net_device_realize; 3514 vdc->unrealize = virtio_net_device_unrealize; 3515 vdc->get_config = virtio_net_get_config; 3516 vdc->set_config = virtio_net_set_config; 3517 vdc->get_features = virtio_net_get_features; 3518 vdc->set_features = virtio_net_set_features; 3519 vdc->bad_features = virtio_net_bad_features; 3520 vdc->reset = virtio_net_reset; 3521 vdc->set_status = virtio_net_set_status; 3522 vdc->guest_notifier_mask = virtio_net_guest_notifier_mask; 3523 vdc->guest_notifier_pending = virtio_net_guest_notifier_pending; 3524 vdc->legacy_features |= (0x1 << VIRTIO_NET_F_GSO); 3525 vdc->post_load = virtio_net_post_load_virtio; 3526 vdc->vmsd = &vmstate_virtio_net_device; 3527 vdc->primary_unplug_pending = primary_unplug_pending; 3528 } 3529 3530 static const TypeInfo virtio_net_info = { 3531 .name = TYPE_VIRTIO_NET, 3532 .parent = TYPE_VIRTIO_DEVICE, 3533 .instance_size = sizeof(VirtIONet), 3534 .instance_init = virtio_net_instance_init, 3535 .class_init = virtio_net_class_init, 3536 }; 3537 3538 static void virtio_register_types(void) 3539 { 3540 type_register_static(&virtio_net_info); 3541 } 3542 3543 type_init(virtio_register_types) 3544
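
/*
 * Illustrative usage (not part of this file): the properties defined above
 * are normally set through the virtio-net-pci front end on the command line,
 * for example:
 *
 *   -netdev tap,id=net0,queues=4,vhost=on \
 *   -device virtio-net-pci,netdev=net0,mq=on,rx_queue_size=1024,tx_queue_size=1024,speed=10000,duplex=full
 *
 * rx_queue_size/tx_queue_size must be powers of 2 within the limits checked
 * in virtio_net_device_realize(), and mq=on is only useful when the backing
 * netdev exposes multiple queues.
 */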