/*
 * Virtio Network Device
 *
 * Copyright IBM, Corp. 2007
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "qemu/iov.h"
#include "hw/virtio/virtio.h"
#include "net/net.h"
#include "net/checksum.h"
#include "net/tap.h"
#include "qemu/error-report.h"
#include "qemu/timer.h"
#include "hw/virtio/virtio-net.h"
#include "net/vhost_net.h"
#include "hw/virtio/virtio-bus.h"
#include "qapi/qmp/qjson.h"
#include "qapi-event.h"
#include "hw/virtio/virtio-access.h"

#define VIRTIO_NET_VM_VERSION    11

#define MAC_TABLE_ENTRIES    64
#define MAX_VLAN    (1 << 12)   /* Per 802.1Q definition */

/* previously fixed value */
#define VIRTIO_NET_RX_QUEUE_DEFAULT_SIZE 256
/* for now, only allow larger queues; with virtio-1, guest can downsize */
#define VIRTIO_NET_RX_QUEUE_MIN_SIZE VIRTIO_NET_RX_QUEUE_DEFAULT_SIZE

/*
 * Calculate the number of bytes up to and including the given 'field' of
 * 'container'.
 */
#define endof(container, field) \
    (offsetof(container, field) + sizeof(((container *)0)->field))

typedef struct VirtIOFeature {
    uint32_t flags;
    size_t end;
} VirtIOFeature;

static VirtIOFeature feature_sizes[] = {
    {.flags = 1 << VIRTIO_NET_F_MAC,
     .end = endof(struct virtio_net_config, mac)},
    {.flags = 1 << VIRTIO_NET_F_STATUS,
     .end = endof(struct virtio_net_config, status)},
    {.flags = 1 << VIRTIO_NET_F_MQ,
     .end = endof(struct virtio_net_config, max_virtqueue_pairs)},
    {.flags = 1 << VIRTIO_NET_F_MTU,
     .end = endof(struct virtio_net_config, mtu)},
    {}
};

static VirtIONetQueue *virtio_net_get_subqueue(NetClientState *nc)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);

    return &n->vqs[nc->queue_index];
}

static int vq2q(int queue_index)
{
    return queue_index / 2;
}

/* TODO
 * - we could suppress RX interrupt if we were so inclined.
 */
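
/*
 * Virtqueue index layout assumed throughout this file: each queue pair
 * occupies two consecutive indexes (2N = RX, 2N + 1 = TX for pair N),
 * with the control virtqueue placed after the last pair; see
 * virtio_net_add_queue() and virtio_net_change_num_queues().  With
 * max_queues == 2 that gives: vq0 = rx0, vq1 = tx0, vq2 = rx1,
 * vq3 = tx1, vq4 = ctrl.  Hence vq2q() recovers the pair number from a
 * virtqueue index by dividing by two.
 */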

static void virtio_net_get_config(VirtIODevice *vdev, uint8_t *config)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    struct virtio_net_config netcfg;

    virtio_stw_p(vdev, &netcfg.status, n->status);
    virtio_stw_p(vdev, &netcfg.max_virtqueue_pairs, n->max_queues);
    virtio_stw_p(vdev, &netcfg.mtu, n->net_conf.mtu);
    memcpy(netcfg.mac, n->mac, ETH_ALEN);
    memcpy(config, &netcfg, n->config_size);
}

static void virtio_net_set_config(VirtIODevice *vdev, const uint8_t *config)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    struct virtio_net_config netcfg = {};

    memcpy(&netcfg, config, n->config_size);

    if (!virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR) &&
        !virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1) &&
        memcmp(netcfg.mac, n->mac, ETH_ALEN)) {
        memcpy(n->mac, netcfg.mac, ETH_ALEN);
        qemu_format_nic_info_str(qemu_get_queue(n->nic), n->mac);
    }
}

static bool virtio_net_started(VirtIONet *n, uint8_t status)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    return (status & VIRTIO_CONFIG_S_DRIVER_OK) &&
        (n->status & VIRTIO_NET_S_LINK_UP) && vdev->vm_running;
}

static void virtio_net_announce_timer(void *opaque)
{
    VirtIONet *n = opaque;
    VirtIODevice *vdev = VIRTIO_DEVICE(n);

    n->announce_counter--;
    n->status |= VIRTIO_NET_S_ANNOUNCE;
    virtio_notify_config(vdev);
}
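
/*
 * Self-announce sketch (e.g. after migration): post-load arms
 * announce_timer with announce_counter = SELF_ANNOUNCE_ROUNDS.  Each
 * timer tick sets VIRTIO_NET_S_ANNOUNCE in the status word and pokes
 * the guest with a config interrupt; the guest emits gratuitous ARPs
 * and acknowledges via VIRTIO_NET_CTRL_ANNOUNCE_ACK, which re-arms the
 * timer (see virtio_net_handle_announce()) until the counter runs out.
 */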

static void virtio_net_vhost_status(VirtIONet *n, uint8_t status)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    NetClientState *nc = qemu_get_queue(n->nic);
    int queues = n->multiqueue ? n->max_queues : 1;

    if (!get_vhost_net(nc->peer)) {
        return;
    }

    if ((virtio_net_started(n, status) && !nc->peer->link_down) ==
        !!n->vhost_started) {
        return;
    }
    if (!n->vhost_started) {
        int r, i;

        if (n->needs_vnet_hdr_swap) {
            error_report("backend does not support %s vnet headers; "
                         "falling back on userspace virtio",
                         virtio_is_big_endian(vdev) ? "BE" : "LE");
            return;
        }

        /* Any packets outstanding? Purge them to avoid touching rings
         * when vhost is running.
         */
        for (i = 0; i < queues; i++) {
            NetClientState *qnc = qemu_get_subqueue(n->nic, i);

            /* Purge both directions: TX and RX. */
            qemu_net_queue_purge(qnc->peer->incoming_queue, qnc);
            qemu_net_queue_purge(qnc->incoming_queue, qnc->peer);
        }

        if (virtio_has_feature(vdev->guest_features, VIRTIO_NET_F_MTU)) {
            r = vhost_net_set_mtu(get_vhost_net(nc->peer), n->net_conf.mtu);
            if (r < 0) {
                error_report("%uBytes MTU not supported by the backend",
                             n->net_conf.mtu);

                return;
            }
        }

        n->vhost_started = 1;
        r = vhost_net_start(vdev, n->nic->ncs, queues);
        if (r < 0) {
            error_report("unable to start vhost net: %d: "
                         "falling back on userspace virtio", -r);
            n->vhost_started = 0;
        }
    } else {
        vhost_net_stop(vdev, n->nic->ncs, queues);
        n->vhost_started = 0;
    }
}

static int virtio_net_set_vnet_endian_one(VirtIODevice *vdev,
                                          NetClientState *peer,
                                          bool enable)
{
    if (virtio_is_big_endian(vdev)) {
        return qemu_set_vnet_be(peer, enable);
    } else {
        return qemu_set_vnet_le(peer, enable);
    }
}

static bool virtio_net_set_vnet_endian(VirtIODevice *vdev, NetClientState *ncs,
                                       int queues, bool enable)
{
    int i;

    for (i = 0; i < queues; i++) {
        if (virtio_net_set_vnet_endian_one(vdev, ncs[i].peer, enable) < 0 &&
            enable) {
            while (--i >= 0) {
                virtio_net_set_vnet_endian_one(vdev, ncs[i].peer, false);
            }

            return true;
        }
    }

    return false;
}

static void virtio_net_vnet_endian_status(VirtIONet *n, uint8_t status)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    int queues = n->multiqueue ? n->max_queues : 1;

    if (virtio_net_started(n, status)) {
        /* Before using the device, we tell the network backend about the
         * endianness to use when parsing vnet headers. If the backend
         * can't do it, we fall back on fixing the headers in the core
         * virtio-net code.
         */
        n->needs_vnet_hdr_swap = virtio_net_set_vnet_endian(vdev, n->nic->ncs,
                                                            queues, true);
    } else if (virtio_net_started(n, vdev->status)) {
        /* After using the device, we need to reset the network backend to
         * the default (guest native endianness), otherwise the guest may
         * lose network connectivity if it is rebooted into a different
         * endianness.
         */
        virtio_net_set_vnet_endian(vdev, n->nic->ncs, queues, false);
    }
}

static void virtio_net_drop_tx_queue_data(VirtIODevice *vdev, VirtQueue *vq)
{
    unsigned int dropped = virtqueue_drop_all(vq);
    if (dropped) {
        virtio_notify(vdev, vq);
    }
}

static void virtio_net_set_status(struct VirtIODevice *vdev, uint8_t status)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    VirtIONetQueue *q;
    int i;
    uint8_t queue_status;

    virtio_net_vnet_endian_status(n, status);
    virtio_net_vhost_status(n, status);

    for (i = 0; i < n->max_queues; i++) {
        NetClientState *ncs = qemu_get_subqueue(n->nic, i);
        bool queue_started;
        q = &n->vqs[i];

        if ((!n->multiqueue && i != 0) || i >= n->curr_queues) {
            queue_status = 0;
        } else {
            queue_status = status;
        }
        queue_started =
            virtio_net_started(n, queue_status) && !n->vhost_started;

        if (queue_started) {
            qemu_flush_queued_packets(ncs);
        }

        if (!q->tx_waiting) {
            continue;
        }

        if (queue_started) {
            if (q->tx_timer) {
                timer_mod(q->tx_timer,
                          qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + n->tx_timeout);
            } else {
                qemu_bh_schedule(q->tx_bh);
            }
        } else {
            if (q->tx_timer) {
                timer_del(q->tx_timer);
            } else {
                qemu_bh_cancel(q->tx_bh);
            }
            if ((n->status & VIRTIO_NET_S_LINK_UP) == 0 &&
                (queue_status & VIRTIO_CONFIG_S_DRIVER_OK)) {
                /* If tx was waiting, we likely have packets queued for
                 * transmission and notification disabled; drop them. */
                q->tx_waiting = 0;
                virtio_queue_set_notification(q->tx_vq, 1);
                virtio_net_drop_tx_queue_data(vdev, q->tx_vq);
            }
        }
    }
}

static void virtio_net_set_link_status(NetClientState *nc)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    uint16_t old_status = n->status;

    if (nc->link_down) {
        n->status &= ~VIRTIO_NET_S_LINK_UP;
    } else {
        n->status |= VIRTIO_NET_S_LINK_UP;
    }

    if (n->status != old_status) {
        virtio_notify_config(vdev);
    }

    virtio_net_set_status(vdev, vdev->status);
}

static void rxfilter_notify(NetClientState *nc)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);

    if (nc->rxfilter_notify_enabled) {
        gchar *path = object_get_canonical_path(OBJECT(n->qdev));
        qapi_event_send_nic_rx_filter_changed(!!n->netclient_name,
                                              n->netclient_name, path,
                                              &error_abort);
        g_free(path);

        /* disable event notification to avoid events flooding */
        nc->rxfilter_notify_enabled = 0;
    }
}

static intList *get_vlan_table(VirtIONet *n)
{
    intList *list, *entry;
    int i, j;

    list = NULL;
    for (i = 0; i < MAX_VLAN >> 5; i++) {
        for (j = 0; n->vlans[i] && j <= 0x1f; j++) {
            if (n->vlans[i] & (1U << j)) {
                entry = g_malloc0(sizeof(*entry));
                entry->value = (i << 5) + j;
                entry->next = list;
                list = entry;
            }
        }
    }

    return list;
}
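
/*
 * n->vlans is a bitmap of MAX_VLAN (4096) bits stored as 128 uint32_t
 * words: VLAN id 'vid' lives in word vid >> 5, bit vid & 0x1f.  The
 * loop above inverts that encoding; e.g. vid 100 is found at
 * n->vlans[3] & (1U << 4).
 */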

static RxFilterInfo *virtio_net_query_rxfilter(NetClientState *nc)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    RxFilterInfo *info;
    strList *str_list, *entry;
    int i;

    info = g_malloc0(sizeof(*info));
    info->name = g_strdup(nc->name);
    info->promiscuous = n->promisc;

    if (n->nouni) {
        info->unicast = RX_STATE_NONE;
    } else if (n->alluni) {
        info->unicast = RX_STATE_ALL;
    } else {
        info->unicast = RX_STATE_NORMAL;
    }

    if (n->nomulti) {
        info->multicast = RX_STATE_NONE;
    } else if (n->allmulti) {
        info->multicast = RX_STATE_ALL;
    } else {
        info->multicast = RX_STATE_NORMAL;
    }

    info->broadcast_allowed = n->nobcast;
    info->multicast_overflow = n->mac_table.multi_overflow;
    info->unicast_overflow = n->mac_table.uni_overflow;

    info->main_mac = qemu_mac_strdup_printf(n->mac);

    str_list = NULL;
    for (i = 0; i < n->mac_table.first_multi; i++) {
        entry = g_malloc0(sizeof(*entry));
        entry->value = qemu_mac_strdup_printf(n->mac_table.macs + i * ETH_ALEN);
        entry->next = str_list;
        str_list = entry;
    }
    info->unicast_table = str_list;

    str_list = NULL;
    for (i = n->mac_table.first_multi; i < n->mac_table.in_use; i++) {
        entry = g_malloc0(sizeof(*entry));
        entry->value = qemu_mac_strdup_printf(n->mac_table.macs + i * ETH_ALEN);
        entry->next = str_list;
        str_list = entry;
    }
    info->multicast_table = str_list;
    info->vlan_table = get_vlan_table(n);

    if (!virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_VLAN)) {
        info->vlan = RX_STATE_ALL;
    } else if (!info->vlan_table) {
        info->vlan = RX_STATE_NONE;
    } else {
        info->vlan = RX_STATE_NORMAL;
    }

    /* enable event notification after query */
    nc->rxfilter_notify_enabled = 1;

    return info;
}

static void virtio_net_reset(VirtIODevice *vdev)
{
    VirtIONet *n = VIRTIO_NET(vdev);

    /* Reset back to compatibility mode */
    n->promisc = 1;
    n->allmulti = 0;
    n->alluni = 0;
    n->nomulti = 0;
    n->nouni = 0;
    n->nobcast = 0;
    /* multiqueue is disabled by default */
    n->curr_queues = 1;
    timer_del(n->announce_timer);
    n->announce_counter = 0;
    n->status &= ~VIRTIO_NET_S_ANNOUNCE;

    /* Flush any MAC and VLAN filter table state */
    n->mac_table.in_use = 0;
    n->mac_table.first_multi = 0;
    n->mac_table.multi_overflow = 0;
    n->mac_table.uni_overflow = 0;
    memset(n->mac_table.macs, 0, MAC_TABLE_ENTRIES * ETH_ALEN);
    memcpy(&n->mac[0], &n->nic->conf->macaddr, sizeof(n->mac));
    qemu_format_nic_info_str(qemu_get_queue(n->nic), n->mac);
    memset(n->vlans, 0, MAX_VLAN >> 3);
}

static void peer_test_vnet_hdr(VirtIONet *n)
{
    NetClientState *nc = qemu_get_queue(n->nic);
    if (!nc->peer) {
        return;
    }

    n->has_vnet_hdr = qemu_has_vnet_hdr(nc->peer);
}

static int peer_has_vnet_hdr(VirtIONet *n)
{
    return n->has_vnet_hdr;
}

static int peer_has_ufo(VirtIONet *n)
{
    if (!peer_has_vnet_hdr(n)) {
        return 0;
    }

    n->has_ufo = qemu_has_ufo(qemu_get_queue(n->nic)->peer);

    return n->has_ufo;
}

static void virtio_net_set_mrg_rx_bufs(VirtIONet *n, int mergeable_rx_bufs,
                                       int version_1)
{
    int i;
    NetClientState *nc;

    n->mergeable_rx_bufs = mergeable_rx_bufs;

    if (version_1) {
        n->guest_hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf);
    } else {
        n->guest_hdr_len = n->mergeable_rx_bufs ?
            sizeof(struct virtio_net_hdr_mrg_rxbuf) :
            sizeof(struct virtio_net_hdr);
    }

    for (i = 0; i < n->max_queues; i++) {
        nc = qemu_get_subqueue(n->nic, i);

        if (peer_has_vnet_hdr(n) &&
            qemu_has_vnet_hdr_len(nc->peer, n->guest_hdr_len)) {
            qemu_set_vnet_hdr_len(nc->peer, n->guest_hdr_len);
            n->host_hdr_len = n->guest_hdr_len;
        }
    }
}

static int peer_attach(VirtIONet *n, int index)
{
    NetClientState *nc = qemu_get_subqueue(n->nic, index);

    if (!nc->peer) {
        return 0;
    }

    if (nc->peer->info->type == NET_CLIENT_DRIVER_VHOST_USER) {
        vhost_set_vring_enable(nc->peer, 1);
    }

    if (nc->peer->info->type != NET_CLIENT_DRIVER_TAP) {
        return 0;
    }

    if (n->max_queues == 1) {
        return 0;
    }

    return tap_enable(nc->peer);
}

static int peer_detach(VirtIONet *n, int index)
{
    NetClientState *nc = qemu_get_subqueue(n->nic, index);

    if (!nc->peer) {
        return 0;
    }

    if (nc->peer->info->type == NET_CLIENT_DRIVER_VHOST_USER) {
        vhost_set_vring_enable(nc->peer, 0);
    }

    if (nc->peer->info->type != NET_CLIENT_DRIVER_TAP) {
        return 0;
    }

    return tap_disable(nc->peer);
}

static void virtio_net_set_queues(VirtIONet *n)
{
    int i;
    int r;

    if (n->nic->peer_deleted) {
        return;
    }

    for (i = 0; i < n->max_queues; i++) {
        if (i < n->curr_queues) {
            r = peer_attach(n, i);
            assert(!r);
        } else {
            r = peer_detach(n, i);
            assert(!r);
        }
    }
}

static void virtio_net_set_multiqueue(VirtIONet *n, int multiqueue);

static uint64_t virtio_net_get_features(VirtIODevice *vdev, uint64_t features,
                                        Error **errp)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    NetClientState *nc = qemu_get_queue(n->nic);

    /* First, sync all the features the device could possibly support */
    features |= n->host_features;

    virtio_add_feature(&features, VIRTIO_NET_F_MAC);

    if (!peer_has_vnet_hdr(n)) {
        virtio_clear_feature(&features, VIRTIO_NET_F_CSUM);
        virtio_clear_feature(&features, VIRTIO_NET_F_HOST_TSO4);
        virtio_clear_feature(&features, VIRTIO_NET_F_HOST_TSO6);
        virtio_clear_feature(&features, VIRTIO_NET_F_HOST_ECN);

        virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_CSUM);
        virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_TSO4);
        virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_TSO6);
        virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_ECN);
    }

    if (!peer_has_vnet_hdr(n) || !peer_has_ufo(n)) {
        virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_UFO);
        virtio_clear_feature(&features, VIRTIO_NET_F_HOST_UFO);
    }

    if (!get_vhost_net(nc->peer)) {
        return features;
    }
    features = vhost_net_get_features(get_vhost_net(nc->peer), features);
    vdev->backend_features = features;

    if (n->mtu_bypass_backend &&
        (n->host_features & (1ULL << VIRTIO_NET_F_MTU))) {
        features |= (1ULL << VIRTIO_NET_F_MTU);
    }

    return features;
}
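
/*
 * Feature negotiation above is essentially subtractive: start from the
 * device's host_features, clear whatever the peer (e.g. a TAP backend
 * without vnet header support) cannot back, then let the vhost backend
 * mask the result.  The one bit that can be re-added afterwards is
 * VIRTIO_NET_F_MTU, and only when x-mtu-bypass-backend lets QEMU honour
 * the MTU without backend help.
 */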

static uint64_t virtio_net_bad_features(VirtIODevice *vdev)
{
    uint64_t features = 0;

    /* Linux kernel 2.6.25.  It understood MAC (as everyone must),
     * but also these: */
    virtio_add_feature(&features, VIRTIO_NET_F_MAC);
    virtio_add_feature(&features, VIRTIO_NET_F_CSUM);
    virtio_add_feature(&features, VIRTIO_NET_F_HOST_TSO4);
    virtio_add_feature(&features, VIRTIO_NET_F_HOST_TSO6);
    virtio_add_feature(&features, VIRTIO_NET_F_HOST_ECN);

    return features;
}

static void virtio_net_apply_guest_offloads(VirtIONet *n)
{
    qemu_set_offload(qemu_get_queue(n->nic)->peer,
        !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_CSUM)),
        !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_TSO4)),
        !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_TSO6)),
        !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_ECN)),
        !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_UFO)));
}

static uint64_t virtio_net_guest_offloads_by_features(uint32_t features)
{
    static const uint64_t guest_offloads_mask =
        (1ULL << VIRTIO_NET_F_GUEST_CSUM) |
        (1ULL << VIRTIO_NET_F_GUEST_TSO4) |
        (1ULL << VIRTIO_NET_F_GUEST_TSO6) |
        (1ULL << VIRTIO_NET_F_GUEST_ECN)  |
        (1ULL << VIRTIO_NET_F_GUEST_UFO);

    return guest_offloads_mask & features;
}

static inline uint64_t virtio_net_supported_guest_offloads(VirtIONet *n)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    return virtio_net_guest_offloads_by_features(vdev->guest_features);
}

static void virtio_net_set_features(VirtIODevice *vdev, uint64_t features)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    int i;

    if (n->mtu_bypass_backend &&
        !virtio_has_feature(vdev->backend_features, VIRTIO_NET_F_MTU)) {
        features &= ~(1ULL << VIRTIO_NET_F_MTU);
    }

    virtio_net_set_multiqueue(n,
                              virtio_has_feature(features, VIRTIO_NET_F_MQ));

    virtio_net_set_mrg_rx_bufs(n,
                               virtio_has_feature(features,
                                                  VIRTIO_NET_F_MRG_RXBUF),
                               virtio_has_feature(features,
                                                  VIRTIO_F_VERSION_1));

    if (n->has_vnet_hdr) {
        n->curr_guest_offloads =
            virtio_net_guest_offloads_by_features(features);
        virtio_net_apply_guest_offloads(n);
    }

    for (i = 0; i < n->max_queues; i++) {
        NetClientState *nc = qemu_get_subqueue(n->nic, i);

        if (!get_vhost_net(nc->peer)) {
            continue;
        }
        vhost_net_ack_features(get_vhost_net(nc->peer), features);
    }

    if (virtio_has_feature(features, VIRTIO_NET_F_CTRL_VLAN)) {
        memset(n->vlans, 0, MAX_VLAN >> 3);
    } else {
        memset(n->vlans, 0xff, MAX_VLAN >> 3);
    }
}

static int virtio_net_handle_rx_mode(VirtIONet *n, uint8_t cmd,
                                     struct iovec *iov, unsigned int iov_cnt)
{
    uint8_t on;
    size_t s;
    NetClientState *nc = qemu_get_queue(n->nic);

    s = iov_to_buf(iov, iov_cnt, 0, &on, sizeof(on));
    if (s != sizeof(on)) {
        return VIRTIO_NET_ERR;
    }

    if (cmd == VIRTIO_NET_CTRL_RX_PROMISC) {
        n->promisc = on;
    } else if (cmd == VIRTIO_NET_CTRL_RX_ALLMULTI) {
        n->allmulti = on;
    } else if (cmd == VIRTIO_NET_CTRL_RX_ALLUNI) {
        n->alluni = on;
    } else if (cmd == VIRTIO_NET_CTRL_RX_NOMULTI) {
        n->nomulti = on;
    } else if (cmd == VIRTIO_NET_CTRL_RX_NOUNI) {
        n->nouni = on;
    } else if (cmd == VIRTIO_NET_CTRL_RX_NOBCAST) {
        n->nobcast = on;
    } else {
        return VIRTIO_NET_ERR;
    }

    rxfilter_notify(nc);

    return VIRTIO_NET_OK;
}
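
/*
 * Wire format of a control command, as consumed by
 * virtio_net_handle_ctrl() further down: a struct virtio_net_ctrl_hdr
 * { u8 class; u8 cmd; } at the head of the out sg, followed by a
 * class-specific payload (a single "on" byte for the RX-mode commands
 * above), with a one-byte virtio_net_ctrl_ack status written back into
 * the in sg.
 */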

static int virtio_net_handle_offloads(VirtIONet *n, uint8_t cmd,
                                      struct iovec *iov, unsigned int iov_cnt)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    uint64_t offloads;
    size_t s;

    if (!virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS)) {
        return VIRTIO_NET_ERR;
    }

    s = iov_to_buf(iov, iov_cnt, 0, &offloads, sizeof(offloads));
    if (s != sizeof(offloads)) {
        return VIRTIO_NET_ERR;
    }

    if (cmd == VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET) {
        uint64_t supported_offloads;

        if (!n->has_vnet_hdr) {
            return VIRTIO_NET_ERR;
        }

        supported_offloads = virtio_net_supported_guest_offloads(n);
        if (offloads & ~supported_offloads) {
            return VIRTIO_NET_ERR;
        }

        n->curr_guest_offloads = offloads;
        virtio_net_apply_guest_offloads(n);

        return VIRTIO_NET_OK;
    } else {
        return VIRTIO_NET_ERR;
    }
}

static int virtio_net_handle_mac(VirtIONet *n, uint8_t cmd,
                                 struct iovec *iov, unsigned int iov_cnt)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    struct virtio_net_ctrl_mac mac_data;
    size_t s;
    NetClientState *nc = qemu_get_queue(n->nic);

    if (cmd == VIRTIO_NET_CTRL_MAC_ADDR_SET) {
        if (iov_size(iov, iov_cnt) != sizeof(n->mac)) {
            return VIRTIO_NET_ERR;
        }
        s = iov_to_buf(iov, iov_cnt, 0, &n->mac, sizeof(n->mac));
        assert(s == sizeof(n->mac));
        qemu_format_nic_info_str(qemu_get_queue(n->nic), n->mac);
        rxfilter_notify(nc);

        return VIRTIO_NET_OK;
    }

    if (cmd != VIRTIO_NET_CTRL_MAC_TABLE_SET) {
        return VIRTIO_NET_ERR;
    }

    int in_use = 0;
    int first_multi = 0;
    uint8_t uni_overflow = 0;
    uint8_t multi_overflow = 0;
    uint8_t *macs = g_malloc0(MAC_TABLE_ENTRIES * ETH_ALEN);

    s = iov_to_buf(iov, iov_cnt, 0, &mac_data.entries,
                   sizeof(mac_data.entries));
    mac_data.entries = virtio_ldl_p(vdev, &mac_data.entries);
    if (s != sizeof(mac_data.entries)) {
        goto error;
    }
    iov_discard_front(&iov, &iov_cnt, s);

    if (mac_data.entries * ETH_ALEN > iov_size(iov, iov_cnt)) {
        goto error;
    }

    if (mac_data.entries <= MAC_TABLE_ENTRIES) {
        s = iov_to_buf(iov, iov_cnt, 0, macs,
                       mac_data.entries * ETH_ALEN);
        if (s != mac_data.entries * ETH_ALEN) {
            goto error;
        }
        in_use += mac_data.entries;
    } else {
        uni_overflow = 1;
    }

    iov_discard_front(&iov, &iov_cnt, mac_data.entries * ETH_ALEN);

    first_multi = in_use;

    s = iov_to_buf(iov, iov_cnt, 0, &mac_data.entries,
                   sizeof(mac_data.entries));
    mac_data.entries = virtio_ldl_p(vdev, &mac_data.entries);
    if (s != sizeof(mac_data.entries)) {
        goto error;
    }

    iov_discard_front(&iov, &iov_cnt, s);

    if (mac_data.entries * ETH_ALEN != iov_size(iov, iov_cnt)) {
        goto error;
    }

    if (mac_data.entries <= MAC_TABLE_ENTRIES - in_use) {
        s = iov_to_buf(iov, iov_cnt, 0, &macs[in_use * ETH_ALEN],
                       mac_data.entries * ETH_ALEN);
        if (s != mac_data.entries * ETH_ALEN) {
            goto error;
        }
        in_use += mac_data.entries;
    } else {
        multi_overflow = 1;
    }

    n->mac_table.in_use = in_use;
    n->mac_table.first_multi = first_multi;
    n->mac_table.uni_overflow = uni_overflow;
    n->mac_table.multi_overflow = multi_overflow;
    memcpy(n->mac_table.macs, macs, MAC_TABLE_ENTRIES * ETH_ALEN);
    g_free(macs);
    rxfilter_notify(nc);

    return VIRTIO_NET_OK;

error:
    g_free(macs);
    return VIRTIO_NET_ERR;
}
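
/*
 * VIRTIO_NET_CTRL_MAC_TABLE_SET carries two back-to-back tables, each a
 * struct virtio_net_ctrl_mac: a 32-bit entry count followed by that
 * many 6-byte MACs.  The first table holds unicast filters, the second
 * multicast; both land in the single macs[] array above, with
 * first_multi marking the boundary.  A table larger than
 * MAC_TABLE_ENTRIES is not truncated entry by entry; instead the
 * matching overflow flag is set, and receive_filter() then accepts that
 * whole address class.
 */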

static int virtio_net_handle_vlan_table(VirtIONet *n, uint8_t cmd,
                                        struct iovec *iov, unsigned int iov_cnt)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    uint16_t vid;
    size_t s;
    NetClientState *nc = qemu_get_queue(n->nic);

    s = iov_to_buf(iov, iov_cnt, 0, &vid, sizeof(vid));
    vid = virtio_lduw_p(vdev, &vid);
    if (s != sizeof(vid)) {
        return VIRTIO_NET_ERR;
    }

    if (vid >= MAX_VLAN) {
        return VIRTIO_NET_ERR;
    }

    if (cmd == VIRTIO_NET_CTRL_VLAN_ADD) {
        n->vlans[vid >> 5] |= (1U << (vid & 0x1f));
    } else if (cmd == VIRTIO_NET_CTRL_VLAN_DEL) {
        n->vlans[vid >> 5] &= ~(1U << (vid & 0x1f));
    } else {
        return VIRTIO_NET_ERR;
    }

    rxfilter_notify(nc);

    return VIRTIO_NET_OK;
}

static int virtio_net_handle_announce(VirtIONet *n, uint8_t cmd,
                                      struct iovec *iov, unsigned int iov_cnt)
{
    if (cmd == VIRTIO_NET_CTRL_ANNOUNCE_ACK &&
        n->status & VIRTIO_NET_S_ANNOUNCE) {
        n->status &= ~VIRTIO_NET_S_ANNOUNCE;
        if (n->announce_counter) {
            timer_mod(n->announce_timer,
                      qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) +
                      self_announce_delay(n->announce_counter));
        }
        return VIRTIO_NET_OK;
    } else {
        return VIRTIO_NET_ERR;
    }
}

static int virtio_net_handle_mq(VirtIONet *n, uint8_t cmd,
                                struct iovec *iov, unsigned int iov_cnt)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    struct virtio_net_ctrl_mq mq;
    size_t s;
    uint16_t queues;

    s = iov_to_buf(iov, iov_cnt, 0, &mq, sizeof(mq));
    if (s != sizeof(mq)) {
        return VIRTIO_NET_ERR;
    }

    if (cmd != VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET) {
        return VIRTIO_NET_ERR;
    }

    queues = virtio_lduw_p(vdev, &mq.virtqueue_pairs);

    if (queues < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN ||
        queues > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX ||
        queues > n->max_queues ||
        !n->multiqueue) {
        return VIRTIO_NET_ERR;
    }

    n->curr_queues = queues;
    /* stop the backend before changing the number of queues to avoid
     * handling a disabled queue */
    virtio_net_set_status(vdev, vdev->status);
    virtio_net_set_queues(n);

    return VIRTIO_NET_OK;
}

static void virtio_net_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    struct virtio_net_ctrl_hdr ctrl;
    virtio_net_ctrl_ack status = VIRTIO_NET_ERR;
    VirtQueueElement *elem;
    size_t s;
    struct iovec *iov, *iov2;
    unsigned int iov_cnt;

    for (;;) {
        elem = virtqueue_pop(vq, sizeof(VirtQueueElement));
        if (!elem) {
            break;
        }
        if (iov_size(elem->in_sg, elem->in_num) < sizeof(status) ||
            iov_size(elem->out_sg, elem->out_num) < sizeof(ctrl)) {
            virtio_error(vdev, "virtio-net ctrl missing headers");
            virtqueue_detach_element(vq, elem, 0);
            g_free(elem);
            break;
        }

        iov_cnt = elem->out_num;
        iov2 = iov = g_memdup(elem->out_sg,
                              sizeof(struct iovec) * elem->out_num);
        s = iov_to_buf(iov, iov_cnt, 0, &ctrl, sizeof(ctrl));
        iov_discard_front(&iov, &iov_cnt, sizeof(ctrl));
        if (s != sizeof(ctrl)) {
            status = VIRTIO_NET_ERR;
        } else if (ctrl.class == VIRTIO_NET_CTRL_RX) {
            status = virtio_net_handle_rx_mode(n, ctrl.cmd, iov, iov_cnt);
        } else if (ctrl.class == VIRTIO_NET_CTRL_MAC) {
            status = virtio_net_handle_mac(n, ctrl.cmd, iov, iov_cnt);
        } else if (ctrl.class == VIRTIO_NET_CTRL_VLAN) {
            status = virtio_net_handle_vlan_table(n, ctrl.cmd, iov, iov_cnt);
        } else if (ctrl.class == VIRTIO_NET_CTRL_ANNOUNCE) {
            status = virtio_net_handle_announce(n, ctrl.cmd, iov, iov_cnt);
        } else if (ctrl.class == VIRTIO_NET_CTRL_MQ) {
            status = virtio_net_handle_mq(n, ctrl.cmd, iov, iov_cnt);
        } else if (ctrl.class == VIRTIO_NET_CTRL_GUEST_OFFLOADS) {
            status = virtio_net_handle_offloads(n, ctrl.cmd, iov, iov_cnt);
        }

        s = iov_from_buf(elem->in_sg, elem->in_num, 0, &status, sizeof(status));
        assert(s == sizeof(status));

        virtqueue_push(vq, elem, sizeof(status));
        virtio_notify(vdev, vq);
        g_free(iov2);
        g_free(elem);
    }
}

/* RX */

static void virtio_net_handle_rx(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    int queue_index = vq2q(virtio_get_queue_index(vq));

    qemu_flush_queued_packets(qemu_get_subqueue(n->nic, queue_index));
}

static int virtio_net_can_receive(NetClientState *nc)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    VirtIONetQueue *q = virtio_net_get_subqueue(nc);

    if (!vdev->vm_running) {
        return 0;
    }

    if (nc->queue_index >= n->curr_queues) {
        return 0;
    }

    if (!virtio_queue_ready(q->rx_vq) ||
        !(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK)) {
        return 0;
    }

    return 1;
}

static int virtio_net_has_buffers(VirtIONetQueue *q, int bufsize)
{
    VirtIONet *n = q->n;
    if (virtio_queue_empty(q->rx_vq) ||
        (n->mergeable_rx_bufs &&
         !virtqueue_avail_bytes(q->rx_vq, bufsize, 0))) {
        virtio_queue_set_notification(q->rx_vq, 1);

        /* To avoid a race condition where the guest has made some buffers
         * available after the above check but before notification was
         * enabled, check for available buffers again.
         */
        if (virtio_queue_empty(q->rx_vq) ||
            (n->mergeable_rx_bufs &&
             !virtqueue_avail_bytes(q->rx_vq, bufsize, 0))) {
            return 0;
        }
    }

    virtio_queue_set_notification(q->rx_vq, 0);
    return 1;
}

static void virtio_net_hdr_swap(VirtIODevice *vdev, struct virtio_net_hdr *hdr)
{
    virtio_tswap16s(vdev, &hdr->hdr_len);
    virtio_tswap16s(vdev, &hdr->gso_size);
    virtio_tswap16s(vdev, &hdr->csum_start);
    virtio_tswap16s(vdev, &hdr->csum_offset);
}
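
/*
 * Only the 16-bit fields of struct virtio_net_hdr need byte-swapping
 * for a cross-endian guest; flags and gso_type are single bytes.  The
 * num_buffers field of the mergeable header is handled separately with
 * virtio_stw_p() at the point where it is written.
 */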

/* dhclient uses AF_PACKET but doesn't pass auxdata to the kernel so
 * it never finds out that the packets don't have valid checksums.  This
 * causes dhclient to get upset.  Fedora's carried a patch for ages to
 * fix this with Xen but it hasn't appeared in an upstream release of
 * dhclient yet.
 *
 * To avoid breaking existing guests, we catch udp packets and add
 * checksums.  This is terrible but it's better than hacking the guest
 * kernels.
 *
 * N.B. if we introduce a zero-copy API, this operation is no longer free so
 * we should provide a mechanism to disable it to avoid polluting the host
 * cache.
 */
static void work_around_broken_dhclient(struct virtio_net_hdr *hdr,
                                        uint8_t *buf, size_t size)
{
    if ((hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) && /* missing csum */
        (size > 27 && size < 1500) && /* normal sized MTU */
        (buf[12] == 0x08 && buf[13] == 0x00) && /* ethertype == IPv4 */
        (buf[23] == 17) && /* ip.protocol == UDP */
        (buf[34] == 0 && buf[35] == 67)) { /* udp.srcport == bootps */
        net_checksum_calculate(buf, size);
        hdr->flags &= ~VIRTIO_NET_HDR_F_NEEDS_CSUM;
    }
}

static void receive_header(VirtIONet *n, const struct iovec *iov, int iov_cnt,
                           const void *buf, size_t size)
{
    if (n->has_vnet_hdr) {
        /* FIXME this cast is evil */
        void *wbuf = (void *)buf;
        work_around_broken_dhclient(wbuf, wbuf + n->host_hdr_len,
                                    size - n->host_hdr_len);

        if (n->needs_vnet_hdr_swap) {
            virtio_net_hdr_swap(VIRTIO_DEVICE(n), wbuf);
        }
        iov_from_buf(iov, iov_cnt, 0, buf, sizeof(struct virtio_net_hdr));
    } else {
        struct virtio_net_hdr hdr = {
            .flags = 0,
            .gso_type = VIRTIO_NET_HDR_GSO_NONE
        };
        iov_from_buf(iov, iov_cnt, 0, &hdr, sizeof hdr);
    }
}

static int receive_filter(VirtIONet *n, const uint8_t *buf, int size)
{
    static const uint8_t bcast[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
    static const uint8_t vlan[] = {0x81, 0x00};
    uint8_t *ptr = (uint8_t *)buf;
    int i;

    if (n->promisc) {
        return 1;
    }

    ptr += n->host_hdr_len;

    if (!memcmp(&ptr[12], vlan, sizeof(vlan))) {
        int vid = lduw_be_p(ptr + 14) & 0xfff;
        if (!(n->vlans[vid >> 5] & (1U << (vid & 0x1f)))) {
            return 0;
        }
    }

    if (ptr[0] & 1) { /* multicast */
        if (!memcmp(ptr, bcast, sizeof(bcast))) {
            return !n->nobcast;
        } else if (n->nomulti) {
            return 0;
        } else if (n->allmulti || n->mac_table.multi_overflow) {
            return 1;
        }

        for (i = n->mac_table.first_multi; i < n->mac_table.in_use; i++) {
            if (!memcmp(ptr, &n->mac_table.macs[i * ETH_ALEN], ETH_ALEN)) {
                return 1;
            }
        }
    } else { /* unicast */
        if (n->nouni) {
            return 0;
        } else if (n->alluni || n->mac_table.uni_overflow) {
            return 1;
        } else if (!memcmp(ptr, n->mac, ETH_ALEN)) {
            return 1;
        }

        for (i = 0; i < n->mac_table.first_multi; i++) {
            if (!memcmp(ptr, &n->mac_table.macs[i * ETH_ALEN], ETH_ALEN)) {
                return 1;
            }
        }
    }

    return 0;
}
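
/*
 * Filter precedence in receive_filter() above: promiscuous mode accepts
 * everything; otherwise a tagged frame (TPID 0x8100 at offset 12) must
 * first pass the VLAN bitmap, then the destination address is classed
 * by its multicast bit (ptr[0] & 1) and checked against the all/no mode
 * flags, the overflow flags, and finally the exact-match MAC table.
 */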

static ssize_t virtio_net_receive_rcu(NetClientState *nc, const uint8_t *buf,
                                      size_t size)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);
    VirtIONetQueue *q = virtio_net_get_subqueue(nc);
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    struct iovec mhdr_sg[VIRTQUEUE_MAX_SIZE];
    struct virtio_net_hdr_mrg_rxbuf mhdr;
    unsigned mhdr_cnt = 0;
    size_t offset, i, guest_offset;

    if (!virtio_net_can_receive(nc)) {
        return -1;
    }

    /* hdr_len refers to the header we supply to the guest */
    if (!virtio_net_has_buffers(q, size + n->guest_hdr_len - n->host_hdr_len)) {
        return 0;
    }

    if (!receive_filter(n, buf, size)) {
        return size;
    }

    offset = i = 0;

    while (offset < size) {
        VirtQueueElement *elem;
        int len, total;
        const struct iovec *sg;

        total = 0;

        elem = virtqueue_pop(q->rx_vq, sizeof(VirtQueueElement));
        if (!elem) {
            if (i) {
                virtio_error(vdev, "virtio-net unexpected empty queue: "
                             "i %zd mergeable %d offset %zd, size %zd, "
                             "guest hdr len %zd, host hdr len %zd "
                             "guest features 0x%" PRIx64,
                             i, n->mergeable_rx_bufs, offset, size,
                             n->guest_hdr_len, n->host_hdr_len,
                             vdev->guest_features);
            }
            return -1;
        }

        if (elem->in_num < 1) {
            virtio_error(vdev,
                         "virtio-net receive queue contains no in buffers");
            virtqueue_detach_element(q->rx_vq, elem, 0);
            g_free(elem);
            return -1;
        }

        sg = elem->in_sg;
        if (i == 0) {
            assert(offset == 0);
            if (n->mergeable_rx_bufs) {
                mhdr_cnt = iov_copy(mhdr_sg, ARRAY_SIZE(mhdr_sg),
                                    sg, elem->in_num,
                                    offsetof(typeof(mhdr), num_buffers),
                                    sizeof(mhdr.num_buffers));
            }

            receive_header(n, sg, elem->in_num, buf, size);
            offset = n->host_hdr_len;
            total += n->guest_hdr_len;
            guest_offset = n->guest_hdr_len;
        } else {
            guest_offset = 0;
        }

        /* copy in packet.  ugh */
        len = iov_from_buf(sg, elem->in_num, guest_offset,
                           buf + offset, size - offset);
        total += len;
        offset += len;
        /* If buffers can't be merged, at this point we
         * must have consumed the complete packet.
         * Otherwise, drop it. */
        if (!n->mergeable_rx_bufs && offset < size) {
            virtqueue_unpop(q->rx_vq, elem, total);
            g_free(elem);
            return size;
        }

        /* signal other side */
        virtqueue_fill(q->rx_vq, elem, total, i++);
        g_free(elem);
    }

    if (mhdr_cnt) {
        virtio_stw_p(vdev, &mhdr.num_buffers, i);
        iov_from_buf(mhdr_sg, mhdr_cnt,
                     0,
                     &mhdr.num_buffers, sizeof mhdr.num_buffers);
    }

    virtqueue_flush(q->rx_vq, i);
    virtio_notify(vdev, q->rx_vq);

    return size;
}

static ssize_t virtio_net_receive(NetClientState *nc, const uint8_t *buf,
                                  size_t size)
{
    ssize_t r;

    rcu_read_lock();
    r = virtio_net_receive_rcu(nc, buf, size);
    rcu_read_unlock();
    return r;
}
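
/*
 * Mergeable RX note: num_buffers sits inside the guest-visible header
 * of the *first* descriptor chain, but its value (the number of chains
 * consumed) is only known once the whole packet has been placed.
 * receive_rcu() therefore records the iovec slice covering that field
 * up front (mhdr_sg/mhdr_cnt) and patches the count in just before
 * flushing the queue.
 */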

static int32_t virtio_net_flush_tx(VirtIONetQueue *q);

static void virtio_net_tx_complete(NetClientState *nc, ssize_t len)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);
    VirtIONetQueue *q = virtio_net_get_subqueue(nc);
    VirtIODevice *vdev = VIRTIO_DEVICE(n);

    virtqueue_push(q->tx_vq, q->async_tx.elem, 0);
    virtio_notify(vdev, q->tx_vq);

    g_free(q->async_tx.elem);
    q->async_tx.elem = NULL;

    virtio_queue_set_notification(q->tx_vq, 1);
    virtio_net_flush_tx(q);
}

/* TX */
static int32_t virtio_net_flush_tx(VirtIONetQueue *q)
{
    VirtIONet *n = q->n;
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    VirtQueueElement *elem;
    int32_t num_packets = 0;
    int queue_index = vq2q(virtio_get_queue_index(q->tx_vq));
    if (!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK)) {
        return num_packets;
    }

    if (q->async_tx.elem) {
        virtio_queue_set_notification(q->tx_vq, 0);
        return num_packets;
    }

    for (;;) {
        ssize_t ret;
        unsigned int out_num;
        struct iovec sg[VIRTQUEUE_MAX_SIZE], sg2[VIRTQUEUE_MAX_SIZE + 1],
                     *out_sg;
        struct virtio_net_hdr_mrg_rxbuf mhdr;

        elem = virtqueue_pop(q->tx_vq, sizeof(VirtQueueElement));
        if (!elem) {
            break;
        }

        out_num = elem->out_num;
        out_sg = elem->out_sg;
        if (out_num < 1) {
            virtio_error(vdev, "virtio-net header not in first element");
            virtqueue_detach_element(q->tx_vq, elem, 0);
            g_free(elem);
            return -EINVAL;
        }

        if (n->has_vnet_hdr) {
            if (iov_to_buf(out_sg, out_num, 0, &mhdr, n->guest_hdr_len) <
                n->guest_hdr_len) {
                virtio_error(vdev, "virtio-net header incorrect");
                virtqueue_detach_element(q->tx_vq, elem, 0);
                g_free(elem);
                return -EINVAL;
            }
            if (n->needs_vnet_hdr_swap) {
                virtio_net_hdr_swap(vdev, (void *) &mhdr);
                sg2[0].iov_base = &mhdr;
                sg2[0].iov_len = n->guest_hdr_len;
                out_num = iov_copy(&sg2[1], ARRAY_SIZE(sg2) - 1,
                                   out_sg, out_num,
                                   n->guest_hdr_len, -1);
                if (out_num == VIRTQUEUE_MAX_SIZE) {
                    goto drop;
                }
                out_num += 1;
                out_sg = sg2;
            }
        }
        /*
         * If host wants to see the guest header as is, we can
         * pass it on unchanged. Otherwise, copy just the parts
         * that host is interested in.
         */
        assert(n->host_hdr_len <= n->guest_hdr_len);
        if (n->host_hdr_len != n->guest_hdr_len) {
            unsigned sg_num = iov_copy(sg, ARRAY_SIZE(sg),
                                       out_sg, out_num,
                                       0, n->host_hdr_len);
            sg_num += iov_copy(sg + sg_num, ARRAY_SIZE(sg) - sg_num,
                               out_sg, out_num,
                               n->guest_hdr_len, -1);
            out_num = sg_num;
            out_sg = sg;
        }

        ret = qemu_sendv_packet_async(qemu_get_subqueue(n->nic, queue_index),
                                      out_sg, out_num, virtio_net_tx_complete);
        if (ret == 0) {
            virtio_queue_set_notification(q->tx_vq, 0);
            q->async_tx.elem = elem;
            return -EBUSY;
        }

drop:
        virtqueue_push(q->tx_vq, elem, 0);
        virtio_notify(vdev, q->tx_vq);
        g_free(elem);

        if (++num_packets >= n->tx_burst) {
            break;
        }
    }
    return num_packets;
}
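
/*
 * A zero return from qemu_sendv_packet_async() means the backend queue
 * is full and the packet was queued for later; flush_tx parks the
 * element in q->async_tx and returns -EBUSY.  When the backend drains,
 * virtio_net_tx_complete() pushes the element back, re-enables kick
 * notifications, and resumes flushing.
 */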

static void virtio_net_handle_tx_timer(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    VirtIONetQueue *q = &n->vqs[vq2q(virtio_get_queue_index(vq))];

    if (unlikely((n->status & VIRTIO_NET_S_LINK_UP) == 0)) {
        virtio_net_drop_tx_queue_data(vdev, vq);
        return;
    }

    /* This happens when device was stopped but VCPU wasn't. */
    if (!vdev->vm_running) {
        q->tx_waiting = 1;
        return;
    }

    if (q->tx_waiting) {
        virtio_queue_set_notification(vq, 1);
        timer_del(q->tx_timer);
        q->tx_waiting = 0;
        if (virtio_net_flush_tx(q) == -EINVAL) {
            return;
        }
    } else {
        timer_mod(q->tx_timer,
                  qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + n->tx_timeout);
        q->tx_waiting = 1;
        virtio_queue_set_notification(vq, 0);
    }
}

static void virtio_net_handle_tx_bh(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    VirtIONetQueue *q = &n->vqs[vq2q(virtio_get_queue_index(vq))];

    if (unlikely((n->status & VIRTIO_NET_S_LINK_UP) == 0)) {
        virtio_net_drop_tx_queue_data(vdev, vq);
        return;
    }

    if (unlikely(q->tx_waiting)) {
        return;
    }
    q->tx_waiting = 1;
    /* This happens when device was stopped but VCPU wasn't. */
    if (!vdev->vm_running) {
        return;
    }
    virtio_queue_set_notification(vq, 0);
    qemu_bh_schedule(q->tx_bh);
}

static void virtio_net_tx_timer(void *opaque)
{
    VirtIONetQueue *q = opaque;
    VirtIONet *n = q->n;
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    /* This happens when device was stopped but BH wasn't. */
    if (!vdev->vm_running) {
        /* Make sure tx waiting is set, so we'll run when restarted. */
        assert(q->tx_waiting);
        return;
    }

    q->tx_waiting = 0;

    /* Just in case the driver is not ready any more */
    if (!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK)) {
        return;
    }

    virtio_queue_set_notification(q->tx_vq, 1);
    virtio_net_flush_tx(q);
}

static void virtio_net_tx_bh(void *opaque)
{
    VirtIONetQueue *q = opaque;
    VirtIONet *n = q->n;
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    int32_t ret;

    /* This happens when device was stopped but BH wasn't. */
    if (!vdev->vm_running) {
        /* Make sure tx waiting is set, so we'll run when restarted. */
        assert(q->tx_waiting);
        return;
    }

    q->tx_waiting = 0;

    /* Just in case the driver is not ready any more */
    if (unlikely(!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK))) {
        return;
    }

    ret = virtio_net_flush_tx(q);
    if (ret == -EBUSY || ret == -EINVAL) {
        return; /* Notification re-enable handled by tx_complete or device
                 * broken */
    }

    /* If we flush a full burst of packets, assume there are
     * more coming and immediately reschedule */
    if (ret >= n->tx_burst) {
        qemu_bh_schedule(q->tx_bh);
        q->tx_waiting = 1;
        return;
    }

    /* If less than a full burst, re-enable notification and flush
     * anything that may have come in while we weren't looking.  If
     * we find something, assume the guest is still active and reschedule */
    virtio_queue_set_notification(q->tx_vq, 1);
    ret = virtio_net_flush_tx(q);
    if (ret == -EINVAL) {
        return;
    } else if (ret > 0) {
        virtio_queue_set_notification(q->tx_vq, 0);
        qemu_bh_schedule(q->tx_bh);
        q->tx_waiting = 1;
    }
}
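
/*
 * Two TX mitigation strategies, selected with the "tx" property (see
 * virtio_net_add_queue() below): tx=timer coalesces kicks and flushes
 * at most once per tx_timeout nanoseconds, trading latency for fewer
 * exits; the default tx=bh flushes from a bottom half scheduled right
 * after the kick, with the rescheduling logic above draining bursts of
 * up to tx_burst packets while the guest stays active.
 */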

static void virtio_net_add_queue(VirtIONet *n, int index)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);

    n->vqs[index].rx_vq = virtio_add_queue(vdev, n->net_conf.rx_queue_size,
                                           virtio_net_handle_rx);
    if (n->net_conf.tx && !strcmp(n->net_conf.tx, "timer")) {
        n->vqs[index].tx_vq =
            virtio_add_queue(vdev, 256, virtio_net_handle_tx_timer);
        n->vqs[index].tx_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
                                              virtio_net_tx_timer,
                                              &n->vqs[index]);
    } else {
        n->vqs[index].tx_vq =
            virtio_add_queue(vdev, 256, virtio_net_handle_tx_bh);
        n->vqs[index].tx_bh = qemu_bh_new(virtio_net_tx_bh, &n->vqs[index]);
    }

    n->vqs[index].tx_waiting = 0;
    n->vqs[index].n = n;
}

static void virtio_net_del_queue(VirtIONet *n, int index)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    VirtIONetQueue *q = &n->vqs[index];
    NetClientState *nc = qemu_get_subqueue(n->nic, index);

    qemu_purge_queued_packets(nc);

    virtio_del_queue(vdev, index * 2);
    if (q->tx_timer) {
        timer_del(q->tx_timer);
        timer_free(q->tx_timer);
        q->tx_timer = NULL;
    } else {
        qemu_bh_delete(q->tx_bh);
        q->tx_bh = NULL;
    }
    q->tx_waiting = 0;
    virtio_del_queue(vdev, index * 2 + 1);
}

static void virtio_net_change_num_queues(VirtIONet *n, int new_max_queues)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    int old_num_queues = virtio_get_num_queues(vdev);
    int new_num_queues = new_max_queues * 2 + 1;
    int i;

    assert(old_num_queues >= 3);
    assert(old_num_queues % 2 == 1);

    if (old_num_queues == new_num_queues) {
        return;
    }

    /*
     * We always need to remove and add ctrl vq if
     * old_num_queues != new_num_queues.  Remove ctrl_vq first,
     * and then we only enter one of the following two loops.
     */
    virtio_del_queue(vdev, old_num_queues - 1);

    for (i = new_num_queues - 1; i < old_num_queues - 1; i += 2) {
        /* new_num_queues < old_num_queues */
        virtio_net_del_queue(n, i / 2);
    }

    for (i = old_num_queues - 1; i < new_num_queues - 1; i += 2) {
        /* new_num_queues > old_num_queues */
        virtio_net_add_queue(n, i / 2);
    }

    /* add ctrl_vq last */
    n->ctrl_vq = virtio_add_queue(vdev, 64, virtio_net_handle_ctrl);
}

static void virtio_net_set_multiqueue(VirtIONet *n, int multiqueue)
{
    int max = multiqueue ? n->max_queues : 1;

    n->multiqueue = multiqueue;
    virtio_net_change_num_queues(n, max);

    virtio_net_set_queues(n);
}

static int virtio_net_post_load_device(void *opaque, int version_id)
{
    VirtIONet *n = opaque;
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    int i, link_down;

    virtio_net_set_mrg_rx_bufs(n, n->mergeable_rx_bufs,
                               virtio_vdev_has_feature(vdev,
                                                       VIRTIO_F_VERSION_1));

    /* MAC_TABLE_ENTRIES may be different from the saved image */
    if (n->mac_table.in_use > MAC_TABLE_ENTRIES) {
        n->mac_table.in_use = 0;
    }

    if (!virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS)) {
        n->curr_guest_offloads = virtio_net_supported_guest_offloads(n);
    }

    if (peer_has_vnet_hdr(n)) {
        virtio_net_apply_guest_offloads(n);
    }

    virtio_net_set_queues(n);

    /* Find the first multicast entry in the saved MAC filter */
    for (i = 0; i < n->mac_table.in_use; i++) {
        if (n->mac_table.macs[i * ETH_ALEN] & 1) {
            break;
        }
    }
    n->mac_table.first_multi = i;

    /* nc.link_down can't be migrated, so infer link_down according
     * to link status bit in n->status */
    link_down = (n->status & VIRTIO_NET_S_LINK_UP) == 0;
    for (i = 0; i < n->max_queues; i++) {
        qemu_get_subqueue(n->nic, i)->link_down = link_down;
    }

    if (virtio_vdev_has_feature(vdev, VIRTIO_NET_F_GUEST_ANNOUNCE) &&
        virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ)) {
        n->announce_counter = SELF_ANNOUNCE_ROUNDS;
        timer_mod(n->announce_timer, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL));
    }

    return 0;
}

/* tx_waiting field of a VirtIONetQueue */
static const VMStateDescription vmstate_virtio_net_queue_tx_waiting = {
    .name = "virtio-net-queue-tx_waiting",
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(tx_waiting, VirtIONetQueue),
        VMSTATE_END_OF_LIST()
    },
};

static bool max_queues_gt_1(void *opaque, int version_id)
{
    return VIRTIO_NET(opaque)->max_queues > 1;
}

static bool has_ctrl_guest_offloads(void *opaque, int version_id)
{
    return virtio_vdev_has_feature(VIRTIO_DEVICE(opaque),
                                   VIRTIO_NET_F_CTRL_GUEST_OFFLOADS);
}

static bool mac_table_fits(void *opaque, int version_id)
{
    return VIRTIO_NET(opaque)->mac_table.in_use <= MAC_TABLE_ENTRIES;
}

static bool mac_table_doesnt_fit(void *opaque, int version_id)
{
    return !mac_table_fits(opaque, version_id);
}

/* This temporary type is shared by all the WITH_TMP methods
 * although only some fields are used by each.
 */
struct VirtIONetMigTmp {
    VirtIONet *parent;
    VirtIONetQueue *vqs_1;
    uint16_t curr_queues_1;
    uint8_t has_ufo;
    uint32_t has_vnet_hdr;
};

/* The 2nd and subsequent tx_waiting flags are loaded later than
 * the 1st entry in the queues and only if there's more than one
 * entry.  We use the tmp mechanism to calculate a temporary
 * pointer and count and also validate the count.
 */

static void virtio_net_tx_waiting_pre_save(void *opaque)
{
    struct VirtIONetMigTmp *tmp = opaque;

    tmp->vqs_1 = tmp->parent->vqs + 1;
    tmp->curr_queues_1 = tmp->parent->curr_queues - 1;
    if (tmp->parent->curr_queues == 0) {
        tmp->curr_queues_1 = 0;
    }
}

static int virtio_net_tx_waiting_pre_load(void *opaque)
{
    struct VirtIONetMigTmp *tmp = opaque;

    /* Reuse the pointer setup from save */
    virtio_net_tx_waiting_pre_save(opaque);

    if (tmp->parent->curr_queues > tmp->parent->max_queues) {
        error_report("virtio-net: curr_queues %x > max_queues %x",
                     tmp->parent->curr_queues, tmp->parent->max_queues);

        return -EINVAL;
    }

    return 0; /* all good */
}

static const VMStateDescription vmstate_virtio_net_tx_waiting = {
    .name = "virtio-net-tx_waiting",
    .pre_load = virtio_net_tx_waiting_pre_load,
    .pre_save = virtio_net_tx_waiting_pre_save,
    .fields = (VMStateField[]) {
        VMSTATE_STRUCT_VARRAY_POINTER_UINT16(vqs_1, struct VirtIONetMigTmp,
                                             curr_queues_1,
                                             vmstate_virtio_net_queue_tx_waiting,
                                             struct VirtIONetQueue),
        VMSTATE_END_OF_LIST()
    },
};

/* the 'has_ufo' flag is just tested; if the incoming stream has the
 * flag set we need to check that we have it
 */
static int virtio_net_ufo_post_load(void *opaque, int version_id)
{
    struct VirtIONetMigTmp *tmp = opaque;

    if (tmp->has_ufo && !peer_has_ufo(tmp->parent)) {
        error_report("virtio-net: saved image requires TUN_F_UFO support");
        return -EINVAL;
    }

    return 0;
}

static void virtio_net_ufo_pre_save(void *opaque)
{
    struct VirtIONetMigTmp *tmp = opaque;

    tmp->has_ufo = tmp->parent->has_ufo;
}

static const VMStateDescription vmstate_virtio_net_has_ufo = {
    .name = "virtio-net-ufo",
    .post_load = virtio_net_ufo_post_load,
    .pre_save = virtio_net_ufo_pre_save,
    .fields = (VMStateField[]) {
        VMSTATE_UINT8(has_ufo, struct VirtIONetMigTmp),
        VMSTATE_END_OF_LIST()
    },
};

/* the 'has_vnet_hdr' flag is just tested; if the incoming stream has the
 * flag set we need to check that we have it
 */
static int virtio_net_vnet_post_load(void *opaque, int version_id)
{
    struct VirtIONetMigTmp *tmp = opaque;

    if (tmp->has_vnet_hdr && !peer_has_vnet_hdr(tmp->parent)) {
        error_report("virtio-net: saved image requires vnet_hdr=on");
        return -EINVAL;
    }

    return 0;
}

static void virtio_net_vnet_pre_save(void *opaque)
{
    struct VirtIONetMigTmp *tmp = opaque;

    tmp->has_vnet_hdr = tmp->parent->has_vnet_hdr;
}

static const VMStateDescription vmstate_virtio_net_has_vnet = {
    .name = "virtio-net-vnet",
    .post_load = virtio_net_vnet_post_load,
    .pre_save = virtio_net_vnet_pre_save,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(has_vnet_hdr, struct VirtIONetMigTmp),
        VMSTATE_END_OF_LIST()
    },
};
1790 }, 1791 }; 1792 1793 static const VMStateDescription vmstate_virtio_net_device = { 1794 .name = "virtio-net-device", 1795 .version_id = VIRTIO_NET_VM_VERSION, 1796 .minimum_version_id = VIRTIO_NET_VM_VERSION, 1797 .post_load = virtio_net_post_load_device, 1798 .fields = (VMStateField[]) { 1799 VMSTATE_UINT8_ARRAY(mac, VirtIONet, ETH_ALEN), 1800 VMSTATE_STRUCT_POINTER(vqs, VirtIONet, 1801 vmstate_virtio_net_queue_tx_waiting, 1802 VirtIONetQueue), 1803 VMSTATE_UINT32(mergeable_rx_bufs, VirtIONet), 1804 VMSTATE_UINT16(status, VirtIONet), 1805 VMSTATE_UINT8(promisc, VirtIONet), 1806 VMSTATE_UINT8(allmulti, VirtIONet), 1807 VMSTATE_UINT32(mac_table.in_use, VirtIONet), 1808 1809 /* Guarded pair: If it fits we load it, else we throw it away 1810 * - can happen if source has a larger MAC table.; post-load 1811 * sets flags in this case. 1812 */ 1813 VMSTATE_VBUFFER_MULTIPLY(mac_table.macs, VirtIONet, 1814 0, mac_table_fits, mac_table.in_use, 1815 ETH_ALEN), 1816 VMSTATE_UNUSED_VARRAY_UINT32(VirtIONet, mac_table_doesnt_fit, 0, 1817 mac_table.in_use, ETH_ALEN), 1818 1819 /* Note: This is an array of uint32's that's always been saved as a 1820 * buffer; hold onto your endiannesses; it's actually used as a bitmap 1821 * but based on the uint. 1822 */ 1823 VMSTATE_BUFFER_POINTER_UNSAFE(vlans, VirtIONet, 0, MAX_VLAN >> 3), 1824 VMSTATE_WITH_TMP(VirtIONet, struct VirtIONetMigTmp, 1825 vmstate_virtio_net_has_vnet), 1826 VMSTATE_UINT8(mac_table.multi_overflow, VirtIONet), 1827 VMSTATE_UINT8(mac_table.uni_overflow, VirtIONet), 1828 VMSTATE_UINT8(alluni, VirtIONet), 1829 VMSTATE_UINT8(nomulti, VirtIONet), 1830 VMSTATE_UINT8(nouni, VirtIONet), 1831 VMSTATE_UINT8(nobcast, VirtIONet), 1832 VMSTATE_WITH_TMP(VirtIONet, struct VirtIONetMigTmp, 1833 vmstate_virtio_net_has_ufo), 1834 VMSTATE_SINGLE_TEST(max_queues, VirtIONet, max_queues_gt_1, 0, 1835 vmstate_info_uint16_equal, uint16_t), 1836 VMSTATE_UINT16_TEST(curr_queues, VirtIONet, max_queues_gt_1), 1837 VMSTATE_WITH_TMP(VirtIONet, struct VirtIONetMigTmp, 1838 vmstate_virtio_net_tx_waiting), 1839 VMSTATE_UINT64_TEST(curr_guest_offloads, VirtIONet, 1840 has_ctrl_guest_offloads), 1841 VMSTATE_END_OF_LIST() 1842 }, 1843 }; 1844 1845 static NetClientInfo net_virtio_info = { 1846 .type = NET_CLIENT_DRIVER_NIC, 1847 .size = sizeof(NICState), 1848 .can_receive = virtio_net_can_receive, 1849 .receive = virtio_net_receive, 1850 .link_status_changed = virtio_net_set_link_status, 1851 .query_rx_filter = virtio_net_query_rxfilter, 1852 }; 1853 1854 static bool virtio_net_guest_notifier_pending(VirtIODevice *vdev, int idx) 1855 { 1856 VirtIONet *n = VIRTIO_NET(vdev); 1857 NetClientState *nc = qemu_get_subqueue(n->nic, vq2q(idx)); 1858 assert(n->vhost_started); 1859 return vhost_net_virtqueue_pending(get_vhost_net(nc->peer), idx); 1860 } 1861 1862 static void virtio_net_guest_notifier_mask(VirtIODevice *vdev, int idx, 1863 bool mask) 1864 { 1865 VirtIONet *n = VIRTIO_NET(vdev); 1866 NetClientState *nc = qemu_get_subqueue(n->nic, vq2q(idx)); 1867 assert(n->vhost_started); 1868 vhost_net_virtqueue_mask(get_vhost_net(nc->peer), 1869 vdev, idx, mask); 1870 } 1871 1872 static void virtio_net_set_config_size(VirtIONet *n, uint64_t host_features) 1873 { 1874 int i, config_size = 0; 1875 virtio_add_feature(&host_features, VIRTIO_NET_F_MAC); 1876 1877 for (i = 0; feature_sizes[i].flags != 0; i++) { 1878 if (host_features & feature_sizes[i].flags) { 1879 config_size = MAX(feature_sizes[i].end, config_size); 1880 } 1881 } 1882 n->config_size = config_size; 1883 } 1884 1885 void 

void virtio_net_set_netclient_name(VirtIONet *n, const char *name,
                                   const char *type)
{
    /*
     * The name can be NULL; in that case the netclient name will be
     * of the form "type.x".
     */
    assert(type != NULL);

    g_free(n->netclient_name);
    g_free(n->netclient_type);
    n->netclient_name = g_strdup(name);
    n->netclient_type = g_strdup(type);
}

static void virtio_net_device_realize(DeviceState *dev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VirtIONet *n = VIRTIO_NET(dev);
    NetClientState *nc;
    int i;

    if (n->net_conf.mtu) {
        n->host_features |= (0x1 << VIRTIO_NET_F_MTU);
    }

    virtio_net_set_config_size(n, n->host_features);
    virtio_init(vdev, "virtio-net", VIRTIO_ID_NET, n->config_size);

    /*
     * We set a lower limit on RX queue size to what it always was.
     * Guests that want a smaller ring can always resize it without
     * help from us (using virtio 1 and up).
     */
    if (n->net_conf.rx_queue_size < VIRTIO_NET_RX_QUEUE_MIN_SIZE ||
        n->net_conf.rx_queue_size > VIRTQUEUE_MAX_SIZE ||
        (n->net_conf.rx_queue_size & (n->net_conf.rx_queue_size - 1))) {
        error_setg(errp, "Invalid rx_queue_size (= %" PRIu16 "), "
                   "must be a power of 2 between %d and %d.",
                   n->net_conf.rx_queue_size, VIRTIO_NET_RX_QUEUE_MIN_SIZE,
                   VIRTQUEUE_MAX_SIZE);
        virtio_cleanup(vdev);
        return;
    }

    n->max_queues = MAX(n->nic_conf.peers.queues, 1);
    if (n->max_queues * 2 + 1 > VIRTIO_QUEUE_MAX) {
        error_setg(errp, "Invalid number of queues (= %" PRIu32 "), "
                   "must be a positive integer less than %d.",
                   n->max_queues, (VIRTIO_QUEUE_MAX - 1) / 2);
        virtio_cleanup(vdev);
        return;
    }
    n->vqs = g_malloc0(sizeof(VirtIONetQueue) * n->max_queues);
    n->curr_queues = 1;
    n->tx_timeout = n->net_conf.txtimer;

    if (n->net_conf.tx && strcmp(n->net_conf.tx, "timer")
                       && strcmp(n->net_conf.tx, "bh")) {
        error_report("virtio-net: "
                     "Unknown option tx=%s, valid options: \"timer\" \"bh\"",
                     n->net_conf.tx);
        error_report("Defaulting to \"bh\"");
    }

    for (i = 0; i < n->max_queues; i++) {
        virtio_net_add_queue(n, i);
    }

    n->ctrl_vq = virtio_add_queue(vdev, 64, virtio_net_handle_ctrl);
    qemu_macaddr_default_if_unset(&n->nic_conf.macaddr);
    memcpy(&n->mac[0], &n->nic_conf.macaddr, sizeof(n->mac));
    n->status = VIRTIO_NET_S_LINK_UP;
    n->announce_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL,
                                     virtio_net_announce_timer, n);

    if (n->netclient_type) {
        /*
         * Happens when virtio_net_set_netclient_name has been called.
         */
    if (n->netclient_type) {
        /*
         * Happens when virtio_net_set_netclient_name has been called.
         */
        n->nic = qemu_new_nic(&net_virtio_info, &n->nic_conf,
                              n->netclient_type, n->netclient_name, n);
    } else {
        n->nic = qemu_new_nic(&net_virtio_info, &n->nic_conf,
                              object_get_typename(OBJECT(dev)), dev->id, n);
    }

    peer_test_vnet_hdr(n);
    if (peer_has_vnet_hdr(n)) {
        for (i = 0; i < n->max_queues; i++) {
            qemu_using_vnet_hdr(qemu_get_subqueue(n->nic, i)->peer, true);
        }
        n->host_hdr_len = sizeof(struct virtio_net_hdr);
    } else {
        n->host_hdr_len = 0;
    }

    qemu_format_nic_info_str(qemu_get_queue(n->nic), n->nic_conf.macaddr.a);

    n->vqs[0].tx_waiting = 0;
    n->tx_burst = n->net_conf.txburst;
    virtio_net_set_mrg_rx_bufs(n, 0, 0);
    n->promisc = 1; /* for compatibility */

    n->mac_table.macs = g_malloc0(MAC_TABLE_ENTRIES * ETH_ALEN);

    n->vlans = g_malloc0(MAX_VLAN >> 3);

    nc = qemu_get_queue(n->nic);
    nc->rxfilter_notify_enabled = 1;

    n->qdev = dev;
}

static void virtio_net_device_unrealize(DeviceState *dev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VirtIONet *n = VIRTIO_NET(dev);
    int i, max_queues;

    /* This will stop the vhost backend if appropriate. */
    virtio_net_set_status(vdev, 0);

    g_free(n->netclient_name);
    n->netclient_name = NULL;
    g_free(n->netclient_type);
    n->netclient_type = NULL;

    g_free(n->mac_table.macs);
    g_free(n->vlans);

    max_queues = n->multiqueue ? n->max_queues : 1;
    for (i = 0; i < max_queues; i++) {
        virtio_net_del_queue(n, i);
    }

    timer_del(n->announce_timer);
    timer_free(n->announce_timer);
    g_free(n->vqs);
    qemu_del_nic(n->nic);
    virtio_cleanup(vdev);
}

static void virtio_net_instance_init(Object *obj)
{
    VirtIONet *n = VIRTIO_NET(obj);

    /*
     * The default config_size is sizeof(struct virtio_net_config).
     * Can be overridden with virtio_net_set_config_size.
     */
    n->config_size = sizeof(struct virtio_net_config);
    device_add_bootindex_property(obj, &n->nic_conf.bootindex,
                                  "bootindex", "/ethernet-phy@0",
                                  DEVICE(n), NULL);
}

static void virtio_net_pre_save(void *opaque)
{
    VirtIONet *n = opaque;

    /* At this point, the backend must be stopped, otherwise
     * it might keep writing to memory.
     */
    assert(!n->vhost_started);
}
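
/*
 * A sketch of how the two VMStateDescriptions relate (as wired up in
 * virtio_net_class_init() below): "virtio-net" is the section the
 * migration core sees via dc->vmsd; its only field,
 * VMSTATE_VIRTIO_DEVICE, hands off to the common virtio save/load
 * code, which in turn applies the device-specific
 * vmstate_virtio_net_device through vdc->vmsd.
 */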
static const VMStateDescription vmstate_virtio_net = {
    .name = "virtio-net",
    .minimum_version_id = VIRTIO_NET_VM_VERSION,
    .version_id = VIRTIO_NET_VM_VERSION,
    .fields = (VMStateField[]) {
        VMSTATE_VIRTIO_DEVICE,
        VMSTATE_END_OF_LIST()
    },
    .pre_save = virtio_net_pre_save,
};

static Property virtio_net_properties[] = {
    DEFINE_PROP_BIT("csum", VirtIONet, host_features, VIRTIO_NET_F_CSUM, true),
    DEFINE_PROP_BIT("guest_csum", VirtIONet, host_features,
                    VIRTIO_NET_F_GUEST_CSUM, true),
    DEFINE_PROP_BIT("gso", VirtIONet, host_features, VIRTIO_NET_F_GSO, true),
    DEFINE_PROP_BIT("guest_tso4", VirtIONet, host_features,
                    VIRTIO_NET_F_GUEST_TSO4, true),
    DEFINE_PROP_BIT("guest_tso6", VirtIONet, host_features,
                    VIRTIO_NET_F_GUEST_TSO6, true),
    DEFINE_PROP_BIT("guest_ecn", VirtIONet, host_features,
                    VIRTIO_NET_F_GUEST_ECN, true),
    DEFINE_PROP_BIT("guest_ufo", VirtIONet, host_features,
                    VIRTIO_NET_F_GUEST_UFO, true),
    DEFINE_PROP_BIT("guest_announce", VirtIONet, host_features,
                    VIRTIO_NET_F_GUEST_ANNOUNCE, true),
    DEFINE_PROP_BIT("host_tso4", VirtIONet, host_features,
                    VIRTIO_NET_F_HOST_TSO4, true),
    DEFINE_PROP_BIT("host_tso6", VirtIONet, host_features,
                    VIRTIO_NET_F_HOST_TSO6, true),
    DEFINE_PROP_BIT("host_ecn", VirtIONet, host_features,
                    VIRTIO_NET_F_HOST_ECN, true),
    DEFINE_PROP_BIT("host_ufo", VirtIONet, host_features,
                    VIRTIO_NET_F_HOST_UFO, true),
    DEFINE_PROP_BIT("mrg_rxbuf", VirtIONet, host_features,
                    VIRTIO_NET_F_MRG_RXBUF, true),
    DEFINE_PROP_BIT("status", VirtIONet, host_features,
                    VIRTIO_NET_F_STATUS, true),
    DEFINE_PROP_BIT("ctrl_vq", VirtIONet, host_features,
                    VIRTIO_NET_F_CTRL_VQ, true),
    DEFINE_PROP_BIT("ctrl_rx", VirtIONet, host_features,
                    VIRTIO_NET_F_CTRL_RX, true),
    DEFINE_PROP_BIT("ctrl_vlan", VirtIONet, host_features,
                    VIRTIO_NET_F_CTRL_VLAN, true),
    DEFINE_PROP_BIT("ctrl_rx_extra", VirtIONet, host_features,
                    VIRTIO_NET_F_CTRL_RX_EXTRA, true),
    DEFINE_PROP_BIT("ctrl_mac_addr", VirtIONet, host_features,
                    VIRTIO_NET_F_CTRL_MAC_ADDR, true),
    DEFINE_PROP_BIT("ctrl_guest_offloads", VirtIONet, host_features,
                    VIRTIO_NET_F_CTRL_GUEST_OFFLOADS, true),
    DEFINE_PROP_BIT("mq", VirtIONet, host_features, VIRTIO_NET_F_MQ, false),
    DEFINE_NIC_PROPERTIES(VirtIONet, nic_conf),
    DEFINE_PROP_UINT32("x-txtimer", VirtIONet, net_conf.txtimer,
                       TX_TIMER_INTERVAL),
    DEFINE_PROP_INT32("x-txburst", VirtIONet, net_conf.txburst, TX_BURST),
    DEFINE_PROP_STRING("tx", VirtIONet, net_conf.tx),
    DEFINE_PROP_UINT16("rx_queue_size", VirtIONet, net_conf.rx_queue_size,
                       VIRTIO_NET_RX_QUEUE_DEFAULT_SIZE),
    DEFINE_PROP_UINT16("host_mtu", VirtIONet, net_conf.mtu, 0),
    DEFINE_PROP_BOOL("x-mtu-bypass-backend", VirtIONet, mtu_bypass_backend,
                     true),
    DEFINE_PROP_END_OF_LIST(),
};
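
/*
 * Illustrative command line (hypothetical values): the properties above
 * map directly to -device options, e.g.
 *
 *   -netdev tap,id=net0,queues=4 \
 *   -device virtio-net-pci,netdev=net0,mq=on,rx_queue_size=512
 *
 * "mq" defaults to off; "rx_queue_size" must be a power of two between
 * VIRTIO_NET_RX_QUEUE_MIN_SIZE and VIRTQUEUE_MAX_SIZE, as enforced in
 * virtio_net_device_realize().
 */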
static void virtio_net_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);

    dc->props = virtio_net_properties;
    dc->vmsd = &vmstate_virtio_net;
    set_bit(DEVICE_CATEGORY_NETWORK, dc->categories);
    vdc->realize = virtio_net_device_realize;
    vdc->unrealize = virtio_net_device_unrealize;
    vdc->get_config = virtio_net_get_config;
    vdc->set_config = virtio_net_set_config;
    vdc->get_features = virtio_net_get_features;
    vdc->set_features = virtio_net_set_features;
    vdc->bad_features = virtio_net_bad_features;
    vdc->reset = virtio_net_reset;
    vdc->set_status = virtio_net_set_status;
    vdc->guest_notifier_mask = virtio_net_guest_notifier_mask;
    vdc->guest_notifier_pending = virtio_net_guest_notifier_pending;
    vdc->legacy_features |= (0x1 << VIRTIO_NET_F_GSO);
    vdc->vmsd = &vmstate_virtio_net_device;
}

static const TypeInfo virtio_net_info = {
    .name = TYPE_VIRTIO_NET,
    .parent = TYPE_VIRTIO_DEVICE,
    .instance_size = sizeof(VirtIONet),
    .instance_init = virtio_net_instance_init,
    .class_init = virtio_net_class_init,
};

static void virtio_register_types(void)
{
    type_register_static(&virtio_net_info);
}

type_init(virtio_register_types)
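
/*
 * Registration note: type_init() arranges for virtio_register_types()
 * to run as a QOM module-init hook during startup, so TYPE_VIRTIO_NET
 * is known to the type system before any -device option is parsed.
 * The user-visible "virtio-net-pci" device is a transport proxy,
 * defined in the virtio PCI code, that embeds this device.
 */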