1 /* 2 * Virtio Network Device 3 * 4 * Copyright IBM, Corp. 2007 5 * 6 * Authors: 7 * Anthony Liguori <aliguori@us.ibm.com> 8 * 9 * This work is licensed under the terms of the GNU GPL, version 2. See 10 * the COPYING file in the top-level directory. 11 * 12 */ 13 14 #include "qemu/osdep.h" 15 #include "qemu/iov.h" 16 #include "hw/virtio/virtio.h" 17 #include "net/net.h" 18 #include "net/checksum.h" 19 #include "net/tap.h" 20 #include "qemu/error-report.h" 21 #include "qemu/timer.h" 22 #include "hw/virtio/virtio-net.h" 23 #include "net/vhost_net.h" 24 #include "hw/virtio/virtio-bus.h" 25 #include "qapi/qmp/qjson.h" 26 #include "qapi-event.h" 27 #include "hw/virtio/virtio-access.h" 28 29 #define VIRTIO_NET_VM_VERSION 11 30 31 #define MAC_TABLE_ENTRIES 64 32 #define MAX_VLAN (1 << 12) /* Per 802.1Q definition */ 33 34 /* 35 * Calculate the number of bytes up to and including the given 'field' of 36 * 'container'. 37 */ 38 #define endof(container, field) \ 39 (offsetof(container, field) + sizeof(((container *)0)->field)) 40 41 typedef struct VirtIOFeature { 42 uint32_t flags; 43 size_t end; 44 } VirtIOFeature; 45 46 static VirtIOFeature feature_sizes[] = { 47 {.flags = 1 << VIRTIO_NET_F_MAC, 48 .end = endof(struct virtio_net_config, mac)}, 49 {.flags = 1 << VIRTIO_NET_F_STATUS, 50 .end = endof(struct virtio_net_config, status)}, 51 {.flags = 1 << VIRTIO_NET_F_MQ, 52 .end = endof(struct virtio_net_config, max_virtqueue_pairs)}, 53 {} 54 }; 55 56 static VirtIONetQueue *virtio_net_get_subqueue(NetClientState *nc) 57 { 58 VirtIONet *n = qemu_get_nic_opaque(nc); 59 60 return &n->vqs[nc->queue_index]; 61 } 62 63 static int vq2q(int queue_index) 64 { 65 return queue_index / 2; 66 } 67 68 /* TODO 69 * - we could suppress RX interrupt if we were so inclined. 70 */ 71 72 static void virtio_net_get_config(VirtIODevice *vdev, uint8_t *config) 73 { 74 VirtIONet *n = VIRTIO_NET(vdev); 75 struct virtio_net_config netcfg; 76 77 virtio_stw_p(vdev, &netcfg.status, n->status); 78 virtio_stw_p(vdev, &netcfg.max_virtqueue_pairs, n->max_queues); 79 memcpy(netcfg.mac, n->mac, ETH_ALEN); 80 memcpy(config, &netcfg, n->config_size); 81 } 82 83 static void virtio_net_set_config(VirtIODevice *vdev, const uint8_t *config) 84 { 85 VirtIONet *n = VIRTIO_NET(vdev); 86 struct virtio_net_config netcfg = {}; 87 88 memcpy(&netcfg, config, n->config_size); 89 90 if (!virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR) && 91 !virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1) && 92 memcmp(netcfg.mac, n->mac, ETH_ALEN)) { 93 memcpy(n->mac, netcfg.mac, ETH_ALEN); 94 qemu_format_nic_info_str(qemu_get_queue(n->nic), n->mac); 95 } 96 } 97 98 static bool virtio_net_started(VirtIONet *n, uint8_t status) 99 { 100 VirtIODevice *vdev = VIRTIO_DEVICE(n); 101 return (status & VIRTIO_CONFIG_S_DRIVER_OK) && 102 (n->status & VIRTIO_NET_S_LINK_UP) && vdev->vm_running; 103 } 104 105 static void virtio_net_announce_timer(void *opaque) 106 { 107 VirtIONet *n = opaque; 108 VirtIODevice *vdev = VIRTIO_DEVICE(n); 109 110 n->announce_counter--; 111 n->status |= VIRTIO_NET_S_ANNOUNCE; 112 virtio_notify_config(vdev); 113 } 114 115 static void virtio_net_vhost_status(VirtIONet *n, uint8_t status) 116 { 117 VirtIODevice *vdev = VIRTIO_DEVICE(n); 118 NetClientState *nc = qemu_get_queue(n->nic); 119 int queues = n->multiqueue ? n->max_queues : 1; 120 121 if (!get_vhost_net(nc->peer)) { 122 return; 123 } 124 125 if ((virtio_net_started(n, status) && !nc->peer->link_down) == 126 !!n->vhost_started) { 127 return; 128 } 129 if (!n->vhost_started) { 130 int r, i; 131 132 if (n->needs_vnet_hdr_swap) { 133 error_report("backend does not support %s vnet headers; " 134 "falling back on userspace virtio", 135 virtio_is_big_endian(vdev) ? "BE" : "LE"); 136 return; 137 } 138 139 /* Any packets outstanding? Purge them to avoid touching rings 140 * when vhost is running. 141 */ 142 for (i = 0; i < queues; i++) { 143 NetClientState *qnc = qemu_get_subqueue(n->nic, i); 144 145 /* Purge both directions: TX and RX. */ 146 qemu_net_queue_purge(qnc->peer->incoming_queue, qnc); 147 qemu_net_queue_purge(qnc->incoming_queue, qnc->peer); 148 } 149 150 n->vhost_started = 1; 151 r = vhost_net_start(vdev, n->nic->ncs, queues); 152 if (r < 0) { 153 error_report("unable to start vhost net: %d: " 154 "falling back on userspace virtio", -r); 155 n->vhost_started = 0; 156 } 157 } else { 158 vhost_net_stop(vdev, n->nic->ncs, queues); 159 n->vhost_started = 0; 160 } 161 } 162 163 static int virtio_net_set_vnet_endian_one(VirtIODevice *vdev, 164 NetClientState *peer, 165 bool enable) 166 { 167 if (virtio_is_big_endian(vdev)) { 168 return qemu_set_vnet_be(peer, enable); 169 } else { 170 return qemu_set_vnet_le(peer, enable); 171 } 172 } 173 174 static bool virtio_net_set_vnet_endian(VirtIODevice *vdev, NetClientState *ncs, 175 int queues, bool enable) 176 { 177 int i; 178 179 for (i = 0; i < queues; i++) { 180 if (virtio_net_set_vnet_endian_one(vdev, ncs[i].peer, enable) < 0 && 181 enable) { 182 while (--i >= 0) { 183 virtio_net_set_vnet_endian_one(vdev, ncs[i].peer, false); 184 } 185 186 return true; 187 } 188 } 189 190 return false; 191 } 192 193 static void virtio_net_vnet_endian_status(VirtIONet *n, uint8_t status) 194 { 195 VirtIODevice *vdev = VIRTIO_DEVICE(n); 196 int queues = n->multiqueue ? n->max_queues : 1; 197 198 if (virtio_net_started(n, status)) { 199 /* Before using the device, we tell the network backend about the 200 * endianness to use when parsing vnet headers. If the backend 201 * can't do it, we fallback onto fixing the headers in the core 202 * virtio-net code. 203 */ 204 n->needs_vnet_hdr_swap = virtio_net_set_vnet_endian(vdev, n->nic->ncs, 205 queues, true); 206 } else if (virtio_net_started(n, vdev->status)) { 207 /* After using the device, we need to reset the network backend to 208 * the default (guest native endianness), otherwise the guest may 209 * lose network connectivity if it is rebooted into a different 210 * endianness. 211 */ 212 virtio_net_set_vnet_endian(vdev, n->nic->ncs, queues, false); 213 } 214 } 215 216 static void virtio_net_set_status(struct VirtIODevice *vdev, uint8_t status) 217 { 218 VirtIONet *n = VIRTIO_NET(vdev); 219 VirtIONetQueue *q; 220 int i; 221 uint8_t queue_status; 222 223 virtio_net_vnet_endian_status(n, status); 224 virtio_net_vhost_status(n, status); 225 226 for (i = 0; i < n->max_queues; i++) { 227 NetClientState *ncs = qemu_get_subqueue(n->nic, i); 228 bool queue_started; 229 q = &n->vqs[i]; 230 231 if ((!n->multiqueue && i != 0) || i >= n->curr_queues) { 232 queue_status = 0; 233 } else { 234 queue_status = status; 235 } 236 queue_started = 237 virtio_net_started(n, queue_status) && !n->vhost_started; 238 239 if (queue_started) { 240 qemu_flush_queued_packets(ncs); 241 } 242 243 if (!q->tx_waiting) { 244 continue; 245 } 246 247 if (queue_started) { 248 if (q->tx_timer) { 249 timer_mod(q->tx_timer, 250 qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + n->tx_timeout); 251 } else { 252 qemu_bh_schedule(q->tx_bh); 253 } 254 } else { 255 if (q->tx_timer) { 256 timer_del(q->tx_timer); 257 } else { 258 qemu_bh_cancel(q->tx_bh); 259 } 260 } 261 } 262 } 263 264 static void virtio_net_set_link_status(NetClientState *nc) 265 { 266 VirtIONet *n = qemu_get_nic_opaque(nc); 267 VirtIODevice *vdev = VIRTIO_DEVICE(n); 268 uint16_t old_status = n->status; 269 270 if (nc->link_down) 271 n->status &= ~VIRTIO_NET_S_LINK_UP; 272 else 273 n->status |= VIRTIO_NET_S_LINK_UP; 274 275 if (n->status != old_status) 276 virtio_notify_config(vdev); 277 278 virtio_net_set_status(vdev, vdev->status); 279 } 280 281 static void rxfilter_notify(NetClientState *nc) 282 { 283 VirtIONet *n = qemu_get_nic_opaque(nc); 284 285 if (nc->rxfilter_notify_enabled) { 286 gchar *path = object_get_canonical_path(OBJECT(n->qdev)); 287 qapi_event_send_nic_rx_filter_changed(!!n->netclient_name, 288 n->netclient_name, path, &error_abort); 289 g_free(path); 290 291 /* disable event notification to avoid events flooding */ 292 nc->rxfilter_notify_enabled = 0; 293 } 294 } 295 296 static intList *get_vlan_table(VirtIONet *n) 297 { 298 intList *list, *entry; 299 int i, j; 300 301 list = NULL; 302 for (i = 0; i < MAX_VLAN >> 5; i++) { 303 for (j = 0; n->vlans[i] && j <= 0x1f; j++) { 304 if (n->vlans[i] & (1U << j)) { 305 entry = g_malloc0(sizeof(*entry)); 306 entry->value = (i << 5) + j; 307 entry->next = list; 308 list = entry; 309 } 310 } 311 } 312 313 return list; 314 } 315 316 static RxFilterInfo *virtio_net_query_rxfilter(NetClientState *nc) 317 { 318 VirtIONet *n = qemu_get_nic_opaque(nc); 319 VirtIODevice *vdev = VIRTIO_DEVICE(n); 320 RxFilterInfo *info; 321 strList *str_list, *entry; 322 int i; 323 324 info = g_malloc0(sizeof(*info)); 325 info->name = g_strdup(nc->name); 326 info->promiscuous = n->promisc; 327 328 if (n->nouni) { 329 info->unicast = RX_STATE_NONE; 330 } else if (n->alluni) { 331 info->unicast = RX_STATE_ALL; 332 } else { 333 info->unicast = RX_STATE_NORMAL; 334 } 335 336 if (n->nomulti) { 337 info->multicast = RX_STATE_NONE; 338 } else if (n->allmulti) { 339 info->multicast = RX_STATE_ALL; 340 } else { 341 info->multicast = RX_STATE_NORMAL; 342 } 343 344 info->broadcast_allowed = n->nobcast; 345 info->multicast_overflow = n->mac_table.multi_overflow; 346 info->unicast_overflow = n->mac_table.uni_overflow; 347 348 info->main_mac = qemu_mac_strdup_printf(n->mac); 349 350 str_list = NULL; 351 for (i = 0; i < n->mac_table.first_multi; i++) { 352 entry = g_malloc0(sizeof(*entry)); 353 entry->value = qemu_mac_strdup_printf(n->mac_table.macs + i * ETH_ALEN); 354 entry->next = str_list; 355 str_list = entry; 356 } 357 info->unicast_table = str_list; 358 359 str_list = NULL; 360 for (i = n->mac_table.first_multi; i < n->mac_table.in_use; i++) { 361 entry = g_malloc0(sizeof(*entry)); 362 entry->value = qemu_mac_strdup_printf(n->mac_table.macs + i * ETH_ALEN); 363 entry->next = str_list; 364 str_list = entry; 365 } 366 info->multicast_table = str_list; 367 info->vlan_table = get_vlan_table(n); 368 369 if (!virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_VLAN)) { 370 info->vlan = RX_STATE_ALL; 371 } else if (!info->vlan_table) { 372 info->vlan = RX_STATE_NONE; 373 } else { 374 info->vlan = RX_STATE_NORMAL; 375 } 376 377 /* enable event notification after query */ 378 nc->rxfilter_notify_enabled = 1; 379 380 return info; 381 } 382 383 static void virtio_net_reset(VirtIODevice *vdev) 384 { 385 VirtIONet *n = VIRTIO_NET(vdev); 386 387 /* Reset back to compatibility mode */ 388 n->promisc = 1; 389 n->allmulti = 0; 390 n->alluni = 0; 391 n->nomulti = 0; 392 n->nouni = 0; 393 n->nobcast = 0; 394 /* multiqueue is disabled by default */ 395 n->curr_queues = 1; 396 timer_del(n->announce_timer); 397 n->announce_counter = 0; 398 n->status &= ~VIRTIO_NET_S_ANNOUNCE; 399 400 /* Flush any MAC and VLAN filter table state */ 401 n->mac_table.in_use = 0; 402 n->mac_table.first_multi = 0; 403 n->mac_table.multi_overflow = 0; 404 n->mac_table.uni_overflow = 0; 405 memset(n->mac_table.macs, 0, MAC_TABLE_ENTRIES * ETH_ALEN); 406 memcpy(&n->mac[0], &n->nic->conf->macaddr, sizeof(n->mac)); 407 qemu_format_nic_info_str(qemu_get_queue(n->nic), n->mac); 408 memset(n->vlans, 0, MAX_VLAN >> 3); 409 } 410 411 static void peer_test_vnet_hdr(VirtIONet *n) 412 { 413 NetClientState *nc = qemu_get_queue(n->nic); 414 if (!nc->peer) { 415 return; 416 } 417 418 n->has_vnet_hdr = qemu_has_vnet_hdr(nc->peer); 419 } 420 421 static int peer_has_vnet_hdr(VirtIONet *n) 422 { 423 return n->has_vnet_hdr; 424 } 425 426 static int peer_has_ufo(VirtIONet *n) 427 { 428 if (!peer_has_vnet_hdr(n)) 429 return 0; 430 431 n->has_ufo = qemu_has_ufo(qemu_get_queue(n->nic)->peer); 432 433 return n->has_ufo; 434 } 435 436 static void virtio_net_set_mrg_rx_bufs(VirtIONet *n, int mergeable_rx_bufs, 437 int version_1) 438 { 439 int i; 440 NetClientState *nc; 441 442 n->mergeable_rx_bufs = mergeable_rx_bufs; 443 444 if (version_1) { 445 n->guest_hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf); 446 } else { 447 n->guest_hdr_len = n->mergeable_rx_bufs ? 448 sizeof(struct virtio_net_hdr_mrg_rxbuf) : 449 sizeof(struct virtio_net_hdr); 450 } 451 452 for (i = 0; i < n->max_queues; i++) { 453 nc = qemu_get_subqueue(n->nic, i); 454 455 if (peer_has_vnet_hdr(n) && 456 qemu_has_vnet_hdr_len(nc->peer, n->guest_hdr_len)) { 457 qemu_set_vnet_hdr_len(nc->peer, n->guest_hdr_len); 458 n->host_hdr_len = n->guest_hdr_len; 459 } 460 } 461 } 462 463 static int peer_attach(VirtIONet *n, int index) 464 { 465 NetClientState *nc = qemu_get_subqueue(n->nic, index); 466 467 if (!nc->peer) { 468 return 0; 469 } 470 471 if (nc->peer->info->type == NET_CLIENT_OPTIONS_KIND_VHOST_USER) { 472 vhost_set_vring_enable(nc->peer, 1); 473 } 474 475 if (nc->peer->info->type != NET_CLIENT_OPTIONS_KIND_TAP) { 476 return 0; 477 } 478 479 return tap_enable(nc->peer); 480 } 481 482 static int peer_detach(VirtIONet *n, int index) 483 { 484 NetClientState *nc = qemu_get_subqueue(n->nic, index); 485 486 if (!nc->peer) { 487 return 0; 488 } 489 490 if (nc->peer->info->type == NET_CLIENT_OPTIONS_KIND_VHOST_USER) { 491 vhost_set_vring_enable(nc->peer, 0); 492 } 493 494 if (nc->peer->info->type != NET_CLIENT_OPTIONS_KIND_TAP) { 495 return 0; 496 } 497 498 return tap_disable(nc->peer); 499 } 500 501 static void virtio_net_set_queues(VirtIONet *n) 502 { 503 int i; 504 int r; 505 506 for (i = 0; i < n->max_queues; i++) { 507 if (i < n->curr_queues) { 508 r = peer_attach(n, i); 509 assert(!r); 510 } else { 511 r = peer_detach(n, i); 512 assert(!r); 513 } 514 } 515 } 516 517 static void virtio_net_set_multiqueue(VirtIONet *n, int multiqueue); 518 519 static uint64_t virtio_net_get_features(VirtIODevice *vdev, uint64_t features, 520 Error **errp) 521 { 522 VirtIONet *n = VIRTIO_NET(vdev); 523 NetClientState *nc = qemu_get_queue(n->nic); 524 525 /* Firstly sync all virtio-net possible supported features */ 526 features |= n->host_features; 527 528 virtio_add_feature(&features, VIRTIO_NET_F_MAC); 529 530 if (!peer_has_vnet_hdr(n)) { 531 virtio_clear_feature(&features, VIRTIO_NET_F_CSUM); 532 virtio_clear_feature(&features, VIRTIO_NET_F_HOST_TSO4); 533 virtio_clear_feature(&features, VIRTIO_NET_F_HOST_TSO6); 534 virtio_clear_feature(&features, VIRTIO_NET_F_HOST_ECN); 535 536 virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_CSUM); 537 virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_TSO4); 538 virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_TSO6); 539 virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_ECN); 540 } 541 542 if (!peer_has_vnet_hdr(n) || !peer_has_ufo(n)) { 543 virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_UFO); 544 virtio_clear_feature(&features, VIRTIO_NET_F_HOST_UFO); 545 } 546 547 if (!get_vhost_net(nc->peer)) { 548 return features; 549 } 550 return vhost_net_get_features(get_vhost_net(nc->peer), features); 551 } 552 553 static uint64_t virtio_net_bad_features(VirtIODevice *vdev) 554 { 555 uint64_t features = 0; 556 557 /* Linux kernel 2.6.25. It understood MAC (as everyone must), 558 * but also these: */ 559 virtio_add_feature(&features, VIRTIO_NET_F_MAC); 560 virtio_add_feature(&features, VIRTIO_NET_F_CSUM); 561 virtio_add_feature(&features, VIRTIO_NET_F_HOST_TSO4); 562 virtio_add_feature(&features, VIRTIO_NET_F_HOST_TSO6); 563 virtio_add_feature(&features, VIRTIO_NET_F_HOST_ECN); 564 565 return features; 566 } 567 568 static void virtio_net_apply_guest_offloads(VirtIONet *n) 569 { 570 qemu_set_offload(qemu_get_queue(n->nic)->peer, 571 !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_CSUM)), 572 !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_TSO4)), 573 !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_TSO6)), 574 !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_ECN)), 575 !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_UFO))); 576 } 577 578 static uint64_t virtio_net_guest_offloads_by_features(uint32_t features) 579 { 580 static const uint64_t guest_offloads_mask = 581 (1ULL << VIRTIO_NET_F_GUEST_CSUM) | 582 (1ULL << VIRTIO_NET_F_GUEST_TSO4) | 583 (1ULL << VIRTIO_NET_F_GUEST_TSO6) | 584 (1ULL << VIRTIO_NET_F_GUEST_ECN) | 585 (1ULL << VIRTIO_NET_F_GUEST_UFO); 586 587 return guest_offloads_mask & features; 588 } 589 590 static inline uint64_t virtio_net_supported_guest_offloads(VirtIONet *n) 591 { 592 VirtIODevice *vdev = VIRTIO_DEVICE(n); 593 return virtio_net_guest_offloads_by_features(vdev->guest_features); 594 } 595 596 static void virtio_net_set_features(VirtIODevice *vdev, uint64_t features) 597 { 598 VirtIONet *n = VIRTIO_NET(vdev); 599 int i; 600 601 virtio_net_set_multiqueue(n, 602 virtio_has_feature(features, VIRTIO_NET_F_MQ)); 603 604 virtio_net_set_mrg_rx_bufs(n, 605 virtio_has_feature(features, 606 VIRTIO_NET_F_MRG_RXBUF), 607 virtio_has_feature(features, 608 VIRTIO_F_VERSION_1)); 609 610 if (n->has_vnet_hdr) { 611 n->curr_guest_offloads = 612 virtio_net_guest_offloads_by_features(features); 613 virtio_net_apply_guest_offloads(n); 614 } 615 616 for (i = 0; i < n->max_queues; i++) { 617 NetClientState *nc = qemu_get_subqueue(n->nic, i); 618 619 if (!get_vhost_net(nc->peer)) { 620 continue; 621 } 622 vhost_net_ack_features(get_vhost_net(nc->peer), features); 623 } 624 625 if (virtio_has_feature(features, VIRTIO_NET_F_CTRL_VLAN)) { 626 memset(n->vlans, 0, MAX_VLAN >> 3); 627 } else { 628 memset(n->vlans, 0xff, MAX_VLAN >> 3); 629 } 630 } 631 632 static int virtio_net_handle_rx_mode(VirtIONet *n, uint8_t cmd, 633 struct iovec *iov, unsigned int iov_cnt) 634 { 635 uint8_t on; 636 size_t s; 637 NetClientState *nc = qemu_get_queue(n->nic); 638 639 s = iov_to_buf(iov, iov_cnt, 0, &on, sizeof(on)); 640 if (s != sizeof(on)) { 641 return VIRTIO_NET_ERR; 642 } 643 644 if (cmd == VIRTIO_NET_CTRL_RX_PROMISC) { 645 n->promisc = on; 646 } else if (cmd == VIRTIO_NET_CTRL_RX_ALLMULTI) { 647 n->allmulti = on; 648 } else if (cmd == VIRTIO_NET_CTRL_RX_ALLUNI) { 649 n->alluni = on; 650 } else if (cmd == VIRTIO_NET_CTRL_RX_NOMULTI) { 651 n->nomulti = on; 652 } else if (cmd == VIRTIO_NET_CTRL_RX_NOUNI) { 653 n->nouni = on; 654 } else if (cmd == VIRTIO_NET_CTRL_RX_NOBCAST) { 655 n->nobcast = on; 656 } else { 657 return VIRTIO_NET_ERR; 658 } 659 660 rxfilter_notify(nc); 661 662 return VIRTIO_NET_OK; 663 } 664 665 static int virtio_net_handle_offloads(VirtIONet *n, uint8_t cmd, 666 struct iovec *iov, unsigned int iov_cnt) 667 { 668 VirtIODevice *vdev = VIRTIO_DEVICE(n); 669 uint64_t offloads; 670 size_t s; 671 672 if (!virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS)) { 673 return VIRTIO_NET_ERR; 674 } 675 676 s = iov_to_buf(iov, iov_cnt, 0, &offloads, sizeof(offloads)); 677 if (s != sizeof(offloads)) { 678 return VIRTIO_NET_ERR; 679 } 680 681 if (cmd == VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET) { 682 uint64_t supported_offloads; 683 684 if (!n->has_vnet_hdr) { 685 return VIRTIO_NET_ERR; 686 } 687 688 supported_offloads = virtio_net_supported_guest_offloads(n); 689 if (offloads & ~supported_offloads) { 690 return VIRTIO_NET_ERR; 691 } 692 693 n->curr_guest_offloads = offloads; 694 virtio_net_apply_guest_offloads(n); 695 696 return VIRTIO_NET_OK; 697 } else { 698 return VIRTIO_NET_ERR; 699 } 700 } 701 702 static int virtio_net_handle_mac(VirtIONet *n, uint8_t cmd, 703 struct iovec *iov, unsigned int iov_cnt) 704 { 705 VirtIODevice *vdev = VIRTIO_DEVICE(n); 706 struct virtio_net_ctrl_mac mac_data; 707 size_t s; 708 NetClientState *nc = qemu_get_queue(n->nic); 709 710 if (cmd == VIRTIO_NET_CTRL_MAC_ADDR_SET) { 711 if (iov_size(iov, iov_cnt) != sizeof(n->mac)) { 712 return VIRTIO_NET_ERR; 713 } 714 s = iov_to_buf(iov, iov_cnt, 0, &n->mac, sizeof(n->mac)); 715 assert(s == sizeof(n->mac)); 716 qemu_format_nic_info_str(qemu_get_queue(n->nic), n->mac); 717 rxfilter_notify(nc); 718 719 return VIRTIO_NET_OK; 720 } 721 722 if (cmd != VIRTIO_NET_CTRL_MAC_TABLE_SET) { 723 return VIRTIO_NET_ERR; 724 } 725 726 int in_use = 0; 727 int first_multi = 0; 728 uint8_t uni_overflow = 0; 729 uint8_t multi_overflow = 0; 730 uint8_t *macs = g_malloc0(MAC_TABLE_ENTRIES * ETH_ALEN); 731 732 s = iov_to_buf(iov, iov_cnt, 0, &mac_data.entries, 733 sizeof(mac_data.entries)); 734 mac_data.entries = virtio_ldl_p(vdev, &mac_data.entries); 735 if (s != sizeof(mac_data.entries)) { 736 goto error; 737 } 738 iov_discard_front(&iov, &iov_cnt, s); 739 740 if (mac_data.entries * ETH_ALEN > iov_size(iov, iov_cnt)) { 741 goto error; 742 } 743 744 if (mac_data.entries <= MAC_TABLE_ENTRIES) { 745 s = iov_to_buf(iov, iov_cnt, 0, macs, 746 mac_data.entries * ETH_ALEN); 747 if (s != mac_data.entries * ETH_ALEN) { 748 goto error; 749 } 750 in_use += mac_data.entries; 751 } else { 752 uni_overflow = 1; 753 } 754 755 iov_discard_front(&iov, &iov_cnt, mac_data.entries * ETH_ALEN); 756 757 first_multi = in_use; 758 759 s = iov_to_buf(iov, iov_cnt, 0, &mac_data.entries, 760 sizeof(mac_data.entries)); 761 mac_data.entries = virtio_ldl_p(vdev, &mac_data.entries); 762 if (s != sizeof(mac_data.entries)) { 763 goto error; 764 } 765 766 iov_discard_front(&iov, &iov_cnt, s); 767 768 if (mac_data.entries * ETH_ALEN != iov_size(iov, iov_cnt)) { 769 goto error; 770 } 771 772 if (mac_data.entries <= MAC_TABLE_ENTRIES - in_use) { 773 s = iov_to_buf(iov, iov_cnt, 0, &macs[in_use * ETH_ALEN], 774 mac_data.entries * ETH_ALEN); 775 if (s != mac_data.entries * ETH_ALEN) { 776 goto error; 777 } 778 in_use += mac_data.entries; 779 } else { 780 multi_overflow = 1; 781 } 782 783 n->mac_table.in_use = in_use; 784 n->mac_table.first_multi = first_multi; 785 n->mac_table.uni_overflow = uni_overflow; 786 n->mac_table.multi_overflow = multi_overflow; 787 memcpy(n->mac_table.macs, macs, MAC_TABLE_ENTRIES * ETH_ALEN); 788 g_free(macs); 789 rxfilter_notify(nc); 790 791 return VIRTIO_NET_OK; 792 793 error: 794 g_free(macs); 795 return VIRTIO_NET_ERR; 796 } 797 798 static int virtio_net_handle_vlan_table(VirtIONet *n, uint8_t cmd, 799 struct iovec *iov, unsigned int iov_cnt) 800 { 801 VirtIODevice *vdev = VIRTIO_DEVICE(n); 802 uint16_t vid; 803 size_t s; 804 NetClientState *nc = qemu_get_queue(n->nic); 805 806 s = iov_to_buf(iov, iov_cnt, 0, &vid, sizeof(vid)); 807 vid = virtio_lduw_p(vdev, &vid); 808 if (s != sizeof(vid)) { 809 return VIRTIO_NET_ERR; 810 } 811 812 if (vid >= MAX_VLAN) 813 return VIRTIO_NET_ERR; 814 815 if (cmd == VIRTIO_NET_CTRL_VLAN_ADD) 816 n->vlans[vid >> 5] |= (1U << (vid & 0x1f)); 817 else if (cmd == VIRTIO_NET_CTRL_VLAN_DEL) 818 n->vlans[vid >> 5] &= ~(1U << (vid & 0x1f)); 819 else 820 return VIRTIO_NET_ERR; 821 822 rxfilter_notify(nc); 823 824 return VIRTIO_NET_OK; 825 } 826 827 static int virtio_net_handle_announce(VirtIONet *n, uint8_t cmd, 828 struct iovec *iov, unsigned int iov_cnt) 829 { 830 if (cmd == VIRTIO_NET_CTRL_ANNOUNCE_ACK && 831 n->status & VIRTIO_NET_S_ANNOUNCE) { 832 n->status &= ~VIRTIO_NET_S_ANNOUNCE; 833 if (n->announce_counter) { 834 timer_mod(n->announce_timer, 835 qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 836 self_announce_delay(n->announce_counter)); 837 } 838 return VIRTIO_NET_OK; 839 } else { 840 return VIRTIO_NET_ERR; 841 } 842 } 843 844 static int virtio_net_handle_mq(VirtIONet *n, uint8_t cmd, 845 struct iovec *iov, unsigned int iov_cnt) 846 { 847 VirtIODevice *vdev = VIRTIO_DEVICE(n); 848 struct virtio_net_ctrl_mq mq; 849 size_t s; 850 uint16_t queues; 851 852 s = iov_to_buf(iov, iov_cnt, 0, &mq, sizeof(mq)); 853 if (s != sizeof(mq)) { 854 return VIRTIO_NET_ERR; 855 } 856 857 if (cmd != VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET) { 858 return VIRTIO_NET_ERR; 859 } 860 861 queues = virtio_lduw_p(vdev, &mq.virtqueue_pairs); 862 863 if (queues < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN || 864 queues > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX || 865 queues > n->max_queues || 866 !n->multiqueue) { 867 return VIRTIO_NET_ERR; 868 } 869 870 n->curr_queues = queues; 871 /* stop the backend before changing the number of queues to avoid handling a 872 * disabled queue */ 873 virtio_net_set_status(vdev, vdev->status); 874 virtio_net_set_queues(n); 875 876 return VIRTIO_NET_OK; 877 } 878 static void virtio_net_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq) 879 { 880 VirtIONet *n = VIRTIO_NET(vdev); 881 struct virtio_net_ctrl_hdr ctrl; 882 virtio_net_ctrl_ack status = VIRTIO_NET_ERR; 883 VirtQueueElement *elem; 884 size_t s; 885 struct iovec *iov, *iov2; 886 unsigned int iov_cnt; 887 888 for (;;) { 889 elem = virtqueue_pop(vq, sizeof(VirtQueueElement)); 890 if (!elem) { 891 break; 892 } 893 if (iov_size(elem->in_sg, elem->in_num) < sizeof(status) || 894 iov_size(elem->out_sg, elem->out_num) < sizeof(ctrl)) { 895 error_report("virtio-net ctrl missing headers"); 896 exit(1); 897 } 898 899 iov_cnt = elem->out_num; 900 iov2 = iov = g_memdup(elem->out_sg, sizeof(struct iovec) * elem->out_num); 901 s = iov_to_buf(iov, iov_cnt, 0, &ctrl, sizeof(ctrl)); 902 iov_discard_front(&iov, &iov_cnt, sizeof(ctrl)); 903 if (s != sizeof(ctrl)) { 904 status = VIRTIO_NET_ERR; 905 } else if (ctrl.class == VIRTIO_NET_CTRL_RX) { 906 status = virtio_net_handle_rx_mode(n, ctrl.cmd, iov, iov_cnt); 907 } else if (ctrl.class == VIRTIO_NET_CTRL_MAC) { 908 status = virtio_net_handle_mac(n, ctrl.cmd, iov, iov_cnt); 909 } else if (ctrl.class == VIRTIO_NET_CTRL_VLAN) { 910 status = virtio_net_handle_vlan_table(n, ctrl.cmd, iov, iov_cnt); 911 } else if (ctrl.class == VIRTIO_NET_CTRL_ANNOUNCE) { 912 status = virtio_net_handle_announce(n, ctrl.cmd, iov, iov_cnt); 913 } else if (ctrl.class == VIRTIO_NET_CTRL_MQ) { 914 status = virtio_net_handle_mq(n, ctrl.cmd, iov, iov_cnt); 915 } else if (ctrl.class == VIRTIO_NET_CTRL_GUEST_OFFLOADS) { 916 status = virtio_net_handle_offloads(n, ctrl.cmd, iov, iov_cnt); 917 } 918 919 s = iov_from_buf(elem->in_sg, elem->in_num, 0, &status, sizeof(status)); 920 assert(s == sizeof(status)); 921 922 virtqueue_push(vq, elem, sizeof(status)); 923 virtio_notify(vdev, vq); 924 g_free(iov2); 925 g_free(elem); 926 } 927 } 928 929 /* RX */ 930 931 static void virtio_net_handle_rx(VirtIODevice *vdev, VirtQueue *vq) 932 { 933 VirtIONet *n = VIRTIO_NET(vdev); 934 int queue_index = vq2q(virtio_get_queue_index(vq)); 935 936 qemu_flush_queued_packets(qemu_get_subqueue(n->nic, queue_index)); 937 } 938 939 static int virtio_net_can_receive(NetClientState *nc) 940 { 941 VirtIONet *n = qemu_get_nic_opaque(nc); 942 VirtIODevice *vdev = VIRTIO_DEVICE(n); 943 VirtIONetQueue *q = virtio_net_get_subqueue(nc); 944 945 if (!vdev->vm_running) { 946 return 0; 947 } 948 949 if (nc->queue_index >= n->curr_queues) { 950 return 0; 951 } 952 953 if (!virtio_queue_ready(q->rx_vq) || 954 !(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK)) { 955 return 0; 956 } 957 958 return 1; 959 } 960 961 static int virtio_net_has_buffers(VirtIONetQueue *q, int bufsize) 962 { 963 VirtIONet *n = q->n; 964 if (virtio_queue_empty(q->rx_vq) || 965 (n->mergeable_rx_bufs && 966 !virtqueue_avail_bytes(q->rx_vq, bufsize, 0))) { 967 virtio_queue_set_notification(q->rx_vq, 1); 968 969 /* To avoid a race condition where the guest has made some buffers 970 * available after the above check but before notification was 971 * enabled, check for available buffers again. 972 */ 973 if (virtio_queue_empty(q->rx_vq) || 974 (n->mergeable_rx_bufs && 975 !virtqueue_avail_bytes(q->rx_vq, bufsize, 0))) { 976 return 0; 977 } 978 } 979 980 virtio_queue_set_notification(q->rx_vq, 0); 981 return 1; 982 } 983 984 static void virtio_net_hdr_swap(VirtIODevice *vdev, struct virtio_net_hdr *hdr) 985 { 986 virtio_tswap16s(vdev, &hdr->hdr_len); 987 virtio_tswap16s(vdev, &hdr->gso_size); 988 virtio_tswap16s(vdev, &hdr->csum_start); 989 virtio_tswap16s(vdev, &hdr->csum_offset); 990 } 991 992 /* dhclient uses AF_PACKET but doesn't pass auxdata to the kernel so 993 * it never finds out that the packets don't have valid checksums. This 994 * causes dhclient to get upset. Fedora's carried a patch for ages to 995 * fix this with Xen but it hasn't appeared in an upstream release of 996 * dhclient yet. 997 * 998 * To avoid breaking existing guests, we catch udp packets and add 999 * checksums. This is terrible but it's better than hacking the guest 1000 * kernels. 1001 * 1002 * N.B. if we introduce a zero-copy API, this operation is no longer free so 1003 * we should provide a mechanism to disable it to avoid polluting the host 1004 * cache. 1005 */ 1006 static void work_around_broken_dhclient(struct virtio_net_hdr *hdr, 1007 uint8_t *buf, size_t size) 1008 { 1009 if ((hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) && /* missing csum */ 1010 (size > 27 && size < 1500) && /* normal sized MTU */ 1011 (buf[12] == 0x08 && buf[13] == 0x00) && /* ethertype == IPv4 */ 1012 (buf[23] == 17) && /* ip.protocol == UDP */ 1013 (buf[34] == 0 && buf[35] == 67)) { /* udp.srcport == bootps */ 1014 net_checksum_calculate(buf, size); 1015 hdr->flags &= ~VIRTIO_NET_HDR_F_NEEDS_CSUM; 1016 } 1017 } 1018 1019 static void receive_header(VirtIONet *n, const struct iovec *iov, int iov_cnt, 1020 const void *buf, size_t size) 1021 { 1022 if (n->has_vnet_hdr) { 1023 /* FIXME this cast is evil */ 1024 void *wbuf = (void *)buf; 1025 work_around_broken_dhclient(wbuf, wbuf + n->host_hdr_len, 1026 size - n->host_hdr_len); 1027 1028 if (n->needs_vnet_hdr_swap) { 1029 virtio_net_hdr_swap(VIRTIO_DEVICE(n), wbuf); 1030 } 1031 iov_from_buf(iov, iov_cnt, 0, buf, sizeof(struct virtio_net_hdr)); 1032 } else { 1033 struct virtio_net_hdr hdr = { 1034 .flags = 0, 1035 .gso_type = VIRTIO_NET_HDR_GSO_NONE 1036 }; 1037 iov_from_buf(iov, iov_cnt, 0, &hdr, sizeof hdr); 1038 } 1039 } 1040 1041 static int receive_filter(VirtIONet *n, const uint8_t *buf, int size) 1042 { 1043 static const uint8_t bcast[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; 1044 static const uint8_t vlan[] = {0x81, 0x00}; 1045 uint8_t *ptr = (uint8_t *)buf; 1046 int i; 1047 1048 if (n->promisc) 1049 return 1; 1050 1051 ptr += n->host_hdr_len; 1052 1053 if (!memcmp(&ptr[12], vlan, sizeof(vlan))) { 1054 int vid = be16_to_cpup((uint16_t *)(ptr + 14)) & 0xfff; 1055 if (!(n->vlans[vid >> 5] & (1U << (vid & 0x1f)))) 1056 return 0; 1057 } 1058 1059 if (ptr[0] & 1) { // multicast 1060 if (!memcmp(ptr, bcast, sizeof(bcast))) { 1061 return !n->nobcast; 1062 } else if (n->nomulti) { 1063 return 0; 1064 } else if (n->allmulti || n->mac_table.multi_overflow) { 1065 return 1; 1066 } 1067 1068 for (i = n->mac_table.first_multi; i < n->mac_table.in_use; i++) { 1069 if (!memcmp(ptr, &n->mac_table.macs[i * ETH_ALEN], ETH_ALEN)) { 1070 return 1; 1071 } 1072 } 1073 } else { // unicast 1074 if (n->nouni) { 1075 return 0; 1076 } else if (n->alluni || n->mac_table.uni_overflow) { 1077 return 1; 1078 } else if (!memcmp(ptr, n->mac, ETH_ALEN)) { 1079 return 1; 1080 } 1081 1082 for (i = 0; i < n->mac_table.first_multi; i++) { 1083 if (!memcmp(ptr, &n->mac_table.macs[i * ETH_ALEN], ETH_ALEN)) { 1084 return 1; 1085 } 1086 } 1087 } 1088 1089 return 0; 1090 } 1091 1092 static ssize_t virtio_net_receive(NetClientState *nc, const uint8_t *buf, size_t size) 1093 { 1094 VirtIONet *n = qemu_get_nic_opaque(nc); 1095 VirtIONetQueue *q = virtio_net_get_subqueue(nc); 1096 VirtIODevice *vdev = VIRTIO_DEVICE(n); 1097 struct iovec mhdr_sg[VIRTQUEUE_MAX_SIZE]; 1098 struct virtio_net_hdr_mrg_rxbuf mhdr; 1099 unsigned mhdr_cnt = 0; 1100 size_t offset, i, guest_offset; 1101 1102 if (!virtio_net_can_receive(nc)) { 1103 return -1; 1104 } 1105 1106 /* hdr_len refers to the header we supply to the guest */ 1107 if (!virtio_net_has_buffers(q, size + n->guest_hdr_len - n->host_hdr_len)) { 1108 return 0; 1109 } 1110 1111 if (!receive_filter(n, buf, size)) 1112 return size; 1113 1114 offset = i = 0; 1115 1116 while (offset < size) { 1117 VirtQueueElement *elem; 1118 int len, total; 1119 const struct iovec *sg; 1120 1121 total = 0; 1122 1123 elem = virtqueue_pop(q->rx_vq, sizeof(VirtQueueElement)); 1124 if (!elem) { 1125 if (i == 0) 1126 return -1; 1127 error_report("virtio-net unexpected empty queue: " 1128 "i %zd mergeable %d offset %zd, size %zd, " 1129 "guest hdr len %zd, host hdr len %zd " 1130 "guest features 0x%" PRIx64, 1131 i, n->mergeable_rx_bufs, offset, size, 1132 n->guest_hdr_len, n->host_hdr_len, 1133 vdev->guest_features); 1134 exit(1); 1135 } 1136 1137 if (elem->in_num < 1) { 1138 error_report("virtio-net receive queue contains no in buffers"); 1139 exit(1); 1140 } 1141 1142 sg = elem->in_sg; 1143 if (i == 0) { 1144 assert(offset == 0); 1145 if (n->mergeable_rx_bufs) { 1146 mhdr_cnt = iov_copy(mhdr_sg, ARRAY_SIZE(mhdr_sg), 1147 sg, elem->in_num, 1148 offsetof(typeof(mhdr), num_buffers), 1149 sizeof(mhdr.num_buffers)); 1150 } 1151 1152 receive_header(n, sg, elem->in_num, buf, size); 1153 offset = n->host_hdr_len; 1154 total += n->guest_hdr_len; 1155 guest_offset = n->guest_hdr_len; 1156 } else { 1157 guest_offset = 0; 1158 } 1159 1160 /* copy in packet. ugh */ 1161 len = iov_from_buf(sg, elem->in_num, guest_offset, 1162 buf + offset, size - offset); 1163 total += len; 1164 offset += len; 1165 /* If buffers can't be merged, at this point we 1166 * must have consumed the complete packet. 1167 * Otherwise, drop it. */ 1168 if (!n->mergeable_rx_bufs && offset < size) { 1169 virtqueue_discard(q->rx_vq, elem, total); 1170 g_free(elem); 1171 return size; 1172 } 1173 1174 /* signal other side */ 1175 virtqueue_fill(q->rx_vq, elem, total, i++); 1176 g_free(elem); 1177 } 1178 1179 if (mhdr_cnt) { 1180 virtio_stw_p(vdev, &mhdr.num_buffers, i); 1181 iov_from_buf(mhdr_sg, mhdr_cnt, 1182 0, 1183 &mhdr.num_buffers, sizeof mhdr.num_buffers); 1184 } 1185 1186 virtqueue_flush(q->rx_vq, i); 1187 virtio_notify(vdev, q->rx_vq); 1188 1189 return size; 1190 } 1191 1192 static int32_t virtio_net_flush_tx(VirtIONetQueue *q); 1193 1194 static void virtio_net_tx_complete(NetClientState *nc, ssize_t len) 1195 { 1196 VirtIONet *n = qemu_get_nic_opaque(nc); 1197 VirtIONetQueue *q = virtio_net_get_subqueue(nc); 1198 VirtIODevice *vdev = VIRTIO_DEVICE(n); 1199 1200 virtqueue_push(q->tx_vq, q->async_tx.elem, 0); 1201 virtio_notify(vdev, q->tx_vq); 1202 1203 g_free(q->async_tx.elem); 1204 q->async_tx.elem = NULL; 1205 1206 virtio_queue_set_notification(q->tx_vq, 1); 1207 virtio_net_flush_tx(q); 1208 } 1209 1210 /* TX */ 1211 static int32_t virtio_net_flush_tx(VirtIONetQueue *q) 1212 { 1213 VirtIONet *n = q->n; 1214 VirtIODevice *vdev = VIRTIO_DEVICE(n); 1215 VirtQueueElement *elem; 1216 int32_t num_packets = 0; 1217 int queue_index = vq2q(virtio_get_queue_index(q->tx_vq)); 1218 if (!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK)) { 1219 return num_packets; 1220 } 1221 1222 if (q->async_tx.elem) { 1223 virtio_queue_set_notification(q->tx_vq, 0); 1224 return num_packets; 1225 } 1226 1227 for (;;) { 1228 ssize_t ret; 1229 unsigned int out_num; 1230 struct iovec sg[VIRTQUEUE_MAX_SIZE], sg2[VIRTQUEUE_MAX_SIZE + 1], *out_sg; 1231 struct virtio_net_hdr_mrg_rxbuf mhdr; 1232 1233 elem = virtqueue_pop(q->tx_vq, sizeof(VirtQueueElement)); 1234 if (!elem) { 1235 break; 1236 } 1237 1238 out_num = elem->out_num; 1239 out_sg = elem->out_sg; 1240 if (out_num < 1) { 1241 error_report("virtio-net header not in first element"); 1242 exit(1); 1243 } 1244 1245 if (n->has_vnet_hdr) { 1246 if (iov_to_buf(out_sg, out_num, 0, &mhdr, n->guest_hdr_len) < 1247 n->guest_hdr_len) { 1248 error_report("virtio-net header incorrect"); 1249 exit(1); 1250 } 1251 if (n->needs_vnet_hdr_swap) { 1252 virtio_net_hdr_swap(vdev, (void *) &mhdr); 1253 sg2[0].iov_base = &mhdr; 1254 sg2[0].iov_len = n->guest_hdr_len; 1255 out_num = iov_copy(&sg2[1], ARRAY_SIZE(sg2) - 1, 1256 out_sg, out_num, 1257 n->guest_hdr_len, -1); 1258 if (out_num == VIRTQUEUE_MAX_SIZE) { 1259 goto drop; 1260 } 1261 out_num += 1; 1262 out_sg = sg2; 1263 } 1264 } 1265 /* 1266 * If host wants to see the guest header as is, we can 1267 * pass it on unchanged. Otherwise, copy just the parts 1268 * that host is interested in. 1269 */ 1270 assert(n->host_hdr_len <= n->guest_hdr_len); 1271 if (n->host_hdr_len != n->guest_hdr_len) { 1272 unsigned sg_num = iov_copy(sg, ARRAY_SIZE(sg), 1273 out_sg, out_num, 1274 0, n->host_hdr_len); 1275 sg_num += iov_copy(sg + sg_num, ARRAY_SIZE(sg) - sg_num, 1276 out_sg, out_num, 1277 n->guest_hdr_len, -1); 1278 out_num = sg_num; 1279 out_sg = sg; 1280 } 1281 1282 ret = qemu_sendv_packet_async(qemu_get_subqueue(n->nic, queue_index), 1283 out_sg, out_num, virtio_net_tx_complete); 1284 if (ret == 0) { 1285 virtio_queue_set_notification(q->tx_vq, 0); 1286 q->async_tx.elem = elem; 1287 return -EBUSY; 1288 } 1289 1290 drop: 1291 virtqueue_push(q->tx_vq, elem, 0); 1292 virtio_notify(vdev, q->tx_vq); 1293 g_free(elem); 1294 1295 if (++num_packets >= n->tx_burst) { 1296 break; 1297 } 1298 } 1299 return num_packets; 1300 } 1301 1302 static void virtio_net_handle_tx_timer(VirtIODevice *vdev, VirtQueue *vq) 1303 { 1304 VirtIONet *n = VIRTIO_NET(vdev); 1305 VirtIONetQueue *q = &n->vqs[vq2q(virtio_get_queue_index(vq))]; 1306 1307 /* This happens when device was stopped but VCPU wasn't. */ 1308 if (!vdev->vm_running) { 1309 q->tx_waiting = 1; 1310 return; 1311 } 1312 1313 if (q->tx_waiting) { 1314 virtio_queue_set_notification(vq, 1); 1315 timer_del(q->tx_timer); 1316 q->tx_waiting = 0; 1317 virtio_net_flush_tx(q); 1318 } else { 1319 timer_mod(q->tx_timer, 1320 qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + n->tx_timeout); 1321 q->tx_waiting = 1; 1322 virtio_queue_set_notification(vq, 0); 1323 } 1324 } 1325 1326 static void virtio_net_handle_tx_bh(VirtIODevice *vdev, VirtQueue *vq) 1327 { 1328 VirtIONet *n = VIRTIO_NET(vdev); 1329 VirtIONetQueue *q = &n->vqs[vq2q(virtio_get_queue_index(vq))]; 1330 1331 if (unlikely(q->tx_waiting)) { 1332 return; 1333 } 1334 q->tx_waiting = 1; 1335 /* This happens when device was stopped but VCPU wasn't. */ 1336 if (!vdev->vm_running) { 1337 return; 1338 } 1339 virtio_queue_set_notification(vq, 0); 1340 qemu_bh_schedule(q->tx_bh); 1341 } 1342 1343 static void virtio_net_tx_timer(void *opaque) 1344 { 1345 VirtIONetQueue *q = opaque; 1346 VirtIONet *n = q->n; 1347 VirtIODevice *vdev = VIRTIO_DEVICE(n); 1348 /* This happens when device was stopped but BH wasn't. */ 1349 if (!vdev->vm_running) { 1350 /* Make sure tx waiting is set, so we'll run when restarted. */ 1351 assert(q->tx_waiting); 1352 return; 1353 } 1354 1355 q->tx_waiting = 0; 1356 1357 /* Just in case the driver is not ready on more */ 1358 if (!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK)) { 1359 return; 1360 } 1361 1362 virtio_queue_set_notification(q->tx_vq, 1); 1363 virtio_net_flush_tx(q); 1364 } 1365 1366 static void virtio_net_tx_bh(void *opaque) 1367 { 1368 VirtIONetQueue *q = opaque; 1369 VirtIONet *n = q->n; 1370 VirtIODevice *vdev = VIRTIO_DEVICE(n); 1371 int32_t ret; 1372 1373 /* This happens when device was stopped but BH wasn't. */ 1374 if (!vdev->vm_running) { 1375 /* Make sure tx waiting is set, so we'll run when restarted. */ 1376 assert(q->tx_waiting); 1377 return; 1378 } 1379 1380 q->tx_waiting = 0; 1381 1382 /* Just in case the driver is not ready on more */ 1383 if (unlikely(!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK))) { 1384 return; 1385 } 1386 1387 ret = virtio_net_flush_tx(q); 1388 if (ret == -EBUSY) { 1389 return; /* Notification re-enable handled by tx_complete */ 1390 } 1391 1392 /* If we flush a full burst of packets, assume there are 1393 * more coming and immediately reschedule */ 1394 if (ret >= n->tx_burst) { 1395 qemu_bh_schedule(q->tx_bh); 1396 q->tx_waiting = 1; 1397 return; 1398 } 1399 1400 /* If less than a full burst, re-enable notification and flush 1401 * anything that may have come in while we weren't looking. If 1402 * we find something, assume the guest is still active and reschedule */ 1403 virtio_queue_set_notification(q->tx_vq, 1); 1404 if (virtio_net_flush_tx(q) > 0) { 1405 virtio_queue_set_notification(q->tx_vq, 0); 1406 qemu_bh_schedule(q->tx_bh); 1407 q->tx_waiting = 1; 1408 } 1409 } 1410 1411 static void virtio_net_add_queue(VirtIONet *n, int index) 1412 { 1413 VirtIODevice *vdev = VIRTIO_DEVICE(n); 1414 1415 n->vqs[index].rx_vq = virtio_add_queue(vdev, 256, virtio_net_handle_rx); 1416 if (n->net_conf.tx && !strcmp(n->net_conf.tx, "timer")) { 1417 n->vqs[index].tx_vq = 1418 virtio_add_queue(vdev, 256, virtio_net_handle_tx_timer); 1419 n->vqs[index].tx_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, 1420 virtio_net_tx_timer, 1421 &n->vqs[index]); 1422 } else { 1423 n->vqs[index].tx_vq = 1424 virtio_add_queue(vdev, 256, virtio_net_handle_tx_bh); 1425 n->vqs[index].tx_bh = qemu_bh_new(virtio_net_tx_bh, &n->vqs[index]); 1426 } 1427 1428 n->vqs[index].tx_waiting = 0; 1429 n->vqs[index].n = n; 1430 } 1431 1432 static void virtio_net_del_queue(VirtIONet *n, int index) 1433 { 1434 VirtIODevice *vdev = VIRTIO_DEVICE(n); 1435 VirtIONetQueue *q = &n->vqs[index]; 1436 NetClientState *nc = qemu_get_subqueue(n->nic, index); 1437 1438 qemu_purge_queued_packets(nc); 1439 1440 virtio_del_queue(vdev, index * 2); 1441 if (q->tx_timer) { 1442 timer_del(q->tx_timer); 1443 timer_free(q->tx_timer); 1444 } else { 1445 qemu_bh_delete(q->tx_bh); 1446 } 1447 virtio_del_queue(vdev, index * 2 + 1); 1448 } 1449 1450 static void virtio_net_change_num_queues(VirtIONet *n, int new_max_queues) 1451 { 1452 VirtIODevice *vdev = VIRTIO_DEVICE(n); 1453 int old_num_queues = virtio_get_num_queues(vdev); 1454 int new_num_queues = new_max_queues * 2 + 1; 1455 int i; 1456 1457 assert(old_num_queues >= 3); 1458 assert(old_num_queues % 2 == 1); 1459 1460 if (old_num_queues == new_num_queues) { 1461 return; 1462 } 1463 1464 /* 1465 * We always need to remove and add ctrl vq if 1466 * old_num_queues != new_num_queues. Remove ctrl_vq first, 1467 * and then we only enter one of the following too loops. 1468 */ 1469 virtio_del_queue(vdev, old_num_queues - 1); 1470 1471 for (i = new_num_queues - 1; i < old_num_queues - 1; i += 2) { 1472 /* new_num_queues < old_num_queues */ 1473 virtio_net_del_queue(n, i / 2); 1474 } 1475 1476 for (i = old_num_queues - 1; i < new_num_queues - 1; i += 2) { 1477 /* new_num_queues > old_num_queues */ 1478 virtio_net_add_queue(n, i / 2); 1479 } 1480 1481 /* add ctrl_vq last */ 1482 n->ctrl_vq = virtio_add_queue(vdev, 64, virtio_net_handle_ctrl); 1483 } 1484 1485 static void virtio_net_set_multiqueue(VirtIONet *n, int multiqueue) 1486 { 1487 int max = multiqueue ? n->max_queues : 1; 1488 1489 n->multiqueue = multiqueue; 1490 virtio_net_change_num_queues(n, max); 1491 1492 virtio_net_set_queues(n); 1493 } 1494 1495 static void virtio_net_save(QEMUFile *f, void *opaque) 1496 { 1497 VirtIONet *n = opaque; 1498 VirtIODevice *vdev = VIRTIO_DEVICE(n); 1499 1500 /* At this point, backend must be stopped, otherwise 1501 * it might keep writing to memory. */ 1502 assert(!n->vhost_started); 1503 virtio_save(vdev, f); 1504 } 1505 1506 static void virtio_net_save_device(VirtIODevice *vdev, QEMUFile *f) 1507 { 1508 VirtIONet *n = VIRTIO_NET(vdev); 1509 int i; 1510 1511 qemu_put_buffer(f, n->mac, ETH_ALEN); 1512 qemu_put_be32(f, n->vqs[0].tx_waiting); 1513 qemu_put_be32(f, n->mergeable_rx_bufs); 1514 qemu_put_be16(f, n->status); 1515 qemu_put_byte(f, n->promisc); 1516 qemu_put_byte(f, n->allmulti); 1517 qemu_put_be32(f, n->mac_table.in_use); 1518 qemu_put_buffer(f, n->mac_table.macs, n->mac_table.in_use * ETH_ALEN); 1519 qemu_put_buffer(f, (uint8_t *)n->vlans, MAX_VLAN >> 3); 1520 qemu_put_be32(f, n->has_vnet_hdr); 1521 qemu_put_byte(f, n->mac_table.multi_overflow); 1522 qemu_put_byte(f, n->mac_table.uni_overflow); 1523 qemu_put_byte(f, n->alluni); 1524 qemu_put_byte(f, n->nomulti); 1525 qemu_put_byte(f, n->nouni); 1526 qemu_put_byte(f, n->nobcast); 1527 qemu_put_byte(f, n->has_ufo); 1528 if (n->max_queues > 1) { 1529 qemu_put_be16(f, n->max_queues); 1530 qemu_put_be16(f, n->curr_queues); 1531 for (i = 1; i < n->curr_queues; i++) { 1532 qemu_put_be32(f, n->vqs[i].tx_waiting); 1533 } 1534 } 1535 1536 if (virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS)) { 1537 qemu_put_be64(f, n->curr_guest_offloads); 1538 } 1539 } 1540 1541 static int virtio_net_load(QEMUFile *f, void *opaque, int version_id) 1542 { 1543 VirtIONet *n = opaque; 1544 VirtIODevice *vdev = VIRTIO_DEVICE(n); 1545 int ret; 1546 1547 if (version_id < 2 || version_id > VIRTIO_NET_VM_VERSION) 1548 return -EINVAL; 1549 1550 ret = virtio_load(vdev, f, version_id); 1551 if (ret) { 1552 return ret; 1553 } 1554 1555 if (virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS)) { 1556 n->curr_guest_offloads = qemu_get_be64(f); 1557 } else { 1558 n->curr_guest_offloads = virtio_net_supported_guest_offloads(n); 1559 } 1560 1561 if (peer_has_vnet_hdr(n)) { 1562 virtio_net_apply_guest_offloads(n); 1563 } 1564 1565 if (virtio_vdev_has_feature(vdev, VIRTIO_NET_F_GUEST_ANNOUNCE) && 1566 virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ)) { 1567 n->announce_counter = SELF_ANNOUNCE_ROUNDS; 1568 timer_mod(n->announce_timer, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL)); 1569 } 1570 1571 return 0; 1572 } 1573 1574 static int virtio_net_load_device(VirtIODevice *vdev, QEMUFile *f, 1575 int version_id) 1576 { 1577 VirtIONet *n = VIRTIO_NET(vdev); 1578 int i, link_down; 1579 1580 qemu_get_buffer(f, n->mac, ETH_ALEN); 1581 n->vqs[0].tx_waiting = qemu_get_be32(f); 1582 1583 virtio_net_set_mrg_rx_bufs(n, qemu_get_be32(f), 1584 virtio_vdev_has_feature(vdev, 1585 VIRTIO_F_VERSION_1)); 1586 1587 if (version_id >= 3) 1588 n->status = qemu_get_be16(f); 1589 1590 if (version_id >= 4) { 1591 if (version_id < 8) { 1592 n->promisc = qemu_get_be32(f); 1593 n->allmulti = qemu_get_be32(f); 1594 } else { 1595 n->promisc = qemu_get_byte(f); 1596 n->allmulti = qemu_get_byte(f); 1597 } 1598 } 1599 1600 if (version_id >= 5) { 1601 n->mac_table.in_use = qemu_get_be32(f); 1602 /* MAC_TABLE_ENTRIES may be different from the saved image */ 1603 if (n->mac_table.in_use <= MAC_TABLE_ENTRIES) { 1604 qemu_get_buffer(f, n->mac_table.macs, 1605 n->mac_table.in_use * ETH_ALEN); 1606 } else { 1607 int64_t i; 1608 1609 /* Overflow detected - can happen if source has a larger MAC table. 1610 * We simply set overflow flag so there's no need to maintain the 1611 * table of addresses, discard them all. 1612 * Note: 64 bit math to avoid integer overflow. 1613 */ 1614 for (i = 0; i < (int64_t)n->mac_table.in_use * ETH_ALEN; ++i) { 1615 qemu_get_byte(f); 1616 } 1617 n->mac_table.multi_overflow = n->mac_table.uni_overflow = 1; 1618 n->mac_table.in_use = 0; 1619 } 1620 } 1621 1622 if (version_id >= 6) 1623 qemu_get_buffer(f, (uint8_t *)n->vlans, MAX_VLAN >> 3); 1624 1625 if (version_id >= 7) { 1626 if (qemu_get_be32(f) && !peer_has_vnet_hdr(n)) { 1627 error_report("virtio-net: saved image requires vnet_hdr=on"); 1628 return -1; 1629 } 1630 } 1631 1632 if (version_id >= 9) { 1633 n->mac_table.multi_overflow = qemu_get_byte(f); 1634 n->mac_table.uni_overflow = qemu_get_byte(f); 1635 } 1636 1637 if (version_id >= 10) { 1638 n->alluni = qemu_get_byte(f); 1639 n->nomulti = qemu_get_byte(f); 1640 n->nouni = qemu_get_byte(f); 1641 n->nobcast = qemu_get_byte(f); 1642 } 1643 1644 if (version_id >= 11) { 1645 if (qemu_get_byte(f) && !peer_has_ufo(n)) { 1646 error_report("virtio-net: saved image requires TUN_F_UFO support"); 1647 return -1; 1648 } 1649 } 1650 1651 if (n->max_queues > 1) { 1652 if (n->max_queues != qemu_get_be16(f)) { 1653 error_report("virtio-net: different max_queues "); 1654 return -1; 1655 } 1656 1657 n->curr_queues = qemu_get_be16(f); 1658 if (n->curr_queues > n->max_queues) { 1659 error_report("virtio-net: curr_queues %x > max_queues %x", 1660 n->curr_queues, n->max_queues); 1661 return -1; 1662 } 1663 for (i = 1; i < n->curr_queues; i++) { 1664 n->vqs[i].tx_waiting = qemu_get_be32(f); 1665 } 1666 } 1667 1668 virtio_net_set_queues(n); 1669 1670 /* Find the first multicast entry in the saved MAC filter */ 1671 for (i = 0; i < n->mac_table.in_use; i++) { 1672 if (n->mac_table.macs[i * ETH_ALEN] & 1) { 1673 break; 1674 } 1675 } 1676 n->mac_table.first_multi = i; 1677 1678 /* nc.link_down can't be migrated, so infer link_down according 1679 * to link status bit in n->status */ 1680 link_down = (n->status & VIRTIO_NET_S_LINK_UP) == 0; 1681 for (i = 0; i < n->max_queues; i++) { 1682 qemu_get_subqueue(n->nic, i)->link_down = link_down; 1683 } 1684 1685 return 0; 1686 } 1687 1688 static NetClientInfo net_virtio_info = { 1689 .type = NET_CLIENT_OPTIONS_KIND_NIC, 1690 .size = sizeof(NICState), 1691 .can_receive = virtio_net_can_receive, 1692 .receive = virtio_net_receive, 1693 .link_status_changed = virtio_net_set_link_status, 1694 .query_rx_filter = virtio_net_query_rxfilter, 1695 }; 1696 1697 static bool virtio_net_guest_notifier_pending(VirtIODevice *vdev, int idx) 1698 { 1699 VirtIONet *n = VIRTIO_NET(vdev); 1700 NetClientState *nc = qemu_get_subqueue(n->nic, vq2q(idx)); 1701 assert(n->vhost_started); 1702 return vhost_net_virtqueue_pending(get_vhost_net(nc->peer), idx); 1703 } 1704 1705 static void virtio_net_guest_notifier_mask(VirtIODevice *vdev, int idx, 1706 bool mask) 1707 { 1708 VirtIONet *n = VIRTIO_NET(vdev); 1709 NetClientState *nc = qemu_get_subqueue(n->nic, vq2q(idx)); 1710 assert(n->vhost_started); 1711 vhost_net_virtqueue_mask(get_vhost_net(nc->peer), 1712 vdev, idx, mask); 1713 } 1714 1715 static void virtio_net_set_config_size(VirtIONet *n, uint64_t host_features) 1716 { 1717 int i, config_size = 0; 1718 virtio_add_feature(&host_features, VIRTIO_NET_F_MAC); 1719 for (i = 0; feature_sizes[i].flags != 0; i++) { 1720 if (host_features & feature_sizes[i].flags) { 1721 config_size = MAX(feature_sizes[i].end, config_size); 1722 } 1723 } 1724 n->config_size = config_size; 1725 } 1726 1727 void virtio_net_set_netclient_name(VirtIONet *n, const char *name, 1728 const char *type) 1729 { 1730 /* 1731 * The name can be NULL, the netclient name will be type.x. 1732 */ 1733 assert(type != NULL); 1734 1735 g_free(n->netclient_name); 1736 g_free(n->netclient_type); 1737 n->netclient_name = g_strdup(name); 1738 n->netclient_type = g_strdup(type); 1739 } 1740 1741 static void virtio_net_device_realize(DeviceState *dev, Error **errp) 1742 { 1743 VirtIODevice *vdev = VIRTIO_DEVICE(dev); 1744 VirtIONet *n = VIRTIO_NET(dev); 1745 NetClientState *nc; 1746 int i; 1747 1748 virtio_net_set_config_size(n, n->host_features); 1749 virtio_init(vdev, "virtio-net", VIRTIO_ID_NET, n->config_size); 1750 1751 n->max_queues = MAX(n->nic_conf.peers.queues, 1); 1752 if (n->max_queues * 2 + 1 > VIRTIO_QUEUE_MAX) { 1753 error_setg(errp, "Invalid number of queues (= %" PRIu32 "), " 1754 "must be a positive integer less than %d.", 1755 n->max_queues, (VIRTIO_QUEUE_MAX - 1) / 2); 1756 virtio_cleanup(vdev); 1757 return; 1758 } 1759 n->vqs = g_malloc0(sizeof(VirtIONetQueue) * n->max_queues); 1760 n->curr_queues = 1; 1761 n->tx_timeout = n->net_conf.txtimer; 1762 1763 if (n->net_conf.tx && strcmp(n->net_conf.tx, "timer") 1764 && strcmp(n->net_conf.tx, "bh")) { 1765 error_report("virtio-net: " 1766 "Unknown option tx=%s, valid options: \"timer\" \"bh\"", 1767 n->net_conf.tx); 1768 error_report("Defaulting to \"bh\""); 1769 } 1770 1771 for (i = 0; i < n->max_queues; i++) { 1772 virtio_net_add_queue(n, i); 1773 } 1774 1775 n->ctrl_vq = virtio_add_queue(vdev, 64, virtio_net_handle_ctrl); 1776 qemu_macaddr_default_if_unset(&n->nic_conf.macaddr); 1777 memcpy(&n->mac[0], &n->nic_conf.macaddr, sizeof(n->mac)); 1778 n->status = VIRTIO_NET_S_LINK_UP; 1779 n->announce_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL, 1780 virtio_net_announce_timer, n); 1781 1782 if (n->netclient_type) { 1783 /* 1784 * Happen when virtio_net_set_netclient_name has been called. 1785 */ 1786 n->nic = qemu_new_nic(&net_virtio_info, &n->nic_conf, 1787 n->netclient_type, n->netclient_name, n); 1788 } else { 1789 n->nic = qemu_new_nic(&net_virtio_info, &n->nic_conf, 1790 object_get_typename(OBJECT(dev)), dev->id, n); 1791 } 1792 1793 peer_test_vnet_hdr(n); 1794 if (peer_has_vnet_hdr(n)) { 1795 for (i = 0; i < n->max_queues; i++) { 1796 qemu_using_vnet_hdr(qemu_get_subqueue(n->nic, i)->peer, true); 1797 } 1798 n->host_hdr_len = sizeof(struct virtio_net_hdr); 1799 } else { 1800 n->host_hdr_len = 0; 1801 } 1802 1803 qemu_format_nic_info_str(qemu_get_queue(n->nic), n->nic_conf.macaddr.a); 1804 1805 n->vqs[0].tx_waiting = 0; 1806 n->tx_burst = n->net_conf.txburst; 1807 virtio_net_set_mrg_rx_bufs(n, 0, 0); 1808 n->promisc = 1; /* for compatibility */ 1809 1810 n->mac_table.macs = g_malloc0(MAC_TABLE_ENTRIES * ETH_ALEN); 1811 1812 n->vlans = g_malloc0(MAX_VLAN >> 3); 1813 1814 nc = qemu_get_queue(n->nic); 1815 nc->rxfilter_notify_enabled = 1; 1816 1817 n->qdev = dev; 1818 register_savevm(dev, "virtio-net", -1, VIRTIO_NET_VM_VERSION, 1819 virtio_net_save, virtio_net_load, n); 1820 } 1821 1822 static void virtio_net_device_unrealize(DeviceState *dev, Error **errp) 1823 { 1824 VirtIODevice *vdev = VIRTIO_DEVICE(dev); 1825 VirtIONet *n = VIRTIO_NET(dev); 1826 int i, max_queues; 1827 1828 /* This will stop vhost backend if appropriate. */ 1829 virtio_net_set_status(vdev, 0); 1830 1831 unregister_savevm(dev, "virtio-net", n); 1832 1833 g_free(n->netclient_name); 1834 n->netclient_name = NULL; 1835 g_free(n->netclient_type); 1836 n->netclient_type = NULL; 1837 1838 g_free(n->mac_table.macs); 1839 g_free(n->vlans); 1840 1841 max_queues = n->multiqueue ? n->max_queues : 1; 1842 for (i = 0; i < max_queues; i++) { 1843 virtio_net_del_queue(n, i); 1844 } 1845 1846 timer_del(n->announce_timer); 1847 timer_free(n->announce_timer); 1848 g_free(n->vqs); 1849 qemu_del_nic(n->nic); 1850 virtio_cleanup(vdev); 1851 } 1852 1853 static void virtio_net_instance_init(Object *obj) 1854 { 1855 VirtIONet *n = VIRTIO_NET(obj); 1856 1857 /* 1858 * The default config_size is sizeof(struct virtio_net_config). 1859 * Can be overriden with virtio_net_set_config_size. 1860 */ 1861 n->config_size = sizeof(struct virtio_net_config); 1862 device_add_bootindex_property(obj, &n->nic_conf.bootindex, 1863 "bootindex", "/ethernet-phy@0", 1864 DEVICE(n), NULL); 1865 } 1866 1867 static Property virtio_net_properties[] = { 1868 DEFINE_PROP_BIT("csum", VirtIONet, host_features, VIRTIO_NET_F_CSUM, true), 1869 DEFINE_PROP_BIT("guest_csum", VirtIONet, host_features, 1870 VIRTIO_NET_F_GUEST_CSUM, true), 1871 DEFINE_PROP_BIT("gso", VirtIONet, host_features, VIRTIO_NET_F_GSO, true), 1872 DEFINE_PROP_BIT("guest_tso4", VirtIONet, host_features, 1873 VIRTIO_NET_F_GUEST_TSO4, true), 1874 DEFINE_PROP_BIT("guest_tso6", VirtIONet, host_features, 1875 VIRTIO_NET_F_GUEST_TSO6, true), 1876 DEFINE_PROP_BIT("guest_ecn", VirtIONet, host_features, 1877 VIRTIO_NET_F_GUEST_ECN, true), 1878 DEFINE_PROP_BIT("guest_ufo", VirtIONet, host_features, 1879 VIRTIO_NET_F_GUEST_UFO, true), 1880 DEFINE_PROP_BIT("guest_announce", VirtIONet, host_features, 1881 VIRTIO_NET_F_GUEST_ANNOUNCE, true), 1882 DEFINE_PROP_BIT("host_tso4", VirtIONet, host_features, 1883 VIRTIO_NET_F_HOST_TSO4, true), 1884 DEFINE_PROP_BIT("host_tso6", VirtIONet, host_features, 1885 VIRTIO_NET_F_HOST_TSO6, true), 1886 DEFINE_PROP_BIT("host_ecn", VirtIONet, host_features, 1887 VIRTIO_NET_F_HOST_ECN, true), 1888 DEFINE_PROP_BIT("host_ufo", VirtIONet, host_features, 1889 VIRTIO_NET_F_HOST_UFO, true), 1890 DEFINE_PROP_BIT("mrg_rxbuf", VirtIONet, host_features, 1891 VIRTIO_NET_F_MRG_RXBUF, true), 1892 DEFINE_PROP_BIT("status", VirtIONet, host_features, 1893 VIRTIO_NET_F_STATUS, true), 1894 DEFINE_PROP_BIT("ctrl_vq", VirtIONet, host_features, 1895 VIRTIO_NET_F_CTRL_VQ, true), 1896 DEFINE_PROP_BIT("ctrl_rx", VirtIONet, host_features, 1897 VIRTIO_NET_F_CTRL_RX, true), 1898 DEFINE_PROP_BIT("ctrl_vlan", VirtIONet, host_features, 1899 VIRTIO_NET_F_CTRL_VLAN, true), 1900 DEFINE_PROP_BIT("ctrl_rx_extra", VirtIONet, host_features, 1901 VIRTIO_NET_F_CTRL_RX_EXTRA, true), 1902 DEFINE_PROP_BIT("ctrl_mac_addr", VirtIONet, host_features, 1903 VIRTIO_NET_F_CTRL_MAC_ADDR, true), 1904 DEFINE_PROP_BIT("ctrl_guest_offloads", VirtIONet, host_features, 1905 VIRTIO_NET_F_CTRL_GUEST_OFFLOADS, true), 1906 DEFINE_PROP_BIT("mq", VirtIONet, host_features, VIRTIO_NET_F_MQ, false), 1907 DEFINE_NIC_PROPERTIES(VirtIONet, nic_conf), 1908 DEFINE_PROP_UINT32("x-txtimer", VirtIONet, net_conf.txtimer, 1909 TX_TIMER_INTERVAL), 1910 DEFINE_PROP_INT32("x-txburst", VirtIONet, net_conf.txburst, TX_BURST), 1911 DEFINE_PROP_STRING("tx", VirtIONet, net_conf.tx), 1912 DEFINE_PROP_END_OF_LIST(), 1913 }; 1914 1915 static void virtio_net_class_init(ObjectClass *klass, void *data) 1916 { 1917 DeviceClass *dc = DEVICE_CLASS(klass); 1918 VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass); 1919 1920 dc->props = virtio_net_properties; 1921 set_bit(DEVICE_CATEGORY_NETWORK, dc->categories); 1922 vdc->realize = virtio_net_device_realize; 1923 vdc->unrealize = virtio_net_device_unrealize; 1924 vdc->get_config = virtio_net_get_config; 1925 vdc->set_config = virtio_net_set_config; 1926 vdc->get_features = virtio_net_get_features; 1927 vdc->set_features = virtio_net_set_features; 1928 vdc->bad_features = virtio_net_bad_features; 1929 vdc->reset = virtio_net_reset; 1930 vdc->set_status = virtio_net_set_status; 1931 vdc->guest_notifier_mask = virtio_net_guest_notifier_mask; 1932 vdc->guest_notifier_pending = virtio_net_guest_notifier_pending; 1933 vdc->load = virtio_net_load_device; 1934 vdc->save = virtio_net_save_device; 1935 } 1936 1937 static const TypeInfo virtio_net_info = { 1938 .name = TYPE_VIRTIO_NET, 1939 .parent = TYPE_VIRTIO_DEVICE, 1940 .instance_size = sizeof(VirtIONet), 1941 .instance_init = virtio_net_instance_init, 1942 .class_init = virtio_net_class_init, 1943 }; 1944 1945 static void virtio_register_types(void) 1946 { 1947 type_register_static(&virtio_net_info); 1948 } 1949 1950 type_init(virtio_register_types) 1951