/*
 * Virtio Network Device
 *
 * Copyright IBM, Corp. 2007
 *
 * Authors:
 *  Anthony Liguori <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 */

#include "qemu/iov.h"
#include "hw/virtio/virtio.h"
#include "net/net.h"
#include "net/checksum.h"
#include "net/tap.h"
#include "qemu/error-report.h"
#include "qemu/timer.h"
#include "hw/virtio/virtio-net.h"
#include "net/vhost_net.h"
#include "hw/virtio/virtio-bus.h"
#include "qapi/qmp/qjson.h"
#include "qapi-event.h"
#include "hw/virtio/virtio-access.h"

#define VIRTIO_NET_VM_VERSION 11

#define MAC_TABLE_ENTRIES 64
#define MAX_VLAN (1 << 12)   /* Per 802.1Q definition */

/*
 * Calculate the number of bytes up to and including the given 'field' of
 * 'container'.
 */
#define endof(container, field) \
    (offsetof(container, field) + sizeof(((container *)0)->field))

typedef struct VirtIOFeature {
    uint32_t flags;
    size_t end;
} VirtIOFeature;

static VirtIOFeature feature_sizes[] = {
    {.flags = 1 << VIRTIO_NET_F_MAC,
     .end = endof(struct virtio_net_config, mac)},
    {.flags = 1 << VIRTIO_NET_F_STATUS,
     .end = endof(struct virtio_net_config, status)},
    {.flags = 1 << VIRTIO_NET_F_MQ,
     .end = endof(struct virtio_net_config, max_virtqueue_pairs)},
    {}
};

static VirtIONetQueue *virtio_net_get_subqueue(NetClientState *nc)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);

    return &n->vqs[nc->queue_index];
}

static int vq2q(int queue_index)
{
    return queue_index / 2;
}

/* TODO
 * - we could suppress RX interrupt if we were so inclined.
 */

static void virtio_net_get_config(VirtIODevice *vdev, uint8_t *config)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    struct virtio_net_config netcfg;

    virtio_stw_p(vdev, &netcfg.status, n->status);
    virtio_stw_p(vdev, &netcfg.max_virtqueue_pairs, n->max_queues);
    memcpy(netcfg.mac, n->mac, ETH_ALEN);
    memcpy(config, &netcfg, n->config_size);
}

static void virtio_net_set_config(VirtIODevice *vdev, const uint8_t *config)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    struct virtio_net_config netcfg = {};

    memcpy(&netcfg, config, n->config_size);

    if (!virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR) &&
        !virtio_has_feature(vdev, VIRTIO_F_VERSION_1) &&
        memcmp(netcfg.mac, n->mac, ETH_ALEN)) {
        memcpy(n->mac, netcfg.mac, ETH_ALEN);
        qemu_format_nic_info_str(qemu_get_queue(n->nic), n->mac);
    }
}

static bool virtio_net_started(VirtIONet *n, uint8_t status)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    return (status & VIRTIO_CONFIG_S_DRIVER_OK) &&
        (n->status & VIRTIO_NET_S_LINK_UP) && vdev->vm_running;
}

static void virtio_net_announce_timer(void *opaque)
{
    VirtIONet *n = opaque;
    VirtIODevice *vdev = VIRTIO_DEVICE(n);

    n->announce_counter--;
    n->status |= VIRTIO_NET_S_ANNOUNCE;
    virtio_notify_config(vdev);
}

static void virtio_net_vhost_status(VirtIONet *n, uint8_t status)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    NetClientState *nc = qemu_get_queue(n->nic);
    int queues = n->multiqueue ? n->max_queues : 1;
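    /* Only the queue pairs the guest can actually use are handed to the
     * vhost backend: all of them when multiqueue was negotiated, otherwise
     * just queue pair 0. */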

    if (!get_vhost_net(nc->peer)) {
        return;
    }

    if ((virtio_net_started(n, status) && !nc->peer->link_down) ==
        !!n->vhost_started) {
        return;
    }
    if (!n->vhost_started) {
        int r, i;

        /* Any packets outstanding? Purge them to avoid touching rings
         * when vhost is running.
         */
        for (i = 0; i < queues; i++) {
            NetClientState *qnc = qemu_get_subqueue(n->nic, i);

            /* Purge both directions: TX and RX. */
            qemu_net_queue_purge(qnc->peer->incoming_queue, qnc);
            qemu_net_queue_purge(qnc->incoming_queue, qnc->peer);
        }

        n->vhost_started = 1;
        r = vhost_net_start(vdev, n->nic->ncs, queues);
        if (r < 0) {
            error_report("unable to start vhost net: %d: "
                         "falling back on userspace virtio", -r);
            n->vhost_started = 0;
        }
    } else {
        vhost_net_stop(vdev, n->nic->ncs, queues);
        n->vhost_started = 0;
    }
}

static void virtio_net_set_status(struct VirtIODevice *vdev, uint8_t status)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    VirtIONetQueue *q;
    int i;
    uint8_t queue_status;

    virtio_net_vhost_status(n, status);

    for (i = 0; i < n->max_queues; i++) {
        NetClientState *ncs = qemu_get_subqueue(n->nic, i);
        bool queue_started;
        q = &n->vqs[i];

        if ((!n->multiqueue && i != 0) || i >= n->curr_queues) {
            queue_status = 0;
        } else {
            queue_status = status;
        }
        queue_started =
            virtio_net_started(n, queue_status) && !n->vhost_started;

        if (queue_started) {
            qemu_flush_queued_packets(ncs);
        }

        if (!q->tx_waiting) {
            continue;
        }

        if (queue_started) {
            if (q->tx_timer) {
                timer_mod(q->tx_timer,
                          qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + n->tx_timeout);
            } else {
                qemu_bh_schedule(q->tx_bh);
            }
        } else {
            if (q->tx_timer) {
                timer_del(q->tx_timer);
            } else {
                qemu_bh_cancel(q->tx_bh);
            }
        }
    }
}

static void virtio_net_set_link_status(NetClientState *nc)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    uint16_t old_status = n->status;

    if (nc->link_down)
        n->status &= ~VIRTIO_NET_S_LINK_UP;
    else
        n->status |= VIRTIO_NET_S_LINK_UP;

    if (n->status != old_status)
        virtio_notify_config(vdev);

    virtio_net_set_status(vdev, vdev->status);
}

static void rxfilter_notify(NetClientState *nc)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);

    if (nc->rxfilter_notify_enabled) {
        gchar *path = object_get_canonical_path(OBJECT(n->qdev));
        qapi_event_send_nic_rx_filter_changed(!!n->netclient_name,
                                              n->netclient_name, path, &error_abort);
        g_free(path);

        /* disable event notification to avoid events flooding */
        nc->rxfilter_notify_enabled = 0;
    }
}

static intList *get_vlan_table(VirtIONet *n)
{
    intList *list, *entry;
    int i, j;

    list = NULL;
    for (i = 0; i < MAX_VLAN >> 5; i++) {
        for (j = 0; n->vlans[i] && j <= 0x1f; j++) {
            if (n->vlans[i] & (1U << j)) {
                entry = g_malloc0(sizeof(*entry));
                entry->value = (i << 5) + j;
                entry->next = list;
                list = entry;
            }
        }
    }

    return list;
}

static RxFilterInfo *virtio_net_query_rxfilter(NetClientState *nc)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    RxFilterInfo *info;
    strList *str_list, *entry;
    int i;

    info = g_malloc0(sizeof(*info));
    info->name = g_strdup(nc->name);
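    /* The remaining fields mirror the device's current RX filter state as
     * reported by the QMP query-rx-filter command. */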
    info->promiscuous = n->promisc;

    if (n->nouni) {
        info->unicast = RX_STATE_NONE;
    } else if (n->alluni) {
        info->unicast = RX_STATE_ALL;
    } else {
        info->unicast = RX_STATE_NORMAL;
    }

    if (n->nomulti) {
        info->multicast = RX_STATE_NONE;
    } else if (n->allmulti) {
        info->multicast = RX_STATE_ALL;
    } else {
        info->multicast = RX_STATE_NORMAL;
    }

    info->broadcast_allowed = n->nobcast;
    info->multicast_overflow = n->mac_table.multi_overflow;
    info->unicast_overflow = n->mac_table.uni_overflow;

    info->main_mac = qemu_mac_strdup_printf(n->mac);

    str_list = NULL;
    for (i = 0; i < n->mac_table.first_multi; i++) {
        entry = g_malloc0(sizeof(*entry));
        entry->value = qemu_mac_strdup_printf(n->mac_table.macs + i * ETH_ALEN);
        entry->next = str_list;
        str_list = entry;
    }
    info->unicast_table = str_list;

    str_list = NULL;
    for (i = n->mac_table.first_multi; i < n->mac_table.in_use; i++) {
        entry = g_malloc0(sizeof(*entry));
        entry->value = qemu_mac_strdup_printf(n->mac_table.macs + i * ETH_ALEN);
        entry->next = str_list;
        str_list = entry;
    }
    info->multicast_table = str_list;
    info->vlan_table = get_vlan_table(n);

    if (!virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VLAN)) {
        info->vlan = RX_STATE_ALL;
    } else if (!info->vlan_table) {
        info->vlan = RX_STATE_NONE;
    } else {
        info->vlan = RX_STATE_NORMAL;
    }

    /* enable event notification after query */
    nc->rxfilter_notify_enabled = 1;

    return info;
}

static void virtio_net_reset(VirtIODevice *vdev)
{
    VirtIONet *n = VIRTIO_NET(vdev);

    /* Reset back to compatibility mode */
    n->promisc = 1;
    n->allmulti = 0;
    n->alluni = 0;
    n->nomulti = 0;
    n->nouni = 0;
    n->nobcast = 0;
    /* multiqueue is disabled by default */
    n->curr_queues = 1;
    timer_del(n->announce_timer);
    n->announce_counter = 0;
    n->status &= ~VIRTIO_NET_S_ANNOUNCE;

    /* Flush any MAC and VLAN filter table state */
    n->mac_table.in_use = 0;
    n->mac_table.first_multi = 0;
    n->mac_table.multi_overflow = 0;
    n->mac_table.uni_overflow = 0;
    memset(n->mac_table.macs, 0, MAC_TABLE_ENTRIES * ETH_ALEN);
    memcpy(&n->mac[0], &n->nic->conf->macaddr, sizeof(n->mac));
    qemu_format_nic_info_str(qemu_get_queue(n->nic), n->mac);
    memset(n->vlans, 0, MAX_VLAN >> 3);
}

static void peer_test_vnet_hdr(VirtIONet *n)
{
    NetClientState *nc = qemu_get_queue(n->nic);
    if (!nc->peer) {
        return;
    }

    n->has_vnet_hdr = qemu_has_vnet_hdr(nc->peer);
}

static int peer_has_vnet_hdr(VirtIONet *n)
{
    return n->has_vnet_hdr;
}

static int peer_has_ufo(VirtIONet *n)
{
    if (!peer_has_vnet_hdr(n))
        return 0;

    n->has_ufo = qemu_has_ufo(qemu_get_queue(n->nic)->peer);

    return n->has_ufo;
}

static void virtio_net_set_mrg_rx_bufs(VirtIONet *n, int mergeable_rx_bufs,
                                       int version_1)
{
    int i;
    NetClientState *nc;

    n->mergeable_rx_bufs = mergeable_rx_bufs;

    if (version_1) {
        n->guest_hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf);
    } else {
        n->guest_hdr_len = n->mergeable_rx_bufs ?
            sizeof(struct virtio_net_hdr_mrg_rxbuf) :
            sizeof(struct virtio_net_hdr);
    }

    for (i = 0; i < n->max_queues; i++) {
        nc = qemu_get_subqueue(n->nic, i);

        if (peer_has_vnet_hdr(n) &&
            qemu_has_vnet_hdr_len(nc->peer, n->guest_hdr_len)) {
            qemu_set_vnet_hdr_len(nc->peer, n->guest_hdr_len);
            n->host_hdr_len = n->guest_hdr_len;
        }
    }
}

static int peer_attach(VirtIONet *n, int index)
{
    NetClientState *nc = qemu_get_subqueue(n->nic, index);

    if (!nc->peer) {
        return 0;
    }

    if (nc->peer->info->type != NET_CLIENT_OPTIONS_KIND_TAP) {
        return 0;
    }

    return tap_enable(nc->peer);
}

static int peer_detach(VirtIONet *n, int index)
{
    NetClientState *nc = qemu_get_subqueue(n->nic, index);

    if (!nc->peer) {
        return 0;
    }

    if (nc->peer->info->type != NET_CLIENT_OPTIONS_KIND_TAP) {
        return 0;
    }

    return tap_disable(nc->peer);
}

static void virtio_net_set_queues(VirtIONet *n)
{
    int i;
    int r;

    for (i = 0; i < n->max_queues; i++) {
        if (i < n->curr_queues) {
            r = peer_attach(n, i);
            assert(!r);
        } else {
            r = peer_detach(n, i);
            assert(!r);
        }
    }
}

static void virtio_net_set_multiqueue(VirtIONet *n, int multiqueue);

static uint64_t virtio_net_get_features(VirtIODevice *vdev, uint64_t features)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    NetClientState *nc = qemu_get_queue(n->nic);

    /* Firstly sync all virtio-net possible supported features */
    features |= n->host_features;

    virtio_add_feature(&features, VIRTIO_NET_F_MAC);

    if (!peer_has_vnet_hdr(n)) {
        virtio_clear_feature(&features, VIRTIO_NET_F_CSUM);
        virtio_clear_feature(&features, VIRTIO_NET_F_HOST_TSO4);
        virtio_clear_feature(&features, VIRTIO_NET_F_HOST_TSO6);
        virtio_clear_feature(&features, VIRTIO_NET_F_HOST_ECN);

        virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_CSUM);
        virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_TSO4);
        virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_TSO6);
        virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_ECN);
    }

    if (!peer_has_vnet_hdr(n) || !peer_has_ufo(n)) {
        virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_UFO);
        virtio_clear_feature(&features, VIRTIO_NET_F_HOST_UFO);
    }

    if (!get_vhost_net(nc->peer)) {
        return features;
    }
    return vhost_net_get_features(get_vhost_net(nc->peer), features);
}

static uint64_t virtio_net_bad_features(VirtIODevice *vdev)
{
    uint64_t features = 0;

    /* Linux kernel 2.6.25. It understood MAC (as everyone must),
     * but also these: */
    virtio_add_feature(&features, VIRTIO_NET_F_MAC);
    virtio_add_feature(&features, VIRTIO_NET_F_CSUM);
    virtio_add_feature(&features, VIRTIO_NET_F_HOST_TSO4);
    virtio_add_feature(&features, VIRTIO_NET_F_HOST_TSO6);
    virtio_add_feature(&features, VIRTIO_NET_F_HOST_ECN);

    return features;
}

static void virtio_net_apply_guest_offloads(VirtIONet *n)
{
    qemu_set_offload(qemu_get_queue(n->nic)->peer,
        !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_CSUM)),
        !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_TSO4)),
        !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_TSO6)),
        !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_ECN)),
        !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_UFO)));
}

static uint64_t virtio_net_guest_offloads_by_features(uint32_t features)
{
    static const uint64_t guest_offloads_mask =
        (1ULL << VIRTIO_NET_F_GUEST_CSUM) |
        (1ULL << VIRTIO_NET_F_GUEST_TSO4) |
        (1ULL << VIRTIO_NET_F_GUEST_TSO6) |
        (1ULL << VIRTIO_NET_F_GUEST_ECN)  |
        (1ULL << VIRTIO_NET_F_GUEST_UFO);

    return guest_offloads_mask & features;
}

static inline uint64_t virtio_net_supported_guest_offloads(VirtIONet *n)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    return virtio_net_guest_offloads_by_features(vdev->guest_features);
}

static void virtio_net_set_features(VirtIODevice *vdev, uint64_t features)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    int i;

    virtio_net_set_multiqueue(n,
                              __virtio_has_feature(features, VIRTIO_NET_F_MQ));

    virtio_net_set_mrg_rx_bufs(n,
                               __virtio_has_feature(features,
                                                    VIRTIO_NET_F_MRG_RXBUF),
                               __virtio_has_feature(features,
                                                    VIRTIO_F_VERSION_1));

    if (n->has_vnet_hdr) {
        n->curr_guest_offloads =
            virtio_net_guest_offloads_by_features(features);
        virtio_net_apply_guest_offloads(n);
    }

    for (i = 0; i < n->max_queues; i++) {
        NetClientState *nc = qemu_get_subqueue(n->nic, i);

        if (!get_vhost_net(nc->peer)) {
            continue;
        }
        vhost_net_ack_features(get_vhost_net(nc->peer), features);
    }

    if (__virtio_has_feature(features, VIRTIO_NET_F_CTRL_VLAN)) {
        memset(n->vlans, 0, MAX_VLAN >> 3);
    } else {
        memset(n->vlans, 0xff, MAX_VLAN >> 3);
    }
}

static int virtio_net_handle_rx_mode(VirtIONet *n, uint8_t cmd,
                                     struct iovec *iov, unsigned int iov_cnt)
{
    uint8_t on;
    size_t s;
    NetClientState *nc = qemu_get_queue(n->nic);

    s = iov_to_buf(iov, iov_cnt, 0, &on, sizeof(on));
    if (s != sizeof(on)) {
        return VIRTIO_NET_ERR;
    }

    if (cmd == VIRTIO_NET_CTRL_RX_PROMISC) {
        n->promisc = on;
    } else if (cmd == VIRTIO_NET_CTRL_RX_ALLMULTI) {
        n->allmulti = on;
    } else if (cmd == VIRTIO_NET_CTRL_RX_ALLUNI) {
        n->alluni = on;
    } else if (cmd == VIRTIO_NET_CTRL_RX_NOMULTI) {
        n->nomulti = on;
    } else if (cmd == VIRTIO_NET_CTRL_RX_NOUNI) {
        n->nouni = on;
    } else if (cmd == VIRTIO_NET_CTRL_RX_NOBCAST) {
        n->nobcast = on;
    } else {
        return VIRTIO_NET_ERR;
    }

    rxfilter_notify(nc);

    return VIRTIO_NET_OK;
}

static int virtio_net_handle_offloads(VirtIONet *n, uint8_t cmd,
                                      struct iovec *iov, unsigned int iov_cnt)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    uint64_t offloads;
    size_t s;

    if (!virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS)) {
        return VIRTIO_NET_ERR;
    }

    s = iov_to_buf(iov, iov_cnt, 0, &offloads, sizeof(offloads));
    if (s != sizeof(offloads)) {
        return VIRTIO_NET_ERR;
    }

    if (cmd == VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET) {
        uint64_t supported_offloads;

        if (!n->has_vnet_hdr) {
            return VIRTIO_NET_ERR;
        }

        supported_offloads = virtio_net_supported_guest_offloads(n);
        if (offloads & ~supported_offloads) {
            return VIRTIO_NET_ERR;
        }

        n->curr_guest_offloads = offloads;
        virtio_net_apply_guest_offloads(n);

        return VIRTIO_NET_OK;
    } else {
        return VIRTIO_NET_ERR;
    }
}

static int virtio_net_handle_mac(VirtIONet *n, uint8_t cmd,
                                 struct iovec *iov, unsigned int iov_cnt)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    struct virtio_net_ctrl_mac mac_data;
    size_t s;
    NetClientState *nc = qemu_get_queue(n->nic);

    if (cmd == VIRTIO_NET_CTRL_MAC_ADDR_SET) {
        if (iov_size(iov, iov_cnt) != sizeof(n->mac)) {
            return VIRTIO_NET_ERR;
        }
        s = iov_to_buf(iov, iov_cnt, 0, &n->mac, sizeof(n->mac));
        assert(s == sizeof(n->mac));
        qemu_format_nic_info_str(qemu_get_queue(n->nic), n->mac);
        rxfilter_notify(nc);

        return VIRTIO_NET_OK;
    }

    if (cmd != VIRTIO_NET_CTRL_MAC_TABLE_SET) {
        return VIRTIO_NET_ERR;
    }

    int in_use = 0;
    int first_multi = 0;
    uint8_t uni_overflow = 0;
    uint8_t multi_overflow = 0;
    uint8_t *macs = g_malloc0(MAC_TABLE_ENTRIES * ETH_ALEN);

    s = iov_to_buf(iov, iov_cnt, 0, &mac_data.entries,
                   sizeof(mac_data.entries));
    mac_data.entries = virtio_ldl_p(vdev, &mac_data.entries);
    if (s != sizeof(mac_data.entries)) {
        goto error;
    }
    iov_discard_front(&iov, &iov_cnt, s);

    if (mac_data.entries * ETH_ALEN > iov_size(iov, iov_cnt)) {
        goto error;
    }

    if (mac_data.entries <= MAC_TABLE_ENTRIES) {
        s = iov_to_buf(iov, iov_cnt, 0, macs,
                       mac_data.entries * ETH_ALEN);
        if (s != mac_data.entries * ETH_ALEN) {
            goto error;
        }
        in_use += mac_data.entries;
    } else {
        uni_overflow = 1;
    }

    iov_discard_front(&iov, &iov_cnt, mac_data.entries * ETH_ALEN);

    first_multi = in_use;

    s = iov_to_buf(iov, iov_cnt, 0, &mac_data.entries,
                   sizeof(mac_data.entries));
    mac_data.entries = virtio_ldl_p(vdev, &mac_data.entries);
    if (s != sizeof(mac_data.entries)) {
        goto error;
    }

    iov_discard_front(&iov, &iov_cnt, s);

    if (mac_data.entries * ETH_ALEN != iov_size(iov, iov_cnt)) {
        goto error;
    }

    if (mac_data.entries <= MAC_TABLE_ENTRIES - in_use) {
        s = iov_to_buf(iov, iov_cnt, 0, &macs[in_use * ETH_ALEN],
                       mac_data.entries * ETH_ALEN);
        if (s != mac_data.entries * ETH_ALEN) {
            goto error;
        }
        in_use += mac_data.entries;
    } else {
        multi_overflow = 1;
    }

    n->mac_table.in_use = in_use;
    n->mac_table.first_multi = first_multi;
    n->mac_table.uni_overflow = uni_overflow;
    n->mac_table.multi_overflow = multi_overflow;
    memcpy(n->mac_table.macs, macs, MAC_TABLE_ENTRIES * ETH_ALEN);
    g_free(macs);
    rxfilter_notify(nc);

    return VIRTIO_NET_OK;

error:
    g_free(macs);
    return VIRTIO_NET_ERR;
}

static int virtio_net_handle_vlan_table(VirtIONet *n, uint8_t cmd,
                                        struct iovec *iov, unsigned int iov_cnt)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    uint16_t vid;
    size_t s;
    NetClientState *nc = qemu_get_queue(n->nic);

    s = iov_to_buf(iov, iov_cnt, 0, &vid, sizeof(vid));
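    /* The guest writes the VLAN id in the endianness negotiated for the
     * device, so convert it before using it as an index into the bitmap. */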
    vid = virtio_lduw_p(vdev, &vid);
    if (s != sizeof(vid)) {
        return VIRTIO_NET_ERR;
    }

    if (vid >= MAX_VLAN)
        return VIRTIO_NET_ERR;

    if (cmd == VIRTIO_NET_CTRL_VLAN_ADD)
        n->vlans[vid >> 5] |= (1U << (vid & 0x1f));
    else if (cmd == VIRTIO_NET_CTRL_VLAN_DEL)
        n->vlans[vid >> 5] &= ~(1U << (vid & 0x1f));
    else
        return VIRTIO_NET_ERR;

    rxfilter_notify(nc);

    return VIRTIO_NET_OK;
}

static int virtio_net_handle_announce(VirtIONet *n, uint8_t cmd,
                                      struct iovec *iov, unsigned int iov_cnt)
{
    if (cmd == VIRTIO_NET_CTRL_ANNOUNCE_ACK &&
        n->status & VIRTIO_NET_S_ANNOUNCE) {
        n->status &= ~VIRTIO_NET_S_ANNOUNCE;
        if (n->announce_counter) {
            timer_mod(n->announce_timer,
                      qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) +
                      self_announce_delay(n->announce_counter));
        }
        return VIRTIO_NET_OK;
    } else {
        return VIRTIO_NET_ERR;
    }
}

static int virtio_net_handle_mq(VirtIONet *n, uint8_t cmd,
                                struct iovec *iov, unsigned int iov_cnt)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    struct virtio_net_ctrl_mq mq;
    size_t s;
    uint16_t queues;

    s = iov_to_buf(iov, iov_cnt, 0, &mq, sizeof(mq));
    if (s != sizeof(mq)) {
        return VIRTIO_NET_ERR;
    }

    if (cmd != VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET) {
        return VIRTIO_NET_ERR;
    }

    queues = virtio_lduw_p(vdev, &mq.virtqueue_pairs);

    if (queues < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN ||
        queues > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX ||
        queues > n->max_queues ||
        !n->multiqueue) {
        return VIRTIO_NET_ERR;
    }

    n->curr_queues = queues;
    /* stop the backend before changing the number of queues to avoid handling a
     * disabled queue */
    virtio_net_set_status(vdev, vdev->status);
    virtio_net_set_queues(n);

    return VIRTIO_NET_OK;
}

static void virtio_net_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    struct virtio_net_ctrl_hdr ctrl;
    virtio_net_ctrl_ack status = VIRTIO_NET_ERR;
    VirtQueueElement elem;
    size_t s;
    struct iovec *iov, *iov2;
    unsigned int iov_cnt;

    while (virtqueue_pop(vq, &elem)) {
        if (iov_size(elem.in_sg, elem.in_num) < sizeof(status) ||
            iov_size(elem.out_sg, elem.out_num) < sizeof(ctrl)) {
            error_report("virtio-net ctrl missing headers");
            exit(1);
        }

        iov_cnt = elem.out_num;
        iov2 = iov = g_memdup(elem.out_sg, sizeof(struct iovec) * elem.out_num);
        s = iov_to_buf(iov, iov_cnt, 0, &ctrl, sizeof(ctrl));
        iov_discard_front(&iov, &iov_cnt, sizeof(ctrl));
        if (s != sizeof(ctrl)) {
            status = VIRTIO_NET_ERR;
        } else if (ctrl.class == VIRTIO_NET_CTRL_RX) {
            status = virtio_net_handle_rx_mode(n, ctrl.cmd, iov, iov_cnt);
        } else if (ctrl.class == VIRTIO_NET_CTRL_MAC) {
            status = virtio_net_handle_mac(n, ctrl.cmd, iov, iov_cnt);
        } else if (ctrl.class == VIRTIO_NET_CTRL_VLAN) {
            status = virtio_net_handle_vlan_table(n, ctrl.cmd, iov, iov_cnt);
        } else if (ctrl.class == VIRTIO_NET_CTRL_ANNOUNCE) {
            status = virtio_net_handle_announce(n, ctrl.cmd, iov, iov_cnt);
        } else if (ctrl.class == VIRTIO_NET_CTRL_MQ) {
            status = virtio_net_handle_mq(n, ctrl.cmd, iov, iov_cnt);
        } else if (ctrl.class == VIRTIO_NET_CTRL_GUEST_OFFLOADS) {
            status = virtio_net_handle_offloads(n, ctrl.cmd, iov, iov_cnt);
        }

        s = iov_from_buf(elem.in_sg, elem.in_num, 0, &status, sizeof(status));
        assert(s == sizeof(status));

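        /* Write the one-byte ack into the request's in-buffer and complete
         * the element so the guest sees the command status. */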
        virtqueue_push(vq, &elem, sizeof(status));
        virtio_notify(vdev, vq);
        g_free(iov2);
    }
}

/* RX */

static void virtio_net_handle_rx(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    int queue_index = vq2q(virtio_get_queue_index(vq));

    qemu_flush_queued_packets(qemu_get_subqueue(n->nic, queue_index));
}

static int virtio_net_can_receive(NetClientState *nc)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    VirtIONetQueue *q = virtio_net_get_subqueue(nc);

    if (!vdev->vm_running) {
        return 0;
    }

    if (nc->queue_index >= n->curr_queues) {
        return 0;
    }

    if (!virtio_queue_ready(q->rx_vq) ||
        !(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK)) {
        return 0;
    }

    return 1;
}

static int virtio_net_has_buffers(VirtIONetQueue *q, int bufsize)
{
    VirtIONet *n = q->n;
    if (virtio_queue_empty(q->rx_vq) ||
        (n->mergeable_rx_bufs &&
         !virtqueue_avail_bytes(q->rx_vq, bufsize, 0))) {
        virtio_queue_set_notification(q->rx_vq, 1);

        /* To avoid a race condition where the guest has made some buffers
         * available after the above check but before notification was
         * enabled, check for available buffers again.
         */
        if (virtio_queue_empty(q->rx_vq) ||
            (n->mergeable_rx_bufs &&
             !virtqueue_avail_bytes(q->rx_vq, bufsize, 0))) {
            return 0;
        }
    }

    virtio_queue_set_notification(q->rx_vq, 0);
    return 1;
}

static void virtio_net_hdr_swap(VirtIODevice *vdev, struct virtio_net_hdr *hdr)
{
    virtio_tswap16s(vdev, &hdr->hdr_len);
    virtio_tswap16s(vdev, &hdr->gso_size);
    virtio_tswap16s(vdev, &hdr->csum_start);
    virtio_tswap16s(vdev, &hdr->csum_offset);
}

/* dhclient uses AF_PACKET but doesn't pass auxdata to the kernel so
 * it never finds out that the packets don't have valid checksums. This
 * causes dhclient to get upset. Fedora's carried a patch for ages to
 * fix this with Xen but it hasn't appeared in an upstream release of
 * dhclient yet.
 *
 * To avoid breaking existing guests, we catch udp packets and add
 * checksums. This is terrible but it's better than hacking the guest
 * kernels.
 *
 * N.B. if we introduce a zero-copy API, this operation is no longer free so
 * we should provide a mechanism to disable it to avoid polluting the host
 * cache.
 */
static void work_around_broken_dhclient(struct virtio_net_hdr *hdr,
                                        uint8_t *buf, size_t size)
{
    if ((hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) && /* missing csum */
        (size > 27 && size < 1500) && /* normal sized MTU */
        (buf[12] == 0x08 && buf[13] == 0x00) && /* ethertype == IPv4 */
        (buf[23] == 17) && /* ip.protocol == UDP */
        (buf[34] == 0 && buf[35] == 67)) { /* udp.srcport == bootps */
        net_checksum_calculate(buf, size);
        hdr->flags &= ~VIRTIO_NET_HDR_F_NEEDS_CSUM;
    }
}

static void receive_header(VirtIONet *n, const struct iovec *iov, int iov_cnt,
                           const void *buf, size_t size)
{
    if (n->has_vnet_hdr) {
        /* FIXME this cast is evil */
        void *wbuf = (void *)buf;
        work_around_broken_dhclient(wbuf, wbuf + n->host_hdr_len,
                                    size - n->host_hdr_len);
        virtio_net_hdr_swap(VIRTIO_DEVICE(n), wbuf);
        iov_from_buf(iov, iov_cnt, 0, buf, sizeof(struct virtio_net_hdr));
    } else {
        struct virtio_net_hdr hdr = {
            .flags = 0,
            .gso_type = VIRTIO_NET_HDR_GSO_NONE
        };
        iov_from_buf(iov, iov_cnt, 0, &hdr, sizeof hdr);
    }
}

static int receive_filter(VirtIONet *n, const uint8_t *buf, int size)
{
    static const uint8_t bcast[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
    static const uint8_t vlan[] = {0x81, 0x00};
    uint8_t *ptr = (uint8_t *)buf;
    int i;

    if (n->promisc)
        return 1;

    ptr += n->host_hdr_len;

    if (!memcmp(&ptr[12], vlan, sizeof(vlan))) {
        int vid = be16_to_cpup((uint16_t *)(ptr + 14)) & 0xfff;
        if (!(n->vlans[vid >> 5] & (1U << (vid & 0x1f))))
            return 0;
    }

    if (ptr[0] & 1) { // multicast
        if (!memcmp(ptr, bcast, sizeof(bcast))) {
            return !n->nobcast;
        } else if (n->nomulti) {
            return 0;
        } else if (n->allmulti || n->mac_table.multi_overflow) {
            return 1;
        }

        for (i = n->mac_table.first_multi; i < n->mac_table.in_use; i++) {
            if (!memcmp(ptr, &n->mac_table.macs[i * ETH_ALEN], ETH_ALEN)) {
                return 1;
            }
        }
    } else { // unicast
        if (n->nouni) {
            return 0;
        } else if (n->alluni || n->mac_table.uni_overflow) {
            return 1;
        } else if (!memcmp(ptr, n->mac, ETH_ALEN)) {
            return 1;
        }

        for (i = 0; i < n->mac_table.first_multi; i++) {
            if (!memcmp(ptr, &n->mac_table.macs[i * ETH_ALEN], ETH_ALEN)) {
                return 1;
            }
        }
    }

    return 0;
}

static ssize_t virtio_net_receive(NetClientState *nc, const uint8_t *buf, size_t size)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);
    VirtIONetQueue *q = virtio_net_get_subqueue(nc);
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    struct iovec mhdr_sg[VIRTQUEUE_MAX_SIZE];
    struct virtio_net_hdr_mrg_rxbuf mhdr;
    unsigned mhdr_cnt = 0;
    size_t offset, i, guest_offset;

    if (!virtio_net_can_receive(nc)) {
        return -1;
    }

    /* hdr_len refers to the header we supply to the guest */
    if (!virtio_net_has_buffers(q, size + n->guest_hdr_len - n->host_hdr_len)) {
        return 0;
    }

    if (!receive_filter(n, buf, size))
        return size;

    offset = i = 0;

    while (offset < size) {
        VirtQueueElement elem;
        int len, total;
        const struct iovec *sg = elem.in_sg;

        total = 0;

        if (virtqueue_pop(q->rx_vq, &elem) == 0) {
            if (i == 0)
                return -1;
            error_report("virtio-net unexpected empty queue: "
                         "i %zd mergeable %d offset %zd, size %zd, "
                         "guest hdr len %zd, host hdr len %zd "
                         "guest features 0x%" PRIx64,
                         i, n->mergeable_rx_bufs, offset, size,
                         n->guest_hdr_len, n->host_hdr_len,
                         vdev->guest_features);
            exit(1);
        }

        if (elem.in_num < 1) {
            error_report("virtio-net receive queue contains no in buffers");
            exit(1);
        }

        if (i == 0) {
            assert(offset == 0);
            if (n->mergeable_rx_bufs) {
                mhdr_cnt = iov_copy(mhdr_sg, ARRAY_SIZE(mhdr_sg),
                                    sg, elem.in_num,
                                    offsetof(typeof(mhdr), num_buffers),
                                    sizeof(mhdr.num_buffers));
            }

            receive_header(n, sg, elem.in_num, buf, size);
            offset = n->host_hdr_len;
            total += n->guest_hdr_len;
            guest_offset = n->guest_hdr_len;
        } else {
            guest_offset = 0;
        }

        /* copy in packet.  ugh */
        len = iov_from_buf(sg, elem.in_num, guest_offset,
                           buf + offset, size - offset);
        total += len;
        offset += len;
        /* If buffers can't be merged, at this point we
         * must have consumed the complete packet.
         * Otherwise, drop it. */
        if (!n->mergeable_rx_bufs && offset < size) {
#if 0
            error_report("virtio-net truncated non-mergeable packet: "
                         "i %zd mergeable %d offset %zd, size %zd, "
                         "guest hdr len %zd, host hdr len %zd",
                         i, n->mergeable_rx_bufs,
                         offset, size, n->guest_hdr_len, n->host_hdr_len);
#endif
            return size;
        }

        /* signal other side */
        virtqueue_fill(q->rx_vq, &elem, total, i++);
    }

    if (mhdr_cnt) {
        virtio_stw_p(vdev, &mhdr.num_buffers, i);
        iov_from_buf(mhdr_sg, mhdr_cnt,
                     0,
                     &mhdr.num_buffers, sizeof mhdr.num_buffers);
    }

    virtqueue_flush(q->rx_vq, i);
    virtio_notify(vdev, q->rx_vq);

    return size;
}

static int32_t virtio_net_flush_tx(VirtIONetQueue *q);

static void virtio_net_tx_complete(NetClientState *nc, ssize_t len)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);
    VirtIONetQueue *q = virtio_net_get_subqueue(nc);
    VirtIODevice *vdev = VIRTIO_DEVICE(n);

    virtqueue_push(q->tx_vq, &q->async_tx.elem, 0);
    virtio_notify(vdev, q->tx_vq);

    q->async_tx.elem.out_num = q->async_tx.len = 0;

    virtio_queue_set_notification(q->tx_vq, 1);
    virtio_net_flush_tx(q);
}

/* TX */
static int32_t virtio_net_flush_tx(VirtIONetQueue *q)
{
    VirtIONet *n = q->n;
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    VirtQueueElement elem;
    int32_t num_packets = 0;
    int queue_index = vq2q(virtio_get_queue_index(q->tx_vq));
    if (!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK)) {
        return num_packets;
    }

    if (q->async_tx.elem.out_num) {
        virtio_queue_set_notification(q->tx_vq, 0);
        return num_packets;
    }

    while (virtqueue_pop(q->tx_vq, &elem)) {
        ssize_t ret, len;
        unsigned int out_num = elem.out_num;
        struct iovec *out_sg = &elem.out_sg[0];
        struct iovec sg[VIRTQUEUE_MAX_SIZE], sg2[VIRTQUEUE_MAX_SIZE + 1];
        struct virtio_net_hdr_mrg_rxbuf mhdr;

        if (out_num < 1) {
            error_report("virtio-net header not in first element");
            exit(1);
        }

        if (n->has_vnet_hdr) {
            if (iov_to_buf(out_sg, out_num, 0, &mhdr, n->guest_hdr_len) <
                n->guest_hdr_len) {
                error_report("virtio-net header incorrect");
                exit(1);
            }
            if (virtio_needs_swap(vdev)) {
                virtio_net_hdr_swap(vdev, (void *) &mhdr);
                sg2[0].iov_base = &mhdr;
                sg2[0].iov_len = n->guest_hdr_len;
                out_num = iov_copy(&sg2[1], ARRAY_SIZE(sg2) - 1,
                                   out_sg, out_num,
                                   n->guest_hdr_len, -1);
                if (out_num == VIRTQUEUE_MAX_SIZE) {
                    goto drop;
                }
                out_num += 1;
                out_sg = sg2;
            }
        }
        /*
         * If host wants to see the guest header as is, we can
         * pass it on unchanged. Otherwise, copy just the parts
         * that host is interested in.
         */
        assert(n->host_hdr_len <= n->guest_hdr_len);
        if (n->host_hdr_len != n->guest_hdr_len) {
            unsigned sg_num = iov_copy(sg, ARRAY_SIZE(sg),
                                       out_sg, out_num,
                                       0, n->host_hdr_len);
            sg_num += iov_copy(sg + sg_num, ARRAY_SIZE(sg) - sg_num,
                               out_sg, out_num,
                               n->guest_hdr_len, -1);
            out_num = sg_num;
            out_sg = sg;
        }

        len = n->guest_hdr_len;

        ret = qemu_sendv_packet_async(qemu_get_subqueue(n->nic, queue_index),
                                      out_sg, out_num, virtio_net_tx_complete);
        if (ret == 0) {
            virtio_queue_set_notification(q->tx_vq, 0);
            q->async_tx.elem = elem;
            q->async_tx.len = len;
            return -EBUSY;
        }

        len += ret;
drop:
        virtqueue_push(q->tx_vq, &elem, 0);
        virtio_notify(vdev, q->tx_vq);

        if (++num_packets >= n->tx_burst) {
            break;
        }
    }
    return num_packets;
}

static void virtio_net_handle_tx_timer(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    VirtIONetQueue *q = &n->vqs[vq2q(virtio_get_queue_index(vq))];

    /* This happens when device was stopped but VCPU wasn't. */
    if (!vdev->vm_running) {
        q->tx_waiting = 1;
        return;
    }

    if (q->tx_waiting) {
        virtio_queue_set_notification(vq, 1);
        timer_del(q->tx_timer);
        q->tx_waiting = 0;
        virtio_net_flush_tx(q);
    } else {
        timer_mod(q->tx_timer,
                  qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + n->tx_timeout);
        q->tx_waiting = 1;
        virtio_queue_set_notification(vq, 0);
    }
}

static void virtio_net_handle_tx_bh(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    VirtIONetQueue *q = &n->vqs[vq2q(virtio_get_queue_index(vq))];

    if (unlikely(q->tx_waiting)) {
        return;
    }
    q->tx_waiting = 1;
    /* This happens when device was stopped but VCPU wasn't. */
    if (!vdev->vm_running) {
        return;
    }
    virtio_queue_set_notification(vq, 0);
    qemu_bh_schedule(q->tx_bh);
}

static void virtio_net_tx_timer(void *opaque)
{
    VirtIONetQueue *q = opaque;
    VirtIONet *n = q->n;
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    /* This happens when device was stopped but BH wasn't. */
    if (!vdev->vm_running) {
        /* Make sure tx waiting is set, so we'll run when restarted. */
        assert(q->tx_waiting);
        return;
    }

    q->tx_waiting = 0;

    /* Just in case the driver is not ready any more */
    if (!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK)) {
        return;
    }

    virtio_queue_set_notification(q->tx_vq, 1);
    virtio_net_flush_tx(q);
}

static void virtio_net_tx_bh(void *opaque)
{
    VirtIONetQueue *q = opaque;
    VirtIONet *n = q->n;
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    int32_t ret;

    /* This happens when device was stopped but BH wasn't. */
    if (!vdev->vm_running) {
        /* Make sure tx waiting is set, so we'll run when restarted.
         */
        assert(q->tx_waiting);
        return;
    }

    q->tx_waiting = 0;

    /* Just in case the driver is not ready any more */
    if (unlikely(!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK))) {
        return;
    }

    ret = virtio_net_flush_tx(q);
    if (ret == -EBUSY) {
        return; /* Notification re-enable handled by tx_complete */
    }

    /* If we flush a full burst of packets, assume there are
     * more coming and immediately reschedule */
    if (ret >= n->tx_burst) {
        qemu_bh_schedule(q->tx_bh);
        q->tx_waiting = 1;
        return;
    }

    /* If less than a full burst, re-enable notification and flush
     * anything that may have come in while we weren't looking.  If
     * we find something, assume the guest is still active and reschedule */
    virtio_queue_set_notification(q->tx_vq, 1);
    if (virtio_net_flush_tx(q) > 0) {
        virtio_queue_set_notification(q->tx_vq, 0);
        qemu_bh_schedule(q->tx_bh);
        q->tx_waiting = 1;
    }
}

static void virtio_net_add_queue(VirtIONet *n, int index)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);

    n->vqs[index].rx_vq = virtio_add_queue(vdev, 256, virtio_net_handle_rx);
    if (n->net_conf.tx && !strcmp(n->net_conf.tx, "timer")) {
        n->vqs[index].tx_vq =
            virtio_add_queue(vdev, 256, virtio_net_handle_tx_timer);
        n->vqs[index].tx_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
                                              virtio_net_tx_timer,
                                              &n->vqs[index]);
    } else {
        n->vqs[index].tx_vq =
            virtio_add_queue(vdev, 256, virtio_net_handle_tx_bh);
        n->vqs[index].tx_bh = qemu_bh_new(virtio_net_tx_bh, &n->vqs[index]);
    }

    n->vqs[index].tx_waiting = 0;
    n->vqs[index].n = n;
}

static void virtio_net_del_queue(VirtIONet *n, int index)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    VirtIONetQueue *q = &n->vqs[index];
    NetClientState *nc = qemu_get_subqueue(n->nic, index);

    qemu_purge_queued_packets(nc);

    virtio_del_queue(vdev, index * 2);
    if (q->tx_timer) {
        timer_del(q->tx_timer);
        timer_free(q->tx_timer);
    } else {
        qemu_bh_delete(q->tx_bh);
    }
    virtio_del_queue(vdev, index * 2 + 1);
}

static void virtio_net_change_num_queues(VirtIONet *n, int new_max_queues)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    int old_num_queues = virtio_get_num_queues(vdev);
    int new_num_queues = new_max_queues * 2 + 1;
    int i;

    assert(old_num_queues >= 3);
    assert(old_num_queues % 2 == 1);

    if (old_num_queues == new_num_queues) {
        return;
    }

    /*
     * We always need to remove and add ctrl vq if
     * old_num_queues != new_num_queues. Remove ctrl_vq first,
     * and then we only enter one of the following two loops.
     */
    virtio_del_queue(vdev, old_num_queues - 1);

    for (i = new_num_queues - 1; i < old_num_queues - 1; i += 2) {
        /* new_num_queues < old_num_queues */
        virtio_net_del_queue(n, i / 2);
    }

    for (i = old_num_queues - 1; i < new_num_queues - 1; i += 2) {
        /* new_num_queues > old_num_queues */
        virtio_net_add_queue(n, i / 2);
    }

    /* add ctrl_vq last */
    n->ctrl_vq = virtio_add_queue(vdev, 64, virtio_net_handle_ctrl);
}

static void virtio_net_set_multiqueue(VirtIONet *n, int multiqueue)
{
    int max = multiqueue ? n->max_queues : 1;
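    /* Every queue pair contributes an RX and a TX virtqueue; the control
     * virtqueue is deleted and re-added last by
     * virtio_net_change_num_queues(). */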

    n->multiqueue = multiqueue;
    virtio_net_change_num_queues(n, max);

    virtio_net_set_queues(n);
}

static void virtio_net_save(QEMUFile *f, void *opaque)
{
    VirtIONet *n = opaque;
    VirtIODevice *vdev = VIRTIO_DEVICE(n);

    /* At this point, backend must be stopped, otherwise
     * it might keep writing to memory. */
    assert(!n->vhost_started);
    virtio_save(vdev, f);
}

static void virtio_net_save_device(VirtIODevice *vdev, QEMUFile *f)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    int i;

    qemu_put_buffer(f, n->mac, ETH_ALEN);
    qemu_put_be32(f, n->vqs[0].tx_waiting);
    qemu_put_be32(f, n->mergeable_rx_bufs);
    qemu_put_be16(f, n->status);
    qemu_put_byte(f, n->promisc);
    qemu_put_byte(f, n->allmulti);
    qemu_put_be32(f, n->mac_table.in_use);
    qemu_put_buffer(f, n->mac_table.macs, n->mac_table.in_use * ETH_ALEN);
    qemu_put_buffer(f, (uint8_t *)n->vlans, MAX_VLAN >> 3);
    qemu_put_be32(f, n->has_vnet_hdr);
    qemu_put_byte(f, n->mac_table.multi_overflow);
    qemu_put_byte(f, n->mac_table.uni_overflow);
    qemu_put_byte(f, n->alluni);
    qemu_put_byte(f, n->nomulti);
    qemu_put_byte(f, n->nouni);
    qemu_put_byte(f, n->nobcast);
    qemu_put_byte(f, n->has_ufo);
    if (n->max_queues > 1) {
        qemu_put_be16(f, n->max_queues);
        qemu_put_be16(f, n->curr_queues);
        for (i = 1; i < n->curr_queues; i++) {
            qemu_put_be32(f, n->vqs[i].tx_waiting);
        }
    }

    if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS)) {
        qemu_put_be64(f, n->curr_guest_offloads);
    }
}

static int virtio_net_load(QEMUFile *f, void *opaque, int version_id)
{
    VirtIONet *n = opaque;
    VirtIODevice *vdev = VIRTIO_DEVICE(n);

    if (version_id < 2 || version_id > VIRTIO_NET_VM_VERSION)
        return -EINVAL;

    return virtio_load(vdev, f, version_id);
}

static int virtio_net_load_device(VirtIODevice *vdev, QEMUFile *f,
                                  int version_id)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    int i, link_down;

    qemu_get_buffer(f, n->mac, ETH_ALEN);
    n->vqs[0].tx_waiting = qemu_get_be32(f);

    virtio_net_set_mrg_rx_bufs(n, qemu_get_be32(f),
                               virtio_has_feature(vdev, VIRTIO_F_VERSION_1));

    if (version_id >= 3)
        n->status = qemu_get_be16(f);

    if (version_id >= 4) {
        if (version_id < 8) {
            n->promisc = qemu_get_be32(f);
            n->allmulti = qemu_get_be32(f);
        } else {
            n->promisc = qemu_get_byte(f);
            n->allmulti = qemu_get_byte(f);
        }
    }

    if (version_id >= 5) {
        n->mac_table.in_use = qemu_get_be32(f);
        /* MAC_TABLE_ENTRIES may be different from the saved image */
        if (n->mac_table.in_use <= MAC_TABLE_ENTRIES) {
            qemu_get_buffer(f, n->mac_table.macs,
                            n->mac_table.in_use * ETH_ALEN);
        } else {
            int64_t i;

            /* Overflow detected - can happen if source has a larger MAC table.
             * We simply set overflow flag so there's no need to maintain the
             * table of addresses, discard them all.
             * Note: 64 bit math to avoid integer overflow.
             */
            for (i = 0; i < (int64_t)n->mac_table.in_use * ETH_ALEN; ++i) {
                qemu_get_byte(f);
            }
            n->mac_table.multi_overflow = n->mac_table.uni_overflow = 1;
            n->mac_table.in_use = 0;
        }
    }

    if (version_id >= 6)
        qemu_get_buffer(f, (uint8_t *)n->vlans, MAX_VLAN >> 3);

    if (version_id >= 7) {
        if (qemu_get_be32(f) && !peer_has_vnet_hdr(n)) {
            error_report("virtio-net: saved image requires vnet_hdr=on");
            return -1;
        }
    }

    if (version_id >= 9) {
        n->mac_table.multi_overflow = qemu_get_byte(f);
        n->mac_table.uni_overflow = qemu_get_byte(f);
    }

    if (version_id >= 10) {
        n->alluni = qemu_get_byte(f);
        n->nomulti = qemu_get_byte(f);
        n->nouni = qemu_get_byte(f);
        n->nobcast = qemu_get_byte(f);
    }

    if (version_id >= 11) {
        if (qemu_get_byte(f) && !peer_has_ufo(n)) {
            error_report("virtio-net: saved image requires TUN_F_UFO support");
            return -1;
        }
    }

    if (n->max_queues > 1) {
        if (n->max_queues != qemu_get_be16(f)) {
            error_report("virtio-net: different max_queues ");
            return -1;
        }

        n->curr_queues = qemu_get_be16(f);
        if (n->curr_queues > n->max_queues) {
            error_report("virtio-net: curr_queues %x > max_queues %x",
                         n->curr_queues, n->max_queues);
            return -1;
        }
        for (i = 1; i < n->curr_queues; i++) {
            n->vqs[i].tx_waiting = qemu_get_be32(f);
        }
    }

    if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS)) {
        n->curr_guest_offloads = qemu_get_be64(f);
    } else {
        n->curr_guest_offloads = virtio_net_supported_guest_offloads(n);
    }

    if (peer_has_vnet_hdr(n)) {
        virtio_net_apply_guest_offloads(n);
    }

    virtio_net_set_queues(n);

    /* Find the first multicast entry in the saved MAC filter */
    for (i = 0; i < n->mac_table.in_use; i++) {
        if (n->mac_table.macs[i * ETH_ALEN] & 1) {
            break;
        }
    }
    n->mac_table.first_multi = i;

    /* nc.link_down can't be migrated, so infer link_down according
     * to link status bit in n->status */
    link_down = (n->status & VIRTIO_NET_S_LINK_UP) == 0;
    for (i = 0; i < n->max_queues; i++) {
        qemu_get_subqueue(n->nic, i)->link_down = link_down;
    }

    if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ANNOUNCE) &&
        virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ)) {
        n->announce_counter = SELF_ANNOUNCE_ROUNDS;
        timer_mod(n->announce_timer, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL));
    }

    return 0;
}

static NetClientInfo net_virtio_info = {
    .type = NET_CLIENT_OPTIONS_KIND_NIC,
    .size = sizeof(NICState),
    .can_receive = virtio_net_can_receive,
    .receive = virtio_net_receive,
    .link_status_changed = virtio_net_set_link_status,
    .query_rx_filter = virtio_net_query_rxfilter,
};

static bool virtio_net_guest_notifier_pending(VirtIODevice *vdev, int idx)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    NetClientState *nc = qemu_get_subqueue(n->nic, vq2q(idx));
    assert(n->vhost_started);
    return vhost_net_virtqueue_pending(get_vhost_net(nc->peer), idx);
}

static void virtio_net_guest_notifier_mask(VirtIODevice *vdev, int idx,
                                           bool mask)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    NetClientState *nc = qemu_get_subqueue(n->nic, vq2q(idx));
    assert(n->vhost_started);
    vhost_net_virtqueue_mask(get_vhost_net(nc->peer),
                             vdev, idx, mask);
}

static void virtio_net_set_config_size(VirtIONet *n, uint64_t host_features)
{
    int i, config_size = 0;
    virtio_add_feature(&host_features, VIRTIO_NET_F_MAC);
    for (i = 0; feature_sizes[i].flags != 0; i++) {
        if (host_features & feature_sizes[i].flags) {
            config_size = MAX(feature_sizes[i].end, config_size);
        }
    }
    n->config_size = config_size;
}

void virtio_net_set_netclient_name(VirtIONet *n, const char *name,
                                   const char *type)
{
    /*
     * The name can be NULL, the netclient name will be type.x.
     */
    assert(type != NULL);

    g_free(n->netclient_name);
    g_free(n->netclient_type);
    n->netclient_name = g_strdup(name);
    n->netclient_type = g_strdup(type);
}

static void virtio_net_device_realize(DeviceState *dev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VirtIONet *n = VIRTIO_NET(dev);
    NetClientState *nc;
    int i;

    virtio_net_set_config_size(n, n->host_features);
    virtio_init(vdev, "virtio-net", VIRTIO_ID_NET, n->config_size);

    n->max_queues = MAX(n->nic_conf.peers.queues, 1);
    if (n->max_queues * 2 + 1 > VIRTIO_QUEUE_MAX) {
        error_setg(errp, "Invalid number of queues (= %" PRIu32 "), "
                   "must be a positive integer less than %d.",
                   n->max_queues, (VIRTIO_QUEUE_MAX - 1) / 2);
        virtio_cleanup(vdev);
        return;
    }
    n->vqs = g_malloc0(sizeof(VirtIONetQueue) * n->max_queues);
    n->curr_queues = 1;
    n->tx_timeout = n->net_conf.txtimer;

    if (n->net_conf.tx && strcmp(n->net_conf.tx, "timer")
                       && strcmp(n->net_conf.tx, "bh")) {
        error_report("virtio-net: "
                     "Unknown option tx=%s, valid options: \"timer\" \"bh\"",
                     n->net_conf.tx);
        error_report("Defaulting to \"bh\"");
    }

    for (i = 0; i < n->max_queues; i++) {
        virtio_net_add_queue(n, i);
    }

    n->ctrl_vq = virtio_add_queue(vdev, 64, virtio_net_handle_ctrl);
    qemu_macaddr_default_if_unset(&n->nic_conf.macaddr);
    memcpy(&n->mac[0], &n->nic_conf.macaddr, sizeof(n->mac));
    n->status = VIRTIO_NET_S_LINK_UP;
    n->announce_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL,
                                     virtio_net_announce_timer, n);

    if (n->netclient_type) {
        /*
         * Happens when virtio_net_set_netclient_name has been called.
         */
        n->nic = qemu_new_nic(&net_virtio_info, &n->nic_conf,
                              n->netclient_type, n->netclient_name, n);
    } else {
        n->nic = qemu_new_nic(&net_virtio_info, &n->nic_conf,
                              object_get_typename(OBJECT(dev)), dev->id, n);
    }

    peer_test_vnet_hdr(n);
    if (peer_has_vnet_hdr(n)) {
        for (i = 0; i < n->max_queues; i++) {
            qemu_using_vnet_hdr(qemu_get_subqueue(n->nic, i)->peer, true);
        }
        n->host_hdr_len = sizeof(struct virtio_net_hdr);
    } else {
        n->host_hdr_len = 0;
    }

    qemu_format_nic_info_str(qemu_get_queue(n->nic), n->nic_conf.macaddr.a);

    n->vqs[0].tx_waiting = 0;
    n->tx_burst = n->net_conf.txburst;
    virtio_net_set_mrg_rx_bufs(n, 0, 0);
    n->promisc = 1; /* for compatibility */

    n->mac_table.macs = g_malloc0(MAC_TABLE_ENTRIES * ETH_ALEN);

    n->vlans = g_malloc0(MAX_VLAN >> 3);

    nc = qemu_get_queue(n->nic);
    nc->rxfilter_notify_enabled = 1;

    n->qdev = dev;
    register_savevm(dev, "virtio-net", -1, VIRTIO_NET_VM_VERSION,
                    virtio_net_save, virtio_net_load, n);
}

static void virtio_net_device_unrealize(DeviceState *dev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VirtIONet *n = VIRTIO_NET(dev);
    int i, max_queues;

    /* This will stop vhost backend if appropriate. */
    virtio_net_set_status(vdev, 0);

    unregister_savevm(dev, "virtio-net", n);

    g_free(n->netclient_name);
    n->netclient_name = NULL;
    g_free(n->netclient_type);
    n->netclient_type = NULL;

    g_free(n->mac_table.macs);
    g_free(n->vlans);

    max_queues = n->multiqueue ? n->max_queues : 1;
    for (i = 0; i < max_queues; i++) {
        virtio_net_del_queue(n, i);
    }

    timer_del(n->announce_timer);
    timer_free(n->announce_timer);
    g_free(n->vqs);
    qemu_del_nic(n->nic);
    virtio_cleanup(vdev);
}

static void virtio_net_instance_init(Object *obj)
{
    VirtIONet *n = VIRTIO_NET(obj);

    /*
     * The default config_size is sizeof(struct virtio_net_config).
     * Can be overridden with virtio_net_set_config_size.
     */
    n->config_size = sizeof(struct virtio_net_config);
    device_add_bootindex_property(obj, &n->nic_conf.bootindex,
                                  "bootindex", "/ethernet-phy@0",
                                  DEVICE(n), NULL);
}

static Property virtio_net_properties[] = {
    DEFINE_PROP_BIT("any_layout", VirtIONet, host_features,
                    VIRTIO_F_ANY_LAYOUT, true),
    DEFINE_PROP_BIT("csum", VirtIONet, host_features, VIRTIO_NET_F_CSUM, true),
    DEFINE_PROP_BIT("guest_csum", VirtIONet, host_features,
                    VIRTIO_NET_F_GUEST_CSUM, true),
    DEFINE_PROP_BIT("gso", VirtIONet, host_features, VIRTIO_NET_F_GSO, true),
    DEFINE_PROP_BIT("guest_tso4", VirtIONet, host_features,
                    VIRTIO_NET_F_GUEST_TSO4, true),
    DEFINE_PROP_BIT("guest_tso6", VirtIONet, host_features,
                    VIRTIO_NET_F_GUEST_TSO6, true),
    DEFINE_PROP_BIT("guest_ecn", VirtIONet, host_features,
                    VIRTIO_NET_F_GUEST_ECN, true),
    DEFINE_PROP_BIT("guest_ufo", VirtIONet, host_features,
                    VIRTIO_NET_F_GUEST_UFO, true),
    DEFINE_PROP_BIT("guest_announce", VirtIONet, host_features,
                    VIRTIO_NET_F_GUEST_ANNOUNCE, true),
    DEFINE_PROP_BIT("host_tso4", VirtIONet, host_features,
                    VIRTIO_NET_F_HOST_TSO4, true),
    DEFINE_PROP_BIT("host_tso6", VirtIONet, host_features,
                    VIRTIO_NET_F_HOST_TSO6, true),
    DEFINE_PROP_BIT("host_ecn", VirtIONet, host_features,
                    VIRTIO_NET_F_HOST_ECN, true),
    DEFINE_PROP_BIT("host_ufo", VirtIONet, host_features,
                    VIRTIO_NET_F_HOST_UFO, true),
    DEFINE_PROP_BIT("mrg_rxbuf", VirtIONet, host_features,
                    VIRTIO_NET_F_MRG_RXBUF, true),
    DEFINE_PROP_BIT("status", VirtIONet, host_features,
                    VIRTIO_NET_F_STATUS, true),
    DEFINE_PROP_BIT("ctrl_vq", VirtIONet, host_features,
                    VIRTIO_NET_F_CTRL_VQ, true),
    DEFINE_PROP_BIT("ctrl_rx", VirtIONet, host_features,
                    VIRTIO_NET_F_CTRL_RX, true),
    DEFINE_PROP_BIT("ctrl_vlan", VirtIONet, host_features,
                    VIRTIO_NET_F_CTRL_VLAN, true),
    DEFINE_PROP_BIT("ctrl_rx_extra", VirtIONet, host_features,
                    VIRTIO_NET_F_CTRL_RX_EXTRA, true),
    DEFINE_PROP_BIT("ctrl_mac_addr", VirtIONet, host_features,
                    VIRTIO_NET_F_CTRL_MAC_ADDR, true),
    DEFINE_PROP_BIT("ctrl_guest_offloads", VirtIONet, host_features,
                    VIRTIO_NET_F_CTRL_GUEST_OFFLOADS, true),
    DEFINE_PROP_BIT("mq", VirtIONet, host_features, VIRTIO_NET_F_MQ, false),
    DEFINE_NIC_PROPERTIES(VirtIONet, nic_conf),
    DEFINE_PROP_UINT32("x-txtimer", VirtIONet, net_conf.txtimer,
                       TX_TIMER_INTERVAL),
    DEFINE_PROP_INT32("x-txburst", VirtIONet, net_conf.txburst, TX_BURST),
    DEFINE_PROP_STRING("tx", VirtIONet, net_conf.tx),
    DEFINE_PROP_END_OF_LIST(),
};

static void virtio_net_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);

    dc->props = virtio_net_properties;
    set_bit(DEVICE_CATEGORY_NETWORK, dc->categories);
    vdc->realize = virtio_net_device_realize;
    vdc->unrealize = virtio_net_device_unrealize;
    vdc->get_config = virtio_net_get_config;
    vdc->set_config = virtio_net_set_config;
    vdc->get_features = virtio_net_get_features;
    vdc->set_features = virtio_net_set_features;
    vdc->bad_features = virtio_net_bad_features;
    vdc->reset = virtio_net_reset;
    vdc->set_status = virtio_net_set_status;
    vdc->guest_notifier_mask = virtio_net_guest_notifier_mask;
    vdc->guest_notifier_pending = virtio_net_guest_notifier_pending;
    vdc->load = virtio_net_load_device;
    vdc->save = virtio_net_save_device;
}

static const TypeInfo virtio_net_info = {
    .name = TYPE_VIRTIO_NET,
    .parent = TYPE_VIRTIO_DEVICE,
    .instance_size = sizeof(VirtIONet),
    .instance_init = virtio_net_instance_init,
    .class_init = virtio_net_class_init,
};

static void virtio_register_types(void)
{
    type_register_static(&virtio_net_info);
}

type_init(virtio_register_types)