/*
 * Virtio Network Device
 *
 * Copyright IBM, Corp. 2007
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include "qemu/iov.h"
#include "hw/virtio/virtio.h"
#include "net/net.h"
#include "net/checksum.h"
#include "net/tap.h"
#include "qemu/error-report.h"
#include "qemu/timer.h"
#include "hw/virtio/virtio-net.h"
#include "net/vhost_net.h"
#include "hw/virtio/virtio-bus.h"
#include "qapi/qmp/qjson.h"
#include "qapi-event.h"
#include "hw/virtio/virtio-access.h"

#define VIRTIO_NET_VM_VERSION    11

#define MAC_TABLE_ENTRIES    64
#define MAX_VLAN    (1 << 12)   /* Per 802.1Q definition */

/*
 * Calculate the number of bytes up to and including the given 'field' of
 * 'container'.
 */
#define endof(container, field) \
    (offsetof(container, field) + sizeof(((container *)0)->field))

typedef struct VirtIOFeature {
    uint32_t flags;
    size_t end;
} VirtIOFeature;

static VirtIOFeature feature_sizes[] = {
    {.flags = 1 << VIRTIO_NET_F_MAC,
     .end = endof(struct virtio_net_config, mac)},
    {.flags = 1 << VIRTIO_NET_F_STATUS,
     .end = endof(struct virtio_net_config, status)},
    {.flags = 1 << VIRTIO_NET_F_MQ,
     .end = endof(struct virtio_net_config, max_virtqueue_pairs)},
    {}
};

static VirtIONetQueue *virtio_net_get_subqueue(NetClientState *nc)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);

    return &n->vqs[nc->queue_index];
}

static int vq2q(int queue_index)
{
    return queue_index / 2;
}

/* TODO
 * - we could suppress RX interrupt if we were so inclined.
 */

static void virtio_net_get_config(VirtIODevice *vdev, uint8_t *config)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    struct virtio_net_config netcfg;

    virtio_stw_p(vdev, &netcfg.status, n->status);
    virtio_stw_p(vdev, &netcfg.max_virtqueue_pairs, n->max_queues);
    memcpy(netcfg.mac, n->mac, ETH_ALEN);
    memcpy(config, &netcfg, n->config_size);
}

static void virtio_net_set_config(VirtIODevice *vdev, const uint8_t *config)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    struct virtio_net_config netcfg = {};

    memcpy(&netcfg, config, n->config_size);

    if (!(vdev->guest_features >> VIRTIO_NET_F_CTRL_MAC_ADDR & 1) &&
        memcmp(netcfg.mac, n->mac, ETH_ALEN)) {
        memcpy(n->mac, netcfg.mac, ETH_ALEN);
        qemu_format_nic_info_str(qemu_get_queue(n->nic), n->mac);
    }
}

static bool virtio_net_started(VirtIONet *n, uint8_t status)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    return (status & VIRTIO_CONFIG_S_DRIVER_OK) &&
        (n->status & VIRTIO_NET_S_LINK_UP) && vdev->vm_running;
}

static void virtio_net_announce_timer(void *opaque)
{
    VirtIONet *n = opaque;
    VirtIODevice *vdev = VIRTIO_DEVICE(n);

    n->announce_counter--;
    n->status |= VIRTIO_NET_S_ANNOUNCE;
    virtio_notify_config(vdev);
}
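
/* Hand the datapath over to a vhost backend, or take it back, whenever the
 * device status changes.  Userspace virtio and vhost must never touch the
 * same rings concurrently, so any queued packets are purged before vhost
 * starts, and a failed vhost_net_start() falls back to userspace virtio.
 */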
static void virtio_net_vhost_status(VirtIONet *n, uint8_t status)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    NetClientState *nc = qemu_get_queue(n->nic);
    int queues = n->multiqueue ? n->max_queues : 1;

    if (!get_vhost_net(nc->peer)) {
        return;
    }

    if (!!n->vhost_started ==
        (virtio_net_started(n, status) && !nc->peer->link_down)) {
        return;
    }
    if (!n->vhost_started) {
        int r, i;

        if (!vhost_net_query(get_vhost_net(nc->peer), vdev)) {
            return;
        }

        /* Any packets outstanding? Purge them to avoid touching rings
         * when vhost is running.
         */
        for (i = 0; i < queues; i++) {
            NetClientState *qnc = qemu_get_subqueue(n->nic, i);

            /* Purge both directions: TX and RX. */
            qemu_net_queue_purge(qnc->peer->incoming_queue, qnc);
            qemu_net_queue_purge(qnc->incoming_queue, qnc->peer);
        }

        n->vhost_started = 1;
        r = vhost_net_start(vdev, n->nic->ncs, queues);
        if (r < 0) {
            error_report("unable to start vhost net: %d: "
                         "falling back on userspace virtio", -r);
            n->vhost_started = 0;
        }
    } else {
        vhost_net_stop(vdev, n->nic->ncs, queues);
        n->vhost_started = 0;
    }
}

static void virtio_net_set_status(struct VirtIODevice *vdev, uint8_t status)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    VirtIONetQueue *q;
    int i;
    uint8_t queue_status;

    virtio_net_vhost_status(n, status);

    for (i = 0; i < n->max_queues; i++) {
        q = &n->vqs[i];

        if ((!n->multiqueue && i != 0) || i >= n->curr_queues) {
            queue_status = 0;
        } else {
            queue_status = status;
        }

        if (!q->tx_waiting) {
            continue;
        }

        if (virtio_net_started(n, queue_status) && !n->vhost_started) {
            if (q->tx_timer) {
                timer_mod(q->tx_timer,
                          qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + n->tx_timeout);
            } else {
                qemu_bh_schedule(q->tx_bh);
            }
        } else {
            if (q->tx_timer) {
                timer_del(q->tx_timer);
            } else {
                qemu_bh_cancel(q->tx_bh);
            }
        }
    }
}

static void virtio_net_set_link_status(NetClientState *nc)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    uint16_t old_status = n->status;

    if (nc->link_down)
        n->status &= ~VIRTIO_NET_S_LINK_UP;
    else
        n->status |= VIRTIO_NET_S_LINK_UP;

    if (n->status != old_status)
        virtio_notify_config(vdev);

    virtio_net_set_status(vdev, vdev->status);
}

static void rxfilter_notify(NetClientState *nc)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);

    if (nc->rxfilter_notify_enabled) {
        gchar *path = object_get_canonical_path(OBJECT(n->qdev));
        qapi_event_send_nic_rx_filter_changed(!!n->netclient_name,
                                              n->netclient_name, path,
                                              &error_abort);
        g_free(path);

        /* disable event notification to avoid events flooding */
        nc->rxfilter_notify_enabled = 0;
    }
}

static char *mac_strdup_printf(const uint8_t *mac)
{
    return g_strdup_printf("%.2x:%.2x:%.2x:%.2x:%.2x:%.2x", mac[0],
                           mac[1], mac[2], mac[3], mac[4], mac[5]);
}

static intList *get_vlan_table(VirtIONet *n)
{
    intList *list, *entry;
    int i, j;

    list = NULL;
    for (i = 0; i < MAX_VLAN >> 5; i++) {
        for (j = 0; n->vlans[i] && j <= 0x1f; j++) {
            if (n->vlans[i] & (1U << j)) {
                entry = g_malloc0(sizeof(*entry));
                entry->value = (i << 5) + j;
                entry->next = list;
                list = entry;
            }
        }
    }

    return list;
}
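
/* Build the reply for the query-rx-filter QMP command.  Note the interplay
 * with rxfilter_notify() above: emitting NIC_RX_FILTER_CHANGED disables
 * further events, and only this query re-enables them, so management tools
 * see at most one event per query instead of a flood.
 */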
static RxFilterInfo *virtio_net_query_rxfilter(NetClientState *nc)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    RxFilterInfo *info;
    strList *str_list, *entry;
    int i;

    info = g_malloc0(sizeof(*info));
    info->name = g_strdup(nc->name);
    info->promiscuous = n->promisc;

    if (n->nouni) {
        info->unicast = RX_STATE_NONE;
    } else if (n->alluni) {
        info->unicast = RX_STATE_ALL;
    } else {
        info->unicast = RX_STATE_NORMAL;
    }

    if (n->nomulti) {
        info->multicast = RX_STATE_NONE;
    } else if (n->allmulti) {
        info->multicast = RX_STATE_ALL;
    } else {
        info->multicast = RX_STATE_NORMAL;
    }

    /* n->nobcast is "no broadcast", so the reported flag is its negation */
    info->broadcast_allowed = !n->nobcast;
    info->multicast_overflow = n->mac_table.multi_overflow;
    info->unicast_overflow = n->mac_table.uni_overflow;

    info->main_mac = mac_strdup_printf(n->mac);

    str_list = NULL;
    for (i = 0; i < n->mac_table.first_multi; i++) {
        entry = g_malloc0(sizeof(*entry));
        entry->value = mac_strdup_printf(n->mac_table.macs + i * ETH_ALEN);
        entry->next = str_list;
        str_list = entry;
    }
    info->unicast_table = str_list;

    str_list = NULL;
    for (i = n->mac_table.first_multi; i < n->mac_table.in_use; i++) {
        entry = g_malloc0(sizeof(*entry));
        entry->value = mac_strdup_printf(n->mac_table.macs + i * ETH_ALEN);
        entry->next = str_list;
        str_list = entry;
    }
    info->multicast_table = str_list;
    info->vlan_table = get_vlan_table(n);

    if (!((1 << VIRTIO_NET_F_CTRL_VLAN) & vdev->guest_features)) {
        info->vlan = RX_STATE_ALL;
    } else if (!info->vlan_table) {
        info->vlan = RX_STATE_NONE;
    } else {
        info->vlan = RX_STATE_NORMAL;
    }

    /* enable event notification after query */
    nc->rxfilter_notify_enabled = 1;

    return info;
}

static void virtio_net_reset(VirtIODevice *vdev)
{
    VirtIONet *n = VIRTIO_NET(vdev);

    /* Reset back to compatibility mode */
    n->promisc = 1;
    n->allmulti = 0;
    n->alluni = 0;
    n->nomulti = 0;
    n->nouni = 0;
    n->nobcast = 0;
    /* multiqueue is disabled by default */
    n->curr_queues = 1;
    timer_del(n->announce_timer);
    n->announce_counter = 0;
    n->status &= ~VIRTIO_NET_S_ANNOUNCE;

    /* Flush any MAC and VLAN filter table state */
    n->mac_table.in_use = 0;
    n->mac_table.first_multi = 0;
    n->mac_table.multi_overflow = 0;
    n->mac_table.uni_overflow = 0;
    memset(n->mac_table.macs, 0, MAC_TABLE_ENTRIES * ETH_ALEN);
    memcpy(&n->mac[0], &n->nic->conf->macaddr, sizeof(n->mac));
    qemu_format_nic_info_str(qemu_get_queue(n->nic), n->mac);
    memset(n->vlans, 0, MAX_VLAN >> 3);
}

static void peer_test_vnet_hdr(VirtIONet *n)
{
    NetClientState *nc = qemu_get_queue(n->nic);
    if (!nc->peer) {
        return;
    }

    n->has_vnet_hdr = qemu_has_vnet_hdr(nc->peer);
}

static int peer_has_vnet_hdr(VirtIONet *n)
{
    return n->has_vnet_hdr;
}

static int peer_has_ufo(VirtIONet *n)
{
    if (!peer_has_vnet_hdr(n))
        return 0;

    n->has_ufo = qemu_has_ufo(qemu_get_queue(n->nic)->peer);

    return n->has_ufo;
}
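
/* Choose the guest-visible header: struct virtio_net_hdr (10 bytes), or
 * struct virtio_net_hdr_mrg_rxbuf (12 bytes, adding the 16-bit num_buffers
 * field) once VIRTIO_NET_F_MRG_RXBUF is negotiated.  When the peer accepts
 * the same header length, host_hdr_len is synced to guest_hdr_len so the
 * RX/TX paths can pass headers through without copying.
 */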
static void virtio_net_set_mrg_rx_bufs(VirtIONet *n, int mergeable_rx_bufs)
{
    int i;
    NetClientState *nc;

    n->mergeable_rx_bufs = mergeable_rx_bufs;

    n->guest_hdr_len = n->mergeable_rx_bufs ?
        sizeof(struct virtio_net_hdr_mrg_rxbuf) : sizeof(struct virtio_net_hdr);

    for (i = 0; i < n->max_queues; i++) {
        nc = qemu_get_subqueue(n->nic, i);

        if (peer_has_vnet_hdr(n) &&
            qemu_has_vnet_hdr_len(nc->peer, n->guest_hdr_len)) {
            qemu_set_vnet_hdr_len(nc->peer, n->guest_hdr_len);
            n->host_hdr_len = n->guest_hdr_len;
        }
    }
}

static int peer_attach(VirtIONet *n, int index)
{
    NetClientState *nc = qemu_get_subqueue(n->nic, index);

    if (!nc->peer) {
        return 0;
    }

    if (nc->peer->info->type != NET_CLIENT_OPTIONS_KIND_TAP) {
        return 0;
    }

    return tap_enable(nc->peer);
}

static int peer_detach(VirtIONet *n, int index)
{
    NetClientState *nc = qemu_get_subqueue(n->nic, index);

    if (!nc->peer) {
        return 0;
    }

    if (nc->peer->info->type != NET_CLIENT_OPTIONS_KIND_TAP) {
        return 0;
    }

    return tap_disable(nc->peer);
}

static void virtio_net_set_queues(VirtIONet *n)
{
    int i;
    int r;

    for (i = 0; i < n->max_queues; i++) {
        if (i < n->curr_queues) {
            r = peer_attach(n, i);
            assert(!r);
        } else {
            r = peer_detach(n, i);
            assert(!r);
        }
    }
}

static void virtio_net_set_multiqueue(VirtIONet *n, int multiqueue);

static uint32_t virtio_net_get_features(VirtIODevice *vdev, uint32_t features)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    NetClientState *nc = qemu_get_queue(n->nic);

    features |= (1 << VIRTIO_NET_F_MAC);

    if (!peer_has_vnet_hdr(n)) {
        features &= ~(0x1 << VIRTIO_NET_F_CSUM);
        features &= ~(0x1 << VIRTIO_NET_F_HOST_TSO4);
        features &= ~(0x1 << VIRTIO_NET_F_HOST_TSO6);
        features &= ~(0x1 << VIRTIO_NET_F_HOST_ECN);

        features &= ~(0x1 << VIRTIO_NET_F_GUEST_CSUM);
        features &= ~(0x1 << VIRTIO_NET_F_GUEST_TSO4);
        features &= ~(0x1 << VIRTIO_NET_F_GUEST_TSO6);
        features &= ~(0x1 << VIRTIO_NET_F_GUEST_ECN);
    }

    if (!peer_has_vnet_hdr(n) || !peer_has_ufo(n)) {
        features &= ~(0x1 << VIRTIO_NET_F_GUEST_UFO);
        features &= ~(0x1 << VIRTIO_NET_F_HOST_UFO);
    }

    if (!get_vhost_net(nc->peer)) {
        return features;
    }
    return vhost_net_get_features(get_vhost_net(nc->peer), features);
}

static uint32_t virtio_net_bad_features(VirtIODevice *vdev)
{
    uint32_t features = 0;

    /* Linux kernel 2.6.25.  It understood MAC (as everyone must),
     * but also these: */
    features |= (1 << VIRTIO_NET_F_MAC);
    features |= (1 << VIRTIO_NET_F_CSUM);
    features |= (1 << VIRTIO_NET_F_HOST_TSO4);
    features |= (1 << VIRTIO_NET_F_HOST_TSO6);
    features |= (1 << VIRTIO_NET_F_HOST_ECN);

    return features;
}

static void virtio_net_apply_guest_offloads(VirtIONet *n)
{
    qemu_set_offload(qemu_get_queue(n->nic)->peer,
        !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_CSUM)),
        !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_TSO4)),
        !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_TSO6)),
        !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_ECN)),
        !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_UFO)));
}

static uint64_t virtio_net_guest_offloads_by_features(uint32_t features)
{
    static const uint64_t guest_offloads_mask =
        (1ULL << VIRTIO_NET_F_GUEST_CSUM) |
        (1ULL << VIRTIO_NET_F_GUEST_TSO4) |
        (1ULL << VIRTIO_NET_F_GUEST_TSO6) |
        (1ULL << VIRTIO_NET_F_GUEST_ECN) |
        (1ULL << VIRTIO_NET_F_GUEST_UFO);

    return guest_offloads_mask & features;
}

static inline uint64_t virtio_net_supported_guest_offloads(VirtIONet *n)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    return virtio_net_guest_offloads_by_features(vdev->guest_features);
}

static void virtio_net_set_features(VirtIODevice *vdev, uint32_t features)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    int i;

    virtio_net_set_multiqueue(n, !!(features & (1 << VIRTIO_NET_F_MQ)));

    virtio_net_set_mrg_rx_bufs(n, !!(features & (1 << VIRTIO_NET_F_MRG_RXBUF)));

    if (n->has_vnet_hdr) {
        n->curr_guest_offloads =
            virtio_net_guest_offloads_by_features(features);
        virtio_net_apply_guest_offloads(n);
    }

    for (i = 0; i < n->max_queues; i++) {
        NetClientState *nc = qemu_get_subqueue(n->nic, i);

        if (!get_vhost_net(nc->peer)) {
            continue;
        }
        vhost_net_ack_features(get_vhost_net(nc->peer), features);
    }

    if ((1 << VIRTIO_NET_F_CTRL_VLAN) & features) {
        memset(n->vlans, 0, MAX_VLAN >> 3);
    } else {
        memset(n->vlans, 0xff, MAX_VLAN >> 3);
    }
}

static int virtio_net_handle_rx_mode(VirtIONet *n, uint8_t cmd,
                                     struct iovec *iov, unsigned int iov_cnt)
{
    uint8_t on;
    size_t s;
    NetClientState *nc = qemu_get_queue(n->nic);

    s = iov_to_buf(iov, iov_cnt, 0, &on, sizeof(on));
    if (s != sizeof(on)) {
        return VIRTIO_NET_ERR;
    }

    if (cmd == VIRTIO_NET_CTRL_RX_PROMISC) {
        n->promisc = on;
    } else if (cmd == VIRTIO_NET_CTRL_RX_ALLMULTI) {
        n->allmulti = on;
    } else if (cmd == VIRTIO_NET_CTRL_RX_ALLUNI) {
        n->alluni = on;
    } else if (cmd == VIRTIO_NET_CTRL_RX_NOMULTI) {
        n->nomulti = on;
    } else if (cmd == VIRTIO_NET_CTRL_RX_NOUNI) {
        n->nouni = on;
    } else if (cmd == VIRTIO_NET_CTRL_RX_NOBCAST) {
        n->nobcast = on;
    } else {
        return VIRTIO_NET_ERR;
    }

    rxfilter_notify(nc);

    return VIRTIO_NET_OK;
}
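
/* The VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET payload is a single 64-bit bitmap
 * of offloads to enable at runtime, for example:
 *
 *     uint64_t offloads = (1ULL << VIRTIO_NET_F_GUEST_CSUM) |
 *                         (1ULL << VIRTIO_NET_F_GUEST_TSO4);
 *
 * The command is only legal if VIRTIO_NET_F_CTRL_GUEST_OFFLOADS was
 * negotiated, and only bits within the negotiated feature set may be used.
 */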
static int virtio_net_handle_offloads(VirtIONet *n, uint8_t cmd,
                                      struct iovec *iov, unsigned int iov_cnt)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    uint64_t offloads;
    size_t s;

    if (!((1 << VIRTIO_NET_F_CTRL_GUEST_OFFLOADS) & vdev->guest_features)) {
        return VIRTIO_NET_ERR;
    }

    s = iov_to_buf(iov, iov_cnt, 0, &offloads, sizeof(offloads));
    if (s != sizeof(offloads)) {
        return VIRTIO_NET_ERR;
    }

    if (cmd == VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET) {
        uint64_t supported_offloads;

        if (!n->has_vnet_hdr) {
            return VIRTIO_NET_ERR;
        }

        supported_offloads = virtio_net_supported_guest_offloads(n);
        if (offloads & ~supported_offloads) {
            return VIRTIO_NET_ERR;
        }

        n->curr_guest_offloads = offloads;
        virtio_net_apply_guest_offloads(n);

        return VIRTIO_NET_OK;
    } else {
        return VIRTIO_NET_ERR;
    }
}

static int virtio_net_handle_mac(VirtIONet *n, uint8_t cmd,
                                 struct iovec *iov, unsigned int iov_cnt)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    struct virtio_net_ctrl_mac mac_data;
    size_t s;
    NetClientState *nc = qemu_get_queue(n->nic);

    if (cmd == VIRTIO_NET_CTRL_MAC_ADDR_SET) {
        if (iov_size(iov, iov_cnt) != sizeof(n->mac)) {
            return VIRTIO_NET_ERR;
        }
        s = iov_to_buf(iov, iov_cnt, 0, &n->mac, sizeof(n->mac));
        assert(s == sizeof(n->mac));
        qemu_format_nic_info_str(qemu_get_queue(n->nic), n->mac);
        rxfilter_notify(nc);

        return VIRTIO_NET_OK;
    }

    if (cmd != VIRTIO_NET_CTRL_MAC_TABLE_SET) {
        return VIRTIO_NET_ERR;
    }

    int in_use = 0;
    int first_multi = 0;
    uint8_t uni_overflow = 0;
    uint8_t multi_overflow = 0;
    uint8_t *macs = g_malloc0(MAC_TABLE_ENTRIES * ETH_ALEN);

    s = iov_to_buf(iov, iov_cnt, 0, &mac_data.entries,
                   sizeof(mac_data.entries));
    mac_data.entries = virtio_ldl_p(vdev, &mac_data.entries);
    if (s != sizeof(mac_data.entries)) {
        goto error;
    }
    iov_discard_front(&iov, &iov_cnt, s);

    if (mac_data.entries * ETH_ALEN > iov_size(iov, iov_cnt)) {
        goto error;
    }

    if (mac_data.entries <= MAC_TABLE_ENTRIES) {
        s = iov_to_buf(iov, iov_cnt, 0, macs,
                       mac_data.entries * ETH_ALEN);
        if (s != mac_data.entries * ETH_ALEN) {
            goto error;
        }
        in_use += mac_data.entries;
    } else {
        uni_overflow = 1;
    }

    iov_discard_front(&iov, &iov_cnt, mac_data.entries * ETH_ALEN);

    first_multi = in_use;

    s = iov_to_buf(iov, iov_cnt, 0, &mac_data.entries,
                   sizeof(mac_data.entries));
    mac_data.entries = virtio_ldl_p(vdev, &mac_data.entries);
    if (s != sizeof(mac_data.entries)) {
        goto error;
    }

    iov_discard_front(&iov, &iov_cnt, s);

    if (mac_data.entries * ETH_ALEN != iov_size(iov, iov_cnt)) {
        goto error;
    }

    if (mac_data.entries <= MAC_TABLE_ENTRIES - in_use) {
        s = iov_to_buf(iov, iov_cnt, 0, &macs[in_use * ETH_ALEN],
                       mac_data.entries * ETH_ALEN);
        if (s != mac_data.entries * ETH_ALEN) {
            goto error;
        }
        in_use += mac_data.entries;
    } else {
        multi_overflow = 1;
    }

    n->mac_table.in_use = in_use;
    n->mac_table.first_multi = first_multi;
    n->mac_table.uni_overflow = uni_overflow;
    n->mac_table.multi_overflow = multi_overflow;
    memcpy(n->mac_table.macs, macs, MAC_TABLE_ENTRIES * ETH_ALEN);
    g_free(macs);
    rxfilter_notify(nc);

    return VIRTIO_NET_OK;

error:
    g_free(macs);
    return VIRTIO_NET_ERR;
}
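
/* The VLAN filter is a MAX_VLAN-bit bitmap packed into uint32_t words:
 * VID v lives at bit (v & 0x1f) of vlans[v >> 5], so e.g. VID 100 is bit 4
 * of word 3.  receive_filter() consults the same bitmap on the RX path.
 */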
static int virtio_net_handle_vlan_table(VirtIONet *n, uint8_t cmd,
                                        struct iovec *iov, unsigned int iov_cnt)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    uint16_t vid;
    size_t s;
    NetClientState *nc = qemu_get_queue(n->nic);

    s = iov_to_buf(iov, iov_cnt, 0, &vid, sizeof(vid));
    vid = virtio_lduw_p(vdev, &vid);
    if (s != sizeof(vid)) {
        return VIRTIO_NET_ERR;
    }

    if (vid >= MAX_VLAN)
        return VIRTIO_NET_ERR;

    if (cmd == VIRTIO_NET_CTRL_VLAN_ADD)
        n->vlans[vid >> 5] |= (1U << (vid & 0x1f));
    else if (cmd == VIRTIO_NET_CTRL_VLAN_DEL)
        n->vlans[vid >> 5] &= ~(1U << (vid & 0x1f));
    else
        return VIRTIO_NET_ERR;

    rxfilter_notify(nc);

    return VIRTIO_NET_OK;
}

static int virtio_net_handle_announce(VirtIONet *n, uint8_t cmd,
                                      struct iovec *iov, unsigned int iov_cnt)
{
    if (cmd == VIRTIO_NET_CTRL_ANNOUNCE_ACK &&
        n->status & VIRTIO_NET_S_ANNOUNCE) {
        n->status &= ~VIRTIO_NET_S_ANNOUNCE;
        if (n->announce_counter) {
            timer_mod(n->announce_timer,
                      qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) +
                      self_announce_delay(n->announce_counter));
        }
        return VIRTIO_NET_OK;
    } else {
        return VIRTIO_NET_ERR;
    }
}

static int virtio_net_handle_mq(VirtIONet *n, uint8_t cmd,
                                struct iovec *iov, unsigned int iov_cnt)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    struct virtio_net_ctrl_mq mq;
    size_t s;
    uint16_t queues;

    s = iov_to_buf(iov, iov_cnt, 0, &mq, sizeof(mq));
    if (s != sizeof(mq)) {
        return VIRTIO_NET_ERR;
    }

    if (cmd != VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET) {
        return VIRTIO_NET_ERR;
    }

    queues = virtio_lduw_p(vdev, &mq.virtqueue_pairs);

    if (queues < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN ||
        queues > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX ||
        queues > n->max_queues ||
        !n->multiqueue) {
        return VIRTIO_NET_ERR;
    }

    n->curr_queues = queues;
    /* stop the backend before changing the number of queues to avoid handling a
     * disabled queue */
    virtio_net_set_status(vdev, vdev->status);
    virtio_net_set_queues(n);

    return VIRTIO_NET_OK;
}

static void virtio_net_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    struct virtio_net_ctrl_hdr ctrl;
    virtio_net_ctrl_ack status = VIRTIO_NET_ERR;
    VirtQueueElement elem;
    size_t s;
    struct iovec *iov;
    unsigned int iov_cnt;

    while (virtqueue_pop(vq, &elem)) {
        if (iov_size(elem.in_sg, elem.in_num) < sizeof(status) ||
            iov_size(elem.out_sg, elem.out_num) < sizeof(ctrl)) {
            error_report("virtio-net ctrl missing headers");
            exit(1);
        }

        iov = elem.out_sg;
        iov_cnt = elem.out_num;
        s = iov_to_buf(iov, iov_cnt, 0, &ctrl, sizeof(ctrl));
        iov_discard_front(&iov, &iov_cnt, sizeof(ctrl));
        if (s != sizeof(ctrl)) {
            status = VIRTIO_NET_ERR;
        } else if (ctrl.class == VIRTIO_NET_CTRL_RX) {
            status = virtio_net_handle_rx_mode(n, ctrl.cmd, iov, iov_cnt);
        } else if (ctrl.class == VIRTIO_NET_CTRL_MAC) {
            status = virtio_net_handle_mac(n, ctrl.cmd, iov, iov_cnt);
        } else if (ctrl.class == VIRTIO_NET_CTRL_VLAN) {
            status = virtio_net_handle_vlan_table(n, ctrl.cmd, iov, iov_cnt);
        } else if (ctrl.class == VIRTIO_NET_CTRL_ANNOUNCE) {
            status = virtio_net_handle_announce(n, ctrl.cmd, iov, iov_cnt);
        } else if (ctrl.class == VIRTIO_NET_CTRL_MQ) {
            status = virtio_net_handle_mq(n, ctrl.cmd, iov, iov_cnt);
        } else if (ctrl.class == VIRTIO_NET_CTRL_GUEST_OFFLOADS) {
            status = virtio_net_handle_offloads(n, ctrl.cmd, iov, iov_cnt);
        }

        s = iov_from_buf(elem.in_sg, elem.in_num, 0, &status, sizeof(status));
        assert(s == sizeof(status));

        virtqueue_push(vq, &elem, sizeof(status));
        virtio_notify(vdev, vq);
    }
}

/* RX */
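
/* A guest kick on the RX virtqueue means fresh receive buffers: flushing
 * the matching subqueue re-runs virtio_net_receive() on any packets that
 * were queued while the ring was empty.
 */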
static void virtio_net_handle_rx(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    int queue_index = vq2q(virtio_get_queue_index(vq));

    qemu_flush_queued_packets(qemu_get_subqueue(n->nic, queue_index));
}

static int virtio_net_can_receive(NetClientState *nc)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    VirtIONetQueue *q = virtio_net_get_subqueue(nc);

    if (!vdev->vm_running) {
        return 0;
    }

    if (nc->queue_index >= n->curr_queues) {
        return 0;
    }

    if (!virtio_queue_ready(q->rx_vq) ||
        !(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK)) {
        return 0;
    }

    return 1;
}

static int virtio_net_has_buffers(VirtIONetQueue *q, int bufsize)
{
    VirtIONet *n = q->n;
    if (virtio_queue_empty(q->rx_vq) ||
        (n->mergeable_rx_bufs &&
         !virtqueue_avail_bytes(q->rx_vq, bufsize, 0))) {
        virtio_queue_set_notification(q->rx_vq, 1);

        /* To avoid a race condition where the guest has made some buffers
         * available after the above check but before notification was
         * enabled, check for available buffers again.
         */
        if (virtio_queue_empty(q->rx_vq) ||
            (n->mergeable_rx_bufs &&
             !virtqueue_avail_bytes(q->rx_vq, bufsize, 0))) {
            return 0;
        }
    }

    virtio_queue_set_notification(q->rx_vq, 0);
    return 1;
}

static void virtio_net_hdr_swap(VirtIODevice *vdev, struct virtio_net_hdr *hdr)
{
    virtio_tswap16s(vdev, &hdr->hdr_len);
    virtio_tswap16s(vdev, &hdr->gso_size);
    virtio_tswap16s(vdev, &hdr->csum_start);
    virtio_tswap16s(vdev, &hdr->csum_offset);
}
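
/* Legacy (pre-1.0) virtio carries the multi-byte header fields in
 * target-native endianness, hence the per-field virtio_tswap16s() calls
 * above rather than a fixed wire byte order.
 */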

/* dhclient uses AF_PACKET but doesn't pass auxdata to the kernel so
 * it never finds out that the packets don't have valid checksums.  This
 * causes dhclient to get upset.  Fedora's carried a patch for ages to
 * fix this with Xen but it hasn't appeared in an upstream release of
 * dhclient yet.
 *
 * To avoid breaking existing guests, we catch udp packets and add
 * checksums.  This is terrible but it's better than hacking the guest
 * kernels.
 *
 * N.B. if we introduce a zero-copy API, this operation is no longer free so
 * we should provide a mechanism to disable it to avoid polluting the host
 * cache.
 */
static void work_around_broken_dhclient(struct virtio_net_hdr *hdr,
                                        uint8_t *buf, size_t size)
{
    if ((hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) && /* missing csum */
        (size > 27 && size < 1500) && /* normal sized MTU */
        (buf[12] == 0x08 && buf[13] == 0x00) && /* ethertype == IPv4 */
        (buf[23] == 17) && /* ip.protocol == UDP */
        (buf[34] == 0 && buf[35] == 67)) { /* udp.srcport == bootps */
        net_checksum_calculate(buf, size);
        hdr->flags &= ~VIRTIO_NET_HDR_F_NEEDS_CSUM;
    }
}

static void receive_header(VirtIONet *n, const struct iovec *iov, int iov_cnt,
                           const void *buf, size_t size)
{
    if (n->has_vnet_hdr) {
        /* FIXME this cast is evil */
        void *wbuf = (void *)buf;
        work_around_broken_dhclient(wbuf, wbuf + n->host_hdr_len,
                                    size - n->host_hdr_len);
        virtio_net_hdr_swap(VIRTIO_DEVICE(n), wbuf);
        iov_from_buf(iov, iov_cnt, 0, buf, sizeof(struct virtio_net_hdr));
    } else {
        struct virtio_net_hdr hdr = {
            .flags = 0,
            .gso_type = VIRTIO_NET_HDR_GSO_NONE
        };
        iov_from_buf(iov, iov_cnt, 0, &hdr, sizeof hdr);
    }
}

static int receive_filter(VirtIONet *n, const uint8_t *buf, int size)
{
    static const uint8_t bcast[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
    static const uint8_t vlan[] = {0x81, 0x00};
    uint8_t *ptr = (uint8_t *)buf;
    int i;

    if (n->promisc)
        return 1;

    ptr += n->host_hdr_len;

    if (!memcmp(&ptr[12], vlan, sizeof(vlan))) {
        int vid = be16_to_cpup((uint16_t *)(ptr + 14)) & 0xfff;
        if (!(n->vlans[vid >> 5] & (1U << (vid & 0x1f))))
            return 0;
    }

    if (ptr[0] & 1) { // multicast
        if (!memcmp(ptr, bcast, sizeof(bcast))) {
            return !n->nobcast;
        } else if (n->nomulti) {
            return 0;
        } else if (n->allmulti || n->mac_table.multi_overflow) {
            return 1;
        }

        for (i = n->mac_table.first_multi; i < n->mac_table.in_use; i++) {
            if (!memcmp(ptr, &n->mac_table.macs[i * ETH_ALEN], ETH_ALEN)) {
                return 1;
            }
        }
    } else { // unicast
        if (n->nouni) {
            return 0;
        } else if (n->alluni || n->mac_table.uni_overflow) {
            return 1;
        } else if (!memcmp(ptr, n->mac, ETH_ALEN)) {
            return 1;
        }

        for (i = 0; i < n->mac_table.first_multi; i++) {
            if (!memcmp(ptr, &n->mac_table.macs[i * ETH_ALEN], ETH_ALEN)) {
                return 1;
            }
        }
    }

    return 0;
}
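
/* RX fast path.  The packet is copied into as many descriptor chains as
 * needed: without mergeable buffers the first chain must hold the whole
 * packet, while with VIRTIO_NET_F_MRG_RXBUF the copy may span several
 * chains and num_buffers is patched into the first header afterwards
 * through the iovec saved in mhdr_sg.
 */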
static ssize_t virtio_net_receive(NetClientState *nc, const uint8_t *buf, size_t size)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);
    VirtIONetQueue *q = virtio_net_get_subqueue(nc);
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    struct iovec mhdr_sg[VIRTQUEUE_MAX_SIZE];
    struct virtio_net_hdr_mrg_rxbuf mhdr;
    unsigned mhdr_cnt = 0;
    size_t offset, i, guest_offset;

    if (!virtio_net_can_receive(nc)) {
        return -1;
    }

    /* hdr_len refers to the header we supply to the guest */
    if (!virtio_net_has_buffers(q, size + n->guest_hdr_len - n->host_hdr_len)) {
        return 0;
    }

    if (!receive_filter(n, buf, size))
        return size;

    offset = i = 0;

    while (offset < size) {
        VirtQueueElement elem;
        int len, total;
        const struct iovec *sg = elem.in_sg;

        total = 0;

        if (virtqueue_pop(q->rx_vq, &elem) == 0) {
            if (i == 0)
                return -1;
            error_report("virtio-net unexpected empty queue: "
                         "i %zd mergeable %d offset %zd, size %zd, "
                         "guest hdr len %zd, host hdr len %zd guest features 0x%x",
                         i, n->mergeable_rx_bufs, offset, size,
                         n->guest_hdr_len, n->host_hdr_len, vdev->guest_features);
            exit(1);
        }

        if (elem.in_num < 1) {
            error_report("virtio-net receive queue contains no in buffers");
            exit(1);
        }

        if (i == 0) {
            assert(offset == 0);
            if (n->mergeable_rx_bufs) {
                mhdr_cnt = iov_copy(mhdr_sg, ARRAY_SIZE(mhdr_sg),
                                    sg, elem.in_num,
                                    offsetof(typeof(mhdr), num_buffers),
                                    sizeof(mhdr.num_buffers));
            }

            receive_header(n, sg, elem.in_num, buf, size);
            offset = n->host_hdr_len;
            total += n->guest_hdr_len;
            guest_offset = n->guest_hdr_len;
        } else {
            guest_offset = 0;
        }

        /* copy in packet.  ugh */
        len = iov_from_buf(sg, elem.in_num, guest_offset,
                           buf + offset, size - offset);
        total += len;
        offset += len;
        /* If buffers can't be merged, at this point we
         * must have consumed the complete packet.
         * Otherwise, drop it. */
        if (!n->mergeable_rx_bufs && offset < size) {
#if 0
            error_report("virtio-net truncated non-mergeable packet: "
                         "i %zd mergeable %d offset %zd, size %zd, "
                         "guest hdr len %zd, host hdr len %zd",
                         i, n->mergeable_rx_bufs,
                         offset, size, n->guest_hdr_len, n->host_hdr_len);
#endif
            return size;
        }

        /* signal other side */
        virtqueue_fill(q->rx_vq, &elem, total, i++);
    }

    if (mhdr_cnt) {
        virtio_stw_p(vdev, &mhdr.num_buffers, i);
        iov_from_buf(mhdr_sg, mhdr_cnt,
                     0,
                     &mhdr.num_buffers, sizeof mhdr.num_buffers);
    }

    virtqueue_flush(q->rx_vq, i);
    virtio_notify(vdev, q->rx_vq);

    return size;
}

static int32_t virtio_net_flush_tx(VirtIONetQueue *q);
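
/* Completion callback for an asynchronous qemu_sendv_packet_async() send.
 * The element parked in q->async_tx is pushed back to the guest, TX
 * notifications are re-enabled, and the queue is flushed again since the
 * guest may have queued more packets while the backend was busy.
 */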
static void virtio_net_tx_complete(NetClientState *nc, ssize_t len)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);
    VirtIONetQueue *q = virtio_net_get_subqueue(nc);
    VirtIODevice *vdev = VIRTIO_DEVICE(n);

    virtqueue_push(q->tx_vq, &q->async_tx.elem, 0);
    virtio_notify(vdev, q->tx_vq);

    q->async_tx.elem.out_num = q->async_tx.len = 0;

    virtio_queue_set_notification(q->tx_vq, 1);
    virtio_net_flush_tx(q);
}

/* TX */
static int32_t virtio_net_flush_tx(VirtIONetQueue *q)
{
    VirtIONet *n = q->n;
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    VirtQueueElement elem;
    int32_t num_packets = 0;
    int queue_index = vq2q(virtio_get_queue_index(q->tx_vq));
    if (!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK)) {
        return num_packets;
    }

    if (q->async_tx.elem.out_num) {
        virtio_queue_set_notification(q->tx_vq, 0);
        return num_packets;
    }

    while (virtqueue_pop(q->tx_vq, &elem)) {
        ssize_t ret, len;
        unsigned int out_num = elem.out_num;
        struct iovec *out_sg = &elem.out_sg[0];
        struct iovec sg[VIRTQUEUE_MAX_SIZE];

        if (out_num < 1) {
            error_report("virtio-net header not in first element");
            exit(1);
        }

        if (n->has_vnet_hdr) {
            if (out_sg[0].iov_len < n->guest_hdr_len) {
                error_report("virtio-net header incorrect");
                exit(1);
            }
            virtio_net_hdr_swap(vdev, (void *) out_sg[0].iov_base);
        }

        /*
         * If host wants to see the guest header as is, we can
         * pass it on unchanged.  Otherwise, copy just the parts
         * that host is interested in.
         */
        assert(n->host_hdr_len <= n->guest_hdr_len);
        if (n->host_hdr_len != n->guest_hdr_len) {
            unsigned sg_num = iov_copy(sg, ARRAY_SIZE(sg),
                                       out_sg, out_num,
                                       0, n->host_hdr_len);
            sg_num += iov_copy(sg + sg_num, ARRAY_SIZE(sg) - sg_num,
                               out_sg, out_num,
                               n->guest_hdr_len, -1);
            out_num = sg_num;
            out_sg = sg;
        }

        len = n->guest_hdr_len;

        ret = qemu_sendv_packet_async(qemu_get_subqueue(n->nic, queue_index),
                                      out_sg, out_num, virtio_net_tx_complete);
        if (ret == 0) {
            virtio_queue_set_notification(q->tx_vq, 0);
            q->async_tx.elem = elem;
            q->async_tx.len = len;
            return -EBUSY;
        }

        len += ret;

        virtqueue_push(q->tx_vq, &elem, 0);
        virtio_notify(vdev, q->tx_vq);

        if (++num_packets >= n->tx_burst) {
            break;
        }
    }
    return num_packets;
}

static void virtio_net_handle_tx_timer(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    VirtIONetQueue *q = &n->vqs[vq2q(virtio_get_queue_index(vq))];

    /* This happens when device was stopped but VCPU wasn't. */
    if (!vdev->vm_running) {
        q->tx_waiting = 1;
        return;
    }

    if (q->tx_waiting) {
        virtio_queue_set_notification(vq, 1);
        timer_del(q->tx_timer);
        q->tx_waiting = 0;
        virtio_net_flush_tx(q);
    } else {
        timer_mod(q->tx_timer,
                  qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + n->tx_timeout);
        q->tx_waiting = 1;
        virtio_queue_set_notification(vq, 0);
    }
}

static void virtio_net_handle_tx_bh(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    VirtIONetQueue *q = &n->vqs[vq2q(virtio_get_queue_index(vq))];

    if (unlikely(q->tx_waiting)) {
        return;
    }
    q->tx_waiting = 1;
    /* This happens when device was stopped but VCPU wasn't. */
    if (!vdev->vm_running) {
        return;
    }
    virtio_queue_set_notification(vq, 0);
    qemu_bh_schedule(q->tx_bh);
}

static void virtio_net_tx_timer(void *opaque)
{
    VirtIONetQueue *q = opaque;
    VirtIONet *n = q->n;
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    /* This happens when device was stopped but BH wasn't. */
    if (!vdev->vm_running) {
        /* Make sure tx waiting is set, so we'll run when restarted. */
        assert(q->tx_waiting);
        return;
    }

    q->tx_waiting = 0;

    /* Just in case the driver is not ready any more */
    if (!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK)) {
        return;
    }

    virtio_queue_set_notification(q->tx_vq, 1);
    virtio_net_flush_tx(q);
}
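
/* Bottom-half flavour of TX (the default, tx=bh).  A full burst of
 * n->tx_burst packets reschedules the bottom half rather than looping
 * forever; otherwise notifications are re-enabled and one more flush
 * catches packets that raced in while they were off.
 */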
static void virtio_net_tx_bh(void *opaque)
{
    VirtIONetQueue *q = opaque;
    VirtIONet *n = q->n;
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    int32_t ret;

    /* This happens when device was stopped but BH wasn't. */
    if (!vdev->vm_running) {
        /* Make sure tx waiting is set, so we'll run when restarted. */
        assert(q->tx_waiting);
        return;
    }

    q->tx_waiting = 0;

    /* Just in case the driver is not ready any more */
    if (unlikely(!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK))) {
        return;
    }

    ret = virtio_net_flush_tx(q);
    if (ret == -EBUSY) {
        return; /* Notification re-enable handled by tx_complete */
    }

    /* If we flush a full burst of packets, assume there are
     * more coming and immediately reschedule */
    if (ret >= n->tx_burst) {
        qemu_bh_schedule(q->tx_bh);
        q->tx_waiting = 1;
        return;
    }

    /* If less than a full burst, re-enable notification and flush
     * anything that may have come in while we weren't looking.  If
     * we find something, assume the guest is still active and reschedule */
    virtio_queue_set_notification(q->tx_vq, 1);
    if (virtio_net_flush_tx(q) > 0) {
        virtio_queue_set_notification(q->tx_vq, 0);
        qemu_bh_schedule(q->tx_bh);
        q->tx_waiting = 1;
    }
}

static void virtio_net_set_multiqueue(VirtIONet *n, int multiqueue)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    int i, max = multiqueue ? n->max_queues : 1;

    n->multiqueue = multiqueue;

    for (i = 2; i <= n->max_queues * 2 + 1; i++) {
        virtio_del_queue(vdev, i);
    }

    for (i = 1; i < max; i++) {
        n->vqs[i].rx_vq = virtio_add_queue(vdev, 256, virtio_net_handle_rx);
        if (n->vqs[i].tx_timer) {
            n->vqs[i].tx_vq =
                virtio_add_queue(vdev, 256, virtio_net_handle_tx_timer);
            n->vqs[i].tx_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
                                              virtio_net_tx_timer,
                                              &n->vqs[i]);
        } else {
            n->vqs[i].tx_vq =
                virtio_add_queue(vdev, 256, virtio_net_handle_tx_bh);
            n->vqs[i].tx_bh = qemu_bh_new(virtio_net_tx_bh, &n->vqs[i]);
        }

        n->vqs[i].tx_waiting = 0;
        n->vqs[i].n = n;
    }

    /* Note: Minix guests (version 3.2.1) use ctrl vq but don't ack
     * VIRTIO_NET_F_CTRL_VQ.  Create ctrl vq unconditionally to avoid
     * breaking them.
     */
    n->ctrl_vq = virtio_add_queue(vdev, 64, virtio_net_handle_ctrl);

    virtio_net_set_queues(n);
}
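
/* Migration support.  The device state (MAC table, VLAN bitmap, rx-mode
 * flags, per-queue tx_waiting, ...) is versioned via VIRTIO_NET_VM_VERSION;
 * the load side accepts older versions and simply skips fields they lack.
 */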
static void virtio_net_save(QEMUFile *f, void *opaque)
{
    VirtIONet *n = opaque;
    VirtIODevice *vdev = VIRTIO_DEVICE(n);

    /* At this point, backend must be stopped, otherwise
     * it might keep writing to memory. */
    assert(!n->vhost_started);
    virtio_save(vdev, f);
}

static void virtio_net_save_device(VirtIODevice *vdev, QEMUFile *f)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    int i;

    qemu_put_buffer(f, n->mac, ETH_ALEN);
    qemu_put_be32(f, n->vqs[0].tx_waiting);
    qemu_put_be32(f, n->mergeable_rx_bufs);
    qemu_put_be16(f, n->status);
    qemu_put_byte(f, n->promisc);
    qemu_put_byte(f, n->allmulti);
    qemu_put_be32(f, n->mac_table.in_use);
    qemu_put_buffer(f, n->mac_table.macs, n->mac_table.in_use * ETH_ALEN);
    qemu_put_buffer(f, (uint8_t *)n->vlans, MAX_VLAN >> 3);
    qemu_put_be32(f, n->has_vnet_hdr);
    qemu_put_byte(f, n->mac_table.multi_overflow);
    qemu_put_byte(f, n->mac_table.uni_overflow);
    qemu_put_byte(f, n->alluni);
    qemu_put_byte(f, n->nomulti);
    qemu_put_byte(f, n->nouni);
    qemu_put_byte(f, n->nobcast);
    qemu_put_byte(f, n->has_ufo);
    if (n->max_queues > 1) {
        qemu_put_be16(f, n->max_queues);
        qemu_put_be16(f, n->curr_queues);
        for (i = 1; i < n->curr_queues; i++) {
            qemu_put_be32(f, n->vqs[i].tx_waiting);
        }
    }

    if ((1 << VIRTIO_NET_F_CTRL_GUEST_OFFLOADS) & vdev->guest_features) {
        qemu_put_be64(f, n->curr_guest_offloads);
    }
}

static int virtio_net_load(QEMUFile *f, void *opaque, int version_id)
{
    VirtIONet *n = opaque;
    VirtIODevice *vdev = VIRTIO_DEVICE(n);

    if (version_id < 2 || version_id > VIRTIO_NET_VM_VERSION)
        return -EINVAL;

    return virtio_load(vdev, f, version_id);
}
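
/* Counterpart of virtio_net_save_device(): fields are read in the same
 * order, gated on the version_id of the incoming stream, so images from
 * older QEMU versions (back to version 2) remain loadable.
 */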
static int virtio_net_load_device(VirtIODevice *vdev, QEMUFile *f,
                                  int version_id)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    int i, link_down;

    qemu_get_buffer(f, n->mac, ETH_ALEN);
    n->vqs[0].tx_waiting = qemu_get_be32(f);

    virtio_net_set_mrg_rx_bufs(n, qemu_get_be32(f));

    if (version_id >= 3)
        n->status = qemu_get_be16(f);

    if (version_id >= 4) {
        if (version_id < 8) {
            n->promisc = qemu_get_be32(f);
            n->allmulti = qemu_get_be32(f);
        } else {
            n->promisc = qemu_get_byte(f);
            n->allmulti = qemu_get_byte(f);
        }
    }

    if (version_id >= 5) {
        n->mac_table.in_use = qemu_get_be32(f);
        /* MAC_TABLE_ENTRIES may be different from the saved image */
        if (n->mac_table.in_use <= MAC_TABLE_ENTRIES) {
            qemu_get_buffer(f, n->mac_table.macs,
                            n->mac_table.in_use * ETH_ALEN);
        } else {
            int64_t i;

            /* Overflow detected - can happen if source has a larger MAC table.
             * We simply set overflow flag so there's no need to maintain the
             * table of addresses, discard them all.
             * Note: 64 bit math to avoid integer overflow.
             */
            for (i = 0; i < (int64_t)n->mac_table.in_use * ETH_ALEN; ++i) {
                qemu_get_byte(f);
            }
            n->mac_table.multi_overflow = n->mac_table.uni_overflow = 1;
            n->mac_table.in_use = 0;
        }
    }

    if (version_id >= 6)
        qemu_get_buffer(f, (uint8_t *)n->vlans, MAX_VLAN >> 3);

    if (version_id >= 7) {
        if (qemu_get_be32(f) && !peer_has_vnet_hdr(n)) {
            error_report("virtio-net: saved image requires vnet_hdr=on");
            return -1;
        }
    }

    if (version_id >= 9) {
        n->mac_table.multi_overflow = qemu_get_byte(f);
        n->mac_table.uni_overflow = qemu_get_byte(f);
    }

    if (version_id >= 10) {
        n->alluni = qemu_get_byte(f);
        n->nomulti = qemu_get_byte(f);
        n->nouni = qemu_get_byte(f);
        n->nobcast = qemu_get_byte(f);
    }

    if (version_id >= 11) {
        if (qemu_get_byte(f) && !peer_has_ufo(n)) {
            error_report("virtio-net: saved image requires TUN_F_UFO support");
            return -1;
        }
    }

    if (n->max_queues > 1) {
        if (n->max_queues != qemu_get_be16(f)) {
            error_report("virtio-net: different max_queues ");
            return -1;
        }

        n->curr_queues = qemu_get_be16(f);
        if (n->curr_queues > n->max_queues) {
            error_report("virtio-net: curr_queues %x > max_queues %x",
                         n->curr_queues, n->max_queues);
            return -1;
        }
        for (i = 1; i < n->curr_queues; i++) {
            n->vqs[i].tx_waiting = qemu_get_be32(f);
        }
    }

    if ((1 << VIRTIO_NET_F_CTRL_GUEST_OFFLOADS) & vdev->guest_features) {
        n->curr_guest_offloads = qemu_get_be64(f);
    } else {
        n->curr_guest_offloads = virtio_net_supported_guest_offloads(n);
    }

    if (peer_has_vnet_hdr(n)) {
        virtio_net_apply_guest_offloads(n);
    }

    virtio_net_set_queues(n);

    /* Find the first multicast entry in the saved MAC filter */
    for (i = 0; i < n->mac_table.in_use; i++) {
        if (n->mac_table.macs[i * ETH_ALEN] & 1) {
            break;
        }
    }
    n->mac_table.first_multi = i;

    /* nc.link_down can't be migrated, so infer link_down according
     * to link status bit in n->status */
    link_down = (n->status & VIRTIO_NET_S_LINK_UP) == 0;
    for (i = 0; i < n->max_queues; i++) {
        qemu_get_subqueue(n->nic, i)->link_down = link_down;
    }

    if (vdev->guest_features & (0x1 << VIRTIO_NET_F_GUEST_ANNOUNCE) &&
        vdev->guest_features & (0x1 << VIRTIO_NET_F_CTRL_VQ)) {
        n->announce_counter = SELF_ANNOUNCE_ROUNDS;
        timer_mod(n->announce_timer, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL));
    }

    return 0;
}

static void virtio_net_cleanup(NetClientState *nc)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);

    n->nic = NULL;
}

static NetClientInfo net_virtio_info = {
    .type = NET_CLIENT_OPTIONS_KIND_NIC,
    .size = sizeof(NICState),
    .can_receive = virtio_net_can_receive,
    .receive = virtio_net_receive,
    .cleanup = virtio_net_cleanup,
    .link_status_changed = virtio_net_set_link_status,
    .query_rx_filter = virtio_net_query_rxfilter,
};

static bool virtio_net_guest_notifier_pending(VirtIODevice *vdev, int idx)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    NetClientState *nc = qemu_get_subqueue(n->nic, vq2q(idx));
    assert(n->vhost_started);
    return vhost_net_virtqueue_pending(get_vhost_net(nc->peer), idx);
}

static void virtio_net_guest_notifier_mask(VirtIODevice *vdev, int idx,
                                           bool mask)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    NetClientState *nc = qemu_get_subqueue(n->nic, vq2q(idx));
    assert(n->vhost_started);
    vhost_net_virtqueue_mask(get_vhost_net(nc->peer),
                             vdev, idx, mask);
}

void virtio_net_set_config_size(VirtIONet *n, uint32_t host_features)
{
    int i, config_size = 0;
    host_features |= (1 << VIRTIO_NET_F_MAC);
    for (i = 0; feature_sizes[i].flags != 0; i++) {
        if (host_features & feature_sizes[i].flags) {
            config_size = MAX(feature_sizes[i].end, config_size);
        }
    }
    n->config_size = config_size;
}

void virtio_net_set_netclient_name(VirtIONet *n, const char *name,
                                   const char *type)
{
    /*
     * The name can be NULL, the netclient name will be type.x.
     */
    assert(type != NULL);

    g_free(n->netclient_name);
    g_free(n->netclient_type);
    n->netclient_name = g_strdup(name);
    n->netclient_type = g_strdup(type);
}
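
/* realize() brings the device to life: the queue pair 0 virtqueues and the
 * control vq are created here, further pairs are added later by
 * virtio_net_set_multiqueue() once features are known, and the device
 * registers its migration state under the "virtio-net" savevm section.
 */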
static void virtio_net_device_realize(DeviceState *dev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VirtIONet *n = VIRTIO_NET(dev);
    NetClientState *nc;
    int i;

    virtio_init(vdev, "virtio-net", VIRTIO_ID_NET, n->config_size);

    n->max_queues = MAX(n->nic_conf.peers.queues, 1);
    n->vqs = g_malloc0(sizeof(VirtIONetQueue) * n->max_queues);
    n->vqs[0].rx_vq = virtio_add_queue(vdev, 256, virtio_net_handle_rx);
    n->curr_queues = 1;
    n->vqs[0].n = n;
    n->tx_timeout = n->net_conf.txtimer;

    if (n->net_conf.tx && strcmp(n->net_conf.tx, "timer")
                       && strcmp(n->net_conf.tx, "bh")) {
        error_report("virtio-net: "
                     "Unknown option tx=%s, valid options: \"timer\" \"bh\"",
                     n->net_conf.tx);
        error_report("Defaulting to \"bh\"");
    }

    if (n->net_conf.tx && !strcmp(n->net_conf.tx, "timer")) {
        n->vqs[0].tx_vq = virtio_add_queue(vdev, 256,
                                           virtio_net_handle_tx_timer);
        n->vqs[0].tx_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, virtio_net_tx_timer,
                                          &n->vqs[0]);
    } else {
        n->vqs[0].tx_vq = virtio_add_queue(vdev, 256,
                                           virtio_net_handle_tx_bh);
        n->vqs[0].tx_bh = qemu_bh_new(virtio_net_tx_bh, &n->vqs[0]);
    }
    n->ctrl_vq = virtio_add_queue(vdev, 64, virtio_net_handle_ctrl);
    qemu_macaddr_default_if_unset(&n->nic_conf.macaddr);
    memcpy(&n->mac[0], &n->nic_conf.macaddr, sizeof(n->mac));
    n->status = VIRTIO_NET_S_LINK_UP;
    n->announce_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL,
                                     virtio_net_announce_timer, n);

    if (n->netclient_type) {
        /*
         * This happens when virtio_net_set_netclient_name() has been called.
         */
        n->nic = qemu_new_nic(&net_virtio_info, &n->nic_conf,
                              n->netclient_type, n->netclient_name, n);
    } else {
        n->nic = qemu_new_nic(&net_virtio_info, &n->nic_conf,
                              object_get_typename(OBJECT(dev)), dev->id, n);
    }

    peer_test_vnet_hdr(n);
    if (peer_has_vnet_hdr(n)) {
        for (i = 0; i < n->max_queues; i++) {
            qemu_using_vnet_hdr(qemu_get_subqueue(n->nic, i)->peer, true);
        }
        n->host_hdr_len = sizeof(struct virtio_net_hdr);
    } else {
        n->host_hdr_len = 0;
    }

    qemu_format_nic_info_str(qemu_get_queue(n->nic), n->nic_conf.macaddr.a);

    n->vqs[0].tx_waiting = 0;
    n->tx_burst = n->net_conf.txburst;
    virtio_net_set_mrg_rx_bufs(n, 0);
    n->promisc = 1; /* for compatibility */

    n->mac_table.macs = g_malloc0(MAC_TABLE_ENTRIES * ETH_ALEN);

    n->vlans = g_malloc0(MAX_VLAN >> 3);

    nc = qemu_get_queue(n->nic);
    nc->rxfilter_notify_enabled = 1;

    n->qdev = dev;
    register_savevm(dev, "virtio-net", -1, VIRTIO_NET_VM_VERSION,
                    virtio_net_save, virtio_net_load, n);
}

static void virtio_net_device_unrealize(DeviceState *dev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VirtIONet *n = VIRTIO_NET(dev);
    int i;

    /* This will stop vhost backend if appropriate. */
    virtio_net_set_status(vdev, 0);

    unregister_savevm(dev, "virtio-net", n);

    g_free(n->netclient_name);
    n->netclient_name = NULL;
    g_free(n->netclient_type);
    n->netclient_type = NULL;

    g_free(n->mac_table.macs);
    g_free(n->vlans);

    for (i = 0; i < n->max_queues; i++) {
        VirtIONetQueue *q = &n->vqs[i];
        NetClientState *nc = qemu_get_subqueue(n->nic, i);

        qemu_purge_queued_packets(nc);

        if (q->tx_timer) {
            timer_del(q->tx_timer);
            timer_free(q->tx_timer);
        } else if (q->tx_bh) {
            qemu_bh_delete(q->tx_bh);
        }
    }

    timer_del(n->announce_timer);
    timer_free(n->announce_timer);
    g_free(n->vqs);
    qemu_del_nic(n->nic);
    virtio_cleanup(vdev);
}

static void virtio_net_instance_init(Object *obj)
{
    VirtIONet *n = VIRTIO_NET(obj);

    /*
     * The default config_size is sizeof(struct virtio_net_config).
     * Can be overridden with virtio_net_set_config_size.
     */
    n->config_size = sizeof(struct virtio_net_config);
    device_add_bootindex_property(obj, &n->nic_conf.bootindex,
                                  "bootindex", "/ethernet-phy@0",
                                  DEVICE(n), NULL);
}

static Property virtio_net_properties[] = {
    DEFINE_NIC_PROPERTIES(VirtIONet, nic_conf),
    DEFINE_PROP_UINT32("x-txtimer", VirtIONet, net_conf.txtimer,
                       TX_TIMER_INTERVAL),
    DEFINE_PROP_INT32("x-txburst", VirtIONet, net_conf.txburst, TX_BURST),
    DEFINE_PROP_STRING("tx", VirtIONet, net_conf.tx),
    DEFINE_PROP_END_OF_LIST(),
};

static void virtio_net_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);

    dc->props = virtio_net_properties;
    set_bit(DEVICE_CATEGORY_NETWORK, dc->categories);
    vdc->realize = virtio_net_device_realize;
    vdc->unrealize = virtio_net_device_unrealize;
    vdc->get_config = virtio_net_get_config;
    vdc->set_config = virtio_net_set_config;
    vdc->get_features = virtio_net_get_features;
    vdc->set_features = virtio_net_set_features;
    vdc->bad_features = virtio_net_bad_features;
    vdc->reset = virtio_net_reset;
    vdc->set_status = virtio_net_set_status;
    vdc->guest_notifier_mask = virtio_net_guest_notifier_mask;
    vdc->guest_notifier_pending = virtio_net_guest_notifier_pending;
    vdc->load = virtio_net_load_device;
    vdc->save = virtio_net_save_device;
}

static const TypeInfo virtio_net_info = {
    .name = TYPE_VIRTIO_NET,
    .parent = TYPE_VIRTIO_DEVICE,
    .instance_size = sizeof(VirtIONet),
    .instance_init = virtio_net_instance_init,
    .class_init = virtio_net_class_init,
};

static void virtio_register_types(void)
{
    type_register_static(&virtio_net_info);
}

type_init(virtio_register_types)