/*
 * Virtio Network Device
 *
 * Copyright IBM, Corp. 2007
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include "qemu/iov.h"
#include "hw/virtio/virtio.h"
#include "net/net.h"
#include "net/checksum.h"
#include "net/tap.h"
#include "qemu/error-report.h"
#include "qemu/timer.h"
#include "hw/virtio/virtio-net.h"
#include "net/vhost_net.h"
#include "hw/virtio/virtio-bus.h"
#include "qapi/qmp/qjson.h"
#include "qapi-event.h"

#define VIRTIO_NET_VM_VERSION    11

#define MAC_TABLE_ENTRIES    64
#define MAX_VLAN    (1 << 12)   /* Per 802.1Q definition */

/*
 * Calculate the number of bytes up to and including the given 'field' of
 * 'container'.
 */
#define endof(container, field) \
    (offsetof(container, field) + sizeof(((container *)0)->field))

typedef struct VirtIOFeature {
    uint32_t flags;
    size_t end;
} VirtIOFeature;

static VirtIOFeature feature_sizes[] = {
    {.flags = 1 << VIRTIO_NET_F_MAC,
     .end = endof(struct virtio_net_config, mac)},
    {.flags = 1 << VIRTIO_NET_F_STATUS,
     .end = endof(struct virtio_net_config, status)},
    {.flags = 1 << VIRTIO_NET_F_MQ,
     .end = endof(struct virtio_net_config, max_virtqueue_pairs)},
    {}
};

static VirtIONetQueue *virtio_net_get_subqueue(NetClientState *nc)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);

    return &n->vqs[nc->queue_index];
}

static int vq2q(int queue_index)
{
    return queue_index / 2;
}

/* TODO
 * - we could suppress RX interrupt if we were so inclined.
 */

static void virtio_net_get_config(VirtIODevice *vdev, uint8_t *config)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    struct virtio_net_config netcfg;

    stw_p(&netcfg.status, n->status);
    stw_p(&netcfg.max_virtqueue_pairs, n->max_queues);
    memcpy(netcfg.mac, n->mac, ETH_ALEN);
    memcpy(config, &netcfg, n->config_size);
}

static void virtio_net_set_config(VirtIODevice *vdev, const uint8_t *config)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    struct virtio_net_config netcfg = {};

    memcpy(&netcfg, config, n->config_size);

    if (!(vdev->guest_features >> VIRTIO_NET_F_CTRL_MAC_ADDR & 1) &&
        memcmp(netcfg.mac, n->mac, ETH_ALEN)) {
        memcpy(n->mac, netcfg.mac, ETH_ALEN);
        qemu_format_nic_info_str(qemu_get_queue(n->nic), n->mac);
    }
}

static bool virtio_net_started(VirtIONet *n, uint8_t status)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    return (status & VIRTIO_CONFIG_S_DRIVER_OK) &&
        (n->status & VIRTIO_NET_S_LINK_UP) && vdev->vm_running;
}

static void virtio_net_announce_timer(void *opaque)
{
    VirtIONet *n = opaque;
    VirtIODevice *vdev = VIRTIO_DEVICE(n);

    n->announce_counter--;
    n->status |= VIRTIO_NET_S_ANNOUNCE;
    virtio_notify_config(vdev);
}

static void virtio_net_vhost_status(VirtIONet *n, uint8_t status)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    NetClientState *nc = qemu_get_queue(n->nic);
    int queues = n->multiqueue ? n->max_queues : 1;

    if (!get_vhost_net(nc->peer)) {
        return;
    }

    if (!!n->vhost_started ==
        (virtio_net_started(n, status) && !nc->peer->link_down)) {
        return;
    }
    if (!n->vhost_started) {
        int r;
        if (!vhost_net_query(get_vhost_net(nc->peer), vdev)) {
            return;
        }
        n->vhost_started = 1;
        r = vhost_net_start(vdev, n->nic->ncs, queues);
        if (r < 0) {
            error_report("unable to start vhost net: %d: "
                         "falling back on userspace virtio", -r);
            n->vhost_started = 0;
        }
    } else {
        vhost_net_stop(vdev, n->nic->ncs, queues);
        n->vhost_started = 0;
    }
}

static void virtio_net_set_status(struct VirtIODevice *vdev, uint8_t status)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    VirtIONetQueue *q;
    int i;
    uint8_t queue_status;

    virtio_net_vhost_status(n, status);

    for (i = 0; i < n->max_queues; i++) {
        q = &n->vqs[i];

        if ((!n->multiqueue && i != 0) || i >= n->curr_queues) {
            queue_status = 0;
        } else {
            queue_status = status;
        }

        if (!q->tx_waiting) {
            continue;
        }

        if (virtio_net_started(n, queue_status) && !n->vhost_started) {
            if (q->tx_timer) {
                timer_mod(q->tx_timer,
                          qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + n->tx_timeout);
            } else {
                qemu_bh_schedule(q->tx_bh);
            }
        } else {
            if (q->tx_timer) {
                timer_del(q->tx_timer);
            } else {
                qemu_bh_cancel(q->tx_bh);
            }
        }
    }
}

static void virtio_net_set_link_status(NetClientState *nc)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    uint16_t old_status = n->status;

    if (nc->link_down)
        n->status &= ~VIRTIO_NET_S_LINK_UP;
    else
        n->status |= VIRTIO_NET_S_LINK_UP;

    if (n->status != old_status)
        virtio_notify_config(vdev);

    virtio_net_set_status(vdev, vdev->status);
}

static void rxfilter_notify(NetClientState *nc)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);

    if (nc->rxfilter_notify_enabled) {
        gchar *path = object_get_canonical_path(OBJECT(n->qdev));
        qapi_event_send_nic_rx_filter_changed(!!n->netclient_name,
                                              n->netclient_name, path,
                                              &error_abort);
        g_free(path);

        /* disable event notification to avoid events flooding */
        nc->rxfilter_notify_enabled = 0;
    }
}

static char *mac_strdup_printf(const uint8_t *mac)
{
    return g_strdup_printf("%.2x:%.2x:%.2x:%.2x:%.2x:%.2x", mac[0],
                           mac[1], mac[2], mac[3], mac[4], mac[5]);
}

static intList *get_vlan_table(VirtIONet *n)
{
    intList *list, *entry;
    int i, j;

    list = NULL;
    for (i = 0; i < MAX_VLAN >> 5; i++) {
        for (j = 0; n->vlans[i] && j <= 0x1f; j++) {
            if (n->vlans[i] & (1U << j)) {
                entry = g_malloc0(sizeof(*entry));
                entry->value = (i << 5) + j;
                entry->next = list;
                list = entry;
            }
        }
    }

    return list;
}

static RxFilterInfo *virtio_net_query_rxfilter(NetClientState *nc)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    RxFilterInfo *info;
    strList *str_list, *entry;
    int i;

    info = g_malloc0(sizeof(*info));
    info->name = g_strdup(nc->name);
    info->promiscuous = n->promisc;

    if (n->nouni) {
        info->unicast = RX_STATE_NONE;
    } else if (n->alluni) {
        info->unicast = RX_STATE_ALL;
    } else {
        info->unicast = RX_STATE_NORMAL;
    }

    if (n->nomulti) {
        info->multicast = RX_STATE_NONE;
    } else if (n->allmulti) {
        info->multicast = RX_STATE_ALL;
    } else {
        info->multicast = RX_STATE_NORMAL;
    }

    info->broadcast_allowed = n->nobcast;
    info->multicast_overflow = n->mac_table.multi_overflow;
    info->unicast_overflow = n->mac_table.uni_overflow;

    info->main_mac = mac_strdup_printf(n->mac);

    str_list = NULL;
    for (i = 0; i < n->mac_table.first_multi; i++) {
        entry = g_malloc0(sizeof(*entry));
        entry->value = mac_strdup_printf(n->mac_table.macs + i * ETH_ALEN);
        entry->next = str_list;
        str_list = entry;
    }
    info->unicast_table = str_list;

    str_list = NULL;
    for (i = n->mac_table.first_multi; i < n->mac_table.in_use; i++) {
        entry = g_malloc0(sizeof(*entry));
        entry->value = mac_strdup_printf(n->mac_table.macs + i * ETH_ALEN);
        entry->next = str_list;
        str_list = entry;
    }
    info->multicast_table = str_list;
    info->vlan_table = get_vlan_table(n);

    if (!((1 << VIRTIO_NET_F_CTRL_VLAN) & vdev->guest_features)) {
        info->vlan = RX_STATE_ALL;
    } else if (!info->vlan_table) {
        info->vlan = RX_STATE_NONE;
    } else {
        info->vlan = RX_STATE_NORMAL;
    }

    /* enable event notification after query */
    nc->rxfilter_notify_enabled = 1;

    return info;
}

static void virtio_net_reset(VirtIODevice *vdev)
{
    VirtIONet *n = VIRTIO_NET(vdev);

    /* Reset back to compatibility mode */
    n->promisc = 1;
    n->allmulti = 0;
    n->alluni = 0;
    n->nomulti = 0;
    n->nouni = 0;
    n->nobcast = 0;
    /* multiqueue is disabled by default */
    n->curr_queues = 1;
    timer_del(n->announce_timer);
    n->announce_counter = 0;
    n->status &= ~VIRTIO_NET_S_ANNOUNCE;

    /* Flush any MAC and VLAN filter table state */
    n->mac_table.in_use = 0;
    n->mac_table.first_multi = 0;
    n->mac_table.multi_overflow = 0;
    n->mac_table.uni_overflow = 0;
    memset(n->mac_table.macs, 0, MAC_TABLE_ENTRIES * ETH_ALEN);
    memcpy(&n->mac[0], &n->nic->conf->macaddr, sizeof(n->mac));
    qemu_format_nic_info_str(qemu_get_queue(n->nic), n->mac);
    memset(n->vlans, 0, MAX_VLAN >> 3);
}

static void peer_test_vnet_hdr(VirtIONet *n)
{
    NetClientState *nc = qemu_get_queue(n->nic);
    if (!nc->peer) {
        return;
    }

    n->has_vnet_hdr = qemu_has_vnet_hdr(nc->peer);
}

static int peer_has_vnet_hdr(VirtIONet *n)
{
    return n->has_vnet_hdr;
}

static int peer_has_ufo(VirtIONet *n)
{
    if (!peer_has_vnet_hdr(n))
        return 0;

    n->has_ufo = qemu_has_ufo(qemu_get_queue(n->nic)->peer);

    return n->has_ufo;
}

static void virtio_net_set_mrg_rx_bufs(VirtIONet *n, int mergeable_rx_bufs)
{
    int i;
    NetClientState *nc;

    n->mergeable_rx_bufs = mergeable_rx_bufs;

    n->guest_hdr_len = n->mergeable_rx_bufs ?
        sizeof(struct virtio_net_hdr_mrg_rxbuf) : sizeof(struct virtio_net_hdr);

    for (i = 0; i < n->max_queues; i++) {
        nc = qemu_get_subqueue(n->nic, i);

        if (peer_has_vnet_hdr(n) &&
            qemu_has_vnet_hdr_len(nc->peer, n->guest_hdr_len)) {
            qemu_set_vnet_hdr_len(nc->peer, n->guest_hdr_len);
            n->host_hdr_len = n->guest_hdr_len;
        }
    }
}

static int peer_attach(VirtIONet *n, int index)
{
    NetClientState *nc = qemu_get_subqueue(n->nic, index);

    if (!nc->peer) {
        return 0;
    }

    if (nc->peer->info->type != NET_CLIENT_OPTIONS_KIND_TAP) {
        return 0;
    }

    return tap_enable(nc->peer);
}

static int peer_detach(VirtIONet *n, int index)
{
    NetClientState *nc = qemu_get_subqueue(n->nic, index);

    if (!nc->peer) {
        return 0;
    }

    if (nc->peer->info->type != NET_CLIENT_OPTIONS_KIND_TAP) {
        return 0;
    }

    return tap_disable(nc->peer);
}

static void virtio_net_set_queues(VirtIONet *n)
{
    int i;
    int r;

    for (i = 0; i < n->max_queues; i++) {
        if (i < n->curr_queues) {
            r = peer_attach(n, i);
            assert(!r);
        } else {
            r = peer_detach(n, i);
            assert(!r);
        }
    }
}

static void virtio_net_set_multiqueue(VirtIONet *n, int multiqueue);

static uint32_t virtio_net_get_features(VirtIODevice *vdev, uint32_t features)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    NetClientState *nc = qemu_get_queue(n->nic);

    features |= (1 << VIRTIO_NET_F_MAC);

    if (!peer_has_vnet_hdr(n)) {
        features &= ~(0x1 << VIRTIO_NET_F_CSUM);
        features &= ~(0x1 << VIRTIO_NET_F_HOST_TSO4);
        features &= ~(0x1 << VIRTIO_NET_F_HOST_TSO6);
        features &= ~(0x1 << VIRTIO_NET_F_HOST_ECN);

        features &= ~(0x1 << VIRTIO_NET_F_GUEST_CSUM);
        features &= ~(0x1 << VIRTIO_NET_F_GUEST_TSO4);
        features &= ~(0x1 << VIRTIO_NET_F_GUEST_TSO6);
        features &= ~(0x1 << VIRTIO_NET_F_GUEST_ECN);
    }

    if (!peer_has_vnet_hdr(n) || !peer_has_ufo(n)) {
        features &= ~(0x1 << VIRTIO_NET_F_GUEST_UFO);
        features &= ~(0x1 << VIRTIO_NET_F_HOST_UFO);
    }

    if (!get_vhost_net(nc->peer)) {
        return features;
    }
    return vhost_net_get_features(get_vhost_net(nc->peer), features);
}

static uint32_t virtio_net_bad_features(VirtIODevice *vdev)
{
    uint32_t features = 0;

    /* Linux kernel 2.6.25.  It understood MAC (as everyone must),
     * but also these: */
    features |= (1 << VIRTIO_NET_F_MAC);
    features |= (1 << VIRTIO_NET_F_CSUM);
    features |= (1 << VIRTIO_NET_F_HOST_TSO4);
    features |= (1 << VIRTIO_NET_F_HOST_TSO6);
    features |= (1 << VIRTIO_NET_F_HOST_ECN);

    return features;
}

static void virtio_net_apply_guest_offloads(VirtIONet *n)
{
    qemu_set_offload(qemu_get_queue(n->nic)->peer,
            !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_CSUM)),
            !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_TSO4)),
            !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_TSO6)),
            !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_ECN)),
            !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_UFO)));
}

static uint64_t virtio_net_guest_offloads_by_features(uint32_t features)
{
    static const uint64_t guest_offloads_mask =
        (1ULL << VIRTIO_NET_F_GUEST_CSUM) |
        (1ULL << VIRTIO_NET_F_GUEST_TSO4) |
        (1ULL << VIRTIO_NET_F_GUEST_TSO6) |
        (1ULL << VIRTIO_NET_F_GUEST_ECN)  |
        (1ULL << VIRTIO_NET_F_GUEST_UFO);

    return guest_offloads_mask & features;
}

static inline uint64_t virtio_net_supported_guest_offloads(VirtIONet *n)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    return virtio_net_guest_offloads_by_features(vdev->guest_features);
}

static void virtio_net_set_features(VirtIODevice *vdev, uint32_t features)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    int i;

    virtio_net_set_multiqueue(n, !!(features & (1 << VIRTIO_NET_F_MQ)));

    virtio_net_set_mrg_rx_bufs(n, !!(features & (1 << VIRTIO_NET_F_MRG_RXBUF)));

    if (n->has_vnet_hdr) {
        n->curr_guest_offloads =
            virtio_net_guest_offloads_by_features(features);
        virtio_net_apply_guest_offloads(n);
    }

    for (i = 0; i < n->max_queues; i++) {
        NetClientState *nc = qemu_get_subqueue(n->nic, i);

        if (!get_vhost_net(nc->peer)) {
            continue;
        }
        vhost_net_ack_features(get_vhost_net(nc->peer), features);
    }

    if ((1 << VIRTIO_NET_F_CTRL_VLAN) & features) {
        memset(n->vlans, 0, MAX_VLAN >> 3);
    } else {
        memset(n->vlans, 0xff, MAX_VLAN >> 3);
    }
}

static int virtio_net_handle_rx_mode(VirtIONet *n, uint8_t cmd,
                                     struct iovec *iov, unsigned int iov_cnt)
{
    uint8_t on;
    size_t s;
    NetClientState *nc = qemu_get_queue(n->nic);

    s = iov_to_buf(iov, iov_cnt, 0, &on, sizeof(on));
    if (s != sizeof(on)) {
        return VIRTIO_NET_ERR;
    }

    if (cmd == VIRTIO_NET_CTRL_RX_PROMISC) {
        n->promisc = on;
    } else if (cmd == VIRTIO_NET_CTRL_RX_ALLMULTI) {
        n->allmulti = on;
    } else if (cmd == VIRTIO_NET_CTRL_RX_ALLUNI) {
        n->alluni = on;
    } else if (cmd == VIRTIO_NET_CTRL_RX_NOMULTI) {
        n->nomulti = on;
    } else if (cmd == VIRTIO_NET_CTRL_RX_NOUNI) {
        n->nouni = on;
    } else if (cmd == VIRTIO_NET_CTRL_RX_NOBCAST) {
        n->nobcast = on;
    } else {
        return VIRTIO_NET_ERR;
    }

    rxfilter_notify(nc);

    return VIRTIO_NET_OK;
}

static int virtio_net_handle_offloads(VirtIONet *n, uint8_t cmd,
                                      struct iovec *iov, unsigned int iov_cnt)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    uint64_t offloads;
    size_t s;

    if (!((1 << VIRTIO_NET_F_CTRL_GUEST_OFFLOADS) & vdev->guest_features)) {
        return VIRTIO_NET_ERR;
    }

    s = iov_to_buf(iov, iov_cnt, 0, &offloads, sizeof(offloads));
    if (s != sizeof(offloads)) {
        return VIRTIO_NET_ERR;
    }

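    /*
     * VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET is the only sub-command handled
     * here; the requested offload bits must be a subset of what was
     * negotiated at feature time, otherwise the command is rejected.
     */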
    if (cmd == VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET) {
        uint64_t supported_offloads;

        if (!n->has_vnet_hdr) {
            return VIRTIO_NET_ERR;
        }

        supported_offloads = virtio_net_supported_guest_offloads(n);
        if (offloads & ~supported_offloads) {
            return VIRTIO_NET_ERR;
        }

        n->curr_guest_offloads = offloads;
        virtio_net_apply_guest_offloads(n);

        return VIRTIO_NET_OK;
    } else {
        return VIRTIO_NET_ERR;
    }
}

static int virtio_net_handle_mac(VirtIONet *n, uint8_t cmd,
                                 struct iovec *iov, unsigned int iov_cnt)
{
    struct virtio_net_ctrl_mac mac_data;
    size_t s;
    NetClientState *nc = qemu_get_queue(n->nic);

    if (cmd == VIRTIO_NET_CTRL_MAC_ADDR_SET) {
        if (iov_size(iov, iov_cnt) != sizeof(n->mac)) {
            return VIRTIO_NET_ERR;
        }
        s = iov_to_buf(iov, iov_cnt, 0, &n->mac, sizeof(n->mac));
        assert(s == sizeof(n->mac));
        qemu_format_nic_info_str(qemu_get_queue(n->nic), n->mac);
        rxfilter_notify(nc);

        return VIRTIO_NET_OK;
    }

    if (cmd != VIRTIO_NET_CTRL_MAC_TABLE_SET) {
        return VIRTIO_NET_ERR;
    }

    int in_use = 0;
    int first_multi = 0;
    uint8_t uni_overflow = 0;
    uint8_t multi_overflow = 0;
    uint8_t *macs = g_malloc0(MAC_TABLE_ENTRIES * ETH_ALEN);

    s = iov_to_buf(iov, iov_cnt, 0, &mac_data.entries,
                   sizeof(mac_data.entries));
    mac_data.entries = ldl_p(&mac_data.entries);
    if (s != sizeof(mac_data.entries)) {
        goto error;
    }
    iov_discard_front(&iov, &iov_cnt, s);

    if (mac_data.entries * ETH_ALEN > iov_size(iov, iov_cnt)) {
        goto error;
    }

    if (mac_data.entries <= MAC_TABLE_ENTRIES) {
        s = iov_to_buf(iov, iov_cnt, 0, macs,
                       mac_data.entries * ETH_ALEN);
        if (s != mac_data.entries * ETH_ALEN) {
            goto error;
        }
        in_use += mac_data.entries;
    } else {
        uni_overflow = 1;
    }

    iov_discard_front(&iov, &iov_cnt, mac_data.entries * ETH_ALEN);

    first_multi = in_use;

    s = iov_to_buf(iov, iov_cnt, 0, &mac_data.entries,
                   sizeof(mac_data.entries));
    mac_data.entries = ldl_p(&mac_data.entries);
    if (s != sizeof(mac_data.entries)) {
        goto error;
    }

    iov_discard_front(&iov, &iov_cnt, s);

    if (mac_data.entries * ETH_ALEN != iov_size(iov, iov_cnt)) {
        goto error;
    }

    if (mac_data.entries <= MAC_TABLE_ENTRIES - in_use) {
        s = iov_to_buf(iov, iov_cnt, 0, &macs[in_use * ETH_ALEN],
                       mac_data.entries * ETH_ALEN);
        if (s != mac_data.entries * ETH_ALEN) {
            goto error;
        }
        in_use += mac_data.entries;
    } else {
        multi_overflow = 1;
    }

    n->mac_table.in_use = in_use;
    n->mac_table.first_multi = first_multi;
    n->mac_table.uni_overflow = uni_overflow;
    n->mac_table.multi_overflow = multi_overflow;
    memcpy(n->mac_table.macs, macs, MAC_TABLE_ENTRIES * ETH_ALEN);
    g_free(macs);
    rxfilter_notify(nc);

    return VIRTIO_NET_OK;

error:
    g_free(macs);
    return VIRTIO_NET_ERR;
}

static int virtio_net_handle_vlan_table(VirtIONet *n, uint8_t cmd,
                                        struct iovec *iov, unsigned int iov_cnt)
{
    uint16_t vid;
    size_t s;
    NetClientState *nc = qemu_get_queue(n->nic);

    s = iov_to_buf(iov, iov_cnt, 0, &vid, sizeof(vid));
    vid = lduw_p(&vid);
    if (s != sizeof(vid)) {
        return VIRTIO_NET_ERR;
    }

    if (vid >= MAX_VLAN)
        return VIRTIO_NET_ERR;

    if (cmd == VIRTIO_NET_CTRL_VLAN_ADD)
        n->vlans[vid >> 5] |= (1U << (vid & 0x1f));
    else if (cmd == VIRTIO_NET_CTRL_VLAN_DEL)
        n->vlans[vid >> 5] &= ~(1U << (vid & 0x1f));
    else
        return VIRTIO_NET_ERR;

    rxfilter_notify(nc);

    return VIRTIO_NET_OK;
}

static int virtio_net_handle_announce(VirtIONet *n, uint8_t cmd,
                                      struct iovec *iov, unsigned int iov_cnt)
{
    if (cmd == VIRTIO_NET_CTRL_ANNOUNCE_ACK &&
        n->status & VIRTIO_NET_S_ANNOUNCE) {
        n->status &= ~VIRTIO_NET_S_ANNOUNCE;
        if (n->announce_counter) {
            timer_mod(n->announce_timer,
                      qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) +
                      self_announce_delay(n->announce_counter));
        }
        return VIRTIO_NET_OK;
    } else {
        return VIRTIO_NET_ERR;
    }
}

static int virtio_net_handle_mq(VirtIONet *n, uint8_t cmd,
                                struct iovec *iov, unsigned int iov_cnt)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    struct virtio_net_ctrl_mq mq;
    size_t s;
    uint16_t queues;

    s = iov_to_buf(iov, iov_cnt, 0, &mq, sizeof(mq));
    if (s != sizeof(mq)) {
        return VIRTIO_NET_ERR;
    }

    if (cmd != VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET) {
        return VIRTIO_NET_ERR;
    }

    queues = lduw_p(&mq.virtqueue_pairs);

    if (queues < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN ||
        queues > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX ||
        queues > n->max_queues ||
        !n->multiqueue) {
        return VIRTIO_NET_ERR;
    }

    n->curr_queues = queues;
    /* stop the backend before changing the number of queues to avoid
     * handling a disabled queue */
    virtio_net_set_status(vdev, vdev->status);
    virtio_net_set_queues(n);

    return VIRTIO_NET_OK;
}

static void virtio_net_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    struct virtio_net_ctrl_hdr ctrl;
    virtio_net_ctrl_ack status = VIRTIO_NET_ERR;
    VirtQueueElement elem;
    size_t s;
    struct iovec *iov;
    unsigned int iov_cnt;

    while (virtqueue_pop(vq, &elem)) {
        if (iov_size(elem.in_sg, elem.in_num) < sizeof(status) ||
            iov_size(elem.out_sg, elem.out_num) < sizeof(ctrl)) {
            error_report("virtio-net ctrl missing headers");
            exit(1);
        }

        iov = elem.out_sg;
        iov_cnt = elem.out_num;
        s = iov_to_buf(iov, iov_cnt, 0, &ctrl, sizeof(ctrl));
        iov_discard_front(&iov, &iov_cnt, sizeof(ctrl));
        if (s != sizeof(ctrl)) {
            status = VIRTIO_NET_ERR;
        } else if (ctrl.class == VIRTIO_NET_CTRL_RX) {
            status = virtio_net_handle_rx_mode(n, ctrl.cmd, iov, iov_cnt);
        } else if (ctrl.class == VIRTIO_NET_CTRL_MAC) {
            status = virtio_net_handle_mac(n, ctrl.cmd, iov, iov_cnt);
        } else if (ctrl.class == VIRTIO_NET_CTRL_VLAN) {
            status = virtio_net_handle_vlan_table(n, ctrl.cmd, iov, iov_cnt);
        } else if (ctrl.class == VIRTIO_NET_CTRL_ANNOUNCE) {
            status = virtio_net_handle_announce(n, ctrl.cmd, iov, iov_cnt);
        } else if (ctrl.class == VIRTIO_NET_CTRL_MQ) {
            status = virtio_net_handle_mq(n, ctrl.cmd, iov, iov_cnt);
        } else if (ctrl.class == VIRTIO_NET_CTRL_GUEST_OFFLOADS) {
            status = virtio_net_handle_offloads(n, ctrl.cmd, iov, iov_cnt);
        }

        s = iov_from_buf(elem.in_sg, elem.in_num, 0, &status, sizeof(status));
        assert(s == sizeof(status));

        virtqueue_push(vq, &elem, sizeof(status));
        virtio_notify(vdev, vq);
    }
}

/* RX */

static void virtio_net_handle_rx(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    int queue_index = vq2q(virtio_get_queue_index(vq));

    qemu_flush_queued_packets(qemu_get_subqueue(n->nic, queue_index));
}

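/*
 * Called by the net layer before a packet is delivered: receiving is only
 * possible while the VM is running, while the queue index is below the
 * number of queue pairs the guest has currently enabled, and once the RX
 * virtqueue is ready and the driver has reached DRIVER_OK.
 */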
static int virtio_net_can_receive(NetClientState *nc)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    VirtIONetQueue *q = virtio_net_get_subqueue(nc);

    if (!vdev->vm_running) {
        return 0;
    }

    if (nc->queue_index >= n->curr_queues) {
        return 0;
    }

    if (!virtio_queue_ready(q->rx_vq) ||
        !(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK)) {
        return 0;
    }

    return 1;
}

static int virtio_net_has_buffers(VirtIONetQueue *q, int bufsize)
{
    VirtIONet *n = q->n;
    if (virtio_queue_empty(q->rx_vq) ||
        (n->mergeable_rx_bufs &&
         !virtqueue_avail_bytes(q->rx_vq, bufsize, 0))) {
        virtio_queue_set_notification(q->rx_vq, 1);

        /* To avoid a race condition where the guest has made some buffers
         * available after the above check but before notification was
         * enabled, check for available buffers again.
         */
        if (virtio_queue_empty(q->rx_vq) ||
            (n->mergeable_rx_bufs &&
             !virtqueue_avail_bytes(q->rx_vq, bufsize, 0))) {
            return 0;
        }
    }

    virtio_queue_set_notification(q->rx_vq, 0);
    return 1;
}

/* dhclient uses AF_PACKET but doesn't pass auxdata to the kernel so
 * it never finds out that the packets don't have valid checksums.  This
 * causes dhclient to get upset.  Fedora's carried a patch for ages to
 * fix this with Xen but it hasn't appeared in an upstream release of
 * dhclient yet.
 *
 * To avoid breaking existing guests, we catch udp packets and add
 * checksums.  This is terrible but it's better than hacking the guest
 * kernels.
 *
 * N.B. if we introduce a zero-copy API, this operation is no longer free so
 * we should provide a mechanism to disable it to avoid polluting the host
 * cache.
 */
static void work_around_broken_dhclient(struct virtio_net_hdr *hdr,
                                        uint8_t *buf, size_t size)
{
    if ((hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) && /* missing csum */
        (size > 27 && size < 1500) && /* normal sized MTU */
        (buf[12] == 0x08 && buf[13] == 0x00) && /* ethertype == IPv4 */
        (buf[23] == 17) && /* ip.protocol == UDP */
        (buf[34] == 0 && buf[35] == 67)) { /* udp.srcport == bootps */
        net_checksum_calculate(buf, size);
        hdr->flags &= ~VIRTIO_NET_HDR_F_NEEDS_CSUM;
    }
}

static void receive_header(VirtIONet *n, const struct iovec *iov, int iov_cnt,
                           const void *buf, size_t size)
{
    if (n->has_vnet_hdr) {
        /* FIXME this cast is evil */
        void *wbuf = (void *)buf;
        work_around_broken_dhclient(wbuf, wbuf + n->host_hdr_len,
                                    size - n->host_hdr_len);
        iov_from_buf(iov, iov_cnt, 0, buf, sizeof(struct virtio_net_hdr));
    } else {
        struct virtio_net_hdr hdr = {
            .flags = 0,
            .gso_type = VIRTIO_NET_HDR_GSO_NONE
        };
        iov_from_buf(iov, iov_cnt, 0, &hdr, sizeof hdr);
    }
}

static int receive_filter(VirtIONet *n, const uint8_t *buf, int size)
{
    static const uint8_t bcast[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
    static const uint8_t vlan[] = {0x81, 0x00};
    uint8_t *ptr = (uint8_t *)buf;
    int i;

    if (n->promisc)
        return 1;

    ptr += n->host_hdr_len;

    if (!memcmp(&ptr[12], vlan, sizeof(vlan))) {
        int vid = be16_to_cpup((uint16_t *)(ptr + 14)) & 0xfff;
        if (!(n->vlans[vid >> 5] & (1U << (vid & 0x1f))))
            return 0;
    }

    if (ptr[0] & 1) { // multicast
        if (!memcmp(ptr, bcast, sizeof(bcast))) {
            return !n->nobcast;
        } else if (n->nomulti) {
            return 0;
        } else if (n->allmulti || n->mac_table.multi_overflow) {
            return 1;
        }

        for (i = n->mac_table.first_multi; i < n->mac_table.in_use; i++) {
            if (!memcmp(ptr, &n->mac_table.macs[i * ETH_ALEN], ETH_ALEN)) {
                return 1;
            }
        }
    } else { // unicast
        if (n->nouni) {
            return 0;
        } else if (n->alluni || n->mac_table.uni_overflow) {
            return 1;
        } else if (!memcmp(ptr, n->mac, ETH_ALEN)) {
            return 1;
        }

        for (i = 0; i < n->mac_table.first_multi; i++) {
            if (!memcmp(ptr, &n->mac_table.macs[i * ETH_ALEN], ETH_ALEN)) {
                return 1;
            }
        }
    }

    return 0;
}

static ssize_t virtio_net_receive(NetClientState *nc, const uint8_t *buf,
                                  size_t size)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);
    VirtIONetQueue *q = virtio_net_get_subqueue(nc);
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    struct iovec mhdr_sg[VIRTQUEUE_MAX_SIZE];
    struct virtio_net_hdr_mrg_rxbuf mhdr;
    unsigned mhdr_cnt = 0;
    size_t offset, i, guest_offset;

    if (!virtio_net_can_receive(nc)) {
        return -1;
    }

    /* hdr_len refers to the header we supply to the guest */
    if (!virtio_net_has_buffers(q, size + n->guest_hdr_len - n->host_hdr_len)) {
        return 0;
    }

    if (!receive_filter(n, buf, size))
        return size;

    offset = i = 0;

    while (offset < size) {
        VirtQueueElement elem;
        int len, total;
        const struct iovec *sg = elem.in_sg;

        total = 0;

        if (virtqueue_pop(q->rx_vq, &elem) == 0) {
            if (i == 0)
                return -1;
            error_report("virtio-net unexpected empty queue: "
                         "i %zd mergeable %d offset %zd, size %zd, "
                         "guest hdr len %zd, host hdr len %zd "
                         "guest features 0x%x",
                         i, n->mergeable_rx_bufs, offset, size,
                         n->guest_hdr_len, n->host_hdr_len,
                         vdev->guest_features);
            exit(1);
        }

        if (elem.in_num < 1) {
            error_report("virtio-net receive queue contains no in buffers");
            exit(1);
        }

        if (i == 0) {
            assert(offset == 0);
            if (n->mergeable_rx_bufs) {
                mhdr_cnt = iov_copy(mhdr_sg, ARRAY_SIZE(mhdr_sg),
                                    sg, elem.in_num,
                                    offsetof(typeof(mhdr), num_buffers),
                                    sizeof(mhdr.num_buffers));
            }

            receive_header(n, sg, elem.in_num, buf, size);
            offset = n->host_hdr_len;
            total += n->guest_hdr_len;
            guest_offset = n->guest_hdr_len;
        } else {
            guest_offset = 0;
        }

        /* copy in packet.  ugh */
        len = iov_from_buf(sg, elem.in_num, guest_offset,
                           buf + offset, size - offset);
        total += len;
        offset += len;
        /* If buffers can't be merged, at this point we
         * must have consumed the complete packet.
         * Otherwise, drop it. */
        if (!n->mergeable_rx_bufs && offset < size) {
#if 0
            error_report("virtio-net truncated non-mergeable packet: "
                         "i %zd mergeable %d offset %zd, size %zd, "
                         "guest hdr len %zd, host hdr len %zd",
                         i, n->mergeable_rx_bufs,
                         offset, size, n->guest_hdr_len, n->host_hdr_len);
#endif
            return size;
        }

        /* signal other side */
        virtqueue_fill(q->rx_vq, &elem, total, i++);
    }

    if (mhdr_cnt) {
        stw_p(&mhdr.num_buffers, i);
        iov_from_buf(mhdr_sg, mhdr_cnt,
                     0,
                     &mhdr.num_buffers, sizeof mhdr.num_buffers);
    }

    virtqueue_flush(q->rx_vq, i);
    virtio_notify(vdev, q->rx_vq);

    return size;
}

static int32_t virtio_net_flush_tx(VirtIONetQueue *q);

static void virtio_net_tx_complete(NetClientState *nc, ssize_t len)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);
    VirtIONetQueue *q = virtio_net_get_subqueue(nc);
    VirtIODevice *vdev = VIRTIO_DEVICE(n);

    virtqueue_push(q->tx_vq, &q->async_tx.elem, 0);
    virtio_notify(vdev, q->tx_vq);

    q->async_tx.elem.out_num = q->async_tx.len = 0;

    virtio_queue_set_notification(q->tx_vq, 1);
    virtio_net_flush_tx(q);
}

/* TX */
static int32_t virtio_net_flush_tx(VirtIONetQueue *q)
{
    VirtIONet *n = q->n;
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    VirtQueueElement elem;
    int32_t num_packets = 0;
    int queue_index = vq2q(virtio_get_queue_index(q->tx_vq));
    if (!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK)) {
        return num_packets;
    }

    assert(vdev->vm_running);

    if (q->async_tx.elem.out_num) {
        virtio_queue_set_notification(q->tx_vq, 0);
        return num_packets;
    }

    while (virtqueue_pop(q->tx_vq, &elem)) {
        ssize_t ret, len;
        unsigned int out_num = elem.out_num;
        struct iovec *out_sg = &elem.out_sg[0];
        struct iovec sg[VIRTQUEUE_MAX_SIZE];

        if (out_num < 1) {
            error_report("virtio-net header not in first element");
            exit(1);
        }

        /*
         * If host wants to see the guest header as is, we can
         * pass it on unchanged.  Otherwise, copy just the parts
         * that host is interested in.
         */
        assert(n->host_hdr_len <= n->guest_hdr_len);
        if (n->host_hdr_len != n->guest_hdr_len) {
            unsigned sg_num = iov_copy(sg, ARRAY_SIZE(sg),
                                       out_sg, out_num,
                                       0, n->host_hdr_len);
            sg_num += iov_copy(sg + sg_num, ARRAY_SIZE(sg) - sg_num,
                               out_sg, out_num,
                               n->guest_hdr_len, -1);
            out_num = sg_num;
            out_sg = sg;
        }

        len = n->guest_hdr_len;

        ret = qemu_sendv_packet_async(qemu_get_subqueue(n->nic, queue_index),
                                      out_sg, out_num, virtio_net_tx_complete);
        if (ret == 0) {
            virtio_queue_set_notification(q->tx_vq, 0);
            q->async_tx.elem = elem;
            q->async_tx.len = len;
            return -EBUSY;
        }

        len += ret;

        virtqueue_push(q->tx_vq, &elem, 0);
        virtio_notify(vdev, q->tx_vq);

        if (++num_packets >= n->tx_burst) {
            break;
        }
    }
    return num_packets;
}

static void virtio_net_handle_tx_timer(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    VirtIONetQueue *q = &n->vqs[vq2q(virtio_get_queue_index(vq))];

    /* This happens when device was stopped but VCPU wasn't. */
    if (!vdev->vm_running) {
        q->tx_waiting = 1;
        return;
    }

    if (q->tx_waiting) {
        virtio_queue_set_notification(vq, 1);
        timer_del(q->tx_timer);
        q->tx_waiting = 0;
        virtio_net_flush_tx(q);
    } else {
        timer_mod(q->tx_timer,
                  qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + n->tx_timeout);
        q->tx_waiting = 1;
        virtio_queue_set_notification(vq, 0);
    }
}

static void virtio_net_handle_tx_bh(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    VirtIONetQueue *q = &n->vqs[vq2q(virtio_get_queue_index(vq))];

    if (unlikely(q->tx_waiting)) {
        return;
    }
    q->tx_waiting = 1;
    /* This happens when device was stopped but VCPU wasn't. */
    if (!vdev->vm_running) {
        return;
    }
    virtio_queue_set_notification(vq, 0);
    qemu_bh_schedule(q->tx_bh);
}

static void virtio_net_tx_timer(void *opaque)
{
    VirtIONetQueue *q = opaque;
    VirtIONet *n = q->n;
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    assert(vdev->vm_running);

    q->tx_waiting = 0;

    /* Just in case the driver is not ready anymore */
    if (!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK)) {
        return;
    }

    virtio_queue_set_notification(q->tx_vq, 1);
    virtio_net_flush_tx(q);
}

static void virtio_net_tx_bh(void *opaque)
{
    VirtIONetQueue *q = opaque;
    VirtIONet *n = q->n;
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    int32_t ret;

    assert(vdev->vm_running);

    q->tx_waiting = 0;

    /* Just in case the driver is not ready anymore */
    if (unlikely(!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK))) {
        return;
    }

    ret = virtio_net_flush_tx(q);
    if (ret == -EBUSY) {
        return; /* Notification re-enable handled by tx_complete */
    }

    /* If we flush a full burst of packets, assume there are
     * more coming and immediately reschedule */
    if (ret >= n->tx_burst) {
        qemu_bh_schedule(q->tx_bh);
        q->tx_waiting = 1;
        return;
    }

    /* If less than a full burst, re-enable notification and flush
     * anything that may have come in while we weren't looking.  If
     * we find something, assume the guest is still active and reschedule */
    virtio_queue_set_notification(q->tx_vq, 1);
    if (virtio_net_flush_tx(q) > 0) {
        virtio_queue_set_notification(q->tx_vq, 0);
        qemu_bh_schedule(q->tx_bh);
        q->tx_waiting = 1;
    }
}

static void virtio_net_set_multiqueue(VirtIONet *n, int multiqueue)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    int i, max = multiqueue ? n->max_queues : 1;

    n->multiqueue = multiqueue;

    for (i = 2; i <= n->max_queues * 2 + 1; i++) {
        virtio_del_queue(vdev, i);
    }

    for (i = 1; i < max; i++) {
        n->vqs[i].rx_vq = virtio_add_queue(vdev, 256, virtio_net_handle_rx);
        if (n->vqs[i].tx_timer) {
            n->vqs[i].tx_vq =
                virtio_add_queue(vdev, 256, virtio_net_handle_tx_timer);
            n->vqs[i].tx_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
                                              virtio_net_tx_timer,
                                              &n->vqs[i]);
        } else {
            n->vqs[i].tx_vq =
                virtio_add_queue(vdev, 256, virtio_net_handle_tx_bh);
            n->vqs[i].tx_bh = qemu_bh_new(virtio_net_tx_bh, &n->vqs[i]);
        }

        n->vqs[i].tx_waiting = 0;
        n->vqs[i].n = n;
    }

    /* Note: Minix guests (version 3.2.1) use the ctrl vq but don't ack
     * VIRTIO_NET_F_CTRL_VQ.  Create the ctrl vq unconditionally to avoid
     * breaking them.
     */
    n->ctrl_vq = virtio_add_queue(vdev, 64, virtio_net_handle_ctrl);

    virtio_net_set_queues(n);
}

static void virtio_net_save(QEMUFile *f, void *opaque)
{
    int i;
    VirtIONet *n = opaque;
    VirtIODevice *vdev = VIRTIO_DEVICE(n);

    /* At this point, backend must be stopped, otherwise
     * it might keep writing to memory. */
    assert(!n->vhost_started);
    virtio_save(vdev, f);

    qemu_put_buffer(f, n->mac, ETH_ALEN);
    qemu_put_be32(f, n->vqs[0].tx_waiting);
    qemu_put_be32(f, n->mergeable_rx_bufs);
    qemu_put_be16(f, n->status);
    qemu_put_byte(f, n->promisc);
    qemu_put_byte(f, n->allmulti);
    qemu_put_be32(f, n->mac_table.in_use);
    qemu_put_buffer(f, n->mac_table.macs, n->mac_table.in_use * ETH_ALEN);
    qemu_put_buffer(f, (uint8_t *)n->vlans, MAX_VLAN >> 3);
    qemu_put_be32(f, n->has_vnet_hdr);
    qemu_put_byte(f, n->mac_table.multi_overflow);
    qemu_put_byte(f, n->mac_table.uni_overflow);
    qemu_put_byte(f, n->alluni);
    qemu_put_byte(f, n->nomulti);
    qemu_put_byte(f, n->nouni);
    qemu_put_byte(f, n->nobcast);
    qemu_put_byte(f, n->has_ufo);
    if (n->max_queues > 1) {
        qemu_put_be16(f, n->max_queues);
        qemu_put_be16(f, n->curr_queues);
        for (i = 1; i < n->curr_queues; i++) {
            qemu_put_be32(f, n->vqs[i].tx_waiting);
        }
    }

    if ((1 << VIRTIO_NET_F_CTRL_GUEST_OFFLOADS) & vdev->guest_features) {
        qemu_put_be64(f, n->curr_guest_offloads);
    }
}

static int virtio_net_load(QEMUFile *f, void *opaque, int version_id)
{
    VirtIONet *n = opaque;
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    int ret, i, link_down;

    if (version_id < 2 || version_id > VIRTIO_NET_VM_VERSION)
        return -EINVAL;

    ret = virtio_load(vdev, f);
    if (ret) {
        return ret;
    }

    qemu_get_buffer(f, n->mac, ETH_ALEN);
    n->vqs[0].tx_waiting = qemu_get_be32(f);

    virtio_net_set_mrg_rx_bufs(n, qemu_get_be32(f));

    if (version_id >= 3)
        n->status = qemu_get_be16(f);

    if (version_id >= 4) {
        if (version_id < 8) {
            n->promisc = qemu_get_be32(f);
            n->allmulti = qemu_get_be32(f);
        } else {
            n->promisc = qemu_get_byte(f);
            n->allmulti = qemu_get_byte(f);
        }
    }

    if (version_id >= 5) {
        n->mac_table.in_use = qemu_get_be32(f);
        /* MAC_TABLE_ENTRIES may be different from the saved image */
        if (n->mac_table.in_use <= MAC_TABLE_ENTRIES) {
            qemu_get_buffer(f, n->mac_table.macs,
                            n->mac_table.in_use * ETH_ALEN);
        } else {
            int64_t i;

            /* Overflow detected - can happen if source has a larger MAC table.
             * We simply set overflow flag so there's no need to maintain the
             * table of addresses, discard them all.
             * Note: 64 bit math to avoid integer overflow.
             */
            for (i = 0; i < (int64_t)n->mac_table.in_use * ETH_ALEN; ++i) {
                qemu_get_byte(f);
            }
            n->mac_table.multi_overflow = n->mac_table.uni_overflow = 1;
            n->mac_table.in_use = 0;
        }
    }

    if (version_id >= 6)
        qemu_get_buffer(f, (uint8_t *)n->vlans, MAX_VLAN >> 3);

    if (version_id >= 7) {
        if (qemu_get_be32(f) && !peer_has_vnet_hdr(n)) {
            error_report("virtio-net: saved image requires vnet_hdr=on");
            return -1;
        }
    }

    if (version_id >= 9) {
        n->mac_table.multi_overflow = qemu_get_byte(f);
        n->mac_table.uni_overflow = qemu_get_byte(f);
    }

    if (version_id >= 10) {
        n->alluni = qemu_get_byte(f);
        n->nomulti = qemu_get_byte(f);
        n->nouni = qemu_get_byte(f);
        n->nobcast = qemu_get_byte(f);
    }

    if (version_id >= 11) {
        if (qemu_get_byte(f) && !peer_has_ufo(n)) {
            error_report("virtio-net: saved image requires TUN_F_UFO support");
            return -1;
        }
    }

    if (n->max_queues > 1) {
        if (n->max_queues != qemu_get_be16(f)) {
            error_report("virtio-net: different max_queues ");
            return -1;
        }

        n->curr_queues = qemu_get_be16(f);
        if (n->curr_queues > n->max_queues) {
            error_report("virtio-net: curr_queues %x > max_queues %x",
                         n->curr_queues, n->max_queues);
            return -1;
        }
        for (i = 1; i < n->curr_queues; i++) {
            n->vqs[i].tx_waiting = qemu_get_be32(f);
        }
    }

    if ((1 << VIRTIO_NET_F_CTRL_GUEST_OFFLOADS) & vdev->guest_features) {
        n->curr_guest_offloads = qemu_get_be64(f);
    } else {
        n->curr_guest_offloads = virtio_net_supported_guest_offloads(n);
    }

    if (peer_has_vnet_hdr(n)) {
        virtio_net_apply_guest_offloads(n);
    }

    virtio_net_set_queues(n);

    /* Find the first multicast entry in the saved MAC filter */
    for (i = 0; i < n->mac_table.in_use; i++) {
        if (n->mac_table.macs[i * ETH_ALEN] & 1) {
            break;
        }
    }
    n->mac_table.first_multi = i;

    /* nc.link_down can't be migrated, so infer link_down according
     * to link status bit in n->status */
    link_down = (n->status & VIRTIO_NET_S_LINK_UP) == 0;
    for (i = 0; i < n->max_queues; i++) {
        qemu_get_subqueue(n->nic, i)->link_down = link_down;
    }

    if (vdev->guest_features & (0x1 << VIRTIO_NET_F_GUEST_ANNOUNCE) &&
        vdev->guest_features & (0x1 << VIRTIO_NET_F_CTRL_VQ)) {
        n->announce_counter = SELF_ANNOUNCE_ROUNDS;
        timer_mod(n->announce_timer, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL));
    }

    return 0;
}

static void virtio_net_cleanup(NetClientState *nc)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);

    n->nic = NULL;
}

static NetClientInfo net_virtio_info = {
    .type = NET_CLIENT_OPTIONS_KIND_NIC,
    .size = sizeof(NICState),
    .can_receive = virtio_net_can_receive,
    .receive = virtio_net_receive,
    .cleanup = virtio_net_cleanup,
    .link_status_changed = virtio_net_set_link_status,
    .query_rx_filter = virtio_net_query_rxfilter,
};

static bool virtio_net_guest_notifier_pending(VirtIODevice *vdev, int idx)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    NetClientState *nc = qemu_get_subqueue(n->nic, vq2q(idx));
    assert(n->vhost_started);
    return vhost_net_virtqueue_pending(get_vhost_net(nc->peer), idx);
}

static void virtio_net_guest_notifier_mask(VirtIODevice *vdev, int idx,
                                           bool mask)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    NetClientState *nc = qemu_get_subqueue(n->nic, vq2q(idx));
    assert(n->vhost_started);
    vhost_net_virtqueue_mask(get_vhost_net(nc->peer),
                             vdev, idx, mask);
}

void virtio_net_set_config_size(VirtIONet *n, uint32_t host_features)
{
    int i, config_size = 0;
    host_features |= (1 << VIRTIO_NET_F_MAC);
    for (i = 0; feature_sizes[i].flags != 0; i++) {
        if (host_features & feature_sizes[i].flags) {
            config_size = MAX(feature_sizes[i].end, config_size);
        }
    }
    n->config_size = config_size;
}

void virtio_net_set_netclient_name(VirtIONet *n, const char *name,
                                   const char *type)
{
    /*
     * The name can be NULL, in which case the netclient name will be type.x.
     */
    assert(type != NULL);

    g_free(n->netclient_name);
    g_free(n->netclient_type);
    n->netclient_name = g_strdup(name);
    n->netclient_type = g_strdup(type);
}

static void virtio_net_device_realize(DeviceState *dev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VirtIONet *n = VIRTIO_NET(dev);
    NetClientState *nc;
    int i;

    virtio_init(vdev, "virtio-net", VIRTIO_ID_NET, n->config_size);

    n->max_queues = MAX(n->nic_conf.queues, 1);
    n->vqs = g_malloc0(sizeof(VirtIONetQueue) * n->max_queues);
    n->vqs[0].rx_vq = virtio_add_queue(vdev, 256, virtio_net_handle_rx);
    n->curr_queues = 1;
    n->vqs[0].n = n;
    n->tx_timeout = n->net_conf.txtimer;

    if (n->net_conf.tx && strcmp(n->net_conf.tx, "timer")
        && strcmp(n->net_conf.tx, "bh")) {
        error_report("virtio-net: "
                     "Unknown option tx=%s, valid options: \"timer\" \"bh\"",
                     n->net_conf.tx);
        error_report("Defaulting to \"bh\"");
    }

    if (n->net_conf.tx && !strcmp(n->net_conf.tx, "timer")) {
        n->vqs[0].tx_vq = virtio_add_queue(vdev, 256,
                                           virtio_net_handle_tx_timer);
        n->vqs[0].tx_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
                                          virtio_net_tx_timer,
                                          &n->vqs[0]);
    } else {
        n->vqs[0].tx_vq = virtio_add_queue(vdev, 256,
                                           virtio_net_handle_tx_bh);
        n->vqs[0].tx_bh = qemu_bh_new(virtio_net_tx_bh, &n->vqs[0]);
    }
    n->ctrl_vq = virtio_add_queue(vdev, 64, virtio_net_handle_ctrl);
    qemu_macaddr_default_if_unset(&n->nic_conf.macaddr);
    memcpy(&n->mac[0], &n->nic_conf.macaddr, sizeof(n->mac));
    n->status = VIRTIO_NET_S_LINK_UP;
    n->announce_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL,
                                     virtio_net_announce_timer, n);

    if (n->netclient_type) {
        /*
         * This happens when virtio_net_set_netclient_name has been called.
         */
        n->nic = qemu_new_nic(&net_virtio_info, &n->nic_conf,
                              n->netclient_type, n->netclient_name, n);
    } else {
        n->nic = qemu_new_nic(&net_virtio_info, &n->nic_conf,
                              object_get_typename(OBJECT(dev)), dev->id, n);
    }

    peer_test_vnet_hdr(n);
    if (peer_has_vnet_hdr(n)) {
        for (i = 0; i < n->max_queues; i++) {
            qemu_using_vnet_hdr(qemu_get_subqueue(n->nic, i)->peer, true);
        }
        n->host_hdr_len = sizeof(struct virtio_net_hdr);
    } else {
        n->host_hdr_len = 0;
    }

    qemu_format_nic_info_str(qemu_get_queue(n->nic), n->nic_conf.macaddr.a);

    n->vqs[0].tx_waiting = 0;
    n->tx_burst = n->net_conf.txburst;
    virtio_net_set_mrg_rx_bufs(n, 0);
    n->promisc = 1; /* for compatibility */

    n->mac_table.macs = g_malloc0(MAC_TABLE_ENTRIES * ETH_ALEN);

    n->vlans = g_malloc0(MAX_VLAN >> 3);

    nc = qemu_get_queue(n->nic);
    nc->rxfilter_notify_enabled = 1;

    n->qdev = dev;
    register_savevm(dev, "virtio-net", -1, VIRTIO_NET_VM_VERSION,
                    virtio_net_save, virtio_net_load, n);

    add_boot_device_path(n->nic_conf.bootindex, dev, "/ethernet-phy@0");
}

static void virtio_net_device_unrealize(DeviceState *dev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VirtIONet *n = VIRTIO_NET(dev);
    int i;

    /* This will stop vhost backend if appropriate. */
    virtio_net_set_status(vdev, 0);

    unregister_savevm(dev, "virtio-net", n);

    g_free(n->netclient_name);
    n->netclient_name = NULL;
    g_free(n->netclient_type);
    n->netclient_type = NULL;

    g_free(n->mac_table.macs);
    g_free(n->vlans);

    for (i = 0; i < n->max_queues; i++) {
        VirtIONetQueue *q = &n->vqs[i];
        NetClientState *nc = qemu_get_subqueue(n->nic, i);

        qemu_purge_queued_packets(nc);

        if (q->tx_timer) {
            timer_del(q->tx_timer);
            timer_free(q->tx_timer);
        } else if (q->tx_bh) {
            qemu_bh_delete(q->tx_bh);
        }
    }

    timer_del(n->announce_timer);
    timer_free(n->announce_timer);
    g_free(n->vqs);
    qemu_del_nic(n->nic);
    virtio_cleanup(vdev);
}

static void virtio_net_instance_init(Object *obj)
{
    VirtIONet *n = VIRTIO_NET(obj);

    /*
     * The default config_size is sizeof(struct virtio_net_config).
     * It can be overridden with virtio_net_set_config_size.
     */
    n->config_size = sizeof(struct virtio_net_config);
}

static Property virtio_net_properties[] = {
    DEFINE_NIC_PROPERTIES(VirtIONet, nic_conf),
    DEFINE_PROP_UINT32("x-txtimer", VirtIONet, net_conf.txtimer,
                       TX_TIMER_INTERVAL),
    DEFINE_PROP_INT32("x-txburst", VirtIONet, net_conf.txburst, TX_BURST),
    DEFINE_PROP_STRING("tx", VirtIONet, net_conf.tx),
    DEFINE_PROP_END_OF_LIST(),
};

static void virtio_net_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);

    dc->props = virtio_net_properties;
    set_bit(DEVICE_CATEGORY_NETWORK, dc->categories);
    vdc->realize = virtio_net_device_realize;
    vdc->unrealize = virtio_net_device_unrealize;
    vdc->get_config = virtio_net_get_config;
    vdc->set_config = virtio_net_set_config;
    vdc->get_features = virtio_net_get_features;
    vdc->set_features = virtio_net_set_features;
    vdc->bad_features = virtio_net_bad_features;
    vdc->reset = virtio_net_reset;
    vdc->set_status = virtio_net_set_status;
    vdc->guest_notifier_mask = virtio_net_guest_notifier_mask;
    vdc->guest_notifier_pending = virtio_net_guest_notifier_pending;
}

static const TypeInfo virtio_net_info = {
    .name = TYPE_VIRTIO_NET,
    .parent = TYPE_VIRTIO_DEVICE,
    .instance_size = sizeof(VirtIONet),
    .instance_init = virtio_net_instance_init,
    .class_init = virtio_net_class_init,
};

static void virtio_register_types(void)
{
    type_register_static(&virtio_net_info);
}

type_init(virtio_register_types)