/*
 * Virtio Network Device
 *
 * Copyright IBM, Corp. 2007
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include "qemu/iov.h"
#include "hw/virtio/virtio.h"
#include "net/net.h"
#include "net/checksum.h"
#include "net/tap.h"
#include "qemu/error-report.h"
#include "qemu/timer.h"
#include "hw/virtio/virtio-net.h"
#include "net/vhost_net.h"
#include "hw/virtio/virtio-bus.h"
#include "qapi/qmp/qjson.h"
#include "monitor/monitor.h"

#define VIRTIO_NET_VM_VERSION    11

#define MAC_TABLE_ENTRIES    64
#define MAX_VLAN    (1 << 12)   /* Per 802.1Q definition */

/*
 * Calculate the number of bytes up to and including the given 'field' of
 * 'container'.
 */
#define endof(container, field) \
    (offsetof(container, field) + sizeof(((container *)0)->field))

typedef struct VirtIOFeature {
    uint32_t flags;
    size_t end;
} VirtIOFeature;

static VirtIOFeature feature_sizes[] = {
    {.flags = 1 << VIRTIO_NET_F_MAC,
     .end = endof(struct virtio_net_config, mac)},
    {.flags = 1 << VIRTIO_NET_F_STATUS,
     .end = endof(struct virtio_net_config, status)},
    {.flags = 1 << VIRTIO_NET_F_MQ,
     .end = endof(struct virtio_net_config, max_virtqueue_pairs)},
    {}
};

static VirtIONetQueue *virtio_net_get_subqueue(NetClientState *nc)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);

    return &n->vqs[nc->queue_index];
}

static int vq2q(int queue_index)
{
    return queue_index / 2;
}

/* TODO
 * - we could suppress RX interrupt if we were so inclined.
 */

static void virtio_net_get_config(VirtIODevice *vdev, uint8_t *config)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    struct virtio_net_config netcfg;

    stw_p(&netcfg.status, n->status);
    stw_p(&netcfg.max_virtqueue_pairs, n->max_queues);
    memcpy(netcfg.mac, n->mac, ETH_ALEN);
    memcpy(config, &netcfg, n->config_size);
}

static void virtio_net_set_config(VirtIODevice *vdev, const uint8_t *config)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    struct virtio_net_config netcfg = {};

    memcpy(&netcfg, config, n->config_size);

    if (!(vdev->guest_features >> VIRTIO_NET_F_CTRL_MAC_ADDR & 1) &&
        memcmp(netcfg.mac, n->mac, ETH_ALEN)) {
        memcpy(n->mac, netcfg.mac, ETH_ALEN);
        qemu_format_nic_info_str(qemu_get_queue(n->nic), n->mac);
    }
}

static bool virtio_net_started(VirtIONet *n, uint8_t status)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    return (status & VIRTIO_CONFIG_S_DRIVER_OK) &&
        (n->status & VIRTIO_NET_S_LINK_UP) && vdev->vm_running;
}
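/* Reconcile the vhost backend with the desired device state: vhost should
 * run exactly when the driver is OK, the link is up and the VM is running.
 * The !! comparison below checks whether the current vhost state already
 * matches that target and returns early if so.
 */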
static void virtio_net_vhost_status(VirtIONet *n, uint8_t status)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    NetClientState *nc = qemu_get_queue(n->nic);
    int queues = n->multiqueue ? n->max_queues : 1;

    if (!nc->peer) {
        return;
    }
    if (nc->peer->info->type != NET_CLIENT_OPTIONS_KIND_TAP) {
        return;
    }

    if (!tap_get_vhost_net(nc->peer)) {
        return;
    }

    if (!!n->vhost_started ==
        (virtio_net_started(n, status) && !nc->peer->link_down)) {
        return;
    }
    if (!n->vhost_started) {
        int r;
        if (!vhost_net_query(tap_get_vhost_net(nc->peer), vdev)) {
            return;
        }
        n->vhost_started = 1;
        r = vhost_net_start(vdev, n->nic->ncs, queues);
        if (r < 0) {
            error_report("unable to start vhost net: %d: "
                         "falling back on userspace virtio", -r);
            n->vhost_started = 0;
        }
    } else {
        vhost_net_stop(vdev, n->nic->ncs, queues);
        n->vhost_started = 0;
    }
}

static void virtio_net_set_status(struct VirtIODevice *vdev, uint8_t status)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    VirtIONetQueue *q;
    int i;
    uint8_t queue_status;

    virtio_net_vhost_status(n, status);

    for (i = 0; i < n->max_queues; i++) {
        q = &n->vqs[i];

        if ((!n->multiqueue && i != 0) || i >= n->curr_queues) {
            queue_status = 0;
        } else {
            queue_status = status;
        }

        if (!q->tx_waiting) {
            continue;
        }

        if (virtio_net_started(n, queue_status) && !n->vhost_started) {
            if (q->tx_timer) {
                timer_mod(q->tx_timer,
                          qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + n->tx_timeout);
            } else {
                qemu_bh_schedule(q->tx_bh);
            }
        } else {
            if (q->tx_timer) {
                timer_del(q->tx_timer);
            } else {
                qemu_bh_cancel(q->tx_bh);
            }
        }
    }
}

static void virtio_net_set_link_status(NetClientState *nc)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    uint16_t old_status = n->status;

    if (nc->link_down)
        n->status &= ~VIRTIO_NET_S_LINK_UP;
    else
        n->status |= VIRTIO_NET_S_LINK_UP;

    if (n->status != old_status)
        virtio_notify_config(vdev);

    virtio_net_set_status(vdev, vdev->status);
}

static void rxfilter_notify(NetClientState *nc)
{
    QObject *event_data;
    VirtIONet *n = qemu_get_nic_opaque(nc);

    if (nc->rxfilter_notify_enabled) {
        gchar *path = object_get_canonical_path(OBJECT(n->qdev));
        if (n->netclient_name) {
            event_data = qobject_from_jsonf("{ 'name': %s, 'path': %s }",
                                            n->netclient_name, path);
        } else {
            event_data = qobject_from_jsonf("{ 'path': %s }", path);
        }
        monitor_protocol_event(QEVENT_NIC_RX_FILTER_CHANGED, event_data);
        qobject_decref(event_data);
        g_free(path);

        /* disable event notification to avoid events flooding */
        nc->rxfilter_notify_enabled = 0;
    }
}

static char *mac_strdup_printf(const uint8_t *mac)
{
    return g_strdup_printf("%.2x:%.2x:%.2x:%.2x:%.2x:%.2x", mac[0],
                           mac[1], mac[2], mac[3], mac[4], mac[5]);
}
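/* Build the RxFilterInfo returned by the QMP query-rx-filter command.
 * Querying also re-enables the NIC_RX_FILTER_CHANGED event, which
 * rxfilter_notify() disarms after each emission to avoid flooding the
 * monitor.
 */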
static RxFilterInfo *virtio_net_query_rxfilter(NetClientState *nc)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);
    RxFilterInfo *info;
    strList *str_list, *entry;
    intList *int_list, *int_entry;
    int i, j;

    info = g_malloc0(sizeof(*info));
    info->name = g_strdup(nc->name);
    info->promiscuous = n->promisc;

    if (n->nouni) {
        info->unicast = RX_STATE_NONE;
    } else if (n->alluni) {
        info->unicast = RX_STATE_ALL;
    } else {
        info->unicast = RX_STATE_NORMAL;
    }

    if (n->nomulti) {
        info->multicast = RX_STATE_NONE;
    } else if (n->allmulti) {
        info->multicast = RX_STATE_ALL;
    } else {
        info->multicast = RX_STATE_NORMAL;
    }

    /* nobcast means broadcast is filtered out, so report the negation */
    info->broadcast_allowed = !n->nobcast;
    info->multicast_overflow = n->mac_table.multi_overflow;
    info->unicast_overflow = n->mac_table.uni_overflow;

    info->main_mac = mac_strdup_printf(n->mac);

    str_list = NULL;
    for (i = 0; i < n->mac_table.first_multi; i++) {
        entry = g_malloc0(sizeof(*entry));
        entry->value = mac_strdup_printf(n->mac_table.macs + i * ETH_ALEN);
        entry->next = str_list;
        str_list = entry;
    }
    info->unicast_table = str_list;

    str_list = NULL;
    for (i = n->mac_table.first_multi; i < n->mac_table.in_use; i++) {
        entry = g_malloc0(sizeof(*entry));
        entry->value = mac_strdup_printf(n->mac_table.macs + i * ETH_ALEN);
        entry->next = str_list;
        str_list = entry;
    }
    info->multicast_table = str_list;

    int_list = NULL;
    for (i = 0; i < MAX_VLAN >> 5; i++) {
        /* j <= 0x1f so that bit 31 of each word is scanned as well */
        for (j = 0; n->vlans[i] && j <= 0x1f; j++) {
            if (n->vlans[i] & (1U << j)) {
                int_entry = g_malloc0(sizeof(*int_entry));
                int_entry->value = (i << 5) + j;
                int_entry->next = int_list;
                int_list = int_entry;
            }
        }
    }
    info->vlan_table = int_list;

    /* enable event notification after query */
    nc->rxfilter_notify_enabled = 1;

    return info;
}

static void virtio_net_reset(VirtIODevice *vdev)
{
    VirtIONet *n = VIRTIO_NET(vdev);

    /* Reset back to compatibility mode */
    n->promisc = 1;
    n->allmulti = 0;
    n->alluni = 0;
    n->nomulti = 0;
    n->nouni = 0;
    n->nobcast = 0;
    /* multiqueue is disabled by default */
    n->curr_queues = 1;

    /* Flush any MAC and VLAN filter table state */
    n->mac_table.in_use = 0;
    n->mac_table.first_multi = 0;
    n->mac_table.multi_overflow = 0;
    n->mac_table.uni_overflow = 0;
    memset(n->mac_table.macs, 0, MAC_TABLE_ENTRIES * ETH_ALEN);
    memcpy(&n->mac[0], &n->nic->conf->macaddr, sizeof(n->mac));
    qemu_format_nic_info_str(qemu_get_queue(n->nic), n->mac);
    memset(n->vlans, 0, MAX_VLAN >> 3);
}

static void peer_test_vnet_hdr(VirtIONet *n)
{
    NetClientState *nc = qemu_get_queue(n->nic);
    if (!nc->peer) {
        return;
    }

    if (nc->peer->info->type != NET_CLIENT_OPTIONS_KIND_TAP) {
        return;
    }

    n->has_vnet_hdr = tap_has_vnet_hdr(nc->peer);
}

static int peer_has_vnet_hdr(VirtIONet *n)
{
    return n->has_vnet_hdr;
}

static int peer_has_ufo(VirtIONet *n)
{
    if (!peer_has_vnet_hdr(n))
        return 0;

    n->has_ufo = tap_has_ufo(qemu_get_queue(n->nic)->peer);

    return n->has_ufo;
}
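/* guest_hdr_len is the per-packet header the guest expects: 12 bytes
 * (virtio_net_hdr_mrg_rxbuf) when VIRTIO_NET_F_MRG_RXBUF is negotiated,
 * 10 bytes (virtio_net_hdr) otherwise.  host_hdr_len tracks what the tap
 * backend actually prepends; when the backend can switch to the guest's
 * header size we keep the two equal so packets need no header rewrite.
 */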
static void virtio_net_set_mrg_rx_bufs(VirtIONet *n, int mergeable_rx_bufs)
{
    int i;
    NetClientState *nc;

    n->mergeable_rx_bufs = mergeable_rx_bufs;

    n->guest_hdr_len = n->mergeable_rx_bufs ?
        sizeof(struct virtio_net_hdr_mrg_rxbuf) : sizeof(struct virtio_net_hdr);

    for (i = 0; i < n->max_queues; i++) {
        nc = qemu_get_subqueue(n->nic, i);

        if (peer_has_vnet_hdr(n) &&
            tap_has_vnet_hdr_len(nc->peer, n->guest_hdr_len)) {
            tap_set_vnet_hdr_len(nc->peer, n->guest_hdr_len);
            n->host_hdr_len = n->guest_hdr_len;
        }
    }
}

static int peer_attach(VirtIONet *n, int index)
{
    NetClientState *nc = qemu_get_subqueue(n->nic, index);

    if (!nc->peer) {
        return 0;
    }

    if (nc->peer->info->type != NET_CLIENT_OPTIONS_KIND_TAP) {
        return 0;
    }

    return tap_enable(nc->peer);
}

static int peer_detach(VirtIONet *n, int index)
{
    NetClientState *nc = qemu_get_subqueue(n->nic, index);

    if (!nc->peer) {
        return 0;
    }

    if (nc->peer->info->type != NET_CLIENT_OPTIONS_KIND_TAP) {
        return 0;
    }

    return tap_disable(nc->peer);
}

static void virtio_net_set_queues(VirtIONet *n)
{
    int i;

    for (i = 0; i < n->max_queues; i++) {
        if (i < n->curr_queues) {
            assert(!peer_attach(n, i));
        } else {
            assert(!peer_detach(n, i));
        }
    }
}

static void virtio_net_set_multiqueue(VirtIONet *n, int multiqueue);

static uint32_t virtio_net_get_features(VirtIODevice *vdev, uint32_t features)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    NetClientState *nc = qemu_get_queue(n->nic);

    features |= (1 << VIRTIO_NET_F_MAC);

    if (!peer_has_vnet_hdr(n)) {
        features &= ~(0x1 << VIRTIO_NET_F_CSUM);
        features &= ~(0x1 << VIRTIO_NET_F_HOST_TSO4);
        features &= ~(0x1 << VIRTIO_NET_F_HOST_TSO6);
        features &= ~(0x1 << VIRTIO_NET_F_HOST_ECN);

        features &= ~(0x1 << VIRTIO_NET_F_GUEST_CSUM);
        features &= ~(0x1 << VIRTIO_NET_F_GUEST_TSO4);
        features &= ~(0x1 << VIRTIO_NET_F_GUEST_TSO6);
        features &= ~(0x1 << VIRTIO_NET_F_GUEST_ECN);
    }

    if (!peer_has_vnet_hdr(n) || !peer_has_ufo(n)) {
        features &= ~(0x1 << VIRTIO_NET_F_GUEST_UFO);
        features &= ~(0x1 << VIRTIO_NET_F_HOST_UFO);
    }

    if (!nc->peer || nc->peer->info->type != NET_CLIENT_OPTIONS_KIND_TAP) {
        return features;
    }
    if (!tap_get_vhost_net(nc->peer)) {
        return features;
    }
    return vhost_net_get_features(tap_get_vhost_net(nc->peer), features);
}
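/* Feature bits that 2.6.25-era Linux guests relied on without acking
 * them; reported through the bad_features hook so the virtio core can
 * keep accepting such guests.
 */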
static uint32_t virtio_net_bad_features(VirtIODevice *vdev)
{
    uint32_t features = 0;

    /* Linux kernel 2.6.25.  It understood MAC (as everyone must),
     * but also these: */
    features |= (1 << VIRTIO_NET_F_MAC);
    features |= (1 << VIRTIO_NET_F_CSUM);
    features |= (1 << VIRTIO_NET_F_HOST_TSO4);
    features |= (1 << VIRTIO_NET_F_HOST_TSO6);
    features |= (1 << VIRTIO_NET_F_HOST_ECN);

    return features;
}

static void virtio_net_apply_guest_offloads(VirtIONet *n)
{
    tap_set_offload(qemu_get_subqueue(n->nic, 0)->peer,
            !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_CSUM)),
            !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_TSO4)),
            !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_TSO6)),
            !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_ECN)),
            !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_UFO)));
}

static uint64_t virtio_net_guest_offloads_by_features(uint32_t features)
{
    static const uint64_t guest_offloads_mask =
        (1ULL << VIRTIO_NET_F_GUEST_CSUM) |
        (1ULL << VIRTIO_NET_F_GUEST_TSO4) |
        (1ULL << VIRTIO_NET_F_GUEST_TSO6) |
        (1ULL << VIRTIO_NET_F_GUEST_ECN)  |
        (1ULL << VIRTIO_NET_F_GUEST_UFO);

    return guest_offloads_mask & features;
}

static inline uint64_t virtio_net_supported_guest_offloads(VirtIONet *n)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    return virtio_net_guest_offloads_by_features(vdev->guest_features);
}

static void virtio_net_set_features(VirtIODevice *vdev, uint32_t features)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    int i;

    virtio_net_set_multiqueue(n, !!(features & (1 << VIRTIO_NET_F_MQ)));

    virtio_net_set_mrg_rx_bufs(n, !!(features & (1 << VIRTIO_NET_F_MRG_RXBUF)));

    if (n->has_vnet_hdr) {
        n->curr_guest_offloads =
            virtio_net_guest_offloads_by_features(features);
        virtio_net_apply_guest_offloads(n);
    }

    for (i = 0; i < n->max_queues; i++) {
        NetClientState *nc = qemu_get_subqueue(n->nic, i);

        if (!nc->peer || nc->peer->info->type != NET_CLIENT_OPTIONS_KIND_TAP) {
            continue;
        }
        if (!tap_get_vhost_net(nc->peer)) {
            continue;
        }
        vhost_net_ack_features(tap_get_vhost_net(nc->peer), features);
    }
}

static int virtio_net_handle_rx_mode(VirtIONet *n, uint8_t cmd,
                                     struct iovec *iov, unsigned int iov_cnt)
{
    uint8_t on;
    size_t s;
    NetClientState *nc = qemu_get_queue(n->nic);

    s = iov_to_buf(iov, iov_cnt, 0, &on, sizeof(on));
    if (s != sizeof(on)) {
        return VIRTIO_NET_ERR;
    }

    if (cmd == VIRTIO_NET_CTRL_RX_PROMISC) {
        n->promisc = on;
    } else if (cmd == VIRTIO_NET_CTRL_RX_ALLMULTI) {
        n->allmulti = on;
    } else if (cmd == VIRTIO_NET_CTRL_RX_ALLUNI) {
        n->alluni = on;
    } else if (cmd == VIRTIO_NET_CTRL_RX_NOMULTI) {
        n->nomulti = on;
    } else if (cmd == VIRTIO_NET_CTRL_RX_NOUNI) {
        n->nouni = on;
    } else if (cmd == VIRTIO_NET_CTRL_RX_NOBCAST) {
        n->nobcast = on;
    } else {
        return VIRTIO_NET_ERR;
    }

    rxfilter_notify(nc);

    return VIRTIO_NET_OK;
}
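/* VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET carries a 64-bit mask of
 * VIRTIO_NET_F_GUEST_* bits.  Only offloads that were part of the
 * negotiated feature set may be toggled at runtime; anything else is
 * rejected with VIRTIO_NET_ERR.
 */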
static int virtio_net_handle_offloads(VirtIONet *n, uint8_t cmd,
                                      struct iovec *iov, unsigned int iov_cnt)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    uint64_t offloads;
    size_t s;

    if (!((1 << VIRTIO_NET_F_CTRL_GUEST_OFFLOADS) & vdev->guest_features)) {
        return VIRTIO_NET_ERR;
    }

    s = iov_to_buf(iov, iov_cnt, 0, &offloads, sizeof(offloads));
    if (s != sizeof(offloads)) {
        return VIRTIO_NET_ERR;
    }

    if (cmd == VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET) {
        uint64_t supported_offloads;

        if (!n->has_vnet_hdr) {
            return VIRTIO_NET_ERR;
        }

        supported_offloads = virtio_net_supported_guest_offloads(n);
        if (offloads & ~supported_offloads) {
            return VIRTIO_NET_ERR;
        }

        n->curr_guest_offloads = offloads;
        virtio_net_apply_guest_offloads(n);

        return VIRTIO_NET_OK;
    } else {
        return VIRTIO_NET_ERR;
    }
}

static int virtio_net_handle_mac(VirtIONet *n, uint8_t cmd,
                                 struct iovec *iov, unsigned int iov_cnt)
{
    struct virtio_net_ctrl_mac mac_data;
    size_t s;
    NetClientState *nc = qemu_get_queue(n->nic);

    if (cmd == VIRTIO_NET_CTRL_MAC_ADDR_SET) {
        if (iov_size(iov, iov_cnt) != sizeof(n->mac)) {
            return VIRTIO_NET_ERR;
        }
        s = iov_to_buf(iov, iov_cnt, 0, &n->mac, sizeof(n->mac));
        assert(s == sizeof(n->mac));
        qemu_format_nic_info_str(qemu_get_queue(n->nic), n->mac);
        rxfilter_notify(nc);

        return VIRTIO_NET_OK;
    }

    if (cmd != VIRTIO_NET_CTRL_MAC_TABLE_SET) {
        return VIRTIO_NET_ERR;
    }

    n->mac_table.in_use = 0;
    n->mac_table.first_multi = 0;
    n->mac_table.uni_overflow = 0;
    n->mac_table.multi_overflow = 0;
    memset(n->mac_table.macs, 0, MAC_TABLE_ENTRIES * ETH_ALEN);

    s = iov_to_buf(iov, iov_cnt, 0, &mac_data.entries,
                   sizeof(mac_data.entries));
    mac_data.entries = ldl_p(&mac_data.entries);
    if (s != sizeof(mac_data.entries)) {
        goto error;
    }
    iov_discard_front(&iov, &iov_cnt, s);

    if (mac_data.entries * ETH_ALEN > iov_size(iov, iov_cnt)) {
        goto error;
    }

    if (mac_data.entries <= MAC_TABLE_ENTRIES) {
        s = iov_to_buf(iov, iov_cnt, 0, n->mac_table.macs,
                       mac_data.entries * ETH_ALEN);
        if (s != mac_data.entries * ETH_ALEN) {
            goto error;
        }
        n->mac_table.in_use += mac_data.entries;
    } else {
        n->mac_table.uni_overflow = 1;
    }

    iov_discard_front(&iov, &iov_cnt, mac_data.entries * ETH_ALEN);

    n->mac_table.first_multi = n->mac_table.in_use;

    s = iov_to_buf(iov, iov_cnt, 0, &mac_data.entries,
                   sizeof(mac_data.entries));
    mac_data.entries = ldl_p(&mac_data.entries);
    if (s != sizeof(mac_data.entries)) {
        goto error;
    }

    iov_discard_front(&iov, &iov_cnt, s);

    if (mac_data.entries * ETH_ALEN != iov_size(iov, iov_cnt)) {
        goto error;
    }

    if (n->mac_table.in_use + mac_data.entries <= MAC_TABLE_ENTRIES) {
        s = iov_to_buf(iov, iov_cnt, 0,
                       &n->mac_table.macs[n->mac_table.in_use * ETH_ALEN],
                       mac_data.entries * ETH_ALEN);
        if (s != mac_data.entries * ETH_ALEN) {
            goto error;
        }
        n->mac_table.in_use += mac_data.entries;
    } else {
        n->mac_table.multi_overflow = 1;
    }

    rxfilter_notify(nc);

    return VIRTIO_NET_OK;

error:
    rxfilter_notify(nc);
    return VIRTIO_NET_ERR;
}
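/* The VLAN filter is a 4096-bit bitmap kept as 128 32-bit words:
 * VID 'vid' lives at bit (vid & 0x1f) of word (vid >> 5).
 */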
static int virtio_net_handle_vlan_table(VirtIONet *n, uint8_t cmd,
                                        struct iovec *iov, unsigned int iov_cnt)
{
    uint16_t vid;
    size_t s;
    NetClientState *nc = qemu_get_queue(n->nic);

    s = iov_to_buf(iov, iov_cnt, 0, &vid, sizeof(vid));
    vid = lduw_p(&vid);
    if (s != sizeof(vid)) {
        return VIRTIO_NET_ERR;
    }

    if (vid >= MAX_VLAN)
        return VIRTIO_NET_ERR;

    if (cmd == VIRTIO_NET_CTRL_VLAN_ADD)
        n->vlans[vid >> 5] |= (1U << (vid & 0x1f));
    else if (cmd == VIRTIO_NET_CTRL_VLAN_DEL)
        n->vlans[vid >> 5] &= ~(1U << (vid & 0x1f));
    else
        return VIRTIO_NET_ERR;

    rxfilter_notify(nc);

    return VIRTIO_NET_OK;
}

static int virtio_net_handle_mq(VirtIONet *n, uint8_t cmd,
                                struct iovec *iov, unsigned int iov_cnt)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    struct virtio_net_ctrl_mq mq;
    size_t s;
    uint16_t queues;

    s = iov_to_buf(iov, iov_cnt, 0, &mq, sizeof(mq));
    if (s != sizeof(mq)) {
        return VIRTIO_NET_ERR;
    }

    if (cmd != VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET) {
        return VIRTIO_NET_ERR;
    }

    queues = lduw_p(&mq.virtqueue_pairs);

    if (queues < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN ||
        queues > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX ||
        queues > n->max_queues ||
        !n->multiqueue) {
        return VIRTIO_NET_ERR;
    }

    n->curr_queues = queues;
    /* stop the backend before changing the number of queues to avoid handling a
     * disabled queue */
    virtio_net_set_status(vdev, vdev->status);
    virtio_net_set_queues(n);

    return VIRTIO_NET_OK;
}

static void virtio_net_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    struct virtio_net_ctrl_hdr ctrl;
    virtio_net_ctrl_ack status = VIRTIO_NET_ERR;
    VirtQueueElement elem;
    size_t s;
    struct iovec *iov;
    unsigned int iov_cnt;

    while (virtqueue_pop(vq, &elem)) {
        if (iov_size(elem.in_sg, elem.in_num) < sizeof(status) ||
            iov_size(elem.out_sg, elem.out_num) < sizeof(ctrl)) {
            error_report("virtio-net ctrl missing headers");
            exit(1);
        }

        iov = elem.out_sg;
        iov_cnt = elem.out_num;
        s = iov_to_buf(iov, iov_cnt, 0, &ctrl, sizeof(ctrl));
        iov_discard_front(&iov, &iov_cnt, sizeof(ctrl));
        if (s != sizeof(ctrl)) {
            status = VIRTIO_NET_ERR;
        } else if (ctrl.class == VIRTIO_NET_CTRL_RX) {
            status = virtio_net_handle_rx_mode(n, ctrl.cmd, iov, iov_cnt);
        } else if (ctrl.class == VIRTIO_NET_CTRL_MAC) {
            status = virtio_net_handle_mac(n, ctrl.cmd, iov, iov_cnt);
        } else if (ctrl.class == VIRTIO_NET_CTRL_VLAN) {
            status = virtio_net_handle_vlan_table(n, ctrl.cmd, iov, iov_cnt);
        } else if (ctrl.class == VIRTIO_NET_CTRL_MQ) {
            status = virtio_net_handle_mq(n, ctrl.cmd, iov, iov_cnt);
        } else if (ctrl.class == VIRTIO_NET_CTRL_GUEST_OFFLOADS) {
            status = virtio_net_handle_offloads(n, ctrl.cmd, iov, iov_cnt);
        }

        s = iov_from_buf(elem.in_sg, elem.in_num, 0, &status, sizeof(status));
        assert(s == sizeof(status));

        virtqueue_push(vq, &elem, sizeof(status));
        virtio_notify(vdev, vq);
    }
}

/* RX */

static void virtio_net_handle_rx(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    int queue_index = vq2q(virtio_get_queue_index(vq));

    qemu_flush_queued_packets(qemu_get_subqueue(n->nic, queue_index));
}

static int virtio_net_can_receive(NetClientState *nc)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    VirtIONetQueue *q = virtio_net_get_subqueue(nc);

    if (!vdev->vm_running) {
        return 0;
    }

    if (nc->queue_index >= n->curr_queues) {
        return 0;
    }

    if (!virtio_queue_ready(q->rx_vq) ||
        !(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK)) {
        return 0;
    }

    return 1;
}
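/* Check whether the RX virtqueue can hold a packet of 'bufsize' bytes.
 * When buffers run short we re-enable guest notifications and then check
 * again, so a buffer the guest added in between cannot be missed.
 */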
static int virtio_net_has_buffers(VirtIONetQueue *q, int bufsize)
{
    VirtIONet *n = q->n;
    if (virtio_queue_empty(q->rx_vq) ||
        (n->mergeable_rx_bufs &&
         !virtqueue_avail_bytes(q->rx_vq, bufsize, 0))) {
        virtio_queue_set_notification(q->rx_vq, 1);

        /* To avoid a race condition where the guest has made some buffers
         * available after the above check but before notification was
         * enabled, check for available buffers again.
         */
        if (virtio_queue_empty(q->rx_vq) ||
            (n->mergeable_rx_bufs &&
             !virtqueue_avail_bytes(q->rx_vq, bufsize, 0))) {
            return 0;
        }
    }

    virtio_queue_set_notification(q->rx_vq, 0);
    return 1;
}

/* dhclient uses AF_PACKET but doesn't pass auxdata to the kernel so
 * it never finds out that the packets don't have valid checksums.  This
 * causes dhclient to get upset.  Fedora's carried a patch for ages to
 * fix this with Xen but it hasn't appeared in an upstream release of
 * dhclient yet.
 *
 * To avoid breaking existing guests, we catch udp packets and add
 * checksums.  This is terrible but it's better than hacking the guest
 * kernels.
 *
 * N.B. if we introduce a zero-copy API, this operation is no longer free so
 * we should provide a mechanism to disable it to avoid polluting the host
 * cache.
 */
static void work_around_broken_dhclient(struct virtio_net_hdr *hdr,
                                        uint8_t *buf, size_t size)
{
    if ((hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) && /* missing csum */
        (size > 27 && size < 1500) && /* normal sized MTU */
        (buf[12] == 0x08 && buf[13] == 0x00) && /* ethertype == IPv4 */
        (buf[23] == 17) && /* ip.protocol == UDP */
        (buf[34] == 0 && buf[35] == 67)) { /* udp.srcport == bootps */
        net_checksum_calculate(buf, size);
        hdr->flags &= ~VIRTIO_NET_HDR_F_NEEDS_CSUM;
    }
}

static void receive_header(VirtIONet *n, const struct iovec *iov, int iov_cnt,
                           const void *buf, size_t size)
{
    if (n->has_vnet_hdr) {
        /* FIXME this cast is evil */
        void *wbuf = (void *)buf;
        work_around_broken_dhclient(wbuf, wbuf + n->host_hdr_len,
                                    size - n->host_hdr_len);
        iov_from_buf(iov, iov_cnt, 0, buf, sizeof(struct virtio_net_hdr));
    } else {
        struct virtio_net_hdr hdr = {
            .flags = 0,
            .gso_type = VIRTIO_NET_HDR_GSO_NONE
        };
        iov_from_buf(iov, iov_cnt, 0, &hdr, sizeof hdr);
    }
}

static int receive_filter(VirtIONet *n, const uint8_t *buf, int size)
{
    static const uint8_t bcast[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
    static const uint8_t vlan[] = {0x81, 0x00};
    uint8_t *ptr = (uint8_t *)buf;
    int i;

    if (n->promisc)
        return 1;

    ptr += n->host_hdr_len;

    if (!memcmp(&ptr[12], vlan, sizeof(vlan))) {
        int vid = be16_to_cpup((uint16_t *)(ptr + 14)) & 0xfff;
        if (!(n->vlans[vid >> 5] & (1U << (vid & 0x1f))))
            return 0;
    }

    if (ptr[0] & 1) { // multicast
        if (!memcmp(ptr, bcast, sizeof(bcast))) {
            return !n->nobcast;
        } else if (n->nomulti) {
            return 0;
        } else if (n->allmulti || n->mac_table.multi_overflow) {
            return 1;
        }

        for (i = n->mac_table.first_multi; i < n->mac_table.in_use; i++) {
            if (!memcmp(ptr, &n->mac_table.macs[i * ETH_ALEN], ETH_ALEN)) {
                return 1;
            }
        }
    } else { // unicast
        if (n->nouni) {
            return 0;
        } else if (n->alluni || n->mac_table.uni_overflow) {
            return 1;
        } else if (!memcmp(ptr, n->mac, ETH_ALEN)) {
            return 1;
        }

        for (i = 0; i < n->mac_table.first_multi; i++) {
            if (!memcmp(ptr, &n->mac_table.macs[i * ETH_ALEN], ETH_ALEN)) {
                return 1;
            }
        }
    }

    return 0;
}
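/* Main RX path: pop as many in-buffers as the packet needs (just one
 * unless VIRTIO_NET_F_MRG_RXBUF was negotiated), write the virtio-net
 * header into the first one, then patch num_buffers back into that
 * header once the number of descriptors used is known.
 */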
static ssize_t virtio_net_receive(NetClientState *nc, const uint8_t *buf, size_t size)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);
    VirtIONetQueue *q = virtio_net_get_subqueue(nc);
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    struct iovec mhdr_sg[VIRTQUEUE_MAX_SIZE];
    struct virtio_net_hdr_mrg_rxbuf mhdr;
    unsigned mhdr_cnt = 0;
    size_t offset, i, guest_offset;

    if (!virtio_net_can_receive(nc)) {
        return -1;
    }

    /* hdr_len refers to the header we supply to the guest */
    if (!virtio_net_has_buffers(q, size + n->guest_hdr_len - n->host_hdr_len)) {
        return 0;
    }

    if (!receive_filter(n, buf, size))
        return size;

    offset = i = 0;

    while (offset < size) {
        VirtQueueElement elem;
        int len, total;
        const struct iovec *sg = elem.in_sg;

        total = 0;

        if (virtqueue_pop(q->rx_vq, &elem) == 0) {
            if (i == 0)
                return -1;
            error_report("virtio-net unexpected empty queue: "
                         "i %zd mergeable %d offset %zd, size %zd, "
                         "guest hdr len %zd, host hdr len %zd guest features 0x%x",
                         i, n->mergeable_rx_bufs, offset, size,
                         n->guest_hdr_len, n->host_hdr_len, vdev->guest_features);
            exit(1);
        }

        if (elem.in_num < 1) {
            error_report("virtio-net receive queue contains no in buffers");
            exit(1);
        }

        if (i == 0) {
            assert(offset == 0);
            if (n->mergeable_rx_bufs) {
                mhdr_cnt = iov_copy(mhdr_sg, ARRAY_SIZE(mhdr_sg),
                                    sg, elem.in_num,
                                    offsetof(typeof(mhdr), num_buffers),
                                    sizeof(mhdr.num_buffers));
            }

            receive_header(n, sg, elem.in_num, buf, size);
            offset = n->host_hdr_len;
            total += n->guest_hdr_len;
            guest_offset = n->guest_hdr_len;
        } else {
            guest_offset = 0;
        }

        /* copy in packet.  ugh */
        len = iov_from_buf(sg, elem.in_num, guest_offset,
                           buf + offset, size - offset);
        total += len;
        offset += len;
        /* If buffers can't be merged, at this point we
         * must have consumed the complete packet.
         * Otherwise, drop it. */
        if (!n->mergeable_rx_bufs && offset < size) {
#if 0
            error_report("virtio-net truncated non-mergeable packet: "
                         "i %zd mergeable %d offset %zd, size %zd, "
                         "guest hdr len %zd, host hdr len %zd",
                         i, n->mergeable_rx_bufs,
                         offset, size, n->guest_hdr_len, n->host_hdr_len);
#endif
            return size;
        }

        /* signal other side */
        virtqueue_fill(q->rx_vq, &elem, total, i++);
    }

    if (mhdr_cnt) {
        stw_p(&mhdr.num_buffers, i);
        iov_from_buf(mhdr_sg, mhdr_cnt,
                     0,
                     &mhdr.num_buffers, sizeof mhdr.num_buffers);
    }

    virtqueue_flush(q->rx_vq, i);
    virtio_notify(vdev, q->rx_vq);

    return size;
}

static int32_t virtio_net_flush_tx(VirtIONetQueue *q);
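/* Completion callback for packets sent asynchronously: push the element
 * that was parked in async_tx, re-enable queue notifications and drain
 * whatever accumulated while the send was in flight.
 */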
static void virtio_net_tx_complete(NetClientState *nc, ssize_t len)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);
    VirtIONetQueue *q = virtio_net_get_subqueue(nc);
    VirtIODevice *vdev = VIRTIO_DEVICE(n);

    virtqueue_push(q->tx_vq, &q->async_tx.elem, 0);
    virtio_notify(vdev, q->tx_vq);

    q->async_tx.elem.out_num = q->async_tx.len = 0;

    virtio_queue_set_notification(q->tx_vq, 1);
    virtio_net_flush_tx(q);
}

/* TX */
static int32_t virtio_net_flush_tx(VirtIONetQueue *q)
{
    VirtIONet *n = q->n;
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    VirtQueueElement elem;
    int32_t num_packets = 0;
    int queue_index = vq2q(virtio_get_queue_index(q->tx_vq));
    if (!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK)) {
        return num_packets;
    }

    assert(vdev->vm_running);

    if (q->async_tx.elem.out_num) {
        virtio_queue_set_notification(q->tx_vq, 0);
        return num_packets;
    }

    while (virtqueue_pop(q->tx_vq, &elem)) {
        ssize_t ret, len;
        unsigned int out_num = elem.out_num;
        struct iovec *out_sg = &elem.out_sg[0];
        struct iovec sg[VIRTQUEUE_MAX_SIZE];

        if (out_num < 1) {
            error_report("virtio-net header not in first element");
            exit(1);
        }

        /*
         * If host wants to see the guest header as is, we can
         * pass it on unchanged. Otherwise, copy just the parts
         * that host is interested in.
         */
        assert(n->host_hdr_len <= n->guest_hdr_len);
        if (n->host_hdr_len != n->guest_hdr_len) {
            unsigned sg_num = iov_copy(sg, ARRAY_SIZE(sg),
                                       out_sg, out_num,
                                       0, n->host_hdr_len);
            sg_num += iov_copy(sg + sg_num, ARRAY_SIZE(sg) - sg_num,
                               out_sg, out_num,
                               n->guest_hdr_len, -1);
            out_num = sg_num;
            out_sg = sg;
        }

        len = n->guest_hdr_len;

        ret = qemu_sendv_packet_async(qemu_get_subqueue(n->nic, queue_index),
                                      out_sg, out_num, virtio_net_tx_complete);
        if (ret == 0) {
            virtio_queue_set_notification(q->tx_vq, 0);
            q->async_tx.elem = elem;
            q->async_tx.len = len;
            return -EBUSY;
        }

        len += ret;

        virtqueue_push(q->tx_vq, &elem, 0);
        virtio_notify(vdev, q->tx_vq);

        if (++num_packets >= n->tx_burst) {
            break;
        }
    }
    return num_packets;
}
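/* tx=timer mode: the first guest kick arms a one-shot timer so packets
 * can batch up; a second kick while the timer is pending flushes
 * immediately.  tx=bh mode (below) defers the flush to a bottom half
 * instead.
 */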
static void virtio_net_handle_tx_timer(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    VirtIONetQueue *q = &n->vqs[vq2q(virtio_get_queue_index(vq))];

    /* This happens when device was stopped but VCPU wasn't. */
    if (!vdev->vm_running) {
        q->tx_waiting = 1;
        return;
    }

    if (q->tx_waiting) {
        virtio_queue_set_notification(vq, 1);
        timer_del(q->tx_timer);
        q->tx_waiting = 0;
        virtio_net_flush_tx(q);
    } else {
        timer_mod(q->tx_timer,
                  qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + n->tx_timeout);
        q->tx_waiting = 1;
        virtio_queue_set_notification(vq, 0);
    }
}

static void virtio_net_handle_tx_bh(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    VirtIONetQueue *q = &n->vqs[vq2q(virtio_get_queue_index(vq))];

    if (unlikely(q->tx_waiting)) {
        return;
    }
    q->tx_waiting = 1;
    /* This happens when device was stopped but VCPU wasn't. */
    if (!vdev->vm_running) {
        return;
    }
    virtio_queue_set_notification(vq, 0);
    qemu_bh_schedule(q->tx_bh);
}

static void virtio_net_tx_timer(void *opaque)
{
    VirtIONetQueue *q = opaque;
    VirtIONet *n = q->n;
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    assert(vdev->vm_running);

    q->tx_waiting = 0;

    /* Just in case the driver is not ready anymore */
    if (!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK)) {
        return;
    }

    virtio_queue_set_notification(q->tx_vq, 1);
    virtio_net_flush_tx(q);
}

static void virtio_net_tx_bh(void *opaque)
{
    VirtIONetQueue *q = opaque;
    VirtIONet *n = q->n;
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    int32_t ret;

    assert(vdev->vm_running);

    q->tx_waiting = 0;

    /* Just in case the driver is not ready anymore */
    if (unlikely(!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK))) {
        return;
    }

    ret = virtio_net_flush_tx(q);
    if (ret == -EBUSY) {
        return; /* Notification re-enable handled by tx_complete */
    }

    /* If we flush a full burst of packets, assume there are
     * more coming and immediately reschedule */
    if (ret >= n->tx_burst) {
        qemu_bh_schedule(q->tx_bh);
        q->tx_waiting = 1;
        return;
    }

    /* If less than a full burst, re-enable notification and flush
     * anything that may have come in while we weren't looking.  If
     * we find something, assume the guest is still active and reschedule */
    virtio_queue_set_notification(q->tx_vq, 1);
    if (virtio_net_flush_tx(q) > 0) {
        virtio_queue_set_notification(q->tx_vq, 0);
        qemu_bh_schedule(q->tx_bh);
        q->tx_waiting = 1;
    }
}
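/* Virtqueues are laid out in rx/tx pairs (rx0/tx0, rx1/tx1, ...) with the
 * control queue last, which is what vq2q() relies on.  Toggling
 * multiqueue deletes every queue past the first pair and re-creates
 * them, so the ctrl vq has to be re-added at the end.
 */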
static void virtio_net_set_multiqueue(VirtIONet *n, int multiqueue)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    int i, max = multiqueue ? n->max_queues : 1;

    n->multiqueue = multiqueue;

    for (i = 2; i <= n->max_queues * 2 + 1; i++) {
        virtio_del_queue(vdev, i);
    }

    for (i = 1; i < max; i++) {
        n->vqs[i].rx_vq = virtio_add_queue(vdev, 256, virtio_net_handle_rx);
        if (n->vqs[i].tx_timer) {
            n->vqs[i].tx_vq =
                virtio_add_queue(vdev, 256, virtio_net_handle_tx_timer);
            n->vqs[i].tx_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
                                              virtio_net_tx_timer,
                                              &n->vqs[i]);
        } else {
            n->vqs[i].tx_vq =
                virtio_add_queue(vdev, 256, virtio_net_handle_tx_bh);
            n->vqs[i].tx_bh = qemu_bh_new(virtio_net_tx_bh, &n->vqs[i]);
        }

        n->vqs[i].tx_waiting = 0;
        n->vqs[i].n = n;
    }

    /* Note: Minix guests (version 3.2.1) use the ctrl vq but don't ack
     * VIRTIO_NET_F_CTRL_VQ.  Create the ctrl vq unconditionally to avoid
     * breaking them.
     */
    n->ctrl_vq = virtio_add_queue(vdev, 64, virtio_net_handle_ctrl);

    virtio_net_set_queues(n);
}

static void virtio_net_save(QEMUFile *f, void *opaque)
{
    int i;
    VirtIONet *n = opaque;
    VirtIODevice *vdev = VIRTIO_DEVICE(n);

    /* At this point, backend must be stopped, otherwise
     * it might keep writing to memory. */
    assert(!n->vhost_started);
    virtio_save(vdev, f);

    qemu_put_buffer(f, n->mac, ETH_ALEN);
    qemu_put_be32(f, n->vqs[0].tx_waiting);
    qemu_put_be32(f, n->mergeable_rx_bufs);
    qemu_put_be16(f, n->status);
    qemu_put_byte(f, n->promisc);
    qemu_put_byte(f, n->allmulti);
    qemu_put_be32(f, n->mac_table.in_use);
    qemu_put_buffer(f, n->mac_table.macs, n->mac_table.in_use * ETH_ALEN);
    qemu_put_buffer(f, (uint8_t *)n->vlans, MAX_VLAN >> 3);
    qemu_put_be32(f, n->has_vnet_hdr);
    qemu_put_byte(f, n->mac_table.multi_overflow);
    qemu_put_byte(f, n->mac_table.uni_overflow);
    qemu_put_byte(f, n->alluni);
    qemu_put_byte(f, n->nomulti);
    qemu_put_byte(f, n->nouni);
    qemu_put_byte(f, n->nobcast);
    qemu_put_byte(f, n->has_ufo);
    if (n->max_queues > 1) {
        qemu_put_be16(f, n->max_queues);
        qemu_put_be16(f, n->curr_queues);
        for (i = 1; i < n->curr_queues; i++) {
            qemu_put_be32(f, n->vqs[i].tx_waiting);
        }
    }

    if ((1 << VIRTIO_NET_F_CTRL_GUEST_OFFLOADS) & vdev->guest_features) {
        qemu_put_be64(f, n->curr_guest_offloads);
    }
}
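/* Load side of the version 11 migration format.  Fields were appended
 * over successive versions, hence the cascade of version_id checks that
 * mirrors the order used in virtio_net_save().
 */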
static int virtio_net_load(QEMUFile *f, void *opaque, int version_id)
{
    VirtIONet *n = opaque;
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    int ret, i, link_down;

    if (version_id < 2 || version_id > VIRTIO_NET_VM_VERSION)
        return -EINVAL;

    ret = virtio_load(vdev, f);
    if (ret) {
        return ret;
    }

    qemu_get_buffer(f, n->mac, ETH_ALEN);
    n->vqs[0].tx_waiting = qemu_get_be32(f);

    virtio_net_set_mrg_rx_bufs(n, qemu_get_be32(f));

    if (version_id >= 3)
        n->status = qemu_get_be16(f);

    if (version_id >= 4) {
        if (version_id < 8) {
            n->promisc = qemu_get_be32(f);
            n->allmulti = qemu_get_be32(f);
        } else {
            n->promisc = qemu_get_byte(f);
            n->allmulti = qemu_get_byte(f);
        }
    }

    if (version_id >= 5) {
        n->mac_table.in_use = qemu_get_be32(f);
        /* MAC_TABLE_ENTRIES may be different from the saved image */
        if (n->mac_table.in_use <= MAC_TABLE_ENTRIES) {
            qemu_get_buffer(f, n->mac_table.macs,
                            n->mac_table.in_use * ETH_ALEN);
        } else if (n->mac_table.in_use) {
            /* discard the oversized table; the scratch buffer must hold
             * the full in_use * ETH_ALEN bytes being read */
            uint8_t *buf = g_malloc0(n->mac_table.in_use * ETH_ALEN);
            qemu_get_buffer(f, buf, n->mac_table.in_use * ETH_ALEN);
            g_free(buf);
            n->mac_table.multi_overflow = n->mac_table.uni_overflow = 1;
            n->mac_table.in_use = 0;
        }
    }

    if (version_id >= 6)
        qemu_get_buffer(f, (uint8_t *)n->vlans, MAX_VLAN >> 3);

    if (version_id >= 7) {
        if (qemu_get_be32(f) && !peer_has_vnet_hdr(n)) {
            error_report("virtio-net: saved image requires vnet_hdr=on");
            return -1;
        }
    }

    if (version_id >= 9) {
        n->mac_table.multi_overflow = qemu_get_byte(f);
        n->mac_table.uni_overflow = qemu_get_byte(f);
    }

    if (version_id >= 10) {
        n->alluni = qemu_get_byte(f);
        n->nomulti = qemu_get_byte(f);
        n->nouni = qemu_get_byte(f);
        n->nobcast = qemu_get_byte(f);
    }

    if (version_id >= 11) {
        if (qemu_get_byte(f) && !peer_has_ufo(n)) {
            error_report("virtio-net: saved image requires TUN_F_UFO support");
            return -1;
        }
    }

    if (n->max_queues > 1) {
        if (n->max_queues != qemu_get_be16(f)) {
            error_report("virtio-net: different max_queues");
            return -1;
        }

        n->curr_queues = qemu_get_be16(f);
        for (i = 1; i < n->curr_queues; i++) {
            n->vqs[i].tx_waiting = qemu_get_be32(f);
        }
    }

    if ((1 << VIRTIO_NET_F_CTRL_GUEST_OFFLOADS) & vdev->guest_features) {
        n->curr_guest_offloads = qemu_get_be64(f);
    } else {
        n->curr_guest_offloads = virtio_net_supported_guest_offloads(n);
    }

    if (peer_has_vnet_hdr(n)) {
        virtio_net_apply_guest_offloads(n);
    }

    virtio_net_set_queues(n);

    /* Find the first multicast entry in the saved MAC filter */
    for (i = 0; i < n->mac_table.in_use; i++) {
        if (n->mac_table.macs[i * ETH_ALEN] & 1) {
            break;
        }
    }
    n->mac_table.first_multi = i;

    /* nc.link_down can't be migrated, so infer link_down according
     * to link status bit in n->status */
    link_down = (n->status & VIRTIO_NET_S_LINK_UP) == 0;
    for (i = 0; i < n->max_queues; i++) {
        qemu_get_subqueue(n->nic, i)->link_down = link_down;
    }

    return 0;
}

static void virtio_net_cleanup(NetClientState *nc)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);

    n->nic = NULL;
}

static NetClientInfo net_virtio_info = {
    .type = NET_CLIENT_OPTIONS_KIND_NIC,
    .size = sizeof(NICState),
    .can_receive = virtio_net_can_receive,
    .receive = virtio_net_receive,
    .cleanup = virtio_net_cleanup,
    .link_status_changed = virtio_net_set_link_status,
    .query_rx_filter = virtio_net_query_rxfilter,
};

static bool virtio_net_guest_notifier_pending(VirtIODevice *vdev, int idx)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    NetClientState *nc = qemu_get_subqueue(n->nic, vq2q(idx));
    assert(n->vhost_started);
    return vhost_net_virtqueue_pending(tap_get_vhost_net(nc->peer), idx);
}

static void virtio_net_guest_notifier_mask(VirtIODevice *vdev, int idx,
                                           bool mask)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    NetClientState *nc = qemu_get_subqueue(n->nic, vq2q(idx));
    assert(n->vhost_started);
    vhost_net_virtqueue_mask(tap_get_vhost_net(nc->peer),
                             vdev, idx, mask);
}
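/* The config space exposed to the guest is truncated to the last field
 * whose feature bit is offered (see feature_sizes[]); MAC is always
 * included.
 */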
void virtio_net_set_config_size(VirtIONet *n, uint32_t host_features)
{
    int i, config_size = 0;
    host_features |= (1 << VIRTIO_NET_F_MAC);
    for (i = 0; feature_sizes[i].flags != 0; i++) {
        if (host_features & feature_sizes[i].flags) {
            config_size = MAX(feature_sizes[i].end, config_size);
        }
    }
    n->config_size = config_size;
}

void virtio_net_set_netclient_name(VirtIONet *n, const char *name,
                                   const char *type)
{
    /*
     * The name can be NULL; in that case the netclient name will be
     * of the form type.x.
     */
    assert(type != NULL);

    if (n->netclient_name) {
        g_free(n->netclient_name);
        n->netclient_name = NULL;
    }
    if (n->netclient_type) {
        g_free(n->netclient_type);
        n->netclient_type = NULL;
    }

    if (name != NULL) {
        n->netclient_name = g_strdup(name);
    }
    n->netclient_type = g_strdup(type);
}

static int virtio_net_device_init(VirtIODevice *vdev)
{
    int i;

    DeviceState *qdev = DEVICE(vdev);
    VirtIONet *n = VIRTIO_NET(vdev);
    NetClientState *nc;

    virtio_init(VIRTIO_DEVICE(n), "virtio-net", VIRTIO_ID_NET,
                n->config_size);

    n->max_queues = MAX(n->nic_conf.queues, 1);
    n->vqs = g_malloc0(sizeof(VirtIONetQueue) * n->max_queues);
    n->vqs[0].rx_vq = virtio_add_queue(vdev, 256, virtio_net_handle_rx);
    n->curr_queues = 1;
    n->vqs[0].n = n;
    n->tx_timeout = n->net_conf.txtimer;

    if (n->net_conf.tx && strcmp(n->net_conf.tx, "timer")
                       && strcmp(n->net_conf.tx, "bh")) {
        error_report("virtio-net: "
                     "Unknown option tx=%s, valid options: \"timer\" \"bh\"",
                     n->net_conf.tx);
        error_report("Defaulting to \"bh\"");
    }

    if (n->net_conf.tx && !strcmp(n->net_conf.tx, "timer")) {
        n->vqs[0].tx_vq = virtio_add_queue(vdev, 256,
                                           virtio_net_handle_tx_timer);
        n->vqs[0].tx_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, virtio_net_tx_timer,
                                          &n->vqs[0]);
    } else {
        n->vqs[0].tx_vq = virtio_add_queue(vdev, 256,
                                           virtio_net_handle_tx_bh);
        n->vqs[0].tx_bh = qemu_bh_new(virtio_net_tx_bh, &n->vqs[0]);
    }
    n->ctrl_vq = virtio_add_queue(vdev, 64, virtio_net_handle_ctrl);
    qemu_macaddr_default_if_unset(&n->nic_conf.macaddr);
    memcpy(&n->mac[0], &n->nic_conf.macaddr, sizeof(n->mac));
    n->status = VIRTIO_NET_S_LINK_UP;

    if (n->netclient_type) {
        /*
         * This happens when virtio_net_set_netclient_name has been called.
         */
        n->nic = qemu_new_nic(&net_virtio_info, &n->nic_conf,
                              n->netclient_type, n->netclient_name, n);
    } else {
        n->nic = qemu_new_nic(&net_virtio_info, &n->nic_conf,
                              object_get_typename(OBJECT(qdev)), qdev->id, n);
    }

    peer_test_vnet_hdr(n);
    if (peer_has_vnet_hdr(n)) {
        for (i = 0; i < n->max_queues; i++) {
            tap_using_vnet_hdr(qemu_get_subqueue(n->nic, i)->peer, true);
        }
        n->host_hdr_len = sizeof(struct virtio_net_hdr);
    } else {
        n->host_hdr_len = 0;
    }

    qemu_format_nic_info_str(qemu_get_queue(n->nic), n->nic_conf.macaddr.a);

    n->vqs[0].tx_waiting = 0;
    n->tx_burst = n->net_conf.txburst;
    virtio_net_set_mrg_rx_bufs(n, 0);
    n->promisc = 1; /* for compatibility */

    n->mac_table.macs = g_malloc0(MAC_TABLE_ENTRIES * ETH_ALEN);

    n->vlans = g_malloc0(MAX_VLAN >> 3);

    nc = qemu_get_queue(n->nic);
    nc->rxfilter_notify_enabled = 1;

    n->qdev = qdev;
    register_savevm(qdev, "virtio-net", -1, VIRTIO_NET_VM_VERSION,
                    virtio_net_save, virtio_net_load, n);

    add_boot_device_path(n->nic_conf.bootindex, qdev, "/ethernet-phy@0");
    return 0;
}
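/* Tear-down mirrors init: drive the device to status 0 first (which
 * stops any vhost backend), then unregister migration state and release
 * the filter tables and per-queue timers/bottom halves.
 */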
static int virtio_net_device_exit(DeviceState *qdev)
{
    VirtIONet *n = VIRTIO_NET(qdev);
    VirtIODevice *vdev = VIRTIO_DEVICE(qdev);
    int i;

    /* This will stop vhost backend if appropriate. */
    virtio_net_set_status(vdev, 0);

    unregister_savevm(qdev, "virtio-net", n);

    if (n->netclient_name) {
        g_free(n->netclient_name);
        n->netclient_name = NULL;
    }
    if (n->netclient_type) {
        g_free(n->netclient_type);
        n->netclient_type = NULL;
    }

    g_free(n->mac_table.macs);
    g_free(n->vlans);

    for (i = 0; i < n->max_queues; i++) {
        VirtIONetQueue *q = &n->vqs[i];
        NetClientState *nc = qemu_get_subqueue(n->nic, i);

        qemu_purge_queued_packets(nc);

        if (q->tx_timer) {
            timer_del(q->tx_timer);
            timer_free(q->tx_timer);
        } else if (q->tx_bh) {
            qemu_bh_delete(q->tx_bh);
        }
    }

    g_free(n->vqs);
    qemu_del_nic(n->nic);
    virtio_cleanup(vdev);

    return 0;
}

static void virtio_net_instance_init(Object *obj)
{
    VirtIONet *n = VIRTIO_NET(obj);

    /*
     * The default config_size is sizeof(struct virtio_net_config).
     * Can be overridden with virtio_net_set_config_size.
     */
    n->config_size = sizeof(struct virtio_net_config);
}

static Property virtio_net_properties[] = {
    DEFINE_NIC_PROPERTIES(VirtIONet, nic_conf),
    DEFINE_PROP_UINT32("x-txtimer", VirtIONet, net_conf.txtimer,
                       TX_TIMER_INTERVAL),
    DEFINE_PROP_INT32("x-txburst", VirtIONet, net_conf.txburst, TX_BURST),
    DEFINE_PROP_STRING("tx", VirtIONet, net_conf.tx),
    DEFINE_PROP_END_OF_LIST(),
};

static void virtio_net_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
    dc->exit = virtio_net_device_exit;
    dc->props = virtio_net_properties;
    set_bit(DEVICE_CATEGORY_NETWORK, dc->categories);
    vdc->init = virtio_net_device_init;
    vdc->get_config = virtio_net_get_config;
    vdc->set_config = virtio_net_set_config;
    vdc->get_features = virtio_net_get_features;
    vdc->set_features = virtio_net_set_features;
    vdc->bad_features = virtio_net_bad_features;
    vdc->reset = virtio_net_reset;
    vdc->set_status = virtio_net_set_status;
    vdc->guest_notifier_mask = virtio_net_guest_notifier_mask;
    vdc->guest_notifier_pending = virtio_net_guest_notifier_pending;
}

static const TypeInfo virtio_net_info = {
    .name = TYPE_VIRTIO_NET,
    .parent = TYPE_VIRTIO_DEVICE,
    .instance_size = sizeof(VirtIONet),
    .instance_init = virtio_net_instance_init,
    .class_init = virtio_net_class_init,
};

static void virtio_register_types(void)
{
    type_register_static(&virtio_net_info);
}

type_init(virtio_register_types)