1 /* 2 * Virtio Network Device 3 * 4 * Copyright IBM, Corp. 2007 5 * 6 * Authors: 7 * Anthony Liguori <aliguori@us.ibm.com> 8 * 9 * This work is licensed under the terms of the GNU GPL, version 2. See 10 * the COPYING file in the top-level directory. 11 * 12 */ 13 14 #include "qemu/iov.h" 15 #include "hw/virtio/virtio.h" 16 #include "net/net.h" 17 #include "net/checksum.h" 18 #include "net/tap.h" 19 #include "qemu/error-report.h" 20 #include "qemu/timer.h" 21 #include "hw/virtio/virtio-net.h" 22 #include "net/vhost_net.h" 23 #include "hw/virtio/virtio-bus.h" 24 25 #define VIRTIO_NET_VM_VERSION 11 26 27 #define MAC_TABLE_ENTRIES 64 28 #define MAX_VLAN (1 << 12) /* Per 802.1Q definition */ 29 30 /* 31 * Calculate the number of bytes up to and including the given 'field' of 32 * 'container'. 33 */ 34 #define endof(container, field) \ 35 (offsetof(container, field) + sizeof(((container *)0)->field)) 36 37 typedef struct VirtIOFeature { 38 uint32_t flags; 39 size_t end; 40 } VirtIOFeature; 41 42 static VirtIOFeature feature_sizes[] = { 43 {.flags = 1 << VIRTIO_NET_F_MAC, 44 .end = endof(struct virtio_net_config, mac)}, 45 {.flags = 1 << VIRTIO_NET_F_STATUS, 46 .end = endof(struct virtio_net_config, status)}, 47 {.flags = 1 << VIRTIO_NET_F_MQ, 48 .end = endof(struct virtio_net_config, max_virtqueue_pairs)}, 49 {} 50 }; 51 52 static VirtIONetQueue *virtio_net_get_subqueue(NetClientState *nc) 53 { 54 VirtIONet *n = qemu_get_nic_opaque(nc); 55 56 return &n->vqs[nc->queue_index]; 57 } 58 59 static int vq2q(int queue_index) 60 { 61 return queue_index / 2; 62 } 63 64 /* TODO 65 * - we could suppress RX interrupt if we were so inclined. 66 */ 67 68 static void virtio_net_get_config(VirtIODevice *vdev, uint8_t *config) 69 { 70 VirtIONet *n = VIRTIO_NET(vdev); 71 struct virtio_net_config netcfg; 72 73 stw_p(&netcfg.status, n->status); 74 stw_p(&netcfg.max_virtqueue_pairs, n->max_queues); 75 memcpy(netcfg.mac, n->mac, ETH_ALEN); 76 memcpy(config, &netcfg, n->config_size); 77 } 78 79 static void virtio_net_set_config(VirtIODevice *vdev, const uint8_t *config) 80 { 81 VirtIONet *n = VIRTIO_NET(vdev); 82 struct virtio_net_config netcfg = {}; 83 84 memcpy(&netcfg, config, n->config_size); 85 86 if (!(vdev->guest_features >> VIRTIO_NET_F_CTRL_MAC_ADDR & 1) && 87 memcmp(netcfg.mac, n->mac, ETH_ALEN)) { 88 memcpy(n->mac, netcfg.mac, ETH_ALEN); 89 qemu_format_nic_info_str(qemu_get_queue(n->nic), n->mac); 90 } 91 } 92 93 static bool virtio_net_started(VirtIONet *n, uint8_t status) 94 { 95 VirtIODevice *vdev = VIRTIO_DEVICE(n); 96 return (status & VIRTIO_CONFIG_S_DRIVER_OK) && 97 (n->status & VIRTIO_NET_S_LINK_UP) && vdev->vm_running; 98 } 99 100 static void virtio_net_vhost_status(VirtIONet *n, uint8_t status) 101 { 102 VirtIODevice *vdev = VIRTIO_DEVICE(n); 103 NetClientState *nc = qemu_get_queue(n->nic); 104 int queues = n->multiqueue ? n->max_queues : 1; 105 106 if (!nc->peer) { 107 return; 108 } 109 if (nc->peer->info->type != NET_CLIENT_OPTIONS_KIND_TAP) { 110 return; 111 } 112 113 if (!tap_get_vhost_net(nc->peer)) { 114 return; 115 } 116 117 if (!!n->vhost_started == 118 (virtio_net_started(n, status) && !nc->peer->link_down)) { 119 return; 120 } 121 if (!n->vhost_started) { 122 int r; 123 if (!vhost_net_query(tap_get_vhost_net(nc->peer), vdev)) { 124 return; 125 } 126 n->vhost_started = 1; 127 r = vhost_net_start(vdev, n->nic->ncs, queues); 128 if (r < 0) { 129 error_report("unable to start vhost net: %d: " 130 "falling back on userspace virtio", -r); 131 n->vhost_started = 0; 132 } 133 } else { 134 vhost_net_stop(vdev, n->nic->ncs, queues); 135 n->vhost_started = 0; 136 } 137 } 138 139 static void virtio_net_set_status(struct VirtIODevice *vdev, uint8_t status) 140 { 141 VirtIONet *n = VIRTIO_NET(vdev); 142 VirtIONetQueue *q; 143 int i; 144 uint8_t queue_status; 145 146 virtio_net_vhost_status(n, status); 147 148 for (i = 0; i < n->max_queues; i++) { 149 q = &n->vqs[i]; 150 151 if ((!n->multiqueue && i != 0) || i >= n->curr_queues) { 152 queue_status = 0; 153 } else { 154 queue_status = status; 155 } 156 157 if (!q->tx_waiting) { 158 continue; 159 } 160 161 if (virtio_net_started(n, queue_status) && !n->vhost_started) { 162 if (q->tx_timer) { 163 qemu_mod_timer(q->tx_timer, 164 qemu_get_clock_ns(vm_clock) + n->tx_timeout); 165 } else { 166 qemu_bh_schedule(q->tx_bh); 167 } 168 } else { 169 if (q->tx_timer) { 170 qemu_del_timer(q->tx_timer); 171 } else { 172 qemu_bh_cancel(q->tx_bh); 173 } 174 } 175 } 176 } 177 178 static void virtio_net_set_link_status(NetClientState *nc) 179 { 180 VirtIONet *n = qemu_get_nic_opaque(nc); 181 VirtIODevice *vdev = VIRTIO_DEVICE(n); 182 uint16_t old_status = n->status; 183 184 if (nc->link_down) 185 n->status &= ~VIRTIO_NET_S_LINK_UP; 186 else 187 n->status |= VIRTIO_NET_S_LINK_UP; 188 189 if (n->status != old_status) 190 virtio_notify_config(vdev); 191 192 virtio_net_set_status(vdev, vdev->status); 193 } 194 195 static void virtio_net_reset(VirtIODevice *vdev) 196 { 197 VirtIONet *n = VIRTIO_NET(vdev); 198 199 /* Reset back to compatibility mode */ 200 n->promisc = 1; 201 n->allmulti = 0; 202 n->alluni = 0; 203 n->nomulti = 0; 204 n->nouni = 0; 205 n->nobcast = 0; 206 /* multiqueue is disabled by default */ 207 n->curr_queues = 1; 208 209 /* Flush any MAC and VLAN filter table state */ 210 n->mac_table.in_use = 0; 211 n->mac_table.first_multi = 0; 212 n->mac_table.multi_overflow = 0; 213 n->mac_table.uni_overflow = 0; 214 memset(n->mac_table.macs, 0, MAC_TABLE_ENTRIES * ETH_ALEN); 215 memcpy(&n->mac[0], &n->nic->conf->macaddr, sizeof(n->mac)); 216 memset(n->vlans, 0, MAX_VLAN >> 3); 217 } 218 219 static void peer_test_vnet_hdr(VirtIONet *n) 220 { 221 NetClientState *nc = qemu_get_queue(n->nic); 222 if (!nc->peer) { 223 return; 224 } 225 226 if (nc->peer->info->type != NET_CLIENT_OPTIONS_KIND_TAP) { 227 return; 228 } 229 230 n->has_vnet_hdr = tap_has_vnet_hdr(nc->peer); 231 } 232 233 static int peer_has_vnet_hdr(VirtIONet *n) 234 { 235 return n->has_vnet_hdr; 236 } 237 238 static int peer_has_ufo(VirtIONet *n) 239 { 240 if (!peer_has_vnet_hdr(n)) 241 return 0; 242 243 n->has_ufo = tap_has_ufo(qemu_get_queue(n->nic)->peer); 244 245 return n->has_ufo; 246 } 247 248 static void virtio_net_set_mrg_rx_bufs(VirtIONet *n, int mergeable_rx_bufs) 249 { 250 int i; 251 NetClientState *nc; 252 253 n->mergeable_rx_bufs = mergeable_rx_bufs; 254 255 n->guest_hdr_len = n->mergeable_rx_bufs ? 256 sizeof(struct virtio_net_hdr_mrg_rxbuf) : sizeof(struct virtio_net_hdr); 257 258 for (i = 0; i < n->max_queues; i++) { 259 nc = qemu_get_subqueue(n->nic, i); 260 261 if (peer_has_vnet_hdr(n) && 262 tap_has_vnet_hdr_len(nc->peer, n->guest_hdr_len)) { 263 tap_set_vnet_hdr_len(nc->peer, n->guest_hdr_len); 264 n->host_hdr_len = n->guest_hdr_len; 265 } 266 } 267 } 268 269 static int peer_attach(VirtIONet *n, int index) 270 { 271 NetClientState *nc = qemu_get_subqueue(n->nic, index); 272 273 if (!nc->peer) { 274 return 0; 275 } 276 277 if (nc->peer->info->type != NET_CLIENT_OPTIONS_KIND_TAP) { 278 return 0; 279 } 280 281 return tap_enable(nc->peer); 282 } 283 284 static int peer_detach(VirtIONet *n, int index) 285 { 286 NetClientState *nc = qemu_get_subqueue(n->nic, index); 287 288 if (!nc->peer) { 289 return 0; 290 } 291 292 if (nc->peer->info->type != NET_CLIENT_OPTIONS_KIND_TAP) { 293 return 0; 294 } 295 296 return tap_disable(nc->peer); 297 } 298 299 static void virtio_net_set_queues(VirtIONet *n) 300 { 301 int i; 302 303 for (i = 0; i < n->max_queues; i++) { 304 if (i < n->curr_queues) { 305 assert(!peer_attach(n, i)); 306 } else { 307 assert(!peer_detach(n, i)); 308 } 309 } 310 } 311 312 static void virtio_net_set_multiqueue(VirtIONet *n, int multiqueue); 313 314 static uint32_t virtio_net_get_features(VirtIODevice *vdev, uint32_t features) 315 { 316 VirtIONet *n = VIRTIO_NET(vdev); 317 NetClientState *nc = qemu_get_queue(n->nic); 318 319 features |= (1 << VIRTIO_NET_F_MAC); 320 321 if (!peer_has_vnet_hdr(n)) { 322 features &= ~(0x1 << VIRTIO_NET_F_CSUM); 323 features &= ~(0x1 << VIRTIO_NET_F_HOST_TSO4); 324 features &= ~(0x1 << VIRTIO_NET_F_HOST_TSO6); 325 features &= ~(0x1 << VIRTIO_NET_F_HOST_ECN); 326 327 features &= ~(0x1 << VIRTIO_NET_F_GUEST_CSUM); 328 features &= ~(0x1 << VIRTIO_NET_F_GUEST_TSO4); 329 features &= ~(0x1 << VIRTIO_NET_F_GUEST_TSO6); 330 features &= ~(0x1 << VIRTIO_NET_F_GUEST_ECN); 331 } 332 333 if (!peer_has_vnet_hdr(n) || !peer_has_ufo(n)) { 334 features &= ~(0x1 << VIRTIO_NET_F_GUEST_UFO); 335 features &= ~(0x1 << VIRTIO_NET_F_HOST_UFO); 336 } 337 338 if (!nc->peer || nc->peer->info->type != NET_CLIENT_OPTIONS_KIND_TAP) { 339 return features; 340 } 341 if (!tap_get_vhost_net(nc->peer)) { 342 return features; 343 } 344 return vhost_net_get_features(tap_get_vhost_net(nc->peer), features); 345 } 346 347 static uint32_t virtio_net_bad_features(VirtIODevice *vdev) 348 { 349 uint32_t features = 0; 350 351 /* Linux kernel 2.6.25. It understood MAC (as everyone must), 352 * but also these: */ 353 features |= (1 << VIRTIO_NET_F_MAC); 354 features |= (1 << VIRTIO_NET_F_CSUM); 355 features |= (1 << VIRTIO_NET_F_HOST_TSO4); 356 features |= (1 << VIRTIO_NET_F_HOST_TSO6); 357 features |= (1 << VIRTIO_NET_F_HOST_ECN); 358 359 return features; 360 } 361 362 static void virtio_net_apply_guest_offloads(VirtIONet *n) 363 { 364 tap_set_offload(qemu_get_subqueue(n->nic, 0)->peer, 365 !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_CSUM)), 366 !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_TSO4)), 367 !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_TSO6)), 368 !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_ECN)), 369 !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_UFO))); 370 } 371 372 static uint64_t virtio_net_guest_offloads_by_features(uint32_t features) 373 { 374 static const uint64_t guest_offloads_mask = 375 (1ULL << VIRTIO_NET_F_GUEST_CSUM) | 376 (1ULL << VIRTIO_NET_F_GUEST_TSO4) | 377 (1ULL << VIRTIO_NET_F_GUEST_TSO6) | 378 (1ULL << VIRTIO_NET_F_GUEST_ECN) | 379 (1ULL << VIRTIO_NET_F_GUEST_UFO); 380 381 return guest_offloads_mask & features; 382 } 383 384 static inline uint64_t virtio_net_supported_guest_offloads(VirtIONet *n) 385 { 386 VirtIODevice *vdev = VIRTIO_DEVICE(n); 387 return virtio_net_guest_offloads_by_features(vdev->guest_features); 388 } 389 390 static void virtio_net_set_features(VirtIODevice *vdev, uint32_t features) 391 { 392 VirtIONet *n = VIRTIO_NET(vdev); 393 int i; 394 395 virtio_net_set_multiqueue(n, !!(features & (1 << VIRTIO_NET_F_MQ))); 396 397 virtio_net_set_mrg_rx_bufs(n, !!(features & (1 << VIRTIO_NET_F_MRG_RXBUF))); 398 399 if (n->has_vnet_hdr) { 400 n->curr_guest_offloads = 401 virtio_net_guest_offloads_by_features(features); 402 virtio_net_apply_guest_offloads(n); 403 } 404 405 for (i = 0; i < n->max_queues; i++) { 406 NetClientState *nc = qemu_get_subqueue(n->nic, i); 407 408 if (!nc->peer || nc->peer->info->type != NET_CLIENT_OPTIONS_KIND_TAP) { 409 continue; 410 } 411 if (!tap_get_vhost_net(nc->peer)) { 412 continue; 413 } 414 vhost_net_ack_features(tap_get_vhost_net(nc->peer), features); 415 } 416 } 417 418 static int virtio_net_handle_rx_mode(VirtIONet *n, uint8_t cmd, 419 struct iovec *iov, unsigned int iov_cnt) 420 { 421 uint8_t on; 422 size_t s; 423 424 s = iov_to_buf(iov, iov_cnt, 0, &on, sizeof(on)); 425 if (s != sizeof(on)) { 426 return VIRTIO_NET_ERR; 427 } 428 429 if (cmd == VIRTIO_NET_CTRL_RX_PROMISC) { 430 n->promisc = on; 431 } else if (cmd == VIRTIO_NET_CTRL_RX_ALLMULTI) { 432 n->allmulti = on; 433 } else if (cmd == VIRTIO_NET_CTRL_RX_ALLUNI) { 434 n->alluni = on; 435 } else if (cmd == VIRTIO_NET_CTRL_RX_NOMULTI) { 436 n->nomulti = on; 437 } else if (cmd == VIRTIO_NET_CTRL_RX_NOUNI) { 438 n->nouni = on; 439 } else if (cmd == VIRTIO_NET_CTRL_RX_NOBCAST) { 440 n->nobcast = on; 441 } else { 442 return VIRTIO_NET_ERR; 443 } 444 445 return VIRTIO_NET_OK; 446 } 447 448 static int virtio_net_handle_offloads(VirtIONet *n, uint8_t cmd, 449 struct iovec *iov, unsigned int iov_cnt) 450 { 451 VirtIODevice *vdev = VIRTIO_DEVICE(n); 452 uint64_t offloads; 453 size_t s; 454 455 if (!((1 << VIRTIO_NET_F_CTRL_GUEST_OFFLOADS) & vdev->guest_features)) { 456 return VIRTIO_NET_ERR; 457 } 458 459 s = iov_to_buf(iov, iov_cnt, 0, &offloads, sizeof(offloads)); 460 if (s != sizeof(offloads)) { 461 return VIRTIO_NET_ERR; 462 } 463 464 if (cmd == VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET) { 465 uint64_t supported_offloads; 466 467 if (!n->has_vnet_hdr) { 468 return VIRTIO_NET_ERR; 469 } 470 471 supported_offloads = virtio_net_supported_guest_offloads(n); 472 if (offloads & ~supported_offloads) { 473 return VIRTIO_NET_ERR; 474 } 475 476 n->curr_guest_offloads = offloads; 477 virtio_net_apply_guest_offloads(n); 478 479 return VIRTIO_NET_OK; 480 } else { 481 return VIRTIO_NET_ERR; 482 } 483 } 484 485 static int virtio_net_handle_mac(VirtIONet *n, uint8_t cmd, 486 struct iovec *iov, unsigned int iov_cnt) 487 { 488 struct virtio_net_ctrl_mac mac_data; 489 size_t s; 490 491 if (cmd == VIRTIO_NET_CTRL_MAC_ADDR_SET) { 492 if (iov_size(iov, iov_cnt) != sizeof(n->mac)) { 493 return VIRTIO_NET_ERR; 494 } 495 s = iov_to_buf(iov, iov_cnt, 0, &n->mac, sizeof(n->mac)); 496 assert(s == sizeof(n->mac)); 497 qemu_format_nic_info_str(qemu_get_queue(n->nic), n->mac); 498 return VIRTIO_NET_OK; 499 } 500 501 if (cmd != VIRTIO_NET_CTRL_MAC_TABLE_SET) { 502 return VIRTIO_NET_ERR; 503 } 504 505 n->mac_table.in_use = 0; 506 n->mac_table.first_multi = 0; 507 n->mac_table.uni_overflow = 0; 508 n->mac_table.multi_overflow = 0; 509 memset(n->mac_table.macs, 0, MAC_TABLE_ENTRIES * ETH_ALEN); 510 511 s = iov_to_buf(iov, iov_cnt, 0, &mac_data.entries, 512 sizeof(mac_data.entries)); 513 mac_data.entries = ldl_p(&mac_data.entries); 514 if (s != sizeof(mac_data.entries)) { 515 return VIRTIO_NET_ERR; 516 } 517 iov_discard_front(&iov, &iov_cnt, s); 518 519 if (mac_data.entries * ETH_ALEN > iov_size(iov, iov_cnt)) { 520 return VIRTIO_NET_ERR; 521 } 522 523 if (mac_data.entries <= MAC_TABLE_ENTRIES) { 524 s = iov_to_buf(iov, iov_cnt, 0, n->mac_table.macs, 525 mac_data.entries * ETH_ALEN); 526 if (s != mac_data.entries * ETH_ALEN) { 527 return VIRTIO_NET_ERR; 528 } 529 n->mac_table.in_use += mac_data.entries; 530 } else { 531 n->mac_table.uni_overflow = 1; 532 } 533 534 iov_discard_front(&iov, &iov_cnt, mac_data.entries * ETH_ALEN); 535 536 n->mac_table.first_multi = n->mac_table.in_use; 537 538 s = iov_to_buf(iov, iov_cnt, 0, &mac_data.entries, 539 sizeof(mac_data.entries)); 540 mac_data.entries = ldl_p(&mac_data.entries); 541 if (s != sizeof(mac_data.entries)) { 542 return VIRTIO_NET_ERR; 543 } 544 545 iov_discard_front(&iov, &iov_cnt, s); 546 547 if (mac_data.entries * ETH_ALEN != iov_size(iov, iov_cnt)) { 548 return VIRTIO_NET_ERR; 549 } 550 551 if (n->mac_table.in_use + mac_data.entries <= MAC_TABLE_ENTRIES) { 552 s = iov_to_buf(iov, iov_cnt, 0, n->mac_table.macs, 553 mac_data.entries * ETH_ALEN); 554 if (s != mac_data.entries * ETH_ALEN) { 555 return VIRTIO_NET_ERR; 556 } 557 n->mac_table.in_use += mac_data.entries; 558 } else { 559 n->mac_table.multi_overflow = 1; 560 } 561 562 return VIRTIO_NET_OK; 563 } 564 565 static int virtio_net_handle_vlan_table(VirtIONet *n, uint8_t cmd, 566 struct iovec *iov, unsigned int iov_cnt) 567 { 568 uint16_t vid; 569 size_t s; 570 571 s = iov_to_buf(iov, iov_cnt, 0, &vid, sizeof(vid)); 572 vid = lduw_p(&vid); 573 if (s != sizeof(vid)) { 574 return VIRTIO_NET_ERR; 575 } 576 577 if (vid >= MAX_VLAN) 578 return VIRTIO_NET_ERR; 579 580 if (cmd == VIRTIO_NET_CTRL_VLAN_ADD) 581 n->vlans[vid >> 5] |= (1U << (vid & 0x1f)); 582 else if (cmd == VIRTIO_NET_CTRL_VLAN_DEL) 583 n->vlans[vid >> 5] &= ~(1U << (vid & 0x1f)); 584 else 585 return VIRTIO_NET_ERR; 586 587 return VIRTIO_NET_OK; 588 } 589 590 static int virtio_net_handle_mq(VirtIONet *n, uint8_t cmd, 591 struct iovec *iov, unsigned int iov_cnt) 592 { 593 VirtIODevice *vdev = VIRTIO_DEVICE(n); 594 struct virtio_net_ctrl_mq mq; 595 size_t s; 596 uint16_t queues; 597 598 s = iov_to_buf(iov, iov_cnt, 0, &mq, sizeof(mq)); 599 if (s != sizeof(mq)) { 600 return VIRTIO_NET_ERR; 601 } 602 603 if (cmd != VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET) { 604 return VIRTIO_NET_ERR; 605 } 606 607 queues = lduw_p(&mq.virtqueue_pairs); 608 609 if (queues < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN || 610 queues > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX || 611 queues > n->max_queues || 612 !n->multiqueue) { 613 return VIRTIO_NET_ERR; 614 } 615 616 n->curr_queues = queues; 617 /* stop the backend before changing the number of queues to avoid handling a 618 * disabled queue */ 619 virtio_net_set_status(vdev, vdev->status); 620 virtio_net_set_queues(n); 621 622 return VIRTIO_NET_OK; 623 } 624 static void virtio_net_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq) 625 { 626 VirtIONet *n = VIRTIO_NET(vdev); 627 struct virtio_net_ctrl_hdr ctrl; 628 virtio_net_ctrl_ack status = VIRTIO_NET_ERR; 629 VirtQueueElement elem; 630 size_t s; 631 struct iovec *iov; 632 unsigned int iov_cnt; 633 634 while (virtqueue_pop(vq, &elem)) { 635 if (iov_size(elem.in_sg, elem.in_num) < sizeof(status) || 636 iov_size(elem.out_sg, elem.out_num) < sizeof(ctrl)) { 637 error_report("virtio-net ctrl missing headers"); 638 exit(1); 639 } 640 641 iov = elem.out_sg; 642 iov_cnt = elem.out_num; 643 s = iov_to_buf(iov, iov_cnt, 0, &ctrl, sizeof(ctrl)); 644 iov_discard_front(&iov, &iov_cnt, sizeof(ctrl)); 645 if (s != sizeof(ctrl)) { 646 status = VIRTIO_NET_ERR; 647 } else if (ctrl.class == VIRTIO_NET_CTRL_RX) { 648 status = virtio_net_handle_rx_mode(n, ctrl.cmd, iov, iov_cnt); 649 } else if (ctrl.class == VIRTIO_NET_CTRL_MAC) { 650 status = virtio_net_handle_mac(n, ctrl.cmd, iov, iov_cnt); 651 } else if (ctrl.class == VIRTIO_NET_CTRL_VLAN) { 652 status = virtio_net_handle_vlan_table(n, ctrl.cmd, iov, iov_cnt); 653 } else if (ctrl.class == VIRTIO_NET_CTRL_MQ) { 654 status = virtio_net_handle_mq(n, ctrl.cmd, iov, iov_cnt); 655 } else if (ctrl.class == VIRTIO_NET_CTRL_GUEST_OFFLOADS) { 656 status = virtio_net_handle_offloads(n, ctrl.cmd, iov, iov_cnt); 657 } 658 659 s = iov_from_buf(elem.in_sg, elem.in_num, 0, &status, sizeof(status)); 660 assert(s == sizeof(status)); 661 662 virtqueue_push(vq, &elem, sizeof(status)); 663 virtio_notify(vdev, vq); 664 } 665 } 666 667 /* RX */ 668 669 static void virtio_net_handle_rx(VirtIODevice *vdev, VirtQueue *vq) 670 { 671 VirtIONet *n = VIRTIO_NET(vdev); 672 int queue_index = vq2q(virtio_get_queue_index(vq)); 673 674 qemu_flush_queued_packets(qemu_get_subqueue(n->nic, queue_index)); 675 } 676 677 static int virtio_net_can_receive(NetClientState *nc) 678 { 679 VirtIONet *n = qemu_get_nic_opaque(nc); 680 VirtIODevice *vdev = VIRTIO_DEVICE(n); 681 VirtIONetQueue *q = virtio_net_get_subqueue(nc); 682 683 if (!vdev->vm_running) { 684 return 0; 685 } 686 687 if (nc->queue_index >= n->curr_queues) { 688 return 0; 689 } 690 691 if (!virtio_queue_ready(q->rx_vq) || 692 !(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK)) { 693 return 0; 694 } 695 696 return 1; 697 } 698 699 static int virtio_net_has_buffers(VirtIONetQueue *q, int bufsize) 700 { 701 VirtIONet *n = q->n; 702 if (virtio_queue_empty(q->rx_vq) || 703 (n->mergeable_rx_bufs && 704 !virtqueue_avail_bytes(q->rx_vq, bufsize, 0))) { 705 virtio_queue_set_notification(q->rx_vq, 1); 706 707 /* To avoid a race condition where the guest has made some buffers 708 * available after the above check but before notification was 709 * enabled, check for available buffers again. 710 */ 711 if (virtio_queue_empty(q->rx_vq) || 712 (n->mergeable_rx_bufs && 713 !virtqueue_avail_bytes(q->rx_vq, bufsize, 0))) { 714 return 0; 715 } 716 } 717 718 virtio_queue_set_notification(q->rx_vq, 0); 719 return 1; 720 } 721 722 /* dhclient uses AF_PACKET but doesn't pass auxdata to the kernel so 723 * it never finds out that the packets don't have valid checksums. This 724 * causes dhclient to get upset. Fedora's carried a patch for ages to 725 * fix this with Xen but it hasn't appeared in an upstream release of 726 * dhclient yet. 727 * 728 * To avoid breaking existing guests, we catch udp packets and add 729 * checksums. This is terrible but it's better than hacking the guest 730 * kernels. 731 * 732 * N.B. if we introduce a zero-copy API, this operation is no longer free so 733 * we should provide a mechanism to disable it to avoid polluting the host 734 * cache. 735 */ 736 static void work_around_broken_dhclient(struct virtio_net_hdr *hdr, 737 uint8_t *buf, size_t size) 738 { 739 if ((hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) && /* missing csum */ 740 (size > 27 && size < 1500) && /* normal sized MTU */ 741 (buf[12] == 0x08 && buf[13] == 0x00) && /* ethertype == IPv4 */ 742 (buf[23] == 17) && /* ip.protocol == UDP */ 743 (buf[34] == 0 && buf[35] == 67)) { /* udp.srcport == bootps */ 744 net_checksum_calculate(buf, size); 745 hdr->flags &= ~VIRTIO_NET_HDR_F_NEEDS_CSUM; 746 } 747 } 748 749 static void receive_header(VirtIONet *n, const struct iovec *iov, int iov_cnt, 750 const void *buf, size_t size) 751 { 752 if (n->has_vnet_hdr) { 753 /* FIXME this cast is evil */ 754 void *wbuf = (void *)buf; 755 work_around_broken_dhclient(wbuf, wbuf + n->host_hdr_len, 756 size - n->host_hdr_len); 757 iov_from_buf(iov, iov_cnt, 0, buf, sizeof(struct virtio_net_hdr)); 758 } else { 759 struct virtio_net_hdr hdr = { 760 .flags = 0, 761 .gso_type = VIRTIO_NET_HDR_GSO_NONE 762 }; 763 iov_from_buf(iov, iov_cnt, 0, &hdr, sizeof hdr); 764 } 765 } 766 767 static int receive_filter(VirtIONet *n, const uint8_t *buf, int size) 768 { 769 static const uint8_t bcast[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; 770 static const uint8_t vlan[] = {0x81, 0x00}; 771 uint8_t *ptr = (uint8_t *)buf; 772 int i; 773 774 if (n->promisc) 775 return 1; 776 777 ptr += n->host_hdr_len; 778 779 if (!memcmp(&ptr[12], vlan, sizeof(vlan))) { 780 int vid = be16_to_cpup((uint16_t *)(ptr + 14)) & 0xfff; 781 if (!(n->vlans[vid >> 5] & (1U << (vid & 0x1f)))) 782 return 0; 783 } 784 785 if (ptr[0] & 1) { // multicast 786 if (!memcmp(ptr, bcast, sizeof(bcast))) { 787 return !n->nobcast; 788 } else if (n->nomulti) { 789 return 0; 790 } else if (n->allmulti || n->mac_table.multi_overflow) { 791 return 1; 792 } 793 794 for (i = n->mac_table.first_multi; i < n->mac_table.in_use; i++) { 795 if (!memcmp(ptr, &n->mac_table.macs[i * ETH_ALEN], ETH_ALEN)) { 796 return 1; 797 } 798 } 799 } else { // unicast 800 if (n->nouni) { 801 return 0; 802 } else if (n->alluni || n->mac_table.uni_overflow) { 803 return 1; 804 } else if (!memcmp(ptr, n->mac, ETH_ALEN)) { 805 return 1; 806 } 807 808 for (i = 0; i < n->mac_table.first_multi; i++) { 809 if (!memcmp(ptr, &n->mac_table.macs[i * ETH_ALEN], ETH_ALEN)) { 810 return 1; 811 } 812 } 813 } 814 815 return 0; 816 } 817 818 static ssize_t virtio_net_receive(NetClientState *nc, const uint8_t *buf, size_t size) 819 { 820 VirtIONet *n = qemu_get_nic_opaque(nc); 821 VirtIONetQueue *q = virtio_net_get_subqueue(nc); 822 VirtIODevice *vdev = VIRTIO_DEVICE(n); 823 struct iovec mhdr_sg[VIRTQUEUE_MAX_SIZE]; 824 struct virtio_net_hdr_mrg_rxbuf mhdr; 825 unsigned mhdr_cnt = 0; 826 size_t offset, i, guest_offset; 827 828 if (!virtio_net_can_receive(nc)) { 829 return -1; 830 } 831 832 /* hdr_len refers to the header we supply to the guest */ 833 if (!virtio_net_has_buffers(q, size + n->guest_hdr_len - n->host_hdr_len)) { 834 return 0; 835 } 836 837 if (!receive_filter(n, buf, size)) 838 return size; 839 840 offset = i = 0; 841 842 while (offset < size) { 843 VirtQueueElement elem; 844 int len, total; 845 const struct iovec *sg = elem.in_sg; 846 847 total = 0; 848 849 if (virtqueue_pop(q->rx_vq, &elem) == 0) { 850 if (i == 0) 851 return -1; 852 error_report("virtio-net unexpected empty queue: " 853 "i %zd mergeable %d offset %zd, size %zd, " 854 "guest hdr len %zd, host hdr len %zd guest features 0x%x", 855 i, n->mergeable_rx_bufs, offset, size, 856 n->guest_hdr_len, n->host_hdr_len, vdev->guest_features); 857 exit(1); 858 } 859 860 if (elem.in_num < 1) { 861 error_report("virtio-net receive queue contains no in buffers"); 862 exit(1); 863 } 864 865 if (i == 0) { 866 assert(offset == 0); 867 if (n->mergeable_rx_bufs) { 868 mhdr_cnt = iov_copy(mhdr_sg, ARRAY_SIZE(mhdr_sg), 869 sg, elem.in_num, 870 offsetof(typeof(mhdr), num_buffers), 871 sizeof(mhdr.num_buffers)); 872 } 873 874 receive_header(n, sg, elem.in_num, buf, size); 875 offset = n->host_hdr_len; 876 total += n->guest_hdr_len; 877 guest_offset = n->guest_hdr_len; 878 } else { 879 guest_offset = 0; 880 } 881 882 /* copy in packet. ugh */ 883 len = iov_from_buf(sg, elem.in_num, guest_offset, 884 buf + offset, size - offset); 885 total += len; 886 offset += len; 887 /* If buffers can't be merged, at this point we 888 * must have consumed the complete packet. 889 * Otherwise, drop it. */ 890 if (!n->mergeable_rx_bufs && offset < size) { 891 #if 0 892 error_report("virtio-net truncated non-mergeable packet: " 893 "i %zd mergeable %d offset %zd, size %zd, " 894 "guest hdr len %zd, host hdr len %zd", 895 i, n->mergeable_rx_bufs, 896 offset, size, n->guest_hdr_len, n->host_hdr_len); 897 #endif 898 return size; 899 } 900 901 /* signal other side */ 902 virtqueue_fill(q->rx_vq, &elem, total, i++); 903 } 904 905 if (mhdr_cnt) { 906 stw_p(&mhdr.num_buffers, i); 907 iov_from_buf(mhdr_sg, mhdr_cnt, 908 0, 909 &mhdr.num_buffers, sizeof mhdr.num_buffers); 910 } 911 912 virtqueue_flush(q->rx_vq, i); 913 virtio_notify(vdev, q->rx_vq); 914 915 return size; 916 } 917 918 static int32_t virtio_net_flush_tx(VirtIONetQueue *q); 919 920 static void virtio_net_tx_complete(NetClientState *nc, ssize_t len) 921 { 922 VirtIONet *n = qemu_get_nic_opaque(nc); 923 VirtIONetQueue *q = virtio_net_get_subqueue(nc); 924 VirtIODevice *vdev = VIRTIO_DEVICE(n); 925 926 virtqueue_push(q->tx_vq, &q->async_tx.elem, 0); 927 virtio_notify(vdev, q->tx_vq); 928 929 q->async_tx.elem.out_num = q->async_tx.len = 0; 930 931 virtio_queue_set_notification(q->tx_vq, 1); 932 virtio_net_flush_tx(q); 933 } 934 935 /* TX */ 936 static int32_t virtio_net_flush_tx(VirtIONetQueue *q) 937 { 938 VirtIONet *n = q->n; 939 VirtIODevice *vdev = VIRTIO_DEVICE(n); 940 VirtQueueElement elem; 941 int32_t num_packets = 0; 942 int queue_index = vq2q(virtio_get_queue_index(q->tx_vq)); 943 if (!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK)) { 944 return num_packets; 945 } 946 947 assert(vdev->vm_running); 948 949 if (q->async_tx.elem.out_num) { 950 virtio_queue_set_notification(q->tx_vq, 0); 951 return num_packets; 952 } 953 954 while (virtqueue_pop(q->tx_vq, &elem)) { 955 ssize_t ret, len; 956 unsigned int out_num = elem.out_num; 957 struct iovec *out_sg = &elem.out_sg[0]; 958 struct iovec sg[VIRTQUEUE_MAX_SIZE]; 959 960 if (out_num < 1) { 961 error_report("virtio-net header not in first element"); 962 exit(1); 963 } 964 965 /* 966 * If host wants to see the guest header as is, we can 967 * pass it on unchanged. Otherwise, copy just the parts 968 * that host is interested in. 969 */ 970 assert(n->host_hdr_len <= n->guest_hdr_len); 971 if (n->host_hdr_len != n->guest_hdr_len) { 972 unsigned sg_num = iov_copy(sg, ARRAY_SIZE(sg), 973 out_sg, out_num, 974 0, n->host_hdr_len); 975 sg_num += iov_copy(sg + sg_num, ARRAY_SIZE(sg) - sg_num, 976 out_sg, out_num, 977 n->guest_hdr_len, -1); 978 out_num = sg_num; 979 out_sg = sg; 980 } 981 982 len = n->guest_hdr_len; 983 984 ret = qemu_sendv_packet_async(qemu_get_subqueue(n->nic, queue_index), 985 out_sg, out_num, virtio_net_tx_complete); 986 if (ret == 0) { 987 virtio_queue_set_notification(q->tx_vq, 0); 988 q->async_tx.elem = elem; 989 q->async_tx.len = len; 990 return -EBUSY; 991 } 992 993 len += ret; 994 995 virtqueue_push(q->tx_vq, &elem, 0); 996 virtio_notify(vdev, q->tx_vq); 997 998 if (++num_packets >= n->tx_burst) { 999 break; 1000 } 1001 } 1002 return num_packets; 1003 } 1004 1005 static void virtio_net_handle_tx_timer(VirtIODevice *vdev, VirtQueue *vq) 1006 { 1007 VirtIONet *n = VIRTIO_NET(vdev); 1008 VirtIONetQueue *q = &n->vqs[vq2q(virtio_get_queue_index(vq))]; 1009 1010 /* This happens when device was stopped but VCPU wasn't. */ 1011 if (!vdev->vm_running) { 1012 q->tx_waiting = 1; 1013 return; 1014 } 1015 1016 if (q->tx_waiting) { 1017 virtio_queue_set_notification(vq, 1); 1018 qemu_del_timer(q->tx_timer); 1019 q->tx_waiting = 0; 1020 virtio_net_flush_tx(q); 1021 } else { 1022 qemu_mod_timer(q->tx_timer, 1023 qemu_get_clock_ns(vm_clock) + n->tx_timeout); 1024 q->tx_waiting = 1; 1025 virtio_queue_set_notification(vq, 0); 1026 } 1027 } 1028 1029 static void virtio_net_handle_tx_bh(VirtIODevice *vdev, VirtQueue *vq) 1030 { 1031 VirtIONet *n = VIRTIO_NET(vdev); 1032 VirtIONetQueue *q = &n->vqs[vq2q(virtio_get_queue_index(vq))]; 1033 1034 if (unlikely(q->tx_waiting)) { 1035 return; 1036 } 1037 q->tx_waiting = 1; 1038 /* This happens when device was stopped but VCPU wasn't. */ 1039 if (!vdev->vm_running) { 1040 return; 1041 } 1042 virtio_queue_set_notification(vq, 0); 1043 qemu_bh_schedule(q->tx_bh); 1044 } 1045 1046 static void virtio_net_tx_timer(void *opaque) 1047 { 1048 VirtIONetQueue *q = opaque; 1049 VirtIONet *n = q->n; 1050 VirtIODevice *vdev = VIRTIO_DEVICE(n); 1051 assert(vdev->vm_running); 1052 1053 q->tx_waiting = 0; 1054 1055 /* Just in case the driver is not ready on more */ 1056 if (!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK)) { 1057 return; 1058 } 1059 1060 virtio_queue_set_notification(q->tx_vq, 1); 1061 virtio_net_flush_tx(q); 1062 } 1063 1064 static void virtio_net_tx_bh(void *opaque) 1065 { 1066 VirtIONetQueue *q = opaque; 1067 VirtIONet *n = q->n; 1068 VirtIODevice *vdev = VIRTIO_DEVICE(n); 1069 int32_t ret; 1070 1071 assert(vdev->vm_running); 1072 1073 q->tx_waiting = 0; 1074 1075 /* Just in case the driver is not ready on more */ 1076 if (unlikely(!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK))) { 1077 return; 1078 } 1079 1080 ret = virtio_net_flush_tx(q); 1081 if (ret == -EBUSY) { 1082 return; /* Notification re-enable handled by tx_complete */ 1083 } 1084 1085 /* If we flush a full burst of packets, assume there are 1086 * more coming and immediately reschedule */ 1087 if (ret >= n->tx_burst) { 1088 qemu_bh_schedule(q->tx_bh); 1089 q->tx_waiting = 1; 1090 return; 1091 } 1092 1093 /* If less than a full burst, re-enable notification and flush 1094 * anything that may have come in while we weren't looking. If 1095 * we find something, assume the guest is still active and reschedule */ 1096 virtio_queue_set_notification(q->tx_vq, 1); 1097 if (virtio_net_flush_tx(q) > 0) { 1098 virtio_queue_set_notification(q->tx_vq, 0); 1099 qemu_bh_schedule(q->tx_bh); 1100 q->tx_waiting = 1; 1101 } 1102 } 1103 1104 static void virtio_net_set_multiqueue(VirtIONet *n, int multiqueue) 1105 { 1106 VirtIODevice *vdev = VIRTIO_DEVICE(n); 1107 int i, max = multiqueue ? n->max_queues : 1; 1108 1109 n->multiqueue = multiqueue; 1110 1111 for (i = 2; i <= n->max_queues * 2 + 1; i++) { 1112 virtio_del_queue(vdev, i); 1113 } 1114 1115 for (i = 1; i < max; i++) { 1116 n->vqs[i].rx_vq = virtio_add_queue(vdev, 256, virtio_net_handle_rx); 1117 if (n->vqs[i].tx_timer) { 1118 n->vqs[i].tx_vq = 1119 virtio_add_queue(vdev, 256, virtio_net_handle_tx_timer); 1120 n->vqs[i].tx_timer = qemu_new_timer_ns(vm_clock, 1121 virtio_net_tx_timer, 1122 &n->vqs[i]); 1123 } else { 1124 n->vqs[i].tx_vq = 1125 virtio_add_queue(vdev, 256, virtio_net_handle_tx_bh); 1126 n->vqs[i].tx_bh = qemu_bh_new(virtio_net_tx_bh, &n->vqs[i]); 1127 } 1128 1129 n->vqs[i].tx_waiting = 0; 1130 n->vqs[i].n = n; 1131 } 1132 1133 /* Note: Minux Guests (version 3.2.1) use ctrl vq but don't ack 1134 * VIRTIO_NET_F_CTRL_VQ. Create ctrl vq unconditionally to avoid 1135 * breaking them. 1136 */ 1137 n->ctrl_vq = virtio_add_queue(vdev, 64, virtio_net_handle_ctrl); 1138 1139 virtio_net_set_queues(n); 1140 } 1141 1142 static void virtio_net_save(QEMUFile *f, void *opaque) 1143 { 1144 int i; 1145 VirtIONet *n = opaque; 1146 VirtIODevice *vdev = VIRTIO_DEVICE(n); 1147 1148 /* At this point, backend must be stopped, otherwise 1149 * it might keep writing to memory. */ 1150 assert(!n->vhost_started); 1151 virtio_save(vdev, f); 1152 1153 qemu_put_buffer(f, n->mac, ETH_ALEN); 1154 qemu_put_be32(f, n->vqs[0].tx_waiting); 1155 qemu_put_be32(f, n->mergeable_rx_bufs); 1156 qemu_put_be16(f, n->status); 1157 qemu_put_byte(f, n->promisc); 1158 qemu_put_byte(f, n->allmulti); 1159 qemu_put_be32(f, n->mac_table.in_use); 1160 qemu_put_buffer(f, n->mac_table.macs, n->mac_table.in_use * ETH_ALEN); 1161 qemu_put_buffer(f, (uint8_t *)n->vlans, MAX_VLAN >> 3); 1162 qemu_put_be32(f, n->has_vnet_hdr); 1163 qemu_put_byte(f, n->mac_table.multi_overflow); 1164 qemu_put_byte(f, n->mac_table.uni_overflow); 1165 qemu_put_byte(f, n->alluni); 1166 qemu_put_byte(f, n->nomulti); 1167 qemu_put_byte(f, n->nouni); 1168 qemu_put_byte(f, n->nobcast); 1169 qemu_put_byte(f, n->has_ufo); 1170 if (n->max_queues > 1) { 1171 qemu_put_be16(f, n->max_queues); 1172 qemu_put_be16(f, n->curr_queues); 1173 for (i = 1; i < n->curr_queues; i++) { 1174 qemu_put_be32(f, n->vqs[i].tx_waiting); 1175 } 1176 } 1177 1178 if ((1 << VIRTIO_NET_F_CTRL_GUEST_OFFLOADS) & vdev->guest_features) { 1179 qemu_put_be64(f, n->curr_guest_offloads); 1180 } 1181 } 1182 1183 static int virtio_net_load(QEMUFile *f, void *opaque, int version_id) 1184 { 1185 VirtIONet *n = opaque; 1186 VirtIODevice *vdev = VIRTIO_DEVICE(n); 1187 int ret, i, link_down; 1188 1189 if (version_id < 2 || version_id > VIRTIO_NET_VM_VERSION) 1190 return -EINVAL; 1191 1192 ret = virtio_load(vdev, f); 1193 if (ret) { 1194 return ret; 1195 } 1196 1197 qemu_get_buffer(f, n->mac, ETH_ALEN); 1198 n->vqs[0].tx_waiting = qemu_get_be32(f); 1199 1200 virtio_net_set_mrg_rx_bufs(n, qemu_get_be32(f)); 1201 1202 if (version_id >= 3) 1203 n->status = qemu_get_be16(f); 1204 1205 if (version_id >= 4) { 1206 if (version_id < 8) { 1207 n->promisc = qemu_get_be32(f); 1208 n->allmulti = qemu_get_be32(f); 1209 } else { 1210 n->promisc = qemu_get_byte(f); 1211 n->allmulti = qemu_get_byte(f); 1212 } 1213 } 1214 1215 if (version_id >= 5) { 1216 n->mac_table.in_use = qemu_get_be32(f); 1217 /* MAC_TABLE_ENTRIES may be different from the saved image */ 1218 if (n->mac_table.in_use <= MAC_TABLE_ENTRIES) { 1219 qemu_get_buffer(f, n->mac_table.macs, 1220 n->mac_table.in_use * ETH_ALEN); 1221 } else if (n->mac_table.in_use) { 1222 uint8_t *buf = g_malloc0(n->mac_table.in_use); 1223 qemu_get_buffer(f, buf, n->mac_table.in_use * ETH_ALEN); 1224 g_free(buf); 1225 n->mac_table.multi_overflow = n->mac_table.uni_overflow = 1; 1226 n->mac_table.in_use = 0; 1227 } 1228 } 1229 1230 if (version_id >= 6) 1231 qemu_get_buffer(f, (uint8_t *)n->vlans, MAX_VLAN >> 3); 1232 1233 if (version_id >= 7) { 1234 if (qemu_get_be32(f) && !peer_has_vnet_hdr(n)) { 1235 error_report("virtio-net: saved image requires vnet_hdr=on"); 1236 return -1; 1237 } 1238 } 1239 1240 if (version_id >= 9) { 1241 n->mac_table.multi_overflow = qemu_get_byte(f); 1242 n->mac_table.uni_overflow = qemu_get_byte(f); 1243 } 1244 1245 if (version_id >= 10) { 1246 n->alluni = qemu_get_byte(f); 1247 n->nomulti = qemu_get_byte(f); 1248 n->nouni = qemu_get_byte(f); 1249 n->nobcast = qemu_get_byte(f); 1250 } 1251 1252 if (version_id >= 11) { 1253 if (qemu_get_byte(f) && !peer_has_ufo(n)) { 1254 error_report("virtio-net: saved image requires TUN_F_UFO support"); 1255 return -1; 1256 } 1257 } 1258 1259 if (n->max_queues > 1) { 1260 if (n->max_queues != qemu_get_be16(f)) { 1261 error_report("virtio-net: different max_queues "); 1262 return -1; 1263 } 1264 1265 n->curr_queues = qemu_get_be16(f); 1266 for (i = 1; i < n->curr_queues; i++) { 1267 n->vqs[i].tx_waiting = qemu_get_be32(f); 1268 } 1269 } 1270 1271 if ((1 << VIRTIO_NET_F_CTRL_GUEST_OFFLOADS) & vdev->guest_features) { 1272 n->curr_guest_offloads = qemu_get_be64(f); 1273 } else { 1274 n->curr_guest_offloads = virtio_net_supported_guest_offloads(n); 1275 } 1276 1277 if (peer_has_vnet_hdr(n)) { 1278 virtio_net_apply_guest_offloads(n); 1279 } 1280 1281 virtio_net_set_queues(n); 1282 1283 /* Find the first multicast entry in the saved MAC filter */ 1284 for (i = 0; i < n->mac_table.in_use; i++) { 1285 if (n->mac_table.macs[i * ETH_ALEN] & 1) { 1286 break; 1287 } 1288 } 1289 n->mac_table.first_multi = i; 1290 1291 /* nc.link_down can't be migrated, so infer link_down according 1292 * to link status bit in n->status */ 1293 link_down = (n->status & VIRTIO_NET_S_LINK_UP) == 0; 1294 for (i = 0; i < n->max_queues; i++) { 1295 qemu_get_subqueue(n->nic, i)->link_down = link_down; 1296 } 1297 1298 return 0; 1299 } 1300 1301 static void virtio_net_cleanup(NetClientState *nc) 1302 { 1303 VirtIONet *n = qemu_get_nic_opaque(nc); 1304 1305 n->nic = NULL; 1306 } 1307 1308 static NetClientInfo net_virtio_info = { 1309 .type = NET_CLIENT_OPTIONS_KIND_NIC, 1310 .size = sizeof(NICState), 1311 .can_receive = virtio_net_can_receive, 1312 .receive = virtio_net_receive, 1313 .cleanup = virtio_net_cleanup, 1314 .link_status_changed = virtio_net_set_link_status, 1315 }; 1316 1317 static bool virtio_net_guest_notifier_pending(VirtIODevice *vdev, int idx) 1318 { 1319 VirtIONet *n = VIRTIO_NET(vdev); 1320 NetClientState *nc = qemu_get_subqueue(n->nic, vq2q(idx)); 1321 assert(n->vhost_started); 1322 return vhost_net_virtqueue_pending(tap_get_vhost_net(nc->peer), idx); 1323 } 1324 1325 static void virtio_net_guest_notifier_mask(VirtIODevice *vdev, int idx, 1326 bool mask) 1327 { 1328 VirtIONet *n = VIRTIO_NET(vdev); 1329 NetClientState *nc = qemu_get_subqueue(n->nic, vq2q(idx)); 1330 assert(n->vhost_started); 1331 vhost_net_virtqueue_mask(tap_get_vhost_net(nc->peer), 1332 vdev, idx, mask); 1333 } 1334 1335 void virtio_net_set_config_size(VirtIONet *n, uint32_t host_features) 1336 { 1337 int i, config_size = 0; 1338 host_features |= (1 << VIRTIO_NET_F_MAC); 1339 for (i = 0; feature_sizes[i].flags != 0; i++) { 1340 if (host_features & feature_sizes[i].flags) { 1341 config_size = MAX(feature_sizes[i].end, config_size); 1342 } 1343 } 1344 n->config_size = config_size; 1345 } 1346 1347 void virtio_net_set_netclient_name(VirtIONet *n, const char *name, 1348 const char *type) 1349 { 1350 /* 1351 * The name can be NULL, the netclient name will be type.x. 1352 */ 1353 assert(type != NULL); 1354 1355 if (n->netclient_name) { 1356 g_free(n->netclient_name); 1357 n->netclient_name = NULL; 1358 } 1359 if (n->netclient_type) { 1360 g_free(n->netclient_type); 1361 n->netclient_type = NULL; 1362 } 1363 1364 if (name != NULL) { 1365 n->netclient_name = g_strdup(name); 1366 } 1367 n->netclient_type = g_strdup(type); 1368 } 1369 1370 static int virtio_net_device_init(VirtIODevice *vdev) 1371 { 1372 int i; 1373 1374 DeviceState *qdev = DEVICE(vdev); 1375 VirtIONet *n = VIRTIO_NET(vdev); 1376 1377 virtio_init(VIRTIO_DEVICE(n), "virtio-net", VIRTIO_ID_NET, 1378 n->config_size); 1379 1380 n->max_queues = MAX(n->nic_conf.queues, 1); 1381 n->vqs = g_malloc0(sizeof(VirtIONetQueue) * n->max_queues); 1382 n->vqs[0].rx_vq = virtio_add_queue(vdev, 256, virtio_net_handle_rx); 1383 n->curr_queues = 1; 1384 n->vqs[0].n = n; 1385 n->tx_timeout = n->net_conf.txtimer; 1386 1387 if (n->net_conf.tx && strcmp(n->net_conf.tx, "timer") 1388 && strcmp(n->net_conf.tx, "bh")) { 1389 error_report("virtio-net: " 1390 "Unknown option tx=%s, valid options: \"timer\" \"bh\"", 1391 n->net_conf.tx); 1392 error_report("Defaulting to \"bh\""); 1393 } 1394 1395 if (n->net_conf.tx && !strcmp(n->net_conf.tx, "timer")) { 1396 n->vqs[0].tx_vq = virtio_add_queue(vdev, 256, 1397 virtio_net_handle_tx_timer); 1398 n->vqs[0].tx_timer = qemu_new_timer_ns(vm_clock, virtio_net_tx_timer, 1399 &n->vqs[0]); 1400 } else { 1401 n->vqs[0].tx_vq = virtio_add_queue(vdev, 256, 1402 virtio_net_handle_tx_bh); 1403 n->vqs[0].tx_bh = qemu_bh_new(virtio_net_tx_bh, &n->vqs[0]); 1404 } 1405 n->ctrl_vq = virtio_add_queue(vdev, 64, virtio_net_handle_ctrl); 1406 qemu_macaddr_default_if_unset(&n->nic_conf.macaddr); 1407 memcpy(&n->mac[0], &n->nic_conf.macaddr, sizeof(n->mac)); 1408 n->status = VIRTIO_NET_S_LINK_UP; 1409 1410 if (n->netclient_type) { 1411 /* 1412 * Happen when virtio_net_set_netclient_name has been called. 1413 */ 1414 n->nic = qemu_new_nic(&net_virtio_info, &n->nic_conf, 1415 n->netclient_type, n->netclient_name, n); 1416 } else { 1417 n->nic = qemu_new_nic(&net_virtio_info, &n->nic_conf, 1418 object_get_typename(OBJECT(qdev)), qdev->id, n); 1419 } 1420 1421 peer_test_vnet_hdr(n); 1422 if (peer_has_vnet_hdr(n)) { 1423 for (i = 0; i < n->max_queues; i++) { 1424 tap_using_vnet_hdr(qemu_get_subqueue(n->nic, i)->peer, true); 1425 } 1426 n->host_hdr_len = sizeof(struct virtio_net_hdr); 1427 } else { 1428 n->host_hdr_len = 0; 1429 } 1430 1431 qemu_format_nic_info_str(qemu_get_queue(n->nic), n->nic_conf.macaddr.a); 1432 1433 n->vqs[0].tx_waiting = 0; 1434 n->tx_burst = n->net_conf.txburst; 1435 virtio_net_set_mrg_rx_bufs(n, 0); 1436 n->promisc = 1; /* for compatibility */ 1437 1438 n->mac_table.macs = g_malloc0(MAC_TABLE_ENTRIES * ETH_ALEN); 1439 1440 n->vlans = g_malloc0(MAX_VLAN >> 3); 1441 1442 n->qdev = qdev; 1443 register_savevm(qdev, "virtio-net", -1, VIRTIO_NET_VM_VERSION, 1444 virtio_net_save, virtio_net_load, n); 1445 1446 add_boot_device_path(n->nic_conf.bootindex, qdev, "/ethernet-phy@0"); 1447 return 0; 1448 } 1449 1450 static int virtio_net_device_exit(DeviceState *qdev) 1451 { 1452 VirtIONet *n = VIRTIO_NET(qdev); 1453 VirtIODevice *vdev = VIRTIO_DEVICE(qdev); 1454 int i; 1455 1456 /* This will stop vhost backend if appropriate. */ 1457 virtio_net_set_status(vdev, 0); 1458 1459 unregister_savevm(qdev, "virtio-net", n); 1460 1461 if (n->netclient_name) { 1462 g_free(n->netclient_name); 1463 n->netclient_name = NULL; 1464 } 1465 if (n->netclient_type) { 1466 g_free(n->netclient_type); 1467 n->netclient_type = NULL; 1468 } 1469 1470 g_free(n->mac_table.macs); 1471 g_free(n->vlans); 1472 1473 for (i = 0; i < n->max_queues; i++) { 1474 VirtIONetQueue *q = &n->vqs[i]; 1475 NetClientState *nc = qemu_get_subqueue(n->nic, i); 1476 1477 qemu_purge_queued_packets(nc); 1478 1479 if (q->tx_timer) { 1480 qemu_del_timer(q->tx_timer); 1481 qemu_free_timer(q->tx_timer); 1482 } else { 1483 qemu_bh_delete(q->tx_bh); 1484 } 1485 } 1486 1487 g_free(n->vqs); 1488 qemu_del_nic(n->nic); 1489 virtio_cleanup(vdev); 1490 1491 return 0; 1492 } 1493 1494 static void virtio_net_instance_init(Object *obj) 1495 { 1496 VirtIONet *n = VIRTIO_NET(obj); 1497 1498 /* 1499 * The default config_size is sizeof(struct virtio_net_config). 1500 * Can be overriden with virtio_net_set_config_size. 1501 */ 1502 n->config_size = sizeof(struct virtio_net_config); 1503 } 1504 1505 static Property virtio_net_properties[] = { 1506 DEFINE_NIC_PROPERTIES(VirtIONet, nic_conf), 1507 DEFINE_PROP_UINT32("x-txtimer", VirtIONet, net_conf.txtimer, 1508 TX_TIMER_INTERVAL), 1509 DEFINE_PROP_INT32("x-txburst", VirtIONet, net_conf.txburst, TX_BURST), 1510 DEFINE_PROP_STRING("tx", VirtIONet, net_conf.tx), 1511 DEFINE_PROP_END_OF_LIST(), 1512 }; 1513 1514 static void virtio_net_class_init(ObjectClass *klass, void *data) 1515 { 1516 DeviceClass *dc = DEVICE_CLASS(klass); 1517 VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass); 1518 dc->exit = virtio_net_device_exit; 1519 dc->props = virtio_net_properties; 1520 vdc->init = virtio_net_device_init; 1521 vdc->get_config = virtio_net_get_config; 1522 vdc->set_config = virtio_net_set_config; 1523 vdc->get_features = virtio_net_get_features; 1524 vdc->set_features = virtio_net_set_features; 1525 vdc->bad_features = virtio_net_bad_features; 1526 vdc->reset = virtio_net_reset; 1527 vdc->set_status = virtio_net_set_status; 1528 vdc->guest_notifier_mask = virtio_net_guest_notifier_mask; 1529 vdc->guest_notifier_pending = virtio_net_guest_notifier_pending; 1530 } 1531 1532 static const TypeInfo virtio_net_info = { 1533 .name = TYPE_VIRTIO_NET, 1534 .parent = TYPE_VIRTIO_DEVICE, 1535 .instance_size = sizeof(VirtIONet), 1536 .instance_init = virtio_net_instance_init, 1537 .class_init = virtio_net_class_init, 1538 }; 1539 1540 static void virtio_register_types(void) 1541 { 1542 type_register_static(&virtio_net_info); 1543 } 1544 1545 type_init(virtio_register_types) 1546