1 /* A network driver using virtio. 2 * 3 * Copyright 2007 Rusty Russell <rusty@rustcorp.com.au> IBM Corporation 4 * 5 * This program is free software; you can redistribute it and/or modify 6 * it under the terms of the GNU General Public License as published by 7 * the Free Software Foundation; either version 2 of the License, or 8 * (at your option) any later version. 9 * 10 * This program is distributed in the hope that it will be useful, 11 * but WITHOUT ANY WARRANTY; without even the implied warranty of 12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 * GNU General Public License for more details. 14 * 15 * You should have received a copy of the GNU General Public License 16 * along with this program; if not, see <http://www.gnu.org/licenses/>. 17 */ 18 //#define DEBUG 19 #include <linux/netdevice.h> 20 #include <linux/etherdevice.h> 21 #include <linux/ethtool.h> 22 #include <linux/module.h> 23 #include <linux/virtio.h> 24 #include <linux/virtio_net.h> 25 #include <linux/bpf.h> 26 #include <linux/bpf_trace.h> 27 #include <linux/scatterlist.h> 28 #include <linux/if_vlan.h> 29 #include <linux/slab.h> 30 #include <linux/cpu.h> 31 #include <linux/average.h> 32 #include <linux/filter.h> 33 #include <net/route.h> 34 #include <net/xdp.h> 35 36 static int napi_weight = NAPI_POLL_WEIGHT; 37 module_param(napi_weight, int, 0444); 38 39 static bool csum = true, gso = true, napi_tx; 40 module_param(csum, bool, 0444); 41 module_param(gso, bool, 0444); 42 module_param(napi_tx, bool, 0644); 43 44 /* FIXME: MTU in config. */ 45 #define GOOD_PACKET_LEN (ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN) 46 #define GOOD_COPY_LEN 128 47 48 #define VIRTNET_RX_PAD (NET_IP_ALIGN + NET_SKB_PAD) 49 50 /* Amount of XDP headroom to prepend to packets for use by xdp_adjust_head */ 51 #define VIRTIO_XDP_HEADROOM 256 52 53 /* RX packet size EWMA. The average packet size is used to determine the packet 54 * buffer size when refilling RX rings. As the entire RX ring may be refilled 55 * at once, the weight is chosen so that the EWMA will be insensitive to short- 56 * term, transient changes in packet size. 57 */ 58 DECLARE_EWMA(pkt_len, 0, 64) 59 60 #define VIRTNET_DRIVER_VERSION "1.0.0" 61 62 static const unsigned long guest_offloads[] = { 63 VIRTIO_NET_F_GUEST_TSO4, 64 VIRTIO_NET_F_GUEST_TSO6, 65 VIRTIO_NET_F_GUEST_ECN, 66 VIRTIO_NET_F_GUEST_UFO 67 }; 68 69 struct virtnet_stats { 70 struct u64_stats_sync tx_syncp; 71 struct u64_stats_sync rx_syncp; 72 u64 tx_bytes; 73 u64 tx_packets; 74 75 u64 rx_bytes; 76 u64 rx_packets; 77 }; 78 79 /* Internal representation of a send virtqueue */ 80 struct send_queue { 81 /* Virtqueue associated with this send _queue */ 82 struct virtqueue *vq; 83 84 /* TX: fragments + linear part + virtio header */ 85 struct scatterlist sg[MAX_SKB_FRAGS + 2]; 86 87 /* Name of the send queue: output.$index */ 88 char name[40]; 89 90 struct napi_struct napi; 91 }; 92 93 /* Internal representation of a receive virtqueue */ 94 struct receive_queue { 95 /* Virtqueue associated with this receive_queue */ 96 struct virtqueue *vq; 97 98 struct napi_struct napi; 99 100 struct bpf_prog __rcu *xdp_prog; 101 102 /* Chain pages by the private ptr. */ 103 struct page *pages; 104 105 /* Average packet length for mergeable receive buffers. */ 106 struct ewma_pkt_len mrg_avg_pkt_len; 107 108 /* Page frag for packet buffer allocation. */ 109 struct page_frag alloc_frag; 110 111 /* RX: fragments + linear part + virtio header */ 112 struct scatterlist sg[MAX_SKB_FRAGS + 2]; 113 114 /* Min single buffer size for mergeable buffers case. */ 115 unsigned int min_buf_len; 116 117 /* Name of this receive queue: input.$index */ 118 char name[40]; 119 120 struct xdp_rxq_info xdp_rxq; 121 }; 122 123 struct virtnet_info { 124 struct virtio_device *vdev; 125 struct virtqueue *cvq; 126 struct net_device *dev; 127 struct send_queue *sq; 128 struct receive_queue *rq; 129 unsigned int status; 130 131 /* Max # of queue pairs supported by the device */ 132 u16 max_queue_pairs; 133 134 /* # of queue pairs currently used by the driver */ 135 u16 curr_queue_pairs; 136 137 /* # of XDP queue pairs currently used by the driver */ 138 u16 xdp_queue_pairs; 139 140 /* I like... big packets and I cannot lie! */ 141 bool big_packets; 142 143 /* Host will merge rx buffers for big packets (shake it! shake it!) */ 144 bool mergeable_rx_bufs; 145 146 /* Has control virtqueue */ 147 bool has_cvq; 148 149 /* Host can handle any s/g split between our header and packet data */ 150 bool any_header_sg; 151 152 /* Packet virtio header size */ 153 u8 hdr_len; 154 155 /* Active statistics */ 156 struct virtnet_stats __percpu *stats; 157 158 /* Work struct for refilling if we run low on memory. */ 159 struct delayed_work refill; 160 161 /* Work struct for config space updates */ 162 struct work_struct config_work; 163 164 /* Does the affinity hint is set for virtqueues? */ 165 bool affinity_hint_set; 166 167 /* CPU hotplug instances for online & dead */ 168 struct hlist_node node; 169 struct hlist_node node_dead; 170 171 /* Control VQ buffers: protected by the rtnl lock */ 172 struct virtio_net_ctrl_hdr ctrl_hdr; 173 virtio_net_ctrl_ack ctrl_status; 174 struct virtio_net_ctrl_mq ctrl_mq; 175 u8 ctrl_promisc; 176 u8 ctrl_allmulti; 177 u16 ctrl_vid; 178 u64 ctrl_offloads; 179 180 /* Ethtool settings */ 181 u8 duplex; 182 u32 speed; 183 184 unsigned long guest_offloads; 185 }; 186 187 struct padded_vnet_hdr { 188 struct virtio_net_hdr_mrg_rxbuf hdr; 189 /* 190 * hdr is in a separate sg buffer, and data sg buffer shares same page 191 * with this header sg. This padding makes next sg 16 byte aligned 192 * after the header. 193 */ 194 char padding[4]; 195 }; 196 197 /* Converting between virtqueue no. and kernel tx/rx queue no. 198 * 0:rx0 1:tx0 2:rx1 3:tx1 ... 2N:rxN 2N+1:txN 2N+2:cvq 199 */ 200 static int vq2txq(struct virtqueue *vq) 201 { 202 return (vq->index - 1) / 2; 203 } 204 205 static int txq2vq(int txq) 206 { 207 return txq * 2 + 1; 208 } 209 210 static int vq2rxq(struct virtqueue *vq) 211 { 212 return vq->index / 2; 213 } 214 215 static int rxq2vq(int rxq) 216 { 217 return rxq * 2; 218 } 219 220 static inline struct virtio_net_hdr_mrg_rxbuf *skb_vnet_hdr(struct sk_buff *skb) 221 { 222 return (struct virtio_net_hdr_mrg_rxbuf *)skb->cb; 223 } 224 225 /* 226 * private is used to chain pages for big packets, put the whole 227 * most recent used list in the beginning for reuse 228 */ 229 static void give_pages(struct receive_queue *rq, struct page *page) 230 { 231 struct page *end; 232 233 /* Find end of list, sew whole thing into vi->rq.pages. */ 234 for (end = page; end->private; end = (struct page *)end->private); 235 end->private = (unsigned long)rq->pages; 236 rq->pages = page; 237 } 238 239 static struct page *get_a_page(struct receive_queue *rq, gfp_t gfp_mask) 240 { 241 struct page *p = rq->pages; 242 243 if (p) { 244 rq->pages = (struct page *)p->private; 245 /* clear private here, it is used to chain pages */ 246 p->private = 0; 247 } else 248 p = alloc_page(gfp_mask); 249 return p; 250 } 251 252 static void virtqueue_napi_schedule(struct napi_struct *napi, 253 struct virtqueue *vq) 254 { 255 if (napi_schedule_prep(napi)) { 256 virtqueue_disable_cb(vq); 257 __napi_schedule(napi); 258 } 259 } 260 261 static void virtqueue_napi_complete(struct napi_struct *napi, 262 struct virtqueue *vq, int processed) 263 { 264 int opaque; 265 266 opaque = virtqueue_enable_cb_prepare(vq); 267 if (napi_complete_done(napi, processed)) { 268 if (unlikely(virtqueue_poll(vq, opaque))) 269 virtqueue_napi_schedule(napi, vq); 270 } else { 271 virtqueue_disable_cb(vq); 272 } 273 } 274 275 static void skb_xmit_done(struct virtqueue *vq) 276 { 277 struct virtnet_info *vi = vq->vdev->priv; 278 struct napi_struct *napi = &vi->sq[vq2txq(vq)].napi; 279 280 /* Suppress further interrupts. */ 281 virtqueue_disable_cb(vq); 282 283 if (napi->weight) 284 virtqueue_napi_schedule(napi, vq); 285 else 286 /* We were probably waiting for more output buffers. */ 287 netif_wake_subqueue(vi->dev, vq2txq(vq)); 288 } 289 290 #define MRG_CTX_HEADER_SHIFT 22 291 static void *mergeable_len_to_ctx(unsigned int truesize, 292 unsigned int headroom) 293 { 294 return (void *)(unsigned long)((headroom << MRG_CTX_HEADER_SHIFT) | truesize); 295 } 296 297 static unsigned int mergeable_ctx_to_headroom(void *mrg_ctx) 298 { 299 return (unsigned long)mrg_ctx >> MRG_CTX_HEADER_SHIFT; 300 } 301 302 static unsigned int mergeable_ctx_to_truesize(void *mrg_ctx) 303 { 304 return (unsigned long)mrg_ctx & ((1 << MRG_CTX_HEADER_SHIFT) - 1); 305 } 306 307 /* Called from bottom half context */ 308 static struct sk_buff *page_to_skb(struct virtnet_info *vi, 309 struct receive_queue *rq, 310 struct page *page, unsigned int offset, 311 unsigned int len, unsigned int truesize) 312 { 313 struct sk_buff *skb; 314 struct virtio_net_hdr_mrg_rxbuf *hdr; 315 unsigned int copy, hdr_len, hdr_padded_len; 316 char *p; 317 318 p = page_address(page) + offset; 319 320 /* copy small packet so we can reuse these pages for small data */ 321 skb = napi_alloc_skb(&rq->napi, GOOD_COPY_LEN); 322 if (unlikely(!skb)) 323 return NULL; 324 325 hdr = skb_vnet_hdr(skb); 326 327 hdr_len = vi->hdr_len; 328 if (vi->mergeable_rx_bufs) 329 hdr_padded_len = sizeof(*hdr); 330 else 331 hdr_padded_len = sizeof(struct padded_vnet_hdr); 332 333 memcpy(hdr, p, hdr_len); 334 335 len -= hdr_len; 336 offset += hdr_padded_len; 337 p += hdr_padded_len; 338 339 copy = len; 340 if (copy > skb_tailroom(skb)) 341 copy = skb_tailroom(skb); 342 skb_put_data(skb, p, copy); 343 344 len -= copy; 345 offset += copy; 346 347 if (vi->mergeable_rx_bufs) { 348 if (len) 349 skb_add_rx_frag(skb, 0, page, offset, len, truesize); 350 else 351 put_page(page); 352 return skb; 353 } 354 355 /* 356 * Verify that we can indeed put this data into a skb. 357 * This is here to handle cases when the device erroneously 358 * tries to receive more than is possible. This is usually 359 * the case of a broken device. 360 */ 361 if (unlikely(len > MAX_SKB_FRAGS * PAGE_SIZE)) { 362 net_dbg_ratelimited("%s: too much data\n", skb->dev->name); 363 dev_kfree_skb(skb); 364 return NULL; 365 } 366 BUG_ON(offset >= PAGE_SIZE); 367 while (len) { 368 unsigned int frag_size = min((unsigned)PAGE_SIZE - offset, len); 369 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, offset, 370 frag_size, truesize); 371 len -= frag_size; 372 page = (struct page *)page->private; 373 offset = 0; 374 } 375 376 if (page) 377 give_pages(rq, page); 378 379 return skb; 380 } 381 382 static void virtnet_xdp_flush(struct net_device *dev) 383 { 384 struct virtnet_info *vi = netdev_priv(dev); 385 struct send_queue *sq; 386 unsigned int qp; 387 388 qp = vi->curr_queue_pairs - vi->xdp_queue_pairs + smp_processor_id(); 389 sq = &vi->sq[qp]; 390 391 virtqueue_kick(sq->vq); 392 } 393 394 static bool __virtnet_xdp_xmit(struct virtnet_info *vi, 395 struct xdp_buff *xdp) 396 { 397 struct virtio_net_hdr_mrg_rxbuf *hdr; 398 unsigned int len; 399 struct send_queue *sq; 400 unsigned int qp; 401 void *xdp_sent; 402 int err; 403 404 qp = vi->curr_queue_pairs - vi->xdp_queue_pairs + smp_processor_id(); 405 sq = &vi->sq[qp]; 406 407 /* Free up any pending old buffers before queueing new ones. */ 408 while ((xdp_sent = virtqueue_get_buf(sq->vq, &len)) != NULL) { 409 struct page *sent_page = virt_to_head_page(xdp_sent); 410 411 put_page(sent_page); 412 } 413 414 xdp->data -= vi->hdr_len; 415 /* Zero header and leave csum up to XDP layers */ 416 hdr = xdp->data; 417 memset(hdr, 0, vi->hdr_len); 418 419 sg_init_one(sq->sg, xdp->data, xdp->data_end - xdp->data); 420 421 err = virtqueue_add_outbuf(sq->vq, sq->sg, 1, xdp->data, GFP_ATOMIC); 422 if (unlikely(err)) { 423 struct page *page = virt_to_head_page(xdp->data); 424 425 put_page(page); 426 return false; 427 } 428 429 return true; 430 } 431 432 static int virtnet_xdp_xmit(struct net_device *dev, struct xdp_buff *xdp) 433 { 434 struct virtnet_info *vi = netdev_priv(dev); 435 bool sent = __virtnet_xdp_xmit(vi, xdp); 436 437 if (!sent) 438 return -ENOSPC; 439 return 0; 440 } 441 442 static unsigned int virtnet_get_headroom(struct virtnet_info *vi) 443 { 444 return vi->xdp_queue_pairs ? VIRTIO_XDP_HEADROOM : 0; 445 } 446 447 /* We copy the packet for XDP in the following cases: 448 * 449 * 1) Packet is scattered across multiple rx buffers. 450 * 2) Headroom space is insufficient. 451 * 452 * This is inefficient but it's a temporary condition that 453 * we hit right after XDP is enabled and until queue is refilled 454 * with large buffers with sufficient headroom - so it should affect 455 * at most queue size packets. 456 * Afterwards, the conditions to enable 457 * XDP should preclude the underlying device from sending packets 458 * across multiple buffers (num_buf > 1), and we make sure buffers 459 * have enough headroom. 460 */ 461 static struct page *xdp_linearize_page(struct receive_queue *rq, 462 u16 *num_buf, 463 struct page *p, 464 int offset, 465 int page_off, 466 unsigned int *len) 467 { 468 struct page *page = alloc_page(GFP_ATOMIC); 469 470 if (!page) 471 return NULL; 472 473 memcpy(page_address(page) + page_off, page_address(p) + offset, *len); 474 page_off += *len; 475 476 while (--*num_buf) { 477 unsigned int buflen; 478 void *buf; 479 int off; 480 481 buf = virtqueue_get_buf(rq->vq, &buflen); 482 if (unlikely(!buf)) 483 goto err_buf; 484 485 p = virt_to_head_page(buf); 486 off = buf - page_address(p); 487 488 /* guard against a misconfigured or uncooperative backend that 489 * is sending packet larger than the MTU. 490 */ 491 if ((page_off + buflen) > PAGE_SIZE) { 492 put_page(p); 493 goto err_buf; 494 } 495 496 memcpy(page_address(page) + page_off, 497 page_address(p) + off, buflen); 498 page_off += buflen; 499 put_page(p); 500 } 501 502 /* Headroom does not contribute to packet length */ 503 *len = page_off - VIRTIO_XDP_HEADROOM; 504 return page; 505 err_buf: 506 __free_pages(page, 0); 507 return NULL; 508 } 509 510 static struct sk_buff *receive_small(struct net_device *dev, 511 struct virtnet_info *vi, 512 struct receive_queue *rq, 513 void *buf, void *ctx, 514 unsigned int len, 515 bool *xdp_xmit) 516 { 517 struct sk_buff *skb; 518 struct bpf_prog *xdp_prog; 519 unsigned int xdp_headroom = (unsigned long)ctx; 520 unsigned int header_offset = VIRTNET_RX_PAD + xdp_headroom; 521 unsigned int headroom = vi->hdr_len + header_offset; 522 unsigned int buflen = SKB_DATA_ALIGN(GOOD_PACKET_LEN + headroom) + 523 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); 524 struct page *page = virt_to_head_page(buf); 525 unsigned int delta = 0, err; 526 struct page *xdp_page; 527 len -= vi->hdr_len; 528 529 rcu_read_lock(); 530 xdp_prog = rcu_dereference(rq->xdp_prog); 531 if (xdp_prog) { 532 struct virtio_net_hdr_mrg_rxbuf *hdr = buf + header_offset; 533 struct xdp_buff xdp; 534 void *orig_data; 535 u32 act; 536 537 if (unlikely(hdr->hdr.gso_type || hdr->hdr.flags)) 538 goto err_xdp; 539 540 if (unlikely(xdp_headroom < virtnet_get_headroom(vi))) { 541 int offset = buf - page_address(page) + header_offset; 542 unsigned int tlen = len + vi->hdr_len; 543 u16 num_buf = 1; 544 545 xdp_headroom = virtnet_get_headroom(vi); 546 header_offset = VIRTNET_RX_PAD + xdp_headroom; 547 headroom = vi->hdr_len + header_offset; 548 buflen = SKB_DATA_ALIGN(GOOD_PACKET_LEN + headroom) + 549 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); 550 xdp_page = xdp_linearize_page(rq, &num_buf, page, 551 offset, header_offset, 552 &tlen); 553 if (!xdp_page) 554 goto err_xdp; 555 556 buf = page_address(xdp_page); 557 put_page(page); 558 page = xdp_page; 559 } 560 561 xdp.data_hard_start = buf + VIRTNET_RX_PAD + vi->hdr_len; 562 xdp.data = xdp.data_hard_start + xdp_headroom; 563 xdp_set_data_meta_invalid(&xdp); 564 xdp.data_end = xdp.data + len; 565 xdp.rxq = &rq->xdp_rxq; 566 orig_data = xdp.data; 567 act = bpf_prog_run_xdp(xdp_prog, &xdp); 568 569 switch (act) { 570 case XDP_PASS: 571 /* Recalculate length in case bpf program changed it */ 572 delta = orig_data - xdp.data; 573 break; 574 case XDP_TX: 575 if (unlikely(!__virtnet_xdp_xmit(vi, &xdp))) 576 trace_xdp_exception(vi->dev, xdp_prog, act); 577 else 578 *xdp_xmit = true; 579 rcu_read_unlock(); 580 goto xdp_xmit; 581 case XDP_REDIRECT: 582 err = xdp_do_redirect(dev, &xdp, xdp_prog); 583 if (!err) 584 *xdp_xmit = true; 585 rcu_read_unlock(); 586 goto xdp_xmit; 587 default: 588 bpf_warn_invalid_xdp_action(act); 589 case XDP_ABORTED: 590 trace_xdp_exception(vi->dev, xdp_prog, act); 591 case XDP_DROP: 592 goto err_xdp; 593 } 594 } 595 rcu_read_unlock(); 596 597 skb = build_skb(buf, buflen); 598 if (!skb) { 599 put_page(page); 600 goto err; 601 } 602 skb_reserve(skb, headroom - delta); 603 skb_put(skb, len + delta); 604 if (!delta) { 605 buf += header_offset; 606 memcpy(skb_vnet_hdr(skb), buf, vi->hdr_len); 607 } /* keep zeroed vnet hdr since packet was changed by bpf */ 608 609 err: 610 return skb; 611 612 err_xdp: 613 rcu_read_unlock(); 614 dev->stats.rx_dropped++; 615 put_page(page); 616 xdp_xmit: 617 return NULL; 618 } 619 620 static struct sk_buff *receive_big(struct net_device *dev, 621 struct virtnet_info *vi, 622 struct receive_queue *rq, 623 void *buf, 624 unsigned int len) 625 { 626 struct page *page = buf; 627 struct sk_buff *skb = page_to_skb(vi, rq, page, 0, len, PAGE_SIZE); 628 629 if (unlikely(!skb)) 630 goto err; 631 632 return skb; 633 634 err: 635 dev->stats.rx_dropped++; 636 give_pages(rq, page); 637 return NULL; 638 } 639 640 static struct sk_buff *receive_mergeable(struct net_device *dev, 641 struct virtnet_info *vi, 642 struct receive_queue *rq, 643 void *buf, 644 void *ctx, 645 unsigned int len, 646 bool *xdp_xmit) 647 { 648 struct virtio_net_hdr_mrg_rxbuf *hdr = buf; 649 u16 num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers); 650 struct page *page = virt_to_head_page(buf); 651 int offset = buf - page_address(page); 652 struct sk_buff *head_skb, *curr_skb; 653 struct bpf_prog *xdp_prog; 654 unsigned int truesize; 655 unsigned int headroom = mergeable_ctx_to_headroom(ctx); 656 int err; 657 658 head_skb = NULL; 659 660 rcu_read_lock(); 661 xdp_prog = rcu_dereference(rq->xdp_prog); 662 if (xdp_prog) { 663 struct page *xdp_page; 664 struct xdp_buff xdp; 665 void *data; 666 u32 act; 667 668 /* This happens when rx buffer size is underestimated */ 669 if (unlikely(num_buf > 1 || 670 headroom < virtnet_get_headroom(vi))) { 671 /* linearize data for XDP */ 672 xdp_page = xdp_linearize_page(rq, &num_buf, 673 page, offset, 674 VIRTIO_XDP_HEADROOM, 675 &len); 676 if (!xdp_page) 677 goto err_xdp; 678 offset = VIRTIO_XDP_HEADROOM; 679 } else { 680 xdp_page = page; 681 } 682 683 /* Transient failure which in theory could occur if 684 * in-flight packets from before XDP was enabled reach 685 * the receive path after XDP is loaded. In practice I 686 * was not able to create this condition. 687 */ 688 if (unlikely(hdr->hdr.gso_type)) 689 goto err_xdp; 690 691 /* Allow consuming headroom but reserve enough space to push 692 * the descriptor on if we get an XDP_TX return code. 693 */ 694 data = page_address(xdp_page) + offset; 695 xdp.data_hard_start = data - VIRTIO_XDP_HEADROOM + vi->hdr_len; 696 xdp.data = data + vi->hdr_len; 697 xdp_set_data_meta_invalid(&xdp); 698 xdp.data_end = xdp.data + (len - vi->hdr_len); 699 xdp.rxq = &rq->xdp_rxq; 700 701 act = bpf_prog_run_xdp(xdp_prog, &xdp); 702 703 if (act != XDP_PASS) 704 ewma_pkt_len_add(&rq->mrg_avg_pkt_len, len); 705 706 switch (act) { 707 case XDP_PASS: 708 /* recalculate offset to account for any header 709 * adjustments. Note other cases do not build an 710 * skb and avoid using offset 711 */ 712 offset = xdp.data - 713 page_address(xdp_page) - vi->hdr_len; 714 715 /* We can only create skb based on xdp_page. */ 716 if (unlikely(xdp_page != page)) { 717 rcu_read_unlock(); 718 put_page(page); 719 head_skb = page_to_skb(vi, rq, xdp_page, 720 offset, len, PAGE_SIZE); 721 return head_skb; 722 } 723 break; 724 case XDP_TX: 725 if (unlikely(!__virtnet_xdp_xmit(vi, &xdp))) 726 trace_xdp_exception(vi->dev, xdp_prog, act); 727 else 728 *xdp_xmit = true; 729 if (unlikely(xdp_page != page)) 730 goto err_xdp; 731 rcu_read_unlock(); 732 goto xdp_xmit; 733 case XDP_REDIRECT: 734 err = xdp_do_redirect(dev, &xdp, xdp_prog); 735 if (!err) 736 *xdp_xmit = true; 737 rcu_read_unlock(); 738 goto xdp_xmit; 739 default: 740 bpf_warn_invalid_xdp_action(act); 741 case XDP_ABORTED: 742 trace_xdp_exception(vi->dev, xdp_prog, act); 743 case XDP_DROP: 744 if (unlikely(xdp_page != page)) 745 __free_pages(xdp_page, 0); 746 goto err_xdp; 747 } 748 } 749 rcu_read_unlock(); 750 751 truesize = mergeable_ctx_to_truesize(ctx); 752 if (unlikely(len > truesize)) { 753 pr_debug("%s: rx error: len %u exceeds truesize %lu\n", 754 dev->name, len, (unsigned long)ctx); 755 dev->stats.rx_length_errors++; 756 goto err_skb; 757 } 758 759 head_skb = page_to_skb(vi, rq, page, offset, len, truesize); 760 curr_skb = head_skb; 761 762 if (unlikely(!curr_skb)) 763 goto err_skb; 764 while (--num_buf) { 765 int num_skb_frags; 766 767 buf = virtqueue_get_buf_ctx(rq->vq, &len, &ctx); 768 if (unlikely(!buf)) { 769 pr_debug("%s: rx error: %d buffers out of %d missing\n", 770 dev->name, num_buf, 771 virtio16_to_cpu(vi->vdev, 772 hdr->num_buffers)); 773 dev->stats.rx_length_errors++; 774 goto err_buf; 775 } 776 777 page = virt_to_head_page(buf); 778 779 truesize = mergeable_ctx_to_truesize(ctx); 780 if (unlikely(len > truesize)) { 781 pr_debug("%s: rx error: len %u exceeds truesize %lu\n", 782 dev->name, len, (unsigned long)ctx); 783 dev->stats.rx_length_errors++; 784 goto err_skb; 785 } 786 787 num_skb_frags = skb_shinfo(curr_skb)->nr_frags; 788 if (unlikely(num_skb_frags == MAX_SKB_FRAGS)) { 789 struct sk_buff *nskb = alloc_skb(0, GFP_ATOMIC); 790 791 if (unlikely(!nskb)) 792 goto err_skb; 793 if (curr_skb == head_skb) 794 skb_shinfo(curr_skb)->frag_list = nskb; 795 else 796 curr_skb->next = nskb; 797 curr_skb = nskb; 798 head_skb->truesize += nskb->truesize; 799 num_skb_frags = 0; 800 } 801 if (curr_skb != head_skb) { 802 head_skb->data_len += len; 803 head_skb->len += len; 804 head_skb->truesize += truesize; 805 } 806 offset = buf - page_address(page); 807 if (skb_can_coalesce(curr_skb, num_skb_frags, page, offset)) { 808 put_page(page); 809 skb_coalesce_rx_frag(curr_skb, num_skb_frags - 1, 810 len, truesize); 811 } else { 812 skb_add_rx_frag(curr_skb, num_skb_frags, page, 813 offset, len, truesize); 814 } 815 } 816 817 ewma_pkt_len_add(&rq->mrg_avg_pkt_len, head_skb->len); 818 return head_skb; 819 820 err_xdp: 821 rcu_read_unlock(); 822 err_skb: 823 put_page(page); 824 while (--num_buf) { 825 buf = virtqueue_get_buf(rq->vq, &len); 826 if (unlikely(!buf)) { 827 pr_debug("%s: rx error: %d buffers missing\n", 828 dev->name, num_buf); 829 dev->stats.rx_length_errors++; 830 break; 831 } 832 page = virt_to_head_page(buf); 833 put_page(page); 834 } 835 err_buf: 836 dev->stats.rx_dropped++; 837 dev_kfree_skb(head_skb); 838 xdp_xmit: 839 return NULL; 840 } 841 842 static int receive_buf(struct virtnet_info *vi, struct receive_queue *rq, 843 void *buf, unsigned int len, void **ctx, bool *xdp_xmit) 844 { 845 struct net_device *dev = vi->dev; 846 struct sk_buff *skb; 847 struct virtio_net_hdr_mrg_rxbuf *hdr; 848 int ret; 849 850 if (unlikely(len < vi->hdr_len + ETH_HLEN)) { 851 pr_debug("%s: short packet %i\n", dev->name, len); 852 dev->stats.rx_length_errors++; 853 if (vi->mergeable_rx_bufs) { 854 put_page(virt_to_head_page(buf)); 855 } else if (vi->big_packets) { 856 give_pages(rq, buf); 857 } else { 858 put_page(virt_to_head_page(buf)); 859 } 860 return 0; 861 } 862 863 if (vi->mergeable_rx_bufs) 864 skb = receive_mergeable(dev, vi, rq, buf, ctx, len, xdp_xmit); 865 else if (vi->big_packets) 866 skb = receive_big(dev, vi, rq, buf, len); 867 else 868 skb = receive_small(dev, vi, rq, buf, ctx, len, xdp_xmit); 869 870 if (unlikely(!skb)) 871 return 0; 872 873 hdr = skb_vnet_hdr(skb); 874 875 ret = skb->len; 876 877 if (hdr->hdr.flags & VIRTIO_NET_HDR_F_DATA_VALID) 878 skb->ip_summed = CHECKSUM_UNNECESSARY; 879 880 if (virtio_net_hdr_to_skb(skb, &hdr->hdr, 881 virtio_is_little_endian(vi->vdev))) { 882 net_warn_ratelimited("%s: bad gso: type: %u, size: %u\n", 883 dev->name, hdr->hdr.gso_type, 884 hdr->hdr.gso_size); 885 goto frame_err; 886 } 887 888 skb->protocol = eth_type_trans(skb, dev); 889 pr_debug("Receiving skb proto 0x%04x len %i type %i\n", 890 ntohs(skb->protocol), skb->len, skb->pkt_type); 891 892 napi_gro_receive(&rq->napi, skb); 893 return ret; 894 895 frame_err: 896 dev->stats.rx_frame_errors++; 897 dev_kfree_skb(skb); 898 return 0; 899 } 900 901 /* Unlike mergeable buffers, all buffers are allocated to the 902 * same size, except for the headroom. For this reason we do 903 * not need to use mergeable_len_to_ctx here - it is enough 904 * to store the headroom as the context ignoring the truesize. 905 */ 906 static int add_recvbuf_small(struct virtnet_info *vi, struct receive_queue *rq, 907 gfp_t gfp) 908 { 909 struct page_frag *alloc_frag = &rq->alloc_frag; 910 char *buf; 911 unsigned int xdp_headroom = virtnet_get_headroom(vi); 912 void *ctx = (void *)(unsigned long)xdp_headroom; 913 int len = vi->hdr_len + VIRTNET_RX_PAD + GOOD_PACKET_LEN + xdp_headroom; 914 int err; 915 916 len = SKB_DATA_ALIGN(len) + 917 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); 918 if (unlikely(!skb_page_frag_refill(len, alloc_frag, gfp))) 919 return -ENOMEM; 920 921 buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset; 922 get_page(alloc_frag->page); 923 alloc_frag->offset += len; 924 sg_init_one(rq->sg, buf + VIRTNET_RX_PAD + xdp_headroom, 925 vi->hdr_len + GOOD_PACKET_LEN); 926 err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp); 927 if (err < 0) 928 put_page(virt_to_head_page(buf)); 929 return err; 930 } 931 932 static int add_recvbuf_big(struct virtnet_info *vi, struct receive_queue *rq, 933 gfp_t gfp) 934 { 935 struct page *first, *list = NULL; 936 char *p; 937 int i, err, offset; 938 939 sg_init_table(rq->sg, MAX_SKB_FRAGS + 2); 940 941 /* page in rq->sg[MAX_SKB_FRAGS + 1] is list tail */ 942 for (i = MAX_SKB_FRAGS + 1; i > 1; --i) { 943 first = get_a_page(rq, gfp); 944 if (!first) { 945 if (list) 946 give_pages(rq, list); 947 return -ENOMEM; 948 } 949 sg_set_buf(&rq->sg[i], page_address(first), PAGE_SIZE); 950 951 /* chain new page in list head to match sg */ 952 first->private = (unsigned long)list; 953 list = first; 954 } 955 956 first = get_a_page(rq, gfp); 957 if (!first) { 958 give_pages(rq, list); 959 return -ENOMEM; 960 } 961 p = page_address(first); 962 963 /* rq->sg[0], rq->sg[1] share the same page */ 964 /* a separated rq->sg[0] for header - required in case !any_header_sg */ 965 sg_set_buf(&rq->sg[0], p, vi->hdr_len); 966 967 /* rq->sg[1] for data packet, from offset */ 968 offset = sizeof(struct padded_vnet_hdr); 969 sg_set_buf(&rq->sg[1], p + offset, PAGE_SIZE - offset); 970 971 /* chain first in list head */ 972 first->private = (unsigned long)list; 973 err = virtqueue_add_inbuf(rq->vq, rq->sg, MAX_SKB_FRAGS + 2, 974 first, gfp); 975 if (err < 0) 976 give_pages(rq, first); 977 978 return err; 979 } 980 981 static unsigned int get_mergeable_buf_len(struct receive_queue *rq, 982 struct ewma_pkt_len *avg_pkt_len) 983 { 984 const size_t hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf); 985 unsigned int len; 986 987 len = hdr_len + clamp_t(unsigned int, ewma_pkt_len_read(avg_pkt_len), 988 rq->min_buf_len, PAGE_SIZE - hdr_len); 989 return ALIGN(len, L1_CACHE_BYTES); 990 } 991 992 static int add_recvbuf_mergeable(struct virtnet_info *vi, 993 struct receive_queue *rq, gfp_t gfp) 994 { 995 struct page_frag *alloc_frag = &rq->alloc_frag; 996 unsigned int headroom = virtnet_get_headroom(vi); 997 char *buf; 998 void *ctx; 999 int err; 1000 unsigned int len, hole; 1001 1002 len = get_mergeable_buf_len(rq, &rq->mrg_avg_pkt_len); 1003 if (unlikely(!skb_page_frag_refill(len + headroom, alloc_frag, gfp))) 1004 return -ENOMEM; 1005 1006 buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset; 1007 buf += headroom; /* advance address leaving hole at front of pkt */ 1008 get_page(alloc_frag->page); 1009 alloc_frag->offset += len + headroom; 1010 hole = alloc_frag->size - alloc_frag->offset; 1011 if (hole < len + headroom) { 1012 /* To avoid internal fragmentation, if there is very likely not 1013 * enough space for another buffer, add the remaining space to 1014 * the current buffer. 1015 */ 1016 len += hole; 1017 alloc_frag->offset += hole; 1018 } 1019 1020 sg_init_one(rq->sg, buf, len); 1021 ctx = mergeable_len_to_ctx(len, headroom); 1022 err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp); 1023 if (err < 0) 1024 put_page(virt_to_head_page(buf)); 1025 1026 return err; 1027 } 1028 1029 /* 1030 * Returns false if we couldn't fill entirely (OOM). 1031 * 1032 * Normally run in the receive path, but can also be run from ndo_open 1033 * before we're receiving packets, or from refill_work which is 1034 * careful to disable receiving (using napi_disable). 1035 */ 1036 static bool try_fill_recv(struct virtnet_info *vi, struct receive_queue *rq, 1037 gfp_t gfp) 1038 { 1039 int err; 1040 bool oom; 1041 1042 do { 1043 if (vi->mergeable_rx_bufs) 1044 err = add_recvbuf_mergeable(vi, rq, gfp); 1045 else if (vi->big_packets) 1046 err = add_recvbuf_big(vi, rq, gfp); 1047 else 1048 err = add_recvbuf_small(vi, rq, gfp); 1049 1050 oom = err == -ENOMEM; 1051 if (err) 1052 break; 1053 } while (rq->vq->num_free); 1054 virtqueue_kick(rq->vq); 1055 return !oom; 1056 } 1057 1058 static void skb_recv_done(struct virtqueue *rvq) 1059 { 1060 struct virtnet_info *vi = rvq->vdev->priv; 1061 struct receive_queue *rq = &vi->rq[vq2rxq(rvq)]; 1062 1063 virtqueue_napi_schedule(&rq->napi, rvq); 1064 } 1065 1066 static void virtnet_napi_enable(struct virtqueue *vq, struct napi_struct *napi) 1067 { 1068 napi_enable(napi); 1069 1070 /* If all buffers were filled by other side before we napi_enabled, we 1071 * won't get another interrupt, so process any outstanding packets now. 1072 * Call local_bh_enable after to trigger softIRQ processing. 1073 */ 1074 local_bh_disable(); 1075 virtqueue_napi_schedule(napi, vq); 1076 local_bh_enable(); 1077 } 1078 1079 static void virtnet_napi_tx_enable(struct virtnet_info *vi, 1080 struct virtqueue *vq, 1081 struct napi_struct *napi) 1082 { 1083 if (!napi->weight) 1084 return; 1085 1086 /* Tx napi touches cachelines on the cpu handling tx interrupts. Only 1087 * enable the feature if this is likely affine with the transmit path. 1088 */ 1089 if (!vi->affinity_hint_set) { 1090 napi->weight = 0; 1091 return; 1092 } 1093 1094 return virtnet_napi_enable(vq, napi); 1095 } 1096 1097 static void virtnet_napi_tx_disable(struct napi_struct *napi) 1098 { 1099 if (napi->weight) 1100 napi_disable(napi); 1101 } 1102 1103 static void refill_work(struct work_struct *work) 1104 { 1105 struct virtnet_info *vi = 1106 container_of(work, struct virtnet_info, refill.work); 1107 bool still_empty; 1108 int i; 1109 1110 for (i = 0; i < vi->curr_queue_pairs; i++) { 1111 struct receive_queue *rq = &vi->rq[i]; 1112 1113 napi_disable(&rq->napi); 1114 still_empty = !try_fill_recv(vi, rq, GFP_KERNEL); 1115 virtnet_napi_enable(rq->vq, &rq->napi); 1116 1117 /* In theory, this can happen: if we don't get any buffers in 1118 * we will *never* try to fill again. 1119 */ 1120 if (still_empty) 1121 schedule_delayed_work(&vi->refill, HZ/2); 1122 } 1123 } 1124 1125 static int virtnet_receive(struct receive_queue *rq, int budget, bool *xdp_xmit) 1126 { 1127 struct virtnet_info *vi = rq->vq->vdev->priv; 1128 unsigned int len, received = 0, bytes = 0; 1129 void *buf; 1130 struct virtnet_stats *stats = this_cpu_ptr(vi->stats); 1131 1132 if (!vi->big_packets || vi->mergeable_rx_bufs) { 1133 void *ctx; 1134 1135 while (received < budget && 1136 (buf = virtqueue_get_buf_ctx(rq->vq, &len, &ctx))) { 1137 bytes += receive_buf(vi, rq, buf, len, ctx, xdp_xmit); 1138 received++; 1139 } 1140 } else { 1141 while (received < budget && 1142 (buf = virtqueue_get_buf(rq->vq, &len)) != NULL) { 1143 bytes += receive_buf(vi, rq, buf, len, NULL, xdp_xmit); 1144 received++; 1145 } 1146 } 1147 1148 if (rq->vq->num_free > virtqueue_get_vring_size(rq->vq) / 2) { 1149 if (!try_fill_recv(vi, rq, GFP_ATOMIC)) 1150 schedule_delayed_work(&vi->refill, 0); 1151 } 1152 1153 u64_stats_update_begin(&stats->rx_syncp); 1154 stats->rx_bytes += bytes; 1155 stats->rx_packets += received; 1156 u64_stats_update_end(&stats->rx_syncp); 1157 1158 return received; 1159 } 1160 1161 static void free_old_xmit_skbs(struct send_queue *sq) 1162 { 1163 struct sk_buff *skb; 1164 unsigned int len; 1165 struct virtnet_info *vi = sq->vq->vdev->priv; 1166 struct virtnet_stats *stats = this_cpu_ptr(vi->stats); 1167 unsigned int packets = 0; 1168 unsigned int bytes = 0; 1169 1170 while ((skb = virtqueue_get_buf(sq->vq, &len)) != NULL) { 1171 pr_debug("Sent skb %p\n", skb); 1172 1173 bytes += skb->len; 1174 packets++; 1175 1176 dev_consume_skb_any(skb); 1177 } 1178 1179 /* Avoid overhead when no packets have been processed 1180 * happens when called speculatively from start_xmit. 1181 */ 1182 if (!packets) 1183 return; 1184 1185 u64_stats_update_begin(&stats->tx_syncp); 1186 stats->tx_bytes += bytes; 1187 stats->tx_packets += packets; 1188 u64_stats_update_end(&stats->tx_syncp); 1189 } 1190 1191 static void virtnet_poll_cleantx(struct receive_queue *rq) 1192 { 1193 struct virtnet_info *vi = rq->vq->vdev->priv; 1194 unsigned int index = vq2rxq(rq->vq); 1195 struct send_queue *sq = &vi->sq[index]; 1196 struct netdev_queue *txq = netdev_get_tx_queue(vi->dev, index); 1197 1198 if (!sq->napi.weight) 1199 return; 1200 1201 if (__netif_tx_trylock(txq)) { 1202 free_old_xmit_skbs(sq); 1203 __netif_tx_unlock(txq); 1204 } 1205 1206 if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS) 1207 netif_tx_wake_queue(txq); 1208 } 1209 1210 static int virtnet_poll(struct napi_struct *napi, int budget) 1211 { 1212 struct receive_queue *rq = 1213 container_of(napi, struct receive_queue, napi); 1214 unsigned int received; 1215 bool xdp_xmit = false; 1216 1217 virtnet_poll_cleantx(rq); 1218 1219 received = virtnet_receive(rq, budget, &xdp_xmit); 1220 1221 /* Out of packets? */ 1222 if (received < budget) 1223 virtqueue_napi_complete(napi, rq->vq, received); 1224 1225 if (xdp_xmit) 1226 xdp_do_flush_map(); 1227 1228 return received; 1229 } 1230 1231 static int virtnet_open(struct net_device *dev) 1232 { 1233 struct virtnet_info *vi = netdev_priv(dev); 1234 int i, err; 1235 1236 for (i = 0; i < vi->max_queue_pairs; i++) { 1237 if (i < vi->curr_queue_pairs) 1238 /* Make sure we have some buffers: if oom use wq. */ 1239 if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL)) 1240 schedule_delayed_work(&vi->refill, 0); 1241 1242 err = xdp_rxq_info_reg(&vi->rq[i].xdp_rxq, dev, i); 1243 if (err < 0) 1244 return err; 1245 1246 virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi); 1247 virtnet_napi_tx_enable(vi, vi->sq[i].vq, &vi->sq[i].napi); 1248 } 1249 1250 return 0; 1251 } 1252 1253 static int virtnet_poll_tx(struct napi_struct *napi, int budget) 1254 { 1255 struct send_queue *sq = container_of(napi, struct send_queue, napi); 1256 struct virtnet_info *vi = sq->vq->vdev->priv; 1257 struct netdev_queue *txq = netdev_get_tx_queue(vi->dev, vq2txq(sq->vq)); 1258 1259 __netif_tx_lock(txq, raw_smp_processor_id()); 1260 free_old_xmit_skbs(sq); 1261 __netif_tx_unlock(txq); 1262 1263 virtqueue_napi_complete(napi, sq->vq, 0); 1264 1265 if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS) 1266 netif_tx_wake_queue(txq); 1267 1268 return 0; 1269 } 1270 1271 static int xmit_skb(struct send_queue *sq, struct sk_buff *skb) 1272 { 1273 struct virtio_net_hdr_mrg_rxbuf *hdr; 1274 const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest; 1275 struct virtnet_info *vi = sq->vq->vdev->priv; 1276 int num_sg; 1277 unsigned hdr_len = vi->hdr_len; 1278 bool can_push; 1279 1280 pr_debug("%s: xmit %p %pM\n", vi->dev->name, skb, dest); 1281 1282 can_push = vi->any_header_sg && 1283 !((unsigned long)skb->data & (__alignof__(*hdr) - 1)) && 1284 !skb_header_cloned(skb) && skb_headroom(skb) >= hdr_len; 1285 /* Even if we can, don't push here yet as this would skew 1286 * csum_start offset below. */ 1287 if (can_push) 1288 hdr = (struct virtio_net_hdr_mrg_rxbuf *)(skb->data - hdr_len); 1289 else 1290 hdr = skb_vnet_hdr(skb); 1291 1292 if (virtio_net_hdr_from_skb(skb, &hdr->hdr, 1293 virtio_is_little_endian(vi->vdev), false)) 1294 BUG(); 1295 1296 if (vi->mergeable_rx_bufs) 1297 hdr->num_buffers = 0; 1298 1299 sg_init_table(sq->sg, skb_shinfo(skb)->nr_frags + (can_push ? 1 : 2)); 1300 if (can_push) { 1301 __skb_push(skb, hdr_len); 1302 num_sg = skb_to_sgvec(skb, sq->sg, 0, skb->len); 1303 if (unlikely(num_sg < 0)) 1304 return num_sg; 1305 /* Pull header back to avoid skew in tx bytes calculations. */ 1306 __skb_pull(skb, hdr_len); 1307 } else { 1308 sg_set_buf(sq->sg, hdr, hdr_len); 1309 num_sg = skb_to_sgvec(skb, sq->sg + 1, 0, skb->len); 1310 if (unlikely(num_sg < 0)) 1311 return num_sg; 1312 num_sg++; 1313 } 1314 return virtqueue_add_outbuf(sq->vq, sq->sg, num_sg, skb, GFP_ATOMIC); 1315 } 1316 1317 static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev) 1318 { 1319 struct virtnet_info *vi = netdev_priv(dev); 1320 int qnum = skb_get_queue_mapping(skb); 1321 struct send_queue *sq = &vi->sq[qnum]; 1322 int err; 1323 struct netdev_queue *txq = netdev_get_tx_queue(dev, qnum); 1324 bool kick = !skb->xmit_more; 1325 bool use_napi = sq->napi.weight; 1326 1327 /* Free up any pending old buffers before queueing new ones. */ 1328 free_old_xmit_skbs(sq); 1329 1330 if (use_napi && kick) 1331 virtqueue_enable_cb_delayed(sq->vq); 1332 1333 /* timestamp packet in software */ 1334 skb_tx_timestamp(skb); 1335 1336 /* Try to transmit */ 1337 err = xmit_skb(sq, skb); 1338 1339 /* This should not happen! */ 1340 if (unlikely(err)) { 1341 dev->stats.tx_fifo_errors++; 1342 if (net_ratelimit()) 1343 dev_warn(&dev->dev, 1344 "Unexpected TXQ (%d) queue failure: %d\n", qnum, err); 1345 dev->stats.tx_dropped++; 1346 dev_kfree_skb_any(skb); 1347 return NETDEV_TX_OK; 1348 } 1349 1350 /* Don't wait up for transmitted skbs to be freed. */ 1351 if (!use_napi) { 1352 skb_orphan(skb); 1353 nf_reset(skb); 1354 } 1355 1356 /* If running out of space, stop queue to avoid getting packets that we 1357 * are then unable to transmit. 1358 * An alternative would be to force queuing layer to requeue the skb by 1359 * returning NETDEV_TX_BUSY. However, NETDEV_TX_BUSY should not be 1360 * returned in a normal path of operation: it means that driver is not 1361 * maintaining the TX queue stop/start state properly, and causes 1362 * the stack to do a non-trivial amount of useless work. 1363 * Since most packets only take 1 or 2 ring slots, stopping the queue 1364 * early means 16 slots are typically wasted. 1365 */ 1366 if (sq->vq->num_free < 2+MAX_SKB_FRAGS) { 1367 netif_stop_subqueue(dev, qnum); 1368 if (!use_napi && 1369 unlikely(!virtqueue_enable_cb_delayed(sq->vq))) { 1370 /* More just got used, free them then recheck. */ 1371 free_old_xmit_skbs(sq); 1372 if (sq->vq->num_free >= 2+MAX_SKB_FRAGS) { 1373 netif_start_subqueue(dev, qnum); 1374 virtqueue_disable_cb(sq->vq); 1375 } 1376 } 1377 } 1378 1379 if (kick || netif_xmit_stopped(txq)) 1380 virtqueue_kick(sq->vq); 1381 1382 return NETDEV_TX_OK; 1383 } 1384 1385 /* 1386 * Send command via the control virtqueue and check status. Commands 1387 * supported by the hypervisor, as indicated by feature bits, should 1388 * never fail unless improperly formatted. 1389 */ 1390 static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd, 1391 struct scatterlist *out) 1392 { 1393 struct scatterlist *sgs[4], hdr, stat; 1394 unsigned out_num = 0, tmp; 1395 1396 /* Caller should know better */ 1397 BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ)); 1398 1399 vi->ctrl_status = ~0; 1400 vi->ctrl_hdr.class = class; 1401 vi->ctrl_hdr.cmd = cmd; 1402 /* Add header */ 1403 sg_init_one(&hdr, &vi->ctrl_hdr, sizeof(vi->ctrl_hdr)); 1404 sgs[out_num++] = &hdr; 1405 1406 if (out) 1407 sgs[out_num++] = out; 1408 1409 /* Add return status. */ 1410 sg_init_one(&stat, &vi->ctrl_status, sizeof(vi->ctrl_status)); 1411 sgs[out_num] = &stat; 1412 1413 BUG_ON(out_num + 1 > ARRAY_SIZE(sgs)); 1414 virtqueue_add_sgs(vi->cvq, sgs, out_num, 1, vi, GFP_ATOMIC); 1415 1416 if (unlikely(!virtqueue_kick(vi->cvq))) 1417 return vi->ctrl_status == VIRTIO_NET_OK; 1418 1419 /* Spin for a response, the kick causes an ioport write, trapping 1420 * into the hypervisor, so the request should be handled immediately. 1421 */ 1422 while (!virtqueue_get_buf(vi->cvq, &tmp) && 1423 !virtqueue_is_broken(vi->cvq)) 1424 cpu_relax(); 1425 1426 return vi->ctrl_status == VIRTIO_NET_OK; 1427 } 1428 1429 static int virtnet_set_mac_address(struct net_device *dev, void *p) 1430 { 1431 struct virtnet_info *vi = netdev_priv(dev); 1432 struct virtio_device *vdev = vi->vdev; 1433 int ret; 1434 struct sockaddr *addr; 1435 struct scatterlist sg; 1436 1437 addr = kmemdup(p, sizeof(*addr), GFP_KERNEL); 1438 if (!addr) 1439 return -ENOMEM; 1440 1441 ret = eth_prepare_mac_addr_change(dev, addr); 1442 if (ret) 1443 goto out; 1444 1445 if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR)) { 1446 sg_init_one(&sg, addr->sa_data, dev->addr_len); 1447 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC, 1448 VIRTIO_NET_CTRL_MAC_ADDR_SET, &sg)) { 1449 dev_warn(&vdev->dev, 1450 "Failed to set mac address by vq command.\n"); 1451 ret = -EINVAL; 1452 goto out; 1453 } 1454 } else if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC) && 1455 !virtio_has_feature(vdev, VIRTIO_F_VERSION_1)) { 1456 unsigned int i; 1457 1458 /* Naturally, this has an atomicity problem. */ 1459 for (i = 0; i < dev->addr_len; i++) 1460 virtio_cwrite8(vdev, 1461 offsetof(struct virtio_net_config, mac) + 1462 i, addr->sa_data[i]); 1463 } 1464 1465 eth_commit_mac_addr_change(dev, p); 1466 ret = 0; 1467 1468 out: 1469 kfree(addr); 1470 return ret; 1471 } 1472 1473 static void virtnet_stats(struct net_device *dev, 1474 struct rtnl_link_stats64 *tot) 1475 { 1476 struct virtnet_info *vi = netdev_priv(dev); 1477 int cpu; 1478 unsigned int start; 1479 1480 for_each_possible_cpu(cpu) { 1481 struct virtnet_stats *stats = per_cpu_ptr(vi->stats, cpu); 1482 u64 tpackets, tbytes, rpackets, rbytes; 1483 1484 do { 1485 start = u64_stats_fetch_begin_irq(&stats->tx_syncp); 1486 tpackets = stats->tx_packets; 1487 tbytes = stats->tx_bytes; 1488 } while (u64_stats_fetch_retry_irq(&stats->tx_syncp, start)); 1489 1490 do { 1491 start = u64_stats_fetch_begin_irq(&stats->rx_syncp); 1492 rpackets = stats->rx_packets; 1493 rbytes = stats->rx_bytes; 1494 } while (u64_stats_fetch_retry_irq(&stats->rx_syncp, start)); 1495 1496 tot->rx_packets += rpackets; 1497 tot->tx_packets += tpackets; 1498 tot->rx_bytes += rbytes; 1499 tot->tx_bytes += tbytes; 1500 } 1501 1502 tot->tx_dropped = dev->stats.tx_dropped; 1503 tot->tx_fifo_errors = dev->stats.tx_fifo_errors; 1504 tot->rx_dropped = dev->stats.rx_dropped; 1505 tot->rx_length_errors = dev->stats.rx_length_errors; 1506 tot->rx_frame_errors = dev->stats.rx_frame_errors; 1507 } 1508 1509 #ifdef CONFIG_NET_POLL_CONTROLLER 1510 static void virtnet_netpoll(struct net_device *dev) 1511 { 1512 struct virtnet_info *vi = netdev_priv(dev); 1513 int i; 1514 1515 for (i = 0; i < vi->curr_queue_pairs; i++) 1516 napi_schedule(&vi->rq[i].napi); 1517 } 1518 #endif 1519 1520 static void virtnet_ack_link_announce(struct virtnet_info *vi) 1521 { 1522 rtnl_lock(); 1523 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_ANNOUNCE, 1524 VIRTIO_NET_CTRL_ANNOUNCE_ACK, NULL)) 1525 dev_warn(&vi->dev->dev, "Failed to ack link announce.\n"); 1526 rtnl_unlock(); 1527 } 1528 1529 static int _virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs) 1530 { 1531 struct scatterlist sg; 1532 struct net_device *dev = vi->dev; 1533 1534 if (!vi->has_cvq || !virtio_has_feature(vi->vdev, VIRTIO_NET_F_MQ)) 1535 return 0; 1536 1537 vi->ctrl_mq.virtqueue_pairs = cpu_to_virtio16(vi->vdev, queue_pairs); 1538 sg_init_one(&sg, &vi->ctrl_mq, sizeof(vi->ctrl_mq)); 1539 1540 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ, 1541 VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET, &sg)) { 1542 dev_warn(&dev->dev, "Fail to set num of queue pairs to %d\n", 1543 queue_pairs); 1544 return -EINVAL; 1545 } else { 1546 vi->curr_queue_pairs = queue_pairs; 1547 /* virtnet_open() will refill when device is going to up. */ 1548 if (dev->flags & IFF_UP) 1549 schedule_delayed_work(&vi->refill, 0); 1550 } 1551 1552 return 0; 1553 } 1554 1555 static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs) 1556 { 1557 int err; 1558 1559 rtnl_lock(); 1560 err = _virtnet_set_queues(vi, queue_pairs); 1561 rtnl_unlock(); 1562 return err; 1563 } 1564 1565 static int virtnet_close(struct net_device *dev) 1566 { 1567 struct virtnet_info *vi = netdev_priv(dev); 1568 int i; 1569 1570 /* Make sure refill_work doesn't re-enable napi! */ 1571 cancel_delayed_work_sync(&vi->refill); 1572 1573 for (i = 0; i < vi->max_queue_pairs; i++) { 1574 xdp_rxq_info_unreg(&vi->rq[i].xdp_rxq); 1575 napi_disable(&vi->rq[i].napi); 1576 virtnet_napi_tx_disable(&vi->sq[i].napi); 1577 } 1578 1579 return 0; 1580 } 1581 1582 static void virtnet_set_rx_mode(struct net_device *dev) 1583 { 1584 struct virtnet_info *vi = netdev_priv(dev); 1585 struct scatterlist sg[2]; 1586 struct virtio_net_ctrl_mac *mac_data; 1587 struct netdev_hw_addr *ha; 1588 int uc_count; 1589 int mc_count; 1590 void *buf; 1591 int i; 1592 1593 /* We can't dynamically set ndo_set_rx_mode, so return gracefully */ 1594 if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_RX)) 1595 return; 1596 1597 vi->ctrl_promisc = ((dev->flags & IFF_PROMISC) != 0); 1598 vi->ctrl_allmulti = ((dev->flags & IFF_ALLMULTI) != 0); 1599 1600 sg_init_one(sg, &vi->ctrl_promisc, sizeof(vi->ctrl_promisc)); 1601 1602 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX, 1603 VIRTIO_NET_CTRL_RX_PROMISC, sg)) 1604 dev_warn(&dev->dev, "Failed to %sable promisc mode.\n", 1605 vi->ctrl_promisc ? "en" : "dis"); 1606 1607 sg_init_one(sg, &vi->ctrl_allmulti, sizeof(vi->ctrl_allmulti)); 1608 1609 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX, 1610 VIRTIO_NET_CTRL_RX_ALLMULTI, sg)) 1611 dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n", 1612 vi->ctrl_allmulti ? "en" : "dis"); 1613 1614 uc_count = netdev_uc_count(dev); 1615 mc_count = netdev_mc_count(dev); 1616 /* MAC filter - use one buffer for both lists */ 1617 buf = kzalloc(((uc_count + mc_count) * ETH_ALEN) + 1618 (2 * sizeof(mac_data->entries)), GFP_ATOMIC); 1619 mac_data = buf; 1620 if (!buf) 1621 return; 1622 1623 sg_init_table(sg, 2); 1624 1625 /* Store the unicast list and count in the front of the buffer */ 1626 mac_data->entries = cpu_to_virtio32(vi->vdev, uc_count); 1627 i = 0; 1628 netdev_for_each_uc_addr(ha, dev) 1629 memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN); 1630 1631 sg_set_buf(&sg[0], mac_data, 1632 sizeof(mac_data->entries) + (uc_count * ETH_ALEN)); 1633 1634 /* multicast list and count fill the end */ 1635 mac_data = (void *)&mac_data->macs[uc_count][0]; 1636 1637 mac_data->entries = cpu_to_virtio32(vi->vdev, mc_count); 1638 i = 0; 1639 netdev_for_each_mc_addr(ha, dev) 1640 memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN); 1641 1642 sg_set_buf(&sg[1], mac_data, 1643 sizeof(mac_data->entries) + (mc_count * ETH_ALEN)); 1644 1645 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC, 1646 VIRTIO_NET_CTRL_MAC_TABLE_SET, sg)) 1647 dev_warn(&dev->dev, "Failed to set MAC filter table.\n"); 1648 1649 kfree(buf); 1650 } 1651 1652 static int virtnet_vlan_rx_add_vid(struct net_device *dev, 1653 __be16 proto, u16 vid) 1654 { 1655 struct virtnet_info *vi = netdev_priv(dev); 1656 struct scatterlist sg; 1657 1658 vi->ctrl_vid = vid; 1659 sg_init_one(&sg, &vi->ctrl_vid, sizeof(vi->ctrl_vid)); 1660 1661 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN, 1662 VIRTIO_NET_CTRL_VLAN_ADD, &sg)) 1663 dev_warn(&dev->dev, "Failed to add VLAN ID %d.\n", vid); 1664 return 0; 1665 } 1666 1667 static int virtnet_vlan_rx_kill_vid(struct net_device *dev, 1668 __be16 proto, u16 vid) 1669 { 1670 struct virtnet_info *vi = netdev_priv(dev); 1671 struct scatterlist sg; 1672 1673 vi->ctrl_vid = vid; 1674 sg_init_one(&sg, &vi->ctrl_vid, sizeof(vi->ctrl_vid)); 1675 1676 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN, 1677 VIRTIO_NET_CTRL_VLAN_DEL, &sg)) 1678 dev_warn(&dev->dev, "Failed to kill VLAN ID %d.\n", vid); 1679 return 0; 1680 } 1681 1682 static void virtnet_clean_affinity(struct virtnet_info *vi, long hcpu) 1683 { 1684 int i; 1685 1686 if (vi->affinity_hint_set) { 1687 for (i = 0; i < vi->max_queue_pairs; i++) { 1688 virtqueue_set_affinity(vi->rq[i].vq, -1); 1689 virtqueue_set_affinity(vi->sq[i].vq, -1); 1690 } 1691 1692 vi->affinity_hint_set = false; 1693 } 1694 } 1695 1696 static void virtnet_set_affinity(struct virtnet_info *vi) 1697 { 1698 int i; 1699 int cpu; 1700 1701 /* In multiqueue mode, when the number of cpu is equal to the number of 1702 * queue pairs, we let the queue pairs to be private to one cpu by 1703 * setting the affinity hint to eliminate the contention. 1704 */ 1705 if (vi->curr_queue_pairs == 1 || 1706 vi->max_queue_pairs != num_online_cpus()) { 1707 virtnet_clean_affinity(vi, -1); 1708 return; 1709 } 1710 1711 i = 0; 1712 for_each_online_cpu(cpu) { 1713 virtqueue_set_affinity(vi->rq[i].vq, cpu); 1714 virtqueue_set_affinity(vi->sq[i].vq, cpu); 1715 netif_set_xps_queue(vi->dev, cpumask_of(cpu), i); 1716 i++; 1717 } 1718 1719 vi->affinity_hint_set = true; 1720 } 1721 1722 static int virtnet_cpu_online(unsigned int cpu, struct hlist_node *node) 1723 { 1724 struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info, 1725 node); 1726 virtnet_set_affinity(vi); 1727 return 0; 1728 } 1729 1730 static int virtnet_cpu_dead(unsigned int cpu, struct hlist_node *node) 1731 { 1732 struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info, 1733 node_dead); 1734 virtnet_set_affinity(vi); 1735 return 0; 1736 } 1737 1738 static int virtnet_cpu_down_prep(unsigned int cpu, struct hlist_node *node) 1739 { 1740 struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info, 1741 node); 1742 1743 virtnet_clean_affinity(vi, cpu); 1744 return 0; 1745 } 1746 1747 static enum cpuhp_state virtionet_online; 1748 1749 static int virtnet_cpu_notif_add(struct virtnet_info *vi) 1750 { 1751 int ret; 1752 1753 ret = cpuhp_state_add_instance_nocalls(virtionet_online, &vi->node); 1754 if (ret) 1755 return ret; 1756 ret = cpuhp_state_add_instance_nocalls(CPUHP_VIRT_NET_DEAD, 1757 &vi->node_dead); 1758 if (!ret) 1759 return ret; 1760 cpuhp_state_remove_instance_nocalls(virtionet_online, &vi->node); 1761 return ret; 1762 } 1763 1764 static void virtnet_cpu_notif_remove(struct virtnet_info *vi) 1765 { 1766 cpuhp_state_remove_instance_nocalls(virtionet_online, &vi->node); 1767 cpuhp_state_remove_instance_nocalls(CPUHP_VIRT_NET_DEAD, 1768 &vi->node_dead); 1769 } 1770 1771 static void virtnet_get_ringparam(struct net_device *dev, 1772 struct ethtool_ringparam *ring) 1773 { 1774 struct virtnet_info *vi = netdev_priv(dev); 1775 1776 ring->rx_max_pending = virtqueue_get_vring_size(vi->rq[0].vq); 1777 ring->tx_max_pending = virtqueue_get_vring_size(vi->sq[0].vq); 1778 ring->rx_pending = ring->rx_max_pending; 1779 ring->tx_pending = ring->tx_max_pending; 1780 } 1781 1782 1783 static void virtnet_get_drvinfo(struct net_device *dev, 1784 struct ethtool_drvinfo *info) 1785 { 1786 struct virtnet_info *vi = netdev_priv(dev); 1787 struct virtio_device *vdev = vi->vdev; 1788 1789 strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver)); 1790 strlcpy(info->version, VIRTNET_DRIVER_VERSION, sizeof(info->version)); 1791 strlcpy(info->bus_info, virtio_bus_name(vdev), sizeof(info->bus_info)); 1792 1793 } 1794 1795 /* TODO: Eliminate OOO packets during switching */ 1796 static int virtnet_set_channels(struct net_device *dev, 1797 struct ethtool_channels *channels) 1798 { 1799 struct virtnet_info *vi = netdev_priv(dev); 1800 u16 queue_pairs = channels->combined_count; 1801 int err; 1802 1803 /* We don't support separate rx/tx channels. 1804 * We don't allow setting 'other' channels. 1805 */ 1806 if (channels->rx_count || channels->tx_count || channels->other_count) 1807 return -EINVAL; 1808 1809 if (queue_pairs > vi->max_queue_pairs || queue_pairs == 0) 1810 return -EINVAL; 1811 1812 /* For now we don't support modifying channels while XDP is loaded 1813 * also when XDP is loaded all RX queues have XDP programs so we only 1814 * need to check a single RX queue. 1815 */ 1816 if (vi->rq[0].xdp_prog) 1817 return -EINVAL; 1818 1819 get_online_cpus(); 1820 err = _virtnet_set_queues(vi, queue_pairs); 1821 if (!err) { 1822 netif_set_real_num_tx_queues(dev, queue_pairs); 1823 netif_set_real_num_rx_queues(dev, queue_pairs); 1824 1825 virtnet_set_affinity(vi); 1826 } 1827 put_online_cpus(); 1828 1829 return err; 1830 } 1831 1832 static void virtnet_get_channels(struct net_device *dev, 1833 struct ethtool_channels *channels) 1834 { 1835 struct virtnet_info *vi = netdev_priv(dev); 1836 1837 channels->combined_count = vi->curr_queue_pairs; 1838 channels->max_combined = vi->max_queue_pairs; 1839 channels->max_other = 0; 1840 channels->rx_count = 0; 1841 channels->tx_count = 0; 1842 channels->other_count = 0; 1843 } 1844 1845 /* Check if the user is trying to change anything besides speed/duplex */ 1846 static bool 1847 virtnet_validate_ethtool_cmd(const struct ethtool_link_ksettings *cmd) 1848 { 1849 struct ethtool_link_ksettings diff1 = *cmd; 1850 struct ethtool_link_ksettings diff2 = {}; 1851 1852 /* cmd is always set so we need to clear it, validate the port type 1853 * and also without autonegotiation we can ignore advertising 1854 */ 1855 diff1.base.speed = 0; 1856 diff2.base.port = PORT_OTHER; 1857 ethtool_link_ksettings_zero_link_mode(&diff1, advertising); 1858 diff1.base.duplex = 0; 1859 diff1.base.cmd = 0; 1860 diff1.base.link_mode_masks_nwords = 0; 1861 1862 return !memcmp(&diff1.base, &diff2.base, sizeof(diff1.base)) && 1863 bitmap_empty(diff1.link_modes.supported, 1864 __ETHTOOL_LINK_MODE_MASK_NBITS) && 1865 bitmap_empty(diff1.link_modes.advertising, 1866 __ETHTOOL_LINK_MODE_MASK_NBITS) && 1867 bitmap_empty(diff1.link_modes.lp_advertising, 1868 __ETHTOOL_LINK_MODE_MASK_NBITS); 1869 } 1870 1871 static int virtnet_set_link_ksettings(struct net_device *dev, 1872 const struct ethtool_link_ksettings *cmd) 1873 { 1874 struct virtnet_info *vi = netdev_priv(dev); 1875 u32 speed; 1876 1877 speed = cmd->base.speed; 1878 /* don't allow custom speed and duplex */ 1879 if (!ethtool_validate_speed(speed) || 1880 !ethtool_validate_duplex(cmd->base.duplex) || 1881 !virtnet_validate_ethtool_cmd(cmd)) 1882 return -EINVAL; 1883 vi->speed = speed; 1884 vi->duplex = cmd->base.duplex; 1885 1886 return 0; 1887 } 1888 1889 static int virtnet_get_link_ksettings(struct net_device *dev, 1890 struct ethtool_link_ksettings *cmd) 1891 { 1892 struct virtnet_info *vi = netdev_priv(dev); 1893 1894 cmd->base.speed = vi->speed; 1895 cmd->base.duplex = vi->duplex; 1896 cmd->base.port = PORT_OTHER; 1897 1898 return 0; 1899 } 1900 1901 static void virtnet_init_settings(struct net_device *dev) 1902 { 1903 struct virtnet_info *vi = netdev_priv(dev); 1904 1905 vi->speed = SPEED_UNKNOWN; 1906 vi->duplex = DUPLEX_UNKNOWN; 1907 } 1908 1909 static const struct ethtool_ops virtnet_ethtool_ops = { 1910 .get_drvinfo = virtnet_get_drvinfo, 1911 .get_link = ethtool_op_get_link, 1912 .get_ringparam = virtnet_get_ringparam, 1913 .set_channels = virtnet_set_channels, 1914 .get_channels = virtnet_get_channels, 1915 .get_ts_info = ethtool_op_get_ts_info, 1916 .get_link_ksettings = virtnet_get_link_ksettings, 1917 .set_link_ksettings = virtnet_set_link_ksettings, 1918 }; 1919 1920 static void virtnet_freeze_down(struct virtio_device *vdev) 1921 { 1922 struct virtnet_info *vi = vdev->priv; 1923 int i; 1924 1925 /* Make sure no work handler is accessing the device */ 1926 flush_work(&vi->config_work); 1927 1928 netif_device_detach(vi->dev); 1929 netif_tx_disable(vi->dev); 1930 cancel_delayed_work_sync(&vi->refill); 1931 1932 if (netif_running(vi->dev)) { 1933 for (i = 0; i < vi->max_queue_pairs; i++) { 1934 napi_disable(&vi->rq[i].napi); 1935 virtnet_napi_tx_disable(&vi->sq[i].napi); 1936 } 1937 } 1938 } 1939 1940 static int init_vqs(struct virtnet_info *vi); 1941 1942 static int virtnet_restore_up(struct virtio_device *vdev) 1943 { 1944 struct virtnet_info *vi = vdev->priv; 1945 int err, i; 1946 1947 err = init_vqs(vi); 1948 if (err) 1949 return err; 1950 1951 virtio_device_ready(vdev); 1952 1953 if (netif_running(vi->dev)) { 1954 for (i = 0; i < vi->curr_queue_pairs; i++) 1955 if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL)) 1956 schedule_delayed_work(&vi->refill, 0); 1957 1958 for (i = 0; i < vi->max_queue_pairs; i++) { 1959 virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi); 1960 virtnet_napi_tx_enable(vi, vi->sq[i].vq, 1961 &vi->sq[i].napi); 1962 } 1963 } 1964 1965 netif_device_attach(vi->dev); 1966 return err; 1967 } 1968 1969 static int virtnet_set_guest_offloads(struct virtnet_info *vi, u64 offloads) 1970 { 1971 struct scatterlist sg; 1972 vi->ctrl_offloads = cpu_to_virtio64(vi->vdev, offloads); 1973 1974 sg_init_one(&sg, &vi->ctrl_offloads, sizeof(vi->ctrl_offloads)); 1975 1976 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_GUEST_OFFLOADS, 1977 VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET, &sg)) { 1978 dev_warn(&vi->dev->dev, "Fail to set guest offload. \n"); 1979 return -EINVAL; 1980 } 1981 1982 return 0; 1983 } 1984 1985 static int virtnet_clear_guest_offloads(struct virtnet_info *vi) 1986 { 1987 u64 offloads = 0; 1988 1989 if (!vi->guest_offloads) 1990 return 0; 1991 1992 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_CSUM)) 1993 offloads = 1ULL << VIRTIO_NET_F_GUEST_CSUM; 1994 1995 return virtnet_set_guest_offloads(vi, offloads); 1996 } 1997 1998 static int virtnet_restore_guest_offloads(struct virtnet_info *vi) 1999 { 2000 u64 offloads = vi->guest_offloads; 2001 2002 if (!vi->guest_offloads) 2003 return 0; 2004 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_CSUM)) 2005 offloads |= 1ULL << VIRTIO_NET_F_GUEST_CSUM; 2006 2007 return virtnet_set_guest_offloads(vi, offloads); 2008 } 2009 2010 static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog, 2011 struct netlink_ext_ack *extack) 2012 { 2013 unsigned long int max_sz = PAGE_SIZE - sizeof(struct padded_vnet_hdr); 2014 struct virtnet_info *vi = netdev_priv(dev); 2015 struct bpf_prog *old_prog; 2016 u16 xdp_qp = 0, curr_qp; 2017 int i, err; 2018 2019 if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS) 2020 && (virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO4) || 2021 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO6) || 2022 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_ECN) || 2023 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_UFO))) { 2024 NL_SET_ERR_MSG_MOD(extack, "Can't set XDP while host is implementing LRO, disable LRO first"); 2025 return -EOPNOTSUPP; 2026 } 2027 2028 if (vi->mergeable_rx_bufs && !vi->any_header_sg) { 2029 NL_SET_ERR_MSG_MOD(extack, "XDP expects header/data in single page, any_header_sg required"); 2030 return -EINVAL; 2031 } 2032 2033 if (dev->mtu > max_sz) { 2034 NL_SET_ERR_MSG_MOD(extack, "MTU too large to enable XDP"); 2035 netdev_warn(dev, "XDP requires MTU less than %lu\n", max_sz); 2036 return -EINVAL; 2037 } 2038 2039 curr_qp = vi->curr_queue_pairs - vi->xdp_queue_pairs; 2040 if (prog) 2041 xdp_qp = nr_cpu_ids; 2042 2043 /* XDP requires extra queues for XDP_TX */ 2044 if (curr_qp + xdp_qp > vi->max_queue_pairs) { 2045 NL_SET_ERR_MSG_MOD(extack, "Too few free TX rings available"); 2046 netdev_warn(dev, "request %i queues but max is %i\n", 2047 curr_qp + xdp_qp, vi->max_queue_pairs); 2048 return -ENOMEM; 2049 } 2050 2051 if (prog) { 2052 prog = bpf_prog_add(prog, vi->max_queue_pairs - 1); 2053 if (IS_ERR(prog)) 2054 return PTR_ERR(prog); 2055 } 2056 2057 /* Make sure NAPI is not using any XDP TX queues for RX. */ 2058 for (i = 0; i < vi->max_queue_pairs; i++) 2059 napi_disable(&vi->rq[i].napi); 2060 2061 netif_set_real_num_rx_queues(dev, curr_qp + xdp_qp); 2062 err = _virtnet_set_queues(vi, curr_qp + xdp_qp); 2063 if (err) 2064 goto err; 2065 vi->xdp_queue_pairs = xdp_qp; 2066 2067 for (i = 0; i < vi->max_queue_pairs; i++) { 2068 old_prog = rtnl_dereference(vi->rq[i].xdp_prog); 2069 rcu_assign_pointer(vi->rq[i].xdp_prog, prog); 2070 if (i == 0) { 2071 if (!old_prog) 2072 virtnet_clear_guest_offloads(vi); 2073 if (!prog) 2074 virtnet_restore_guest_offloads(vi); 2075 } 2076 if (old_prog) 2077 bpf_prog_put(old_prog); 2078 virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi); 2079 } 2080 2081 return 0; 2082 2083 err: 2084 for (i = 0; i < vi->max_queue_pairs; i++) 2085 virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi); 2086 if (prog) 2087 bpf_prog_sub(prog, vi->max_queue_pairs - 1); 2088 return err; 2089 } 2090 2091 static u32 virtnet_xdp_query(struct net_device *dev) 2092 { 2093 struct virtnet_info *vi = netdev_priv(dev); 2094 const struct bpf_prog *xdp_prog; 2095 int i; 2096 2097 for (i = 0; i < vi->max_queue_pairs; i++) { 2098 xdp_prog = rtnl_dereference(vi->rq[i].xdp_prog); 2099 if (xdp_prog) 2100 return xdp_prog->aux->id; 2101 } 2102 return 0; 2103 } 2104 2105 static int virtnet_xdp(struct net_device *dev, struct netdev_bpf *xdp) 2106 { 2107 switch (xdp->command) { 2108 case XDP_SETUP_PROG: 2109 return virtnet_xdp_set(dev, xdp->prog, xdp->extack); 2110 case XDP_QUERY_PROG: 2111 xdp->prog_id = virtnet_xdp_query(dev); 2112 xdp->prog_attached = !!xdp->prog_id; 2113 return 0; 2114 default: 2115 return -EINVAL; 2116 } 2117 } 2118 2119 static const struct net_device_ops virtnet_netdev = { 2120 .ndo_open = virtnet_open, 2121 .ndo_stop = virtnet_close, 2122 .ndo_start_xmit = start_xmit, 2123 .ndo_validate_addr = eth_validate_addr, 2124 .ndo_set_mac_address = virtnet_set_mac_address, 2125 .ndo_set_rx_mode = virtnet_set_rx_mode, 2126 .ndo_get_stats64 = virtnet_stats, 2127 .ndo_vlan_rx_add_vid = virtnet_vlan_rx_add_vid, 2128 .ndo_vlan_rx_kill_vid = virtnet_vlan_rx_kill_vid, 2129 #ifdef CONFIG_NET_POLL_CONTROLLER 2130 .ndo_poll_controller = virtnet_netpoll, 2131 #endif 2132 .ndo_bpf = virtnet_xdp, 2133 .ndo_xdp_xmit = virtnet_xdp_xmit, 2134 .ndo_xdp_flush = virtnet_xdp_flush, 2135 .ndo_features_check = passthru_features_check, 2136 }; 2137 2138 static void virtnet_config_changed_work(struct work_struct *work) 2139 { 2140 struct virtnet_info *vi = 2141 container_of(work, struct virtnet_info, config_work); 2142 u16 v; 2143 2144 if (virtio_cread_feature(vi->vdev, VIRTIO_NET_F_STATUS, 2145 struct virtio_net_config, status, &v) < 0) 2146 return; 2147 2148 if (v & VIRTIO_NET_S_ANNOUNCE) { 2149 netdev_notify_peers(vi->dev); 2150 virtnet_ack_link_announce(vi); 2151 } 2152 2153 /* Ignore unknown (future) status bits */ 2154 v &= VIRTIO_NET_S_LINK_UP; 2155 2156 if (vi->status == v) 2157 return; 2158 2159 vi->status = v; 2160 2161 if (vi->status & VIRTIO_NET_S_LINK_UP) { 2162 netif_carrier_on(vi->dev); 2163 netif_tx_wake_all_queues(vi->dev); 2164 } else { 2165 netif_carrier_off(vi->dev); 2166 netif_tx_stop_all_queues(vi->dev); 2167 } 2168 } 2169 2170 static void virtnet_config_changed(struct virtio_device *vdev) 2171 { 2172 struct virtnet_info *vi = vdev->priv; 2173 2174 schedule_work(&vi->config_work); 2175 } 2176 2177 static void virtnet_free_queues(struct virtnet_info *vi) 2178 { 2179 int i; 2180 2181 for (i = 0; i < vi->max_queue_pairs; i++) { 2182 napi_hash_del(&vi->rq[i].napi); 2183 netif_napi_del(&vi->rq[i].napi); 2184 netif_napi_del(&vi->sq[i].napi); 2185 } 2186 2187 /* We called napi_hash_del() before netif_napi_del(), 2188 * we need to respect an RCU grace period before freeing vi->rq 2189 */ 2190 synchronize_net(); 2191 2192 kfree(vi->rq); 2193 kfree(vi->sq); 2194 } 2195 2196 static void _free_receive_bufs(struct virtnet_info *vi) 2197 { 2198 struct bpf_prog *old_prog; 2199 int i; 2200 2201 for (i = 0; i < vi->max_queue_pairs; i++) { 2202 while (vi->rq[i].pages) 2203 __free_pages(get_a_page(&vi->rq[i], GFP_KERNEL), 0); 2204 2205 old_prog = rtnl_dereference(vi->rq[i].xdp_prog); 2206 RCU_INIT_POINTER(vi->rq[i].xdp_prog, NULL); 2207 if (old_prog) 2208 bpf_prog_put(old_prog); 2209 } 2210 } 2211 2212 static void free_receive_bufs(struct virtnet_info *vi) 2213 { 2214 rtnl_lock(); 2215 _free_receive_bufs(vi); 2216 rtnl_unlock(); 2217 } 2218 2219 static void free_receive_page_frags(struct virtnet_info *vi) 2220 { 2221 int i; 2222 for (i = 0; i < vi->max_queue_pairs; i++) 2223 if (vi->rq[i].alloc_frag.page) 2224 put_page(vi->rq[i].alloc_frag.page); 2225 } 2226 2227 static bool is_xdp_raw_buffer_queue(struct virtnet_info *vi, int q) 2228 { 2229 if (q < (vi->curr_queue_pairs - vi->xdp_queue_pairs)) 2230 return false; 2231 else if (q < vi->curr_queue_pairs) 2232 return true; 2233 else 2234 return false; 2235 } 2236 2237 static void free_unused_bufs(struct virtnet_info *vi) 2238 { 2239 void *buf; 2240 int i; 2241 2242 for (i = 0; i < vi->max_queue_pairs; i++) { 2243 struct virtqueue *vq = vi->sq[i].vq; 2244 while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) { 2245 if (!is_xdp_raw_buffer_queue(vi, i)) 2246 dev_kfree_skb(buf); 2247 else 2248 put_page(virt_to_head_page(buf)); 2249 } 2250 } 2251 2252 for (i = 0; i < vi->max_queue_pairs; i++) { 2253 struct virtqueue *vq = vi->rq[i].vq; 2254 2255 while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) { 2256 if (vi->mergeable_rx_bufs) { 2257 put_page(virt_to_head_page(buf)); 2258 } else if (vi->big_packets) { 2259 give_pages(&vi->rq[i], buf); 2260 } else { 2261 put_page(virt_to_head_page(buf)); 2262 } 2263 } 2264 } 2265 } 2266 2267 static void virtnet_del_vqs(struct virtnet_info *vi) 2268 { 2269 struct virtio_device *vdev = vi->vdev; 2270 2271 virtnet_clean_affinity(vi, -1); 2272 2273 vdev->config->del_vqs(vdev); 2274 2275 virtnet_free_queues(vi); 2276 } 2277 2278 /* How large should a single buffer be so a queue full of these can fit at 2279 * least one full packet? 2280 * Logic below assumes the mergeable buffer header is used. 2281 */ 2282 static unsigned int mergeable_min_buf_len(struct virtnet_info *vi, struct virtqueue *vq) 2283 { 2284 const unsigned int hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf); 2285 unsigned int rq_size = virtqueue_get_vring_size(vq); 2286 unsigned int packet_len = vi->big_packets ? IP_MAX_MTU : vi->dev->max_mtu; 2287 unsigned int buf_len = hdr_len + ETH_HLEN + VLAN_HLEN + packet_len; 2288 unsigned int min_buf_len = DIV_ROUND_UP(buf_len, rq_size); 2289 2290 return max(max(min_buf_len, hdr_len) - hdr_len, 2291 (unsigned int)GOOD_PACKET_LEN); 2292 } 2293 2294 static int virtnet_find_vqs(struct virtnet_info *vi) 2295 { 2296 vq_callback_t **callbacks; 2297 struct virtqueue **vqs; 2298 int ret = -ENOMEM; 2299 int i, total_vqs; 2300 const char **names; 2301 bool *ctx; 2302 2303 /* We expect 1 RX virtqueue followed by 1 TX virtqueue, followed by 2304 * possible N-1 RX/TX queue pairs used in multiqueue mode, followed by 2305 * possible control vq. 2306 */ 2307 total_vqs = vi->max_queue_pairs * 2 + 2308 virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ); 2309 2310 /* Allocate space for find_vqs parameters */ 2311 vqs = kzalloc(total_vqs * sizeof(*vqs), GFP_KERNEL); 2312 if (!vqs) 2313 goto err_vq; 2314 callbacks = kmalloc(total_vqs * sizeof(*callbacks), GFP_KERNEL); 2315 if (!callbacks) 2316 goto err_callback; 2317 names = kmalloc(total_vqs * sizeof(*names), GFP_KERNEL); 2318 if (!names) 2319 goto err_names; 2320 if (!vi->big_packets || vi->mergeable_rx_bufs) { 2321 ctx = kzalloc(total_vqs * sizeof(*ctx), GFP_KERNEL); 2322 if (!ctx) 2323 goto err_ctx; 2324 } else { 2325 ctx = NULL; 2326 } 2327 2328 /* Parameters for control virtqueue, if any */ 2329 if (vi->has_cvq) { 2330 callbacks[total_vqs - 1] = NULL; 2331 names[total_vqs - 1] = "control"; 2332 } 2333 2334 /* Allocate/initialize parameters for send/receive virtqueues */ 2335 for (i = 0; i < vi->max_queue_pairs; i++) { 2336 callbacks[rxq2vq(i)] = skb_recv_done; 2337 callbacks[txq2vq(i)] = skb_xmit_done; 2338 sprintf(vi->rq[i].name, "input.%d", i); 2339 sprintf(vi->sq[i].name, "output.%d", i); 2340 names[rxq2vq(i)] = vi->rq[i].name; 2341 names[txq2vq(i)] = vi->sq[i].name; 2342 if (ctx) 2343 ctx[rxq2vq(i)] = true; 2344 } 2345 2346 ret = vi->vdev->config->find_vqs(vi->vdev, total_vqs, vqs, callbacks, 2347 names, ctx, NULL); 2348 if (ret) 2349 goto err_find; 2350 2351 if (vi->has_cvq) { 2352 vi->cvq = vqs[total_vqs - 1]; 2353 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VLAN)) 2354 vi->dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; 2355 } 2356 2357 for (i = 0; i < vi->max_queue_pairs; i++) { 2358 vi->rq[i].vq = vqs[rxq2vq(i)]; 2359 vi->rq[i].min_buf_len = mergeable_min_buf_len(vi, vi->rq[i].vq); 2360 vi->sq[i].vq = vqs[txq2vq(i)]; 2361 } 2362 2363 kfree(names); 2364 kfree(callbacks); 2365 kfree(vqs); 2366 kfree(ctx); 2367 2368 return 0; 2369 2370 err_find: 2371 kfree(ctx); 2372 err_ctx: 2373 kfree(names); 2374 err_names: 2375 kfree(callbacks); 2376 err_callback: 2377 kfree(vqs); 2378 err_vq: 2379 return ret; 2380 } 2381 2382 static int virtnet_alloc_queues(struct virtnet_info *vi) 2383 { 2384 int i; 2385 2386 vi->sq = kzalloc(sizeof(*vi->sq) * vi->max_queue_pairs, GFP_KERNEL); 2387 if (!vi->sq) 2388 goto err_sq; 2389 vi->rq = kzalloc(sizeof(*vi->rq) * vi->max_queue_pairs, GFP_KERNEL); 2390 if (!vi->rq) 2391 goto err_rq; 2392 2393 INIT_DELAYED_WORK(&vi->refill, refill_work); 2394 for (i = 0; i < vi->max_queue_pairs; i++) { 2395 vi->rq[i].pages = NULL; 2396 netif_napi_add(vi->dev, &vi->rq[i].napi, virtnet_poll, 2397 napi_weight); 2398 netif_tx_napi_add(vi->dev, &vi->sq[i].napi, virtnet_poll_tx, 2399 napi_tx ? napi_weight : 0); 2400 2401 sg_init_table(vi->rq[i].sg, ARRAY_SIZE(vi->rq[i].sg)); 2402 ewma_pkt_len_init(&vi->rq[i].mrg_avg_pkt_len); 2403 sg_init_table(vi->sq[i].sg, ARRAY_SIZE(vi->sq[i].sg)); 2404 } 2405 2406 return 0; 2407 2408 err_rq: 2409 kfree(vi->sq); 2410 err_sq: 2411 return -ENOMEM; 2412 } 2413 2414 static int init_vqs(struct virtnet_info *vi) 2415 { 2416 int ret; 2417 2418 /* Allocate send & receive queues */ 2419 ret = virtnet_alloc_queues(vi); 2420 if (ret) 2421 goto err; 2422 2423 ret = virtnet_find_vqs(vi); 2424 if (ret) 2425 goto err_free; 2426 2427 get_online_cpus(); 2428 virtnet_set_affinity(vi); 2429 put_online_cpus(); 2430 2431 return 0; 2432 2433 err_free: 2434 virtnet_free_queues(vi); 2435 err: 2436 return ret; 2437 } 2438 2439 #ifdef CONFIG_SYSFS 2440 static ssize_t mergeable_rx_buffer_size_show(struct netdev_rx_queue *queue, 2441 char *buf) 2442 { 2443 struct virtnet_info *vi = netdev_priv(queue->dev); 2444 unsigned int queue_index = get_netdev_rx_queue_index(queue); 2445 struct ewma_pkt_len *avg; 2446 2447 BUG_ON(queue_index >= vi->max_queue_pairs); 2448 avg = &vi->rq[queue_index].mrg_avg_pkt_len; 2449 return sprintf(buf, "%u\n", 2450 get_mergeable_buf_len(&vi->rq[queue_index], avg)); 2451 } 2452 2453 static struct rx_queue_attribute mergeable_rx_buffer_size_attribute = 2454 __ATTR_RO(mergeable_rx_buffer_size); 2455 2456 static struct attribute *virtio_net_mrg_rx_attrs[] = { 2457 &mergeable_rx_buffer_size_attribute.attr, 2458 NULL 2459 }; 2460 2461 static const struct attribute_group virtio_net_mrg_rx_group = { 2462 .name = "virtio_net", 2463 .attrs = virtio_net_mrg_rx_attrs 2464 }; 2465 #endif 2466 2467 static bool virtnet_fail_on_feature(struct virtio_device *vdev, 2468 unsigned int fbit, 2469 const char *fname, const char *dname) 2470 { 2471 if (!virtio_has_feature(vdev, fbit)) 2472 return false; 2473 2474 dev_err(&vdev->dev, "device advertises feature %s but not %s", 2475 fname, dname); 2476 2477 return true; 2478 } 2479 2480 #define VIRTNET_FAIL_ON(vdev, fbit, dbit) \ 2481 virtnet_fail_on_feature(vdev, fbit, #fbit, dbit) 2482 2483 static bool virtnet_validate_features(struct virtio_device *vdev) 2484 { 2485 if (!virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ) && 2486 (VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_CTRL_RX, 2487 "VIRTIO_NET_F_CTRL_VQ") || 2488 VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_CTRL_VLAN, 2489 "VIRTIO_NET_F_CTRL_VQ") || 2490 VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_GUEST_ANNOUNCE, 2491 "VIRTIO_NET_F_CTRL_VQ") || 2492 VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_MQ, "VIRTIO_NET_F_CTRL_VQ") || 2493 VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR, 2494 "VIRTIO_NET_F_CTRL_VQ"))) { 2495 return false; 2496 } 2497 2498 return true; 2499 } 2500 2501 #define MIN_MTU ETH_MIN_MTU 2502 #define MAX_MTU ETH_MAX_MTU 2503 2504 static int virtnet_validate(struct virtio_device *vdev) 2505 { 2506 if (!vdev->config->get) { 2507 dev_err(&vdev->dev, "%s failure: config access disabled\n", 2508 __func__); 2509 return -EINVAL; 2510 } 2511 2512 if (!virtnet_validate_features(vdev)) 2513 return -EINVAL; 2514 2515 if (virtio_has_feature(vdev, VIRTIO_NET_F_MTU)) { 2516 int mtu = virtio_cread16(vdev, 2517 offsetof(struct virtio_net_config, 2518 mtu)); 2519 if (mtu < MIN_MTU) 2520 __virtio_clear_bit(vdev, VIRTIO_NET_F_MTU); 2521 } 2522 2523 return 0; 2524 } 2525 2526 static int virtnet_probe(struct virtio_device *vdev) 2527 { 2528 int i, err; 2529 struct net_device *dev; 2530 struct virtnet_info *vi; 2531 u16 max_queue_pairs; 2532 int mtu; 2533 2534 /* Find if host supports multiqueue virtio_net device */ 2535 err = virtio_cread_feature(vdev, VIRTIO_NET_F_MQ, 2536 struct virtio_net_config, 2537 max_virtqueue_pairs, &max_queue_pairs); 2538 2539 /* We need at least 2 queue's */ 2540 if (err || max_queue_pairs < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN || 2541 max_queue_pairs > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX || 2542 !virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ)) 2543 max_queue_pairs = 1; 2544 2545 /* Allocate ourselves a network device with room for our info */ 2546 dev = alloc_etherdev_mq(sizeof(struct virtnet_info), max_queue_pairs); 2547 if (!dev) 2548 return -ENOMEM; 2549 2550 /* Set up network device as normal. */ 2551 dev->priv_flags |= IFF_UNICAST_FLT | IFF_LIVE_ADDR_CHANGE; 2552 dev->netdev_ops = &virtnet_netdev; 2553 dev->features = NETIF_F_HIGHDMA; 2554 2555 dev->ethtool_ops = &virtnet_ethtool_ops; 2556 SET_NETDEV_DEV(dev, &vdev->dev); 2557 2558 /* Do we support "hardware" checksums? */ 2559 if (virtio_has_feature(vdev, VIRTIO_NET_F_CSUM)) { 2560 /* This opens up the world of extra features. */ 2561 dev->hw_features |= NETIF_F_HW_CSUM | NETIF_F_SG; 2562 if (csum) 2563 dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG; 2564 2565 if (virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) { 2566 dev->hw_features |= NETIF_F_TSO 2567 | NETIF_F_TSO_ECN | NETIF_F_TSO6; 2568 } 2569 /* Individual feature bits: what can host handle? */ 2570 if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO4)) 2571 dev->hw_features |= NETIF_F_TSO; 2572 if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO6)) 2573 dev->hw_features |= NETIF_F_TSO6; 2574 if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_ECN)) 2575 dev->hw_features |= NETIF_F_TSO_ECN; 2576 2577 dev->features |= NETIF_F_GSO_ROBUST; 2578 2579 if (gso) 2580 dev->features |= dev->hw_features & NETIF_F_ALL_TSO; 2581 /* (!csum && gso) case will be fixed by register_netdev() */ 2582 } 2583 if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_CSUM)) 2584 dev->features |= NETIF_F_RXCSUM; 2585 2586 dev->vlan_features = dev->features; 2587 2588 /* MTU range: 68 - 65535 */ 2589 dev->min_mtu = MIN_MTU; 2590 dev->max_mtu = MAX_MTU; 2591 2592 /* Configuration may specify what MAC to use. Otherwise random. */ 2593 if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC)) 2594 virtio_cread_bytes(vdev, 2595 offsetof(struct virtio_net_config, mac), 2596 dev->dev_addr, dev->addr_len); 2597 else 2598 eth_hw_addr_random(dev); 2599 2600 /* Set up our device-specific information */ 2601 vi = netdev_priv(dev); 2602 vi->dev = dev; 2603 vi->vdev = vdev; 2604 vdev->priv = vi; 2605 vi->stats = alloc_percpu(struct virtnet_stats); 2606 err = -ENOMEM; 2607 if (vi->stats == NULL) 2608 goto free; 2609 2610 for_each_possible_cpu(i) { 2611 struct virtnet_stats *virtnet_stats; 2612 virtnet_stats = per_cpu_ptr(vi->stats, i); 2613 u64_stats_init(&virtnet_stats->tx_syncp); 2614 u64_stats_init(&virtnet_stats->rx_syncp); 2615 } 2616 2617 INIT_WORK(&vi->config_work, virtnet_config_changed_work); 2618 2619 /* If we can receive ANY GSO packets, we must allocate large ones. */ 2620 if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) || 2621 virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6) || 2622 virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ECN) || 2623 virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_UFO)) 2624 vi->big_packets = true; 2625 2626 if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF)) 2627 vi->mergeable_rx_bufs = true; 2628 2629 if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF) || 2630 virtio_has_feature(vdev, VIRTIO_F_VERSION_1)) 2631 vi->hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf); 2632 else 2633 vi->hdr_len = sizeof(struct virtio_net_hdr); 2634 2635 if (virtio_has_feature(vdev, VIRTIO_F_ANY_LAYOUT) || 2636 virtio_has_feature(vdev, VIRTIO_F_VERSION_1)) 2637 vi->any_header_sg = true; 2638 2639 if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ)) 2640 vi->has_cvq = true; 2641 2642 if (virtio_has_feature(vdev, VIRTIO_NET_F_MTU)) { 2643 mtu = virtio_cread16(vdev, 2644 offsetof(struct virtio_net_config, 2645 mtu)); 2646 if (mtu < dev->min_mtu) { 2647 /* Should never trigger: MTU was previously validated 2648 * in virtnet_validate. 2649 */ 2650 dev_err(&vdev->dev, "device MTU appears to have changed " 2651 "it is now %d < %d", mtu, dev->min_mtu); 2652 goto free_stats; 2653 } 2654 2655 dev->mtu = mtu; 2656 dev->max_mtu = mtu; 2657 2658 /* TODO: size buffers correctly in this case. */ 2659 if (dev->mtu > ETH_DATA_LEN) 2660 vi->big_packets = true; 2661 } 2662 2663 if (vi->any_header_sg) 2664 dev->needed_headroom = vi->hdr_len; 2665 2666 /* Enable multiqueue by default */ 2667 if (num_online_cpus() >= max_queue_pairs) 2668 vi->curr_queue_pairs = max_queue_pairs; 2669 else 2670 vi->curr_queue_pairs = num_online_cpus(); 2671 vi->max_queue_pairs = max_queue_pairs; 2672 2673 /* Allocate/initialize the rx/tx queues, and invoke find_vqs */ 2674 err = init_vqs(vi); 2675 if (err) 2676 goto free_stats; 2677 2678 #ifdef CONFIG_SYSFS 2679 if (vi->mergeable_rx_bufs) 2680 dev->sysfs_rx_queue_group = &virtio_net_mrg_rx_group; 2681 #endif 2682 netif_set_real_num_tx_queues(dev, vi->curr_queue_pairs); 2683 netif_set_real_num_rx_queues(dev, vi->curr_queue_pairs); 2684 2685 virtnet_init_settings(dev); 2686 2687 err = register_netdev(dev); 2688 if (err) { 2689 pr_debug("virtio_net: registering device failed\n"); 2690 goto free_vqs; 2691 } 2692 2693 virtio_device_ready(vdev); 2694 2695 err = virtnet_cpu_notif_add(vi); 2696 if (err) { 2697 pr_debug("virtio_net: registering cpu notifier failed\n"); 2698 goto free_unregister_netdev; 2699 } 2700 2701 virtnet_set_queues(vi, vi->curr_queue_pairs); 2702 2703 /* Assume link up if device can't report link status, 2704 otherwise get link status from config. */ 2705 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) { 2706 netif_carrier_off(dev); 2707 schedule_work(&vi->config_work); 2708 } else { 2709 vi->status = VIRTIO_NET_S_LINK_UP; 2710 netif_carrier_on(dev); 2711 } 2712 2713 for (i = 0; i < ARRAY_SIZE(guest_offloads); i++) 2714 if (virtio_has_feature(vi->vdev, guest_offloads[i])) 2715 set_bit(guest_offloads[i], &vi->guest_offloads); 2716 2717 pr_debug("virtnet: registered device %s with %d RX and TX vq's\n", 2718 dev->name, max_queue_pairs); 2719 2720 return 0; 2721 2722 free_unregister_netdev: 2723 vi->vdev->config->reset(vdev); 2724 2725 unregister_netdev(dev); 2726 free_vqs: 2727 cancel_delayed_work_sync(&vi->refill); 2728 free_receive_page_frags(vi); 2729 virtnet_del_vqs(vi); 2730 free_stats: 2731 free_percpu(vi->stats); 2732 free: 2733 free_netdev(dev); 2734 return err; 2735 } 2736 2737 static void remove_vq_common(struct virtnet_info *vi) 2738 { 2739 vi->vdev->config->reset(vi->vdev); 2740 2741 /* Free unused buffers in both send and recv, if any. */ 2742 free_unused_bufs(vi); 2743 2744 free_receive_bufs(vi); 2745 2746 free_receive_page_frags(vi); 2747 2748 virtnet_del_vqs(vi); 2749 } 2750 2751 static void virtnet_remove(struct virtio_device *vdev) 2752 { 2753 struct virtnet_info *vi = vdev->priv; 2754 2755 virtnet_cpu_notif_remove(vi); 2756 2757 /* Make sure no work handler is accessing the device. */ 2758 flush_work(&vi->config_work); 2759 2760 unregister_netdev(vi->dev); 2761 2762 remove_vq_common(vi); 2763 2764 free_percpu(vi->stats); 2765 free_netdev(vi->dev); 2766 } 2767 2768 static __maybe_unused int virtnet_freeze(struct virtio_device *vdev) 2769 { 2770 struct virtnet_info *vi = vdev->priv; 2771 2772 virtnet_cpu_notif_remove(vi); 2773 virtnet_freeze_down(vdev); 2774 remove_vq_common(vi); 2775 2776 return 0; 2777 } 2778 2779 static __maybe_unused int virtnet_restore(struct virtio_device *vdev) 2780 { 2781 struct virtnet_info *vi = vdev->priv; 2782 int err; 2783 2784 err = virtnet_restore_up(vdev); 2785 if (err) 2786 return err; 2787 virtnet_set_queues(vi, vi->curr_queue_pairs); 2788 2789 err = virtnet_cpu_notif_add(vi); 2790 if (err) 2791 return err; 2792 2793 return 0; 2794 } 2795 2796 static struct virtio_device_id id_table[] = { 2797 { VIRTIO_ID_NET, VIRTIO_DEV_ANY_ID }, 2798 { 0 }, 2799 }; 2800 2801 #define VIRTNET_FEATURES \ 2802 VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GUEST_CSUM, \ 2803 VIRTIO_NET_F_MAC, \ 2804 VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6, \ 2805 VIRTIO_NET_F_HOST_ECN, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6, \ 2806 VIRTIO_NET_F_GUEST_ECN, VIRTIO_NET_F_GUEST_UFO, \ 2807 VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ, \ 2808 VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN, \ 2809 VIRTIO_NET_F_GUEST_ANNOUNCE, VIRTIO_NET_F_MQ, \ 2810 VIRTIO_NET_F_CTRL_MAC_ADDR, \ 2811 VIRTIO_NET_F_MTU, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS 2812 2813 static unsigned int features[] = { 2814 VIRTNET_FEATURES, 2815 }; 2816 2817 static unsigned int features_legacy[] = { 2818 VIRTNET_FEATURES, 2819 VIRTIO_NET_F_GSO, 2820 VIRTIO_F_ANY_LAYOUT, 2821 }; 2822 2823 static struct virtio_driver virtio_net_driver = { 2824 .feature_table = features, 2825 .feature_table_size = ARRAY_SIZE(features), 2826 .feature_table_legacy = features_legacy, 2827 .feature_table_size_legacy = ARRAY_SIZE(features_legacy), 2828 .driver.name = KBUILD_MODNAME, 2829 .driver.owner = THIS_MODULE, 2830 .id_table = id_table, 2831 .validate = virtnet_validate, 2832 .probe = virtnet_probe, 2833 .remove = virtnet_remove, 2834 .config_changed = virtnet_config_changed, 2835 #ifdef CONFIG_PM_SLEEP 2836 .freeze = virtnet_freeze, 2837 .restore = virtnet_restore, 2838 #endif 2839 }; 2840 2841 static __init int virtio_net_driver_init(void) 2842 { 2843 int ret; 2844 2845 ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "virtio/net:online", 2846 virtnet_cpu_online, 2847 virtnet_cpu_down_prep); 2848 if (ret < 0) 2849 goto out; 2850 virtionet_online = ret; 2851 ret = cpuhp_setup_state_multi(CPUHP_VIRT_NET_DEAD, "virtio/net:dead", 2852 NULL, virtnet_cpu_dead); 2853 if (ret) 2854 goto err_dead; 2855 2856 ret = register_virtio_driver(&virtio_net_driver); 2857 if (ret) 2858 goto err_virtio; 2859 return 0; 2860 err_virtio: 2861 cpuhp_remove_multi_state(CPUHP_VIRT_NET_DEAD); 2862 err_dead: 2863 cpuhp_remove_multi_state(virtionet_online); 2864 out: 2865 return ret; 2866 } 2867 module_init(virtio_net_driver_init); 2868 2869 static __exit void virtio_net_driver_exit(void) 2870 { 2871 unregister_virtio_driver(&virtio_net_driver); 2872 cpuhp_remove_multi_state(CPUHP_VIRT_NET_DEAD); 2873 cpuhp_remove_multi_state(virtionet_online); 2874 } 2875 module_exit(virtio_net_driver_exit); 2876 2877 MODULE_DEVICE_TABLE(virtio, id_table); 2878 MODULE_DESCRIPTION("Virtio network driver"); 2879 MODULE_LICENSE("GPL"); 2880