1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* A network driver using virtio. 3 * 4 * Copyright 2007 Rusty Russell <rusty@rustcorp.com.au> IBM Corporation 5 */ 6 //#define DEBUG 7 #include <linux/netdevice.h> 8 #include <linux/etherdevice.h> 9 #include <linux/ethtool.h> 10 #include <linux/module.h> 11 #include <linux/virtio.h> 12 #include <linux/virtio_net.h> 13 #include <linux/bpf.h> 14 #include <linux/bpf_trace.h> 15 #include <linux/scatterlist.h> 16 #include <linux/if_vlan.h> 17 #include <linux/slab.h> 18 #include <linux/cpu.h> 19 #include <linux/average.h> 20 #include <linux/filter.h> 21 #include <linux/kernel.h> 22 #include <net/route.h> 23 #include <net/xdp.h> 24 #include <net/net_failover.h> 25 #include <net/netdev_rx_queue.h> 26 27 static int napi_weight = NAPI_POLL_WEIGHT; 28 module_param(napi_weight, int, 0444); 29 30 static bool csum = true, gso = true, napi_tx = true; 31 module_param(csum, bool, 0444); 32 module_param(gso, bool, 0444); 33 module_param(napi_tx, bool, 0644); 34 35 /* FIXME: MTU in config. */ 36 #define GOOD_PACKET_LEN (ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN) 37 #define GOOD_COPY_LEN 128 38 39 #define VIRTNET_RX_PAD (NET_IP_ALIGN + NET_SKB_PAD) 40 41 /* Amount of XDP headroom to prepend to packets for use by xdp_adjust_head */ 42 #define VIRTIO_XDP_HEADROOM 256 43 44 /* Separating two types of XDP xmit */ 45 #define VIRTIO_XDP_TX BIT(0) 46 #define VIRTIO_XDP_REDIR BIT(1) 47 48 #define VIRTIO_XDP_FLAG BIT(0) 49 50 /* RX packet size EWMA. The average packet size is used to determine the packet 51 * buffer size when refilling RX rings. As the entire RX ring may be refilled 52 * at once, the weight is chosen so that the EWMA will be insensitive to short- 53 * term, transient changes in packet size. 54 */ 55 DECLARE_EWMA(pkt_len, 0, 64) 56 57 #define VIRTNET_DRIVER_VERSION "1.0.0" 58 59 static const unsigned long guest_offloads[] = { 60 VIRTIO_NET_F_GUEST_TSO4, 61 VIRTIO_NET_F_GUEST_TSO6, 62 VIRTIO_NET_F_GUEST_ECN, 63 VIRTIO_NET_F_GUEST_UFO, 64 VIRTIO_NET_F_GUEST_CSUM, 65 VIRTIO_NET_F_GUEST_USO4, 66 VIRTIO_NET_F_GUEST_USO6, 67 VIRTIO_NET_F_GUEST_HDRLEN 68 }; 69 70 #define GUEST_OFFLOAD_GRO_HW_MASK ((1ULL << VIRTIO_NET_F_GUEST_TSO4) | \ 71 (1ULL << VIRTIO_NET_F_GUEST_TSO6) | \ 72 (1ULL << VIRTIO_NET_F_GUEST_ECN) | \ 73 (1ULL << VIRTIO_NET_F_GUEST_UFO) | \ 74 (1ULL << VIRTIO_NET_F_GUEST_USO4) | \ 75 (1ULL << VIRTIO_NET_F_GUEST_USO6)) 76 77 struct virtnet_stat_desc { 78 char desc[ETH_GSTRING_LEN]; 79 size_t offset; 80 }; 81 82 struct virtnet_sq_stats { 83 struct u64_stats_sync syncp; 84 u64 packets; 85 u64 bytes; 86 u64 xdp_tx; 87 u64 xdp_tx_drops; 88 u64 kicks; 89 u64 tx_timeouts; 90 }; 91 92 struct virtnet_rq_stats { 93 struct u64_stats_sync syncp; 94 u64 packets; 95 u64 bytes; 96 u64 drops; 97 u64 xdp_packets; 98 u64 xdp_tx; 99 u64 xdp_redirects; 100 u64 xdp_drops; 101 u64 kicks; 102 }; 103 104 #define VIRTNET_SQ_STAT(m) offsetof(struct virtnet_sq_stats, m) 105 #define VIRTNET_RQ_STAT(m) offsetof(struct virtnet_rq_stats, m) 106 107 static const struct virtnet_stat_desc virtnet_sq_stats_desc[] = { 108 { "packets", VIRTNET_SQ_STAT(packets) }, 109 { "bytes", VIRTNET_SQ_STAT(bytes) }, 110 { "xdp_tx", VIRTNET_SQ_STAT(xdp_tx) }, 111 { "xdp_tx_drops", VIRTNET_SQ_STAT(xdp_tx_drops) }, 112 { "kicks", VIRTNET_SQ_STAT(kicks) }, 113 { "tx_timeouts", VIRTNET_SQ_STAT(tx_timeouts) }, 114 }; 115 116 static const struct virtnet_stat_desc virtnet_rq_stats_desc[] = { 117 { "packets", VIRTNET_RQ_STAT(packets) }, 118 { "bytes", VIRTNET_RQ_STAT(bytes) }, 119 { "drops", 
					VIRTNET_RQ_STAT(drops) },
	{ "xdp_packets",	VIRTNET_RQ_STAT(xdp_packets) },
	{ "xdp_tx",		VIRTNET_RQ_STAT(xdp_tx) },
	{ "xdp_redirects",	VIRTNET_RQ_STAT(xdp_redirects) },
	{ "xdp_drops",		VIRTNET_RQ_STAT(xdp_drops) },
	{ "kicks",		VIRTNET_RQ_STAT(kicks) },
};

#define VIRTNET_SQ_STATS_LEN	ARRAY_SIZE(virtnet_sq_stats_desc)
#define VIRTNET_RQ_STATS_LEN	ARRAY_SIZE(virtnet_rq_stats_desc)

struct virtnet_interrupt_coalesce {
	u32 max_packets;
	u32 max_usecs;
};

/* The DMA information for the pages allocated at a time. */
struct virtnet_rq_dma {
	dma_addr_t addr;
	u32 ref;
	u16 len;
	u16 need_sync;
};

/* Internal representation of a send virtqueue */
struct send_queue {
	/* Virtqueue associated with this send_queue */
	struct virtqueue *vq;

	/* TX: fragments + linear part + virtio header */
	struct scatterlist sg[MAX_SKB_FRAGS + 2];

	/* Name of the send queue: output.$index */
	char name[16];

	struct virtnet_sq_stats stats;

	struct virtnet_interrupt_coalesce intr_coal;

	struct napi_struct napi;

	/* Record whether sq is in reset state. */
	bool reset;
};

/* Internal representation of a receive virtqueue */
struct receive_queue {
	/* Virtqueue associated with this receive_queue */
	struct virtqueue *vq;

	struct napi_struct napi;

	struct bpf_prog __rcu *xdp_prog;

	struct virtnet_rq_stats stats;

	struct virtnet_interrupt_coalesce intr_coal;

	/* Chain pages by the private ptr. */
	struct page *pages;

	/* Average packet length for mergeable receive buffers. */
	struct ewma_pkt_len mrg_avg_pkt_len;

	/* Page frag for packet buffer allocation. */
	struct page_frag alloc_frag;

	/* RX: fragments + linear part + virtio header */
	struct scatterlist sg[MAX_SKB_FRAGS + 2];

	/* Min single buffer size for mergeable buffers case. */
	unsigned int min_buf_len;

	/* Name of this receive queue: input.$index */
	char name[16];

	struct xdp_rxq_info xdp_rxq;

	/* Record the last dma info so it can be freed once a new page is allocated. */
	struct virtnet_rq_dma *last_dma;

	/* Do the DMA mapping ourselves */
	bool do_dma;
};

/* This structure can contain the RSS message with the maximum settings for the
 * indirection table and key size.
 * Note that the default structure describing the RSS configuration,
 * virtio_net_rss_config, carries the same information but cannot hold the
 * table values.
 * In any case, the structure is passed to the virtio hw through sg_buf split
 * into parts, because the table sizes may differ according to the device
 * configuration.
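 *
 * For example, a full indirection table of VIRTIO_NET_RSS_MAX_TABLE_LEN (128)
 * u16 entries alone is 256 bytes and the key can be up to
 * VIRTIO_NET_RSS_MAX_KEY_SIZE (40) bytes, while a given device may negotiate
 * much smaller sizes - hence the split into separate sg entries.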
 */
#define VIRTIO_NET_RSS_MAX_KEY_SIZE	40
#define VIRTIO_NET_RSS_MAX_TABLE_LEN	128
struct virtio_net_ctrl_rss {
	u32 hash_types;
	u16 indirection_table_mask;
	u16 unclassified_queue;
	u16 indirection_table[VIRTIO_NET_RSS_MAX_TABLE_LEN];
	u16 max_tx_vq;
	u8 hash_key_length;
	u8 key[VIRTIO_NET_RSS_MAX_KEY_SIZE];
};

/* Control VQ buffers: protected by the rtnl lock */
struct control_buf {
	struct virtio_net_ctrl_hdr hdr;
	virtio_net_ctrl_ack status;
	struct virtio_net_ctrl_mq mq;
	u8 promisc;
	u8 allmulti;
	__virtio16 vid;
	__virtio64 offloads;
	struct virtio_net_ctrl_rss rss;
	struct virtio_net_ctrl_coal_tx coal_tx;
	struct virtio_net_ctrl_coal_rx coal_rx;
	struct virtio_net_ctrl_coal_vq coal_vq;
};

struct virtnet_info {
	struct virtio_device *vdev;
	struct virtqueue *cvq;
	struct net_device *dev;
	struct send_queue *sq;
	struct receive_queue *rq;
	unsigned int status;

	/* Max # of queue pairs supported by the device */
	u16 max_queue_pairs;

	/* # of queue pairs currently used by the driver */
	u16 curr_queue_pairs;

	/* # of XDP queue pairs currently used by the driver */
	u16 xdp_queue_pairs;

	/* xdp_queue_pairs may be 0 even when XDP is already loaded, so track this separately. */
	bool xdp_enabled;

	/* I like... big packets and I cannot lie! */
	bool big_packets;

	/* number of sg entries allocated for big packets */
	unsigned int big_packets_num_skbfrags;

	/* Host will merge rx buffers for big packets (shake it! shake it!) */
	bool mergeable_rx_bufs;

	/* Host supports rss and/or hash report */
	bool has_rss;
	bool has_rss_hash_report;
	u8 rss_key_size;
	u16 rss_indir_table_size;
	u32 rss_hash_types_supported;
	u32 rss_hash_types_saved;

	/* Has control virtqueue */
	bool has_cvq;

	/* Host can handle any s/g split between our header and packet data */
	bool any_header_sg;

	/* Packet virtio header size */
	u8 hdr_len;

	/* Work struct for delayed refilling if we run low on memory. */
	struct delayed_work refill;

	/* Is delayed refill enabled? */
	bool refill_enabled;

	/* The lock to synchronize the access to refill_enabled */
	spinlock_t refill_lock;

	/* Work struct for config space updates */
	struct work_struct config_work;

	/* Is the affinity hint set for virtqueues? */
	bool affinity_hint_set;

	/* CPU hotplug instances for online & dead */
	struct hlist_node node;
	struct hlist_node node_dead;

	struct control_buf *ctrl;

	/* Ethtool settings */
	u8 duplex;
	u32 speed;

	/* Interrupt coalescing settings */
	struct virtnet_interrupt_coalesce intr_coal_tx;
	struct virtnet_interrupt_coalesce intr_coal_rx;

	unsigned long guest_offloads;
	unsigned long guest_offloads_capable;

	/* failover when STANDBY feature enabled */
	struct failover *failover;
};

struct padded_vnet_hdr {
	struct virtio_net_hdr_v1_hash hdr;
	/*
	 * hdr is in a separate sg buffer, and the data sg buffer shares the
	 * same page with this header sg. This padding makes the next sg
	 * 16 byte aligned after the header.
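	 *
	 * For example: struct virtio_net_hdr_v1_hash is 20 bytes, so together
	 * with the 12 bytes of padding below the header part occupies 32 bytes
	 * and the data sg that follows starts 16-byte aligned.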
325 */ 326 char padding[12]; 327 }; 328 329 struct virtio_net_common_hdr { 330 union { 331 struct virtio_net_hdr hdr; 332 struct virtio_net_hdr_mrg_rxbuf mrg_hdr; 333 struct virtio_net_hdr_v1_hash hash_v1_hdr; 334 }; 335 }; 336 337 static void virtnet_rq_free_unused_buf(struct virtqueue *vq, void *buf); 338 static void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf); 339 340 static bool is_xdp_frame(void *ptr) 341 { 342 return (unsigned long)ptr & VIRTIO_XDP_FLAG; 343 } 344 345 static void *xdp_to_ptr(struct xdp_frame *ptr) 346 { 347 return (void *)((unsigned long)ptr | VIRTIO_XDP_FLAG); 348 } 349 350 static struct xdp_frame *ptr_to_xdp(void *ptr) 351 { 352 return (struct xdp_frame *)((unsigned long)ptr & ~VIRTIO_XDP_FLAG); 353 } 354 355 /* Converting between virtqueue no. and kernel tx/rx queue no. 356 * 0:rx0 1:tx0 2:rx1 3:tx1 ... 2N:rxN 2N+1:txN 2N+2:cvq 357 */ 358 static int vq2txq(struct virtqueue *vq) 359 { 360 return (vq->index - 1) / 2; 361 } 362 363 static int txq2vq(int txq) 364 { 365 return txq * 2 + 1; 366 } 367 368 static int vq2rxq(struct virtqueue *vq) 369 { 370 return vq->index / 2; 371 } 372 373 static int rxq2vq(int rxq) 374 { 375 return rxq * 2; 376 } 377 378 static inline struct virtio_net_common_hdr * 379 skb_vnet_common_hdr(struct sk_buff *skb) 380 { 381 return (struct virtio_net_common_hdr *)skb->cb; 382 } 383 384 /* 385 * private is used to chain pages for big packets, put the whole 386 * most recent used list in the beginning for reuse 387 */ 388 static void give_pages(struct receive_queue *rq, struct page *page) 389 { 390 struct page *end; 391 392 /* Find end of list, sew whole thing into vi->rq.pages. */ 393 for (end = page; end->private; end = (struct page *)end->private); 394 end->private = (unsigned long)rq->pages; 395 rq->pages = page; 396 } 397 398 static struct page *get_a_page(struct receive_queue *rq, gfp_t gfp_mask) 399 { 400 struct page *p = rq->pages; 401 402 if (p) { 403 rq->pages = (struct page *)p->private; 404 /* clear private here, it is used to chain pages */ 405 p->private = 0; 406 } else 407 p = alloc_page(gfp_mask); 408 return p; 409 } 410 411 static void enable_delayed_refill(struct virtnet_info *vi) 412 { 413 spin_lock_bh(&vi->refill_lock); 414 vi->refill_enabled = true; 415 spin_unlock_bh(&vi->refill_lock); 416 } 417 418 static void disable_delayed_refill(struct virtnet_info *vi) 419 { 420 spin_lock_bh(&vi->refill_lock); 421 vi->refill_enabled = false; 422 spin_unlock_bh(&vi->refill_lock); 423 } 424 425 static void virtqueue_napi_schedule(struct napi_struct *napi, 426 struct virtqueue *vq) 427 { 428 if (napi_schedule_prep(napi)) { 429 virtqueue_disable_cb(vq); 430 __napi_schedule(napi); 431 } 432 } 433 434 static void virtqueue_napi_complete(struct napi_struct *napi, 435 struct virtqueue *vq, int processed) 436 { 437 int opaque; 438 439 opaque = virtqueue_enable_cb_prepare(vq); 440 if (napi_complete_done(napi, processed)) { 441 if (unlikely(virtqueue_poll(vq, opaque))) 442 virtqueue_napi_schedule(napi, vq); 443 } else { 444 virtqueue_disable_cb(vq); 445 } 446 } 447 448 static void skb_xmit_done(struct virtqueue *vq) 449 { 450 struct virtnet_info *vi = vq->vdev->priv; 451 struct napi_struct *napi = &vi->sq[vq2txq(vq)].napi; 452 453 /* Suppress further interrupts. */ 454 virtqueue_disable_cb(vq); 455 456 if (napi->weight) 457 virtqueue_napi_schedule(napi, vq); 458 else 459 /* We were probably waiting for more output buffers. 
		 */
		netif_wake_subqueue(vi->dev, vq2txq(vq));
}

#define MRG_CTX_HEADER_SHIFT 22
static void *mergeable_len_to_ctx(unsigned int truesize,
				  unsigned int headroom)
{
	return (void *)(unsigned long)((headroom << MRG_CTX_HEADER_SHIFT) | truesize);
}

static unsigned int mergeable_ctx_to_headroom(void *mrg_ctx)
{
	return (unsigned long)mrg_ctx >> MRG_CTX_HEADER_SHIFT;
}

static unsigned int mergeable_ctx_to_truesize(void *mrg_ctx)
{
	return (unsigned long)mrg_ctx & ((1 << MRG_CTX_HEADER_SHIFT) - 1);
}

static struct sk_buff *virtnet_build_skb(void *buf, unsigned int buflen,
					 unsigned int headroom,
					 unsigned int len)
{
	struct sk_buff *skb;

	skb = build_skb(buf, buflen);
	if (unlikely(!skb))
		return NULL;

	skb_reserve(skb, headroom);
	skb_put(skb, len);

	return skb;
}

/* Called from bottom half context */
static struct sk_buff *page_to_skb(struct virtnet_info *vi,
				   struct receive_queue *rq,
				   struct page *page, unsigned int offset,
				   unsigned int len, unsigned int truesize,
				   unsigned int headroom)
{
	struct sk_buff *skb;
	struct virtio_net_common_hdr *hdr;
	unsigned int copy, hdr_len, hdr_padded_len;
	struct page *page_to_free = NULL;
	int tailroom, shinfo_size;
	char *p, *hdr_p, *buf;

	p = page_address(page) + offset;
	hdr_p = p;

	hdr_len = vi->hdr_len;
	if (vi->mergeable_rx_bufs)
		hdr_padded_len = hdr_len;
	else
		hdr_padded_len = sizeof(struct padded_vnet_hdr);

	buf = p - headroom;
	len -= hdr_len;
	offset += hdr_padded_len;
	p += hdr_padded_len;
	tailroom = truesize - headroom - hdr_padded_len - len;

	shinfo_size = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	/* Large enough packet with room for the shared info: build the skb
	 * around the existing buffer instead of copying.
	 */
	if (!NET_IP_ALIGN && len > GOOD_COPY_LEN && tailroom >= shinfo_size) {
		skb = virtnet_build_skb(buf, truesize, p - buf, len);
		if (unlikely(!skb))
			return NULL;

		page = (struct page *)page->private;
		if (page)
			give_pages(rq, page);
		goto ok;
	}

	/* copy small packet so we can reuse these pages for small data */
	skb = napi_alloc_skb(&rq->napi, GOOD_COPY_LEN);
	if (unlikely(!skb))
		return NULL;

	/* Copy the whole frame if it fits skb->head, otherwise
	 * we let virtio_net_hdr_to_skb() and GRO pull headers as needed.
	 */
	if (len <= skb_tailroom(skb))
		copy = len;
	else
		copy = ETH_HLEN;
	skb_put_data(skb, p, copy);

	len -= copy;
	offset += copy;

	if (vi->mergeable_rx_bufs) {
		if (len)
			skb_add_rx_frag(skb, 0, page, offset, len, truesize);
		else
			page_to_free = page;
		goto ok;
	}

	/*
	 * Verify that we can indeed put this data into a skb.
	 * This is here to handle cases when the device erroneously
	 * tries to receive more than is possible. This is usually
	 * the case of a broken device.
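	 * (With 4 KiB pages and the typical MAX_SKB_FRAGS of 17 this caps a
	 * frame at roughly 68 KiB of fragment data.)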
569 */ 570 if (unlikely(len > MAX_SKB_FRAGS * PAGE_SIZE)) { 571 net_dbg_ratelimited("%s: too much data\n", skb->dev->name); 572 dev_kfree_skb(skb); 573 return NULL; 574 } 575 BUG_ON(offset >= PAGE_SIZE); 576 while (len) { 577 unsigned int frag_size = min((unsigned)PAGE_SIZE - offset, len); 578 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, offset, 579 frag_size, truesize); 580 len -= frag_size; 581 page = (struct page *)page->private; 582 offset = 0; 583 } 584 585 if (page) 586 give_pages(rq, page); 587 588 ok: 589 hdr = skb_vnet_common_hdr(skb); 590 memcpy(hdr, hdr_p, hdr_len); 591 if (page_to_free) 592 put_page(page_to_free); 593 594 return skb; 595 } 596 597 static void virtnet_rq_unmap(struct receive_queue *rq, void *buf, u32 len) 598 { 599 struct page *page = virt_to_head_page(buf); 600 struct virtnet_rq_dma *dma; 601 void *head; 602 int offset; 603 604 head = page_address(page); 605 606 dma = head; 607 608 --dma->ref; 609 610 if (dma->need_sync && len) { 611 offset = buf - (head + sizeof(*dma)); 612 613 virtqueue_dma_sync_single_range_for_cpu(rq->vq, dma->addr, 614 offset, len, 615 DMA_FROM_DEVICE); 616 } 617 618 if (dma->ref) 619 return; 620 621 virtqueue_dma_unmap_single_attrs(rq->vq, dma->addr, dma->len, 622 DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC); 623 put_page(page); 624 } 625 626 static void *virtnet_rq_get_buf(struct receive_queue *rq, u32 *len, void **ctx) 627 { 628 void *buf; 629 630 buf = virtqueue_get_buf_ctx(rq->vq, len, ctx); 631 if (buf && rq->do_dma) 632 virtnet_rq_unmap(rq, buf, *len); 633 634 return buf; 635 } 636 637 static void *virtnet_rq_detach_unused_buf(struct receive_queue *rq) 638 { 639 void *buf; 640 641 buf = virtqueue_detach_unused_buf(rq->vq); 642 if (buf && rq->do_dma) 643 virtnet_rq_unmap(rq, buf, 0); 644 645 return buf; 646 } 647 648 static void virtnet_rq_init_one_sg(struct receive_queue *rq, void *buf, u32 len) 649 { 650 struct virtnet_rq_dma *dma; 651 dma_addr_t addr; 652 u32 offset; 653 void *head; 654 655 if (!rq->do_dma) { 656 sg_init_one(rq->sg, buf, len); 657 return; 658 } 659 660 head = page_address(rq->alloc_frag.page); 661 662 offset = buf - head; 663 664 dma = head; 665 666 addr = dma->addr - sizeof(*dma) + offset; 667 668 sg_init_table(rq->sg, 1); 669 rq->sg[0].dma_address = addr; 670 rq->sg[0].length = len; 671 } 672 673 static void *virtnet_rq_alloc(struct receive_queue *rq, u32 size, gfp_t gfp) 674 { 675 struct page_frag *alloc_frag = &rq->alloc_frag; 676 struct virtnet_rq_dma *dma; 677 void *buf, *head; 678 dma_addr_t addr; 679 680 if (unlikely(!skb_page_frag_refill(size, alloc_frag, gfp))) 681 return NULL; 682 683 head = page_address(alloc_frag->page); 684 685 if (rq->do_dma) { 686 dma = head; 687 688 /* new pages */ 689 if (!alloc_frag->offset) { 690 if (rq->last_dma) { 691 /* Now, the new page is allocated, the last dma 692 * will not be used. So the dma can be unmapped 693 * if the ref is 0. 694 */ 695 virtnet_rq_unmap(rq, rq->last_dma, 0); 696 rq->last_dma = NULL; 697 } 698 699 dma->len = alloc_frag->size - sizeof(*dma); 700 701 addr = virtqueue_dma_map_single_attrs(rq->vq, dma + 1, 702 dma->len, DMA_FROM_DEVICE, 0); 703 if (virtqueue_dma_mapping_error(rq->vq, addr)) 704 return NULL; 705 706 dma->addr = addr; 707 dma->need_sync = virtqueue_dma_need_sync(rq->vq, addr); 708 709 /* Add a reference to dma to prevent the entire dma from 710 * being released during error handling. This reference 711 * will be freed after the pages are no longer used. 
712 */ 713 get_page(alloc_frag->page); 714 dma->ref = 1; 715 alloc_frag->offset = sizeof(*dma); 716 717 rq->last_dma = dma; 718 } 719 720 ++dma->ref; 721 } 722 723 buf = head + alloc_frag->offset; 724 725 get_page(alloc_frag->page); 726 alloc_frag->offset += size; 727 728 return buf; 729 } 730 731 static void virtnet_rq_set_premapped(struct virtnet_info *vi) 732 { 733 int i; 734 735 /* disable for big mode */ 736 if (!vi->mergeable_rx_bufs && vi->big_packets) 737 return; 738 739 for (i = 0; i < vi->max_queue_pairs; i++) { 740 if (virtqueue_set_dma_premapped(vi->rq[i].vq)) 741 continue; 742 743 vi->rq[i].do_dma = true; 744 } 745 } 746 747 static void free_old_xmit_skbs(struct send_queue *sq, bool in_napi) 748 { 749 unsigned int len; 750 unsigned int packets = 0; 751 unsigned int bytes = 0; 752 void *ptr; 753 754 while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) { 755 if (likely(!is_xdp_frame(ptr))) { 756 struct sk_buff *skb = ptr; 757 758 pr_debug("Sent skb %p\n", skb); 759 760 bytes += skb->len; 761 napi_consume_skb(skb, in_napi); 762 } else { 763 struct xdp_frame *frame = ptr_to_xdp(ptr); 764 765 bytes += xdp_get_frame_len(frame); 766 xdp_return_frame(frame); 767 } 768 packets++; 769 } 770 771 /* Avoid overhead when no packets have been processed 772 * happens when called speculatively from start_xmit. 773 */ 774 if (!packets) 775 return; 776 777 u64_stats_update_begin(&sq->stats.syncp); 778 sq->stats.bytes += bytes; 779 sq->stats.packets += packets; 780 u64_stats_update_end(&sq->stats.syncp); 781 } 782 783 static bool is_xdp_raw_buffer_queue(struct virtnet_info *vi, int q) 784 { 785 if (q < (vi->curr_queue_pairs - vi->xdp_queue_pairs)) 786 return false; 787 else if (q < vi->curr_queue_pairs) 788 return true; 789 else 790 return false; 791 } 792 793 static void check_sq_full_and_disable(struct virtnet_info *vi, 794 struct net_device *dev, 795 struct send_queue *sq) 796 { 797 bool use_napi = sq->napi.weight; 798 int qnum; 799 800 qnum = sq - vi->sq; 801 802 /* If running out of space, stop queue to avoid getting packets that we 803 * are then unable to transmit. 804 * An alternative would be to force queuing layer to requeue the skb by 805 * returning NETDEV_TX_BUSY. However, NETDEV_TX_BUSY should not be 806 * returned in a normal path of operation: it means that driver is not 807 * maintaining the TX queue stop/start state properly, and causes 808 * the stack to do a non-trivial amount of useless work. 809 * Since most packets only take 1 or 2 ring slots, stopping the queue 810 * early means 16 slots are typically wasted. 811 */ 812 if (sq->vq->num_free < 2+MAX_SKB_FRAGS) { 813 netif_stop_subqueue(dev, qnum); 814 if (use_napi) { 815 if (unlikely(!virtqueue_enable_cb_delayed(sq->vq))) 816 virtqueue_napi_schedule(&sq->napi, sq->vq); 817 } else if (unlikely(!virtqueue_enable_cb_delayed(sq->vq))) { 818 /* More just got used, free them then recheck. 
			 */
			free_old_xmit_skbs(sq, false);
			if (sq->vq->num_free >= 2+MAX_SKB_FRAGS) {
				netif_start_subqueue(dev, qnum);
				virtqueue_disable_cb(sq->vq);
			}
		}
	}
}

static int __virtnet_xdp_xmit_one(struct virtnet_info *vi,
				  struct send_queue *sq,
				  struct xdp_frame *xdpf)
{
	struct virtio_net_hdr_mrg_rxbuf *hdr;
	struct skb_shared_info *shinfo;
	u8 nr_frags = 0;
	int err, i;

	if (unlikely(xdpf->headroom < vi->hdr_len))
		return -EOVERFLOW;

	if (unlikely(xdp_frame_has_frags(xdpf))) {
		shinfo = xdp_get_shared_info_from_frame(xdpf);
		nr_frags = shinfo->nr_frags;
	}

	/* The wrapping function virtnet_xdp_xmit() needs to free up the
	 * pending old buffers, and xdp_get_frame_len() and xdp_return_frame()
	 * compute the position of skb_shared_info from xdpf->data and
	 * xdpf->headroom. Therefore, the headroom needs to be updated here,
	 * in sync with the data pointer.
	 */
	xdpf->headroom -= vi->hdr_len;
	xdpf->data -= vi->hdr_len;
	/* Zero header and leave csum up to XDP layers */
	hdr = xdpf->data;
	memset(hdr, 0, vi->hdr_len);
	xdpf->len += vi->hdr_len;

	sg_init_table(sq->sg, nr_frags + 1);
	sg_set_buf(sq->sg, xdpf->data, xdpf->len);
	for (i = 0; i < nr_frags; i++) {
		skb_frag_t *frag = &shinfo->frags[i];

		sg_set_page(&sq->sg[i + 1], skb_frag_page(frag),
			    skb_frag_size(frag), skb_frag_off(frag));
	}

	err = virtqueue_add_outbuf(sq->vq, sq->sg, nr_frags + 1,
				   xdp_to_ptr(xdpf), GFP_ATOMIC);
	if (unlikely(err))
		return -ENOSPC; /* Caller handles free/refcnt */

	return 0;
}

/* When vi->curr_queue_pairs > nr_cpu_ids, the txq/sq is only used for xdp tx on
 * the current cpu, so it does not need to be locked.
 *
 * Here we use a macro instead of inline functions because we have to deal with
 * three issues at the same time: 1. the choice of sq. 2. whether and how to
 * lock/unlock the txq. 3. make sparse happy. It is difficult for two inline
 * functions to perfectly solve these three problems at the same time.
 */
#define virtnet_xdp_get_sq(vi) ({					\
	int cpu = smp_processor_id();					\
	struct netdev_queue *txq;					\
	typeof(vi) v = (vi);						\
	unsigned int qp;						\
									\
	if (v->curr_queue_pairs > nr_cpu_ids) {				\
		qp = v->curr_queue_pairs - v->xdp_queue_pairs;		\
		qp += cpu;						\
		txq = netdev_get_tx_queue(v->dev, qp);			\
		__netif_tx_acquire(txq);				\
	} else {							\
		qp = cpu % v->curr_queue_pairs;				\
		txq = netdev_get_tx_queue(v->dev, qp);			\
		__netif_tx_lock(txq, cpu);				\
	}								\
	v->sq + qp;							\
})

#define virtnet_xdp_put_sq(vi, q) {					\
	struct netdev_queue *txq;					\
	typeof(vi) v = (vi);						\
									\
	txq = netdev_get_tx_queue(v->dev, (q) - v->sq);			\
	if (v->curr_queue_pairs > nr_cpu_ids)				\
		__netif_tx_release(txq);				\
	else								\
		__netif_tx_unlock(txq);					\
}

static int virtnet_xdp_xmit(struct net_device *dev,
			    int n, struct xdp_frame **frames, u32 flags)
{
	struct virtnet_info *vi = netdev_priv(dev);
	struct receive_queue *rq = vi->rq;
	struct bpf_prog *xdp_prog;
	struct send_queue *sq;
	unsigned int len;
	int packets = 0;
	int bytes = 0;
	int nxmit = 0;
	int kicks = 0;
	void *ptr;
	int ret;
	int i;

	/* Only allow ndo_xdp_xmit if XDP is loaded on dev, as this
	 * indicates XDP resources have been successfully allocated.
932 */ 933 xdp_prog = rcu_access_pointer(rq->xdp_prog); 934 if (!xdp_prog) 935 return -ENXIO; 936 937 sq = virtnet_xdp_get_sq(vi); 938 939 if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) { 940 ret = -EINVAL; 941 goto out; 942 } 943 944 /* Free up any pending old buffers before queueing new ones. */ 945 while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) { 946 if (likely(is_xdp_frame(ptr))) { 947 struct xdp_frame *frame = ptr_to_xdp(ptr); 948 949 bytes += xdp_get_frame_len(frame); 950 xdp_return_frame(frame); 951 } else { 952 struct sk_buff *skb = ptr; 953 954 bytes += skb->len; 955 napi_consume_skb(skb, false); 956 } 957 packets++; 958 } 959 960 for (i = 0; i < n; i++) { 961 struct xdp_frame *xdpf = frames[i]; 962 963 if (__virtnet_xdp_xmit_one(vi, sq, xdpf)) 964 break; 965 nxmit++; 966 } 967 ret = nxmit; 968 969 if (!is_xdp_raw_buffer_queue(vi, sq - vi->sq)) 970 check_sq_full_and_disable(vi, dev, sq); 971 972 if (flags & XDP_XMIT_FLUSH) { 973 if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq)) 974 kicks = 1; 975 } 976 out: 977 u64_stats_update_begin(&sq->stats.syncp); 978 sq->stats.bytes += bytes; 979 sq->stats.packets += packets; 980 sq->stats.xdp_tx += n; 981 sq->stats.xdp_tx_drops += n - nxmit; 982 sq->stats.kicks += kicks; 983 u64_stats_update_end(&sq->stats.syncp); 984 985 virtnet_xdp_put_sq(vi, sq); 986 return ret; 987 } 988 989 static void put_xdp_frags(struct xdp_buff *xdp) 990 { 991 struct skb_shared_info *shinfo; 992 struct page *xdp_page; 993 int i; 994 995 if (xdp_buff_has_frags(xdp)) { 996 shinfo = xdp_get_shared_info_from_buff(xdp); 997 for (i = 0; i < shinfo->nr_frags; i++) { 998 xdp_page = skb_frag_page(&shinfo->frags[i]); 999 put_page(xdp_page); 1000 } 1001 } 1002 } 1003 1004 static int virtnet_xdp_handler(struct bpf_prog *xdp_prog, struct xdp_buff *xdp, 1005 struct net_device *dev, 1006 unsigned int *xdp_xmit, 1007 struct virtnet_rq_stats *stats) 1008 { 1009 struct xdp_frame *xdpf; 1010 int err; 1011 u32 act; 1012 1013 act = bpf_prog_run_xdp(xdp_prog, xdp); 1014 stats->xdp_packets++; 1015 1016 switch (act) { 1017 case XDP_PASS: 1018 return act; 1019 1020 case XDP_TX: 1021 stats->xdp_tx++; 1022 xdpf = xdp_convert_buff_to_frame(xdp); 1023 if (unlikely(!xdpf)) { 1024 netdev_dbg(dev, "convert buff to frame failed for xdp\n"); 1025 return XDP_DROP; 1026 } 1027 1028 err = virtnet_xdp_xmit(dev, 1, &xdpf, 0); 1029 if (unlikely(!err)) { 1030 xdp_return_frame_rx_napi(xdpf); 1031 } else if (unlikely(err < 0)) { 1032 trace_xdp_exception(dev, xdp_prog, act); 1033 return XDP_DROP; 1034 } 1035 *xdp_xmit |= VIRTIO_XDP_TX; 1036 return act; 1037 1038 case XDP_REDIRECT: 1039 stats->xdp_redirects++; 1040 err = xdp_do_redirect(dev, xdp, xdp_prog); 1041 if (err) 1042 return XDP_DROP; 1043 1044 *xdp_xmit |= VIRTIO_XDP_REDIR; 1045 return act; 1046 1047 default: 1048 bpf_warn_invalid_xdp_action(dev, xdp_prog, act); 1049 fallthrough; 1050 case XDP_ABORTED: 1051 trace_xdp_exception(dev, xdp_prog, act); 1052 fallthrough; 1053 case XDP_DROP: 1054 return XDP_DROP; 1055 } 1056 } 1057 1058 static unsigned int virtnet_get_headroom(struct virtnet_info *vi) 1059 { 1060 return vi->xdp_enabled ? VIRTIO_XDP_HEADROOM : 0; 1061 } 1062 1063 /* We copy the packet for XDP in the following cases: 1064 * 1065 * 1) Packet is scattered across multiple rx buffers. 1066 * 2) Headroom space is insufficient. 
1067 * 1068 * This is inefficient but it's a temporary condition that 1069 * we hit right after XDP is enabled and until queue is refilled 1070 * with large buffers with sufficient headroom - so it should affect 1071 * at most queue size packets. 1072 * Afterwards, the conditions to enable 1073 * XDP should preclude the underlying device from sending packets 1074 * across multiple buffers (num_buf > 1), and we make sure buffers 1075 * have enough headroom. 1076 */ 1077 static struct page *xdp_linearize_page(struct receive_queue *rq, 1078 int *num_buf, 1079 struct page *p, 1080 int offset, 1081 int page_off, 1082 unsigned int *len) 1083 { 1084 int tailroom = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); 1085 struct page *page; 1086 1087 if (page_off + *len + tailroom > PAGE_SIZE) 1088 return NULL; 1089 1090 page = alloc_page(GFP_ATOMIC); 1091 if (!page) 1092 return NULL; 1093 1094 memcpy(page_address(page) + page_off, page_address(p) + offset, *len); 1095 page_off += *len; 1096 1097 while (--*num_buf) { 1098 unsigned int buflen; 1099 void *buf; 1100 int off; 1101 1102 buf = virtnet_rq_get_buf(rq, &buflen, NULL); 1103 if (unlikely(!buf)) 1104 goto err_buf; 1105 1106 p = virt_to_head_page(buf); 1107 off = buf - page_address(p); 1108 1109 /* guard against a misconfigured or uncooperative backend that 1110 * is sending packet larger than the MTU. 1111 */ 1112 if ((page_off + buflen + tailroom) > PAGE_SIZE) { 1113 put_page(p); 1114 goto err_buf; 1115 } 1116 1117 memcpy(page_address(page) + page_off, 1118 page_address(p) + off, buflen); 1119 page_off += buflen; 1120 put_page(p); 1121 } 1122 1123 /* Headroom does not contribute to packet length */ 1124 *len = page_off - VIRTIO_XDP_HEADROOM; 1125 return page; 1126 err_buf: 1127 __free_pages(page, 0); 1128 return NULL; 1129 } 1130 1131 static struct sk_buff *receive_small_build_skb(struct virtnet_info *vi, 1132 unsigned int xdp_headroom, 1133 void *buf, 1134 unsigned int len) 1135 { 1136 unsigned int header_offset; 1137 unsigned int headroom; 1138 unsigned int buflen; 1139 struct sk_buff *skb; 1140 1141 header_offset = VIRTNET_RX_PAD + xdp_headroom; 1142 headroom = vi->hdr_len + header_offset; 1143 buflen = SKB_DATA_ALIGN(GOOD_PACKET_LEN + headroom) + 1144 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); 1145 1146 skb = virtnet_build_skb(buf, buflen, headroom, len); 1147 if (unlikely(!skb)) 1148 return NULL; 1149 1150 buf += header_offset; 1151 memcpy(skb_vnet_common_hdr(skb), buf, vi->hdr_len); 1152 1153 return skb; 1154 } 1155 1156 static struct sk_buff *receive_small_xdp(struct net_device *dev, 1157 struct virtnet_info *vi, 1158 struct receive_queue *rq, 1159 struct bpf_prog *xdp_prog, 1160 void *buf, 1161 unsigned int xdp_headroom, 1162 unsigned int len, 1163 unsigned int *xdp_xmit, 1164 struct virtnet_rq_stats *stats) 1165 { 1166 unsigned int header_offset = VIRTNET_RX_PAD + xdp_headroom; 1167 unsigned int headroom = vi->hdr_len + header_offset; 1168 struct virtio_net_hdr_mrg_rxbuf *hdr = buf + header_offset; 1169 struct page *page = virt_to_head_page(buf); 1170 struct page *xdp_page; 1171 unsigned int buflen; 1172 struct xdp_buff xdp; 1173 struct sk_buff *skb; 1174 unsigned int metasize = 0; 1175 u32 act; 1176 1177 if (unlikely(hdr->hdr.gso_type)) 1178 goto err_xdp; 1179 1180 buflen = SKB_DATA_ALIGN(GOOD_PACKET_LEN + headroom) + 1181 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); 1182 1183 if (unlikely(xdp_headroom < virtnet_get_headroom(vi))) { 1184 int offset = buf - page_address(page) + header_offset; 1185 unsigned int tlen = len + 
vi->hdr_len; 1186 int num_buf = 1; 1187 1188 xdp_headroom = virtnet_get_headroom(vi); 1189 header_offset = VIRTNET_RX_PAD + xdp_headroom; 1190 headroom = vi->hdr_len + header_offset; 1191 buflen = SKB_DATA_ALIGN(GOOD_PACKET_LEN + headroom) + 1192 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); 1193 xdp_page = xdp_linearize_page(rq, &num_buf, page, 1194 offset, header_offset, 1195 &tlen); 1196 if (!xdp_page) 1197 goto err_xdp; 1198 1199 buf = page_address(xdp_page); 1200 put_page(page); 1201 page = xdp_page; 1202 } 1203 1204 xdp_init_buff(&xdp, buflen, &rq->xdp_rxq); 1205 xdp_prepare_buff(&xdp, buf + VIRTNET_RX_PAD + vi->hdr_len, 1206 xdp_headroom, len, true); 1207 1208 act = virtnet_xdp_handler(xdp_prog, &xdp, dev, xdp_xmit, stats); 1209 1210 switch (act) { 1211 case XDP_PASS: 1212 /* Recalculate length in case bpf program changed it */ 1213 len = xdp.data_end - xdp.data; 1214 metasize = xdp.data - xdp.data_meta; 1215 break; 1216 1217 case XDP_TX: 1218 case XDP_REDIRECT: 1219 goto xdp_xmit; 1220 1221 default: 1222 goto err_xdp; 1223 } 1224 1225 skb = virtnet_build_skb(buf, buflen, xdp.data - buf, len); 1226 if (unlikely(!skb)) 1227 goto err; 1228 1229 if (metasize) 1230 skb_metadata_set(skb, metasize); 1231 1232 return skb; 1233 1234 err_xdp: 1235 stats->xdp_drops++; 1236 err: 1237 stats->drops++; 1238 put_page(page); 1239 xdp_xmit: 1240 return NULL; 1241 } 1242 1243 static struct sk_buff *receive_small(struct net_device *dev, 1244 struct virtnet_info *vi, 1245 struct receive_queue *rq, 1246 void *buf, void *ctx, 1247 unsigned int len, 1248 unsigned int *xdp_xmit, 1249 struct virtnet_rq_stats *stats) 1250 { 1251 unsigned int xdp_headroom = (unsigned long)ctx; 1252 struct page *page = virt_to_head_page(buf); 1253 struct sk_buff *skb; 1254 1255 len -= vi->hdr_len; 1256 stats->bytes += len; 1257 1258 if (unlikely(len > GOOD_PACKET_LEN)) { 1259 pr_debug("%s: rx error: len %u exceeds max size %d\n", 1260 dev->name, len, GOOD_PACKET_LEN); 1261 dev->stats.rx_length_errors++; 1262 goto err; 1263 } 1264 1265 if (unlikely(vi->xdp_enabled)) { 1266 struct bpf_prog *xdp_prog; 1267 1268 rcu_read_lock(); 1269 xdp_prog = rcu_dereference(rq->xdp_prog); 1270 if (xdp_prog) { 1271 skb = receive_small_xdp(dev, vi, rq, xdp_prog, buf, 1272 xdp_headroom, len, xdp_xmit, 1273 stats); 1274 rcu_read_unlock(); 1275 return skb; 1276 } 1277 rcu_read_unlock(); 1278 } 1279 1280 skb = receive_small_build_skb(vi, xdp_headroom, buf, len); 1281 if (likely(skb)) 1282 return skb; 1283 1284 err: 1285 stats->drops++; 1286 put_page(page); 1287 return NULL; 1288 } 1289 1290 static struct sk_buff *receive_big(struct net_device *dev, 1291 struct virtnet_info *vi, 1292 struct receive_queue *rq, 1293 void *buf, 1294 unsigned int len, 1295 struct virtnet_rq_stats *stats) 1296 { 1297 struct page *page = buf; 1298 struct sk_buff *skb = 1299 page_to_skb(vi, rq, page, 0, len, PAGE_SIZE, 0); 1300 1301 stats->bytes += len - vi->hdr_len; 1302 if (unlikely(!skb)) 1303 goto err; 1304 1305 return skb; 1306 1307 err: 1308 stats->drops++; 1309 give_pages(rq, page); 1310 return NULL; 1311 } 1312 1313 static void mergeable_buf_free(struct receive_queue *rq, int num_buf, 1314 struct net_device *dev, 1315 struct virtnet_rq_stats *stats) 1316 { 1317 struct page *page; 1318 void *buf; 1319 int len; 1320 1321 while (num_buf-- > 1) { 1322 buf = virtnet_rq_get_buf(rq, &len, NULL); 1323 if (unlikely(!buf)) { 1324 pr_debug("%s: rx error: %d buffers missing\n", 1325 dev->name, num_buf); 1326 dev->stats.rx_length_errors++; 1327 break; 1328 } 1329 stats->bytes 
+= len; 1330 page = virt_to_head_page(buf); 1331 put_page(page); 1332 } 1333 } 1334 1335 /* Why not use xdp_build_skb_from_frame() ? 1336 * XDP core assumes that xdp frags are PAGE_SIZE in length, while in 1337 * virtio-net there are 2 points that do not match its requirements: 1338 * 1. The size of the prefilled buffer is not fixed before xdp is set. 1339 * 2. xdp_build_skb_from_frame() does more checks that we don't need, 1340 * like eth_type_trans() (which virtio-net does in receive_buf()). 1341 */ 1342 static struct sk_buff *build_skb_from_xdp_buff(struct net_device *dev, 1343 struct virtnet_info *vi, 1344 struct xdp_buff *xdp, 1345 unsigned int xdp_frags_truesz) 1346 { 1347 struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp); 1348 unsigned int headroom, data_len; 1349 struct sk_buff *skb; 1350 int metasize; 1351 u8 nr_frags; 1352 1353 if (unlikely(xdp->data_end > xdp_data_hard_end(xdp))) { 1354 pr_debug("Error building skb as missing reserved tailroom for xdp"); 1355 return NULL; 1356 } 1357 1358 if (unlikely(xdp_buff_has_frags(xdp))) 1359 nr_frags = sinfo->nr_frags; 1360 1361 skb = build_skb(xdp->data_hard_start, xdp->frame_sz); 1362 if (unlikely(!skb)) 1363 return NULL; 1364 1365 headroom = xdp->data - xdp->data_hard_start; 1366 data_len = xdp->data_end - xdp->data; 1367 skb_reserve(skb, headroom); 1368 __skb_put(skb, data_len); 1369 1370 metasize = xdp->data - xdp->data_meta; 1371 metasize = metasize > 0 ? metasize : 0; 1372 if (metasize) 1373 skb_metadata_set(skb, metasize); 1374 1375 if (unlikely(xdp_buff_has_frags(xdp))) 1376 xdp_update_skb_shared_info(skb, nr_frags, 1377 sinfo->xdp_frags_size, 1378 xdp_frags_truesz, 1379 xdp_buff_is_frag_pfmemalloc(xdp)); 1380 1381 return skb; 1382 } 1383 1384 /* TODO: build xdp in big mode */ 1385 static int virtnet_build_xdp_buff_mrg(struct net_device *dev, 1386 struct virtnet_info *vi, 1387 struct receive_queue *rq, 1388 struct xdp_buff *xdp, 1389 void *buf, 1390 unsigned int len, 1391 unsigned int frame_sz, 1392 int *num_buf, 1393 unsigned int *xdp_frags_truesize, 1394 struct virtnet_rq_stats *stats) 1395 { 1396 struct virtio_net_hdr_mrg_rxbuf *hdr = buf; 1397 unsigned int headroom, tailroom, room; 1398 unsigned int truesize, cur_frag_size; 1399 struct skb_shared_info *shinfo; 1400 unsigned int xdp_frags_truesz = 0; 1401 struct page *page; 1402 skb_frag_t *frag; 1403 int offset; 1404 void *ctx; 1405 1406 xdp_init_buff(xdp, frame_sz, &rq->xdp_rxq); 1407 xdp_prepare_buff(xdp, buf - VIRTIO_XDP_HEADROOM, 1408 VIRTIO_XDP_HEADROOM + vi->hdr_len, len - vi->hdr_len, true); 1409 1410 if (!*num_buf) 1411 return 0; 1412 1413 if (*num_buf > 1) { 1414 /* If we want to build multi-buffer xdp, we need 1415 * to specify that the flags of xdp_buff have the 1416 * XDP_FLAGS_HAS_FRAG bit. 
		 */
		if (!xdp_buff_has_frags(xdp))
			xdp_buff_set_frags_flag(xdp);

		shinfo = xdp_get_shared_info_from_buff(xdp);
		shinfo->nr_frags = 0;
		shinfo->xdp_frags_size = 0;
	}

	if (*num_buf > MAX_SKB_FRAGS + 1)
		return -EINVAL;

	while (--*num_buf > 0) {
		buf = virtnet_rq_get_buf(rq, &len, &ctx);
		if (unlikely(!buf)) {
			pr_debug("%s: rx error: %d buffers out of %d missing\n",
				 dev->name, *num_buf,
				 virtio16_to_cpu(vi->vdev, hdr->num_buffers));
			dev->stats.rx_length_errors++;
			goto err;
		}

		stats->bytes += len;
		page = virt_to_head_page(buf);
		offset = buf - page_address(page);

		truesize = mergeable_ctx_to_truesize(ctx);
		headroom = mergeable_ctx_to_headroom(ctx);
		tailroom = headroom ? sizeof(struct skb_shared_info) : 0;
		room = SKB_DATA_ALIGN(headroom + tailroom);

		cur_frag_size = truesize;
		xdp_frags_truesz += cur_frag_size;
		if (unlikely(len > truesize - room || cur_frag_size > PAGE_SIZE)) {
			put_page(page);
			pr_debug("%s: rx error: len %u exceeds truesize %lu\n",
				 dev->name, len, (unsigned long)(truesize - room));
			dev->stats.rx_length_errors++;
			goto err;
		}

		frag = &shinfo->frags[shinfo->nr_frags++];
		skb_frag_fill_page_desc(frag, page, offset, len);
		if (page_is_pfmemalloc(page))
			xdp_buff_set_frag_pfmemalloc(xdp);

		shinfo->xdp_frags_size += len;
	}

	*xdp_frags_truesize = xdp_frags_truesz;
	return 0;

err:
	put_xdp_frags(xdp);
	return -EINVAL;
}

static void *mergeable_xdp_get_buf(struct virtnet_info *vi,
				   struct receive_queue *rq,
				   struct bpf_prog *xdp_prog,
				   void *ctx,
				   unsigned int *frame_sz,
				   int *num_buf,
				   struct page **page,
				   int offset,
				   unsigned int *len,
				   struct virtio_net_hdr_mrg_rxbuf *hdr)
{
	unsigned int truesize = mergeable_ctx_to_truesize(ctx);
	unsigned int headroom = mergeable_ctx_to_headroom(ctx);
	struct page *xdp_page;
	unsigned int xdp_room;

	/* Transient failure which in theory could occur if
	 * in-flight packets from before XDP was enabled reach
	 * the receive path after XDP is loaded.
	 */
	if (unlikely(hdr->hdr.gso_type))
		return NULL;

	/* The XDP core assumes the frag size is PAGE_SIZE, but buffers
	 * with headroom may add a hole to the truesize, which
	 * makes their length exceed PAGE_SIZE. So we disable the
	 * hole mechanism for xdp. See add_recvbuf_mergeable().
	 */
	*frame_sz = truesize;

	if (likely(headroom >= virtnet_get_headroom(vi) &&
		   (*num_buf == 1 || xdp_prog->aux->xdp_has_frags))) {
		return page_address(*page) + offset;
	}

	/* This happens when the headroom is not enough because
	 * the buffer was prefilled before XDP was set.
	 * This should only happen for the first several packets.
	 * In fact, a vq reset could be used here to help clean up
	 * the prefilled buffers, but many existing devices do not
	 * support it, and we don't want to bother users who are
	 * using xdp normally.
1516 */ 1517 if (!xdp_prog->aux->xdp_has_frags) { 1518 /* linearize data for XDP */ 1519 xdp_page = xdp_linearize_page(rq, num_buf, 1520 *page, offset, 1521 VIRTIO_XDP_HEADROOM, 1522 len); 1523 if (!xdp_page) 1524 return NULL; 1525 } else { 1526 xdp_room = SKB_DATA_ALIGN(VIRTIO_XDP_HEADROOM + 1527 sizeof(struct skb_shared_info)); 1528 if (*len + xdp_room > PAGE_SIZE) 1529 return NULL; 1530 1531 xdp_page = alloc_page(GFP_ATOMIC); 1532 if (!xdp_page) 1533 return NULL; 1534 1535 memcpy(page_address(xdp_page) + VIRTIO_XDP_HEADROOM, 1536 page_address(*page) + offset, *len); 1537 } 1538 1539 *frame_sz = PAGE_SIZE; 1540 1541 put_page(*page); 1542 1543 *page = xdp_page; 1544 1545 return page_address(*page) + VIRTIO_XDP_HEADROOM; 1546 } 1547 1548 static struct sk_buff *receive_mergeable_xdp(struct net_device *dev, 1549 struct virtnet_info *vi, 1550 struct receive_queue *rq, 1551 struct bpf_prog *xdp_prog, 1552 void *buf, 1553 void *ctx, 1554 unsigned int len, 1555 unsigned int *xdp_xmit, 1556 struct virtnet_rq_stats *stats) 1557 { 1558 struct virtio_net_hdr_mrg_rxbuf *hdr = buf; 1559 int num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers); 1560 struct page *page = virt_to_head_page(buf); 1561 int offset = buf - page_address(page); 1562 unsigned int xdp_frags_truesz = 0; 1563 struct sk_buff *head_skb; 1564 unsigned int frame_sz; 1565 struct xdp_buff xdp; 1566 void *data; 1567 u32 act; 1568 int err; 1569 1570 data = mergeable_xdp_get_buf(vi, rq, xdp_prog, ctx, &frame_sz, &num_buf, &page, 1571 offset, &len, hdr); 1572 if (unlikely(!data)) 1573 goto err_xdp; 1574 1575 err = virtnet_build_xdp_buff_mrg(dev, vi, rq, &xdp, data, len, frame_sz, 1576 &num_buf, &xdp_frags_truesz, stats); 1577 if (unlikely(err)) 1578 goto err_xdp; 1579 1580 act = virtnet_xdp_handler(xdp_prog, &xdp, dev, xdp_xmit, stats); 1581 1582 switch (act) { 1583 case XDP_PASS: 1584 head_skb = build_skb_from_xdp_buff(dev, vi, &xdp, xdp_frags_truesz); 1585 if (unlikely(!head_skb)) 1586 break; 1587 return head_skb; 1588 1589 case XDP_TX: 1590 case XDP_REDIRECT: 1591 return NULL; 1592 1593 default: 1594 break; 1595 } 1596 1597 put_xdp_frags(&xdp); 1598 1599 err_xdp: 1600 put_page(page); 1601 mergeable_buf_free(rq, num_buf, dev, stats); 1602 1603 stats->xdp_drops++; 1604 stats->drops++; 1605 return NULL; 1606 } 1607 1608 static struct sk_buff *receive_mergeable(struct net_device *dev, 1609 struct virtnet_info *vi, 1610 struct receive_queue *rq, 1611 void *buf, 1612 void *ctx, 1613 unsigned int len, 1614 unsigned int *xdp_xmit, 1615 struct virtnet_rq_stats *stats) 1616 { 1617 struct virtio_net_hdr_mrg_rxbuf *hdr = buf; 1618 int num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers); 1619 struct page *page = virt_to_head_page(buf); 1620 int offset = buf - page_address(page); 1621 struct sk_buff *head_skb, *curr_skb; 1622 unsigned int truesize = mergeable_ctx_to_truesize(ctx); 1623 unsigned int headroom = mergeable_ctx_to_headroom(ctx); 1624 unsigned int tailroom = headroom ? 
sizeof(struct skb_shared_info) : 0; 1625 unsigned int room = SKB_DATA_ALIGN(headroom + tailroom); 1626 1627 head_skb = NULL; 1628 stats->bytes += len - vi->hdr_len; 1629 1630 if (unlikely(len > truesize - room)) { 1631 pr_debug("%s: rx error: len %u exceeds truesize %lu\n", 1632 dev->name, len, (unsigned long)(truesize - room)); 1633 dev->stats.rx_length_errors++; 1634 goto err_skb; 1635 } 1636 1637 if (unlikely(vi->xdp_enabled)) { 1638 struct bpf_prog *xdp_prog; 1639 1640 rcu_read_lock(); 1641 xdp_prog = rcu_dereference(rq->xdp_prog); 1642 if (xdp_prog) { 1643 head_skb = receive_mergeable_xdp(dev, vi, rq, xdp_prog, buf, ctx, 1644 len, xdp_xmit, stats); 1645 rcu_read_unlock(); 1646 return head_skb; 1647 } 1648 rcu_read_unlock(); 1649 } 1650 1651 head_skb = page_to_skb(vi, rq, page, offset, len, truesize, headroom); 1652 curr_skb = head_skb; 1653 1654 if (unlikely(!curr_skb)) 1655 goto err_skb; 1656 while (--num_buf) { 1657 int num_skb_frags; 1658 1659 buf = virtnet_rq_get_buf(rq, &len, &ctx); 1660 if (unlikely(!buf)) { 1661 pr_debug("%s: rx error: %d buffers out of %d missing\n", 1662 dev->name, num_buf, 1663 virtio16_to_cpu(vi->vdev, 1664 hdr->num_buffers)); 1665 dev->stats.rx_length_errors++; 1666 goto err_buf; 1667 } 1668 1669 stats->bytes += len; 1670 page = virt_to_head_page(buf); 1671 1672 truesize = mergeable_ctx_to_truesize(ctx); 1673 headroom = mergeable_ctx_to_headroom(ctx); 1674 tailroom = headroom ? sizeof(struct skb_shared_info) : 0; 1675 room = SKB_DATA_ALIGN(headroom + tailroom); 1676 if (unlikely(len > truesize - room)) { 1677 pr_debug("%s: rx error: len %u exceeds truesize %lu\n", 1678 dev->name, len, (unsigned long)(truesize - room)); 1679 dev->stats.rx_length_errors++; 1680 goto err_skb; 1681 } 1682 1683 num_skb_frags = skb_shinfo(curr_skb)->nr_frags; 1684 if (unlikely(num_skb_frags == MAX_SKB_FRAGS)) { 1685 struct sk_buff *nskb = alloc_skb(0, GFP_ATOMIC); 1686 1687 if (unlikely(!nskb)) 1688 goto err_skb; 1689 if (curr_skb == head_skb) 1690 skb_shinfo(curr_skb)->frag_list = nskb; 1691 else 1692 curr_skb->next = nskb; 1693 curr_skb = nskb; 1694 head_skb->truesize += nskb->truesize; 1695 num_skb_frags = 0; 1696 } 1697 if (curr_skb != head_skb) { 1698 head_skb->data_len += len; 1699 head_skb->len += len; 1700 head_skb->truesize += truesize; 1701 } 1702 offset = buf - page_address(page); 1703 if (skb_can_coalesce(curr_skb, num_skb_frags, page, offset)) { 1704 put_page(page); 1705 skb_coalesce_rx_frag(curr_skb, num_skb_frags - 1, 1706 len, truesize); 1707 } else { 1708 skb_add_rx_frag(curr_skb, num_skb_frags, page, 1709 offset, len, truesize); 1710 } 1711 } 1712 1713 ewma_pkt_len_add(&rq->mrg_avg_pkt_len, head_skb->len); 1714 return head_skb; 1715 1716 err_skb: 1717 put_page(page); 1718 mergeable_buf_free(rq, num_buf, dev, stats); 1719 1720 err_buf: 1721 stats->drops++; 1722 dev_kfree_skb(head_skb); 1723 return NULL; 1724 } 1725 1726 static void virtio_skb_set_hash(const struct virtio_net_hdr_v1_hash *hdr_hash, 1727 struct sk_buff *skb) 1728 { 1729 enum pkt_hash_types rss_hash_type; 1730 1731 if (!hdr_hash || !skb) 1732 return; 1733 1734 switch (__le16_to_cpu(hdr_hash->hash_report)) { 1735 case VIRTIO_NET_HASH_REPORT_TCPv4: 1736 case VIRTIO_NET_HASH_REPORT_UDPv4: 1737 case VIRTIO_NET_HASH_REPORT_TCPv6: 1738 case VIRTIO_NET_HASH_REPORT_UDPv6: 1739 case VIRTIO_NET_HASH_REPORT_TCPv6_EX: 1740 case VIRTIO_NET_HASH_REPORT_UDPv6_EX: 1741 rss_hash_type = PKT_HASH_TYPE_L4; 1742 break; 1743 case VIRTIO_NET_HASH_REPORT_IPv4: 1744 case VIRTIO_NET_HASH_REPORT_IPv6: 1745 case 
	VIRTIO_NET_HASH_REPORT_IPv6_EX:
		rss_hash_type = PKT_HASH_TYPE_L3;
		break;
	case VIRTIO_NET_HASH_REPORT_NONE:
	default:
		rss_hash_type = PKT_HASH_TYPE_NONE;
	}
	skb_set_hash(skb, __le32_to_cpu(hdr_hash->hash_value), rss_hash_type);
}

static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
			void *buf, unsigned int len, void **ctx,
			unsigned int *xdp_xmit,
			struct virtnet_rq_stats *stats)
{
	struct net_device *dev = vi->dev;
	struct sk_buff *skb;
	struct virtio_net_common_hdr *hdr;

	if (unlikely(len < vi->hdr_len + ETH_HLEN)) {
		pr_debug("%s: short packet %i\n", dev->name, len);
		dev->stats.rx_length_errors++;
		virtnet_rq_free_unused_buf(rq->vq, buf);
		return;
	}

	if (vi->mergeable_rx_bufs)
		skb = receive_mergeable(dev, vi, rq, buf, ctx, len, xdp_xmit,
					stats);
	else if (vi->big_packets)
		skb = receive_big(dev, vi, rq, buf, len, stats);
	else
		skb = receive_small(dev, vi, rq, buf, ctx, len, xdp_xmit, stats);

	if (unlikely(!skb))
		return;

	hdr = skb_vnet_common_hdr(skb);
	if (dev->features & NETIF_F_RXHASH && vi->has_rss_hash_report)
		virtio_skb_set_hash(&hdr->hash_v1_hdr, skb);

	if (hdr->hdr.flags & VIRTIO_NET_HDR_F_DATA_VALID)
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	if (virtio_net_hdr_to_skb(skb, &hdr->hdr,
				  virtio_is_little_endian(vi->vdev))) {
		net_warn_ratelimited("%s: bad gso: type: %u, size: %u\n",
				     dev->name, hdr->hdr.gso_type,
				     hdr->hdr.gso_size);
		goto frame_err;
	}

	skb_record_rx_queue(skb, vq2rxq(rq->vq));
	skb->protocol = eth_type_trans(skb, dev);
	pr_debug("Receiving skb proto 0x%04x len %i type %i\n",
		 ntohs(skb->protocol), skb->len, skb->pkt_type);

	napi_gro_receive(&rq->napi, skb);
	return;

frame_err:
	dev->stats.rx_frame_errors++;
	dev_kfree_skb(skb);
}

/* Unlike mergeable buffers, all buffers are allocated with the
 * same size, except for the headroom. For this reason we do
 * not need to use mergeable_len_to_ctx here - it is enough
 * to store the headroom as the context ignoring the truesize.
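 *
 * (add_recvbuf_small() below always allocates
 * SKB_DATA_ALIGN(hdr_len + VIRTNET_RX_PAD + GOOD_PACKET_LEN + headroom) +
 * SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) bytes per buffer.)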
 */
static int add_recvbuf_small(struct virtnet_info *vi, struct receive_queue *rq,
			     gfp_t gfp)
{
	char *buf;
	unsigned int xdp_headroom = virtnet_get_headroom(vi);
	void *ctx = (void *)(unsigned long)xdp_headroom;
	int len = vi->hdr_len + VIRTNET_RX_PAD + GOOD_PACKET_LEN + xdp_headroom;
	int err;

	len = SKB_DATA_ALIGN(len) +
	      SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	buf = virtnet_rq_alloc(rq, len, gfp);
	if (unlikely(!buf))
		return -ENOMEM;

	virtnet_rq_init_one_sg(rq, buf + VIRTNET_RX_PAD + xdp_headroom,
			       vi->hdr_len + GOOD_PACKET_LEN);

	err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp);
	if (err < 0) {
		if (rq->do_dma)
			virtnet_rq_unmap(rq, buf, 0);
		put_page(virt_to_head_page(buf));
	}

	return err;
}

static int add_recvbuf_big(struct virtnet_info *vi, struct receive_queue *rq,
			   gfp_t gfp)
{
	struct page *first, *list = NULL;
	char *p;
	int i, err, offset;

	sg_init_table(rq->sg, vi->big_packets_num_skbfrags + 2);

	/* page in rq->sg[vi->big_packets_num_skbfrags + 1] is list tail */
	for (i = vi->big_packets_num_skbfrags + 1; i > 1; --i) {
		first = get_a_page(rq, gfp);
		if (!first) {
			if (list)
				give_pages(rq, list);
			return -ENOMEM;
		}
		sg_set_buf(&rq->sg[i], page_address(first), PAGE_SIZE);

		/* chain new page in list head to match sg */
		first->private = (unsigned long)list;
		list = first;
	}

	first = get_a_page(rq, gfp);
	if (!first) {
		give_pages(rq, list);
		return -ENOMEM;
	}
	p = page_address(first);

	/* rq->sg[0], rq->sg[1] share the same page */
	/* a separate rq->sg[0] for the header - required in case !any_header_sg */
	sg_set_buf(&rq->sg[0], p, vi->hdr_len);

	/* rq->sg[1] for data packet, from offset */
	offset = sizeof(struct padded_vnet_hdr);
	sg_set_buf(&rq->sg[1], p + offset, PAGE_SIZE - offset);

	/* chain first in list head */
	first->private = (unsigned long)list;
	err = virtqueue_add_inbuf(rq->vq, rq->sg, vi->big_packets_num_skbfrags + 2,
				  first, gfp);
	if (err < 0)
		give_pages(rq, first);

	return err;
}

static unsigned int get_mergeable_buf_len(struct receive_queue *rq,
					  struct ewma_pkt_len *avg_pkt_len,
					  unsigned int room)
{
	struct virtnet_info *vi = rq->vq->vdev->priv;
	const size_t hdr_len = vi->hdr_len;
	unsigned int len;

	if (room)
		return PAGE_SIZE - room;

	len = hdr_len + clamp_t(unsigned int, ewma_pkt_len_read(avg_pkt_len),
				rq->min_buf_len, PAGE_SIZE - hdr_len);

	return ALIGN(len, L1_CACHE_BYTES);
}

static int add_recvbuf_mergeable(struct virtnet_info *vi,
				 struct receive_queue *rq, gfp_t gfp)
{
	struct page_frag *alloc_frag = &rq->alloc_frag;
	unsigned int headroom = virtnet_get_headroom(vi);
	unsigned int tailroom = headroom ? sizeof(struct skb_shared_info) : 0;
	unsigned int room = SKB_DATA_ALIGN(headroom + tailroom);
	unsigned int len, hole;
	void *ctx;
	char *buf;
	int err;

	/* Extra tailroom is needed to satisfy XDP's assumption. This
	 * means rx frag coalescing won't work, but considering we've
	 * disabled GSO for XDP, it won't be a big issue.
1925 */ 1926 len = get_mergeable_buf_len(rq, &rq->mrg_avg_pkt_len, room); 1927 1928 buf = virtnet_rq_alloc(rq, len + room, gfp); 1929 if (unlikely(!buf)) 1930 return -ENOMEM; 1931 1932 buf += headroom; /* advance address leaving hole at front of pkt */ 1933 hole = alloc_frag->size - alloc_frag->offset; 1934 if (hole < len + room) { 1935 /* To avoid internal fragmentation, if there is very likely not 1936 * enough space for another buffer, add the remaining space to 1937 * the current buffer. 1938 * XDP core assumes that frame_size of xdp_buff and the length 1939 * of the frag are PAGE_SIZE, so we disable the hole mechanism. 1940 */ 1941 if (!headroom) 1942 len += hole; 1943 alloc_frag->offset += hole; 1944 } 1945 1946 virtnet_rq_init_one_sg(rq, buf, len); 1947 1948 ctx = mergeable_len_to_ctx(len + room, headroom); 1949 err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp); 1950 if (err < 0) { 1951 if (rq->do_dma) 1952 virtnet_rq_unmap(rq, buf, 0); 1953 put_page(virt_to_head_page(buf)); 1954 } 1955 1956 return err; 1957 } 1958 1959 /* 1960 * Returns false if we couldn't fill entirely (OOM). 1961 * 1962 * Normally run in the receive path, but can also be run from ndo_open 1963 * before we're receiving packets, or from refill_work which is 1964 * careful to disable receiving (using napi_disable). 1965 */ 1966 static bool try_fill_recv(struct virtnet_info *vi, struct receive_queue *rq, 1967 gfp_t gfp) 1968 { 1969 int err; 1970 bool oom; 1971 1972 do { 1973 if (vi->mergeable_rx_bufs) 1974 err = add_recvbuf_mergeable(vi, rq, gfp); 1975 else if (vi->big_packets) 1976 err = add_recvbuf_big(vi, rq, gfp); 1977 else 1978 err = add_recvbuf_small(vi, rq, gfp); 1979 1980 oom = err == -ENOMEM; 1981 if (err) 1982 break; 1983 } while (rq->vq->num_free); 1984 if (virtqueue_kick_prepare(rq->vq) && virtqueue_notify(rq->vq)) { 1985 unsigned long flags; 1986 1987 flags = u64_stats_update_begin_irqsave(&rq->stats.syncp); 1988 rq->stats.kicks++; 1989 u64_stats_update_end_irqrestore(&rq->stats.syncp, flags); 1990 } 1991 1992 return !oom; 1993 } 1994 1995 static void skb_recv_done(struct virtqueue *rvq) 1996 { 1997 struct virtnet_info *vi = rvq->vdev->priv; 1998 struct receive_queue *rq = &vi->rq[vq2rxq(rvq)]; 1999 2000 virtqueue_napi_schedule(&rq->napi, rvq); 2001 } 2002 2003 static void virtnet_napi_enable(struct virtqueue *vq, struct napi_struct *napi) 2004 { 2005 napi_enable(napi); 2006 2007 /* If all buffers were filled by other side before we napi_enabled, we 2008 * won't get another interrupt, so process any outstanding packets now. 2009 * Call local_bh_enable after to trigger softIRQ processing. 2010 */ 2011 local_bh_disable(); 2012 virtqueue_napi_schedule(napi, vq); 2013 local_bh_enable(); 2014 } 2015 2016 static void virtnet_napi_tx_enable(struct virtnet_info *vi, 2017 struct virtqueue *vq, 2018 struct napi_struct *napi) 2019 { 2020 if (!napi->weight) 2021 return; 2022 2023 /* Tx napi touches cachelines on the cpu handling tx interrupts. Only 2024 * enable the feature if this is likely affine with the transmit path. 
2025 */ 2026 if (!vi->affinity_hint_set) { 2027 napi->weight = 0; 2028 return; 2029 } 2030 2031 return virtnet_napi_enable(vq, napi); 2032 } 2033 2034 static void virtnet_napi_tx_disable(struct napi_struct *napi) 2035 { 2036 if (napi->weight) 2037 napi_disable(napi); 2038 } 2039 2040 static void refill_work(struct work_struct *work) 2041 { 2042 struct virtnet_info *vi = 2043 container_of(work, struct virtnet_info, refill.work); 2044 bool still_empty; 2045 int i; 2046 2047 for (i = 0; i < vi->curr_queue_pairs; i++) { 2048 struct receive_queue *rq = &vi->rq[i]; 2049 2050 napi_disable(&rq->napi); 2051 still_empty = !try_fill_recv(vi, rq, GFP_KERNEL); 2052 virtnet_napi_enable(rq->vq, &rq->napi); 2053 2054 /* In theory, this can happen: if we don't get any buffers in 2055 * we will *never* try to fill again. 2056 */ 2057 if (still_empty) 2058 schedule_delayed_work(&vi->refill, HZ/2); 2059 } 2060 } 2061 2062 static int virtnet_receive(struct receive_queue *rq, int budget, 2063 unsigned int *xdp_xmit) 2064 { 2065 struct virtnet_info *vi = rq->vq->vdev->priv; 2066 struct virtnet_rq_stats stats = {}; 2067 unsigned int len; 2068 void *buf; 2069 int i; 2070 2071 if (!vi->big_packets || vi->mergeable_rx_bufs) { 2072 void *ctx; 2073 2074 while (stats.packets < budget && 2075 (buf = virtnet_rq_get_buf(rq, &len, &ctx))) { 2076 receive_buf(vi, rq, buf, len, ctx, xdp_xmit, &stats); 2077 stats.packets++; 2078 } 2079 } else { 2080 while (stats.packets < budget && 2081 (buf = virtnet_rq_get_buf(rq, &len, NULL)) != NULL) { 2082 receive_buf(vi, rq, buf, len, NULL, xdp_xmit, &stats); 2083 stats.packets++; 2084 } 2085 } 2086 2087 if (rq->vq->num_free > min((unsigned int)budget, virtqueue_get_vring_size(rq->vq)) / 2) { 2088 if (!try_fill_recv(vi, rq, GFP_ATOMIC)) { 2089 spin_lock(&vi->refill_lock); 2090 if (vi->refill_enabled) 2091 schedule_delayed_work(&vi->refill, 0); 2092 spin_unlock(&vi->refill_lock); 2093 } 2094 } 2095 2096 u64_stats_update_begin(&rq->stats.syncp); 2097 for (i = 0; i < VIRTNET_RQ_STATS_LEN; i++) { 2098 size_t offset = virtnet_rq_stats_desc[i].offset; 2099 u64 *item; 2100 2101 item = (u64 *)((u8 *)&rq->stats + offset); 2102 *item += *(u64 *)((u8 *)&stats + offset); 2103 } 2104 u64_stats_update_end(&rq->stats.syncp); 2105 2106 return stats.packets; 2107 } 2108 2109 static void virtnet_poll_cleantx(struct receive_queue *rq) 2110 { 2111 struct virtnet_info *vi = rq->vq->vdev->priv; 2112 unsigned int index = vq2rxq(rq->vq); 2113 struct send_queue *sq = &vi->sq[index]; 2114 struct netdev_queue *txq = netdev_get_tx_queue(vi->dev, index); 2115 2116 if (!sq->napi.weight || is_xdp_raw_buffer_queue(vi, index)) 2117 return; 2118 2119 if (__netif_tx_trylock(txq)) { 2120 if (sq->reset) { 2121 __netif_tx_unlock(txq); 2122 return; 2123 } 2124 2125 do { 2126 virtqueue_disable_cb(sq->vq); 2127 free_old_xmit_skbs(sq, true); 2128 } while (unlikely(!virtqueue_enable_cb_delayed(sq->vq))); 2129 2130 if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS) 2131 netif_tx_wake_queue(txq); 2132 2133 __netif_tx_unlock(txq); 2134 } 2135 } 2136 2137 static int virtnet_poll(struct napi_struct *napi, int budget) 2138 { 2139 struct receive_queue *rq = 2140 container_of(napi, struct receive_queue, napi); 2141 struct virtnet_info *vi = rq->vq->vdev->priv; 2142 struct send_queue *sq; 2143 unsigned int received; 2144 unsigned int xdp_xmit = 0; 2145 2146 virtnet_poll_cleantx(rq); 2147 2148 received = virtnet_receive(rq, budget, &xdp_xmit); 2149 2150 if (xdp_xmit & VIRTIO_XDP_REDIR) 2151 xdp_do_flush(); 2152 2153 /* Out of packets? 
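 * If we processed fewer packets than the budget allows, the RX ring is
 * drained for now: complete napi so the virtqueue callback is re-armed.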
*/ 2154 if (received < budget) 2155 virtqueue_napi_complete(napi, rq->vq, received); 2156 2157 if (xdp_xmit & VIRTIO_XDP_TX) { 2158 sq = virtnet_xdp_get_sq(vi); 2159 if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq)) { 2160 u64_stats_update_begin(&sq->stats.syncp); 2161 sq->stats.kicks++; 2162 u64_stats_update_end(&sq->stats.syncp); 2163 } 2164 virtnet_xdp_put_sq(vi, sq); 2165 } 2166 2167 return received; 2168 } 2169 2170 static void virtnet_disable_queue_pair(struct virtnet_info *vi, int qp_index) 2171 { 2172 virtnet_napi_tx_disable(&vi->sq[qp_index].napi); 2173 napi_disable(&vi->rq[qp_index].napi); 2174 xdp_rxq_info_unreg(&vi->rq[qp_index].xdp_rxq); 2175 } 2176 2177 static int virtnet_enable_queue_pair(struct virtnet_info *vi, int qp_index) 2178 { 2179 struct net_device *dev = vi->dev; 2180 int err; 2181 2182 err = xdp_rxq_info_reg(&vi->rq[qp_index].xdp_rxq, dev, qp_index, 2183 vi->rq[qp_index].napi.napi_id); 2184 if (err < 0) 2185 return err; 2186 2187 err = xdp_rxq_info_reg_mem_model(&vi->rq[qp_index].xdp_rxq, 2188 MEM_TYPE_PAGE_SHARED, NULL); 2189 if (err < 0) 2190 goto err_xdp_reg_mem_model; 2191 2192 virtnet_napi_enable(vi->rq[qp_index].vq, &vi->rq[qp_index].napi); 2193 virtnet_napi_tx_enable(vi, vi->sq[qp_index].vq, &vi->sq[qp_index].napi); 2194 2195 return 0; 2196 2197 err_xdp_reg_mem_model: 2198 xdp_rxq_info_unreg(&vi->rq[qp_index].xdp_rxq); 2199 return err; 2200 } 2201 2202 static int virtnet_open(struct net_device *dev) 2203 { 2204 struct virtnet_info *vi = netdev_priv(dev); 2205 int i, err; 2206 2207 enable_delayed_refill(vi); 2208 2209 for (i = 0; i < vi->max_queue_pairs; i++) { 2210 if (i < vi->curr_queue_pairs) 2211 /* Make sure we have some buffers: if oom use wq. */ 2212 if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL)) 2213 schedule_delayed_work(&vi->refill, 0); 2214 2215 err = virtnet_enable_queue_pair(vi, i); 2216 if (err < 0) 2217 goto err_enable_qp; 2218 } 2219 2220 return 0; 2221 2222 err_enable_qp: 2223 disable_delayed_refill(vi); 2224 cancel_delayed_work_sync(&vi->refill); 2225 2226 for (i--; i >= 0; i--) 2227 virtnet_disable_queue_pair(vi, i); 2228 return err; 2229 } 2230 2231 static int virtnet_poll_tx(struct napi_struct *napi, int budget) 2232 { 2233 struct send_queue *sq = container_of(napi, struct send_queue, napi); 2234 struct virtnet_info *vi = sq->vq->vdev->priv; 2235 unsigned int index = vq2txq(sq->vq); 2236 struct netdev_queue *txq; 2237 int opaque; 2238 bool done; 2239 2240 if (unlikely(is_xdp_raw_buffer_queue(vi, index))) { 2241 /* We don't need to enable cb for XDP */ 2242 napi_complete_done(napi, 0); 2243 return 0; 2244 } 2245 2246 txq = netdev_get_tx_queue(vi->dev, index); 2247 __netif_tx_lock(txq, raw_smp_processor_id()); 2248 virtqueue_disable_cb(sq->vq); 2249 free_old_xmit_skbs(sq, true); 2250 2251 if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS) 2252 netif_tx_wake_queue(txq); 2253 2254 opaque = virtqueue_enable_cb_prepare(sq->vq); 2255 2256 done = napi_complete_done(napi, 0); 2257 2258 if (!done) 2259 virtqueue_disable_cb(sq->vq); 2260 2261 __netif_tx_unlock(txq); 2262 2263 if (done) { 2264 if (unlikely(virtqueue_poll(sq->vq, opaque))) { 2265 if (napi_schedule_prep(napi)) { 2266 __netif_tx_lock(txq, raw_smp_processor_id()); 2267 virtqueue_disable_cb(sq->vq); 2268 __netif_tx_unlock(txq); 2269 __napi_schedule(napi); 2270 } 2271 } 2272 } 2273 2274 return 0; 2275 } 2276 2277 static int xmit_skb(struct send_queue *sq, struct sk_buff *skb) 2278 { 2279 struct virtio_net_hdr_mrg_rxbuf *hdr; 2280 const unsigned char *dest = ((struct ethhdr 
*)skb->data)->h_dest; 2281 struct virtnet_info *vi = sq->vq->vdev->priv; 2282 int num_sg; 2283 unsigned hdr_len = vi->hdr_len; 2284 bool can_push; 2285 2286 pr_debug("%s: xmit %p %pM\n", vi->dev->name, skb, dest); 2287 2288 can_push = vi->any_header_sg && 2289 !((unsigned long)skb->data & (__alignof__(*hdr) - 1)) && 2290 !skb_header_cloned(skb) && skb_headroom(skb) >= hdr_len; 2291 /* Even if we can, don't push here yet as this would skew 2292 * csum_start offset below. */ 2293 if (can_push) 2294 hdr = (struct virtio_net_hdr_mrg_rxbuf *)(skb->data - hdr_len); 2295 else 2296 hdr = &skb_vnet_common_hdr(skb)->mrg_hdr; 2297 2298 if (virtio_net_hdr_from_skb(skb, &hdr->hdr, 2299 virtio_is_little_endian(vi->vdev), false, 2300 0)) 2301 return -EPROTO; 2302 2303 if (vi->mergeable_rx_bufs) 2304 hdr->num_buffers = 0; 2305 2306 sg_init_table(sq->sg, skb_shinfo(skb)->nr_frags + (can_push ? 1 : 2)); 2307 if (can_push) { 2308 __skb_push(skb, hdr_len); 2309 num_sg = skb_to_sgvec(skb, sq->sg, 0, skb->len); 2310 if (unlikely(num_sg < 0)) 2311 return num_sg; 2312 /* Pull header back to avoid skew in tx bytes calculations. */ 2313 __skb_pull(skb, hdr_len); 2314 } else { 2315 sg_set_buf(sq->sg, hdr, hdr_len); 2316 num_sg = skb_to_sgvec(skb, sq->sg + 1, 0, skb->len); 2317 if (unlikely(num_sg < 0)) 2318 return num_sg; 2319 num_sg++; 2320 } 2321 return virtqueue_add_outbuf(sq->vq, sq->sg, num_sg, skb, GFP_ATOMIC); 2322 } 2323 2324 static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev) 2325 { 2326 struct virtnet_info *vi = netdev_priv(dev); 2327 int qnum = skb_get_queue_mapping(skb); 2328 struct send_queue *sq = &vi->sq[qnum]; 2329 int err; 2330 struct netdev_queue *txq = netdev_get_tx_queue(dev, qnum); 2331 bool kick = !netdev_xmit_more(); 2332 bool use_napi = sq->napi.weight; 2333 2334 /* Free up any pending old buffers before queueing new ones. */ 2335 do { 2336 if (use_napi) 2337 virtqueue_disable_cb(sq->vq); 2338 2339 free_old_xmit_skbs(sq, false); 2340 2341 } while (use_napi && kick && 2342 unlikely(!virtqueue_enable_cb_delayed(sq->vq))); 2343 2344 /* timestamp packet in software */ 2345 skb_tx_timestamp(skb); 2346 2347 /* Try to transmit */ 2348 err = xmit_skb(sq, skb); 2349 2350 /* This should not happen! */ 2351 if (unlikely(err)) { 2352 dev->stats.tx_fifo_errors++; 2353 if (net_ratelimit()) 2354 dev_warn(&dev->dev, 2355 "Unexpected TXQ (%d) queue failure: %d\n", 2356 qnum, err); 2357 dev->stats.tx_dropped++; 2358 dev_kfree_skb_any(skb); 2359 return NETDEV_TX_OK; 2360 } 2361 2362 /* Don't wait up for transmitted skbs to be freed. 
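 * In the non-napi case the skb is detached from its owning socket
 * (skb_orphan()) so the sender is not throttled waiting for the device
 * to consume it; conntrack state is dropped as well (nf_reset_ct()).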
*/ 2363 if (!use_napi) { 2364 skb_orphan(skb); 2365 nf_reset_ct(skb); 2366 } 2367 2368 check_sq_full_and_disable(vi, dev, sq); 2369 2370 if (kick || netif_xmit_stopped(txq)) { 2371 if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq)) { 2372 u64_stats_update_begin(&sq->stats.syncp); 2373 sq->stats.kicks++; 2374 u64_stats_update_end(&sq->stats.syncp); 2375 } 2376 } 2377 2378 return NETDEV_TX_OK; 2379 } 2380 2381 static int virtnet_rx_resize(struct virtnet_info *vi, 2382 struct receive_queue *rq, u32 ring_num) 2383 { 2384 bool running = netif_running(vi->dev); 2385 int err, qindex; 2386 2387 qindex = rq - vi->rq; 2388 2389 if (running) 2390 napi_disable(&rq->napi); 2391 2392 err = virtqueue_resize(rq->vq, ring_num, virtnet_rq_free_unused_buf); 2393 if (err) 2394 netdev_err(vi->dev, "resize rx fail: rx queue index: %d err: %d\n", qindex, err); 2395 2396 if (!try_fill_recv(vi, rq, GFP_KERNEL)) 2397 schedule_delayed_work(&vi->refill, 0); 2398 2399 if (running) 2400 virtnet_napi_enable(rq->vq, &rq->napi); 2401 return err; 2402 } 2403 2404 static int virtnet_tx_resize(struct virtnet_info *vi, 2405 struct send_queue *sq, u32 ring_num) 2406 { 2407 bool running = netif_running(vi->dev); 2408 struct netdev_queue *txq; 2409 int err, qindex; 2410 2411 qindex = sq - vi->sq; 2412 2413 if (running) 2414 virtnet_napi_tx_disable(&sq->napi); 2415 2416 txq = netdev_get_tx_queue(vi->dev, qindex); 2417 2418 /* 1. wait for all xmit to complete 2419 * 2. fix the race of netif_stop_subqueue() vs netif_start_subqueue() 2420 */ 2421 __netif_tx_lock_bh(txq); 2422 2423 /* Prevent rx poll from accessing sq. */ 2424 sq->reset = true; 2425 2426 /* Prevent the upper layer from trying to send packets. */ 2427 netif_stop_subqueue(vi->dev, qindex); 2428 2429 __netif_tx_unlock_bh(txq); 2430 2431 err = virtqueue_resize(sq->vq, ring_num, virtnet_sq_free_unused_buf); 2432 if (err) 2433 netdev_err(vi->dev, "resize tx fail: tx queue index: %d err: %d\n", qindex, err); 2434 2435 __netif_tx_lock_bh(txq); 2436 sq->reset = false; 2437 netif_tx_wake_queue(txq); 2438 __netif_tx_unlock_bh(txq); 2439 2440 if (running) 2441 virtnet_napi_tx_enable(vi, sq->vq, &sq->napi); 2442 return err; 2443 } 2444 2445 /* 2446 * Send command via the control virtqueue and check status. Commands 2447 * supported by the hypervisor, as indicated by feature bits, should 2448 * never fail unless improperly formatted. 2449 */ 2450 static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd, 2451 struct scatterlist *out) 2452 { 2453 struct scatterlist *sgs[4], hdr, stat; 2454 unsigned out_num = 0, tmp; 2455 int ret; 2456 2457 /* Caller should know better */ 2458 BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ)); 2459 2460 vi->ctrl->status = ~0; 2461 vi->ctrl->hdr.class = class; 2462 vi->ctrl->hdr.cmd = cmd; 2463 /* Add header */ 2464 sg_init_one(&hdr, &vi->ctrl->hdr, sizeof(vi->ctrl->hdr)); 2465 sgs[out_num++] = &hdr; 2466 2467 if (out) 2468 sgs[out_num++] = out; 2469 2470 /* Add return status.
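 * The status byte is the only device-writable entry in the sg list;
 * the device sets it to VIRTIO_NET_OK on success (it is pre-set to ~0
 * above so a missing reply reads back as a failure).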
*/ 2471 sg_init_one(&stat, &vi->ctrl->status, sizeof(vi->ctrl->status)); 2472 sgs[out_num] = &stat; 2473 2474 BUG_ON(out_num + 1 > ARRAY_SIZE(sgs)); 2475 ret = virtqueue_add_sgs(vi->cvq, sgs, out_num, 1, vi, GFP_ATOMIC); 2476 if (ret < 0) { 2477 dev_warn(&vi->vdev->dev, 2478 "Failed to add sgs for command vq: %d\n.", ret); 2479 return false; 2480 } 2481 2482 if (unlikely(!virtqueue_kick(vi->cvq))) 2483 return vi->ctrl->status == VIRTIO_NET_OK; 2484 2485 /* Spin for a response, the kick causes an ioport write, trapping 2486 * into the hypervisor, so the request should be handled immediately. 2487 */ 2488 while (!virtqueue_get_buf(vi->cvq, &tmp) && 2489 !virtqueue_is_broken(vi->cvq)) 2490 cpu_relax(); 2491 2492 return vi->ctrl->status == VIRTIO_NET_OK; 2493 } 2494 2495 static int virtnet_set_mac_address(struct net_device *dev, void *p) 2496 { 2497 struct virtnet_info *vi = netdev_priv(dev); 2498 struct virtio_device *vdev = vi->vdev; 2499 int ret; 2500 struct sockaddr *addr; 2501 struct scatterlist sg; 2502 2503 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STANDBY)) 2504 return -EOPNOTSUPP; 2505 2506 addr = kmemdup(p, sizeof(*addr), GFP_KERNEL); 2507 if (!addr) 2508 return -ENOMEM; 2509 2510 ret = eth_prepare_mac_addr_change(dev, addr); 2511 if (ret) 2512 goto out; 2513 2514 if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR)) { 2515 sg_init_one(&sg, addr->sa_data, dev->addr_len); 2516 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC, 2517 VIRTIO_NET_CTRL_MAC_ADDR_SET, &sg)) { 2518 dev_warn(&vdev->dev, 2519 "Failed to set mac address by vq command.\n"); 2520 ret = -EINVAL; 2521 goto out; 2522 } 2523 } else if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC) && 2524 !virtio_has_feature(vdev, VIRTIO_F_VERSION_1)) { 2525 unsigned int i; 2526 2527 /* Naturally, this has an atomicity problem. 
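 * The address is written one byte at a time through config space, so a
 * concurrent reader may briefly observe a mix of the old and new MAC.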
*/ 2528 for (i = 0; i < dev->addr_len; i++) 2529 virtio_cwrite8(vdev, 2530 offsetof(struct virtio_net_config, mac) + 2531 i, addr->sa_data[i]); 2532 } 2533 2534 eth_commit_mac_addr_change(dev, p); 2535 ret = 0; 2536 2537 out: 2538 kfree(addr); 2539 return ret; 2540 } 2541 2542 static void virtnet_stats(struct net_device *dev, 2543 struct rtnl_link_stats64 *tot) 2544 { 2545 struct virtnet_info *vi = netdev_priv(dev); 2546 unsigned int start; 2547 int i; 2548 2549 for (i = 0; i < vi->max_queue_pairs; i++) { 2550 u64 tpackets, tbytes, terrors, rpackets, rbytes, rdrops; 2551 struct receive_queue *rq = &vi->rq[i]; 2552 struct send_queue *sq = &vi->sq[i]; 2553 2554 do { 2555 start = u64_stats_fetch_begin(&sq->stats.syncp); 2556 tpackets = sq->stats.packets; 2557 tbytes = sq->stats.bytes; 2558 terrors = sq->stats.tx_timeouts; 2559 } while (u64_stats_fetch_retry(&sq->stats.syncp, start)); 2560 2561 do { 2562 start = u64_stats_fetch_begin(&rq->stats.syncp); 2563 rpackets = rq->stats.packets; 2564 rbytes = rq->stats.bytes; 2565 rdrops = rq->stats.drops; 2566 } while (u64_stats_fetch_retry(&rq->stats.syncp, start)); 2567 2568 tot->rx_packets += rpackets; 2569 tot->tx_packets += tpackets; 2570 tot->rx_bytes += rbytes; 2571 tot->tx_bytes += tbytes; 2572 tot->rx_dropped += rdrops; 2573 tot->tx_errors += terrors; 2574 } 2575 2576 tot->tx_dropped = dev->stats.tx_dropped; 2577 tot->tx_fifo_errors = dev->stats.tx_fifo_errors; 2578 tot->rx_length_errors = dev->stats.rx_length_errors; 2579 tot->rx_frame_errors = dev->stats.rx_frame_errors; 2580 } 2581 2582 static void virtnet_ack_link_announce(struct virtnet_info *vi) 2583 { 2584 rtnl_lock(); 2585 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_ANNOUNCE, 2586 VIRTIO_NET_CTRL_ANNOUNCE_ACK, NULL)) 2587 dev_warn(&vi->dev->dev, "Failed to ack link announce.\n"); 2588 rtnl_unlock(); 2589 } 2590 2591 static int _virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs) 2592 { 2593 struct scatterlist sg; 2594 struct net_device *dev = vi->dev; 2595 2596 if (!vi->has_cvq || !virtio_has_feature(vi->vdev, VIRTIO_NET_F_MQ)) 2597 return 0; 2598 2599 vi->ctrl->mq.virtqueue_pairs = cpu_to_virtio16(vi->vdev, queue_pairs); 2600 sg_init_one(&sg, &vi->ctrl->mq, sizeof(vi->ctrl->mq)); 2601 2602 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ, 2603 VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET, &sg)) { 2604 dev_warn(&dev->dev, "Fail to set num of queue pairs to %d\n", 2605 queue_pairs); 2606 return -EINVAL; 2607 } else { 2608 vi->curr_queue_pairs = queue_pairs; 2609 /* virtnet_open() will refill when device is going to up. */ 2610 if (dev->flags & IFF_UP) 2611 schedule_delayed_work(&vi->refill, 0); 2612 } 2613 2614 return 0; 2615 } 2616 2617 static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs) 2618 { 2619 int err; 2620 2621 rtnl_lock(); 2622 err = _virtnet_set_queues(vi, queue_pairs); 2623 rtnl_unlock(); 2624 return err; 2625 } 2626 2627 static int virtnet_close(struct net_device *dev) 2628 { 2629 struct virtnet_info *vi = netdev_priv(dev); 2630 int i; 2631 2632 /* Make sure NAPI doesn't schedule refill work */ 2633 disable_delayed_refill(vi); 2634 /* Make sure refill_work doesn't re-enable napi! 
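 * cancel_delayed_work_sync() below also waits for an already-running
 * refill_work, which would otherwise re-enable the rx napi instances
 * we are about to disable.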
*/ 2635 cancel_delayed_work_sync(&vi->refill); 2636 2637 for (i = 0; i < vi->max_queue_pairs; i++) 2638 virtnet_disable_queue_pair(vi, i); 2639 2640 return 0; 2641 } 2642 2643 static void virtnet_set_rx_mode(struct net_device *dev) 2644 { 2645 struct virtnet_info *vi = netdev_priv(dev); 2646 struct scatterlist sg[2]; 2647 struct virtio_net_ctrl_mac *mac_data; 2648 struct netdev_hw_addr *ha; 2649 int uc_count; 2650 int mc_count; 2651 void *buf; 2652 int i; 2653 2654 /* We can't dynamically set ndo_set_rx_mode, so return gracefully */ 2655 if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_RX)) 2656 return; 2657 2658 vi->ctrl->promisc = ((dev->flags & IFF_PROMISC) != 0); 2659 vi->ctrl->allmulti = ((dev->flags & IFF_ALLMULTI) != 0); 2660 2661 sg_init_one(sg, &vi->ctrl->promisc, sizeof(vi->ctrl->promisc)); 2662 2663 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX, 2664 VIRTIO_NET_CTRL_RX_PROMISC, sg)) 2665 dev_warn(&dev->dev, "Failed to %sable promisc mode.\n", 2666 vi->ctrl->promisc ? "en" : "dis"); 2667 2668 sg_init_one(sg, &vi->ctrl->allmulti, sizeof(vi->ctrl->allmulti)); 2669 2670 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX, 2671 VIRTIO_NET_CTRL_RX_ALLMULTI, sg)) 2672 dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n", 2673 vi->ctrl->allmulti ? "en" : "dis"); 2674 2675 uc_count = netdev_uc_count(dev); 2676 mc_count = netdev_mc_count(dev); 2677 /* MAC filter - use one buffer for both lists */ 2678 buf = kzalloc(((uc_count + mc_count) * ETH_ALEN) + 2679 (2 * sizeof(mac_data->entries)), GFP_ATOMIC); 2680 mac_data = buf; 2681 if (!buf) 2682 return; 2683 2684 sg_init_table(sg, 2); 2685 2686 /* Store the unicast list and count in the front of the buffer */ 2687 mac_data->entries = cpu_to_virtio32(vi->vdev, uc_count); 2688 i = 0; 2689 netdev_for_each_uc_addr(ha, dev) 2690 memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN); 2691 2692 sg_set_buf(&sg[0], mac_data, 2693 sizeof(mac_data->entries) + (uc_count * ETH_ALEN)); 2694 2695 /* multicast list and count fill the end */ 2696 mac_data = (void *)&mac_data->macs[uc_count][0]; 2697 2698 mac_data->entries = cpu_to_virtio32(vi->vdev, mc_count); 2699 i = 0; 2700 netdev_for_each_mc_addr(ha, dev) 2701 memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN); 2702 2703 sg_set_buf(&sg[1], mac_data, 2704 sizeof(mac_data->entries) + (mc_count * ETH_ALEN)); 2705 2706 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC, 2707 VIRTIO_NET_CTRL_MAC_TABLE_SET, sg)) 2708 dev_warn(&dev->dev, "Failed to set MAC filter table.\n"); 2709 2710 kfree(buf); 2711 } 2712 2713 static int virtnet_vlan_rx_add_vid(struct net_device *dev, 2714 __be16 proto, u16 vid) 2715 { 2716 struct virtnet_info *vi = netdev_priv(dev); 2717 struct scatterlist sg; 2718 2719 vi->ctrl->vid = cpu_to_virtio16(vi->vdev, vid); 2720 sg_init_one(&sg, &vi->ctrl->vid, sizeof(vi->ctrl->vid)); 2721 2722 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN, 2723 VIRTIO_NET_CTRL_VLAN_ADD, &sg)) 2724 dev_warn(&dev->dev, "Failed to add VLAN ID %d.\n", vid); 2725 return 0; 2726 } 2727 2728 static int virtnet_vlan_rx_kill_vid(struct net_device *dev, 2729 __be16 proto, u16 vid) 2730 { 2731 struct virtnet_info *vi = netdev_priv(dev); 2732 struct scatterlist sg; 2733 2734 vi->ctrl->vid = cpu_to_virtio16(vi->vdev, vid); 2735 sg_init_one(&sg, &vi->ctrl->vid, sizeof(vi->ctrl->vid)); 2736 2737 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN, 2738 VIRTIO_NET_CTRL_VLAN_DEL, &sg)) 2739 dev_warn(&dev->dev, "Failed to kill VLAN ID %d.\n", vid); 2740 return 0; 2741 } 2742 2743 static void virtnet_clean_affinity(struct 
virtnet_info *vi) 2744 { 2745 int i; 2746 2747 if (vi->affinity_hint_set) { 2748 for (i = 0; i < vi->max_queue_pairs; i++) { 2749 virtqueue_set_affinity(vi->rq[i].vq, NULL); 2750 virtqueue_set_affinity(vi->sq[i].vq, NULL); 2751 } 2752 2753 vi->affinity_hint_set = false; 2754 } 2755 } 2756 2757 static void virtnet_set_affinity(struct virtnet_info *vi) 2758 { 2759 cpumask_var_t mask; 2760 int stragglers; 2761 int group_size; 2762 int i, j, cpu; 2763 int num_cpu; 2764 int stride; 2765 2766 if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) { 2767 virtnet_clean_affinity(vi); 2768 return; 2769 } 2770 2771 num_cpu = num_online_cpus(); 2772 stride = max_t(int, num_cpu / vi->curr_queue_pairs, 1); 2773 stragglers = num_cpu >= vi->curr_queue_pairs ? 2774 num_cpu % vi->curr_queue_pairs : 2775 0; 2776 cpu = cpumask_first(cpu_online_mask); 2777 2778 for (i = 0; i < vi->curr_queue_pairs; i++) { 2779 group_size = stride + (i < stragglers ? 1 : 0); 2780 2781 for (j = 0; j < group_size; j++) { 2782 cpumask_set_cpu(cpu, mask); 2783 cpu = cpumask_next_wrap(cpu, cpu_online_mask, 2784 nr_cpu_ids, false); 2785 } 2786 virtqueue_set_affinity(vi->rq[i].vq, mask); 2787 virtqueue_set_affinity(vi->sq[i].vq, mask); 2788 __netif_set_xps_queue(vi->dev, cpumask_bits(mask), i, XPS_CPUS); 2789 cpumask_clear(mask); 2790 } 2791 2792 vi->affinity_hint_set = true; 2793 free_cpumask_var(mask); 2794 } 2795 2796 static int virtnet_cpu_online(unsigned int cpu, struct hlist_node *node) 2797 { 2798 struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info, 2799 node); 2800 virtnet_set_affinity(vi); 2801 return 0; 2802 } 2803 2804 static int virtnet_cpu_dead(unsigned int cpu, struct hlist_node *node) 2805 { 2806 struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info, 2807 node_dead); 2808 virtnet_set_affinity(vi); 2809 return 0; 2810 } 2811 2812 static int virtnet_cpu_down_prep(unsigned int cpu, struct hlist_node *node) 2813 { 2814 struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info, 2815 node); 2816 2817 virtnet_clean_affinity(vi); 2818 return 0; 2819 } 2820 2821 static enum cpuhp_state virtionet_online; 2822 2823 static int virtnet_cpu_notif_add(struct virtnet_info *vi) 2824 { 2825 int ret; 2826 2827 ret = cpuhp_state_add_instance_nocalls(virtionet_online, &vi->node); 2828 if (ret) 2829 return ret; 2830 ret = cpuhp_state_add_instance_nocalls(CPUHP_VIRT_NET_DEAD, 2831 &vi->node_dead); 2832 if (!ret) 2833 return ret; 2834 cpuhp_state_remove_instance_nocalls(virtionet_online, &vi->node); 2835 return ret; 2836 } 2837 2838 static void virtnet_cpu_notif_remove(struct virtnet_info *vi) 2839 { 2840 cpuhp_state_remove_instance_nocalls(virtionet_online, &vi->node); 2841 cpuhp_state_remove_instance_nocalls(CPUHP_VIRT_NET_DEAD, 2842 &vi->node_dead); 2843 } 2844 2845 static void virtnet_get_ringparam(struct net_device *dev, 2846 struct ethtool_ringparam *ring, 2847 struct kernel_ethtool_ringparam *kernel_ring, 2848 struct netlink_ext_ack *extack) 2849 { 2850 struct virtnet_info *vi = netdev_priv(dev); 2851 2852 ring->rx_max_pending = vi->rq[0].vq->num_max; 2853 ring->tx_max_pending = vi->sq[0].vq->num_max; 2854 ring->rx_pending = virtqueue_get_vring_size(vi->rq[0].vq); 2855 ring->tx_pending = virtqueue_get_vring_size(vi->sq[0].vq); 2856 } 2857 2858 static int virtnet_set_ringparam(struct net_device *dev, 2859 struct ethtool_ringparam *ring, 2860 struct kernel_ethtool_ringparam *kernel_ring, 2861 struct netlink_ext_ack *extack) 2862 { 2863 struct virtnet_info *vi = netdev_priv(dev); 2864 u32 rx_pending, 
tx_pending; 2865 struct receive_queue *rq; 2866 struct send_queue *sq; 2867 int i, err; 2868 2869 if (ring->rx_mini_pending || ring->rx_jumbo_pending) 2870 return -EINVAL; 2871 2872 rx_pending = virtqueue_get_vring_size(vi->rq[0].vq); 2873 tx_pending = virtqueue_get_vring_size(vi->sq[0].vq); 2874 2875 if (ring->rx_pending == rx_pending && 2876 ring->tx_pending == tx_pending) 2877 return 0; 2878 2879 if (ring->rx_pending > vi->rq[0].vq->num_max) 2880 return -EINVAL; 2881 2882 if (ring->tx_pending > vi->sq[0].vq->num_max) 2883 return -EINVAL; 2884 2885 for (i = 0; i < vi->max_queue_pairs; i++) { 2886 rq = vi->rq + i; 2887 sq = vi->sq + i; 2888 2889 if (ring->tx_pending != tx_pending) { 2890 err = virtnet_tx_resize(vi, sq, ring->tx_pending); 2891 if (err) 2892 return err; 2893 } 2894 2895 if (ring->rx_pending != rx_pending) { 2896 err = virtnet_rx_resize(vi, rq, ring->rx_pending); 2897 if (err) 2898 return err; 2899 } 2900 } 2901 2902 return 0; 2903 } 2904 2905 static bool virtnet_commit_rss_command(struct virtnet_info *vi) 2906 { 2907 struct net_device *dev = vi->dev; 2908 struct scatterlist sgs[4]; 2909 unsigned int sg_buf_size; 2910 2911 /* prepare sgs */ 2912 sg_init_table(sgs, 4); 2913 2914 sg_buf_size = offsetof(struct virtio_net_ctrl_rss, indirection_table); 2915 sg_set_buf(&sgs[0], &vi->ctrl->rss, sg_buf_size); 2916 2917 sg_buf_size = sizeof(uint16_t) * (vi->ctrl->rss.indirection_table_mask + 1); 2918 sg_set_buf(&sgs[1], vi->ctrl->rss.indirection_table, sg_buf_size); 2919 2920 sg_buf_size = offsetof(struct virtio_net_ctrl_rss, key) 2921 - offsetof(struct virtio_net_ctrl_rss, max_tx_vq); 2922 sg_set_buf(&sgs[2], &vi->ctrl->rss.max_tx_vq, sg_buf_size); 2923 2924 sg_buf_size = vi->rss_key_size; 2925 sg_set_buf(&sgs[3], vi->ctrl->rss.key, sg_buf_size); 2926 2927 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ, 2928 vi->has_rss ? VIRTIO_NET_CTRL_MQ_RSS_CONFIG 2929 : VIRTIO_NET_CTRL_MQ_HASH_CONFIG, sgs)) { 2930 dev_warn(&dev->dev, "VIRTIONET issue with committing RSS sgs\n"); 2931 return false; 2932 } 2933 return true; 2934 } 2935 2936 static void virtnet_init_default_rss(struct virtnet_info *vi) 2937 { 2938 u32 indir_val = 0; 2939 int i = 0; 2940 2941 vi->ctrl->rss.hash_types = vi->rss_hash_types_supported; 2942 vi->rss_hash_types_saved = vi->rss_hash_types_supported; 2943 vi->ctrl->rss.indirection_table_mask = vi->rss_indir_table_size 2944 ? vi->rss_indir_table_size - 1 : 0; 2945 vi->ctrl->rss.unclassified_queue = 0; 2946 2947 for (; i < vi->rss_indir_table_size; ++i) { 2948 indir_val = ethtool_rxfh_indir_default(i, vi->curr_queue_pairs); 2949 vi->ctrl->rss.indirection_table[i] = indir_val; 2950 } 2951 2952 vi->ctrl->rss.max_tx_vq = vi->has_rss ? 
vi->curr_queue_pairs : 0; 2953 vi->ctrl->rss.hash_key_length = vi->rss_key_size; 2954 2955 netdev_rss_key_fill(vi->ctrl->rss.key, vi->rss_key_size); 2956 } 2957 2958 static void virtnet_get_hashflow(const struct virtnet_info *vi, struct ethtool_rxnfc *info) 2959 { 2960 info->data = 0; 2961 switch (info->flow_type) { 2962 case TCP_V4_FLOW: 2963 if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_TCPv4) { 2964 info->data = RXH_IP_SRC | RXH_IP_DST | 2965 RXH_L4_B_0_1 | RXH_L4_B_2_3; 2966 } else if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv4) { 2967 info->data = RXH_IP_SRC | RXH_IP_DST; 2968 } 2969 break; 2970 case TCP_V6_FLOW: 2971 if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_TCPv6) { 2972 info->data = RXH_IP_SRC | RXH_IP_DST | 2973 RXH_L4_B_0_1 | RXH_L4_B_2_3; 2974 } else if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv6) { 2975 info->data = RXH_IP_SRC | RXH_IP_DST; 2976 } 2977 break; 2978 case UDP_V4_FLOW: 2979 if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_UDPv4) { 2980 info->data = RXH_IP_SRC | RXH_IP_DST | 2981 RXH_L4_B_0_1 | RXH_L4_B_2_3; 2982 } else if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv4) { 2983 info->data = RXH_IP_SRC | RXH_IP_DST; 2984 } 2985 break; 2986 case UDP_V6_FLOW: 2987 if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_UDPv6) { 2988 info->data = RXH_IP_SRC | RXH_IP_DST | 2989 RXH_L4_B_0_1 | RXH_L4_B_2_3; 2990 } else if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv6) { 2991 info->data = RXH_IP_SRC | RXH_IP_DST; 2992 } 2993 break; 2994 case IPV4_FLOW: 2995 if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv4) 2996 info->data = RXH_IP_SRC | RXH_IP_DST; 2997 2998 break; 2999 case IPV6_FLOW: 3000 if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv6) 3001 info->data = RXH_IP_SRC | RXH_IP_DST; 3002 3003 break; 3004 default: 3005 info->data = 0; 3006 break; 3007 } 3008 } 3009 3010 static bool virtnet_set_hashflow(struct virtnet_info *vi, struct ethtool_rxnfc *info) 3011 { 3012 u32 new_hashtypes = vi->rss_hash_types_saved; 3013 bool is_disable = info->data & RXH_DISCARD; 3014 bool is_l4 = info->data == (RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3); 3015 3016 /* supports only 'sd', 'sdfn' and 'r' */ 3017 if (!((info->data == (RXH_IP_SRC | RXH_IP_DST)) | is_l4 | is_disable)) 3018 return false; 3019 3020 switch (info->flow_type) { 3021 case TCP_V4_FLOW: 3022 new_hashtypes &= ~(VIRTIO_NET_RSS_HASH_TYPE_IPv4 | VIRTIO_NET_RSS_HASH_TYPE_TCPv4); 3023 if (!is_disable) 3024 new_hashtypes |= VIRTIO_NET_RSS_HASH_TYPE_IPv4 3025 | (is_l4 ? VIRTIO_NET_RSS_HASH_TYPE_TCPv4 : 0); 3026 break; 3027 case UDP_V4_FLOW: 3028 new_hashtypes &= ~(VIRTIO_NET_RSS_HASH_TYPE_IPv4 | VIRTIO_NET_RSS_HASH_TYPE_UDPv4); 3029 if (!is_disable) 3030 new_hashtypes |= VIRTIO_NET_RSS_HASH_TYPE_IPv4 3031 | (is_l4 ? VIRTIO_NET_RSS_HASH_TYPE_UDPv4 : 0); 3032 break; 3033 case IPV4_FLOW: 3034 new_hashtypes &= ~VIRTIO_NET_RSS_HASH_TYPE_IPv4; 3035 if (!is_disable) 3036 new_hashtypes = VIRTIO_NET_RSS_HASH_TYPE_IPv4; 3037 break; 3038 case TCP_V6_FLOW: 3039 new_hashtypes &= ~(VIRTIO_NET_RSS_HASH_TYPE_IPv6 | VIRTIO_NET_RSS_HASH_TYPE_TCPv6); 3040 if (!is_disable) 3041 new_hashtypes |= VIRTIO_NET_RSS_HASH_TYPE_IPv6 3042 | (is_l4 ? VIRTIO_NET_RSS_HASH_TYPE_TCPv6 : 0); 3043 break; 3044 case UDP_V6_FLOW: 3045 new_hashtypes &= ~(VIRTIO_NET_RSS_HASH_TYPE_IPv6 | VIRTIO_NET_RSS_HASH_TYPE_UDPv6); 3046 if (!is_disable) 3047 new_hashtypes |= VIRTIO_NET_RSS_HASH_TYPE_IPv6 3048 | (is_l4 ? 
VIRTIO_NET_RSS_HASH_TYPE_UDPv6 : 0); 3049 break; 3050 case IPV6_FLOW: 3051 new_hashtypes &= ~VIRTIO_NET_RSS_HASH_TYPE_IPv6; 3052 if (!is_disable) 3053 new_hashtypes = VIRTIO_NET_RSS_HASH_TYPE_IPv6; 3054 break; 3055 default: 3056 /* unsupported flow */ 3057 return false; 3058 } 3059 3060 /* if unsupported hashtype was set */ 3061 if (new_hashtypes != (new_hashtypes & vi->rss_hash_types_supported)) 3062 return false; 3063 3064 if (new_hashtypes != vi->rss_hash_types_saved) { 3065 vi->rss_hash_types_saved = new_hashtypes; 3066 vi->ctrl->rss.hash_types = vi->rss_hash_types_saved; 3067 if (vi->dev->features & NETIF_F_RXHASH) 3068 return virtnet_commit_rss_command(vi); 3069 } 3070 3071 return true; 3072 } 3073 3074 static void virtnet_get_drvinfo(struct net_device *dev, 3075 struct ethtool_drvinfo *info) 3076 { 3077 struct virtnet_info *vi = netdev_priv(dev); 3078 struct virtio_device *vdev = vi->vdev; 3079 3080 strscpy(info->driver, KBUILD_MODNAME, sizeof(info->driver)); 3081 strscpy(info->version, VIRTNET_DRIVER_VERSION, sizeof(info->version)); 3082 strscpy(info->bus_info, virtio_bus_name(vdev), sizeof(info->bus_info)); 3083 3084 } 3085 3086 /* TODO: Eliminate OOO packets during switching */ 3087 static int virtnet_set_channels(struct net_device *dev, 3088 struct ethtool_channels *channels) 3089 { 3090 struct virtnet_info *vi = netdev_priv(dev); 3091 u16 queue_pairs = channels->combined_count; 3092 int err; 3093 3094 /* We don't support separate rx/tx channels. 3095 * We don't allow setting 'other' channels. 3096 */ 3097 if (channels->rx_count || channels->tx_count || channels->other_count) 3098 return -EINVAL; 3099 3100 if (queue_pairs > vi->max_queue_pairs || queue_pairs == 0) 3101 return -EINVAL; 3102 3103 /* For now we don't support modifying channels while XDP is loaded 3104 * also when XDP is loaded all RX queues have XDP programs so we only 3105 * need to check a single RX queue. 
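 * (virtnet_xdp_set() attaches the program to every RX queue, so
 * checking rq[0] below is sufficient.)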
3106 */ 3107 if (vi->rq[0].xdp_prog) 3108 return -EINVAL; 3109 3110 cpus_read_lock(); 3111 err = _virtnet_set_queues(vi, queue_pairs); 3112 if (err) { 3113 cpus_read_unlock(); 3114 goto err; 3115 } 3116 virtnet_set_affinity(vi); 3117 cpus_read_unlock(); 3118 3119 netif_set_real_num_tx_queues(dev, queue_pairs); 3120 netif_set_real_num_rx_queues(dev, queue_pairs); 3121 err: 3122 return err; 3123 } 3124 3125 static void virtnet_get_strings(struct net_device *dev, u32 stringset, u8 *data) 3126 { 3127 struct virtnet_info *vi = netdev_priv(dev); 3128 unsigned int i, j; 3129 u8 *p = data; 3130 3131 switch (stringset) { 3132 case ETH_SS_STATS: 3133 for (i = 0; i < vi->curr_queue_pairs; i++) { 3134 for (j = 0; j < VIRTNET_RQ_STATS_LEN; j++) 3135 ethtool_sprintf(&p, "rx_queue_%u_%s", i, 3136 virtnet_rq_stats_desc[j].desc); 3137 } 3138 3139 for (i = 0; i < vi->curr_queue_pairs; i++) { 3140 for (j = 0; j < VIRTNET_SQ_STATS_LEN; j++) 3141 ethtool_sprintf(&p, "tx_queue_%u_%s", i, 3142 virtnet_sq_stats_desc[j].desc); 3143 } 3144 break; 3145 } 3146 } 3147 3148 static int virtnet_get_sset_count(struct net_device *dev, int sset) 3149 { 3150 struct virtnet_info *vi = netdev_priv(dev); 3151 3152 switch (sset) { 3153 case ETH_SS_STATS: 3154 return vi->curr_queue_pairs * (VIRTNET_RQ_STATS_LEN + 3155 VIRTNET_SQ_STATS_LEN); 3156 default: 3157 return -EOPNOTSUPP; 3158 } 3159 } 3160 3161 static void virtnet_get_ethtool_stats(struct net_device *dev, 3162 struct ethtool_stats *stats, u64 *data) 3163 { 3164 struct virtnet_info *vi = netdev_priv(dev); 3165 unsigned int idx = 0, start, i, j; 3166 const u8 *stats_base; 3167 size_t offset; 3168 3169 for (i = 0; i < vi->curr_queue_pairs; i++) { 3170 struct receive_queue *rq = &vi->rq[i]; 3171 3172 stats_base = (u8 *)&rq->stats; 3173 do { 3174 start = u64_stats_fetch_begin(&rq->stats.syncp); 3175 for (j = 0; j < VIRTNET_RQ_STATS_LEN; j++) { 3176 offset = virtnet_rq_stats_desc[j].offset; 3177 data[idx + j] = *(u64 *)(stats_base + offset); 3178 } 3179 } while (u64_stats_fetch_retry(&rq->stats.syncp, start)); 3180 idx += VIRTNET_RQ_STATS_LEN; 3181 } 3182 3183 for (i = 0; i < vi->curr_queue_pairs; i++) { 3184 struct send_queue *sq = &vi->sq[i]; 3185 3186 stats_base = (u8 *)&sq->stats; 3187 do { 3188 start = u64_stats_fetch_begin(&sq->stats.syncp); 3189 for (j = 0; j < VIRTNET_SQ_STATS_LEN; j++) { 3190 offset = virtnet_sq_stats_desc[j].offset; 3191 data[idx + j] = *(u64 *)(stats_base + offset); 3192 } 3193 } while (u64_stats_fetch_retry(&sq->stats.syncp, start)); 3194 idx += VIRTNET_SQ_STATS_LEN; 3195 } 3196 } 3197 3198 static void virtnet_get_channels(struct net_device *dev, 3199 struct ethtool_channels *channels) 3200 { 3201 struct virtnet_info *vi = netdev_priv(dev); 3202 3203 channels->combined_count = vi->curr_queue_pairs; 3204 channels->max_combined = vi->max_queue_pairs; 3205 channels->max_other = 0; 3206 channels->rx_count = 0; 3207 channels->tx_count = 0; 3208 channels->other_count = 0; 3209 } 3210 3211 static int virtnet_set_link_ksettings(struct net_device *dev, 3212 const struct ethtool_link_ksettings *cmd) 3213 { 3214 struct virtnet_info *vi = netdev_priv(dev); 3215 3216 return ethtool_virtdev_set_link_ksettings(dev, cmd, 3217 &vi->speed, &vi->duplex); 3218 } 3219 3220 static int virtnet_get_link_ksettings(struct net_device *dev, 3221 struct ethtool_link_ksettings *cmd) 3222 { 3223 struct virtnet_info *vi = netdev_priv(dev); 3224 3225 cmd->base.speed = vi->speed; 3226 cmd->base.duplex = vi->duplex; 3227 cmd->base.port = PORT_OTHER; 3228 3229 return 0; 3230 } 3231 
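/* Configure device-wide tx/rx interrupt coalescing via the control
 * virtqueue and mirror the accepted values into the global and
 * per-queue intr_coal bookkeeping.
 */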
3232 static int virtnet_send_notf_coal_cmds(struct virtnet_info *vi, 3233 struct ethtool_coalesce *ec) 3234 { 3235 struct scatterlist sgs_tx, sgs_rx; 3236 int i; 3237 3238 vi->ctrl->coal_tx.tx_usecs = cpu_to_le32(ec->tx_coalesce_usecs); 3239 vi->ctrl->coal_tx.tx_max_packets = cpu_to_le32(ec->tx_max_coalesced_frames); 3240 sg_init_one(&sgs_tx, &vi->ctrl->coal_tx, sizeof(vi->ctrl->coal_tx)); 3241 3242 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_NOTF_COAL, 3243 VIRTIO_NET_CTRL_NOTF_COAL_TX_SET, 3244 &sgs_tx)) 3245 return -EINVAL; 3246 3247 /* Save parameters */ 3248 vi->intr_coal_tx.max_usecs = ec->tx_coalesce_usecs; 3249 vi->intr_coal_tx.max_packets = ec->tx_max_coalesced_frames; 3250 for (i = 0; i < vi->max_queue_pairs; i++) { 3251 vi->sq[i].intr_coal.max_usecs = ec->tx_coalesce_usecs; 3252 vi->sq[i].intr_coal.max_packets = ec->tx_max_coalesced_frames; 3253 } 3254 3255 vi->ctrl->coal_rx.rx_usecs = cpu_to_le32(ec->rx_coalesce_usecs); 3256 vi->ctrl->coal_rx.rx_max_packets = cpu_to_le32(ec->rx_max_coalesced_frames); 3257 sg_init_one(&sgs_rx, &vi->ctrl->coal_rx, sizeof(vi->ctrl->coal_rx)); 3258 3259 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_NOTF_COAL, 3260 VIRTIO_NET_CTRL_NOTF_COAL_RX_SET, 3261 &sgs_rx)) 3262 return -EINVAL; 3263 3264 /* Save parameters */ 3265 vi->intr_coal_rx.max_usecs = ec->rx_coalesce_usecs; 3266 vi->intr_coal_rx.max_packets = ec->rx_max_coalesced_frames; 3267 for (i = 0; i < vi->max_queue_pairs; i++) { 3268 vi->rq[i].intr_coal.max_usecs = ec->rx_coalesce_usecs; 3269 vi->rq[i].intr_coal.max_packets = ec->rx_max_coalesced_frames; 3270 } 3271 3272 return 0; 3273 } 3274 3275 static int virtnet_send_ctrl_coal_vq_cmd(struct virtnet_info *vi, 3276 u16 vqn, u32 max_usecs, u32 max_packets) 3277 { 3278 struct scatterlist sgs; 3279 3280 vi->ctrl->coal_vq.vqn = cpu_to_le16(vqn); 3281 vi->ctrl->coal_vq.coal.max_usecs = cpu_to_le32(max_usecs); 3282 vi->ctrl->coal_vq.coal.max_packets = cpu_to_le32(max_packets); 3283 sg_init_one(&sgs, &vi->ctrl->coal_vq, sizeof(vi->ctrl->coal_vq)); 3284 3285 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_NOTF_COAL, 3286 VIRTIO_NET_CTRL_NOTF_COAL_VQ_SET, 3287 &sgs)) 3288 return -EINVAL; 3289 3290 return 0; 3291 } 3292 3293 static int virtnet_send_notf_coal_vq_cmds(struct virtnet_info *vi, 3294 struct ethtool_coalesce *ec, 3295 u16 queue) 3296 { 3297 int err; 3298 3299 err = virtnet_send_ctrl_coal_vq_cmd(vi, rxq2vq(queue), 3300 ec->rx_coalesce_usecs, 3301 ec->rx_max_coalesced_frames); 3302 if (err) 3303 return err; 3304 3305 vi->rq[queue].intr_coal.max_usecs = ec->rx_coalesce_usecs; 3306 vi->rq[queue].intr_coal.max_packets = ec->rx_max_coalesced_frames; 3307 3308 err = virtnet_send_ctrl_coal_vq_cmd(vi, txq2vq(queue), 3309 ec->tx_coalesce_usecs, 3310 ec->tx_max_coalesced_frames); 3311 if (err) 3312 return err; 3313 3314 vi->sq[queue].intr_coal.max_usecs = ec->tx_coalesce_usecs; 3315 vi->sq[queue].intr_coal.max_packets = ec->tx_max_coalesced_frames; 3316 3317 return 0; 3318 } 3319 3320 static int virtnet_coal_params_supported(struct ethtool_coalesce *ec) 3321 { 3322 /* usecs coalescing is supported only if VIRTIO_NET_F_NOTF_COAL 3323 * feature is negotiated. 
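 * Without that feature only the legacy behaviour is accepted: zero
 * usecs, at most one tx frame (napi off/on) and exactly one rx frame.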
3324 */ 3325 if (ec->rx_coalesce_usecs || ec->tx_coalesce_usecs) 3326 return -EOPNOTSUPP; 3327 3328 if (ec->tx_max_coalesced_frames > 1 || 3329 ec->rx_max_coalesced_frames != 1) 3330 return -EINVAL; 3331 3332 return 0; 3333 } 3334 3335 static int virtnet_should_update_vq_weight(int dev_flags, int weight, 3336 int vq_weight, bool *should_update) 3337 { 3338 if (weight ^ vq_weight) { 3339 if (dev_flags & IFF_UP) 3340 return -EBUSY; 3341 *should_update = true; 3342 } 3343 3344 return 0; 3345 } 3346 3347 static int virtnet_set_coalesce(struct net_device *dev, 3348 struct ethtool_coalesce *ec, 3349 struct kernel_ethtool_coalesce *kernel_coal, 3350 struct netlink_ext_ack *extack) 3351 { 3352 struct virtnet_info *vi = netdev_priv(dev); 3353 int ret, queue_number, napi_weight; 3354 bool update_napi = false; 3355 3356 /* Can't change NAPI weight if the link is up */ 3357 napi_weight = ec->tx_max_coalesced_frames ? NAPI_POLL_WEIGHT : 0; 3358 for (queue_number = 0; queue_number < vi->max_queue_pairs; queue_number++) { 3359 ret = virtnet_should_update_vq_weight(dev->flags, napi_weight, 3360 vi->sq[queue_number].napi.weight, 3361 &update_napi); 3362 if (ret) 3363 return ret; 3364 3365 if (update_napi) { 3366 /* All queues that belong to [queue_number, vi->max_queue_pairs] will be 3367 * updated for the sake of simplicity, which might not be necessary 3368 */ 3369 break; 3370 } 3371 } 3372 3373 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_NOTF_COAL)) 3374 ret = virtnet_send_notf_coal_cmds(vi, ec); 3375 else 3376 ret = virtnet_coal_params_supported(ec); 3377 3378 if (ret) 3379 return ret; 3380 3381 if (update_napi) { 3382 for (; queue_number < vi->max_queue_pairs; queue_number++) 3383 vi->sq[queue_number].napi.weight = napi_weight; 3384 } 3385 3386 return ret; 3387 } 3388 3389 static int virtnet_get_coalesce(struct net_device *dev, 3390 struct ethtool_coalesce *ec, 3391 struct kernel_ethtool_coalesce *kernel_coal, 3392 struct netlink_ext_ack *extack) 3393 { 3394 struct virtnet_info *vi = netdev_priv(dev); 3395 3396 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_NOTF_COAL)) { 3397 ec->rx_coalesce_usecs = vi->intr_coal_rx.max_usecs; 3398 ec->tx_coalesce_usecs = vi->intr_coal_tx.max_usecs; 3399 ec->tx_max_coalesced_frames = vi->intr_coal_tx.max_packets; 3400 ec->rx_max_coalesced_frames = vi->intr_coal_rx.max_packets; 3401 } else { 3402 ec->rx_max_coalesced_frames = 1; 3403 3404 if (vi->sq[0].napi.weight) 3405 ec->tx_max_coalesced_frames = 1; 3406 } 3407 3408 return 0; 3409 } 3410 3411 static int virtnet_set_per_queue_coalesce(struct net_device *dev, 3412 u32 queue, 3413 struct ethtool_coalesce *ec) 3414 { 3415 struct virtnet_info *vi = netdev_priv(dev); 3416 int ret, napi_weight; 3417 bool update_napi = false; 3418 3419 if (queue >= vi->max_queue_pairs) 3420 return -EINVAL; 3421 3422 /* Can't change NAPI weight if the link is up */ 3423 napi_weight = ec->tx_max_coalesced_frames ? 
NAPI_POLL_WEIGHT : 0; 3424 ret = virtnet_should_update_vq_weight(dev->flags, napi_weight, 3425 vi->sq[queue].napi.weight, 3426 &update_napi); 3427 if (ret) 3428 return ret; 3429 3430 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL)) 3431 ret = virtnet_send_notf_coal_vq_cmds(vi, ec, queue); 3432 else 3433 ret = virtnet_coal_params_supported(ec); 3434 3435 if (ret) 3436 return ret; 3437 3438 if (update_napi) 3439 vi->sq[queue].napi.weight = napi_weight; 3440 3441 return 0; 3442 } 3443 3444 static int virtnet_get_per_queue_coalesce(struct net_device *dev, 3445 u32 queue, 3446 struct ethtool_coalesce *ec) 3447 { 3448 struct virtnet_info *vi = netdev_priv(dev); 3449 3450 if (queue >= vi->max_queue_pairs) 3451 return -EINVAL; 3452 3453 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL)) { 3454 ec->rx_coalesce_usecs = vi->rq[queue].intr_coal.max_usecs; 3455 ec->tx_coalesce_usecs = vi->sq[queue].intr_coal.max_usecs; 3456 ec->tx_max_coalesced_frames = vi->sq[queue].intr_coal.max_packets; 3457 ec->rx_max_coalesced_frames = vi->rq[queue].intr_coal.max_packets; 3458 } else { 3459 ec->rx_max_coalesced_frames = 1; 3460 3461 if (vi->sq[queue].napi.weight) 3462 ec->tx_max_coalesced_frames = 1; 3463 } 3464 3465 return 0; 3466 } 3467 3468 static void virtnet_init_settings(struct net_device *dev) 3469 { 3470 struct virtnet_info *vi = netdev_priv(dev); 3471 3472 vi->speed = SPEED_UNKNOWN; 3473 vi->duplex = DUPLEX_UNKNOWN; 3474 } 3475 3476 static void virtnet_update_settings(struct virtnet_info *vi) 3477 { 3478 u32 speed; 3479 u8 duplex; 3480 3481 if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_SPEED_DUPLEX)) 3482 return; 3483 3484 virtio_cread_le(vi->vdev, struct virtio_net_config, speed, &speed); 3485 3486 if (ethtool_validate_speed(speed)) 3487 vi->speed = speed; 3488 3489 virtio_cread_le(vi->vdev, struct virtio_net_config, duplex, &duplex); 3490 3491 if (ethtool_validate_duplex(duplex)) 3492 vi->duplex = duplex; 3493 } 3494 3495 static u32 virtnet_get_rxfh_key_size(struct net_device *dev) 3496 { 3497 return ((struct virtnet_info *)netdev_priv(dev))->rss_key_size; 3498 } 3499 3500 static u32 virtnet_get_rxfh_indir_size(struct net_device *dev) 3501 { 3502 return ((struct virtnet_info *)netdev_priv(dev))->rss_indir_table_size; 3503 } 3504 3505 static int virtnet_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, u8 *hfunc) 3506 { 3507 struct virtnet_info *vi = netdev_priv(dev); 3508 int i; 3509 3510 if (indir) { 3511 for (i = 0; i < vi->rss_indir_table_size; ++i) 3512 indir[i] = vi->ctrl->rss.indirection_table[i]; 3513 } 3514 3515 if (key) 3516 memcpy(key, vi->ctrl->rss.key, vi->rss_key_size); 3517 3518 if (hfunc) 3519 *hfunc = ETH_RSS_HASH_TOP; 3520 3521 return 0; 3522 } 3523 3524 static int virtnet_set_rxfh(struct net_device *dev, const u32 *indir, const u8 *key, const u8 hfunc) 3525 { 3526 struct virtnet_info *vi = netdev_priv(dev); 3527 int i; 3528 3529 if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP) 3530 return -EOPNOTSUPP; 3531 3532 if (indir) { 3533 for (i = 0; i < vi->rss_indir_table_size; ++i) 3534 vi->ctrl->rss.indirection_table[i] = indir[i]; 3535 } 3536 if (key) 3537 memcpy(vi->ctrl->rss.key, key, vi->rss_key_size); 3538 3539 virtnet_commit_rss_command(vi); 3540 3541 return 0; 3542 } 3543 3544 static int virtnet_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info, u32 *rule_locs) 3545 { 3546 struct virtnet_info *vi = netdev_priv(dev); 3547 int rc = 0; 3548 3549 switch (info->cmd) { 3550 case ETHTOOL_GRXRINGS: 3551 info->data = vi->curr_queue_pairs; 
3552 break; 3553 case ETHTOOL_GRXFH: 3554 virtnet_get_hashflow(vi, info); 3555 break; 3556 default: 3557 rc = -EOPNOTSUPP; 3558 } 3559 3560 return rc; 3561 } 3562 3563 static int virtnet_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info) 3564 { 3565 struct virtnet_info *vi = netdev_priv(dev); 3566 int rc = 0; 3567 3568 switch (info->cmd) { 3569 case ETHTOOL_SRXFH: 3570 if (!virtnet_set_hashflow(vi, info)) 3571 rc = -EINVAL; 3572 3573 break; 3574 default: 3575 rc = -EOPNOTSUPP; 3576 } 3577 3578 return rc; 3579 } 3580 3581 static const struct ethtool_ops virtnet_ethtool_ops = { 3582 .supported_coalesce_params = ETHTOOL_COALESCE_MAX_FRAMES | 3583 ETHTOOL_COALESCE_USECS, 3584 .get_drvinfo = virtnet_get_drvinfo, 3585 .get_link = ethtool_op_get_link, 3586 .get_ringparam = virtnet_get_ringparam, 3587 .set_ringparam = virtnet_set_ringparam, 3588 .get_strings = virtnet_get_strings, 3589 .get_sset_count = virtnet_get_sset_count, 3590 .get_ethtool_stats = virtnet_get_ethtool_stats, 3591 .set_channels = virtnet_set_channels, 3592 .get_channels = virtnet_get_channels, 3593 .get_ts_info = ethtool_op_get_ts_info, 3594 .get_link_ksettings = virtnet_get_link_ksettings, 3595 .set_link_ksettings = virtnet_set_link_ksettings, 3596 .set_coalesce = virtnet_set_coalesce, 3597 .get_coalesce = virtnet_get_coalesce, 3598 .set_per_queue_coalesce = virtnet_set_per_queue_coalesce, 3599 .get_per_queue_coalesce = virtnet_get_per_queue_coalesce, 3600 .get_rxfh_key_size = virtnet_get_rxfh_key_size, 3601 .get_rxfh_indir_size = virtnet_get_rxfh_indir_size, 3602 .get_rxfh = virtnet_get_rxfh, 3603 .set_rxfh = virtnet_set_rxfh, 3604 .get_rxnfc = virtnet_get_rxnfc, 3605 .set_rxnfc = virtnet_set_rxnfc, 3606 }; 3607 3608 static void virtnet_freeze_down(struct virtio_device *vdev) 3609 { 3610 struct virtnet_info *vi = vdev->priv; 3611 3612 /* Make sure no work handler is accessing the device */ 3613 flush_work(&vi->config_work); 3614 3615 netif_tx_lock_bh(vi->dev); 3616 netif_device_detach(vi->dev); 3617 netif_tx_unlock_bh(vi->dev); 3618 if (netif_running(vi->dev)) 3619 virtnet_close(vi->dev); 3620 } 3621 3622 static int init_vqs(struct virtnet_info *vi); 3623 3624 static int virtnet_restore_up(struct virtio_device *vdev) 3625 { 3626 struct virtnet_info *vi = vdev->priv; 3627 int err; 3628 3629 err = init_vqs(vi); 3630 if (err) 3631 return err; 3632 3633 virtio_device_ready(vdev); 3634 3635 enable_delayed_refill(vi); 3636 3637 if (netif_running(vi->dev)) { 3638 err = virtnet_open(vi->dev); 3639 if (err) 3640 return err; 3641 } 3642 3643 netif_tx_lock_bh(vi->dev); 3644 netif_device_attach(vi->dev); 3645 netif_tx_unlock_bh(vi->dev); 3646 return err; 3647 } 3648 3649 static int virtnet_set_guest_offloads(struct virtnet_info *vi, u64 offloads) 3650 { 3651 struct scatterlist sg; 3652 vi->ctrl->offloads = cpu_to_virtio64(vi->vdev, offloads); 3653 3654 sg_init_one(&sg, &vi->ctrl->offloads, sizeof(vi->ctrl->offloads)); 3655 3656 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_GUEST_OFFLOADS, 3657 VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET, &sg)) { 3658 dev_warn(&vi->dev->dev, "Fail to set guest offload.\n"); 3659 return -EINVAL; 3660 } 3661 3662 return 0; 3663 } 3664 3665 static int virtnet_clear_guest_offloads(struct virtnet_info *vi) 3666 { 3667 u64 offloads = 0; 3668 3669 if (!vi->guest_offloads) 3670 return 0; 3671 3672 return virtnet_set_guest_offloads(vi, offloads); 3673 } 3674 3675 static int virtnet_restore_guest_offloads(struct virtnet_info *vi) 3676 { 3677 u64 offloads = vi->guest_offloads; 3678 3679 if (!vi->guest_offloads) 
3680 return 0; 3681 3682 return virtnet_set_guest_offloads(vi, offloads); 3683 } 3684 3685 static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog, 3686 struct netlink_ext_ack *extack) 3687 { 3688 unsigned int room = SKB_DATA_ALIGN(VIRTIO_XDP_HEADROOM + 3689 sizeof(struct skb_shared_info)); 3690 unsigned int max_sz = PAGE_SIZE - room - ETH_HLEN; 3691 struct virtnet_info *vi = netdev_priv(dev); 3692 struct bpf_prog *old_prog; 3693 u16 xdp_qp = 0, curr_qp; 3694 int i, err; 3695 3696 if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS) 3697 && (virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO4) || 3698 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO6) || 3699 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_ECN) || 3700 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_UFO) || 3701 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_CSUM) || 3702 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_USO4) || 3703 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_USO6))) { 3704 NL_SET_ERR_MSG_MOD(extack, "Can't set XDP while host is implementing GRO_HW/CSUM, disable GRO_HW/CSUM first"); 3705 return -EOPNOTSUPP; 3706 } 3707 3708 if (vi->mergeable_rx_bufs && !vi->any_header_sg) { 3709 NL_SET_ERR_MSG_MOD(extack, "XDP expects header/data in single page, any_header_sg required"); 3710 return -EINVAL; 3711 } 3712 3713 if (prog && !prog->aux->xdp_has_frags && dev->mtu > max_sz) { 3714 NL_SET_ERR_MSG_MOD(extack, "MTU too large to enable XDP without frags"); 3715 netdev_warn(dev, "single-buffer XDP requires MTU less than %u\n", max_sz); 3716 return -EINVAL; 3717 } 3718 3719 curr_qp = vi->curr_queue_pairs - vi->xdp_queue_pairs; 3720 if (prog) 3721 xdp_qp = nr_cpu_ids; 3722 3723 /* XDP requires extra queues for XDP_TX */ 3724 if (curr_qp + xdp_qp > vi->max_queue_pairs) { 3725 netdev_warn_once(dev, "XDP request %i queues but max is %i. XDP_TX and XDP_REDIRECT will operate in a slower locked tx mode.\n", 3726 curr_qp + xdp_qp, vi->max_queue_pairs); 3727 xdp_qp = 0; 3728 } 3729 3730 old_prog = rtnl_dereference(vi->rq[0].xdp_prog); 3731 if (!prog && !old_prog) 3732 return 0; 3733 3734 if (prog) 3735 bpf_prog_add(prog, vi->max_queue_pairs - 1); 3736 3737 /* Make sure NAPI is not using any XDP TX queues for RX. 
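 * Both rx and tx napi are stopped on every queue pair before the
 * xdp_prog pointers are updated, and re-enabled once the swap is done.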
*/ 3738 if (netif_running(dev)) { 3739 for (i = 0; i < vi->max_queue_pairs; i++) { 3740 napi_disable(&vi->rq[i].napi); 3741 virtnet_napi_tx_disable(&vi->sq[i].napi); 3742 } 3743 } 3744 3745 if (!prog) { 3746 for (i = 0; i < vi->max_queue_pairs; i++) { 3747 rcu_assign_pointer(vi->rq[i].xdp_prog, prog); 3748 if (i == 0) 3749 virtnet_restore_guest_offloads(vi); 3750 } 3751 synchronize_net(); 3752 } 3753 3754 err = _virtnet_set_queues(vi, curr_qp + xdp_qp); 3755 if (err) 3756 goto err; 3757 netif_set_real_num_rx_queues(dev, curr_qp + xdp_qp); 3758 vi->xdp_queue_pairs = xdp_qp; 3759 3760 if (prog) { 3761 vi->xdp_enabled = true; 3762 for (i = 0; i < vi->max_queue_pairs; i++) { 3763 rcu_assign_pointer(vi->rq[i].xdp_prog, prog); 3764 if (i == 0 && !old_prog) 3765 virtnet_clear_guest_offloads(vi); 3766 } 3767 if (!old_prog) 3768 xdp_features_set_redirect_target(dev, true); 3769 } else { 3770 xdp_features_clear_redirect_target(dev); 3771 vi->xdp_enabled = false; 3772 } 3773 3774 for (i = 0; i < vi->max_queue_pairs; i++) { 3775 if (old_prog) 3776 bpf_prog_put(old_prog); 3777 if (netif_running(dev)) { 3778 virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi); 3779 virtnet_napi_tx_enable(vi, vi->sq[i].vq, 3780 &vi->sq[i].napi); 3781 } 3782 } 3783 3784 return 0; 3785 3786 err: 3787 if (!prog) { 3788 virtnet_clear_guest_offloads(vi); 3789 for (i = 0; i < vi->max_queue_pairs; i++) 3790 rcu_assign_pointer(vi->rq[i].xdp_prog, old_prog); 3791 } 3792 3793 if (netif_running(dev)) { 3794 for (i = 0; i < vi->max_queue_pairs; i++) { 3795 virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi); 3796 virtnet_napi_tx_enable(vi, vi->sq[i].vq, 3797 &vi->sq[i].napi); 3798 } 3799 } 3800 if (prog) 3801 bpf_prog_sub(prog, vi->max_queue_pairs - 1); 3802 return err; 3803 } 3804 3805 static int virtnet_xdp(struct net_device *dev, struct netdev_bpf *xdp) 3806 { 3807 switch (xdp->command) { 3808 case XDP_SETUP_PROG: 3809 return virtnet_xdp_set(dev, xdp->prog, xdp->extack); 3810 default: 3811 return -EINVAL; 3812 } 3813 } 3814 3815 static int virtnet_get_phys_port_name(struct net_device *dev, char *buf, 3816 size_t len) 3817 { 3818 struct virtnet_info *vi = netdev_priv(dev); 3819 int ret; 3820 3821 if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_STANDBY)) 3822 return -EOPNOTSUPP; 3823 3824 ret = snprintf(buf, len, "sby"); 3825 if (ret >= len) 3826 return -EOPNOTSUPP; 3827 3828 return 0; 3829 } 3830 3831 static int virtnet_set_features(struct net_device *dev, 3832 netdev_features_t features) 3833 { 3834 struct virtnet_info *vi = netdev_priv(dev); 3835 u64 offloads; 3836 int err; 3837 3838 if ((dev->features ^ features) & NETIF_F_GRO_HW) { 3839 if (vi->xdp_enabled) 3840 return -EBUSY; 3841 3842 if (features & NETIF_F_GRO_HW) 3843 offloads = vi->guest_offloads_capable; 3844 else 3845 offloads = vi->guest_offloads_capable & 3846 ~GUEST_OFFLOAD_GRO_HW_MASK; 3847 3848 err = virtnet_set_guest_offloads(vi, offloads); 3849 if (err) 3850 return err; 3851 vi->guest_offloads = offloads; 3852 } 3853 3854 if ((dev->features ^ features) & NETIF_F_RXHASH) { 3855 if (features & NETIF_F_RXHASH) 3856 vi->ctrl->rss.hash_types = vi->rss_hash_types_saved; 3857 else 3858 vi->ctrl->rss.hash_types = VIRTIO_NET_HASH_REPORT_NONE; 3859 3860 if (!virtnet_commit_rss_command(vi)) 3861 return -EINVAL; 3862 } 3863 3864 return 0; 3865 } 3866 3867 static void virtnet_tx_timeout(struct net_device *dev, unsigned int txqueue) 3868 { 3869 struct virtnet_info *priv = netdev_priv(dev); 3870 struct send_queue *sq = &priv->sq[txqueue]; 3871 struct netdev_queue *txq = 
netdev_get_tx_queue(dev, txqueue); 3872 3873 u64_stats_update_begin(&sq->stats.syncp); 3874 sq->stats.tx_timeouts++; 3875 u64_stats_update_end(&sq->stats.syncp); 3876 3877 netdev_err(dev, "TX timeout on queue: %u, sq: %s, vq: 0x%x, name: %s, %u usecs ago\n", 3878 txqueue, sq->name, sq->vq->index, sq->vq->name, 3879 jiffies_to_usecs(jiffies - READ_ONCE(txq->trans_start))); 3880 } 3881 3882 static const struct net_device_ops virtnet_netdev = { 3883 .ndo_open = virtnet_open, 3884 .ndo_stop = virtnet_close, 3885 .ndo_start_xmit = start_xmit, 3886 .ndo_validate_addr = eth_validate_addr, 3887 .ndo_set_mac_address = virtnet_set_mac_address, 3888 .ndo_set_rx_mode = virtnet_set_rx_mode, 3889 .ndo_get_stats64 = virtnet_stats, 3890 .ndo_vlan_rx_add_vid = virtnet_vlan_rx_add_vid, 3891 .ndo_vlan_rx_kill_vid = virtnet_vlan_rx_kill_vid, 3892 .ndo_bpf = virtnet_xdp, 3893 .ndo_xdp_xmit = virtnet_xdp_xmit, 3894 .ndo_features_check = passthru_features_check, 3895 .ndo_get_phys_port_name = virtnet_get_phys_port_name, 3896 .ndo_set_features = virtnet_set_features, 3897 .ndo_tx_timeout = virtnet_tx_timeout, 3898 }; 3899 3900 static void virtnet_config_changed_work(struct work_struct *work) 3901 { 3902 struct virtnet_info *vi = 3903 container_of(work, struct virtnet_info, config_work); 3904 u16 v; 3905 3906 if (virtio_cread_feature(vi->vdev, VIRTIO_NET_F_STATUS, 3907 struct virtio_net_config, status, &v) < 0) 3908 return; 3909 3910 if (v & VIRTIO_NET_S_ANNOUNCE) { 3911 netdev_notify_peers(vi->dev); 3912 virtnet_ack_link_announce(vi); 3913 } 3914 3915 /* Ignore unknown (future) status bits */ 3916 v &= VIRTIO_NET_S_LINK_UP; 3917 3918 if (vi->status == v) 3919 return; 3920 3921 vi->status = v; 3922 3923 if (vi->status & VIRTIO_NET_S_LINK_UP) { 3924 virtnet_update_settings(vi); 3925 netif_carrier_on(vi->dev); 3926 netif_tx_wake_all_queues(vi->dev); 3927 } else { 3928 netif_carrier_off(vi->dev); 3929 netif_tx_stop_all_queues(vi->dev); 3930 } 3931 } 3932 3933 static void virtnet_config_changed(struct virtio_device *vdev) 3934 { 3935 struct virtnet_info *vi = vdev->priv; 3936 3937 schedule_work(&vi->config_work); 3938 } 3939 3940 static void virtnet_free_queues(struct virtnet_info *vi) 3941 { 3942 int i; 3943 3944 for (i = 0; i < vi->max_queue_pairs; i++) { 3945 __netif_napi_del(&vi->rq[i].napi); 3946 __netif_napi_del(&vi->sq[i].napi); 3947 } 3948 3949 /* We called __netif_napi_del(), 3950 * we need to respect an RCU grace period before freeing vi->rq 3951 */ 3952 synchronize_net(); 3953 3954 kfree(vi->rq); 3955 kfree(vi->sq); 3956 kfree(vi->ctrl); 3957 } 3958 3959 static void _free_receive_bufs(struct virtnet_info *vi) 3960 { 3961 struct bpf_prog *old_prog; 3962 int i; 3963 3964 for (i = 0; i < vi->max_queue_pairs; i++) { 3965 while (vi->rq[i].pages) 3966 __free_pages(get_a_page(&vi->rq[i], GFP_KERNEL), 0); 3967 3968 old_prog = rtnl_dereference(vi->rq[i].xdp_prog); 3969 RCU_INIT_POINTER(vi->rq[i].xdp_prog, NULL); 3970 if (old_prog) 3971 bpf_prog_put(old_prog); 3972 } 3973 } 3974 3975 static void free_receive_bufs(struct virtnet_info *vi) 3976 { 3977 rtnl_lock(); 3978 _free_receive_bufs(vi); 3979 rtnl_unlock(); 3980 } 3981 3982 static void free_receive_page_frags(struct virtnet_info *vi) 3983 { 3984 int i; 3985 for (i = 0; i < vi->max_queue_pairs; i++) 3986 if (vi->rq[i].alloc_frag.page) { 3987 if (vi->rq[i].do_dma && vi->rq[i].last_dma) 3988 virtnet_rq_unmap(&vi->rq[i], vi->rq[i].last_dma, 0); 3989 put_page(vi->rq[i].alloc_frag.page); 3990 } 3991 } 3992 3993 static void virtnet_sq_free_unused_buf(struct virtqueue 
static void virtnet_rq_free_unused_buf(struct virtqueue *vq, void *buf)
{
	struct virtnet_info *vi = vq->vdev->priv;
	int i = vq2rxq(vq);

	if (vi->mergeable_rx_bufs)
		put_page(virt_to_head_page(buf));
	else if (vi->big_packets)
		give_pages(&vi->rq[i], buf);
	else
		put_page(virt_to_head_page(buf));
}

static void free_unused_bufs(struct virtnet_info *vi)
{
	void *buf;
	int i;

	for (i = 0; i < vi->max_queue_pairs; i++) {
		struct virtqueue *vq = vi->sq[i].vq;
		while ((buf = virtqueue_detach_unused_buf(vq)) != NULL)
			virtnet_sq_free_unused_buf(vq, buf);
		cond_resched();
	}

	for (i = 0; i < vi->max_queue_pairs; i++) {
		struct receive_queue *rq = &vi->rq[i];

		while ((buf = virtnet_rq_detach_unused_buf(rq)) != NULL)
			virtnet_rq_free_unused_buf(rq->vq, buf);
		cond_resched();
	}
}

static void virtnet_del_vqs(struct virtnet_info *vi)
{
	struct virtio_device *vdev = vi->vdev;

	virtnet_clean_affinity(vi);

	vdev->config->del_vqs(vdev);

	virtnet_free_queues(vi);
}

/* How large should a single buffer be so a queue full of these can fit at
 * least one full packet?
 * Logic below assumes the mergeable buffer header is used.
 */
static unsigned int mergeable_min_buf_len(struct virtnet_info *vi, struct virtqueue *vq)
{
	const unsigned int hdr_len = vi->hdr_len;
	unsigned int rq_size = virtqueue_get_vring_size(vq);
	unsigned int packet_len = vi->big_packets ? IP_MAX_MTU : vi->dev->max_mtu;
	unsigned int buf_len = hdr_len + ETH_HLEN + VLAN_HLEN + packet_len;
	unsigned int min_buf_len = DIV_ROUND_UP(buf_len, rq_size);

	return max(max(min_buf_len, hdr_len) - hdr_len,
		   (unsigned int)GOOD_PACKET_LEN);
}
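/* Editor's note (illustrative sketch, not part of the driver): a worked
 * example of the sizing above, assuming a 16-entry RX ring, big_packets
 * enabled and the 12-byte mergeable header:
 *
 *	buf_len     = 12 + 14 + 4 + 65535 = 65565
 *	min_buf_len = DIV_ROUND_UP(65565, 16) = 4098
 *	result      = max(4098 - 12, GOOD_PACKET_LEN) = 4086
 *
 * With a typical 256-entry ring the per-buffer share drops below
 * GOOD_PACKET_LEN (1518 bytes), so the result is clamped to GOOD_PACKET_LEN.
 */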
static int virtnet_find_vqs(struct virtnet_info *vi)
{
	vq_callback_t **callbacks;
	struct virtqueue **vqs;
	int ret = -ENOMEM;
	int i, total_vqs;
	const char **names;
	bool *ctx;

	/* We expect 1 RX virtqueue followed by 1 TX virtqueue, followed by
	 * possible N-1 RX/TX queue pairs used in multiqueue mode, followed by
	 * possible control vq.
	 */
	total_vqs = vi->max_queue_pairs * 2 +
		    virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ);

	/* Allocate space for find_vqs parameters */
	vqs = kcalloc(total_vqs, sizeof(*vqs), GFP_KERNEL);
	if (!vqs)
		goto err_vq;
	callbacks = kmalloc_array(total_vqs, sizeof(*callbacks), GFP_KERNEL);
	if (!callbacks)
		goto err_callback;
	names = kmalloc_array(total_vqs, sizeof(*names), GFP_KERNEL);
	if (!names)
		goto err_names;
	if (!vi->big_packets || vi->mergeable_rx_bufs) {
		ctx = kcalloc(total_vqs, sizeof(*ctx), GFP_KERNEL);
		if (!ctx)
			goto err_ctx;
	} else {
		ctx = NULL;
	}

	/* Parameters for control virtqueue, if any */
	if (vi->has_cvq) {
		callbacks[total_vqs - 1] = NULL;
		names[total_vqs - 1] = "control";
	}

	/* Allocate/initialize parameters for send/receive virtqueues */
	for (i = 0; i < vi->max_queue_pairs; i++) {
		callbacks[rxq2vq(i)] = skb_recv_done;
		callbacks[txq2vq(i)] = skb_xmit_done;
		sprintf(vi->rq[i].name, "input.%d", i);
		sprintf(vi->sq[i].name, "output.%d", i);
		names[rxq2vq(i)] = vi->rq[i].name;
		names[txq2vq(i)] = vi->sq[i].name;
		if (ctx)
			ctx[rxq2vq(i)] = true;
	}

	ret = virtio_find_vqs_ctx(vi->vdev, total_vqs, vqs, callbacks,
				  names, ctx, NULL);
	if (ret)
		goto err_find;

	if (vi->has_cvq) {
		vi->cvq = vqs[total_vqs - 1];
		if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VLAN))
			vi->dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
	}

	for (i = 0; i < vi->max_queue_pairs; i++) {
		vi->rq[i].vq = vqs[rxq2vq(i)];
		vi->rq[i].min_buf_len = mergeable_min_buf_len(vi, vi->rq[i].vq);
		vi->sq[i].vq = vqs[txq2vq(i)];
	}

	/* run here: ret == 0. */


err_find:
	kfree(ctx);
err_ctx:
	kfree(names);
err_names:
	kfree(callbacks);
err_callback:
	kfree(vqs);
err_vq:
	return ret;
}
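/* Editor's note (illustrative sketch, not part of the driver): for a device
 * with max_queue_pairs == 2 and a control virtqueue, total_vqs is 5 and the
 * rxq2vq()/txq2vq() helpers (defined earlier in this file) lay the queues
 * out as:
 *
 *	vqs[0] = "input.0"   (RX, pair 0)
 *	vqs[1] = "output.0"  (TX, pair 0)
 *	vqs[2] = "input.1"   (RX, pair 1)
 *	vqs[3] = "output.1"  (TX, pair 1)
 *	vqs[4] = "control"
 *
 * i.e. rxq2vq(i) == 2 * i and txq2vq(i) == 2 * i + 1, with the control
 * queue always last.
 */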
static int virtnet_alloc_queues(struct virtnet_info *vi)
{
	int i;

	if (vi->has_cvq) {
		vi->ctrl = kzalloc(sizeof(*vi->ctrl), GFP_KERNEL);
		if (!vi->ctrl)
			goto err_ctrl;
	} else {
		vi->ctrl = NULL;
	}
	vi->sq = kcalloc(vi->max_queue_pairs, sizeof(*vi->sq), GFP_KERNEL);
	if (!vi->sq)
		goto err_sq;
	vi->rq = kcalloc(vi->max_queue_pairs, sizeof(*vi->rq), GFP_KERNEL);
	if (!vi->rq)
		goto err_rq;

	INIT_DELAYED_WORK(&vi->refill, refill_work);
	for (i = 0; i < vi->max_queue_pairs; i++) {
		vi->rq[i].pages = NULL;
		netif_napi_add_weight(vi->dev, &vi->rq[i].napi, virtnet_poll,
				      napi_weight);
		netif_napi_add_tx_weight(vi->dev, &vi->sq[i].napi,
					 virtnet_poll_tx,
					 napi_tx ? napi_weight : 0);

		sg_init_table(vi->rq[i].sg, ARRAY_SIZE(vi->rq[i].sg));
		ewma_pkt_len_init(&vi->rq[i].mrg_avg_pkt_len);
		sg_init_table(vi->sq[i].sg, ARRAY_SIZE(vi->sq[i].sg));

		u64_stats_init(&vi->rq[i].stats.syncp);
		u64_stats_init(&vi->sq[i].stats.syncp);
	}

	return 0;

err_rq:
	kfree(vi->sq);
err_sq:
	kfree(vi->ctrl);
err_ctrl:
	return -ENOMEM;
}

static int init_vqs(struct virtnet_info *vi)
{
	int ret;

	/* Allocate send & receive queues */
	ret = virtnet_alloc_queues(vi);
	if (ret)
		goto err;

	ret = virtnet_find_vqs(vi);
	if (ret)
		goto err_free;

	virtnet_rq_set_premapped(vi);

	cpus_read_lock();
	virtnet_set_affinity(vi);
	cpus_read_unlock();

	return 0;

err_free:
	virtnet_free_queues(vi);
err:
	return ret;
}

#ifdef CONFIG_SYSFS
static ssize_t mergeable_rx_buffer_size_show(struct netdev_rx_queue *queue,
					     char *buf)
{
	struct virtnet_info *vi = netdev_priv(queue->dev);
	unsigned int queue_index = get_netdev_rx_queue_index(queue);
	unsigned int headroom = virtnet_get_headroom(vi);
	unsigned int tailroom = headroom ? sizeof(struct skb_shared_info) : 0;
	struct ewma_pkt_len *avg;

	BUG_ON(queue_index >= vi->max_queue_pairs);
	avg = &vi->rq[queue_index].mrg_avg_pkt_len;
	return sprintf(buf, "%u\n",
		       get_mergeable_buf_len(&vi->rq[queue_index], avg,
					     SKB_DATA_ALIGN(headroom + tailroom)));
}

static struct rx_queue_attribute mergeable_rx_buffer_size_attribute =
	__ATTR_RO(mergeable_rx_buffer_size);

static struct attribute *virtio_net_mrg_rx_attrs[] = {
	&mergeable_rx_buffer_size_attribute.attr,
	NULL
};

static const struct attribute_group virtio_net_mrg_rx_group = {
	.name = "virtio_net",
	.attrs = virtio_net_mrg_rx_attrs
};
#endif
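/* Editor's note (illustrative sketch, not part of the driver): with mergeable
 * RX buffers the attribute above is registered under each RX queue's sysfs
 * directory, so the current EWMA-derived refill buffer size can be read from
 * userspace, for example:
 *
 *	$ cat /sys/class/net/eth0/queues/rx-0/virtio_net/mergeable_rx_buffer_size
 *	1536
 *
 * The interface name, queue index and the sample value shown here are
 * assumptions; the number reported is whatever get_mergeable_buf_len()
 * computes at the time of the read.
 */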
static bool virtnet_fail_on_feature(struct virtio_device *vdev,
				    unsigned int fbit,
				    const char *fname, const char *dname)
{
	if (!virtio_has_feature(vdev, fbit))
		return false;

	dev_err(&vdev->dev, "device advertises feature %s but not %s",
		fname, dname);

	return true;
}

#define VIRTNET_FAIL_ON(vdev, fbit, dbit)			\
	virtnet_fail_on_feature(vdev, fbit, #fbit, dbit)

static bool virtnet_validate_features(struct virtio_device *vdev)
{
	if (!virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ) &&
	    (VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_CTRL_RX,
			     "VIRTIO_NET_F_CTRL_VQ") ||
	     VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_CTRL_VLAN,
			     "VIRTIO_NET_F_CTRL_VQ") ||
	     VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_GUEST_ANNOUNCE,
			     "VIRTIO_NET_F_CTRL_VQ") ||
	     VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_MQ, "VIRTIO_NET_F_CTRL_VQ") ||
	     VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR,
			     "VIRTIO_NET_F_CTRL_VQ") ||
	     VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_RSS,
			     "VIRTIO_NET_F_CTRL_VQ") ||
	     VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_HASH_REPORT,
			     "VIRTIO_NET_F_CTRL_VQ") ||
	     VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_NOTF_COAL,
			     "VIRTIO_NET_F_CTRL_VQ") ||
	     VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_VQ_NOTF_COAL,
			     "VIRTIO_NET_F_CTRL_VQ"))) {
		return false;
	}

	return true;
}

#define MIN_MTU ETH_MIN_MTU
#define MAX_MTU ETH_MAX_MTU

static int virtnet_validate(struct virtio_device *vdev)
{
	if (!vdev->config->get) {
		dev_err(&vdev->dev, "%s failure: config access disabled\n",
			__func__);
		return -EINVAL;
	}

	if (!virtnet_validate_features(vdev))
		return -EINVAL;

	if (virtio_has_feature(vdev, VIRTIO_NET_F_MTU)) {
		int mtu = virtio_cread16(vdev,
					 offsetof(struct virtio_net_config,
						  mtu));
		if (mtu < MIN_MTU)
			__virtio_clear_bit(vdev, VIRTIO_NET_F_MTU);
	}

	if (virtio_has_feature(vdev, VIRTIO_NET_F_STANDBY) &&
	    !virtio_has_feature(vdev, VIRTIO_NET_F_MAC)) {
		dev_warn(&vdev->dev, "device advertises feature VIRTIO_NET_F_STANDBY but not VIRTIO_NET_F_MAC, disabling standby");
		__virtio_clear_bit(vdev, VIRTIO_NET_F_STANDBY);
	}

	return 0;
}

static bool virtnet_check_guest_gso(const struct virtnet_info *vi)
{
	return virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO4) ||
	       virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO6) ||
	       virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_ECN) ||
	       virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_UFO) ||
	       (virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_USO4) &&
		virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_USO6));
}

static void virtnet_set_big_packets(struct virtnet_info *vi, const int mtu)
{
	bool guest_gso = virtnet_check_guest_gso(vi);

	/* If the device can receive ANY guest GSO packets, regardless of mtu,
	 * allocate packets of maximum size; otherwise limit them to
	 * mtu-sized packets only.
	 */
	if (mtu > ETH_DATA_LEN || guest_gso) {
		vi->big_packets = true;
		vi->big_packets_num_skbfrags = guest_gso ? MAX_SKB_FRAGS : DIV_ROUND_UP(mtu, PAGE_SIZE);
	}
}
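/* Editor's note (illustrative sketch, not part of the driver): with no guest
 * GSO features but a device-supplied MTU of 9000 on a system with 4 KiB
 * pages, the logic above gives
 *
 *	big_packets = true
 *	big_packets_num_skbfrags = DIV_ROUND_UP(9000, 4096) = 3
 *
 * whereas any guest GSO capability forces the worst case of MAX_SKB_FRAGS
 * page fragments so a full 64 KiB GSO packet can still be received.
 */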
static int virtnet_probe(struct virtio_device *vdev)
{
	int i, err = -ENOMEM;
	struct net_device *dev;
	struct virtnet_info *vi;
	u16 max_queue_pairs;
	int mtu = 0;

	/* Find if host supports multiqueue/rss virtio_net device */
	max_queue_pairs = 1;
	if (virtio_has_feature(vdev, VIRTIO_NET_F_MQ) || virtio_has_feature(vdev, VIRTIO_NET_F_RSS))
		max_queue_pairs =
			virtio_cread16(vdev, offsetof(struct virtio_net_config, max_virtqueue_pairs));

	/* We need at least 2 queues */
	if (max_queue_pairs < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN ||
	    max_queue_pairs > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX ||
	    !virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ))
		max_queue_pairs = 1;

	/* Allocate ourselves a network device with room for our info */
	dev = alloc_etherdev_mq(sizeof(struct virtnet_info), max_queue_pairs);
	if (!dev)
		return -ENOMEM;

	/* Set up network device as normal. */
	dev->priv_flags |= IFF_UNICAST_FLT | IFF_LIVE_ADDR_CHANGE |
			   IFF_TX_SKB_NO_LINEAR;
	dev->netdev_ops = &virtnet_netdev;
	dev->features = NETIF_F_HIGHDMA;

	dev->ethtool_ops = &virtnet_ethtool_ops;
	SET_NETDEV_DEV(dev, &vdev->dev);

	/* Do we support "hardware" checksums? */
	if (virtio_has_feature(vdev, VIRTIO_NET_F_CSUM)) {
		/* This opens up the world of extra features. */
		dev->hw_features |= NETIF_F_HW_CSUM | NETIF_F_SG;
		if (csum)
			dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG;

		if (virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) {
			dev->hw_features |= NETIF_F_TSO
				| NETIF_F_TSO_ECN | NETIF_F_TSO6;
		}
		/* Individual feature bits: what can host handle? */
		if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO4))
			dev->hw_features |= NETIF_F_TSO;
		if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO6))
			dev->hw_features |= NETIF_F_TSO6;
		if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_ECN))
			dev->hw_features |= NETIF_F_TSO_ECN;
		if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_USO))
			dev->hw_features |= NETIF_F_GSO_UDP_L4;

		dev->features |= NETIF_F_GSO_ROBUST;

		if (gso)
			dev->features |= dev->hw_features & NETIF_F_ALL_TSO;
		/* (!csum && gso) case will be fixed by register_netdev() */
	}
	if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_CSUM))
		dev->features |= NETIF_F_RXCSUM;
	if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) ||
	    virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6))
		dev->features |= NETIF_F_GRO_HW;
	if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS))
		dev->hw_features |= NETIF_F_GRO_HW;

	dev->vlan_features = dev->features;
	dev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT;

	/* MTU range: 68 - 65535 */
	dev->min_mtu = MIN_MTU;
	dev->max_mtu = MAX_MTU;

	/* Configuration may specify what MAC to use. Otherwise random. */
	if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC)) {
		u8 addr[ETH_ALEN];

		virtio_cread_bytes(vdev,
				   offsetof(struct virtio_net_config, mac),
				   addr, ETH_ALEN);
		eth_hw_addr_set(dev, addr);
	} else {
		eth_hw_addr_random(dev);
		dev_info(&vdev->dev, "Assigned random MAC address %pM\n",
			 dev->dev_addr);
	}

	/* Set up our device-specific information */
	vi = netdev_priv(dev);
	vi->dev = dev;
	vi->vdev = vdev;
	vdev->priv = vi;

	INIT_WORK(&vi->config_work, virtnet_config_changed_work);
	spin_lock_init(&vi->refill_lock);

	if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF)) {
		vi->mergeable_rx_bufs = true;
		dev->xdp_features |= NETDEV_XDP_ACT_RX_SG;
	}

	if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_NOTF_COAL)) {
		vi->intr_coal_rx.max_usecs = 0;
		vi->intr_coal_tx.max_usecs = 0;
		vi->intr_coal_tx.max_packets = 0;
		vi->intr_coal_rx.max_packets = 0;
	}

	if (virtio_has_feature(vdev, VIRTIO_NET_F_HASH_REPORT))
		vi->has_rss_hash_report = true;

	if (virtio_has_feature(vdev, VIRTIO_NET_F_RSS))
		vi->has_rss = true;

	if (vi->has_rss || vi->has_rss_hash_report) {
		vi->rss_indir_table_size =
			virtio_cread16(vdev, offsetof(struct virtio_net_config,
				rss_max_indirection_table_length));
		vi->rss_key_size =
			virtio_cread8(vdev, offsetof(struct virtio_net_config, rss_max_key_size));

		vi->rss_hash_types_supported =
			virtio_cread32(vdev, offsetof(struct virtio_net_config, supported_hash_types));
		vi->rss_hash_types_supported &=
			~(VIRTIO_NET_RSS_HASH_TYPE_IP_EX |
			  VIRTIO_NET_RSS_HASH_TYPE_TCP_EX |
			  VIRTIO_NET_RSS_HASH_TYPE_UDP_EX);

		dev->hw_features |= NETIF_F_RXHASH;
	}

	if (vi->has_rss_hash_report)
		vi->hdr_len = sizeof(struct virtio_net_hdr_v1_hash);
	else if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF) ||
		 virtio_has_feature(vdev, VIRTIO_F_VERSION_1))
		vi->hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf);
	else
		vi->hdr_len = sizeof(struct virtio_net_hdr);

	if (virtio_has_feature(vdev, VIRTIO_F_ANY_LAYOUT) ||
	    virtio_has_feature(vdev, VIRTIO_F_VERSION_1))
		vi->any_header_sg = true;
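	/* Editor's note (illustrative, not part of the driver): on typical
	 * builds the header selection above works out roughly to (struct
	 * sizes are an assumption, not taken from this file):
	 *
	 *	hash reporting enabled       -> struct virtio_net_hdr_v1_hash   (20 bytes)
	 *	mergeable bufs or VERSION_1  -> struct virtio_net_hdr_mrg_rxbuf (12 bytes)
	 *	legacy device otherwise      -> struct virtio_net_hdr           (10 bytes)
	 *
	 * Every buffer exchanged with the device is preceded by a header of
	 * vi->hdr_len bytes.
	 */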
	if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ))
		vi->has_cvq = true;

	if (virtio_has_feature(vdev, VIRTIO_NET_F_MTU)) {
		mtu = virtio_cread16(vdev,
				     offsetof(struct virtio_net_config,
					      mtu));
		if (mtu < dev->min_mtu) {
			/* Should never trigger: MTU was previously validated
			 * in virtnet_validate.
			 */
			dev_err(&vdev->dev,
				"device MTU appears to have changed it is now %d < %d",
				mtu, dev->min_mtu);
			err = -EINVAL;
			goto free;
		}

		dev->mtu = mtu;
		dev->max_mtu = mtu;
	}

	virtnet_set_big_packets(vi, mtu);

	if (vi->any_header_sg)
		dev->needed_headroom = vi->hdr_len;

	/* Enable multiqueue by default */
	if (num_online_cpus() >= max_queue_pairs)
		vi->curr_queue_pairs = max_queue_pairs;
	else
		vi->curr_queue_pairs = num_online_cpus();
	vi->max_queue_pairs = max_queue_pairs;

	/* Allocate/initialize the rx/tx queues, and invoke find_vqs */
	err = init_vqs(vi);
	if (err)
		goto free;

#ifdef CONFIG_SYSFS
	if (vi->mergeable_rx_bufs)
		dev->sysfs_rx_queue_group = &virtio_net_mrg_rx_group;
#endif
	netif_set_real_num_tx_queues(dev, vi->curr_queue_pairs);
	netif_set_real_num_rx_queues(dev, vi->curr_queue_pairs);

	virtnet_init_settings(dev);

	if (virtio_has_feature(vdev, VIRTIO_NET_F_STANDBY)) {
		vi->failover = net_failover_create(vi->dev);
		if (IS_ERR(vi->failover)) {
			err = PTR_ERR(vi->failover);
			goto free_vqs;
		}
	}

	if (vi->has_rss || vi->has_rss_hash_report)
		virtnet_init_default_rss(vi);

	/* serialize netdev register + virtio_device_ready() with ndo_open() */
	rtnl_lock();

	err = register_netdevice(dev);
	if (err) {
		pr_debug("virtio_net: registering device failed\n");
		rtnl_unlock();
		goto free_failover;
	}

	virtio_device_ready(vdev);

	_virtnet_set_queues(vi, vi->curr_queue_pairs);

	/* a random MAC address has been assigned, notify the device.
	 * We don't fail probe if VIRTIO_NET_F_CTRL_MAC_ADDR is not there
	 * because many devices work fine without getting MAC explicitly
	 */
	if (!virtio_has_feature(vdev, VIRTIO_NET_F_MAC) &&
	    virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_MAC_ADDR)) {
		struct scatterlist sg;

		sg_init_one(&sg, dev->dev_addr, dev->addr_len);
		if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
					  VIRTIO_NET_CTRL_MAC_ADDR_SET, &sg)) {
			pr_debug("virtio_net: setting MAC address failed\n");
			rtnl_unlock();
			err = -EINVAL;
			goto free_unregister_netdev;
		}
	}

	rtnl_unlock();

	err = virtnet_cpu_notif_add(vi);
	if (err) {
		pr_debug("virtio_net: registering cpu notifier failed\n");
		goto free_unregister_netdev;
	}

	/* Assume link up if device can't report link status,
	   otherwise get link status from config. */
	netif_carrier_off(dev);
	if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) {
		schedule_work(&vi->config_work);
	} else {
		vi->status = VIRTIO_NET_S_LINK_UP;
		virtnet_update_settings(vi);
		netif_carrier_on(dev);
	}

	for (i = 0; i < ARRAY_SIZE(guest_offloads); i++)
		if (virtio_has_feature(vi->vdev, guest_offloads[i]))
			set_bit(guest_offloads[i], &vi->guest_offloads);
	vi->guest_offloads_capable = vi->guest_offloads;

	pr_debug("virtnet: registered device %s with %d RX and TX vq's\n",
		 dev->name, max_queue_pairs);

	return 0;

free_unregister_netdev:
	unregister_netdev(dev);
free_failover:
	net_failover_destroy(vi->failover);
free_vqs:
	virtio_reset_device(vdev);
	cancel_delayed_work_sync(&vi->refill);
	free_receive_page_frags(vi);
	virtnet_del_vqs(vi);
free:
	free_netdev(dev);
	return err;
}

static void remove_vq_common(struct virtnet_info *vi)
{
	virtio_reset_device(vi->vdev);

	/* Free unused buffers in both send and recv, if any. */
	free_unused_bufs(vi);

	free_receive_bufs(vi);

	free_receive_page_frags(vi);

	virtnet_del_vqs(vi);
}

static void virtnet_remove(struct virtio_device *vdev)
{
	struct virtnet_info *vi = vdev->priv;

	virtnet_cpu_notif_remove(vi);

	/* Make sure no work handler is accessing the device. */
	flush_work(&vi->config_work);

	unregister_netdev(vi->dev);

	net_failover_destroy(vi->failover);

	remove_vq_common(vi);

	free_netdev(vi->dev);
}

static __maybe_unused int virtnet_freeze(struct virtio_device *vdev)
{
	struct virtnet_info *vi = vdev->priv;

	virtnet_cpu_notif_remove(vi);
	virtnet_freeze_down(vdev);
	remove_vq_common(vi);

	return 0;
}

static __maybe_unused int virtnet_restore(struct virtio_device *vdev)
{
	struct virtnet_info *vi = vdev->priv;
	int err;

	err = virtnet_restore_up(vdev);
	if (err)
		return err;
	virtnet_set_queues(vi, vi->curr_queue_pairs);

	err = virtnet_cpu_notif_add(vi);
	if (err) {
		virtnet_freeze_down(vdev);
		remove_vq_common(vi);
		return err;
	}

	return 0;
}

static struct virtio_device_id id_table[] = {
	{ VIRTIO_ID_NET, VIRTIO_DEV_ANY_ID },
	{ 0 },
};

#define VIRTNET_FEATURES \
	VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GUEST_CSUM, \
	VIRTIO_NET_F_MAC, \
	VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6, \
	VIRTIO_NET_F_HOST_ECN, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6, \
	VIRTIO_NET_F_GUEST_ECN, VIRTIO_NET_F_GUEST_UFO, \
	VIRTIO_NET_F_HOST_USO, VIRTIO_NET_F_GUEST_USO4, VIRTIO_NET_F_GUEST_USO6, \
	VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ, \
	VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN, \
	VIRTIO_NET_F_GUEST_ANNOUNCE, VIRTIO_NET_F_MQ, \
	VIRTIO_NET_F_CTRL_MAC_ADDR, \
	VIRTIO_NET_F_MTU, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS, \
	VIRTIO_NET_F_SPEED_DUPLEX, VIRTIO_NET_F_STANDBY, \
	VIRTIO_NET_F_RSS, VIRTIO_NET_F_HASH_REPORT, VIRTIO_NET_F_NOTF_COAL, \
	VIRTIO_NET_F_VQ_NOTF_COAL, \
	VIRTIO_NET_F_GUEST_HDRLEN

static unsigned int features[] = {
	VIRTNET_FEATURES,
};

static unsigned int features_legacy[] = {
	VIRTNET_FEATURES,
	VIRTIO_NET_F_GSO,
	VIRTIO_F_ANY_LAYOUT,
};

static struct virtio_driver virtio_net_driver = {
	.feature_table = features,
	.feature_table_size = ARRAY_SIZE(features),
	.feature_table_legacy = features_legacy,
	.feature_table_size_legacy = ARRAY_SIZE(features_legacy),
	.driver.name = KBUILD_MODNAME,
	.driver.owner = THIS_MODULE,
	.id_table = id_table,
	.validate = virtnet_validate,
	.probe = virtnet_probe,
	.remove = virtnet_remove,
	.config_changed = virtnet_config_changed,
#ifdef CONFIG_PM_SLEEP
	.freeze = virtnet_freeze,
	.restore = virtnet_restore,
#endif
};

static __init int virtio_net_driver_init(void)
{
	int ret;

	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "virtio/net:online",
				      virtnet_cpu_online,
				      virtnet_cpu_down_prep);
	if (ret < 0)
		goto out;
	virtionet_online = ret;
	ret = cpuhp_setup_state_multi(CPUHP_VIRT_NET_DEAD, "virtio/net:dead",
				      NULL, virtnet_cpu_dead);
	if (ret)
		goto err_dead;
	ret = register_virtio_driver(&virtio_net_driver);
	if (ret)
		goto err_virtio;
	return 0;
err_virtio:
	cpuhp_remove_multi_state(CPUHP_VIRT_NET_DEAD);
err_dead:
	cpuhp_remove_multi_state(virtionet_online);
out:
	return ret;
}
module_init(virtio_net_driver_init);

static __exit void virtio_net_driver_exit(void)
{
	unregister_virtio_driver(&virtio_net_driver);
	cpuhp_remove_multi_state(CPUHP_VIRT_NET_DEAD);
	cpuhp_remove_multi_state(virtionet_online);
}
module_exit(virtio_net_driver_exit);

MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_DESCRIPTION("Virtio network driver");
MODULE_LICENSE("GPL");