1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* A network driver using virtio. 3 * 4 * Copyright 2007 Rusty Russell <rusty@rustcorp.com.au> IBM Corporation 5 */ 6 //#define DEBUG 7 #include <linux/netdevice.h> 8 #include <linux/etherdevice.h> 9 #include <linux/ethtool.h> 10 #include <linux/module.h> 11 #include <linux/virtio.h> 12 #include <linux/virtio_net.h> 13 #include <linux/bpf.h> 14 #include <linux/bpf_trace.h> 15 #include <linux/scatterlist.h> 16 #include <linux/if_vlan.h> 17 #include <linux/slab.h> 18 #include <linux/cpu.h> 19 #include <linux/average.h> 20 #include <linux/filter.h> 21 #include <linux/kernel.h> 22 #include <net/route.h> 23 #include <net/xdp.h> 24 #include <net/net_failover.h> 25 26 static int napi_weight = NAPI_POLL_WEIGHT; 27 module_param(napi_weight, int, 0444); 28 29 static bool csum = true, gso = true, napi_tx = true; 30 module_param(csum, bool, 0444); 31 module_param(gso, bool, 0444); 32 module_param(napi_tx, bool, 0644); 33 34 /* FIXME: MTU in config. */ 35 #define GOOD_PACKET_LEN (ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN) 36 #define GOOD_COPY_LEN 128 37 38 #define VIRTNET_RX_PAD (NET_IP_ALIGN + NET_SKB_PAD) 39 40 /* Amount of XDP headroom to prepend to packets for use by xdp_adjust_head */ 41 #define VIRTIO_XDP_HEADROOM 256 42 43 /* Separating two types of XDP xmit */ 44 #define VIRTIO_XDP_TX BIT(0) 45 #define VIRTIO_XDP_REDIR BIT(1) 46 47 #define VIRTIO_XDP_FLAG BIT(0) 48 49 /* RX packet size EWMA. The average packet size is used to determine the packet 50 * buffer size when refilling RX rings. As the entire RX ring may be refilled 51 * at once, the weight is chosen so that the EWMA will be insensitive to short- 52 * term, transient changes in packet size. 53 */ 54 DECLARE_EWMA(pkt_len, 0, 64) 55 56 #define VIRTNET_DRIVER_VERSION "1.0.0" 57 58 static const unsigned long guest_offloads[] = { 59 VIRTIO_NET_F_GUEST_TSO4, 60 VIRTIO_NET_F_GUEST_TSO6, 61 VIRTIO_NET_F_GUEST_ECN, 62 VIRTIO_NET_F_GUEST_UFO, 63 VIRTIO_NET_F_GUEST_CSUM, 64 VIRTIO_NET_F_GUEST_USO4, 65 VIRTIO_NET_F_GUEST_USO6, 66 VIRTIO_NET_F_GUEST_HDRLEN 67 }; 68 69 #define GUEST_OFFLOAD_GRO_HW_MASK ((1ULL << VIRTIO_NET_F_GUEST_TSO4) | \ 70 (1ULL << VIRTIO_NET_F_GUEST_TSO6) | \ 71 (1ULL << VIRTIO_NET_F_GUEST_ECN) | \ 72 (1ULL << VIRTIO_NET_F_GUEST_UFO) | \ 73 (1ULL << VIRTIO_NET_F_GUEST_USO4) | \ 74 (1ULL << VIRTIO_NET_F_GUEST_USO6)) 75 76 struct virtnet_stat_desc { 77 char desc[ETH_GSTRING_LEN]; 78 size_t offset; 79 }; 80 81 struct virtnet_sq_stats { 82 struct u64_stats_sync syncp; 83 u64 packets; 84 u64 bytes; 85 u64 xdp_tx; 86 u64 xdp_tx_drops; 87 u64 kicks; 88 u64 tx_timeouts; 89 }; 90 91 struct virtnet_rq_stats { 92 struct u64_stats_sync syncp; 93 u64 packets; 94 u64 bytes; 95 u64 drops; 96 u64 xdp_packets; 97 u64 xdp_tx; 98 u64 xdp_redirects; 99 u64 xdp_drops; 100 u64 kicks; 101 }; 102 103 #define VIRTNET_SQ_STAT(m) offsetof(struct virtnet_sq_stats, m) 104 #define VIRTNET_RQ_STAT(m) offsetof(struct virtnet_rq_stats, m) 105 106 static const struct virtnet_stat_desc virtnet_sq_stats_desc[] = { 107 { "packets", VIRTNET_SQ_STAT(packets) }, 108 { "bytes", VIRTNET_SQ_STAT(bytes) }, 109 { "xdp_tx", VIRTNET_SQ_STAT(xdp_tx) }, 110 { "xdp_tx_drops", VIRTNET_SQ_STAT(xdp_tx_drops) }, 111 { "kicks", VIRTNET_SQ_STAT(kicks) }, 112 { "tx_timeouts", VIRTNET_SQ_STAT(tx_timeouts) }, 113 }; 114 115 static const struct virtnet_stat_desc virtnet_rq_stats_desc[] = { 116 { "packets", VIRTNET_RQ_STAT(packets) }, 117 { "bytes", VIRTNET_RQ_STAT(bytes) }, 118 { "drops", VIRTNET_RQ_STAT(drops) }, 119 { "xdp_packets", 
VIRTNET_RQ_STAT(xdp_packets) }, 120 { "xdp_tx", VIRTNET_RQ_STAT(xdp_tx) }, 121 { "xdp_redirects", VIRTNET_RQ_STAT(xdp_redirects) }, 122 { "xdp_drops", VIRTNET_RQ_STAT(xdp_drops) }, 123 { "kicks", VIRTNET_RQ_STAT(kicks) }, 124 }; 125 126 #define VIRTNET_SQ_STATS_LEN ARRAY_SIZE(virtnet_sq_stats_desc) 127 #define VIRTNET_RQ_STATS_LEN ARRAY_SIZE(virtnet_rq_stats_desc) 128 129 /* Internal representation of a send virtqueue */ 130 struct send_queue { 131 /* Virtqueue associated with this send _queue */ 132 struct virtqueue *vq; 133 134 /* TX: fragments + linear part + virtio header */ 135 struct scatterlist sg[MAX_SKB_FRAGS + 2]; 136 137 /* Name of the send queue: output.$index */ 138 char name[16]; 139 140 struct virtnet_sq_stats stats; 141 142 struct napi_struct napi; 143 144 /* Record whether sq is in reset state. */ 145 bool reset; 146 }; 147 148 /* Internal representation of a receive virtqueue */ 149 struct receive_queue { 150 /* Virtqueue associated with this receive_queue */ 151 struct virtqueue *vq; 152 153 struct napi_struct napi; 154 155 struct bpf_prog __rcu *xdp_prog; 156 157 struct virtnet_rq_stats stats; 158 159 /* Chain pages by the private ptr. */ 160 struct page *pages; 161 162 /* Average packet length for mergeable receive buffers. */ 163 struct ewma_pkt_len mrg_avg_pkt_len; 164 165 /* Page frag for packet buffer allocation. */ 166 struct page_frag alloc_frag; 167 168 /* RX: fragments + linear part + virtio header */ 169 struct scatterlist sg[MAX_SKB_FRAGS + 2]; 170 171 /* Min single buffer size for mergeable buffers case. */ 172 unsigned int min_buf_len; 173 174 /* Name of this receive queue: input.$index */ 175 char name[16]; 176 177 struct xdp_rxq_info xdp_rxq; 178 }; 179 180 /* This structure can contain rss message with maximum settings for indirection table and keysize 181 * Note, that default structure that describes RSS configuration virtio_net_rss_config 182 * contains same info but can't handle table values. 183 * In any case, structure would be passed to virtio hw through sg_buf split by parts 184 * because table sizes may be differ according to the device configuration. 185 */ 186 #define VIRTIO_NET_RSS_MAX_KEY_SIZE 40 187 #define VIRTIO_NET_RSS_MAX_TABLE_LEN 128 188 struct virtio_net_ctrl_rss { 189 u32 hash_types; 190 u16 indirection_table_mask; 191 u16 unclassified_queue; 192 u16 indirection_table[VIRTIO_NET_RSS_MAX_TABLE_LEN]; 193 u16 max_tx_vq; 194 u8 hash_key_length; 195 u8 key[VIRTIO_NET_RSS_MAX_KEY_SIZE]; 196 }; 197 198 /* Control VQ buffers: protected by the rtnl lock */ 199 struct control_buf { 200 struct virtio_net_ctrl_hdr hdr; 201 virtio_net_ctrl_ack status; 202 struct virtio_net_ctrl_mq mq; 203 u8 promisc; 204 u8 allmulti; 205 __virtio16 vid; 206 __virtio64 offloads; 207 struct virtio_net_ctrl_rss rss; 208 }; 209 210 struct virtnet_info { 211 struct virtio_device *vdev; 212 struct virtqueue *cvq; 213 struct net_device *dev; 214 struct send_queue *sq; 215 struct receive_queue *rq; 216 unsigned int status; 217 218 /* Max # of queue pairs supported by the device */ 219 u16 max_queue_pairs; 220 221 /* # of queue pairs currently used by the driver */ 222 u16 curr_queue_pairs; 223 224 /* # of XDP queue pairs currently used by the driver */ 225 u16 xdp_queue_pairs; 226 227 /* xdp_queue_pairs may be 0, when xdp is already loaded. So add this. */ 228 bool xdp_enabled; 229 230 /* I like... big packets and I cannot lie! 
 */
	bool big_packets;

	/* number of sg entries allocated for big packets */
	unsigned int big_packets_num_skbfrags;

	/* Host will merge rx buffers for big packets (shake it! shake it!) */
	bool mergeable_rx_bufs;

	/* Host supports rss and/or hash report */
	bool has_rss;
	bool has_rss_hash_report;
	u8 rss_key_size;
	u16 rss_indir_table_size;
	u32 rss_hash_types_supported;
	u32 rss_hash_types_saved;

	/* Has control virtqueue */
	bool has_cvq;

	/* Host can handle any s/g split between our header and packet data */
	bool any_header_sg;

	/* Packet virtio header size */
	u8 hdr_len;

	/* Work struct for delayed refilling if we run low on memory. */
	struct delayed_work refill;

	/* Is delayed refill enabled? */
	bool refill_enabled;

	/* The lock to synchronize the access to refill_enabled */
	spinlock_t refill_lock;

	/* Work struct for config space updates */
	struct work_struct config_work;

	/* Is the affinity hint set for virtqueues? */
	bool affinity_hint_set;

	/* CPU hotplug instances for online & dead */
	struct hlist_node node;
	struct hlist_node node_dead;

	struct control_buf *ctrl;

	/* Ethtool settings */
	u8 duplex;
	u32 speed;

	/* Interrupt coalescing settings */
	u32 tx_usecs;
	u32 rx_usecs;
	u32 tx_max_packets;
	u32 rx_max_packets;

	unsigned long guest_offloads;
	unsigned long guest_offloads_capable;

	/* failover when STANDBY feature enabled */
	struct failover *failover;
};

struct padded_vnet_hdr {
	struct virtio_net_hdr_v1_hash hdr;
	/*
	 * hdr is in a separate sg buffer, and data sg buffer shares same page
	 * with this header sg. This padding makes next sg 16 byte aligned
	 * after the header.
	 */
	char padding[12];
};

static void virtnet_rq_free_unused_buf(struct virtqueue *vq, void *buf);
static void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf);

static bool is_xdp_frame(void *ptr)
{
	return (unsigned long)ptr & VIRTIO_XDP_FLAG;
}

static void *xdp_to_ptr(struct xdp_frame *ptr)
{
	return (void *)((unsigned long)ptr | VIRTIO_XDP_FLAG);
}

static struct xdp_frame *ptr_to_xdp(void *ptr)
{
	return (struct xdp_frame *)((unsigned long)ptr & ~VIRTIO_XDP_FLAG);
}

/* Converting between virtqueue no. and kernel tx/rx queue no.
 * 0:rx0 1:tx0 2:rx1 3:tx1 ... 2N:rxN 2N+1:txN 2N+2:cvq
 */
static int vq2txq(struct virtqueue *vq)
{
	return (vq->index - 1) / 2;
}

static int txq2vq(int txq)
{
	return txq * 2 + 1;
}

static int vq2rxq(struct virtqueue *vq)
{
	return vq->index / 2;
}

static int rxq2vq(int rxq)
{
	return rxq * 2;
}
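/* A worked example of the mapping above, for illustration only (not used by
 * the driver): with two queue pairs the virtqueues are laid out as
 *
 *	vq->index:   0     1     2     3     4
 *	             rx0   tx0   rx1   tx1   cvq
 *
 * so, for instance,
 *
 *	vq2rxq(vq2) == 2 / 2       == 1		(rx queue 1)
 *	vq2txq(vq3) == (3 - 1) / 2 == 1		(tx queue 1)
 *	rxq2vq(1)   == 1 * 2       == 2
 *	txq2vq(1)   == 1 * 2 + 1   == 3
 */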
static inline struct virtio_net_hdr_mrg_rxbuf *skb_vnet_hdr(struct sk_buff *skb)
{
	return (struct virtio_net_hdr_mrg_rxbuf *)skb->cb;
}

/*
 * private is used to chain pages for big packets, put the whole
 * most recently used list at the beginning for reuse
 */
static void give_pages(struct receive_queue *rq, struct page *page)
{
	struct page *end;

	/* Find end of list, sew whole thing into vi->rq.pages. */
	for (end = page; end->private; end = (struct page *)end->private);
	end->private = (unsigned long)rq->pages;
	rq->pages = page;
}

static struct page *get_a_page(struct receive_queue *rq, gfp_t gfp_mask)
{
	struct page *p = rq->pages;

	if (p) {
		rq->pages = (struct page *)p->private;
		/* clear private here, it is used to chain pages */
		p->private = 0;
	} else
		p = alloc_page(gfp_mask);
	return p;
}

static void enable_delayed_refill(struct virtnet_info *vi)
{
	spin_lock_bh(&vi->refill_lock);
	vi->refill_enabled = true;
	spin_unlock_bh(&vi->refill_lock);
}

static void disable_delayed_refill(struct virtnet_info *vi)
{
	spin_lock_bh(&vi->refill_lock);
	vi->refill_enabled = false;
	spin_unlock_bh(&vi->refill_lock);
}

static void virtqueue_napi_schedule(struct napi_struct *napi,
				    struct virtqueue *vq)
{
	if (napi_schedule_prep(napi)) {
		virtqueue_disable_cb(vq);
		__napi_schedule(napi);
	}
}

static void virtqueue_napi_complete(struct napi_struct *napi,
				    struct virtqueue *vq, int processed)
{
	int opaque;

	opaque = virtqueue_enable_cb_prepare(vq);
	if (napi_complete_done(napi, processed)) {
		if (unlikely(virtqueue_poll(vq, opaque)))
			virtqueue_napi_schedule(napi, vq);
	} else {
		virtqueue_disable_cb(vq);
	}
}

static void skb_xmit_done(struct virtqueue *vq)
{
	struct virtnet_info *vi = vq->vdev->priv;
	struct napi_struct *napi = &vi->sq[vq2txq(vq)].napi;

	/* Suppress further interrupts. */
	virtqueue_disable_cb(vq);

	if (napi->weight)
		virtqueue_napi_schedule(napi, vq);
	else
		/* We were probably waiting for more output buffers. */
		netif_wake_subqueue(vi->dev, vq2txq(vq));
}

#define MRG_CTX_HEADER_SHIFT 22
static void *mergeable_len_to_ctx(unsigned int truesize,
				  unsigned int headroom)
{
	return (void *)(unsigned long)((headroom << MRG_CTX_HEADER_SHIFT) | truesize);
}

static unsigned int mergeable_ctx_to_headroom(void *mrg_ctx)
{
	return (unsigned long)mrg_ctx >> MRG_CTX_HEADER_SHIFT;
}

static unsigned int mergeable_ctx_to_truesize(void *mrg_ctx)
{
	return (unsigned long)mrg_ctx & ((1 << MRG_CTX_HEADER_SHIFT) - 1);
}
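/* Illustrative example of the context encoding implemented by the helpers
 * above (the numbers are hypothetical, not taken from a real device): a
 * buffer with truesize 1536 and headroom 256 is encoded as
 *
 *	ctx = (256 << MRG_CTX_HEADER_SHIFT) | 1536 = 0x40000600
 *
 * and decodes back as
 *
 *	mergeable_ctx_to_headroom(ctx) == 0x40000600 >> 22           == 256
 *	mergeable_ctx_to_truesize(ctx) == 0x40000600 & (BIT(22) - 1) == 1536
 *
 * i.e. truesize lives in the low 22 bits and headroom in the bits above it,
 * so the pair fits in the unsigned long that is carried through the
 * virtqueue as a void * context.
 */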
/* Called from bottom half context */
static struct sk_buff *page_to_skb(struct virtnet_info *vi,
				   struct receive_queue *rq,
				   struct page *page, unsigned int offset,
				   unsigned int len, unsigned int truesize,
				   unsigned int headroom)
{
	struct sk_buff *skb;
	struct virtio_net_hdr_mrg_rxbuf *hdr;
	unsigned int copy, hdr_len, hdr_padded_len;
	struct page *page_to_free = NULL;
	int tailroom, shinfo_size;
	char *p, *hdr_p, *buf;

	p = page_address(page) + offset;
	hdr_p = p;

	hdr_len = vi->hdr_len;
	if (vi->mergeable_rx_bufs)
		hdr_padded_len = hdr_len;
	else
		hdr_padded_len = sizeof(struct padded_vnet_hdr);

	buf = p - headroom;
	len -= hdr_len;
	offset += hdr_padded_len;
	p += hdr_padded_len;
	tailroom = truesize - headroom - hdr_padded_len - len;

	shinfo_size = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	/* Build the skb around the existing buffer when we can, so the
	 * pages are handed to the stack without copying.
	 */
	if (!NET_IP_ALIGN && len > GOOD_COPY_LEN && tailroom >= shinfo_size) {
		skb = build_skb(buf, truesize);
		if (unlikely(!skb))
			return NULL;

		skb_reserve(skb, p - buf);
		skb_put(skb, len);

		page = (struct page *)page->private;
		if (page)
			give_pages(rq, page);
		goto ok;
	}

	/* copy small packet so we can reuse these pages for small data */
	skb = napi_alloc_skb(&rq->napi, GOOD_COPY_LEN);
	if (unlikely(!skb))
		return NULL;

	/* Copy all frame if it fits skb->head, otherwise
	 * we let virtio_net_hdr_to_skb() and GRO pull headers as needed.
	 */
	if (len <= skb_tailroom(skb))
		copy = len;
	else
		copy = ETH_HLEN;
	skb_put_data(skb, p, copy);

	len -= copy;
	offset += copy;

	if (vi->mergeable_rx_bufs) {
		if (len)
			skb_add_rx_frag(skb, 0, page, offset, len, truesize);
		else
			page_to_free = page;
		goto ok;
	}

	/*
	 * Verify that we can indeed put this data into a skb.
	 * This is here to handle cases when the device erroneously
	 * tries to receive more than is possible. This is usually
	 * the case of a broken device.
522 */ 523 if (unlikely(len > MAX_SKB_FRAGS * PAGE_SIZE)) { 524 net_dbg_ratelimited("%s: too much data\n", skb->dev->name); 525 dev_kfree_skb(skb); 526 return NULL; 527 } 528 BUG_ON(offset >= PAGE_SIZE); 529 while (len) { 530 unsigned int frag_size = min((unsigned)PAGE_SIZE - offset, len); 531 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, offset, 532 frag_size, truesize); 533 len -= frag_size; 534 page = (struct page *)page->private; 535 offset = 0; 536 } 537 538 if (page) 539 give_pages(rq, page); 540 541 ok: 542 hdr = skb_vnet_hdr(skb); 543 memcpy(hdr, hdr_p, hdr_len); 544 if (page_to_free) 545 put_page(page_to_free); 546 547 return skb; 548 } 549 550 static void free_old_xmit_skbs(struct send_queue *sq, bool in_napi) 551 { 552 unsigned int len; 553 unsigned int packets = 0; 554 unsigned int bytes = 0; 555 void *ptr; 556 557 while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) { 558 if (likely(!is_xdp_frame(ptr))) { 559 struct sk_buff *skb = ptr; 560 561 pr_debug("Sent skb %p\n", skb); 562 563 bytes += skb->len; 564 napi_consume_skb(skb, in_napi); 565 } else { 566 struct xdp_frame *frame = ptr_to_xdp(ptr); 567 568 bytes += xdp_get_frame_len(frame); 569 xdp_return_frame(frame); 570 } 571 packets++; 572 } 573 574 /* Avoid overhead when no packets have been processed 575 * happens when called speculatively from start_xmit. 576 */ 577 if (!packets) 578 return; 579 580 u64_stats_update_begin(&sq->stats.syncp); 581 sq->stats.bytes += bytes; 582 sq->stats.packets += packets; 583 u64_stats_update_end(&sq->stats.syncp); 584 } 585 586 static bool is_xdp_raw_buffer_queue(struct virtnet_info *vi, int q) 587 { 588 if (q < (vi->curr_queue_pairs - vi->xdp_queue_pairs)) 589 return false; 590 else if (q < vi->curr_queue_pairs) 591 return true; 592 else 593 return false; 594 } 595 596 static void check_sq_full_and_disable(struct virtnet_info *vi, 597 struct net_device *dev, 598 struct send_queue *sq) 599 { 600 bool use_napi = sq->napi.weight; 601 int qnum; 602 603 qnum = sq - vi->sq; 604 605 /* If running out of space, stop queue to avoid getting packets that we 606 * are then unable to transmit. 607 * An alternative would be to force queuing layer to requeue the skb by 608 * returning NETDEV_TX_BUSY. However, NETDEV_TX_BUSY should not be 609 * returned in a normal path of operation: it means that driver is not 610 * maintaining the TX queue stop/start state properly, and causes 611 * the stack to do a non-trivial amount of useless work. 612 * Since most packets only take 1 or 2 ring slots, stopping the queue 613 * early means 16 slots are typically wasted. 614 */ 615 if (sq->vq->num_free < 2+MAX_SKB_FRAGS) { 616 netif_stop_subqueue(dev, qnum); 617 if (use_napi) { 618 if (unlikely(!virtqueue_enable_cb_delayed(sq->vq))) 619 virtqueue_napi_schedule(&sq->napi, sq->vq); 620 } else if (unlikely(!virtqueue_enable_cb_delayed(sq->vq))) { 621 /* More just got used, free them then recheck. 
 */
			free_old_xmit_skbs(sq, false);
			if (sq->vq->num_free >= 2+MAX_SKB_FRAGS) {
				netif_start_subqueue(dev, qnum);
				virtqueue_disable_cb(sq->vq);
			}
		}
	}
}

static int __virtnet_xdp_xmit_one(struct virtnet_info *vi,
				  struct send_queue *sq,
				  struct xdp_frame *xdpf)
{
	struct virtio_net_hdr_mrg_rxbuf *hdr;
	struct skb_shared_info *shinfo;
	u8 nr_frags = 0;
	int err, i;

	if (unlikely(xdpf->headroom < vi->hdr_len))
		return -EOVERFLOW;

	if (unlikely(xdp_frame_has_frags(xdpf))) {
		shinfo = xdp_get_shared_info_from_frame(xdpf);
		nr_frags = shinfo->nr_frags;
	}

	/* In the wrapping function virtnet_xdp_xmit(), we need to free
	 * up the pending old buffers, where we need to calculate the
	 * position of skb_shared_info in xdp_get_frame_len() and
	 * xdp_return_frame(), both of which rely on xdpf->data and
	 * xdpf->headroom. Therefore, we need to update the value of
	 * headroom synchronously here.
	 */
	xdpf->headroom -= vi->hdr_len;
	xdpf->data -= vi->hdr_len;
	/* Zero header and leave csum up to XDP layers */
	hdr = xdpf->data;
	memset(hdr, 0, vi->hdr_len);
	xdpf->len += vi->hdr_len;

	sg_init_table(sq->sg, nr_frags + 1);
	sg_set_buf(sq->sg, xdpf->data, xdpf->len);
	for (i = 0; i < nr_frags; i++) {
		skb_frag_t *frag = &shinfo->frags[i];

		sg_set_page(&sq->sg[i + 1], skb_frag_page(frag),
			    skb_frag_size(frag), skb_frag_off(frag));
	}

	err = virtqueue_add_outbuf(sq->vq, sq->sg, nr_frags + 1,
				   xdp_to_ptr(xdpf), GFP_ATOMIC);
	if (unlikely(err))
		return -ENOSPC; /* Caller handle free/refcnt */

	return 0;
}

/* when vi->curr_queue_pairs > nr_cpu_ids, the txq/sq is only used for xdp tx on
 * the current cpu, so it does not need to be locked.
 *
 * Here we use a macro instead of inline functions because we have to deal with
 * three issues at the same time: 1. the choice of sq. 2. judge and execute the
 * lock/unlock of txq 3. make sparse happy. It is difficult for two inline
 * functions to perfectly solve these three problems at the same time.
 */
#define virtnet_xdp_get_sq(vi) ({					\
	int cpu = smp_processor_id();					\
	struct netdev_queue *txq;					\
	typeof(vi) v = (vi);						\
	unsigned int qp;						\
									\
	if (v->curr_queue_pairs > nr_cpu_ids) {				\
		qp = v->curr_queue_pairs - v->xdp_queue_pairs;		\
		qp += cpu;						\
		txq = netdev_get_tx_queue(v->dev, qp);			\
		__netif_tx_acquire(txq);				\
	} else {							\
		qp = cpu % v->curr_queue_pairs;				\
		txq = netdev_get_tx_queue(v->dev, qp);			\
		__netif_tx_lock(txq, cpu);				\
	}								\
	v->sq + qp;							\
})

#define virtnet_xdp_put_sq(vi, q) {					\
	struct netdev_queue *txq;					\
	typeof(vi) v = (vi);						\
									\
	txq = netdev_get_tx_queue(v->dev, (q) - v->sq);			\
	if (v->curr_queue_pairs > nr_cpu_ids)				\
		__netif_tx_release(txq);				\
	else								\
		__netif_tx_unlock(txq);					\
}

static int virtnet_xdp_xmit(struct net_device *dev,
			    int n, struct xdp_frame **frames, u32 flags)
{
	struct virtnet_info *vi = netdev_priv(dev);
	struct receive_queue *rq = vi->rq;
	struct bpf_prog *xdp_prog;
	struct send_queue *sq;
	unsigned int len;
	int packets = 0;
	int bytes = 0;
	int nxmit = 0;
	int kicks = 0;
	void *ptr;
	int ret;
	int i;

	/* Only allow ndo_xdp_xmit if XDP is loaded on dev, as this
	 * indicates XDP resources have been successfully allocated.
735 */ 736 xdp_prog = rcu_access_pointer(rq->xdp_prog); 737 if (!xdp_prog) 738 return -ENXIO; 739 740 sq = virtnet_xdp_get_sq(vi); 741 742 if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) { 743 ret = -EINVAL; 744 goto out; 745 } 746 747 /* Free up any pending old buffers before queueing new ones. */ 748 while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) { 749 if (likely(is_xdp_frame(ptr))) { 750 struct xdp_frame *frame = ptr_to_xdp(ptr); 751 752 bytes += xdp_get_frame_len(frame); 753 xdp_return_frame(frame); 754 } else { 755 struct sk_buff *skb = ptr; 756 757 bytes += skb->len; 758 napi_consume_skb(skb, false); 759 } 760 packets++; 761 } 762 763 for (i = 0; i < n; i++) { 764 struct xdp_frame *xdpf = frames[i]; 765 766 if (__virtnet_xdp_xmit_one(vi, sq, xdpf)) 767 break; 768 nxmit++; 769 } 770 ret = nxmit; 771 772 if (!is_xdp_raw_buffer_queue(vi, sq - vi->sq)) 773 check_sq_full_and_disable(vi, dev, sq); 774 775 if (flags & XDP_XMIT_FLUSH) { 776 if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq)) 777 kicks = 1; 778 } 779 out: 780 u64_stats_update_begin(&sq->stats.syncp); 781 sq->stats.bytes += bytes; 782 sq->stats.packets += packets; 783 sq->stats.xdp_tx += n; 784 sq->stats.xdp_tx_drops += n - nxmit; 785 sq->stats.kicks += kicks; 786 u64_stats_update_end(&sq->stats.syncp); 787 788 virtnet_xdp_put_sq(vi, sq); 789 return ret; 790 } 791 792 static void put_xdp_frags(struct xdp_buff *xdp) 793 { 794 struct skb_shared_info *shinfo; 795 struct page *xdp_page; 796 int i; 797 798 if (xdp_buff_has_frags(xdp)) { 799 shinfo = xdp_get_shared_info_from_buff(xdp); 800 for (i = 0; i < shinfo->nr_frags; i++) { 801 xdp_page = skb_frag_page(&shinfo->frags[i]); 802 put_page(xdp_page); 803 } 804 } 805 } 806 807 static int virtnet_xdp_handler(struct bpf_prog *xdp_prog, struct xdp_buff *xdp, 808 struct net_device *dev, 809 unsigned int *xdp_xmit, 810 struct virtnet_rq_stats *stats) 811 { 812 struct xdp_frame *xdpf; 813 int err; 814 u32 act; 815 816 act = bpf_prog_run_xdp(xdp_prog, xdp); 817 stats->xdp_packets++; 818 819 switch (act) { 820 case XDP_PASS: 821 return act; 822 823 case XDP_TX: 824 stats->xdp_tx++; 825 xdpf = xdp_convert_buff_to_frame(xdp); 826 if (unlikely(!xdpf)) { 827 netdev_dbg(dev, "convert buff to frame failed for xdp\n"); 828 return XDP_DROP; 829 } 830 831 err = virtnet_xdp_xmit(dev, 1, &xdpf, 0); 832 if (unlikely(!err)) { 833 xdp_return_frame_rx_napi(xdpf); 834 } else if (unlikely(err < 0)) { 835 trace_xdp_exception(dev, xdp_prog, act); 836 return XDP_DROP; 837 } 838 *xdp_xmit |= VIRTIO_XDP_TX; 839 return act; 840 841 case XDP_REDIRECT: 842 stats->xdp_redirects++; 843 err = xdp_do_redirect(dev, xdp, xdp_prog); 844 if (err) 845 return XDP_DROP; 846 847 *xdp_xmit |= VIRTIO_XDP_REDIR; 848 return act; 849 850 default: 851 bpf_warn_invalid_xdp_action(dev, xdp_prog, act); 852 fallthrough; 853 case XDP_ABORTED: 854 trace_xdp_exception(dev, xdp_prog, act); 855 fallthrough; 856 case XDP_DROP: 857 return XDP_DROP; 858 } 859 } 860 861 static unsigned int virtnet_get_headroom(struct virtnet_info *vi) 862 { 863 return vi->xdp_enabled ? VIRTIO_XDP_HEADROOM : 0; 864 } 865 866 /* We copy the packet for XDP in the following cases: 867 * 868 * 1) Packet is scattered across multiple rx buffers. 869 * 2) Headroom space is insufficient. 870 * 871 * This is inefficient but it's a temporary condition that 872 * we hit right after XDP is enabled and until queue is refilled 873 * with large buffers with sufficient headroom - so it should affect 874 * at most queue size packets. 
875 * Afterwards, the conditions to enable 876 * XDP should preclude the underlying device from sending packets 877 * across multiple buffers (num_buf > 1), and we make sure buffers 878 * have enough headroom. 879 */ 880 static struct page *xdp_linearize_page(struct receive_queue *rq, 881 int *num_buf, 882 struct page *p, 883 int offset, 884 int page_off, 885 unsigned int *len) 886 { 887 int tailroom = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); 888 struct page *page; 889 890 if (page_off + *len + tailroom > PAGE_SIZE) 891 return NULL; 892 893 page = alloc_page(GFP_ATOMIC); 894 if (!page) 895 return NULL; 896 897 memcpy(page_address(page) + page_off, page_address(p) + offset, *len); 898 page_off += *len; 899 900 while (--*num_buf) { 901 unsigned int buflen; 902 void *buf; 903 int off; 904 905 buf = virtqueue_get_buf(rq->vq, &buflen); 906 if (unlikely(!buf)) 907 goto err_buf; 908 909 p = virt_to_head_page(buf); 910 off = buf - page_address(p); 911 912 /* guard against a misconfigured or uncooperative backend that 913 * is sending packet larger than the MTU. 914 */ 915 if ((page_off + buflen + tailroom) > PAGE_SIZE) { 916 put_page(p); 917 goto err_buf; 918 } 919 920 memcpy(page_address(page) + page_off, 921 page_address(p) + off, buflen); 922 page_off += buflen; 923 put_page(p); 924 } 925 926 /* Headroom does not contribute to packet length */ 927 *len = page_off - VIRTIO_XDP_HEADROOM; 928 return page; 929 err_buf: 930 __free_pages(page, 0); 931 return NULL; 932 } 933 934 static struct sk_buff *receive_small(struct net_device *dev, 935 struct virtnet_info *vi, 936 struct receive_queue *rq, 937 void *buf, void *ctx, 938 unsigned int len, 939 unsigned int *xdp_xmit, 940 struct virtnet_rq_stats *stats) 941 { 942 struct sk_buff *skb; 943 struct bpf_prog *xdp_prog; 944 unsigned int xdp_headroom = (unsigned long)ctx; 945 unsigned int header_offset = VIRTNET_RX_PAD + xdp_headroom; 946 unsigned int headroom = vi->hdr_len + header_offset; 947 unsigned int buflen = SKB_DATA_ALIGN(GOOD_PACKET_LEN + headroom) + 948 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); 949 struct page *page = virt_to_head_page(buf); 950 unsigned int delta = 0; 951 struct page *xdp_page; 952 unsigned int metasize = 0; 953 954 len -= vi->hdr_len; 955 stats->bytes += len; 956 957 if (unlikely(len > GOOD_PACKET_LEN)) { 958 pr_debug("%s: rx error: len %u exceeds max size %d\n", 959 dev->name, len, GOOD_PACKET_LEN); 960 dev->stats.rx_length_errors++; 961 goto err; 962 } 963 964 if (likely(!vi->xdp_enabled)) { 965 xdp_prog = NULL; 966 goto skip_xdp; 967 } 968 969 rcu_read_lock(); 970 xdp_prog = rcu_dereference(rq->xdp_prog); 971 if (xdp_prog) { 972 struct virtio_net_hdr_mrg_rxbuf *hdr = buf + header_offset; 973 struct xdp_buff xdp; 974 void *orig_data; 975 u32 act; 976 977 if (unlikely(hdr->hdr.gso_type)) 978 goto err_xdp; 979 980 if (unlikely(xdp_headroom < virtnet_get_headroom(vi))) { 981 int offset = buf - page_address(page) + header_offset; 982 unsigned int tlen = len + vi->hdr_len; 983 int num_buf = 1; 984 985 xdp_headroom = virtnet_get_headroom(vi); 986 header_offset = VIRTNET_RX_PAD + xdp_headroom; 987 headroom = vi->hdr_len + header_offset; 988 buflen = SKB_DATA_ALIGN(GOOD_PACKET_LEN + headroom) + 989 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); 990 xdp_page = xdp_linearize_page(rq, &num_buf, page, 991 offset, header_offset, 992 &tlen); 993 if (!xdp_page) 994 goto err_xdp; 995 996 buf = page_address(xdp_page); 997 put_page(page); 998 page = xdp_page; 999 } 1000 1001 xdp_init_buff(&xdp, buflen, &rq->xdp_rxq); 1002 
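		/* Illustrative sketch (not functional code) of how the
		 * xdp_prepare_buff() call below maps onto the buffer posted
		 * by add_recvbuf_small(), assuming it was refilled with the
		 * same xdp_headroom:
		 *
		 *	buf
		 *	| VIRTNET_RX_PAD | xdp_headroom | vnet hdr | packet (len) | ...
		 *
		 *	data_hard_start = buf + VIRTNET_RX_PAD + vi->hdr_len
		 *	xdp.data        = data_hard_start + xdp_headroom
		 *	                = start of the Ethernet frame
		 *
		 * The virtio-net header sits in the last vi->hdr_len bytes of
		 * the XDP headroom, directly below xdp.data, so a program that
		 * calls bpf_xdp_adjust_head() may overwrite it; this is why the
		 * skb built after XDP_PASS keeps a zeroed vnet header instead
		 * of copying the original one back.
		 */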
xdp_prepare_buff(&xdp, buf + VIRTNET_RX_PAD + vi->hdr_len, 1003 xdp_headroom, len, true); 1004 orig_data = xdp.data; 1005 1006 act = virtnet_xdp_handler(xdp_prog, &xdp, dev, xdp_xmit, stats); 1007 1008 switch (act) { 1009 case XDP_PASS: 1010 /* Recalculate length in case bpf program changed it */ 1011 delta = orig_data - xdp.data; 1012 len = xdp.data_end - xdp.data; 1013 metasize = xdp.data - xdp.data_meta; 1014 break; 1015 case XDP_TX: 1016 case XDP_REDIRECT: 1017 rcu_read_unlock(); 1018 goto xdp_xmit; 1019 default: 1020 goto err_xdp; 1021 } 1022 } 1023 rcu_read_unlock(); 1024 1025 skip_xdp: 1026 skb = build_skb(buf, buflen); 1027 if (!skb) 1028 goto err; 1029 skb_reserve(skb, headroom - delta); 1030 skb_put(skb, len); 1031 if (!xdp_prog) { 1032 buf += header_offset; 1033 memcpy(skb_vnet_hdr(skb), buf, vi->hdr_len); 1034 } /* keep zeroed vnet hdr since XDP is loaded */ 1035 1036 if (metasize) 1037 skb_metadata_set(skb, metasize); 1038 1039 return skb; 1040 1041 err_xdp: 1042 rcu_read_unlock(); 1043 stats->xdp_drops++; 1044 err: 1045 stats->drops++; 1046 put_page(page); 1047 xdp_xmit: 1048 return NULL; 1049 } 1050 1051 static struct sk_buff *receive_big(struct net_device *dev, 1052 struct virtnet_info *vi, 1053 struct receive_queue *rq, 1054 void *buf, 1055 unsigned int len, 1056 struct virtnet_rq_stats *stats) 1057 { 1058 struct page *page = buf; 1059 struct sk_buff *skb = 1060 page_to_skb(vi, rq, page, 0, len, PAGE_SIZE, 0); 1061 1062 stats->bytes += len - vi->hdr_len; 1063 if (unlikely(!skb)) 1064 goto err; 1065 1066 return skb; 1067 1068 err: 1069 stats->drops++; 1070 give_pages(rq, page); 1071 return NULL; 1072 } 1073 1074 static void mergeable_buf_free(struct receive_queue *rq, int num_buf, 1075 struct net_device *dev, 1076 struct virtnet_rq_stats *stats) 1077 { 1078 struct page *page; 1079 void *buf; 1080 int len; 1081 1082 while (num_buf-- > 1) { 1083 buf = virtqueue_get_buf(rq->vq, &len); 1084 if (unlikely(!buf)) { 1085 pr_debug("%s: rx error: %d buffers missing\n", 1086 dev->name, num_buf); 1087 dev->stats.rx_length_errors++; 1088 break; 1089 } 1090 stats->bytes += len; 1091 page = virt_to_head_page(buf); 1092 put_page(page); 1093 } 1094 } 1095 1096 /* Why not use xdp_build_skb_from_frame() ? 1097 * XDP core assumes that xdp frags are PAGE_SIZE in length, while in 1098 * virtio-net there are 2 points that do not match its requirements: 1099 * 1. The size of the prefilled buffer is not fixed before xdp is set. 1100 * 2. xdp_build_skb_from_frame() does more checks that we don't need, 1101 * like eth_type_trans() (which virtio-net does in receive_buf()). 
1102 */ 1103 static struct sk_buff *build_skb_from_xdp_buff(struct net_device *dev, 1104 struct virtnet_info *vi, 1105 struct xdp_buff *xdp, 1106 unsigned int xdp_frags_truesz) 1107 { 1108 struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp); 1109 unsigned int headroom, data_len; 1110 struct sk_buff *skb; 1111 int metasize; 1112 u8 nr_frags; 1113 1114 if (unlikely(xdp->data_end > xdp_data_hard_end(xdp))) { 1115 pr_debug("Error building skb as missing reserved tailroom for xdp"); 1116 return NULL; 1117 } 1118 1119 if (unlikely(xdp_buff_has_frags(xdp))) 1120 nr_frags = sinfo->nr_frags; 1121 1122 skb = build_skb(xdp->data_hard_start, xdp->frame_sz); 1123 if (unlikely(!skb)) 1124 return NULL; 1125 1126 headroom = xdp->data - xdp->data_hard_start; 1127 data_len = xdp->data_end - xdp->data; 1128 skb_reserve(skb, headroom); 1129 __skb_put(skb, data_len); 1130 1131 metasize = xdp->data - xdp->data_meta; 1132 metasize = metasize > 0 ? metasize : 0; 1133 if (metasize) 1134 skb_metadata_set(skb, metasize); 1135 1136 if (unlikely(xdp_buff_has_frags(xdp))) 1137 xdp_update_skb_shared_info(skb, nr_frags, 1138 sinfo->xdp_frags_size, 1139 xdp_frags_truesz, 1140 xdp_buff_is_frag_pfmemalloc(xdp)); 1141 1142 return skb; 1143 } 1144 1145 /* TODO: build xdp in big mode */ 1146 static int virtnet_build_xdp_buff_mrg(struct net_device *dev, 1147 struct virtnet_info *vi, 1148 struct receive_queue *rq, 1149 struct xdp_buff *xdp, 1150 void *buf, 1151 unsigned int len, 1152 unsigned int frame_sz, 1153 int *num_buf, 1154 unsigned int *xdp_frags_truesize, 1155 struct virtnet_rq_stats *stats) 1156 { 1157 struct virtio_net_hdr_mrg_rxbuf *hdr = buf; 1158 unsigned int headroom, tailroom, room; 1159 unsigned int truesize, cur_frag_size; 1160 struct skb_shared_info *shinfo; 1161 unsigned int xdp_frags_truesz = 0; 1162 struct page *page; 1163 skb_frag_t *frag; 1164 int offset; 1165 void *ctx; 1166 1167 xdp_init_buff(xdp, frame_sz, &rq->xdp_rxq); 1168 xdp_prepare_buff(xdp, buf - VIRTIO_XDP_HEADROOM, 1169 VIRTIO_XDP_HEADROOM + vi->hdr_len, len - vi->hdr_len, true); 1170 1171 if (!*num_buf) 1172 return 0; 1173 1174 if (*num_buf > 1) { 1175 /* If we want to build multi-buffer xdp, we need 1176 * to specify that the flags of xdp_buff have the 1177 * XDP_FLAGS_HAS_FRAG bit. 1178 */ 1179 if (!xdp_buff_has_frags(xdp)) 1180 xdp_buff_set_frags_flag(xdp); 1181 1182 shinfo = xdp_get_shared_info_from_buff(xdp); 1183 shinfo->nr_frags = 0; 1184 shinfo->xdp_frags_size = 0; 1185 } 1186 1187 if (*num_buf > MAX_SKB_FRAGS + 1) 1188 return -EINVAL; 1189 1190 while (--*num_buf > 0) { 1191 buf = virtqueue_get_buf_ctx(rq->vq, &len, &ctx); 1192 if (unlikely(!buf)) { 1193 pr_debug("%s: rx error: %d buffers out of %d missing\n", 1194 dev->name, *num_buf, 1195 virtio16_to_cpu(vi->vdev, hdr->num_buffers)); 1196 dev->stats.rx_length_errors++; 1197 goto err; 1198 } 1199 1200 stats->bytes += len; 1201 page = virt_to_head_page(buf); 1202 offset = buf - page_address(page); 1203 1204 truesize = mergeable_ctx_to_truesize(ctx); 1205 headroom = mergeable_ctx_to_headroom(ctx); 1206 tailroom = headroom ? 
			   sizeof(struct skb_shared_info) : 0;
		room = SKB_DATA_ALIGN(headroom + tailroom);

		cur_frag_size = truesize;
		xdp_frags_truesz += cur_frag_size;
		if (unlikely(len > truesize - room || cur_frag_size > PAGE_SIZE)) {
			put_page(page);
			pr_debug("%s: rx error: len %u exceeds truesize %lu\n",
				 dev->name, len, (unsigned long)(truesize - room));
			dev->stats.rx_length_errors++;
			goto err;
		}

		frag = &shinfo->frags[shinfo->nr_frags++];
		__skb_frag_set_page(frag, page);
		skb_frag_off_set(frag, offset);
		skb_frag_size_set(frag, len);
		if (page_is_pfmemalloc(page))
			xdp_buff_set_frag_pfmemalloc(xdp);

		shinfo->xdp_frags_size += len;
	}

	*xdp_frags_truesize = xdp_frags_truesz;
	return 0;

err:
	put_xdp_frags(xdp);
	return -EINVAL;
}

static void *mergeable_xdp_get_buf(struct virtnet_info *vi,
				   struct receive_queue *rq,
				   struct bpf_prog *xdp_prog,
				   void *ctx,
				   unsigned int *frame_sz,
				   int *num_buf,
				   struct page **page,
				   int offset,
				   unsigned int *len,
				   struct virtio_net_hdr_mrg_rxbuf *hdr)
{
	unsigned int truesize = mergeable_ctx_to_truesize(ctx);
	unsigned int headroom = mergeable_ctx_to_headroom(ctx);
	struct page *xdp_page;
	unsigned int xdp_room;

	/* Transient failure which in theory could occur if
	 * in-flight packets from before XDP was enabled reach
	 * the receive path after XDP is loaded.
	 */
	if (unlikely(hdr->hdr.gso_type))
		return NULL;

	/* The XDP core assumes the frag size is PAGE_SIZE, but buffers
	 * with headroom may add a hole in truesize, which
	 * makes their length exceed PAGE_SIZE. So we disable the
	 * hole mechanism for xdp. See add_recvbuf_mergeable().
	 */
	*frame_sz = truesize;

	if (likely(headroom >= virtnet_get_headroom(vi) &&
		   (*num_buf == 1 || xdp_prog->aux->xdp_has_frags))) {
		return page_address(*page) + offset;
	}

	/* This happens when headroom is not enough because
	 * the buffer was prefilled before XDP was set.
	 * This should only happen for the first several packets.
	 * In fact, vq reset can be used here to help us clean up
	 * the prefilled buffers, but many existing devices do not
	 * support it, and we don't want to bother users who are
	 * using xdp normally.
1279 */ 1280 if (!xdp_prog->aux->xdp_has_frags) { 1281 /* linearize data for XDP */ 1282 xdp_page = xdp_linearize_page(rq, num_buf, 1283 *page, offset, 1284 VIRTIO_XDP_HEADROOM, 1285 len); 1286 if (!xdp_page) 1287 return NULL; 1288 } else { 1289 xdp_room = SKB_DATA_ALIGN(VIRTIO_XDP_HEADROOM + 1290 sizeof(struct skb_shared_info)); 1291 if (*len + xdp_room > PAGE_SIZE) 1292 return NULL; 1293 1294 xdp_page = alloc_page(GFP_ATOMIC); 1295 if (!xdp_page) 1296 return NULL; 1297 1298 memcpy(page_address(xdp_page) + VIRTIO_XDP_HEADROOM, 1299 page_address(*page) + offset, *len); 1300 } 1301 1302 *frame_sz = PAGE_SIZE; 1303 1304 put_page(*page); 1305 1306 *page = xdp_page; 1307 1308 return page_address(*page) + VIRTIO_XDP_HEADROOM; 1309 } 1310 1311 static struct sk_buff *receive_mergeable_xdp(struct net_device *dev, 1312 struct virtnet_info *vi, 1313 struct receive_queue *rq, 1314 struct bpf_prog *xdp_prog, 1315 void *buf, 1316 void *ctx, 1317 unsigned int len, 1318 unsigned int *xdp_xmit, 1319 struct virtnet_rq_stats *stats) 1320 { 1321 struct virtio_net_hdr_mrg_rxbuf *hdr = buf; 1322 int num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers); 1323 struct page *page = virt_to_head_page(buf); 1324 int offset = buf - page_address(page); 1325 unsigned int xdp_frags_truesz = 0; 1326 struct sk_buff *head_skb; 1327 unsigned int frame_sz; 1328 struct xdp_buff xdp; 1329 void *data; 1330 u32 act; 1331 int err; 1332 1333 data = mergeable_xdp_get_buf(vi, rq, xdp_prog, ctx, &frame_sz, &num_buf, &page, 1334 offset, &len, hdr); 1335 if (unlikely(!data)) 1336 goto err_xdp; 1337 1338 err = virtnet_build_xdp_buff_mrg(dev, vi, rq, &xdp, data, len, frame_sz, 1339 &num_buf, &xdp_frags_truesz, stats); 1340 if (unlikely(err)) 1341 goto err_xdp; 1342 1343 act = virtnet_xdp_handler(xdp_prog, &xdp, dev, xdp_xmit, stats); 1344 1345 switch (act) { 1346 case XDP_PASS: 1347 head_skb = build_skb_from_xdp_buff(dev, vi, &xdp, xdp_frags_truesz); 1348 if (unlikely(!head_skb)) 1349 break; 1350 return head_skb; 1351 1352 case XDP_TX: 1353 case XDP_REDIRECT: 1354 return NULL; 1355 1356 default: 1357 break; 1358 } 1359 1360 put_xdp_frags(&xdp); 1361 1362 err_xdp: 1363 put_page(page); 1364 mergeable_buf_free(rq, num_buf, dev, stats); 1365 1366 stats->xdp_drops++; 1367 stats->drops++; 1368 return NULL; 1369 } 1370 1371 static struct sk_buff *receive_mergeable(struct net_device *dev, 1372 struct virtnet_info *vi, 1373 struct receive_queue *rq, 1374 void *buf, 1375 void *ctx, 1376 unsigned int len, 1377 unsigned int *xdp_xmit, 1378 struct virtnet_rq_stats *stats) 1379 { 1380 struct virtio_net_hdr_mrg_rxbuf *hdr = buf; 1381 int num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers); 1382 struct page *page = virt_to_head_page(buf); 1383 int offset = buf - page_address(page); 1384 struct sk_buff *head_skb, *curr_skb; 1385 struct bpf_prog *xdp_prog; 1386 unsigned int truesize = mergeable_ctx_to_truesize(ctx); 1387 unsigned int headroom = mergeable_ctx_to_headroom(ctx); 1388 unsigned int tailroom = headroom ? 
sizeof(struct skb_shared_info) : 0; 1389 unsigned int room = SKB_DATA_ALIGN(headroom + tailroom); 1390 1391 head_skb = NULL; 1392 stats->bytes += len - vi->hdr_len; 1393 1394 if (unlikely(len > truesize - room)) { 1395 pr_debug("%s: rx error: len %u exceeds truesize %lu\n", 1396 dev->name, len, (unsigned long)(truesize - room)); 1397 dev->stats.rx_length_errors++; 1398 goto err_skb; 1399 } 1400 1401 if (likely(!vi->xdp_enabled)) { 1402 xdp_prog = NULL; 1403 goto skip_xdp; 1404 } 1405 1406 rcu_read_lock(); 1407 xdp_prog = rcu_dereference(rq->xdp_prog); 1408 if (xdp_prog) { 1409 head_skb = receive_mergeable_xdp(dev, vi, rq, xdp_prog, buf, ctx, 1410 len, xdp_xmit, stats); 1411 rcu_read_unlock(); 1412 return head_skb; 1413 } 1414 rcu_read_unlock(); 1415 1416 skip_xdp: 1417 head_skb = page_to_skb(vi, rq, page, offset, len, truesize, headroom); 1418 curr_skb = head_skb; 1419 1420 if (unlikely(!curr_skb)) 1421 goto err_skb; 1422 while (--num_buf) { 1423 int num_skb_frags; 1424 1425 buf = virtqueue_get_buf_ctx(rq->vq, &len, &ctx); 1426 if (unlikely(!buf)) { 1427 pr_debug("%s: rx error: %d buffers out of %d missing\n", 1428 dev->name, num_buf, 1429 virtio16_to_cpu(vi->vdev, 1430 hdr->num_buffers)); 1431 dev->stats.rx_length_errors++; 1432 goto err_buf; 1433 } 1434 1435 stats->bytes += len; 1436 page = virt_to_head_page(buf); 1437 1438 truesize = mergeable_ctx_to_truesize(ctx); 1439 headroom = mergeable_ctx_to_headroom(ctx); 1440 tailroom = headroom ? sizeof(struct skb_shared_info) : 0; 1441 room = SKB_DATA_ALIGN(headroom + tailroom); 1442 if (unlikely(len > truesize - room)) { 1443 pr_debug("%s: rx error: len %u exceeds truesize %lu\n", 1444 dev->name, len, (unsigned long)(truesize - room)); 1445 dev->stats.rx_length_errors++; 1446 goto err_skb; 1447 } 1448 1449 num_skb_frags = skb_shinfo(curr_skb)->nr_frags; 1450 if (unlikely(num_skb_frags == MAX_SKB_FRAGS)) { 1451 struct sk_buff *nskb = alloc_skb(0, GFP_ATOMIC); 1452 1453 if (unlikely(!nskb)) 1454 goto err_skb; 1455 if (curr_skb == head_skb) 1456 skb_shinfo(curr_skb)->frag_list = nskb; 1457 else 1458 curr_skb->next = nskb; 1459 curr_skb = nskb; 1460 head_skb->truesize += nskb->truesize; 1461 num_skb_frags = 0; 1462 } 1463 if (curr_skb != head_skb) { 1464 head_skb->data_len += len; 1465 head_skb->len += len; 1466 head_skb->truesize += truesize; 1467 } 1468 offset = buf - page_address(page); 1469 if (skb_can_coalesce(curr_skb, num_skb_frags, page, offset)) { 1470 put_page(page); 1471 skb_coalesce_rx_frag(curr_skb, num_skb_frags - 1, 1472 len, truesize); 1473 } else { 1474 skb_add_rx_frag(curr_skb, num_skb_frags, page, 1475 offset, len, truesize); 1476 } 1477 } 1478 1479 ewma_pkt_len_add(&rq->mrg_avg_pkt_len, head_skb->len); 1480 return head_skb; 1481 1482 err_skb: 1483 put_page(page); 1484 mergeable_buf_free(rq, num_buf, dev, stats); 1485 1486 err_buf: 1487 stats->drops++; 1488 dev_kfree_skb(head_skb); 1489 return NULL; 1490 } 1491 1492 static void virtio_skb_set_hash(const struct virtio_net_hdr_v1_hash *hdr_hash, 1493 struct sk_buff *skb) 1494 { 1495 enum pkt_hash_types rss_hash_type; 1496 1497 if (!hdr_hash || !skb) 1498 return; 1499 1500 switch (__le16_to_cpu(hdr_hash->hash_report)) { 1501 case VIRTIO_NET_HASH_REPORT_TCPv4: 1502 case VIRTIO_NET_HASH_REPORT_UDPv4: 1503 case VIRTIO_NET_HASH_REPORT_TCPv6: 1504 case VIRTIO_NET_HASH_REPORT_UDPv6: 1505 case VIRTIO_NET_HASH_REPORT_TCPv6_EX: 1506 case VIRTIO_NET_HASH_REPORT_UDPv6_EX: 1507 rss_hash_type = PKT_HASH_TYPE_L4; 1508 break; 1509 case VIRTIO_NET_HASH_REPORT_IPv4: 1510 case 
VIRTIO_NET_HASH_REPORT_IPv6: 1511 case VIRTIO_NET_HASH_REPORT_IPv6_EX: 1512 rss_hash_type = PKT_HASH_TYPE_L3; 1513 break; 1514 case VIRTIO_NET_HASH_REPORT_NONE: 1515 default: 1516 rss_hash_type = PKT_HASH_TYPE_NONE; 1517 } 1518 skb_set_hash(skb, __le32_to_cpu(hdr_hash->hash_value), rss_hash_type); 1519 } 1520 1521 static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq, 1522 void *buf, unsigned int len, void **ctx, 1523 unsigned int *xdp_xmit, 1524 struct virtnet_rq_stats *stats) 1525 { 1526 struct net_device *dev = vi->dev; 1527 struct sk_buff *skb; 1528 struct virtio_net_hdr_mrg_rxbuf *hdr; 1529 1530 if (unlikely(len < vi->hdr_len + ETH_HLEN)) { 1531 pr_debug("%s: short packet %i\n", dev->name, len); 1532 dev->stats.rx_length_errors++; 1533 virtnet_rq_free_unused_buf(rq->vq, buf); 1534 return; 1535 } 1536 1537 if (vi->mergeable_rx_bufs) 1538 skb = receive_mergeable(dev, vi, rq, buf, ctx, len, xdp_xmit, 1539 stats); 1540 else if (vi->big_packets) 1541 skb = receive_big(dev, vi, rq, buf, len, stats); 1542 else 1543 skb = receive_small(dev, vi, rq, buf, ctx, len, xdp_xmit, stats); 1544 1545 if (unlikely(!skb)) 1546 return; 1547 1548 hdr = skb_vnet_hdr(skb); 1549 if (dev->features & NETIF_F_RXHASH && vi->has_rss_hash_report) 1550 virtio_skb_set_hash((const struct virtio_net_hdr_v1_hash *)hdr, skb); 1551 1552 if (hdr->hdr.flags & VIRTIO_NET_HDR_F_DATA_VALID) 1553 skb->ip_summed = CHECKSUM_UNNECESSARY; 1554 1555 if (virtio_net_hdr_to_skb(skb, &hdr->hdr, 1556 virtio_is_little_endian(vi->vdev))) { 1557 net_warn_ratelimited("%s: bad gso: type: %u, size: %u\n", 1558 dev->name, hdr->hdr.gso_type, 1559 hdr->hdr.gso_size); 1560 goto frame_err; 1561 } 1562 1563 skb_record_rx_queue(skb, vq2rxq(rq->vq)); 1564 skb->protocol = eth_type_trans(skb, dev); 1565 pr_debug("Receiving skb proto 0x%04x len %i type %i\n", 1566 ntohs(skb->protocol), skb->len, skb->pkt_type); 1567 1568 napi_gro_receive(&rq->napi, skb); 1569 return; 1570 1571 frame_err: 1572 dev->stats.rx_frame_errors++; 1573 dev_kfree_skb(skb); 1574 } 1575 1576 /* Unlike mergeable buffers, all buffers are allocated to the 1577 * same size, except for the headroom. For this reason we do 1578 * not need to use mergeable_len_to_ctx here - it is enough 1579 * to store the headroom as the context ignoring the truesize. 
1580 */ 1581 static int add_recvbuf_small(struct virtnet_info *vi, struct receive_queue *rq, 1582 gfp_t gfp) 1583 { 1584 struct page_frag *alloc_frag = &rq->alloc_frag; 1585 char *buf; 1586 unsigned int xdp_headroom = virtnet_get_headroom(vi); 1587 void *ctx = (void *)(unsigned long)xdp_headroom; 1588 int len = vi->hdr_len + VIRTNET_RX_PAD + GOOD_PACKET_LEN + xdp_headroom; 1589 int err; 1590 1591 len = SKB_DATA_ALIGN(len) + 1592 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); 1593 if (unlikely(!skb_page_frag_refill(len, alloc_frag, gfp))) 1594 return -ENOMEM; 1595 1596 buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset; 1597 get_page(alloc_frag->page); 1598 alloc_frag->offset += len; 1599 sg_init_one(rq->sg, buf + VIRTNET_RX_PAD + xdp_headroom, 1600 vi->hdr_len + GOOD_PACKET_LEN); 1601 err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp); 1602 if (err < 0) 1603 put_page(virt_to_head_page(buf)); 1604 return err; 1605 } 1606 1607 static int add_recvbuf_big(struct virtnet_info *vi, struct receive_queue *rq, 1608 gfp_t gfp) 1609 { 1610 struct page *first, *list = NULL; 1611 char *p; 1612 int i, err, offset; 1613 1614 sg_init_table(rq->sg, vi->big_packets_num_skbfrags + 2); 1615 1616 /* page in rq->sg[vi->big_packets_num_skbfrags + 1] is list tail */ 1617 for (i = vi->big_packets_num_skbfrags + 1; i > 1; --i) { 1618 first = get_a_page(rq, gfp); 1619 if (!first) { 1620 if (list) 1621 give_pages(rq, list); 1622 return -ENOMEM; 1623 } 1624 sg_set_buf(&rq->sg[i], page_address(first), PAGE_SIZE); 1625 1626 /* chain new page in list head to match sg */ 1627 first->private = (unsigned long)list; 1628 list = first; 1629 } 1630 1631 first = get_a_page(rq, gfp); 1632 if (!first) { 1633 give_pages(rq, list); 1634 return -ENOMEM; 1635 } 1636 p = page_address(first); 1637 1638 /* rq->sg[0], rq->sg[1] share the same page */ 1639 /* a separated rq->sg[0] for header - required in case !any_header_sg */ 1640 sg_set_buf(&rq->sg[0], p, vi->hdr_len); 1641 1642 /* rq->sg[1] for data packet, from offset */ 1643 offset = sizeof(struct padded_vnet_hdr); 1644 sg_set_buf(&rq->sg[1], p + offset, PAGE_SIZE - offset); 1645 1646 /* chain first in list head */ 1647 first->private = (unsigned long)list; 1648 err = virtqueue_add_inbuf(rq->vq, rq->sg, vi->big_packets_num_skbfrags + 2, 1649 first, gfp); 1650 if (err < 0) 1651 give_pages(rq, first); 1652 1653 return err; 1654 } 1655 1656 static unsigned int get_mergeable_buf_len(struct receive_queue *rq, 1657 struct ewma_pkt_len *avg_pkt_len, 1658 unsigned int room) 1659 { 1660 struct virtnet_info *vi = rq->vq->vdev->priv; 1661 const size_t hdr_len = vi->hdr_len; 1662 unsigned int len; 1663 1664 if (room) 1665 return PAGE_SIZE - room; 1666 1667 len = hdr_len + clamp_t(unsigned int, ewma_pkt_len_read(avg_pkt_len), 1668 rq->min_buf_len, PAGE_SIZE - hdr_len); 1669 1670 return ALIGN(len, L1_CACHE_BYTES); 1671 } 1672 1673 static int add_recvbuf_mergeable(struct virtnet_info *vi, 1674 struct receive_queue *rq, gfp_t gfp) 1675 { 1676 struct page_frag *alloc_frag = &rq->alloc_frag; 1677 unsigned int headroom = virtnet_get_headroom(vi); 1678 unsigned int tailroom = headroom ? sizeof(struct skb_shared_info) : 0; 1679 unsigned int room = SKB_DATA_ALIGN(headroom + tailroom); 1680 char *buf; 1681 void *ctx; 1682 int err; 1683 unsigned int len, hole; 1684 1685 /* Extra tailroom is needed to satisfy XDP's assumption. This 1686 * means rx frags coalescing won't work, but consider we've 1687 * disabled GSO for XDP, it won't be a big issue. 
1688 */ 1689 len = get_mergeable_buf_len(rq, &rq->mrg_avg_pkt_len, room); 1690 if (unlikely(!skb_page_frag_refill(len + room, alloc_frag, gfp))) 1691 return -ENOMEM; 1692 1693 buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset; 1694 buf += headroom; /* advance address leaving hole at front of pkt */ 1695 get_page(alloc_frag->page); 1696 alloc_frag->offset += len + room; 1697 hole = alloc_frag->size - alloc_frag->offset; 1698 if (hole < len + room) { 1699 /* To avoid internal fragmentation, if there is very likely not 1700 * enough space for another buffer, add the remaining space to 1701 * the current buffer. 1702 * XDP core assumes that frame_size of xdp_buff and the length 1703 * of the frag are PAGE_SIZE, so we disable the hole mechanism. 1704 */ 1705 if (!headroom) 1706 len += hole; 1707 alloc_frag->offset += hole; 1708 } 1709 1710 sg_init_one(rq->sg, buf, len); 1711 ctx = mergeable_len_to_ctx(len + room, headroom); 1712 err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp); 1713 if (err < 0) 1714 put_page(virt_to_head_page(buf)); 1715 1716 return err; 1717 } 1718 1719 /* 1720 * Returns false if we couldn't fill entirely (OOM). 1721 * 1722 * Normally run in the receive path, but can also be run from ndo_open 1723 * before we're receiving packets, or from refill_work which is 1724 * careful to disable receiving (using napi_disable). 1725 */ 1726 static bool try_fill_recv(struct virtnet_info *vi, struct receive_queue *rq, 1727 gfp_t gfp) 1728 { 1729 int err; 1730 bool oom; 1731 1732 do { 1733 if (vi->mergeable_rx_bufs) 1734 err = add_recvbuf_mergeable(vi, rq, gfp); 1735 else if (vi->big_packets) 1736 err = add_recvbuf_big(vi, rq, gfp); 1737 else 1738 err = add_recvbuf_small(vi, rq, gfp); 1739 1740 oom = err == -ENOMEM; 1741 if (err) 1742 break; 1743 } while (rq->vq->num_free); 1744 if (virtqueue_kick_prepare(rq->vq) && virtqueue_notify(rq->vq)) { 1745 unsigned long flags; 1746 1747 flags = u64_stats_update_begin_irqsave(&rq->stats.syncp); 1748 rq->stats.kicks++; 1749 u64_stats_update_end_irqrestore(&rq->stats.syncp, flags); 1750 } 1751 1752 return !oom; 1753 } 1754 1755 static void skb_recv_done(struct virtqueue *rvq) 1756 { 1757 struct virtnet_info *vi = rvq->vdev->priv; 1758 struct receive_queue *rq = &vi->rq[vq2rxq(rvq)]; 1759 1760 virtqueue_napi_schedule(&rq->napi, rvq); 1761 } 1762 1763 static void virtnet_napi_enable(struct virtqueue *vq, struct napi_struct *napi) 1764 { 1765 napi_enable(napi); 1766 1767 /* If all buffers were filled by other side before we napi_enabled, we 1768 * won't get another interrupt, so process any outstanding packets now. 1769 * Call local_bh_enable after to trigger softIRQ processing. 1770 */ 1771 local_bh_disable(); 1772 virtqueue_napi_schedule(napi, vq); 1773 local_bh_enable(); 1774 } 1775 1776 static void virtnet_napi_tx_enable(struct virtnet_info *vi, 1777 struct virtqueue *vq, 1778 struct napi_struct *napi) 1779 { 1780 if (!napi->weight) 1781 return; 1782 1783 /* Tx napi touches cachelines on the cpu handling tx interrupts. Only 1784 * enable the feature if this is likely affine with the transmit path. 
1785 */ 1786 if (!vi->affinity_hint_set) { 1787 napi->weight = 0; 1788 return; 1789 } 1790 1791 return virtnet_napi_enable(vq, napi); 1792 } 1793 1794 static void virtnet_napi_tx_disable(struct napi_struct *napi) 1795 { 1796 if (napi->weight) 1797 napi_disable(napi); 1798 } 1799 1800 static void refill_work(struct work_struct *work) 1801 { 1802 struct virtnet_info *vi = 1803 container_of(work, struct virtnet_info, refill.work); 1804 bool still_empty; 1805 int i; 1806 1807 for (i = 0; i < vi->curr_queue_pairs; i++) { 1808 struct receive_queue *rq = &vi->rq[i]; 1809 1810 napi_disable(&rq->napi); 1811 still_empty = !try_fill_recv(vi, rq, GFP_KERNEL); 1812 virtnet_napi_enable(rq->vq, &rq->napi); 1813 1814 /* In theory, this can happen: if we don't get any buffers in 1815 * we will *never* try to fill again. 1816 */ 1817 if (still_empty) 1818 schedule_delayed_work(&vi->refill, HZ/2); 1819 } 1820 } 1821 1822 static int virtnet_receive(struct receive_queue *rq, int budget, 1823 unsigned int *xdp_xmit) 1824 { 1825 struct virtnet_info *vi = rq->vq->vdev->priv; 1826 struct virtnet_rq_stats stats = {}; 1827 unsigned int len; 1828 void *buf; 1829 int i; 1830 1831 if (!vi->big_packets || vi->mergeable_rx_bufs) { 1832 void *ctx; 1833 1834 while (stats.packets < budget && 1835 (buf = virtqueue_get_buf_ctx(rq->vq, &len, &ctx))) { 1836 receive_buf(vi, rq, buf, len, ctx, xdp_xmit, &stats); 1837 stats.packets++; 1838 } 1839 } else { 1840 while (stats.packets < budget && 1841 (buf = virtqueue_get_buf(rq->vq, &len)) != NULL) { 1842 receive_buf(vi, rq, buf, len, NULL, xdp_xmit, &stats); 1843 stats.packets++; 1844 } 1845 } 1846 1847 if (rq->vq->num_free > min((unsigned int)budget, virtqueue_get_vring_size(rq->vq)) / 2) { 1848 if (!try_fill_recv(vi, rq, GFP_ATOMIC)) { 1849 spin_lock(&vi->refill_lock); 1850 if (vi->refill_enabled) 1851 schedule_delayed_work(&vi->refill, 0); 1852 spin_unlock(&vi->refill_lock); 1853 } 1854 } 1855 1856 u64_stats_update_begin(&rq->stats.syncp); 1857 for (i = 0; i < VIRTNET_RQ_STATS_LEN; i++) { 1858 size_t offset = virtnet_rq_stats_desc[i].offset; 1859 u64 *item; 1860 1861 item = (u64 *)((u8 *)&rq->stats + offset); 1862 *item += *(u64 *)((u8 *)&stats + offset); 1863 } 1864 u64_stats_update_end(&rq->stats.syncp); 1865 1866 return stats.packets; 1867 } 1868 1869 static void virtnet_poll_cleantx(struct receive_queue *rq) 1870 { 1871 struct virtnet_info *vi = rq->vq->vdev->priv; 1872 unsigned int index = vq2rxq(rq->vq); 1873 struct send_queue *sq = &vi->sq[index]; 1874 struct netdev_queue *txq = netdev_get_tx_queue(vi->dev, index); 1875 1876 if (!sq->napi.weight || is_xdp_raw_buffer_queue(vi, index)) 1877 return; 1878 1879 if (__netif_tx_trylock(txq)) { 1880 if (sq->reset) { 1881 __netif_tx_unlock(txq); 1882 return; 1883 } 1884 1885 do { 1886 virtqueue_disable_cb(sq->vq); 1887 free_old_xmit_skbs(sq, true); 1888 } while (unlikely(!virtqueue_enable_cb_delayed(sq->vq))); 1889 1890 if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS) 1891 netif_tx_wake_queue(txq); 1892 1893 __netif_tx_unlock(txq); 1894 } 1895 } 1896 1897 static int virtnet_poll(struct napi_struct *napi, int budget) 1898 { 1899 struct receive_queue *rq = 1900 container_of(napi, struct receive_queue, napi); 1901 struct virtnet_info *vi = rq->vq->vdev->priv; 1902 struct send_queue *sq; 1903 unsigned int received; 1904 unsigned int xdp_xmit = 0; 1905 1906 virtnet_poll_cleantx(rq); 1907 1908 received = virtnet_receive(rq, budget, &xdp_xmit); 1909 1910 if (xdp_xmit & VIRTIO_XDP_REDIR) 1911 xdp_do_flush(); 1912 1913 /* Out of packets? 
*/ 1914 if (received < budget) 1915 virtqueue_napi_complete(napi, rq->vq, received); 1916 1917 if (xdp_xmit & VIRTIO_XDP_TX) { 1918 sq = virtnet_xdp_get_sq(vi); 1919 if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq)) { 1920 u64_stats_update_begin(&sq->stats.syncp); 1921 sq->stats.kicks++; 1922 u64_stats_update_end(&sq->stats.syncp); 1923 } 1924 virtnet_xdp_put_sq(vi, sq); 1925 } 1926 1927 return received; 1928 } 1929 1930 static int virtnet_open(struct net_device *dev) 1931 { 1932 struct virtnet_info *vi = netdev_priv(dev); 1933 int i, err; 1934 1935 enable_delayed_refill(vi); 1936 1937 for (i = 0; i < vi->max_queue_pairs; i++) { 1938 if (i < vi->curr_queue_pairs) 1939 /* Make sure we have some buffers: if oom use wq. */ 1940 if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL)) 1941 schedule_delayed_work(&vi->refill, 0); 1942 1943 err = xdp_rxq_info_reg(&vi->rq[i].xdp_rxq, dev, i, vi->rq[i].napi.napi_id); 1944 if (err < 0) 1945 return err; 1946 1947 err = xdp_rxq_info_reg_mem_model(&vi->rq[i].xdp_rxq, 1948 MEM_TYPE_PAGE_SHARED, NULL); 1949 if (err < 0) { 1950 xdp_rxq_info_unreg(&vi->rq[i].xdp_rxq); 1951 return err; 1952 } 1953 1954 virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi); 1955 virtnet_napi_tx_enable(vi, vi->sq[i].vq, &vi->sq[i].napi); 1956 } 1957 1958 return 0; 1959 } 1960 1961 static int virtnet_poll_tx(struct napi_struct *napi, int budget) 1962 { 1963 struct send_queue *sq = container_of(napi, struct send_queue, napi); 1964 struct virtnet_info *vi = sq->vq->vdev->priv; 1965 unsigned int index = vq2txq(sq->vq); 1966 struct netdev_queue *txq; 1967 int opaque; 1968 bool done; 1969 1970 if (unlikely(is_xdp_raw_buffer_queue(vi, index))) { 1971 /* We don't need to enable cb for XDP */ 1972 napi_complete_done(napi, 0); 1973 return 0; 1974 } 1975 1976 txq = netdev_get_tx_queue(vi->dev, index); 1977 __netif_tx_lock(txq, raw_smp_processor_id()); 1978 virtqueue_disable_cb(sq->vq); 1979 free_old_xmit_skbs(sq, true); 1980 1981 if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS) 1982 netif_tx_wake_queue(txq); 1983 1984 opaque = virtqueue_enable_cb_prepare(sq->vq); 1985 1986 done = napi_complete_done(napi, 0); 1987 1988 if (!done) 1989 virtqueue_disable_cb(sq->vq); 1990 1991 __netif_tx_unlock(txq); 1992 1993 if (done) { 1994 if (unlikely(virtqueue_poll(sq->vq, opaque))) { 1995 if (napi_schedule_prep(napi)) { 1996 __netif_tx_lock(txq, raw_smp_processor_id()); 1997 virtqueue_disable_cb(sq->vq); 1998 __netif_tx_unlock(txq); 1999 __napi_schedule(napi); 2000 } 2001 } 2002 } 2003 2004 return 0; 2005 } 2006 2007 static int xmit_skb(struct send_queue *sq, struct sk_buff *skb) 2008 { 2009 struct virtio_net_hdr_mrg_rxbuf *hdr; 2010 const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest; 2011 struct virtnet_info *vi = sq->vq->vdev->priv; 2012 int num_sg; 2013 unsigned hdr_len = vi->hdr_len; 2014 bool can_push; 2015 2016 pr_debug("%s: xmit %p %pM\n", vi->dev->name, skb, dest); 2017 2018 can_push = vi->any_header_sg && 2019 !((unsigned long)skb->data & (__alignof__(*hdr) - 1)) && 2020 !skb_header_cloned(skb) && skb_headroom(skb) >= hdr_len; 2021 /* Even if we can, don't push here yet as this would skew 2022 * csum_start offset below. 
*/ 2023 if (can_push) 2024 hdr = (struct virtio_net_hdr_mrg_rxbuf *)(skb->data - hdr_len); 2025 else 2026 hdr = skb_vnet_hdr(skb); 2027 2028 if (virtio_net_hdr_from_skb(skb, &hdr->hdr, 2029 virtio_is_little_endian(vi->vdev), false, 2030 0)) 2031 return -EPROTO; 2032 2033 if (vi->mergeable_rx_bufs) 2034 hdr->num_buffers = 0; 2035 2036 sg_init_table(sq->sg, skb_shinfo(skb)->nr_frags + (can_push ? 1 : 2)); 2037 if (can_push) { 2038 __skb_push(skb, hdr_len); 2039 num_sg = skb_to_sgvec(skb, sq->sg, 0, skb->len); 2040 if (unlikely(num_sg < 0)) 2041 return num_sg; 2042 /* Pull header back to avoid skew in tx bytes calculations. */ 2043 __skb_pull(skb, hdr_len); 2044 } else { 2045 sg_set_buf(sq->sg, hdr, hdr_len); 2046 num_sg = skb_to_sgvec(skb, sq->sg + 1, 0, skb->len); 2047 if (unlikely(num_sg < 0)) 2048 return num_sg; 2049 num_sg++; 2050 } 2051 return virtqueue_add_outbuf(sq->vq, sq->sg, num_sg, skb, GFP_ATOMIC); 2052 } 2053 2054 static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev) 2055 { 2056 struct virtnet_info *vi = netdev_priv(dev); 2057 int qnum = skb_get_queue_mapping(skb); 2058 struct send_queue *sq = &vi->sq[qnum]; 2059 int err; 2060 struct netdev_queue *txq = netdev_get_tx_queue(dev, qnum); 2061 bool kick = !netdev_xmit_more(); 2062 bool use_napi = sq->napi.weight; 2063 2064 /* Free up any pending old buffers before queueing new ones. */ 2065 do { 2066 if (use_napi) 2067 virtqueue_disable_cb(sq->vq); 2068 2069 free_old_xmit_skbs(sq, false); 2070 2071 } while (use_napi && kick && 2072 unlikely(!virtqueue_enable_cb_delayed(sq->vq))); 2073 2074 /* timestamp packet in software */ 2075 skb_tx_timestamp(skb); 2076 2077 /* Try to transmit */ 2078 err = xmit_skb(sq, skb); 2079 2080 /* This should not happen! */ 2081 if (unlikely(err)) { 2082 dev->stats.tx_fifo_errors++; 2083 if (net_ratelimit()) 2084 dev_warn(&dev->dev, 2085 "Unexpected TXQ (%d) queue failure: %d\n", 2086 qnum, err); 2087 dev->stats.tx_dropped++; 2088 dev_kfree_skb_any(skb); 2089 return NETDEV_TX_OK; 2090 } 2091 2092 /* Don't wait up for transmitted skbs to be freed. */ 2093 if (!use_napi) { 2094 skb_orphan(skb); 2095 nf_reset_ct(skb); 2096 } 2097 2098 check_sq_full_and_disable(vi, dev, sq); 2099 2100 if (kick || netif_xmit_stopped(txq)) { 2101 if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq)) { 2102 u64_stats_update_begin(&sq->stats.syncp); 2103 sq->stats.kicks++; 2104 u64_stats_update_end(&sq->stats.syncp); 2105 } 2106 } 2107 2108 return NETDEV_TX_OK; 2109 } 2110 2111 static int virtnet_rx_resize(struct virtnet_info *vi, 2112 struct receive_queue *rq, u32 ring_num) 2113 { 2114 bool running = netif_running(vi->dev); 2115 int err, qindex; 2116 2117 qindex = rq - vi->rq; 2118 2119 if (running) 2120 napi_disable(&rq->napi); 2121 2122 err = virtqueue_resize(rq->vq, ring_num, virtnet_rq_free_unused_buf); 2123 if (err) 2124 netdev_err(vi->dev, "resize rx fail: rx queue index: %d err: %d\n", qindex, err); 2125 2126 if (!try_fill_recv(vi, rq, GFP_KERNEL)) 2127 schedule_delayed_work(&vi->refill, 0); 2128 2129 if (running) 2130 virtnet_napi_enable(rq->vq, &rq->napi); 2131 return err; 2132 } 2133 2134 static int virtnet_tx_resize(struct virtnet_info *vi, 2135 struct send_queue *sq, u32 ring_num) 2136 { 2137 bool running = netif_running(vi->dev); 2138 struct netdev_queue *txq; 2139 int err, qindex; 2140 2141 qindex = sq - vi->sq; 2142 2143 if (running) 2144 virtnet_napi_tx_disable(&sq->napi); 2145 2146 txq = netdev_get_tx_queue(vi->dev, qindex); 2147 2148 /* 1. wait all xmit complete 2149 * 2.
fix the race of netif_stop_subqueue() vs netif_start_subqueue() 2150 */ 2151 __netif_tx_lock_bh(txq); 2152 2153 /* Prevent rx poll from accessing sq. */ 2154 sq->reset = true; 2155 2156 /* Prevent the upper layer from trying to send packets. */ 2157 netif_stop_subqueue(vi->dev, qindex); 2158 2159 __netif_tx_unlock_bh(txq); 2160 2161 err = virtqueue_resize(sq->vq, ring_num, virtnet_sq_free_unused_buf); 2162 if (err) 2163 netdev_err(vi->dev, "resize tx fail: tx queue index: %d err: %d\n", qindex, err); 2164 2165 __netif_tx_lock_bh(txq); 2166 sq->reset = false; 2167 netif_tx_wake_queue(txq); 2168 __netif_tx_unlock_bh(txq); 2169 2170 if (running) 2171 virtnet_napi_tx_enable(vi, sq->vq, &sq->napi); 2172 return err; 2173 } 2174 2175 /* 2176 * Send command via the control virtqueue and check status. Commands 2177 * supported by the hypervisor, as indicated by feature bits, should 2178 * never fail unless improperly formatted. 2179 */ 2180 static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd, 2181 struct scatterlist *out) 2182 { 2183 struct scatterlist *sgs[4], hdr, stat; 2184 unsigned out_num = 0, tmp; 2185 int ret; 2186 2187 /* Caller should know better */ 2188 BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ)); 2189 2190 vi->ctrl->status = ~0; 2191 vi->ctrl->hdr.class = class; 2192 vi->ctrl->hdr.cmd = cmd; 2193 /* Add header */ 2194 sg_init_one(&hdr, &vi->ctrl->hdr, sizeof(vi->ctrl->hdr)); 2195 sgs[out_num++] = &hdr; 2196 2197 if (out) 2198 sgs[out_num++] = out; 2199 2200 /* Add return status. */ 2201 sg_init_one(&stat, &vi->ctrl->status, sizeof(vi->ctrl->status)); 2202 sgs[out_num] = &stat; 2203 2204 BUG_ON(out_num + 1 > ARRAY_SIZE(sgs)); 2205 ret = virtqueue_add_sgs(vi->cvq, sgs, out_num, 1, vi, GFP_ATOMIC); 2206 if (ret < 0) { 2207 dev_warn(&vi->vdev->dev, 2208 "Failed to add sgs for command vq: %d\n.", ret); 2209 return false; 2210 } 2211 2212 if (unlikely(!virtqueue_kick(vi->cvq))) 2213 return vi->ctrl->status == VIRTIO_NET_OK; 2214 2215 /* Spin for a response, the kick causes an ioport write, trapping 2216 * into the hypervisor, so the request should be handled immediately. 2217 */ 2218 while (!virtqueue_get_buf(vi->cvq, &tmp) && 2219 !virtqueue_is_broken(vi->cvq)) 2220 cpu_relax(); 2221 2222 return vi->ctrl->status == VIRTIO_NET_OK; 2223 } 2224 2225 static int virtnet_set_mac_address(struct net_device *dev, void *p) 2226 { 2227 struct virtnet_info *vi = netdev_priv(dev); 2228 struct virtio_device *vdev = vi->vdev; 2229 int ret; 2230 struct sockaddr *addr; 2231 struct scatterlist sg; 2232 2233 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STANDBY)) 2234 return -EOPNOTSUPP; 2235 2236 addr = kmemdup(p, sizeof(*addr), GFP_KERNEL); 2237 if (!addr) 2238 return -ENOMEM; 2239 2240 ret = eth_prepare_mac_addr_change(dev, addr); 2241 if (ret) 2242 goto out; 2243 2244 if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR)) { 2245 sg_init_one(&sg, addr->sa_data, dev->addr_len); 2246 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC, 2247 VIRTIO_NET_CTRL_MAC_ADDR_SET, &sg)) { 2248 dev_warn(&vdev->dev, 2249 "Failed to set mac address by vq command.\n"); 2250 ret = -EINVAL; 2251 goto out; 2252 } 2253 } else if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC) && 2254 !virtio_has_feature(vdev, VIRTIO_F_VERSION_1)) { 2255 unsigned int i; 2256 2257 /* Naturally, this has an atomicity problem. 
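* The MAC is written one byte at a time with virtio_cwrite8(), so the device can observe a partially updated address while the loop below runs.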
*/ 2258 for (i = 0; i < dev->addr_len; i++) 2259 virtio_cwrite8(vdev, 2260 offsetof(struct virtio_net_config, mac) + 2261 i, addr->sa_data[i]); 2262 } 2263 2264 eth_commit_mac_addr_change(dev, p); 2265 ret = 0; 2266 2267 out: 2268 kfree(addr); 2269 return ret; 2270 } 2271 2272 static void virtnet_stats(struct net_device *dev, 2273 struct rtnl_link_stats64 *tot) 2274 { 2275 struct virtnet_info *vi = netdev_priv(dev); 2276 unsigned int start; 2277 int i; 2278 2279 for (i = 0; i < vi->max_queue_pairs; i++) { 2280 u64 tpackets, tbytes, terrors, rpackets, rbytes, rdrops; 2281 struct receive_queue *rq = &vi->rq[i]; 2282 struct send_queue *sq = &vi->sq[i]; 2283 2284 do { 2285 start = u64_stats_fetch_begin(&sq->stats.syncp); 2286 tpackets = sq->stats.packets; 2287 tbytes = sq->stats.bytes; 2288 terrors = sq->stats.tx_timeouts; 2289 } while (u64_stats_fetch_retry(&sq->stats.syncp, start)); 2290 2291 do { 2292 start = u64_stats_fetch_begin(&rq->stats.syncp); 2293 rpackets = rq->stats.packets; 2294 rbytes = rq->stats.bytes; 2295 rdrops = rq->stats.drops; 2296 } while (u64_stats_fetch_retry(&rq->stats.syncp, start)); 2297 2298 tot->rx_packets += rpackets; 2299 tot->tx_packets += tpackets; 2300 tot->rx_bytes += rbytes; 2301 tot->tx_bytes += tbytes; 2302 tot->rx_dropped += rdrops; 2303 tot->tx_errors += terrors; 2304 } 2305 2306 tot->tx_dropped = dev->stats.tx_dropped; 2307 tot->tx_fifo_errors = dev->stats.tx_fifo_errors; 2308 tot->rx_length_errors = dev->stats.rx_length_errors; 2309 tot->rx_frame_errors = dev->stats.rx_frame_errors; 2310 } 2311 2312 static void virtnet_ack_link_announce(struct virtnet_info *vi) 2313 { 2314 rtnl_lock(); 2315 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_ANNOUNCE, 2316 VIRTIO_NET_CTRL_ANNOUNCE_ACK, NULL)) 2317 dev_warn(&vi->dev->dev, "Failed to ack link announce.\n"); 2318 rtnl_unlock(); 2319 } 2320 2321 static int _virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs) 2322 { 2323 struct scatterlist sg; 2324 struct net_device *dev = vi->dev; 2325 2326 if (!vi->has_cvq || !virtio_has_feature(vi->vdev, VIRTIO_NET_F_MQ)) 2327 return 0; 2328 2329 vi->ctrl->mq.virtqueue_pairs = cpu_to_virtio16(vi->vdev, queue_pairs); 2330 sg_init_one(&sg, &vi->ctrl->mq, sizeof(vi->ctrl->mq)); 2331 2332 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ, 2333 VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET, &sg)) { 2334 dev_warn(&dev->dev, "Fail to set num of queue pairs to %d\n", 2335 queue_pairs); 2336 return -EINVAL; 2337 } else { 2338 vi->curr_queue_pairs = queue_pairs; 2339 /* virtnet_open() will refill when device is going to up. */ 2340 if (dev->flags & IFF_UP) 2341 schedule_delayed_work(&vi->refill, 0); 2342 } 2343 2344 return 0; 2345 } 2346 2347 static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs) 2348 { 2349 int err; 2350 2351 rtnl_lock(); 2352 err = _virtnet_set_queues(vi, queue_pairs); 2353 rtnl_unlock(); 2354 return err; 2355 } 2356 2357 static int virtnet_close(struct net_device *dev) 2358 { 2359 struct virtnet_info *vi = netdev_priv(dev); 2360 int i; 2361 2362 /* Make sure NAPI doesn't schedule refill work */ 2363 disable_delayed_refill(vi); 2364 /* Make sure refill_work doesn't re-enable napi! 
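* refill_work() brackets try_fill_recv() with napi_disable()/virtnet_napi_enable(), so it must be flushed before the per-queue napi_disable() calls below.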
*/ 2365 cancel_delayed_work_sync(&vi->refill); 2366 2367 for (i = 0; i < vi->max_queue_pairs; i++) { 2368 virtnet_napi_tx_disable(&vi->sq[i].napi); 2369 napi_disable(&vi->rq[i].napi); 2370 xdp_rxq_info_unreg(&vi->rq[i].xdp_rxq); 2371 } 2372 2373 return 0; 2374 } 2375 2376 static void virtnet_set_rx_mode(struct net_device *dev) 2377 { 2378 struct virtnet_info *vi = netdev_priv(dev); 2379 struct scatterlist sg[2]; 2380 struct virtio_net_ctrl_mac *mac_data; 2381 struct netdev_hw_addr *ha; 2382 int uc_count; 2383 int mc_count; 2384 void *buf; 2385 int i; 2386 2387 /* We can't dynamically set ndo_set_rx_mode, so return gracefully */ 2388 if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_RX)) 2389 return; 2390 2391 vi->ctrl->promisc = ((dev->flags & IFF_PROMISC) != 0); 2392 vi->ctrl->allmulti = ((dev->flags & IFF_ALLMULTI) != 0); 2393 2394 sg_init_one(sg, &vi->ctrl->promisc, sizeof(vi->ctrl->promisc)); 2395 2396 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX, 2397 VIRTIO_NET_CTRL_RX_PROMISC, sg)) 2398 dev_warn(&dev->dev, "Failed to %sable promisc mode.\n", 2399 vi->ctrl->promisc ? "en" : "dis"); 2400 2401 sg_init_one(sg, &vi->ctrl->allmulti, sizeof(vi->ctrl->allmulti)); 2402 2403 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX, 2404 VIRTIO_NET_CTRL_RX_ALLMULTI, sg)) 2405 dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n", 2406 vi->ctrl->allmulti ? "en" : "dis"); 2407 2408 uc_count = netdev_uc_count(dev); 2409 mc_count = netdev_mc_count(dev); 2410 /* MAC filter - use one buffer for both lists */ 2411 buf = kzalloc(((uc_count + mc_count) * ETH_ALEN) + 2412 (2 * sizeof(mac_data->entries)), GFP_ATOMIC); 2413 mac_data = buf; 2414 if (!buf) 2415 return; 2416 2417 sg_init_table(sg, 2); 2418 2419 /* Store the unicast list and count in the front of the buffer */ 2420 mac_data->entries = cpu_to_virtio32(vi->vdev, uc_count); 2421 i = 0; 2422 netdev_for_each_uc_addr(ha, dev) 2423 memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN); 2424 2425 sg_set_buf(&sg[0], mac_data, 2426 sizeof(mac_data->entries) + (uc_count * ETH_ALEN)); 2427 2428 /* multicast list and count fill the end */ 2429 mac_data = (void *)&mac_data->macs[uc_count][0]; 2430 2431 mac_data->entries = cpu_to_virtio32(vi->vdev, mc_count); 2432 i = 0; 2433 netdev_for_each_mc_addr(ha, dev) 2434 memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN); 2435 2436 sg_set_buf(&sg[1], mac_data, 2437 sizeof(mac_data->entries) + (mc_count * ETH_ALEN)); 2438 2439 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC, 2440 VIRTIO_NET_CTRL_MAC_TABLE_SET, sg)) 2441 dev_warn(&dev->dev, "Failed to set MAC filter table.\n"); 2442 2443 kfree(buf); 2444 } 2445 2446 static int virtnet_vlan_rx_add_vid(struct net_device *dev, 2447 __be16 proto, u16 vid) 2448 { 2449 struct virtnet_info *vi = netdev_priv(dev); 2450 struct scatterlist sg; 2451 2452 vi->ctrl->vid = cpu_to_virtio16(vi->vdev, vid); 2453 sg_init_one(&sg, &vi->ctrl->vid, sizeof(vi->ctrl->vid)); 2454 2455 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN, 2456 VIRTIO_NET_CTRL_VLAN_ADD, &sg)) 2457 dev_warn(&dev->dev, "Failed to add VLAN ID %d.\n", vid); 2458 return 0; 2459 } 2460 2461 static int virtnet_vlan_rx_kill_vid(struct net_device *dev, 2462 __be16 proto, u16 vid) 2463 { 2464 struct virtnet_info *vi = netdev_priv(dev); 2465 struct scatterlist sg; 2466 2467 vi->ctrl->vid = cpu_to_virtio16(vi->vdev, vid); 2468 sg_init_one(&sg, &vi->ctrl->vid, sizeof(vi->ctrl->vid)); 2469 2470 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN, 2471 VIRTIO_NET_CTRL_VLAN_DEL, &sg)) 2472 dev_warn(&dev->dev, "Failed to kill 
VLAN ID %d.\n", vid); 2473 return 0; 2474 } 2475 2476 static void virtnet_clean_affinity(struct virtnet_info *vi) 2477 { 2478 int i; 2479 2480 if (vi->affinity_hint_set) { 2481 for (i = 0; i < vi->max_queue_pairs; i++) { 2482 virtqueue_set_affinity(vi->rq[i].vq, NULL); 2483 virtqueue_set_affinity(vi->sq[i].vq, NULL); 2484 } 2485 2486 vi->affinity_hint_set = false; 2487 } 2488 } 2489 2490 static void virtnet_set_affinity(struct virtnet_info *vi) 2491 { 2492 cpumask_var_t mask; 2493 int stragglers; 2494 int group_size; 2495 int i, j, cpu; 2496 int num_cpu; 2497 int stride; 2498 2499 if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) { 2500 virtnet_clean_affinity(vi); 2501 return; 2502 } 2503 2504 num_cpu = num_online_cpus(); 2505 stride = max_t(int, num_cpu / vi->curr_queue_pairs, 1); 2506 stragglers = num_cpu >= vi->curr_queue_pairs ? 2507 num_cpu % vi->curr_queue_pairs : 2508 0; 2509 cpu = cpumask_first(cpu_online_mask); 2510 2511 for (i = 0; i < vi->curr_queue_pairs; i++) { 2512 group_size = stride + (i < stragglers ? 1 : 0); 2513 2514 for (j = 0; j < group_size; j++) { 2515 cpumask_set_cpu(cpu, mask); 2516 cpu = cpumask_next_wrap(cpu, cpu_online_mask, 2517 nr_cpu_ids, false); 2518 } 2519 virtqueue_set_affinity(vi->rq[i].vq, mask); 2520 virtqueue_set_affinity(vi->sq[i].vq, mask); 2521 __netif_set_xps_queue(vi->dev, cpumask_bits(mask), i, XPS_CPUS); 2522 cpumask_clear(mask); 2523 } 2524 2525 vi->affinity_hint_set = true; 2526 free_cpumask_var(mask); 2527 } 2528 2529 static int virtnet_cpu_online(unsigned int cpu, struct hlist_node *node) 2530 { 2531 struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info, 2532 node); 2533 virtnet_set_affinity(vi); 2534 return 0; 2535 } 2536 2537 static int virtnet_cpu_dead(unsigned int cpu, struct hlist_node *node) 2538 { 2539 struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info, 2540 node_dead); 2541 virtnet_set_affinity(vi); 2542 return 0; 2543 } 2544 2545 static int virtnet_cpu_down_prep(unsigned int cpu, struct hlist_node *node) 2546 { 2547 struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info, 2548 node); 2549 2550 virtnet_clean_affinity(vi); 2551 return 0; 2552 } 2553 2554 static enum cpuhp_state virtionet_online; 2555 2556 static int virtnet_cpu_notif_add(struct virtnet_info *vi) 2557 { 2558 int ret; 2559 2560 ret = cpuhp_state_add_instance_nocalls(virtionet_online, &vi->node); 2561 if (ret) 2562 return ret; 2563 ret = cpuhp_state_add_instance_nocalls(CPUHP_VIRT_NET_DEAD, 2564 &vi->node_dead); 2565 if (!ret) 2566 return ret; 2567 cpuhp_state_remove_instance_nocalls(virtionet_online, &vi->node); 2568 return ret; 2569 } 2570 2571 static void virtnet_cpu_notif_remove(struct virtnet_info *vi) 2572 { 2573 cpuhp_state_remove_instance_nocalls(virtionet_online, &vi->node); 2574 cpuhp_state_remove_instance_nocalls(CPUHP_VIRT_NET_DEAD, 2575 &vi->node_dead); 2576 } 2577 2578 static void virtnet_get_ringparam(struct net_device *dev, 2579 struct ethtool_ringparam *ring, 2580 struct kernel_ethtool_ringparam *kernel_ring, 2581 struct netlink_ext_ack *extack) 2582 { 2583 struct virtnet_info *vi = netdev_priv(dev); 2584 2585 ring->rx_max_pending = vi->rq[0].vq->num_max; 2586 ring->tx_max_pending = vi->sq[0].vq->num_max; 2587 ring->rx_pending = virtqueue_get_vring_size(vi->rq[0].vq); 2588 ring->tx_pending = virtqueue_get_vring_size(vi->sq[0].vq); 2589 } 2590 2591 static int virtnet_set_ringparam(struct net_device *dev, 2592 struct ethtool_ringparam *ring, 2593 struct kernel_ethtool_ringparam *kernel_ring, 2594 struct 
netlink_ext_ack *extack) 2595 { 2596 struct virtnet_info *vi = netdev_priv(dev); 2597 u32 rx_pending, tx_pending; 2598 struct receive_queue *rq; 2599 struct send_queue *sq; 2600 int i, err; 2601 2602 if (ring->rx_mini_pending || ring->rx_jumbo_pending) 2603 return -EINVAL; 2604 2605 rx_pending = virtqueue_get_vring_size(vi->rq[0].vq); 2606 tx_pending = virtqueue_get_vring_size(vi->sq[0].vq); 2607 2608 if (ring->rx_pending == rx_pending && 2609 ring->tx_pending == tx_pending) 2610 return 0; 2611 2612 if (ring->rx_pending > vi->rq[0].vq->num_max) 2613 return -EINVAL; 2614 2615 if (ring->tx_pending > vi->sq[0].vq->num_max) 2616 return -EINVAL; 2617 2618 for (i = 0; i < vi->max_queue_pairs; i++) { 2619 rq = vi->rq + i; 2620 sq = vi->sq + i; 2621 2622 if (ring->tx_pending != tx_pending) { 2623 err = virtnet_tx_resize(vi, sq, ring->tx_pending); 2624 if (err) 2625 return err; 2626 } 2627 2628 if (ring->rx_pending != rx_pending) { 2629 err = virtnet_rx_resize(vi, rq, ring->rx_pending); 2630 if (err) 2631 return err; 2632 } 2633 } 2634 2635 return 0; 2636 } 2637 2638 static bool virtnet_commit_rss_command(struct virtnet_info *vi) 2639 { 2640 struct net_device *dev = vi->dev; 2641 struct scatterlist sgs[4]; 2642 unsigned int sg_buf_size; 2643 2644 /* prepare sgs */ 2645 sg_init_table(sgs, 4); 2646 2647 sg_buf_size = offsetof(struct virtio_net_ctrl_rss, indirection_table); 2648 sg_set_buf(&sgs[0], &vi->ctrl->rss, sg_buf_size); 2649 2650 sg_buf_size = sizeof(uint16_t) * (vi->ctrl->rss.indirection_table_mask + 1); 2651 sg_set_buf(&sgs[1], vi->ctrl->rss.indirection_table, sg_buf_size); 2652 2653 sg_buf_size = offsetof(struct virtio_net_ctrl_rss, key) 2654 - offsetof(struct virtio_net_ctrl_rss, max_tx_vq); 2655 sg_set_buf(&sgs[2], &vi->ctrl->rss.max_tx_vq, sg_buf_size); 2656 2657 sg_buf_size = vi->rss_key_size; 2658 sg_set_buf(&sgs[3], vi->ctrl->rss.key, sg_buf_size); 2659 2660 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ, 2661 vi->has_rss ? VIRTIO_NET_CTRL_MQ_RSS_CONFIG 2662 : VIRTIO_NET_CTRL_MQ_HASH_CONFIG, sgs)) { 2663 dev_warn(&dev->dev, "VIRTIONET issue with committing RSS sgs\n"); 2664 return false; 2665 } 2666 return true; 2667 } 2668 2669 static void virtnet_init_default_rss(struct virtnet_info *vi) 2670 { 2671 u32 indir_val = 0; 2672 int i = 0; 2673 2674 vi->ctrl->rss.hash_types = vi->rss_hash_types_supported; 2675 vi->rss_hash_types_saved = vi->rss_hash_types_supported; 2676 vi->ctrl->rss.indirection_table_mask = vi->rss_indir_table_size 2677 ? 
vi->rss_indir_table_size - 1 : 0; 2678 vi->ctrl->rss.unclassified_queue = 0; 2679 2680 for (; i < vi->rss_indir_table_size; ++i) { 2681 indir_val = ethtool_rxfh_indir_default(i, vi->curr_queue_pairs); 2682 vi->ctrl->rss.indirection_table[i] = indir_val; 2683 } 2684 2685 vi->ctrl->rss.max_tx_vq = vi->curr_queue_pairs; 2686 vi->ctrl->rss.hash_key_length = vi->rss_key_size; 2687 2688 netdev_rss_key_fill(vi->ctrl->rss.key, vi->rss_key_size); 2689 } 2690 2691 static void virtnet_get_hashflow(const struct virtnet_info *vi, struct ethtool_rxnfc *info) 2692 { 2693 info->data = 0; 2694 switch (info->flow_type) { 2695 case TCP_V4_FLOW: 2696 if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_TCPv4) { 2697 info->data = RXH_IP_SRC | RXH_IP_DST | 2698 RXH_L4_B_0_1 | RXH_L4_B_2_3; 2699 } else if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv4) { 2700 info->data = RXH_IP_SRC | RXH_IP_DST; 2701 } 2702 break; 2703 case TCP_V6_FLOW: 2704 if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_TCPv6) { 2705 info->data = RXH_IP_SRC | RXH_IP_DST | 2706 RXH_L4_B_0_1 | RXH_L4_B_2_3; 2707 } else if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv6) { 2708 info->data = RXH_IP_SRC | RXH_IP_DST; 2709 } 2710 break; 2711 case UDP_V4_FLOW: 2712 if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_UDPv4) { 2713 info->data = RXH_IP_SRC | RXH_IP_DST | 2714 RXH_L4_B_0_1 | RXH_L4_B_2_3; 2715 } else if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv4) { 2716 info->data = RXH_IP_SRC | RXH_IP_DST; 2717 } 2718 break; 2719 case UDP_V6_FLOW: 2720 if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_UDPv6) { 2721 info->data = RXH_IP_SRC | RXH_IP_DST | 2722 RXH_L4_B_0_1 | RXH_L4_B_2_3; 2723 } else if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv6) { 2724 info->data = RXH_IP_SRC | RXH_IP_DST; 2725 } 2726 break; 2727 case IPV4_FLOW: 2728 if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv4) 2729 info->data = RXH_IP_SRC | RXH_IP_DST; 2730 2731 break; 2732 case IPV6_FLOW: 2733 if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv6) 2734 info->data = RXH_IP_SRC | RXH_IP_DST; 2735 2736 break; 2737 default: 2738 info->data = 0; 2739 break; 2740 } 2741 } 2742 2743 static bool virtnet_set_hashflow(struct virtnet_info *vi, struct ethtool_rxnfc *info) 2744 { 2745 u32 new_hashtypes = vi->rss_hash_types_saved; 2746 bool is_disable = info->data & RXH_DISCARD; 2747 bool is_l4 = info->data == (RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3); 2748 2749 /* supports only 'sd', 'sdfn' and 'r' */ 2750 if (!((info->data == (RXH_IP_SRC | RXH_IP_DST)) | is_l4 | is_disable)) 2751 return false; 2752 2753 switch (info->flow_type) { 2754 case TCP_V4_FLOW: 2755 new_hashtypes &= ~(VIRTIO_NET_RSS_HASH_TYPE_IPv4 | VIRTIO_NET_RSS_HASH_TYPE_TCPv4); 2756 if (!is_disable) 2757 new_hashtypes |= VIRTIO_NET_RSS_HASH_TYPE_IPv4 2758 | (is_l4 ? VIRTIO_NET_RSS_HASH_TYPE_TCPv4 : 0); 2759 break; 2760 case UDP_V4_FLOW: 2761 new_hashtypes &= ~(VIRTIO_NET_RSS_HASH_TYPE_IPv4 | VIRTIO_NET_RSS_HASH_TYPE_UDPv4); 2762 if (!is_disable) 2763 new_hashtypes |= VIRTIO_NET_RSS_HASH_TYPE_IPv4 2764 | (is_l4 ? VIRTIO_NET_RSS_HASH_TYPE_UDPv4 : 0); 2765 break; 2766 case IPV4_FLOW: 2767 new_hashtypes &= ~VIRTIO_NET_RSS_HASH_TYPE_IPv4; 2768 if (!is_disable) 2769 new_hashtypes = VIRTIO_NET_RSS_HASH_TYPE_IPv4; 2770 break; 2771 case TCP_V6_FLOW: 2772 new_hashtypes &= ~(VIRTIO_NET_RSS_HASH_TYPE_IPv6 | VIRTIO_NET_RSS_HASH_TYPE_TCPv6); 2773 if (!is_disable) 2774 new_hashtypes |= VIRTIO_NET_RSS_HASH_TYPE_IPv6 2775 | (is_l4 ? 
VIRTIO_NET_RSS_HASH_TYPE_TCPv6 : 0); 2776 break; 2777 case UDP_V6_FLOW: 2778 new_hashtypes &= ~(VIRTIO_NET_RSS_HASH_TYPE_IPv6 | VIRTIO_NET_RSS_HASH_TYPE_UDPv6); 2779 if (!is_disable) 2780 new_hashtypes |= VIRTIO_NET_RSS_HASH_TYPE_IPv6 2781 | (is_l4 ? VIRTIO_NET_RSS_HASH_TYPE_UDPv6 : 0); 2782 break; 2783 case IPV6_FLOW: 2784 new_hashtypes &= ~VIRTIO_NET_RSS_HASH_TYPE_IPv6; 2785 if (!is_disable) 2786 new_hashtypes = VIRTIO_NET_RSS_HASH_TYPE_IPv6; 2787 break; 2788 default: 2789 /* unsupported flow */ 2790 return false; 2791 } 2792 2793 /* if unsupported hashtype was set */ 2794 if (new_hashtypes != (new_hashtypes & vi->rss_hash_types_supported)) 2795 return false; 2796 2797 if (new_hashtypes != vi->rss_hash_types_saved) { 2798 vi->rss_hash_types_saved = new_hashtypes; 2799 vi->ctrl->rss.hash_types = vi->rss_hash_types_saved; 2800 if (vi->dev->features & NETIF_F_RXHASH) 2801 return virtnet_commit_rss_command(vi); 2802 } 2803 2804 return true; 2805 } 2806 2807 static void virtnet_get_drvinfo(struct net_device *dev, 2808 struct ethtool_drvinfo *info) 2809 { 2810 struct virtnet_info *vi = netdev_priv(dev); 2811 struct virtio_device *vdev = vi->vdev; 2812 2813 strscpy(info->driver, KBUILD_MODNAME, sizeof(info->driver)); 2814 strscpy(info->version, VIRTNET_DRIVER_VERSION, sizeof(info->version)); 2815 strscpy(info->bus_info, virtio_bus_name(vdev), sizeof(info->bus_info)); 2816 2817 } 2818 2819 /* TODO: Eliminate OOO packets during switching */ 2820 static int virtnet_set_channels(struct net_device *dev, 2821 struct ethtool_channels *channels) 2822 { 2823 struct virtnet_info *vi = netdev_priv(dev); 2824 u16 queue_pairs = channels->combined_count; 2825 int err; 2826 2827 /* We don't support separate rx/tx channels. 2828 * We don't allow setting 'other' channels. 2829 */ 2830 if (channels->rx_count || channels->tx_count || channels->other_count) 2831 return -EINVAL; 2832 2833 if (queue_pairs > vi->max_queue_pairs || queue_pairs == 0) 2834 return -EINVAL; 2835 2836 /* For now we don't support modifying channels while XDP is loaded; 2837 * also, when XDP is loaded all RX queues have XDP programs, so we only 2838 * need to check a single RX queue.
2839 */ 2840 if (vi->rq[0].xdp_prog) 2841 return -EINVAL; 2842 2843 cpus_read_lock(); 2844 err = _virtnet_set_queues(vi, queue_pairs); 2845 if (err) { 2846 cpus_read_unlock(); 2847 goto err; 2848 } 2849 virtnet_set_affinity(vi); 2850 cpus_read_unlock(); 2851 2852 netif_set_real_num_tx_queues(dev, queue_pairs); 2853 netif_set_real_num_rx_queues(dev, queue_pairs); 2854 err: 2855 return err; 2856 } 2857 2858 static void virtnet_get_strings(struct net_device *dev, u32 stringset, u8 *data) 2859 { 2860 struct virtnet_info *vi = netdev_priv(dev); 2861 unsigned int i, j; 2862 u8 *p = data; 2863 2864 switch (stringset) { 2865 case ETH_SS_STATS: 2866 for (i = 0; i < vi->curr_queue_pairs; i++) { 2867 for (j = 0; j < VIRTNET_RQ_STATS_LEN; j++) 2868 ethtool_sprintf(&p, "rx_queue_%u_%s", i, 2869 virtnet_rq_stats_desc[j].desc); 2870 } 2871 2872 for (i = 0; i < vi->curr_queue_pairs; i++) { 2873 for (j = 0; j < VIRTNET_SQ_STATS_LEN; j++) 2874 ethtool_sprintf(&p, "tx_queue_%u_%s", i, 2875 virtnet_sq_stats_desc[j].desc); 2876 } 2877 break; 2878 } 2879 } 2880 2881 static int virtnet_get_sset_count(struct net_device *dev, int sset) 2882 { 2883 struct virtnet_info *vi = netdev_priv(dev); 2884 2885 switch (sset) { 2886 case ETH_SS_STATS: 2887 return vi->curr_queue_pairs * (VIRTNET_RQ_STATS_LEN + 2888 VIRTNET_SQ_STATS_LEN); 2889 default: 2890 return -EOPNOTSUPP; 2891 } 2892 } 2893 2894 static void virtnet_get_ethtool_stats(struct net_device *dev, 2895 struct ethtool_stats *stats, u64 *data) 2896 { 2897 struct virtnet_info *vi = netdev_priv(dev); 2898 unsigned int idx = 0, start, i, j; 2899 const u8 *stats_base; 2900 size_t offset; 2901 2902 for (i = 0; i < vi->curr_queue_pairs; i++) { 2903 struct receive_queue *rq = &vi->rq[i]; 2904 2905 stats_base = (u8 *)&rq->stats; 2906 do { 2907 start = u64_stats_fetch_begin(&rq->stats.syncp); 2908 for (j = 0; j < VIRTNET_RQ_STATS_LEN; j++) { 2909 offset = virtnet_rq_stats_desc[j].offset; 2910 data[idx + j] = *(u64 *)(stats_base + offset); 2911 } 2912 } while (u64_stats_fetch_retry(&rq->stats.syncp, start)); 2913 idx += VIRTNET_RQ_STATS_LEN; 2914 } 2915 2916 for (i = 0; i < vi->curr_queue_pairs; i++) { 2917 struct send_queue *sq = &vi->sq[i]; 2918 2919 stats_base = (u8 *)&sq->stats; 2920 do { 2921 start = u64_stats_fetch_begin(&sq->stats.syncp); 2922 for (j = 0; j < VIRTNET_SQ_STATS_LEN; j++) { 2923 offset = virtnet_sq_stats_desc[j].offset; 2924 data[idx + j] = *(u64 *)(stats_base + offset); 2925 } 2926 } while (u64_stats_fetch_retry(&sq->stats.syncp, start)); 2927 idx += VIRTNET_SQ_STATS_LEN; 2928 } 2929 } 2930 2931 static void virtnet_get_channels(struct net_device *dev, 2932 struct ethtool_channels *channels) 2933 { 2934 struct virtnet_info *vi = netdev_priv(dev); 2935 2936 channels->combined_count = vi->curr_queue_pairs; 2937 channels->max_combined = vi->max_queue_pairs; 2938 channels->max_other = 0; 2939 channels->rx_count = 0; 2940 channels->tx_count = 0; 2941 channels->other_count = 0; 2942 } 2943 2944 static int virtnet_set_link_ksettings(struct net_device *dev, 2945 const struct ethtool_link_ksettings *cmd) 2946 { 2947 struct virtnet_info *vi = netdev_priv(dev); 2948 2949 return ethtool_virtdev_set_link_ksettings(dev, cmd, 2950 &vi->speed, &vi->duplex); 2951 } 2952 2953 static int virtnet_get_link_ksettings(struct net_device *dev, 2954 struct ethtool_link_ksettings *cmd) 2955 { 2956 struct virtnet_info *vi = netdev_priv(dev); 2957 2958 cmd->base.speed = vi->speed; 2959 cmd->base.duplex = vi->duplex; 2960 cmd->base.port = PORT_OTHER; 2961 2962 return 0; 2963 } 2964 
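/* Push the ethtool tx/rx coalescing parameters to the device over the control virtqueue (VIRTIO_NET_CTRL_NOTF_COAL_TX_SET / _RX_SET) and cache them in vi so virtnet_get_coalesce() can report them back. As a hypothetical example, "ethtool -C eth0 rx-usecs 10 tx-frames 64" ends up here when VIRTIO_NET_F_NOTF_COAL has been negotiated. */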
2965 static int virtnet_send_notf_coal_cmds(struct virtnet_info *vi, 2966 struct ethtool_coalesce *ec) 2967 { 2968 struct scatterlist sgs_tx, sgs_rx; 2969 struct virtio_net_ctrl_coal_tx coal_tx; 2970 struct virtio_net_ctrl_coal_rx coal_rx; 2971 2972 coal_tx.tx_usecs = cpu_to_le32(ec->tx_coalesce_usecs); 2973 coal_tx.tx_max_packets = cpu_to_le32(ec->tx_max_coalesced_frames); 2974 sg_init_one(&sgs_tx, &coal_tx, sizeof(coal_tx)); 2975 2976 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_NOTF_COAL, 2977 VIRTIO_NET_CTRL_NOTF_COAL_TX_SET, 2978 &sgs_tx)) 2979 return -EINVAL; 2980 2981 /* Save parameters */ 2982 vi->tx_usecs = ec->tx_coalesce_usecs; 2983 vi->tx_max_packets = ec->tx_max_coalesced_frames; 2984 2985 coal_rx.rx_usecs = cpu_to_le32(ec->rx_coalesce_usecs); 2986 coal_rx.rx_max_packets = cpu_to_le32(ec->rx_max_coalesced_frames); 2987 sg_init_one(&sgs_rx, &coal_rx, sizeof(coal_rx)); 2988 2989 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_NOTF_COAL, 2990 VIRTIO_NET_CTRL_NOTF_COAL_RX_SET, 2991 &sgs_rx)) 2992 return -EINVAL; 2993 2994 /* Save parameters */ 2995 vi->rx_usecs = ec->rx_coalesce_usecs; 2996 vi->rx_max_packets = ec->rx_max_coalesced_frames; 2997 2998 return 0; 2999 } 3000 3001 static int virtnet_coal_params_supported(struct ethtool_coalesce *ec) 3002 { 3003 /* usecs coalescing is supported only if VIRTIO_NET_F_NOTF_COAL 3004 * feature is negotiated. 3005 */ 3006 if (ec->rx_coalesce_usecs || ec->tx_coalesce_usecs) 3007 return -EOPNOTSUPP; 3008 3009 if (ec->tx_max_coalesced_frames > 1 || 3010 ec->rx_max_coalesced_frames != 1) 3011 return -EINVAL; 3012 3013 return 0; 3014 } 3015 3016 static int virtnet_set_coalesce(struct net_device *dev, 3017 struct ethtool_coalesce *ec, 3018 struct kernel_ethtool_coalesce *kernel_coal, 3019 struct netlink_ext_ack *extack) 3020 { 3021 struct virtnet_info *vi = netdev_priv(dev); 3022 int ret, i, napi_weight; 3023 bool update_napi = false; 3024 3025 /* Can't change NAPI weight if the link is up */ 3026 napi_weight = ec->tx_max_coalesced_frames ? 
NAPI_POLL_WEIGHT : 0; 3027 if (napi_weight ^ vi->sq[0].napi.weight) { 3028 if (dev->flags & IFF_UP) 3029 return -EBUSY; 3030 else 3031 update_napi = true; 3032 } 3033 3034 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_NOTF_COAL)) 3035 ret = virtnet_send_notf_coal_cmds(vi, ec); 3036 else 3037 ret = virtnet_coal_params_supported(ec); 3038 3039 if (ret) 3040 return ret; 3041 3042 if (update_napi) { 3043 for (i = 0; i < vi->max_queue_pairs; i++) 3044 vi->sq[i].napi.weight = napi_weight; 3045 } 3046 3047 return ret; 3048 } 3049 3050 static int virtnet_get_coalesce(struct net_device *dev, 3051 struct ethtool_coalesce *ec, 3052 struct kernel_ethtool_coalesce *kernel_coal, 3053 struct netlink_ext_ack *extack) 3054 { 3055 struct virtnet_info *vi = netdev_priv(dev); 3056 3057 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_NOTF_COAL)) { 3058 ec->rx_coalesce_usecs = vi->rx_usecs; 3059 ec->tx_coalesce_usecs = vi->tx_usecs; 3060 ec->tx_max_coalesced_frames = vi->tx_max_packets; 3061 ec->rx_max_coalesced_frames = vi->rx_max_packets; 3062 } else { 3063 ec->rx_max_coalesced_frames = 1; 3064 3065 if (vi->sq[0].napi.weight) 3066 ec->tx_max_coalesced_frames = 1; 3067 } 3068 3069 return 0; 3070 } 3071 3072 static void virtnet_init_settings(struct net_device *dev) 3073 { 3074 struct virtnet_info *vi = netdev_priv(dev); 3075 3076 vi->speed = SPEED_UNKNOWN; 3077 vi->duplex = DUPLEX_UNKNOWN; 3078 } 3079 3080 static void virtnet_update_settings(struct virtnet_info *vi) 3081 { 3082 u32 speed; 3083 u8 duplex; 3084 3085 if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_SPEED_DUPLEX)) 3086 return; 3087 3088 virtio_cread_le(vi->vdev, struct virtio_net_config, speed, &speed); 3089 3090 if (ethtool_validate_speed(speed)) 3091 vi->speed = speed; 3092 3093 virtio_cread_le(vi->vdev, struct virtio_net_config, duplex, &duplex); 3094 3095 if (ethtool_validate_duplex(duplex)) 3096 vi->duplex = duplex; 3097 } 3098 3099 static u32 virtnet_get_rxfh_key_size(struct net_device *dev) 3100 { 3101 return ((struct virtnet_info *)netdev_priv(dev))->rss_key_size; 3102 } 3103 3104 static u32 virtnet_get_rxfh_indir_size(struct net_device *dev) 3105 { 3106 return ((struct virtnet_info *)netdev_priv(dev))->rss_indir_table_size; 3107 } 3108 3109 static int virtnet_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, u8 *hfunc) 3110 { 3111 struct virtnet_info *vi = netdev_priv(dev); 3112 int i; 3113 3114 if (indir) { 3115 for (i = 0; i < vi->rss_indir_table_size; ++i) 3116 indir[i] = vi->ctrl->rss.indirection_table[i]; 3117 } 3118 3119 if (key) 3120 memcpy(key, vi->ctrl->rss.key, vi->rss_key_size); 3121 3122 if (hfunc) 3123 *hfunc = ETH_RSS_HASH_TOP; 3124 3125 return 0; 3126 } 3127 3128 static int virtnet_set_rxfh(struct net_device *dev, const u32 *indir, const u8 *key, const u8 hfunc) 3129 { 3130 struct virtnet_info *vi = netdev_priv(dev); 3131 int i; 3132 3133 if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP) 3134 return -EOPNOTSUPP; 3135 3136 if (indir) { 3137 for (i = 0; i < vi->rss_indir_table_size; ++i) 3138 vi->ctrl->rss.indirection_table[i] = indir[i]; 3139 } 3140 if (key) 3141 memcpy(vi->ctrl->rss.key, key, vi->rss_key_size); 3142 3143 virtnet_commit_rss_command(vi); 3144 3145 return 0; 3146 } 3147 3148 static int virtnet_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info, u32 *rule_locs) 3149 { 3150 struct virtnet_info *vi = netdev_priv(dev); 3151 int rc = 0; 3152 3153 switch (info->cmd) { 3154 case ETHTOOL_GRXRINGS: 3155 info->data = vi->curr_queue_pairs; 3156 break; 3157 case ETHTOOL_GRXFH: 3158 
virtnet_get_hashflow(vi, info); 3159 break; 3160 default: 3161 rc = -EOPNOTSUPP; 3162 } 3163 3164 return rc; 3165 } 3166 3167 static int virtnet_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info) 3168 { 3169 struct virtnet_info *vi = netdev_priv(dev); 3170 int rc = 0; 3171 3172 switch (info->cmd) { 3173 case ETHTOOL_SRXFH: 3174 if (!virtnet_set_hashflow(vi, info)) 3175 rc = -EINVAL; 3176 3177 break; 3178 default: 3179 rc = -EOPNOTSUPP; 3180 } 3181 3182 return rc; 3183 } 3184 3185 static const struct ethtool_ops virtnet_ethtool_ops = { 3186 .supported_coalesce_params = ETHTOOL_COALESCE_MAX_FRAMES | 3187 ETHTOOL_COALESCE_USECS, 3188 .get_drvinfo = virtnet_get_drvinfo, 3189 .get_link = ethtool_op_get_link, 3190 .get_ringparam = virtnet_get_ringparam, 3191 .set_ringparam = virtnet_set_ringparam, 3192 .get_strings = virtnet_get_strings, 3193 .get_sset_count = virtnet_get_sset_count, 3194 .get_ethtool_stats = virtnet_get_ethtool_stats, 3195 .set_channels = virtnet_set_channels, 3196 .get_channels = virtnet_get_channels, 3197 .get_ts_info = ethtool_op_get_ts_info, 3198 .get_link_ksettings = virtnet_get_link_ksettings, 3199 .set_link_ksettings = virtnet_set_link_ksettings, 3200 .set_coalesce = virtnet_set_coalesce, 3201 .get_coalesce = virtnet_get_coalesce, 3202 .get_rxfh_key_size = virtnet_get_rxfh_key_size, 3203 .get_rxfh_indir_size = virtnet_get_rxfh_indir_size, 3204 .get_rxfh = virtnet_get_rxfh, 3205 .set_rxfh = virtnet_set_rxfh, 3206 .get_rxnfc = virtnet_get_rxnfc, 3207 .set_rxnfc = virtnet_set_rxnfc, 3208 }; 3209 3210 static void virtnet_freeze_down(struct virtio_device *vdev) 3211 { 3212 struct virtnet_info *vi = vdev->priv; 3213 3214 /* Make sure no work handler is accessing the device */ 3215 flush_work(&vi->config_work); 3216 3217 netif_tx_lock_bh(vi->dev); 3218 netif_device_detach(vi->dev); 3219 netif_tx_unlock_bh(vi->dev); 3220 if (netif_running(vi->dev)) 3221 virtnet_close(vi->dev); 3222 } 3223 3224 static int init_vqs(struct virtnet_info *vi); 3225 3226 static int virtnet_restore_up(struct virtio_device *vdev) 3227 { 3228 struct virtnet_info *vi = vdev->priv; 3229 int err; 3230 3231 err = init_vqs(vi); 3232 if (err) 3233 return err; 3234 3235 virtio_device_ready(vdev); 3236 3237 enable_delayed_refill(vi); 3238 3239 if (netif_running(vi->dev)) { 3240 err = virtnet_open(vi->dev); 3241 if (err) 3242 return err; 3243 } 3244 3245 netif_tx_lock_bh(vi->dev); 3246 netif_device_attach(vi->dev); 3247 netif_tx_unlock_bh(vi->dev); 3248 return err; 3249 } 3250 3251 static int virtnet_set_guest_offloads(struct virtnet_info *vi, u64 offloads) 3252 { 3253 struct scatterlist sg; 3254 vi->ctrl->offloads = cpu_to_virtio64(vi->vdev, offloads); 3255 3256 sg_init_one(&sg, &vi->ctrl->offloads, sizeof(vi->ctrl->offloads)); 3257 3258 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_GUEST_OFFLOADS, 3259 VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET, &sg)) { 3260 dev_warn(&vi->dev->dev, "Fail to set guest offload.\n"); 3261 return -EINVAL; 3262 } 3263 3264 return 0; 3265 } 3266 3267 static int virtnet_clear_guest_offloads(struct virtnet_info *vi) 3268 { 3269 u64 offloads = 0; 3270 3271 if (!vi->guest_offloads) 3272 return 0; 3273 3274 return virtnet_set_guest_offloads(vi, offloads); 3275 } 3276 3277 static int virtnet_restore_guest_offloads(struct virtnet_info *vi) 3278 { 3279 u64 offloads = vi->guest_offloads; 3280 3281 if (!vi->guest_offloads) 3282 return 0; 3283 3284 return virtnet_set_guest_offloads(vi, offloads); 3285 } 3286 3287 static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog, 3288 
struct netlink_ext_ack *extack) 3289 { 3290 unsigned int room = SKB_DATA_ALIGN(VIRTIO_XDP_HEADROOM + 3291 sizeof(struct skb_shared_info)); 3292 unsigned int max_sz = PAGE_SIZE - room - ETH_HLEN; 3293 struct virtnet_info *vi = netdev_priv(dev); 3294 struct bpf_prog *old_prog; 3295 u16 xdp_qp = 0, curr_qp; 3296 int i, err; 3297 3298 if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS) 3299 && (virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO4) || 3300 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO6) || 3301 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_ECN) || 3302 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_UFO) || 3303 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_CSUM) || 3304 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_USO4) || 3305 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_USO6))) { 3306 NL_SET_ERR_MSG_MOD(extack, "Can't set XDP while host is implementing GRO_HW/CSUM, disable GRO_HW/CSUM first"); 3307 return -EOPNOTSUPP; 3308 } 3309 3310 if (vi->mergeable_rx_bufs && !vi->any_header_sg) { 3311 NL_SET_ERR_MSG_MOD(extack, "XDP expects header/data in single page, any_header_sg required"); 3312 return -EINVAL; 3313 } 3314 3315 if (prog && !prog->aux->xdp_has_frags && dev->mtu > max_sz) { 3316 NL_SET_ERR_MSG_MOD(extack, "MTU too large to enable XDP without frags"); 3317 netdev_warn(dev, "single-buffer XDP requires MTU less than %u\n", max_sz); 3318 return -EINVAL; 3319 } 3320 3321 curr_qp = vi->curr_queue_pairs - vi->xdp_queue_pairs; 3322 if (prog) 3323 xdp_qp = nr_cpu_ids; 3324 3325 /* XDP requires extra queues for XDP_TX */ 3326 if (curr_qp + xdp_qp > vi->max_queue_pairs) { 3327 netdev_warn_once(dev, "XDP request %i queues but max is %i. XDP_TX and XDP_REDIRECT will operate in a slower locked tx mode.\n", 3328 curr_qp + xdp_qp, vi->max_queue_pairs); 3329 xdp_qp = 0; 3330 } 3331 3332 old_prog = rtnl_dereference(vi->rq[0].xdp_prog); 3333 if (!prog && !old_prog) 3334 return 0; 3335 3336 if (prog) 3337 bpf_prog_add(prog, vi->max_queue_pairs - 1); 3338 3339 /* Make sure NAPI is not using any XDP TX queues for RX. 
*/ 3340 if (netif_running(dev)) { 3341 for (i = 0; i < vi->max_queue_pairs; i++) { 3342 napi_disable(&vi->rq[i].napi); 3343 virtnet_napi_tx_disable(&vi->sq[i].napi); 3344 } 3345 } 3346 3347 if (!prog) { 3348 for (i = 0; i < vi->max_queue_pairs; i++) { 3349 rcu_assign_pointer(vi->rq[i].xdp_prog, prog); 3350 if (i == 0) 3351 virtnet_restore_guest_offloads(vi); 3352 } 3353 synchronize_net(); 3354 } 3355 3356 err = _virtnet_set_queues(vi, curr_qp + xdp_qp); 3357 if (err) 3358 goto err; 3359 netif_set_real_num_rx_queues(dev, curr_qp + xdp_qp); 3360 vi->xdp_queue_pairs = xdp_qp; 3361 3362 if (prog) { 3363 vi->xdp_enabled = true; 3364 for (i = 0; i < vi->max_queue_pairs; i++) { 3365 rcu_assign_pointer(vi->rq[i].xdp_prog, prog); 3366 if (i == 0 && !old_prog) 3367 virtnet_clear_guest_offloads(vi); 3368 } 3369 if (!old_prog) 3370 xdp_features_set_redirect_target(dev, true); 3371 } else { 3372 xdp_features_clear_redirect_target(dev); 3373 vi->xdp_enabled = false; 3374 } 3375 3376 for (i = 0; i < vi->max_queue_pairs; i++) { 3377 if (old_prog) 3378 bpf_prog_put(old_prog); 3379 if (netif_running(dev)) { 3380 virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi); 3381 virtnet_napi_tx_enable(vi, vi->sq[i].vq, 3382 &vi->sq[i].napi); 3383 } 3384 } 3385 3386 return 0; 3387 3388 err: 3389 if (!prog) { 3390 virtnet_clear_guest_offloads(vi); 3391 for (i = 0; i < vi->max_queue_pairs; i++) 3392 rcu_assign_pointer(vi->rq[i].xdp_prog, old_prog); 3393 } 3394 3395 if (netif_running(dev)) { 3396 for (i = 0; i < vi->max_queue_pairs; i++) { 3397 virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi); 3398 virtnet_napi_tx_enable(vi, vi->sq[i].vq, 3399 &vi->sq[i].napi); 3400 } 3401 } 3402 if (prog) 3403 bpf_prog_sub(prog, vi->max_queue_pairs - 1); 3404 return err; 3405 } 3406 3407 static int virtnet_xdp(struct net_device *dev, struct netdev_bpf *xdp) 3408 { 3409 switch (xdp->command) { 3410 case XDP_SETUP_PROG: 3411 return virtnet_xdp_set(dev, xdp->prog, xdp->extack); 3412 default: 3413 return -EINVAL; 3414 } 3415 } 3416 3417 static int virtnet_get_phys_port_name(struct net_device *dev, char *buf, 3418 size_t len) 3419 { 3420 struct virtnet_info *vi = netdev_priv(dev); 3421 int ret; 3422 3423 if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_STANDBY)) 3424 return -EOPNOTSUPP; 3425 3426 ret = snprintf(buf, len, "sby"); 3427 if (ret >= len) 3428 return -EOPNOTSUPP; 3429 3430 return 0; 3431 } 3432 3433 static int virtnet_set_features(struct net_device *dev, 3434 netdev_features_t features) 3435 { 3436 struct virtnet_info *vi = netdev_priv(dev); 3437 u64 offloads; 3438 int err; 3439 3440 if ((dev->features ^ features) & NETIF_F_GRO_HW) { 3441 if (vi->xdp_enabled) 3442 return -EBUSY; 3443 3444 if (features & NETIF_F_GRO_HW) 3445 offloads = vi->guest_offloads_capable; 3446 else 3447 offloads = vi->guest_offloads_capable & 3448 ~GUEST_OFFLOAD_GRO_HW_MASK; 3449 3450 err = virtnet_set_guest_offloads(vi, offloads); 3451 if (err) 3452 return err; 3453 vi->guest_offloads = offloads; 3454 } 3455 3456 if ((dev->features ^ features) & NETIF_F_RXHASH) { 3457 if (features & NETIF_F_RXHASH) 3458 vi->ctrl->rss.hash_types = vi->rss_hash_types_saved; 3459 else 3460 vi->ctrl->rss.hash_types = VIRTIO_NET_HASH_REPORT_NONE; 3461 3462 if (!virtnet_commit_rss_command(vi)) 3463 return -EINVAL; 3464 } 3465 3466 return 0; 3467 } 3468 3469 static void virtnet_tx_timeout(struct net_device *dev, unsigned int txqueue) 3470 { 3471 struct virtnet_info *priv = netdev_priv(dev); 3472 struct send_queue *sq = &priv->sq[txqueue]; 3473 struct netdev_queue *txq = 
netdev_get_tx_queue(dev, txqueue); 3474 3475 u64_stats_update_begin(&sq->stats.syncp); 3476 sq->stats.tx_timeouts++; 3477 u64_stats_update_end(&sq->stats.syncp); 3478 3479 netdev_err(dev, "TX timeout on queue: %u, sq: %s, vq: 0x%x, name: %s, %u usecs ago\n", 3480 txqueue, sq->name, sq->vq->index, sq->vq->name, 3481 jiffies_to_usecs(jiffies - READ_ONCE(txq->trans_start))); 3482 } 3483 3484 static const struct net_device_ops virtnet_netdev = { 3485 .ndo_open = virtnet_open, 3486 .ndo_stop = virtnet_close, 3487 .ndo_start_xmit = start_xmit, 3488 .ndo_validate_addr = eth_validate_addr, 3489 .ndo_set_mac_address = virtnet_set_mac_address, 3490 .ndo_set_rx_mode = virtnet_set_rx_mode, 3491 .ndo_get_stats64 = virtnet_stats, 3492 .ndo_vlan_rx_add_vid = virtnet_vlan_rx_add_vid, 3493 .ndo_vlan_rx_kill_vid = virtnet_vlan_rx_kill_vid, 3494 .ndo_bpf = virtnet_xdp, 3495 .ndo_xdp_xmit = virtnet_xdp_xmit, 3496 .ndo_features_check = passthru_features_check, 3497 .ndo_get_phys_port_name = virtnet_get_phys_port_name, 3498 .ndo_set_features = virtnet_set_features, 3499 .ndo_tx_timeout = virtnet_tx_timeout, 3500 }; 3501 3502 static void virtnet_config_changed_work(struct work_struct *work) 3503 { 3504 struct virtnet_info *vi = 3505 container_of(work, struct virtnet_info, config_work); 3506 u16 v; 3507 3508 if (virtio_cread_feature(vi->vdev, VIRTIO_NET_F_STATUS, 3509 struct virtio_net_config, status, &v) < 0) 3510 return; 3511 3512 if (v & VIRTIO_NET_S_ANNOUNCE) { 3513 netdev_notify_peers(vi->dev); 3514 virtnet_ack_link_announce(vi); 3515 } 3516 3517 /* Ignore unknown (future) status bits */ 3518 v &= VIRTIO_NET_S_LINK_UP; 3519 3520 if (vi->status == v) 3521 return; 3522 3523 vi->status = v; 3524 3525 if (vi->status & VIRTIO_NET_S_LINK_UP) { 3526 virtnet_update_settings(vi); 3527 netif_carrier_on(vi->dev); 3528 netif_tx_wake_all_queues(vi->dev); 3529 } else { 3530 netif_carrier_off(vi->dev); 3531 netif_tx_stop_all_queues(vi->dev); 3532 } 3533 } 3534 3535 static void virtnet_config_changed(struct virtio_device *vdev) 3536 { 3537 struct virtnet_info *vi = vdev->priv; 3538 3539 schedule_work(&vi->config_work); 3540 } 3541 3542 static void virtnet_free_queues(struct virtnet_info *vi) 3543 { 3544 int i; 3545 3546 for (i = 0; i < vi->max_queue_pairs; i++) { 3547 __netif_napi_del(&vi->rq[i].napi); 3548 __netif_napi_del(&vi->sq[i].napi); 3549 } 3550 3551 /* We called __netif_napi_del(), 3552 * we need to respect an RCU grace period before freeing vi->rq 3553 */ 3554 synchronize_net(); 3555 3556 kfree(vi->rq); 3557 kfree(vi->sq); 3558 kfree(vi->ctrl); 3559 } 3560 3561 static void _free_receive_bufs(struct virtnet_info *vi) 3562 { 3563 struct bpf_prog *old_prog; 3564 int i; 3565 3566 for (i = 0; i < vi->max_queue_pairs; i++) { 3567 while (vi->rq[i].pages) 3568 __free_pages(get_a_page(&vi->rq[i], GFP_KERNEL), 0); 3569 3570 old_prog = rtnl_dereference(vi->rq[i].xdp_prog); 3571 RCU_INIT_POINTER(vi->rq[i].xdp_prog, NULL); 3572 if (old_prog) 3573 bpf_prog_put(old_prog); 3574 } 3575 } 3576 3577 static void free_receive_bufs(struct virtnet_info *vi) 3578 { 3579 rtnl_lock(); 3580 _free_receive_bufs(vi); 3581 rtnl_unlock(); 3582 } 3583 3584 static void free_receive_page_frags(struct virtnet_info *vi) 3585 { 3586 int i; 3587 for (i = 0; i < vi->max_queue_pairs; i++) 3588 if (vi->rq[i].alloc_frag.page) 3589 put_page(vi->rq[i].alloc_frag.page); 3590 } 3591 3592 static void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf) 3593 { 3594 if (!is_xdp_frame(buf)) 3595 dev_kfree_skb(buf); 3596 else 3597 
xdp_return_frame(ptr_to_xdp(buf)); 3598 } 3599 3600 static void virtnet_rq_free_unused_buf(struct virtqueue *vq, void *buf) 3601 { 3602 struct virtnet_info *vi = vq->vdev->priv; 3603 int i = vq2rxq(vq); 3604 3605 if (vi->mergeable_rx_bufs) 3606 put_page(virt_to_head_page(buf)); 3607 else if (vi->big_packets) 3608 give_pages(&vi->rq[i], buf); 3609 else 3610 put_page(virt_to_head_page(buf)); 3611 } 3612 3613 static void free_unused_bufs(struct virtnet_info *vi) 3614 { 3615 void *buf; 3616 int i; 3617 3618 for (i = 0; i < vi->max_queue_pairs; i++) { 3619 struct virtqueue *vq = vi->sq[i].vq; 3620 while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) 3621 virtnet_sq_free_unused_buf(vq, buf); 3622 cond_resched(); 3623 } 3624 3625 for (i = 0; i < vi->max_queue_pairs; i++) { 3626 struct virtqueue *vq = vi->rq[i].vq; 3627 while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) 3628 virtnet_rq_free_unused_buf(vq, buf); 3629 cond_resched(); 3630 } 3631 } 3632 3633 static void virtnet_del_vqs(struct virtnet_info *vi) 3634 { 3635 struct virtio_device *vdev = vi->vdev; 3636 3637 virtnet_clean_affinity(vi); 3638 3639 vdev->config->del_vqs(vdev); 3640 3641 virtnet_free_queues(vi); 3642 } 3643 3644 /* How large should a single buffer be so a queue full of these can fit at 3645 * least one full packet? 3646 * Logic below assumes the mergeable buffer header is used. 3647 */ 3648 static unsigned int mergeable_min_buf_len(struct virtnet_info *vi, struct virtqueue *vq) 3649 { 3650 const unsigned int hdr_len = vi->hdr_len; 3651 unsigned int rq_size = virtqueue_get_vring_size(vq); 3652 unsigned int packet_len = vi->big_packets ? IP_MAX_MTU : vi->dev->max_mtu; 3653 unsigned int buf_len = hdr_len + ETH_HLEN + VLAN_HLEN + packet_len; 3654 unsigned int min_buf_len = DIV_ROUND_UP(buf_len, rq_size); 3655 3656 return max(max(min_buf_len, hdr_len) - hdr_len, 3657 (unsigned int)GOOD_PACKET_LEN); 3658 } 3659 3660 static int virtnet_find_vqs(struct virtnet_info *vi) 3661 { 3662 vq_callback_t **callbacks; 3663 struct virtqueue **vqs; 3664 int ret = -ENOMEM; 3665 int i, total_vqs; 3666 const char **names; 3667 bool *ctx; 3668 3669 /* We expect 1 RX virtqueue followed by 1 TX virtqueue, followed by 3670 * possible N-1 RX/TX queue pairs used in multiqueue mode, followed by 3671 * possible control vq. 
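* For example, with two queue pairs and a control vq this gives five vqs in the order: rx0, tx0, rx1, tx1, ctrl.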
3672 */ 3673 total_vqs = vi->max_queue_pairs * 2 + 3674 virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ); 3675 3676 /* Allocate space for find_vqs parameters */ 3677 vqs = kcalloc(total_vqs, sizeof(*vqs), GFP_KERNEL); 3678 if (!vqs) 3679 goto err_vq; 3680 callbacks = kmalloc_array(total_vqs, sizeof(*callbacks), GFP_KERNEL); 3681 if (!callbacks) 3682 goto err_callback; 3683 names = kmalloc_array(total_vqs, sizeof(*names), GFP_KERNEL); 3684 if (!names) 3685 goto err_names; 3686 if (!vi->big_packets || vi->mergeable_rx_bufs) { 3687 ctx = kcalloc(total_vqs, sizeof(*ctx), GFP_KERNEL); 3688 if (!ctx) 3689 goto err_ctx; 3690 } else { 3691 ctx = NULL; 3692 } 3693 3694 /* Parameters for control virtqueue, if any */ 3695 if (vi->has_cvq) { 3696 callbacks[total_vqs - 1] = NULL; 3697 names[total_vqs - 1] = "control"; 3698 } 3699 3700 /* Allocate/initialize parameters for send/receive virtqueues */ 3701 for (i = 0; i < vi->max_queue_pairs; i++) { 3702 callbacks[rxq2vq(i)] = skb_recv_done; 3703 callbacks[txq2vq(i)] = skb_xmit_done; 3704 sprintf(vi->rq[i].name, "input.%d", i); 3705 sprintf(vi->sq[i].name, "output.%d", i); 3706 names[rxq2vq(i)] = vi->rq[i].name; 3707 names[txq2vq(i)] = vi->sq[i].name; 3708 if (ctx) 3709 ctx[rxq2vq(i)] = true; 3710 } 3711 3712 ret = virtio_find_vqs_ctx(vi->vdev, total_vqs, vqs, callbacks, 3713 names, ctx, NULL); 3714 if (ret) 3715 goto err_find; 3716 3717 if (vi->has_cvq) { 3718 vi->cvq = vqs[total_vqs - 1]; 3719 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VLAN)) 3720 vi->dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; 3721 } 3722 3723 for (i = 0; i < vi->max_queue_pairs; i++) { 3724 vi->rq[i].vq = vqs[rxq2vq(i)]; 3725 vi->rq[i].min_buf_len = mergeable_min_buf_len(vi, vi->rq[i].vq); 3726 vi->sq[i].vq = vqs[txq2vq(i)]; 3727 } 3728 3729 /* run here: ret == 0. */ 3730 3731 3732 err_find: 3733 kfree(ctx); 3734 err_ctx: 3735 kfree(names); 3736 err_names: 3737 kfree(callbacks); 3738 err_callback: 3739 kfree(vqs); 3740 err_vq: 3741 return ret; 3742 } 3743 3744 static int virtnet_alloc_queues(struct virtnet_info *vi) 3745 { 3746 int i; 3747 3748 if (vi->has_cvq) { 3749 vi->ctrl = kzalloc(sizeof(*vi->ctrl), GFP_KERNEL); 3750 if (!vi->ctrl) 3751 goto err_ctrl; 3752 } else { 3753 vi->ctrl = NULL; 3754 } 3755 vi->sq = kcalloc(vi->max_queue_pairs, sizeof(*vi->sq), GFP_KERNEL); 3756 if (!vi->sq) 3757 goto err_sq; 3758 vi->rq = kcalloc(vi->max_queue_pairs, sizeof(*vi->rq), GFP_KERNEL); 3759 if (!vi->rq) 3760 goto err_rq; 3761 3762 INIT_DELAYED_WORK(&vi->refill, refill_work); 3763 for (i = 0; i < vi->max_queue_pairs; i++) { 3764 vi->rq[i].pages = NULL; 3765 netif_napi_add_weight(vi->dev, &vi->rq[i].napi, virtnet_poll, 3766 napi_weight); 3767 netif_napi_add_tx_weight(vi->dev, &vi->sq[i].napi, 3768 virtnet_poll_tx, 3769 napi_tx ? 
napi_weight : 0); 3770 3771 sg_init_table(vi->rq[i].sg, ARRAY_SIZE(vi->rq[i].sg)); 3772 ewma_pkt_len_init(&vi->rq[i].mrg_avg_pkt_len); 3773 sg_init_table(vi->sq[i].sg, ARRAY_SIZE(vi->sq[i].sg)); 3774 3775 u64_stats_init(&vi->rq[i].stats.syncp); 3776 u64_stats_init(&vi->sq[i].stats.syncp); 3777 } 3778 3779 return 0; 3780 3781 err_rq: 3782 kfree(vi->sq); 3783 err_sq: 3784 kfree(vi->ctrl); 3785 err_ctrl: 3786 return -ENOMEM; 3787 } 3788 3789 static int init_vqs(struct virtnet_info *vi) 3790 { 3791 int ret; 3792 3793 /* Allocate send & receive queues */ 3794 ret = virtnet_alloc_queues(vi); 3795 if (ret) 3796 goto err; 3797 3798 ret = virtnet_find_vqs(vi); 3799 if (ret) 3800 goto err_free; 3801 3802 cpus_read_lock(); 3803 virtnet_set_affinity(vi); 3804 cpus_read_unlock(); 3805 3806 return 0; 3807 3808 err_free: 3809 virtnet_free_queues(vi); 3810 err: 3811 return ret; 3812 } 3813 3814 #ifdef CONFIG_SYSFS 3815 static ssize_t mergeable_rx_buffer_size_show(struct netdev_rx_queue *queue, 3816 char *buf) 3817 { 3818 struct virtnet_info *vi = netdev_priv(queue->dev); 3819 unsigned int queue_index = get_netdev_rx_queue_index(queue); 3820 unsigned int headroom = virtnet_get_headroom(vi); 3821 unsigned int tailroom = headroom ? sizeof(struct skb_shared_info) : 0; 3822 struct ewma_pkt_len *avg; 3823 3824 BUG_ON(queue_index >= vi->max_queue_pairs); 3825 avg = &vi->rq[queue_index].mrg_avg_pkt_len; 3826 return sprintf(buf, "%u\n", 3827 get_mergeable_buf_len(&vi->rq[queue_index], avg, 3828 SKB_DATA_ALIGN(headroom + tailroom))); 3829 } 3830 3831 static struct rx_queue_attribute mergeable_rx_buffer_size_attribute = 3832 __ATTR_RO(mergeable_rx_buffer_size); 3833 3834 static struct attribute *virtio_net_mrg_rx_attrs[] = { 3835 &mergeable_rx_buffer_size_attribute.attr, 3836 NULL 3837 }; 3838 3839 static const struct attribute_group virtio_net_mrg_rx_group = { 3840 .name = "virtio_net", 3841 .attrs = virtio_net_mrg_rx_attrs 3842 }; 3843 #endif 3844 3845 static bool virtnet_fail_on_feature(struct virtio_device *vdev, 3846 unsigned int fbit, 3847 const char *fname, const char *dname) 3848 { 3849 if (!virtio_has_feature(vdev, fbit)) 3850 return false; 3851 3852 dev_err(&vdev->dev, "device advertises feature %s but not %s", 3853 fname, dname); 3854 3855 return true; 3856 } 3857 3858 #define VIRTNET_FAIL_ON(vdev, fbit, dbit) \ 3859 virtnet_fail_on_feature(vdev, fbit, #fbit, dbit) 3860 3861 static bool virtnet_validate_features(struct virtio_device *vdev) 3862 { 3863 if (!virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ) && 3864 (VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_CTRL_RX, 3865 "VIRTIO_NET_F_CTRL_VQ") || 3866 VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_CTRL_VLAN, 3867 "VIRTIO_NET_F_CTRL_VQ") || 3868 VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_GUEST_ANNOUNCE, 3869 "VIRTIO_NET_F_CTRL_VQ") || 3870 VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_MQ, "VIRTIO_NET_F_CTRL_VQ") || 3871 VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR, 3872 "VIRTIO_NET_F_CTRL_VQ") || 3873 VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_RSS, 3874 "VIRTIO_NET_F_CTRL_VQ") || 3875 VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_HASH_REPORT, 3876 "VIRTIO_NET_F_CTRL_VQ") || 3877 VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_NOTF_COAL, 3878 "VIRTIO_NET_F_CTRL_VQ"))) { 3879 return false; 3880 } 3881 3882 return true; 3883 } 3884 3885 #define MIN_MTU ETH_MIN_MTU 3886 #define MAX_MTU ETH_MAX_MTU 3887 3888 static int virtnet_validate(struct virtio_device *vdev) 3889 { 3890 if (!vdev->config->get) { 3891 dev_err(&vdev->dev, "%s failure: config access disabled\n", 3892 __func__); 3893 return -EINVAL; 3894 } 3895 3896 if 
(!virtnet_validate_features(vdev)) 3897 return -EINVAL; 3898 3899 if (virtio_has_feature(vdev, VIRTIO_NET_F_MTU)) { 3900 int mtu = virtio_cread16(vdev, 3901 offsetof(struct virtio_net_config, 3902 mtu)); 3903 if (mtu < MIN_MTU) 3904 __virtio_clear_bit(vdev, VIRTIO_NET_F_MTU); 3905 } 3906 3907 if (virtio_has_feature(vdev, VIRTIO_NET_F_STANDBY) && 3908 !virtio_has_feature(vdev, VIRTIO_NET_F_MAC)) { 3909 dev_warn(&vdev->dev, "device advertises feature VIRTIO_NET_F_STANDBY but not VIRTIO_NET_F_MAC, disabling standby"); 3910 __virtio_clear_bit(vdev, VIRTIO_NET_F_STANDBY); 3911 } 3912 3913 return 0; 3914 } 3915 3916 static bool virtnet_check_guest_gso(const struct virtnet_info *vi) 3917 { 3918 return virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO4) || 3919 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO6) || 3920 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_ECN) || 3921 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_UFO) || 3922 (virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_USO4) && 3923 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_USO6)); 3924 } 3925 3926 static void virtnet_set_big_packets(struct virtnet_info *vi, const int mtu) 3927 { 3928 bool guest_gso = virtnet_check_guest_gso(vi); 3929 3930 /* If device can receive ANY guest GSO packets, regardless of mtu, 3931 * allocate packets of maximum size, otherwise limit it to only 3932 * mtu size worth. 3933 */ 3934 if (mtu > ETH_DATA_LEN || guest_gso) { 3935 vi->big_packets = true; 3936 vi->big_packets_num_skbfrags = guest_gso ? MAX_SKB_FRAGS : DIV_ROUND_UP(mtu, PAGE_SIZE); 3937 } 3938 } 3939 3940 static int virtnet_probe(struct virtio_device *vdev) 3941 { 3942 int i, err = -ENOMEM; 3943 struct net_device *dev; 3944 struct virtnet_info *vi; 3945 u16 max_queue_pairs; 3946 int mtu = 0; 3947 3948 /* Find if host supports multiqueue/rss virtio_net device */ 3949 max_queue_pairs = 1; 3950 if (virtio_has_feature(vdev, VIRTIO_NET_F_MQ) || virtio_has_feature(vdev, VIRTIO_NET_F_RSS)) 3951 max_queue_pairs = 3952 virtio_cread16(vdev, offsetof(struct virtio_net_config, max_virtqueue_pairs)); 3953 3954 /* We need at least 2 queues */ 3955 if (max_queue_pairs < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN || 3956 max_queue_pairs > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX || 3957 !virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ)) 3958 max_queue_pairs = 1; 3959 3960 /* Allocate ourselves a network device with room for our info */ 3961 dev = alloc_etherdev_mq(sizeof(struct virtnet_info), max_queue_pairs); 3962 if (!dev) 3963 return -ENOMEM; 3964 3965 /* Set up network device as normal. */ 3966 dev->priv_flags |= IFF_UNICAST_FLT | IFF_LIVE_ADDR_CHANGE | 3967 IFF_TX_SKB_NO_LINEAR; 3968 dev->netdev_ops = &virtnet_netdev; 3969 dev->features = NETIF_F_HIGHDMA; 3970 3971 dev->ethtool_ops = &virtnet_ethtool_ops; 3972 SET_NETDEV_DEV(dev, &vdev->dev); 3973 3974 /* Do we support "hardware" checksums? */ 3975 if (virtio_has_feature(vdev, VIRTIO_NET_F_CSUM)) { 3976 /* This opens up the world of extra features. */ 3977 dev->hw_features |= NETIF_F_HW_CSUM | NETIF_F_SG; 3978 if (csum) 3979 dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG; 3980 3981 if (virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) { 3982 dev->hw_features |= NETIF_F_TSO 3983 | NETIF_F_TSO_ECN | NETIF_F_TSO6; 3984 } 3985 /* Individual feature bits: what can host handle?
static int virtnet_probe(struct virtio_device *vdev)
{
	int i, err = -ENOMEM;
	struct net_device *dev;
	struct virtnet_info *vi;
	u16 max_queue_pairs;
	int mtu = 0;

	/* Find out whether the host supports a multiqueue/RSS virtio_net device */
	max_queue_pairs = 1;
	if (virtio_has_feature(vdev, VIRTIO_NET_F_MQ) || virtio_has_feature(vdev, VIRTIO_NET_F_RSS))
		max_queue_pairs =
		     virtio_cread16(vdev, offsetof(struct virtio_net_config, max_virtqueue_pairs));

	/* We need at least 2 queues */
	if (max_queue_pairs < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN ||
	    max_queue_pairs > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX ||
	    !virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ))
		max_queue_pairs = 1;

	/* Allocate ourselves a network device with room for our info */
	dev = alloc_etherdev_mq(sizeof(struct virtnet_info), max_queue_pairs);
	if (!dev)
		return -ENOMEM;

	/* Set up network device as normal. */
	dev->priv_flags |= IFF_UNICAST_FLT | IFF_LIVE_ADDR_CHANGE |
			   IFF_TX_SKB_NO_LINEAR;
	dev->netdev_ops = &virtnet_netdev;
	dev->features = NETIF_F_HIGHDMA;

	dev->ethtool_ops = &virtnet_ethtool_ops;
	SET_NETDEV_DEV(dev, &vdev->dev);

	/* Do we support "hardware" checksums? */
	if (virtio_has_feature(vdev, VIRTIO_NET_F_CSUM)) {
		/* This opens up the world of extra features. */
		dev->hw_features |= NETIF_F_HW_CSUM | NETIF_F_SG;
		if (csum)
			dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG;

		if (virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) {
			dev->hw_features |= NETIF_F_TSO
				| NETIF_F_TSO_ECN | NETIF_F_TSO6;
		}
		/* Individual feature bits: what can host handle? */
		if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO4))
			dev->hw_features |= NETIF_F_TSO;
		if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO6))
			dev->hw_features |= NETIF_F_TSO6;
		if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_ECN))
			dev->hw_features |= NETIF_F_TSO_ECN;
		if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_USO))
			dev->hw_features |= NETIF_F_GSO_UDP_L4;

		dev->features |= NETIF_F_GSO_ROBUST;

		if (gso)
			dev->features |= dev->hw_features & NETIF_F_ALL_TSO;
		/* (!csum && gso) case will be fixed by register_netdev() */
	}
	if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_CSUM))
		dev->features |= NETIF_F_RXCSUM;
	if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) ||
	    virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6))
		dev->features |= NETIF_F_GRO_HW;
	if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS))
		dev->hw_features |= NETIF_F_GRO_HW;

	dev->vlan_features = dev->features;
	dev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT;

	/* MTU range: 68 - 65535 */
	dev->min_mtu = MIN_MTU;
	dev->max_mtu = MAX_MTU;

	/* Configuration may specify what MAC to use. Otherwise random. */
	if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC)) {
		u8 addr[ETH_ALEN];

		virtio_cread_bytes(vdev,
				   offsetof(struct virtio_net_config, mac),
				   addr, ETH_ALEN);
		eth_hw_addr_set(dev, addr);
	} else {
		eth_hw_addr_random(dev);
		dev_info(&vdev->dev, "Assigned random MAC address %pM\n",
			 dev->dev_addr);
	}

	/* Set up our device-specific information */
	vi = netdev_priv(dev);
	vi->dev = dev;
	vi->vdev = vdev;
	vdev->priv = vi;

	INIT_WORK(&vi->config_work, virtnet_config_changed_work);
	spin_lock_init(&vi->refill_lock);

	if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF)) {
		vi->mergeable_rx_bufs = true;
		dev->xdp_features |= NETDEV_XDP_ACT_RX_SG;
	}

	if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_NOTF_COAL)) {
		vi->rx_usecs = 0;
		vi->tx_usecs = 0;
		vi->tx_max_packets = 0;
		vi->rx_max_packets = 0;
	}

	if (virtio_has_feature(vdev, VIRTIO_NET_F_HASH_REPORT))
		vi->has_rss_hash_report = true;

	if (virtio_has_feature(vdev, VIRTIO_NET_F_RSS))
		vi->has_rss = true;

	if (vi->has_rss || vi->has_rss_hash_report) {
		vi->rss_indir_table_size =
			virtio_cread16(vdev, offsetof(struct virtio_net_config,
				rss_max_indirection_table_length));
		vi->rss_key_size =
			virtio_cread8(vdev, offsetof(struct virtio_net_config, rss_max_key_size));

		vi->rss_hash_types_supported =
		    virtio_cread32(vdev, offsetof(struct virtio_net_config, supported_hash_types));
		vi->rss_hash_types_supported &=
				~(VIRTIO_NET_RSS_HASH_TYPE_IP_EX |
				  VIRTIO_NET_RSS_HASH_TYPE_TCP_EX |
				  VIRTIO_NET_RSS_HASH_TYPE_UDP_EX);

		dev->hw_features |= NETIF_F_RXHASH;
	}

	if (vi->has_rss_hash_report)
		vi->hdr_len = sizeof(struct virtio_net_hdr_v1_hash);
	else if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF) ||
		 virtio_has_feature(vdev, VIRTIO_F_VERSION_1))
		vi->hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf);
	else
		vi->hdr_len = sizeof(struct virtio_net_hdr);

	if (virtio_has_feature(vdev, VIRTIO_F_ANY_LAYOUT) ||
	    virtio_has_feature(vdev, VIRTIO_F_VERSION_1))
		vi->any_header_sg = true;

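	/* A control virtqueue is required for the MAC, VLAN, multiqueue and
	 * RSS control commands; record whether the device offers one.
	 */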
	if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ))
		vi->has_cvq = true;

	if (virtio_has_feature(vdev, VIRTIO_NET_F_MTU)) {
		mtu = virtio_cread16(vdev,
				     offsetof(struct virtio_net_config,
					      mtu));
		if (mtu < dev->min_mtu) {
			/* Should never trigger: MTU was previously validated
			 * in virtnet_validate.
			 */
			dev_err(&vdev->dev,
				"device MTU appears to have changed it is now %d < %d",
				mtu, dev->min_mtu);
			err = -EINVAL;
			goto free;
		}

		dev->mtu = mtu;
		dev->max_mtu = mtu;
	}

	virtnet_set_big_packets(vi, mtu);

	if (vi->any_header_sg)
		dev->needed_headroom = vi->hdr_len;

	/* Enable multiqueue by default */
	if (num_online_cpus() >= max_queue_pairs)
		vi->curr_queue_pairs = max_queue_pairs;
	else
		vi->curr_queue_pairs = num_online_cpus();
	vi->max_queue_pairs = max_queue_pairs;

	/* Allocate/initialize the rx/tx queues, and invoke find_vqs */
	err = init_vqs(vi);
	if (err)
		goto free;

#ifdef CONFIG_SYSFS
	if (vi->mergeable_rx_bufs)
		dev->sysfs_rx_queue_group = &virtio_net_mrg_rx_group;
#endif
	netif_set_real_num_tx_queues(dev, vi->curr_queue_pairs);
	netif_set_real_num_rx_queues(dev, vi->curr_queue_pairs);

	virtnet_init_settings(dev);

	if (virtio_has_feature(vdev, VIRTIO_NET_F_STANDBY)) {
		vi->failover = net_failover_create(vi->dev);
		if (IS_ERR(vi->failover)) {
			err = PTR_ERR(vi->failover);
			goto free_vqs;
		}
	}

	if (vi->has_rss || vi->has_rss_hash_report)
		virtnet_init_default_rss(vi);

	/* serialize netdev register + virtio_device_ready() with ndo_open() */
	rtnl_lock();

	err = register_netdevice(dev);
	if (err) {
		pr_debug("virtio_net: registering device failed\n");
		rtnl_unlock();
		goto free_failover;
	}

	virtio_device_ready(vdev);

	/* A random MAC address has been assigned; notify the device.
	 * We don't fail probe if VIRTIO_NET_F_CTRL_MAC_ADDR is absent because
	 * many devices work fine without the MAC being set explicitly.
	 */
	if (!virtio_has_feature(vdev, VIRTIO_NET_F_MAC) &&
	    virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_MAC_ADDR)) {
		struct scatterlist sg;

		sg_init_one(&sg, dev->dev_addr, dev->addr_len);
		if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
					  VIRTIO_NET_CTRL_MAC_ADDR_SET, &sg)) {
			pr_debug("virtio_net: setting MAC address failed\n");
			rtnl_unlock();
			err = -EINVAL;
			goto free_unregister_netdev;
		}
	}

	rtnl_unlock();

	err = virtnet_cpu_notif_add(vi);
	if (err) {
		pr_debug("virtio_net: registering cpu notifier failed\n");
		goto free_unregister_netdev;
	}

	virtnet_set_queues(vi, vi->curr_queue_pairs);

	/* Assume link up if device can't report link status,
	   otherwise get link status from config. */
	netif_carrier_off(dev);
	if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) {
		schedule_work(&vi->config_work);
	} else {
		vi->status = VIRTIO_NET_S_LINK_UP;
		virtnet_update_settings(vi);
		netif_carrier_on(dev);
	}

	for (i = 0; i < ARRAY_SIZE(guest_offloads); i++)
		if (virtio_has_feature(vi->vdev, guest_offloads[i]))
			set_bit(guest_offloads[i], &vi->guest_offloads);
	vi->guest_offloads_capable = vi->guest_offloads;

	pr_debug("virtnet: registered device %s with %d RX and TX vq's\n",
		 dev->name, max_queue_pairs);

	return 0;

free_unregister_netdev:
	unregister_netdev(dev);
free_failover:
	net_failover_destroy(vi->failover);
free_vqs:
	virtio_reset_device(vdev);
	cancel_delayed_work_sync(&vi->refill);
	free_receive_page_frags(vi);
	virtnet_del_vqs(vi);
free:
	free_netdev(dev);
	return err;
}

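/* Common teardown used by remove, freeze and the restore error path: reset
 * the device so it stops touching the rings, reclaim any buffers still
 * posted to the virtqueues, then delete the virtqueues themselves.
 */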
static void remove_vq_common(struct virtnet_info *vi)
{
	virtio_reset_device(vi->vdev);

	/* Free unused buffers in both send and recv, if any. */
	free_unused_bufs(vi);

	free_receive_bufs(vi);

	free_receive_page_frags(vi);

	virtnet_del_vqs(vi);
}

static void virtnet_remove(struct virtio_device *vdev)
{
	struct virtnet_info *vi = vdev->priv;

	virtnet_cpu_notif_remove(vi);

	/* Make sure no work handler is accessing the device. */
	flush_work(&vi->config_work);

	unregister_netdev(vi->dev);

	net_failover_destroy(vi->failover);

	remove_vq_common(vi);

	free_netdev(vi->dev);
}

static __maybe_unused int virtnet_freeze(struct virtio_device *vdev)
{
	struct virtnet_info *vi = vdev->priv;

	virtnet_cpu_notif_remove(vi);
	virtnet_freeze_down(vdev);
	remove_vq_common(vi);

	return 0;
}

static __maybe_unused int virtnet_restore(struct virtio_device *vdev)
{
	struct virtnet_info *vi = vdev->priv;
	int err;

	err = virtnet_restore_up(vdev);
	if (err)
		return err;
	virtnet_set_queues(vi, vi->curr_queue_pairs);

	err = virtnet_cpu_notif_add(vi);
	if (err) {
		virtnet_freeze_down(vdev);
		remove_vq_common(vi);
		return err;
	}

	return 0;
}

static struct virtio_device_id id_table[] = {
	{ VIRTIO_ID_NET, VIRTIO_DEV_ANY_ID },
	{ 0 },
};

#define VIRTNET_FEATURES \
	VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GUEST_CSUM, \
	VIRTIO_NET_F_MAC, \
	VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6, \
	VIRTIO_NET_F_HOST_ECN, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6, \
	VIRTIO_NET_F_GUEST_ECN, VIRTIO_NET_F_GUEST_UFO, \
	VIRTIO_NET_F_HOST_USO, VIRTIO_NET_F_GUEST_USO4, VIRTIO_NET_F_GUEST_USO6, \
	VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ, \
	VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN, \
	VIRTIO_NET_F_GUEST_ANNOUNCE, VIRTIO_NET_F_MQ, \
	VIRTIO_NET_F_CTRL_MAC_ADDR, \
	VIRTIO_NET_F_MTU, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS, \
	VIRTIO_NET_F_SPEED_DUPLEX, VIRTIO_NET_F_STANDBY, \
	VIRTIO_NET_F_RSS, VIRTIO_NET_F_HASH_REPORT, VIRTIO_NET_F_NOTF_COAL, \
	VIRTIO_NET_F_GUEST_HDRLEN

static unsigned int features[] = {
	VIRTNET_FEATURES,
};

static unsigned int features_legacy[] = {
	VIRTNET_FEATURES,
	VIRTIO_NET_F_GSO,
	VIRTIO_F_ANY_LAYOUT,
};

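/* Glue binding the callbacks above into the virtio core. The legacy feature
 * table additionally advertises VIRTIO_NET_F_GSO and VIRTIO_F_ANY_LAYOUT for
 * transitional (pre-virtio-1.0) devices.
 */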
static struct virtio_driver virtio_net_driver = {
	.feature_table = features,
	.feature_table_size = ARRAY_SIZE(features),
	.feature_table_legacy = features_legacy,
	.feature_table_size_legacy = ARRAY_SIZE(features_legacy),
	.driver.name =	KBUILD_MODNAME,
	.driver.owner =	THIS_MODULE,
	.id_table =	id_table,
	.validate =	virtnet_validate,
	.probe =	virtnet_probe,
	.remove =	virtnet_remove,
	.config_changed = virtnet_config_changed,
#ifdef CONFIG_PM_SLEEP
	.freeze =	virtnet_freeze,
	.restore =	virtnet_restore,
#endif
};

static __init int virtio_net_driver_init(void)
{
	int ret;

	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "virtio/net:online",
				      virtnet_cpu_online,
				      virtnet_cpu_down_prep);
	if (ret < 0)
		goto out;
	virtionet_online = ret;
	ret = cpuhp_setup_state_multi(CPUHP_VIRT_NET_DEAD, "virtio/net:dead",
				      NULL, virtnet_cpu_dead);
	if (ret)
		goto err_dead;
	ret = register_virtio_driver(&virtio_net_driver);
	if (ret)
		goto err_virtio;
	return 0;
err_virtio:
	cpuhp_remove_multi_state(CPUHP_VIRT_NET_DEAD);
err_dead:
	cpuhp_remove_multi_state(virtionet_online);
out:
	return ret;
}
module_init(virtio_net_driver_init);

static __exit void virtio_net_driver_exit(void)
{
	unregister_virtio_driver(&virtio_net_driver);
	cpuhp_remove_multi_state(CPUHP_VIRT_NET_DEAD);
	cpuhp_remove_multi_state(virtionet_online);
}
module_exit(virtio_net_driver_exit);

MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_DESCRIPTION("Virtio network driver");
MODULE_LICENSE("GPL");