// SPDX-License-Identifier: GPL-2.0-or-later
/* A network driver using virtio.
 *
 * Copyright 2007 Rusty Russell <rusty@rustcorp.com.au> IBM Corporation
 */
//#define DEBUG
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/module.h>
#include <linux/virtio.h>
#include <linux/virtio_net.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/scatterlist.h>
#include <linux/if_vlan.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/average.h>
#include <linux/filter.h>
#include <linux/kernel.h>
#include <net/route.h>
#include <net/xdp.h>
#include <net/net_failover.h>

static int napi_weight = NAPI_POLL_WEIGHT;
module_param(napi_weight, int, 0444);

static bool csum = true, gso = true, napi_tx = true;
module_param(csum, bool, 0444);
module_param(gso, bool, 0444);
module_param(napi_tx, bool, 0644);

/* FIXME: MTU in config. */
#define GOOD_PACKET_LEN	(ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN)
#define GOOD_COPY_LEN	128

#define VIRTNET_RX_PAD (NET_IP_ALIGN + NET_SKB_PAD)

/* Amount of XDP headroom to prepend to packets for use by xdp_adjust_head */
#define VIRTIO_XDP_HEADROOM 256

/* Separating two types of XDP xmit */
#define VIRTIO_XDP_TX		BIT(0)
#define VIRTIO_XDP_REDIR	BIT(1)

#define VIRTIO_XDP_FLAG	BIT(0)

/* RX packet size EWMA. The average packet size is used to determine the packet
 * buffer size when refilling RX rings. As the entire RX ring may be refilled
 * at once, the weight is chosen so that the EWMA will be insensitive to short-
 * term, transient changes in packet size.
 */
DECLARE_EWMA(pkt_len, 0, 64)

#define VIRTNET_DRIVER_VERSION "1.0.0"

static const unsigned long guest_offloads[] = {
	VIRTIO_NET_F_GUEST_TSO4,
	VIRTIO_NET_F_GUEST_TSO6,
	VIRTIO_NET_F_GUEST_ECN,
	VIRTIO_NET_F_GUEST_UFO,
	VIRTIO_NET_F_GUEST_CSUM,
	VIRTIO_NET_F_GUEST_USO4,
	VIRTIO_NET_F_GUEST_USO6,
	VIRTIO_NET_F_GUEST_HDRLEN
};

#define GUEST_OFFLOAD_GRO_HW_MASK ((1ULL << VIRTIO_NET_F_GUEST_TSO4) | \
				   (1ULL << VIRTIO_NET_F_GUEST_TSO6) | \
				   (1ULL << VIRTIO_NET_F_GUEST_ECN)  | \
				   (1ULL << VIRTIO_NET_F_GUEST_UFO)  | \
				   (1ULL << VIRTIO_NET_F_GUEST_USO4) | \
				   (1ULL << VIRTIO_NET_F_GUEST_USO6))

struct virtnet_stat_desc {
	char desc[ETH_GSTRING_LEN];
	size_t offset;
};

struct virtnet_sq_stats {
	struct u64_stats_sync syncp;
	u64 packets;
	u64 bytes;
	u64 xdp_tx;
	u64 xdp_tx_drops;
	u64 kicks;
	u64 tx_timeouts;
};

struct virtnet_rq_stats {
	struct u64_stats_sync syncp;
	u64 packets;
	u64 bytes;
	u64 drops;
	u64 xdp_packets;
	u64 xdp_tx;
	u64 xdp_redirects;
	u64 xdp_drops;
	u64 kicks;
};

#define VIRTNET_SQ_STAT(m)	offsetof(struct virtnet_sq_stats, m)
#define VIRTNET_RQ_STAT(m)	offsetof(struct virtnet_rq_stats, m)

static const struct virtnet_stat_desc virtnet_sq_stats_desc[] = {
	{ "packets",		VIRTNET_SQ_STAT(packets) },
	{ "bytes",		VIRTNET_SQ_STAT(bytes) },
	{ "xdp_tx",		VIRTNET_SQ_STAT(xdp_tx) },
	{ "xdp_tx_drops",	VIRTNET_SQ_STAT(xdp_tx_drops) },
	{ "kicks",		VIRTNET_SQ_STAT(kicks) },
	{ "tx_timeouts",	VIRTNET_SQ_STAT(tx_timeouts) },
};

static const struct virtnet_stat_desc virtnet_rq_stats_desc[] = {
	{ "packets",		VIRTNET_RQ_STAT(packets) },
	{ "bytes",		VIRTNET_RQ_STAT(bytes) },
	{ "drops",		VIRTNET_RQ_STAT(drops) },
	{ "xdp_packets",	VIRTNET_RQ_STAT(xdp_packets) },
	{ "xdp_tx",		VIRTNET_RQ_STAT(xdp_tx) },
	{ "xdp_redirects",	VIRTNET_RQ_STAT(xdp_redirects) },
	{ "xdp_drops",		VIRTNET_RQ_STAT(xdp_drops) },
	{ "kicks",		VIRTNET_RQ_STAT(kicks) },
};

#define VIRTNET_SQ_STATS_LEN	ARRAY_SIZE(virtnet_sq_stats_desc)
#define VIRTNET_RQ_STATS_LEN	ARRAY_SIZE(virtnet_rq_stats_desc)

/* Internal representation of a send virtqueue */
struct send_queue {
	/* Virtqueue associated with this send queue */
	struct virtqueue *vq;

	/* TX: fragments + linear part + virtio header */
	struct scatterlist sg[MAX_SKB_FRAGS + 2];

	/* Name of the send queue: output.$index */
	char name[16];

	struct virtnet_sq_stats stats;

	struct napi_struct napi;

	/* Record whether sq is in reset state. */
	bool reset;
};

/* Internal representation of a receive virtqueue */
struct receive_queue {
	/* Virtqueue associated with this receive_queue */
	struct virtqueue *vq;

	struct napi_struct napi;

	struct bpf_prog __rcu *xdp_prog;

	struct virtnet_rq_stats stats;

	/* Chain pages by the private ptr. */
	struct page *pages;

	/* Average packet length for mergeable receive buffers. */
	struct ewma_pkt_len mrg_avg_pkt_len;

	/* Page frag for packet buffer allocation. */
	struct page_frag alloc_frag;

	/* RX: fragments + linear part + virtio header */
	struct scatterlist sg[MAX_SKB_FRAGS + 2];

	/* Min single buffer size for mergeable buffers case. */
	unsigned int min_buf_len;

	/* Name of this receive queue: input.$index */
	char name[16];

	struct xdp_rxq_info xdp_rxq;
};

/* This structure holds an RSS message with the maximum settings for the
 * indirection table and key size.
 * Note that the default structure describing the RSS configuration,
 * virtio_net_rss_config, carries the same information but cannot hold the
 * table values.
 * In any case, the structure is passed to the virtio hw through sg_buf split
 * into parts, because table sizes may differ according to the device
 * configuration.
 */
#define VIRTIO_NET_RSS_MAX_KEY_SIZE	40
#define VIRTIO_NET_RSS_MAX_TABLE_LEN	128
struct virtio_net_ctrl_rss {
	u32 hash_types;
	u16 indirection_table_mask;
	u16 unclassified_queue;
	u16 indirection_table[VIRTIO_NET_RSS_MAX_TABLE_LEN];
	u16 max_tx_vq;
	u8 hash_key_length;
	u8 key[VIRTIO_NET_RSS_MAX_KEY_SIZE];
};

/* Control VQ buffers: protected by the rtnl lock */
struct control_buf {
	struct virtio_net_ctrl_hdr hdr;
	virtio_net_ctrl_ack status;
	struct virtio_net_ctrl_mq mq;
	u8 promisc;
	u8 allmulti;
	__virtio16 vid;
	__virtio64 offloads;
	struct virtio_net_ctrl_rss rss;
	struct virtio_net_ctrl_coal_tx coal_tx;
	struct virtio_net_ctrl_coal_rx coal_rx;
};

struct virtnet_info {
	struct virtio_device *vdev;
	struct virtqueue *cvq;
	struct net_device *dev;
	struct send_queue *sq;
	struct receive_queue *rq;
	unsigned int status;

	/* Max # of queue pairs supported by the device */
	u16 max_queue_pairs;

	/* # of queue pairs currently used by the driver */
	u16 curr_queue_pairs;

	/* # of XDP queue pairs currently used by the driver */
	u16 xdp_queue_pairs;

	/* xdp_queue_pairs may be 0 even when xdp is already loaded, so track XDP state here as well. */
	bool xdp_enabled;

	/* I like... big packets and I cannot lie!
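	 * (In this mode receive buffers are chains of pages, so the device
	 * can hand us packets larger than a single page, e.g. GSO frames.)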
	 */
	bool big_packets;

	/* number of sg entries allocated for big packets */
	unsigned int big_packets_num_skbfrags;

	/* Host will merge rx buffers for big packets (shake it! shake it!) */
	bool mergeable_rx_bufs;

	/* Host supports rss and/or hash report */
	bool has_rss;
	bool has_rss_hash_report;
	u8 rss_key_size;
	u16 rss_indir_table_size;
	u32 rss_hash_types_supported;
	u32 rss_hash_types_saved;

	/* Has control virtqueue */
	bool has_cvq;

	/* Host can handle any s/g split between our header and packet data */
	bool any_header_sg;

	/* Packet virtio header size */
	u8 hdr_len;

	/* Work struct for delayed refilling if we run low on memory. */
	struct delayed_work refill;

	/* Is delayed refill enabled? */
	bool refill_enabled;

	/* The lock to synchronize the access to refill_enabled */
	spinlock_t refill_lock;

	/* Work struct for config space updates */
	struct work_struct config_work;

	/* Is the affinity hint set for the virtqueues? */
	bool affinity_hint_set;

	/* CPU hotplug instances for online & dead */
	struct hlist_node node;
	struct hlist_node node_dead;

	struct control_buf *ctrl;

	/* Ethtool settings */
	u8 duplex;
	u32 speed;

	/* Interrupt coalescing settings */
	u32 tx_usecs;
	u32 rx_usecs;
	u32 tx_max_packets;
	u32 rx_max_packets;

	unsigned long guest_offloads;
	unsigned long guest_offloads_capable;

	/* failover when STANDBY feature enabled */
	struct failover *failover;
};

struct padded_vnet_hdr {
	struct virtio_net_hdr_v1_hash hdr;
	/*
	 * hdr is in a separate sg buffer, and data sg buffer shares same page
	 * with this header sg. This padding makes next sg 16 byte aligned
	 * after the header.
	 */
	char padding[12];
};

static void virtnet_rq_free_unused_buf(struct virtqueue *vq, void *buf);
static void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf);

static bool is_xdp_frame(void *ptr)
{
	return (unsigned long)ptr & VIRTIO_XDP_FLAG;
}

static void *xdp_to_ptr(struct xdp_frame *ptr)
{
	return (void *)((unsigned long)ptr | VIRTIO_XDP_FLAG);
}

static struct xdp_frame *ptr_to_xdp(void *ptr)
{
	return (struct xdp_frame *)((unsigned long)ptr & ~VIRTIO_XDP_FLAG);
}

/* Converting between virtqueue no. and kernel tx/rx queue no.
 * 0:rx0 1:tx0 2:rx1 3:tx1 ... 2N:rxN 2N+1:txN 2N+2:cvq
 */
static int vq2txq(struct virtqueue *vq)
{
	return (vq->index - 1) / 2;
}

static int txq2vq(int txq)
{
	return txq * 2 + 1;
}

static int vq2rxq(struct virtqueue *vq)
{
	return vq->index / 2;
}

static int rxq2vq(int rxq)
{
	return rxq * 2;
}

static inline struct virtio_net_hdr_mrg_rxbuf *skb_vnet_hdr(struct sk_buff *skb)
{
	return (struct virtio_net_hdr_mrg_rxbuf *)skb->cb;
}

/*
 * private is used to chain pages for big packets, put the whole
 * most recently used list at the beginning for reuse
 */
static void give_pages(struct receive_queue *rq, struct page *page)
{
	struct page *end;

	/* Find end of list, sew whole thing into vi->rq.pages.
*/ 361 for (end = page; end->private; end = (struct page *)end->private); 362 end->private = (unsigned long)rq->pages; 363 rq->pages = page; 364 } 365 366 static struct page *get_a_page(struct receive_queue *rq, gfp_t gfp_mask) 367 { 368 struct page *p = rq->pages; 369 370 if (p) { 371 rq->pages = (struct page *)p->private; 372 /* clear private here, it is used to chain pages */ 373 p->private = 0; 374 } else 375 p = alloc_page(gfp_mask); 376 return p; 377 } 378 379 static void enable_delayed_refill(struct virtnet_info *vi) 380 { 381 spin_lock_bh(&vi->refill_lock); 382 vi->refill_enabled = true; 383 spin_unlock_bh(&vi->refill_lock); 384 } 385 386 static void disable_delayed_refill(struct virtnet_info *vi) 387 { 388 spin_lock_bh(&vi->refill_lock); 389 vi->refill_enabled = false; 390 spin_unlock_bh(&vi->refill_lock); 391 } 392 393 static void virtqueue_napi_schedule(struct napi_struct *napi, 394 struct virtqueue *vq) 395 { 396 if (napi_schedule_prep(napi)) { 397 virtqueue_disable_cb(vq); 398 __napi_schedule(napi); 399 } 400 } 401 402 static void virtqueue_napi_complete(struct napi_struct *napi, 403 struct virtqueue *vq, int processed) 404 { 405 int opaque; 406 407 opaque = virtqueue_enable_cb_prepare(vq); 408 if (napi_complete_done(napi, processed)) { 409 if (unlikely(virtqueue_poll(vq, opaque))) 410 virtqueue_napi_schedule(napi, vq); 411 } else { 412 virtqueue_disable_cb(vq); 413 } 414 } 415 416 static void skb_xmit_done(struct virtqueue *vq) 417 { 418 struct virtnet_info *vi = vq->vdev->priv; 419 struct napi_struct *napi = &vi->sq[vq2txq(vq)].napi; 420 421 /* Suppress further interrupts. */ 422 virtqueue_disable_cb(vq); 423 424 if (napi->weight) 425 virtqueue_napi_schedule(napi, vq); 426 else 427 /* We were probably waiting for more output buffers. 
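		 * Wake the subqueue so the stack resumes queueing packets to us.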
*/ 428 netif_wake_subqueue(vi->dev, vq2txq(vq)); 429 } 430 431 #define MRG_CTX_HEADER_SHIFT 22 432 static void *mergeable_len_to_ctx(unsigned int truesize, 433 unsigned int headroom) 434 { 435 return (void *)(unsigned long)((headroom << MRG_CTX_HEADER_SHIFT) | truesize); 436 } 437 438 static unsigned int mergeable_ctx_to_headroom(void *mrg_ctx) 439 { 440 return (unsigned long)mrg_ctx >> MRG_CTX_HEADER_SHIFT; 441 } 442 443 static unsigned int mergeable_ctx_to_truesize(void *mrg_ctx) 444 { 445 return (unsigned long)mrg_ctx & ((1 << MRG_CTX_HEADER_SHIFT) - 1); 446 } 447 448 /* Called from bottom half context */ 449 static struct sk_buff *page_to_skb(struct virtnet_info *vi, 450 struct receive_queue *rq, 451 struct page *page, unsigned int offset, 452 unsigned int len, unsigned int truesize, 453 unsigned int headroom) 454 { 455 struct sk_buff *skb; 456 struct virtio_net_hdr_mrg_rxbuf *hdr; 457 unsigned int copy, hdr_len, hdr_padded_len; 458 struct page *page_to_free = NULL; 459 int tailroom, shinfo_size; 460 char *p, *hdr_p, *buf; 461 462 p = page_address(page) + offset; 463 hdr_p = p; 464 465 hdr_len = vi->hdr_len; 466 if (vi->mergeable_rx_bufs) 467 hdr_padded_len = hdr_len; 468 else 469 hdr_padded_len = sizeof(struct padded_vnet_hdr); 470 471 buf = p - headroom; 472 len -= hdr_len; 473 offset += hdr_padded_len; 474 p += hdr_padded_len; 475 tailroom = truesize - headroom - hdr_padded_len - len; 476 477 shinfo_size = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); 478 479 /* copy small packet so we can reuse these pages */ 480 if (!NET_IP_ALIGN && len > GOOD_COPY_LEN && tailroom >= shinfo_size) { 481 skb = build_skb(buf, truesize); 482 if (unlikely(!skb)) 483 return NULL; 484 485 skb_reserve(skb, p - buf); 486 skb_put(skb, len); 487 488 page = (struct page *)page->private; 489 if (page) 490 give_pages(rq, page); 491 goto ok; 492 } 493 494 /* copy small packet so we can reuse these pages for small data */ 495 skb = napi_alloc_skb(&rq->napi, GOOD_COPY_LEN); 496 if (unlikely(!skb)) 497 return NULL; 498 499 /* Copy all frame if it fits skb->head, otherwise 500 * we let virtio_net_hdr_to_skb() and GRO pull headers as needed. 501 */ 502 if (len <= skb_tailroom(skb)) 503 copy = len; 504 else 505 copy = ETH_HLEN; 506 skb_put_data(skb, p, copy); 507 508 len -= copy; 509 offset += copy; 510 511 if (vi->mergeable_rx_bufs) { 512 if (len) 513 skb_add_rx_frag(skb, 0, page, offset, len, truesize); 514 else 515 page_to_free = page; 516 goto ok; 517 } 518 519 /* 520 * Verify that we can indeed put this data into a skb. 521 * This is here to handle cases when the device erroneously 522 * tries to receive more than is possible. This is usually 523 * the case of a broken device. 
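	 * The check below caps the payload at what the skb frag array can
	 * hold, i.e. MAX_SKB_FRAGS pages.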
524 */ 525 if (unlikely(len > MAX_SKB_FRAGS * PAGE_SIZE)) { 526 net_dbg_ratelimited("%s: too much data\n", skb->dev->name); 527 dev_kfree_skb(skb); 528 return NULL; 529 } 530 BUG_ON(offset >= PAGE_SIZE); 531 while (len) { 532 unsigned int frag_size = min((unsigned)PAGE_SIZE - offset, len); 533 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, offset, 534 frag_size, truesize); 535 len -= frag_size; 536 page = (struct page *)page->private; 537 offset = 0; 538 } 539 540 if (page) 541 give_pages(rq, page); 542 543 ok: 544 hdr = skb_vnet_hdr(skb); 545 memcpy(hdr, hdr_p, hdr_len); 546 if (page_to_free) 547 put_page(page_to_free); 548 549 return skb; 550 } 551 552 static void free_old_xmit_skbs(struct send_queue *sq, bool in_napi) 553 { 554 unsigned int len; 555 unsigned int packets = 0; 556 unsigned int bytes = 0; 557 void *ptr; 558 559 while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) { 560 if (likely(!is_xdp_frame(ptr))) { 561 struct sk_buff *skb = ptr; 562 563 pr_debug("Sent skb %p\n", skb); 564 565 bytes += skb->len; 566 napi_consume_skb(skb, in_napi); 567 } else { 568 struct xdp_frame *frame = ptr_to_xdp(ptr); 569 570 bytes += xdp_get_frame_len(frame); 571 xdp_return_frame(frame); 572 } 573 packets++; 574 } 575 576 /* Avoid overhead when no packets have been processed 577 * happens when called speculatively from start_xmit. 578 */ 579 if (!packets) 580 return; 581 582 u64_stats_update_begin(&sq->stats.syncp); 583 sq->stats.bytes += bytes; 584 sq->stats.packets += packets; 585 u64_stats_update_end(&sq->stats.syncp); 586 } 587 588 static bool is_xdp_raw_buffer_queue(struct virtnet_info *vi, int q) 589 { 590 if (q < (vi->curr_queue_pairs - vi->xdp_queue_pairs)) 591 return false; 592 else if (q < vi->curr_queue_pairs) 593 return true; 594 else 595 return false; 596 } 597 598 static void check_sq_full_and_disable(struct virtnet_info *vi, 599 struct net_device *dev, 600 struct send_queue *sq) 601 { 602 bool use_napi = sq->napi.weight; 603 int qnum; 604 605 qnum = sq - vi->sq; 606 607 /* If running out of space, stop queue to avoid getting packets that we 608 * are then unable to transmit. 609 * An alternative would be to force queuing layer to requeue the skb by 610 * returning NETDEV_TX_BUSY. However, NETDEV_TX_BUSY should not be 611 * returned in a normal path of operation: it means that driver is not 612 * maintaining the TX queue stop/start state properly, and causes 613 * the stack to do a non-trivial amount of useless work. 614 * Since most packets only take 1 or 2 ring slots, stopping the queue 615 * early means 16 slots are typically wasted. 616 */ 617 if (sq->vq->num_free < 2+MAX_SKB_FRAGS) { 618 netif_stop_subqueue(dev, qnum); 619 if (use_napi) { 620 if (unlikely(!virtqueue_enable_cb_delayed(sq->vq))) 621 virtqueue_napi_schedule(&sq->napi, sq->vq); 622 } else if (unlikely(!virtqueue_enable_cb_delayed(sq->vq))) { 623 /* More just got used, free them then recheck. 
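			 * If enough free slots were reclaimed, restart the
			 * queue and keep callbacks suppressed.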
			 */
			free_old_xmit_skbs(sq, false);
			if (sq->vq->num_free >= 2+MAX_SKB_FRAGS) {
				netif_start_subqueue(dev, qnum);
				virtqueue_disable_cb(sq->vq);
			}
		}
	}
}

static int __virtnet_xdp_xmit_one(struct virtnet_info *vi,
				  struct send_queue *sq,
				  struct xdp_frame *xdpf)
{
	struct virtio_net_hdr_mrg_rxbuf *hdr;
	struct skb_shared_info *shinfo;
	u8 nr_frags = 0;
	int err, i;

	if (unlikely(xdpf->headroom < vi->hdr_len))
		return -EOVERFLOW;

	if (unlikely(xdp_frame_has_frags(xdpf))) {
		shinfo = xdp_get_shared_info_from_frame(xdpf);
		nr_frags = shinfo->nr_frags;
	}

	/* In the wrapping function virtnet_xdp_xmit(), we need to free
	 * up the pending old buffers, where we need to calculate the
	 * position of skb_shared_info in xdp_get_frame_len() and
	 * xdp_return_frame(), which involves xdpf->data and
	 * xdpf->headroom. Therefore, we update the value of
	 * headroom synchronously here.
	 */
	xdpf->headroom -= vi->hdr_len;
	xdpf->data -= vi->hdr_len;
	/* Zero header and leave csum up to XDP layers */
	hdr = xdpf->data;
	memset(hdr, 0, vi->hdr_len);
	xdpf->len += vi->hdr_len;

	sg_init_table(sq->sg, nr_frags + 1);
	sg_set_buf(sq->sg, xdpf->data, xdpf->len);
	for (i = 0; i < nr_frags; i++) {
		skb_frag_t *frag = &shinfo->frags[i];

		sg_set_page(&sq->sg[i + 1], skb_frag_page(frag),
			    skb_frag_size(frag), skb_frag_off(frag));
	}

	err = virtqueue_add_outbuf(sq->vq, sq->sg, nr_frags + 1,
				   xdp_to_ptr(xdpf), GFP_ATOMIC);
	if (unlikely(err))
		return -ENOSPC; /* Caller handles free/refcnt */

	return 0;
}

/* when vi->curr_queue_pairs > nr_cpu_ids, the txq/sq is only used for xdp tx on
 * the current cpu, so it does not need to be locked.
 *
 * Here we use a macro instead of inline functions because we have to deal with
 * three issues at the same time: 1. the choice of sq. 2. judging and executing
 * the lock/unlock of txq 3. making sparse happy. It is difficult for two inline
 * functions to perfectly solve these three problems at the same time.
 */
#define virtnet_xdp_get_sq(vi) ({ \
	int cpu = smp_processor_id(); \
	struct netdev_queue *txq; \
	typeof(vi) v = (vi); \
	unsigned int qp; \
	\
	if (v->curr_queue_pairs > nr_cpu_ids) { \
		qp = v->curr_queue_pairs - v->xdp_queue_pairs; \
		qp += cpu; \
		txq = netdev_get_tx_queue(v->dev, qp); \
		__netif_tx_acquire(txq); \
	} else { \
		qp = cpu % v->curr_queue_pairs; \
		txq = netdev_get_tx_queue(v->dev, qp); \
		__netif_tx_lock(txq, cpu); \
	} \
	v->sq + qp; \
})

#define virtnet_xdp_put_sq(vi, q) { \
	struct netdev_queue *txq; \
	typeof(vi) v = (vi); \
	\
	txq = netdev_get_tx_queue(v->dev, (q) - v->sq); \
	if (v->curr_queue_pairs > nr_cpu_ids) \
		__netif_tx_release(txq); \
	else \
		__netif_tx_unlock(txq); \
}

static int virtnet_xdp_xmit(struct net_device *dev,
			    int n, struct xdp_frame **frames, u32 flags)
{
	struct virtnet_info *vi = netdev_priv(dev);
	struct receive_queue *rq = vi->rq;
	struct bpf_prog *xdp_prog;
	struct send_queue *sq;
	unsigned int len;
	int packets = 0;
	int bytes = 0;
	int nxmit = 0;
	int kicks = 0;
	void *ptr;
	int ret;
	int i;

	/* Only allow ndo_xdp_xmit if XDP is loaded on dev, as this
	 * indicates XDP resources have been successfully allocated.
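	 * rcu_access_pointer() is sufficient here: the program pointer is
	 * only tested for NULL and never dereferenced.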
737 */ 738 xdp_prog = rcu_access_pointer(rq->xdp_prog); 739 if (!xdp_prog) 740 return -ENXIO; 741 742 sq = virtnet_xdp_get_sq(vi); 743 744 if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) { 745 ret = -EINVAL; 746 goto out; 747 } 748 749 /* Free up any pending old buffers before queueing new ones. */ 750 while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) { 751 if (likely(is_xdp_frame(ptr))) { 752 struct xdp_frame *frame = ptr_to_xdp(ptr); 753 754 bytes += xdp_get_frame_len(frame); 755 xdp_return_frame(frame); 756 } else { 757 struct sk_buff *skb = ptr; 758 759 bytes += skb->len; 760 napi_consume_skb(skb, false); 761 } 762 packets++; 763 } 764 765 for (i = 0; i < n; i++) { 766 struct xdp_frame *xdpf = frames[i]; 767 768 if (__virtnet_xdp_xmit_one(vi, sq, xdpf)) 769 break; 770 nxmit++; 771 } 772 ret = nxmit; 773 774 if (!is_xdp_raw_buffer_queue(vi, sq - vi->sq)) 775 check_sq_full_and_disable(vi, dev, sq); 776 777 if (flags & XDP_XMIT_FLUSH) { 778 if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq)) 779 kicks = 1; 780 } 781 out: 782 u64_stats_update_begin(&sq->stats.syncp); 783 sq->stats.bytes += bytes; 784 sq->stats.packets += packets; 785 sq->stats.xdp_tx += n; 786 sq->stats.xdp_tx_drops += n - nxmit; 787 sq->stats.kicks += kicks; 788 u64_stats_update_end(&sq->stats.syncp); 789 790 virtnet_xdp_put_sq(vi, sq); 791 return ret; 792 } 793 794 static unsigned int virtnet_get_headroom(struct virtnet_info *vi) 795 { 796 return vi->xdp_enabled ? VIRTIO_XDP_HEADROOM : 0; 797 } 798 799 /* We copy the packet for XDP in the following cases: 800 * 801 * 1) Packet is scattered across multiple rx buffers. 802 * 2) Headroom space is insufficient. 803 * 804 * This is inefficient but it's a temporary condition that 805 * we hit right after XDP is enabled and until queue is refilled 806 * with large buffers with sufficient headroom - so it should affect 807 * at most queue size packets. 808 * Afterwards, the conditions to enable 809 * XDP should preclude the underlying device from sending packets 810 * across multiple buffers (num_buf > 1), and we make sure buffers 811 * have enough headroom. 812 */ 813 static struct page *xdp_linearize_page(struct receive_queue *rq, 814 int *num_buf, 815 struct page *p, 816 int offset, 817 int page_off, 818 unsigned int *len) 819 { 820 int tailroom = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); 821 struct page *page; 822 823 if (page_off + *len + tailroom > PAGE_SIZE) 824 return NULL; 825 826 page = alloc_page(GFP_ATOMIC); 827 if (!page) 828 return NULL; 829 830 memcpy(page_address(page) + page_off, page_address(p) + offset, *len); 831 page_off += *len; 832 833 while (--*num_buf) { 834 unsigned int buflen; 835 void *buf; 836 int off; 837 838 buf = virtqueue_get_buf(rq->vq, &buflen); 839 if (unlikely(!buf)) 840 goto err_buf; 841 842 p = virt_to_head_page(buf); 843 off = buf - page_address(p); 844 845 /* guard against a misconfigured or uncooperative backend that 846 * is sending packet larger than the MTU. 
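		 * Everything linearized so far plus this buffer (and the
		 * skb_shared_info tailroom) must still fit in one page.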
847 */ 848 if ((page_off + buflen + tailroom) > PAGE_SIZE) { 849 put_page(p); 850 goto err_buf; 851 } 852 853 memcpy(page_address(page) + page_off, 854 page_address(p) + off, buflen); 855 page_off += buflen; 856 put_page(p); 857 } 858 859 /* Headroom does not contribute to packet length */ 860 *len = page_off - VIRTIO_XDP_HEADROOM; 861 return page; 862 err_buf: 863 __free_pages(page, 0); 864 return NULL; 865 } 866 867 static struct sk_buff *receive_small(struct net_device *dev, 868 struct virtnet_info *vi, 869 struct receive_queue *rq, 870 void *buf, void *ctx, 871 unsigned int len, 872 unsigned int *xdp_xmit, 873 struct virtnet_rq_stats *stats) 874 { 875 struct sk_buff *skb; 876 struct bpf_prog *xdp_prog; 877 unsigned int xdp_headroom = (unsigned long)ctx; 878 unsigned int header_offset = VIRTNET_RX_PAD + xdp_headroom; 879 unsigned int headroom = vi->hdr_len + header_offset; 880 unsigned int buflen = SKB_DATA_ALIGN(GOOD_PACKET_LEN + headroom) + 881 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); 882 struct page *page = virt_to_head_page(buf); 883 unsigned int delta = 0; 884 struct page *xdp_page; 885 int err; 886 unsigned int metasize = 0; 887 888 len -= vi->hdr_len; 889 stats->bytes += len; 890 891 if (unlikely(len > GOOD_PACKET_LEN)) { 892 pr_debug("%s: rx error: len %u exceeds max size %d\n", 893 dev->name, len, GOOD_PACKET_LEN); 894 dev->stats.rx_length_errors++; 895 goto err; 896 } 897 898 if (likely(!vi->xdp_enabled)) { 899 xdp_prog = NULL; 900 goto skip_xdp; 901 } 902 903 rcu_read_lock(); 904 xdp_prog = rcu_dereference(rq->xdp_prog); 905 if (xdp_prog) { 906 struct virtio_net_hdr_mrg_rxbuf *hdr = buf + header_offset; 907 struct xdp_frame *xdpf; 908 struct xdp_buff xdp; 909 void *orig_data; 910 u32 act; 911 912 if (unlikely(hdr->hdr.gso_type)) 913 goto err_xdp; 914 915 if (unlikely(xdp_headroom < virtnet_get_headroom(vi))) { 916 int offset = buf - page_address(page) + header_offset; 917 unsigned int tlen = len + vi->hdr_len; 918 int num_buf = 1; 919 920 xdp_headroom = virtnet_get_headroom(vi); 921 header_offset = VIRTNET_RX_PAD + xdp_headroom; 922 headroom = vi->hdr_len + header_offset; 923 buflen = SKB_DATA_ALIGN(GOOD_PACKET_LEN + headroom) + 924 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); 925 xdp_page = xdp_linearize_page(rq, &num_buf, page, 926 offset, header_offset, 927 &tlen); 928 if (!xdp_page) 929 goto err_xdp; 930 931 buf = page_address(xdp_page); 932 put_page(page); 933 page = xdp_page; 934 } 935 936 xdp_init_buff(&xdp, buflen, &rq->xdp_rxq); 937 xdp_prepare_buff(&xdp, buf + VIRTNET_RX_PAD + vi->hdr_len, 938 xdp_headroom, len, true); 939 orig_data = xdp.data; 940 act = bpf_prog_run_xdp(xdp_prog, &xdp); 941 stats->xdp_packets++; 942 943 switch (act) { 944 case XDP_PASS: 945 /* Recalculate length in case bpf program changed it */ 946 delta = orig_data - xdp.data; 947 len = xdp.data_end - xdp.data; 948 metasize = xdp.data - xdp.data_meta; 949 break; 950 case XDP_TX: 951 stats->xdp_tx++; 952 xdpf = xdp_convert_buff_to_frame(&xdp); 953 if (unlikely(!xdpf)) 954 goto err_xdp; 955 err = virtnet_xdp_xmit(dev, 1, &xdpf, 0); 956 if (unlikely(!err)) { 957 xdp_return_frame_rx_napi(xdpf); 958 } else if (unlikely(err < 0)) { 959 trace_xdp_exception(vi->dev, xdp_prog, act); 960 goto err_xdp; 961 } 962 *xdp_xmit |= VIRTIO_XDP_TX; 963 rcu_read_unlock(); 964 goto xdp_xmit; 965 case XDP_REDIRECT: 966 stats->xdp_redirects++; 967 err = xdp_do_redirect(dev, &xdp, xdp_prog); 968 if (err) 969 goto err_xdp; 970 *xdp_xmit |= VIRTIO_XDP_REDIR; 971 rcu_read_unlock(); 972 goto xdp_xmit; 973 default: 
974 bpf_warn_invalid_xdp_action(vi->dev, xdp_prog, act); 975 fallthrough; 976 case XDP_ABORTED: 977 trace_xdp_exception(vi->dev, xdp_prog, act); 978 goto err_xdp; 979 case XDP_DROP: 980 goto err_xdp; 981 } 982 } 983 rcu_read_unlock(); 984 985 skip_xdp: 986 skb = build_skb(buf, buflen); 987 if (!skb) 988 goto err; 989 skb_reserve(skb, headroom - delta); 990 skb_put(skb, len); 991 if (!xdp_prog) { 992 buf += header_offset; 993 memcpy(skb_vnet_hdr(skb), buf, vi->hdr_len); 994 } /* keep zeroed vnet hdr since XDP is loaded */ 995 996 if (metasize) 997 skb_metadata_set(skb, metasize); 998 999 return skb; 1000 1001 err_xdp: 1002 rcu_read_unlock(); 1003 stats->xdp_drops++; 1004 err: 1005 stats->drops++; 1006 put_page(page); 1007 xdp_xmit: 1008 return NULL; 1009 } 1010 1011 static struct sk_buff *receive_big(struct net_device *dev, 1012 struct virtnet_info *vi, 1013 struct receive_queue *rq, 1014 void *buf, 1015 unsigned int len, 1016 struct virtnet_rq_stats *stats) 1017 { 1018 struct page *page = buf; 1019 struct sk_buff *skb = 1020 page_to_skb(vi, rq, page, 0, len, PAGE_SIZE, 0); 1021 1022 stats->bytes += len - vi->hdr_len; 1023 if (unlikely(!skb)) 1024 goto err; 1025 1026 return skb; 1027 1028 err: 1029 stats->drops++; 1030 give_pages(rq, page); 1031 return NULL; 1032 } 1033 1034 /* Why not use xdp_build_skb_from_frame() ? 1035 * XDP core assumes that xdp frags are PAGE_SIZE in length, while in 1036 * virtio-net there are 2 points that do not match its requirements: 1037 * 1. The size of the prefilled buffer is not fixed before xdp is set. 1038 * 2. xdp_build_skb_from_frame() does more checks that we don't need, 1039 * like eth_type_trans() (which virtio-net does in receive_buf()). 1040 */ 1041 static struct sk_buff *build_skb_from_xdp_buff(struct net_device *dev, 1042 struct virtnet_info *vi, 1043 struct xdp_buff *xdp, 1044 unsigned int xdp_frags_truesz) 1045 { 1046 struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp); 1047 unsigned int headroom, data_len; 1048 struct sk_buff *skb; 1049 int metasize; 1050 u8 nr_frags; 1051 1052 if (unlikely(xdp->data_end > xdp_data_hard_end(xdp))) { 1053 pr_debug("Error building skb as missing reserved tailroom for xdp"); 1054 return NULL; 1055 } 1056 1057 if (unlikely(xdp_buff_has_frags(xdp))) 1058 nr_frags = sinfo->nr_frags; 1059 1060 skb = build_skb(xdp->data_hard_start, xdp->frame_sz); 1061 if (unlikely(!skb)) 1062 return NULL; 1063 1064 headroom = xdp->data - xdp->data_hard_start; 1065 data_len = xdp->data_end - xdp->data; 1066 skb_reserve(skb, headroom); 1067 __skb_put(skb, data_len); 1068 1069 metasize = xdp->data - xdp->data_meta; 1070 metasize = metasize > 0 ? 
metasize : 0; 1071 if (metasize) 1072 skb_metadata_set(skb, metasize); 1073 1074 if (unlikely(xdp_buff_has_frags(xdp))) 1075 xdp_update_skb_shared_info(skb, nr_frags, 1076 sinfo->xdp_frags_size, 1077 xdp_frags_truesz, 1078 xdp_buff_is_frag_pfmemalloc(xdp)); 1079 1080 return skb; 1081 } 1082 1083 /* TODO: build xdp in big mode */ 1084 static int virtnet_build_xdp_buff_mrg(struct net_device *dev, 1085 struct virtnet_info *vi, 1086 struct receive_queue *rq, 1087 struct xdp_buff *xdp, 1088 void *buf, 1089 unsigned int len, 1090 unsigned int frame_sz, 1091 int *num_buf, 1092 unsigned int *xdp_frags_truesize, 1093 struct virtnet_rq_stats *stats) 1094 { 1095 struct virtio_net_hdr_mrg_rxbuf *hdr = buf; 1096 unsigned int headroom, tailroom, room; 1097 unsigned int truesize, cur_frag_size; 1098 struct skb_shared_info *shinfo; 1099 unsigned int xdp_frags_truesz = 0; 1100 struct page *page; 1101 skb_frag_t *frag; 1102 int offset; 1103 void *ctx; 1104 1105 xdp_init_buff(xdp, frame_sz, &rq->xdp_rxq); 1106 xdp_prepare_buff(xdp, buf - VIRTIO_XDP_HEADROOM, 1107 VIRTIO_XDP_HEADROOM + vi->hdr_len, len - vi->hdr_len, true); 1108 1109 if (!*num_buf) 1110 return 0; 1111 1112 if (*num_buf > 1) { 1113 /* If we want to build multi-buffer xdp, we need 1114 * to specify that the flags of xdp_buff have the 1115 * XDP_FLAGS_HAS_FRAG bit. 1116 */ 1117 if (!xdp_buff_has_frags(xdp)) 1118 xdp_buff_set_frags_flag(xdp); 1119 1120 shinfo = xdp_get_shared_info_from_buff(xdp); 1121 shinfo->nr_frags = 0; 1122 shinfo->xdp_frags_size = 0; 1123 } 1124 1125 if (*num_buf > MAX_SKB_FRAGS + 1) 1126 return -EINVAL; 1127 1128 while (--*num_buf > 0) { 1129 buf = virtqueue_get_buf_ctx(rq->vq, &len, &ctx); 1130 if (unlikely(!buf)) { 1131 pr_debug("%s: rx error: %d buffers out of %d missing\n", 1132 dev->name, *num_buf, 1133 virtio16_to_cpu(vi->vdev, hdr->num_buffers)); 1134 dev->stats.rx_length_errors++; 1135 return -EINVAL; 1136 } 1137 1138 stats->bytes += len; 1139 page = virt_to_head_page(buf); 1140 offset = buf - page_address(page); 1141 1142 truesize = mergeable_ctx_to_truesize(ctx); 1143 headroom = mergeable_ctx_to_headroom(ctx); 1144 tailroom = headroom ? 
sizeof(struct skb_shared_info) : 0; 1145 room = SKB_DATA_ALIGN(headroom + tailroom); 1146 1147 cur_frag_size = truesize; 1148 xdp_frags_truesz += cur_frag_size; 1149 if (unlikely(len > truesize - room || cur_frag_size > PAGE_SIZE)) { 1150 put_page(page); 1151 pr_debug("%s: rx error: len %u exceeds truesize %lu\n", 1152 dev->name, len, (unsigned long)(truesize - room)); 1153 dev->stats.rx_length_errors++; 1154 return -EINVAL; 1155 } 1156 1157 frag = &shinfo->frags[shinfo->nr_frags++]; 1158 __skb_frag_set_page(frag, page); 1159 skb_frag_off_set(frag, offset); 1160 skb_frag_size_set(frag, len); 1161 if (page_is_pfmemalloc(page)) 1162 xdp_buff_set_frag_pfmemalloc(xdp); 1163 1164 shinfo->xdp_frags_size += len; 1165 } 1166 1167 *xdp_frags_truesize = xdp_frags_truesz; 1168 return 0; 1169 } 1170 1171 static struct sk_buff *receive_mergeable(struct net_device *dev, 1172 struct virtnet_info *vi, 1173 struct receive_queue *rq, 1174 void *buf, 1175 void *ctx, 1176 unsigned int len, 1177 unsigned int *xdp_xmit, 1178 struct virtnet_rq_stats *stats) 1179 { 1180 struct virtio_net_hdr_mrg_rxbuf *hdr = buf; 1181 int num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers); 1182 struct page *page = virt_to_head_page(buf); 1183 int offset = buf - page_address(page); 1184 struct sk_buff *head_skb, *curr_skb; 1185 struct bpf_prog *xdp_prog; 1186 unsigned int truesize = mergeable_ctx_to_truesize(ctx); 1187 unsigned int headroom = mergeable_ctx_to_headroom(ctx); 1188 unsigned int tailroom = headroom ? sizeof(struct skb_shared_info) : 0; 1189 unsigned int room = SKB_DATA_ALIGN(headroom + tailroom); 1190 unsigned int frame_sz, xdp_room; 1191 int err; 1192 1193 head_skb = NULL; 1194 stats->bytes += len - vi->hdr_len; 1195 1196 if (unlikely(len > truesize - room)) { 1197 pr_debug("%s: rx error: len %u exceeds truesize %lu\n", 1198 dev->name, len, (unsigned long)(truesize - room)); 1199 dev->stats.rx_length_errors++; 1200 goto err_skb; 1201 } 1202 1203 if (likely(!vi->xdp_enabled)) { 1204 xdp_prog = NULL; 1205 goto skip_xdp; 1206 } 1207 1208 rcu_read_lock(); 1209 xdp_prog = rcu_dereference(rq->xdp_prog); 1210 if (xdp_prog) { 1211 unsigned int xdp_frags_truesz = 0; 1212 struct skb_shared_info *shinfo; 1213 struct xdp_frame *xdpf; 1214 struct page *xdp_page; 1215 struct xdp_buff xdp; 1216 void *data; 1217 u32 act; 1218 int i; 1219 1220 /* Transient failure which in theory could occur if 1221 * in-flight packets from before XDP was enabled reach 1222 * the receive path after XDP is loaded. 1223 */ 1224 if (unlikely(hdr->hdr.gso_type)) 1225 goto err_xdp; 1226 1227 /* Now XDP core assumes frag size is PAGE_SIZE, but buffers 1228 * with headroom may add hole in truesize, which 1229 * make their length exceed PAGE_SIZE. So we disabled the 1230 * hole mechanism for xdp. See add_recvbuf_mergeable(). 1231 */ 1232 frame_sz = truesize; 1233 1234 /* This happens when headroom is not enough because 1235 * of the buffer was prefilled before XDP is set. 1236 * This should only happen for the first several packets. 1237 * In fact, vq reset can be used here to help us clean up 1238 * the prefilled buffers, but many existing devices do not 1239 * support it, and we don't want to bother users who are 1240 * using xdp normally. 
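		 * Instead we linearize or copy into a freshly allocated page
		 * that has the full VIRTIO_XDP_HEADROOM.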
1241 */ 1242 if (!xdp_prog->aux->xdp_has_frags && 1243 (num_buf > 1 || headroom < virtnet_get_headroom(vi))) { 1244 /* linearize data for XDP */ 1245 xdp_page = xdp_linearize_page(rq, &num_buf, 1246 page, offset, 1247 VIRTIO_XDP_HEADROOM, 1248 &len); 1249 frame_sz = PAGE_SIZE; 1250 1251 if (!xdp_page) 1252 goto err_xdp; 1253 offset = VIRTIO_XDP_HEADROOM; 1254 } else if (unlikely(headroom < virtnet_get_headroom(vi))) { 1255 xdp_room = SKB_DATA_ALIGN(VIRTIO_XDP_HEADROOM + 1256 sizeof(struct skb_shared_info)); 1257 if (len + xdp_room > PAGE_SIZE) 1258 goto err_xdp; 1259 1260 xdp_page = alloc_page(GFP_ATOMIC); 1261 if (!xdp_page) 1262 goto err_xdp; 1263 1264 memcpy(page_address(xdp_page) + VIRTIO_XDP_HEADROOM, 1265 page_address(page) + offset, len); 1266 frame_sz = PAGE_SIZE; 1267 offset = VIRTIO_XDP_HEADROOM; 1268 } else { 1269 xdp_page = page; 1270 } 1271 1272 data = page_address(xdp_page) + offset; 1273 err = virtnet_build_xdp_buff_mrg(dev, vi, rq, &xdp, data, len, frame_sz, 1274 &num_buf, &xdp_frags_truesz, stats); 1275 if (unlikely(err)) 1276 goto err_xdp_frags; 1277 1278 act = bpf_prog_run_xdp(xdp_prog, &xdp); 1279 stats->xdp_packets++; 1280 1281 switch (act) { 1282 case XDP_PASS: 1283 head_skb = build_skb_from_xdp_buff(dev, vi, &xdp, xdp_frags_truesz); 1284 if (unlikely(!head_skb)) 1285 goto err_xdp_frags; 1286 1287 if (unlikely(xdp_page != page)) 1288 put_page(page); 1289 rcu_read_unlock(); 1290 return head_skb; 1291 case XDP_TX: 1292 stats->xdp_tx++; 1293 xdpf = xdp_convert_buff_to_frame(&xdp); 1294 if (unlikely(!xdpf)) { 1295 netdev_dbg(dev, "convert buff to frame failed for xdp\n"); 1296 goto err_xdp_frags; 1297 } 1298 err = virtnet_xdp_xmit(dev, 1, &xdpf, 0); 1299 if (unlikely(!err)) { 1300 xdp_return_frame_rx_napi(xdpf); 1301 } else if (unlikely(err < 0)) { 1302 trace_xdp_exception(vi->dev, xdp_prog, act); 1303 goto err_xdp_frags; 1304 } 1305 *xdp_xmit |= VIRTIO_XDP_TX; 1306 if (unlikely(xdp_page != page)) 1307 put_page(page); 1308 rcu_read_unlock(); 1309 goto xdp_xmit; 1310 case XDP_REDIRECT: 1311 stats->xdp_redirects++; 1312 err = xdp_do_redirect(dev, &xdp, xdp_prog); 1313 if (err) 1314 goto err_xdp_frags; 1315 *xdp_xmit |= VIRTIO_XDP_REDIR; 1316 if (unlikely(xdp_page != page)) 1317 put_page(page); 1318 rcu_read_unlock(); 1319 goto xdp_xmit; 1320 default: 1321 bpf_warn_invalid_xdp_action(vi->dev, xdp_prog, act); 1322 fallthrough; 1323 case XDP_ABORTED: 1324 trace_xdp_exception(vi->dev, xdp_prog, act); 1325 fallthrough; 1326 case XDP_DROP: 1327 goto err_xdp_frags; 1328 } 1329 err_xdp_frags: 1330 if (unlikely(xdp_page != page)) 1331 __free_pages(xdp_page, 0); 1332 1333 if (xdp_buff_has_frags(&xdp)) { 1334 shinfo = xdp_get_shared_info_from_buff(&xdp); 1335 for (i = 0; i < shinfo->nr_frags; i++) { 1336 xdp_page = skb_frag_page(&shinfo->frags[i]); 1337 put_page(xdp_page); 1338 } 1339 } 1340 1341 goto err_xdp; 1342 } 1343 rcu_read_unlock(); 1344 1345 skip_xdp: 1346 head_skb = page_to_skb(vi, rq, page, offset, len, truesize, headroom); 1347 curr_skb = head_skb; 1348 1349 if (unlikely(!curr_skb)) 1350 goto err_skb; 1351 while (--num_buf) { 1352 int num_skb_frags; 1353 1354 buf = virtqueue_get_buf_ctx(rq->vq, &len, &ctx); 1355 if (unlikely(!buf)) { 1356 pr_debug("%s: rx error: %d buffers out of %d missing\n", 1357 dev->name, num_buf, 1358 virtio16_to_cpu(vi->vdev, 1359 hdr->num_buffers)); 1360 dev->stats.rx_length_errors++; 1361 goto err_buf; 1362 } 1363 1364 stats->bytes += len; 1365 page = virt_to_head_page(buf); 1366 1367 truesize = mergeable_ctx_to_truesize(ctx); 1368 headroom = 
mergeable_ctx_to_headroom(ctx); 1369 tailroom = headroom ? sizeof(struct skb_shared_info) : 0; 1370 room = SKB_DATA_ALIGN(headroom + tailroom); 1371 if (unlikely(len > truesize - room)) { 1372 pr_debug("%s: rx error: len %u exceeds truesize %lu\n", 1373 dev->name, len, (unsigned long)(truesize - room)); 1374 dev->stats.rx_length_errors++; 1375 goto err_skb; 1376 } 1377 1378 num_skb_frags = skb_shinfo(curr_skb)->nr_frags; 1379 if (unlikely(num_skb_frags == MAX_SKB_FRAGS)) { 1380 struct sk_buff *nskb = alloc_skb(0, GFP_ATOMIC); 1381 1382 if (unlikely(!nskb)) 1383 goto err_skb; 1384 if (curr_skb == head_skb) 1385 skb_shinfo(curr_skb)->frag_list = nskb; 1386 else 1387 curr_skb->next = nskb; 1388 curr_skb = nskb; 1389 head_skb->truesize += nskb->truesize; 1390 num_skb_frags = 0; 1391 } 1392 if (curr_skb != head_skb) { 1393 head_skb->data_len += len; 1394 head_skb->len += len; 1395 head_skb->truesize += truesize; 1396 } 1397 offset = buf - page_address(page); 1398 if (skb_can_coalesce(curr_skb, num_skb_frags, page, offset)) { 1399 put_page(page); 1400 skb_coalesce_rx_frag(curr_skb, num_skb_frags - 1, 1401 len, truesize); 1402 } else { 1403 skb_add_rx_frag(curr_skb, num_skb_frags, page, 1404 offset, len, truesize); 1405 } 1406 } 1407 1408 ewma_pkt_len_add(&rq->mrg_avg_pkt_len, head_skb->len); 1409 return head_skb; 1410 1411 err_xdp: 1412 rcu_read_unlock(); 1413 stats->xdp_drops++; 1414 err_skb: 1415 put_page(page); 1416 while (num_buf-- > 1) { 1417 buf = virtqueue_get_buf(rq->vq, &len); 1418 if (unlikely(!buf)) { 1419 pr_debug("%s: rx error: %d buffers missing\n", 1420 dev->name, num_buf); 1421 dev->stats.rx_length_errors++; 1422 break; 1423 } 1424 stats->bytes += len; 1425 page = virt_to_head_page(buf); 1426 put_page(page); 1427 } 1428 err_buf: 1429 stats->drops++; 1430 dev_kfree_skb(head_skb); 1431 xdp_xmit: 1432 return NULL; 1433 } 1434 1435 static void virtio_skb_set_hash(const struct virtio_net_hdr_v1_hash *hdr_hash, 1436 struct sk_buff *skb) 1437 { 1438 enum pkt_hash_types rss_hash_type; 1439 1440 if (!hdr_hash || !skb) 1441 return; 1442 1443 switch (__le16_to_cpu(hdr_hash->hash_report)) { 1444 case VIRTIO_NET_HASH_REPORT_TCPv4: 1445 case VIRTIO_NET_HASH_REPORT_UDPv4: 1446 case VIRTIO_NET_HASH_REPORT_TCPv6: 1447 case VIRTIO_NET_HASH_REPORT_UDPv6: 1448 case VIRTIO_NET_HASH_REPORT_TCPv6_EX: 1449 case VIRTIO_NET_HASH_REPORT_UDPv6_EX: 1450 rss_hash_type = PKT_HASH_TYPE_L4; 1451 break; 1452 case VIRTIO_NET_HASH_REPORT_IPv4: 1453 case VIRTIO_NET_HASH_REPORT_IPv6: 1454 case VIRTIO_NET_HASH_REPORT_IPv6_EX: 1455 rss_hash_type = PKT_HASH_TYPE_L3; 1456 break; 1457 case VIRTIO_NET_HASH_REPORT_NONE: 1458 default: 1459 rss_hash_type = PKT_HASH_TYPE_NONE; 1460 } 1461 skb_set_hash(skb, __le32_to_cpu(hdr_hash->hash_value), rss_hash_type); 1462 } 1463 1464 static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq, 1465 void *buf, unsigned int len, void **ctx, 1466 unsigned int *xdp_xmit, 1467 struct virtnet_rq_stats *stats) 1468 { 1469 struct net_device *dev = vi->dev; 1470 struct sk_buff *skb; 1471 struct virtio_net_hdr_mrg_rxbuf *hdr; 1472 1473 if (unlikely(len < vi->hdr_len + ETH_HLEN)) { 1474 pr_debug("%s: short packet %i\n", dev->name, len); 1475 dev->stats.rx_length_errors++; 1476 virtnet_rq_free_unused_buf(rq->vq, buf); 1477 return; 1478 } 1479 1480 if (vi->mergeable_rx_bufs) 1481 skb = receive_mergeable(dev, vi, rq, buf, ctx, len, xdp_xmit, 1482 stats); 1483 else if (vi->big_packets) 1484 skb = receive_big(dev, vi, rq, buf, len, stats); 1485 else 1486 skb = receive_small(dev, vi, rq, 
buf, ctx, len, xdp_xmit, stats); 1487 1488 if (unlikely(!skb)) 1489 return; 1490 1491 hdr = skb_vnet_hdr(skb); 1492 if (dev->features & NETIF_F_RXHASH && vi->has_rss_hash_report) 1493 virtio_skb_set_hash((const struct virtio_net_hdr_v1_hash *)hdr, skb); 1494 1495 if (hdr->hdr.flags & VIRTIO_NET_HDR_F_DATA_VALID) 1496 skb->ip_summed = CHECKSUM_UNNECESSARY; 1497 1498 if (virtio_net_hdr_to_skb(skb, &hdr->hdr, 1499 virtio_is_little_endian(vi->vdev))) { 1500 net_warn_ratelimited("%s: bad gso: type: %u, size: %u\n", 1501 dev->name, hdr->hdr.gso_type, 1502 hdr->hdr.gso_size); 1503 goto frame_err; 1504 } 1505 1506 skb_record_rx_queue(skb, vq2rxq(rq->vq)); 1507 skb->protocol = eth_type_trans(skb, dev); 1508 pr_debug("Receiving skb proto 0x%04x len %i type %i\n", 1509 ntohs(skb->protocol), skb->len, skb->pkt_type); 1510 1511 napi_gro_receive(&rq->napi, skb); 1512 return; 1513 1514 frame_err: 1515 dev->stats.rx_frame_errors++; 1516 dev_kfree_skb(skb); 1517 } 1518 1519 /* Unlike mergeable buffers, all buffers are allocated to the 1520 * same size, except for the headroom. For this reason we do 1521 * not need to use mergeable_len_to_ctx here - it is enough 1522 * to store the headroom as the context ignoring the truesize. 1523 */ 1524 static int add_recvbuf_small(struct virtnet_info *vi, struct receive_queue *rq, 1525 gfp_t gfp) 1526 { 1527 struct page_frag *alloc_frag = &rq->alloc_frag; 1528 char *buf; 1529 unsigned int xdp_headroom = virtnet_get_headroom(vi); 1530 void *ctx = (void *)(unsigned long)xdp_headroom; 1531 int len = vi->hdr_len + VIRTNET_RX_PAD + GOOD_PACKET_LEN + xdp_headroom; 1532 int err; 1533 1534 len = SKB_DATA_ALIGN(len) + 1535 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); 1536 if (unlikely(!skb_page_frag_refill(len, alloc_frag, gfp))) 1537 return -ENOMEM; 1538 1539 buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset; 1540 get_page(alloc_frag->page); 1541 alloc_frag->offset += len; 1542 sg_init_one(rq->sg, buf + VIRTNET_RX_PAD + xdp_headroom, 1543 vi->hdr_len + GOOD_PACKET_LEN); 1544 err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp); 1545 if (err < 0) 1546 put_page(virt_to_head_page(buf)); 1547 return err; 1548 } 1549 1550 static int add_recvbuf_big(struct virtnet_info *vi, struct receive_queue *rq, 1551 gfp_t gfp) 1552 { 1553 struct page *first, *list = NULL; 1554 char *p; 1555 int i, err, offset; 1556 1557 sg_init_table(rq->sg, vi->big_packets_num_skbfrags + 2); 1558 1559 /* page in rq->sg[vi->big_packets_num_skbfrags + 1] is list tail */ 1560 for (i = vi->big_packets_num_skbfrags + 1; i > 1; --i) { 1561 first = get_a_page(rq, gfp); 1562 if (!first) { 1563 if (list) 1564 give_pages(rq, list); 1565 return -ENOMEM; 1566 } 1567 sg_set_buf(&rq->sg[i], page_address(first), PAGE_SIZE); 1568 1569 /* chain new page in list head to match sg */ 1570 first->private = (unsigned long)list; 1571 list = first; 1572 } 1573 1574 first = get_a_page(rq, gfp); 1575 if (!first) { 1576 give_pages(rq, list); 1577 return -ENOMEM; 1578 } 1579 p = page_address(first); 1580 1581 /* rq->sg[0], rq->sg[1] share the same page */ 1582 /* a separated rq->sg[0] for header - required in case !any_header_sg */ 1583 sg_set_buf(&rq->sg[0], p, vi->hdr_len); 1584 1585 /* rq->sg[1] for data packet, from offset */ 1586 offset = sizeof(struct padded_vnet_hdr); 1587 sg_set_buf(&rq->sg[1], p + offset, PAGE_SIZE - offset); 1588 1589 /* chain first in list head */ 1590 first->private = (unsigned long)list; 1591 err = virtqueue_add_inbuf(rq->vq, rq->sg, vi->big_packets_num_skbfrags + 2, 1592 first, 
gfp); 1593 if (err < 0) 1594 give_pages(rq, first); 1595 1596 return err; 1597 } 1598 1599 static unsigned int get_mergeable_buf_len(struct receive_queue *rq, 1600 struct ewma_pkt_len *avg_pkt_len, 1601 unsigned int room) 1602 { 1603 struct virtnet_info *vi = rq->vq->vdev->priv; 1604 const size_t hdr_len = vi->hdr_len; 1605 unsigned int len; 1606 1607 if (room) 1608 return PAGE_SIZE - room; 1609 1610 len = hdr_len + clamp_t(unsigned int, ewma_pkt_len_read(avg_pkt_len), 1611 rq->min_buf_len, PAGE_SIZE - hdr_len); 1612 1613 return ALIGN(len, L1_CACHE_BYTES); 1614 } 1615 1616 static int add_recvbuf_mergeable(struct virtnet_info *vi, 1617 struct receive_queue *rq, gfp_t gfp) 1618 { 1619 struct page_frag *alloc_frag = &rq->alloc_frag; 1620 unsigned int headroom = virtnet_get_headroom(vi); 1621 unsigned int tailroom = headroom ? sizeof(struct skb_shared_info) : 0; 1622 unsigned int room = SKB_DATA_ALIGN(headroom + tailroom); 1623 char *buf; 1624 void *ctx; 1625 int err; 1626 unsigned int len, hole; 1627 1628 /* Extra tailroom is needed to satisfy XDP's assumption. This 1629 * means rx frags coalescing won't work, but consider we've 1630 * disabled GSO for XDP, it won't be a big issue. 1631 */ 1632 len = get_mergeable_buf_len(rq, &rq->mrg_avg_pkt_len, room); 1633 if (unlikely(!skb_page_frag_refill(len + room, alloc_frag, gfp))) 1634 return -ENOMEM; 1635 1636 buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset; 1637 buf += headroom; /* advance address leaving hole at front of pkt */ 1638 get_page(alloc_frag->page); 1639 alloc_frag->offset += len + room; 1640 hole = alloc_frag->size - alloc_frag->offset; 1641 if (hole < len + room) { 1642 /* To avoid internal fragmentation, if there is very likely not 1643 * enough space for another buffer, add the remaining space to 1644 * the current buffer. 1645 * XDP core assumes that frame_size of xdp_buff and the length 1646 * of the frag are PAGE_SIZE, so we disable the hole mechanism. 1647 */ 1648 if (!headroom) 1649 len += hole; 1650 alloc_frag->offset += hole; 1651 } 1652 1653 sg_init_one(rq->sg, buf, len); 1654 ctx = mergeable_len_to_ctx(len + room, headroom); 1655 err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp); 1656 if (err < 0) 1657 put_page(virt_to_head_page(buf)); 1658 1659 return err; 1660 } 1661 1662 /* 1663 * Returns false if we couldn't fill entirely (OOM). 1664 * 1665 * Normally run in the receive path, but can also be run from ndo_open 1666 * before we're receiving packets, or from refill_work which is 1667 * careful to disable receiving (using napi_disable). 
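 * Buffers are added until the ring is full or an allocation fails; the
 * device is then notified with a single kick.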
1668 */ 1669 static bool try_fill_recv(struct virtnet_info *vi, struct receive_queue *rq, 1670 gfp_t gfp) 1671 { 1672 int err; 1673 bool oom; 1674 1675 do { 1676 if (vi->mergeable_rx_bufs) 1677 err = add_recvbuf_mergeable(vi, rq, gfp); 1678 else if (vi->big_packets) 1679 err = add_recvbuf_big(vi, rq, gfp); 1680 else 1681 err = add_recvbuf_small(vi, rq, gfp); 1682 1683 oom = err == -ENOMEM; 1684 if (err) 1685 break; 1686 } while (rq->vq->num_free); 1687 if (virtqueue_kick_prepare(rq->vq) && virtqueue_notify(rq->vq)) { 1688 unsigned long flags; 1689 1690 flags = u64_stats_update_begin_irqsave(&rq->stats.syncp); 1691 rq->stats.kicks++; 1692 u64_stats_update_end_irqrestore(&rq->stats.syncp, flags); 1693 } 1694 1695 return !oom; 1696 } 1697 1698 static void skb_recv_done(struct virtqueue *rvq) 1699 { 1700 struct virtnet_info *vi = rvq->vdev->priv; 1701 struct receive_queue *rq = &vi->rq[vq2rxq(rvq)]; 1702 1703 virtqueue_napi_schedule(&rq->napi, rvq); 1704 } 1705 1706 static void virtnet_napi_enable(struct virtqueue *vq, struct napi_struct *napi) 1707 { 1708 napi_enable(napi); 1709 1710 /* If all buffers were filled by other side before we napi_enabled, we 1711 * won't get another interrupt, so process any outstanding packets now. 1712 * Call local_bh_enable after to trigger softIRQ processing. 1713 */ 1714 local_bh_disable(); 1715 virtqueue_napi_schedule(napi, vq); 1716 local_bh_enable(); 1717 } 1718 1719 static void virtnet_napi_tx_enable(struct virtnet_info *vi, 1720 struct virtqueue *vq, 1721 struct napi_struct *napi) 1722 { 1723 if (!napi->weight) 1724 return; 1725 1726 /* Tx napi touches cachelines on the cpu handling tx interrupts. Only 1727 * enable the feature if this is likely affine with the transmit path. 1728 */ 1729 if (!vi->affinity_hint_set) { 1730 napi->weight = 0; 1731 return; 1732 } 1733 1734 return virtnet_napi_enable(vq, napi); 1735 } 1736 1737 static void virtnet_napi_tx_disable(struct napi_struct *napi) 1738 { 1739 if (napi->weight) 1740 napi_disable(napi); 1741 } 1742 1743 static void refill_work(struct work_struct *work) 1744 { 1745 struct virtnet_info *vi = 1746 container_of(work, struct virtnet_info, refill.work); 1747 bool still_empty; 1748 int i; 1749 1750 for (i = 0; i < vi->curr_queue_pairs; i++) { 1751 struct receive_queue *rq = &vi->rq[i]; 1752 1753 napi_disable(&rq->napi); 1754 still_empty = !try_fill_recv(vi, rq, GFP_KERNEL); 1755 virtnet_napi_enable(rq->vq, &rq->napi); 1756 1757 /* In theory, this can happen: if we don't get any buffers in 1758 * we will *never* try to fill again. 
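		 * Re-arm the delayed work (HZ/2) so the refill is retried
		 * after memory pressure has had a chance to ease.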
1759 */ 1760 if (still_empty) 1761 schedule_delayed_work(&vi->refill, HZ/2); 1762 } 1763 } 1764 1765 static int virtnet_receive(struct receive_queue *rq, int budget, 1766 unsigned int *xdp_xmit) 1767 { 1768 struct virtnet_info *vi = rq->vq->vdev->priv; 1769 struct virtnet_rq_stats stats = {}; 1770 unsigned int len; 1771 void *buf; 1772 int i; 1773 1774 if (!vi->big_packets || vi->mergeable_rx_bufs) { 1775 void *ctx; 1776 1777 while (stats.packets < budget && 1778 (buf = virtqueue_get_buf_ctx(rq->vq, &len, &ctx))) { 1779 receive_buf(vi, rq, buf, len, ctx, xdp_xmit, &stats); 1780 stats.packets++; 1781 } 1782 } else { 1783 while (stats.packets < budget && 1784 (buf = virtqueue_get_buf(rq->vq, &len)) != NULL) { 1785 receive_buf(vi, rq, buf, len, NULL, xdp_xmit, &stats); 1786 stats.packets++; 1787 } 1788 } 1789 1790 if (rq->vq->num_free > min((unsigned int)budget, virtqueue_get_vring_size(rq->vq)) / 2) { 1791 if (!try_fill_recv(vi, rq, GFP_ATOMIC)) { 1792 spin_lock(&vi->refill_lock); 1793 if (vi->refill_enabled) 1794 schedule_delayed_work(&vi->refill, 0); 1795 spin_unlock(&vi->refill_lock); 1796 } 1797 } 1798 1799 u64_stats_update_begin(&rq->stats.syncp); 1800 for (i = 0; i < VIRTNET_RQ_STATS_LEN; i++) { 1801 size_t offset = virtnet_rq_stats_desc[i].offset; 1802 u64 *item; 1803 1804 item = (u64 *)((u8 *)&rq->stats + offset); 1805 *item += *(u64 *)((u8 *)&stats + offset); 1806 } 1807 u64_stats_update_end(&rq->stats.syncp); 1808 1809 return stats.packets; 1810 } 1811 1812 static void virtnet_poll_cleantx(struct receive_queue *rq) 1813 { 1814 struct virtnet_info *vi = rq->vq->vdev->priv; 1815 unsigned int index = vq2rxq(rq->vq); 1816 struct send_queue *sq = &vi->sq[index]; 1817 struct netdev_queue *txq = netdev_get_tx_queue(vi->dev, index); 1818 1819 if (!sq->napi.weight || is_xdp_raw_buffer_queue(vi, index)) 1820 return; 1821 1822 if (__netif_tx_trylock(txq)) { 1823 if (sq->reset) { 1824 __netif_tx_unlock(txq); 1825 return; 1826 } 1827 1828 do { 1829 virtqueue_disable_cb(sq->vq); 1830 free_old_xmit_skbs(sq, true); 1831 } while (unlikely(!virtqueue_enable_cb_delayed(sq->vq))); 1832 1833 if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS) 1834 netif_tx_wake_queue(txq); 1835 1836 __netif_tx_unlock(txq); 1837 } 1838 } 1839 1840 static int virtnet_poll(struct napi_struct *napi, int budget) 1841 { 1842 struct receive_queue *rq = 1843 container_of(napi, struct receive_queue, napi); 1844 struct virtnet_info *vi = rq->vq->vdev->priv; 1845 struct send_queue *sq; 1846 unsigned int received; 1847 unsigned int xdp_xmit = 0; 1848 1849 virtnet_poll_cleantx(rq); 1850 1851 received = virtnet_receive(rq, budget, &xdp_xmit); 1852 1853 if (xdp_xmit & VIRTIO_XDP_REDIR) 1854 xdp_do_flush(); 1855 1856 /* Out of packets? 
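	 * If we received less than the budget the ring is drained, so
	 * complete NAPI and re-arm the virtqueue callback.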
*/ 1857 if (received < budget) 1858 virtqueue_napi_complete(napi, rq->vq, received); 1859 1860 if (xdp_xmit & VIRTIO_XDP_TX) { 1861 sq = virtnet_xdp_get_sq(vi); 1862 if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq)) { 1863 u64_stats_update_begin(&sq->stats.syncp); 1864 sq->stats.kicks++; 1865 u64_stats_update_end(&sq->stats.syncp); 1866 } 1867 virtnet_xdp_put_sq(vi, sq); 1868 } 1869 1870 return received; 1871 } 1872 1873 static void virtnet_disable_queue_pair(struct virtnet_info *vi, int qp_index) 1874 { 1875 virtnet_napi_tx_disable(&vi->sq[qp_index].napi); 1876 napi_disable(&vi->rq[qp_index].napi); 1877 xdp_rxq_info_unreg(&vi->rq[qp_index].xdp_rxq); 1878 } 1879 1880 static int virtnet_enable_queue_pair(struct virtnet_info *vi, int qp_index) 1881 { 1882 struct net_device *dev = vi->dev; 1883 int err; 1884 1885 err = xdp_rxq_info_reg(&vi->rq[qp_index].xdp_rxq, dev, qp_index, 1886 vi->rq[qp_index].napi.napi_id); 1887 if (err < 0) 1888 return err; 1889 1890 err = xdp_rxq_info_reg_mem_model(&vi->rq[qp_index].xdp_rxq, 1891 MEM_TYPE_PAGE_SHARED, NULL); 1892 if (err < 0) 1893 goto err_xdp_reg_mem_model; 1894 1895 virtnet_napi_enable(vi->rq[qp_index].vq, &vi->rq[qp_index].napi); 1896 virtnet_napi_tx_enable(vi, vi->sq[qp_index].vq, &vi->sq[qp_index].napi); 1897 1898 return 0; 1899 1900 err_xdp_reg_mem_model: 1901 xdp_rxq_info_unreg(&vi->rq[qp_index].xdp_rxq); 1902 return err; 1903 } 1904 1905 static int virtnet_open(struct net_device *dev) 1906 { 1907 struct virtnet_info *vi = netdev_priv(dev); 1908 int i, err; 1909 1910 enable_delayed_refill(vi); 1911 1912 for (i = 0; i < vi->max_queue_pairs; i++) { 1913 if (i < vi->curr_queue_pairs) 1914 /* Make sure we have some buffers: if oom use wq. */ 1915 if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL)) 1916 schedule_delayed_work(&vi->refill, 0); 1917 1918 err = virtnet_enable_queue_pair(vi, i); 1919 if (err < 0) 1920 goto err_enable_qp; 1921 } 1922 1923 return 0; 1924 1925 err_enable_qp: 1926 disable_delayed_refill(vi); 1927 cancel_delayed_work_sync(&vi->refill); 1928 1929 for (i--; i >= 0; i--) 1930 virtnet_disable_queue_pair(vi, i); 1931 return err; 1932 } 1933 1934 static int virtnet_poll_tx(struct napi_struct *napi, int budget) 1935 { 1936 struct send_queue *sq = container_of(napi, struct send_queue, napi); 1937 struct virtnet_info *vi = sq->vq->vdev->priv; 1938 unsigned int index = vq2txq(sq->vq); 1939 struct netdev_queue *txq; 1940 int opaque; 1941 bool done; 1942 1943 if (unlikely(is_xdp_raw_buffer_queue(vi, index))) { 1944 /* We don't need to enable cb for XDP */ 1945 napi_complete_done(napi, 0); 1946 return 0; 1947 } 1948 1949 txq = netdev_get_tx_queue(vi->dev, index); 1950 __netif_tx_lock(txq, raw_smp_processor_id()); 1951 virtqueue_disable_cb(sq->vq); 1952 free_old_xmit_skbs(sq, true); 1953 1954 if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS) 1955 netif_tx_wake_queue(txq); 1956 1957 opaque = virtqueue_enable_cb_prepare(sq->vq); 1958 1959 done = napi_complete_done(napi, 0); 1960 1961 if (!done) 1962 virtqueue_disable_cb(sq->vq); 1963 1964 __netif_tx_unlock(txq); 1965 1966 if (done) { 1967 if (unlikely(virtqueue_poll(sq->vq, opaque))) { 1968 if (napi_schedule_prep(napi)) { 1969 __netif_tx_lock(txq, raw_smp_processor_id()); 1970 virtqueue_disable_cb(sq->vq); 1971 __netif_tx_unlock(txq); 1972 __napi_schedule(napi); 1973 } 1974 } 1975 } 1976 1977 return 0; 1978 } 1979 1980 static int xmit_skb(struct send_queue *sq, struct sk_buff *skb) 1981 { 1982 struct virtio_net_hdr_mrg_rxbuf *hdr; 1983 const unsigned char *dest = ((struct ethhdr 
*)skb->data)->h_dest; 1984 struct virtnet_info *vi = sq->vq->vdev->priv; 1985 int num_sg; 1986 unsigned hdr_len = vi->hdr_len; 1987 bool can_push; 1988 1989 pr_debug("%s: xmit %p %pM\n", vi->dev->name, skb, dest); 1990 1991 can_push = vi->any_header_sg && 1992 !((unsigned long)skb->data & (__alignof__(*hdr) - 1)) && 1993 !skb_header_cloned(skb) && skb_headroom(skb) >= hdr_len; 1994 /* Even if we can, don't push here yet as this would skew 1995 * csum_start offset below. */ 1996 if (can_push) 1997 hdr = (struct virtio_net_hdr_mrg_rxbuf *)(skb->data - hdr_len); 1998 else 1999 hdr = skb_vnet_hdr(skb); 2000 2001 if (virtio_net_hdr_from_skb(skb, &hdr->hdr, 2002 virtio_is_little_endian(vi->vdev), false, 2003 0)) 2004 return -EPROTO; 2005 2006 if (vi->mergeable_rx_bufs) 2007 hdr->num_buffers = 0; 2008 2009 sg_init_table(sq->sg, skb_shinfo(skb)->nr_frags + (can_push ? 1 : 2)); 2010 if (can_push) { 2011 __skb_push(skb, hdr_len); 2012 num_sg = skb_to_sgvec(skb, sq->sg, 0, skb->len); 2013 if (unlikely(num_sg < 0)) 2014 return num_sg; 2015 /* Pull header back to avoid skew in tx bytes calculations. */ 2016 __skb_pull(skb, hdr_len); 2017 } else { 2018 sg_set_buf(sq->sg, hdr, hdr_len); 2019 num_sg = skb_to_sgvec(skb, sq->sg + 1, 0, skb->len); 2020 if (unlikely(num_sg < 0)) 2021 return num_sg; 2022 num_sg++; 2023 } 2024 return virtqueue_add_outbuf(sq->vq, sq->sg, num_sg, skb, GFP_ATOMIC); 2025 } 2026 2027 static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev) 2028 { 2029 struct virtnet_info *vi = netdev_priv(dev); 2030 int qnum = skb_get_queue_mapping(skb); 2031 struct send_queue *sq = &vi->sq[qnum]; 2032 int err; 2033 struct netdev_queue *txq = netdev_get_tx_queue(dev, qnum); 2034 bool kick = !netdev_xmit_more(); 2035 bool use_napi = sq->napi.weight; 2036 2037 /* Free up any pending old buffers before queueing new ones. */ 2038 do { 2039 if (use_napi) 2040 virtqueue_disable_cb(sq->vq); 2041 2042 free_old_xmit_skbs(sq, false); 2043 2044 } while (use_napi && kick && 2045 unlikely(!virtqueue_enable_cb_delayed(sq->vq))); 2046 2047 /* timestamp packet in software */ 2048 skb_tx_timestamp(skb); 2049 2050 /* Try to transmit */ 2051 err = xmit_skb(sq, skb); 2052 2053 /* This should not happen! */ 2054 if (unlikely(err)) { 2055 dev->stats.tx_fifo_errors++; 2056 if (net_ratelimit()) 2057 dev_warn(&dev->dev, 2058 "Unexpected TXQ (%d) queue failure: %d\n", 2059 qnum, err); 2060 dev->stats.tx_dropped++; 2061 dev_kfree_skb_any(skb); 2062 return NETDEV_TX_OK; 2063 } 2064 2065 /* Don't wait up for transmitted skbs to be freed. 
*/ 2066 if (!use_napi) { 2067 skb_orphan(skb); 2068 nf_reset_ct(skb); 2069 } 2070 2071 check_sq_full_and_disable(vi, dev, sq); 2072 2073 if (kick || netif_xmit_stopped(txq)) { 2074 if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq)) { 2075 u64_stats_update_begin(&sq->stats.syncp); 2076 sq->stats.kicks++; 2077 u64_stats_update_end(&sq->stats.syncp); 2078 } 2079 } 2080 2081 return NETDEV_TX_OK; 2082 } 2083 2084 static int virtnet_rx_resize(struct virtnet_info *vi, 2085 struct receive_queue *rq, u32 ring_num) 2086 { 2087 bool running = netif_running(vi->dev); 2088 int err, qindex; 2089 2090 qindex = rq - vi->rq; 2091 2092 if (running) 2093 napi_disable(&rq->napi); 2094 2095 err = virtqueue_resize(rq->vq, ring_num, virtnet_rq_free_unused_buf); 2096 if (err) 2097 netdev_err(vi->dev, "resize rx fail: rx queue index: %d err: %d\n", qindex, err); 2098 2099 if (!try_fill_recv(vi, rq, GFP_KERNEL)) 2100 schedule_delayed_work(&vi->refill, 0); 2101 2102 if (running) 2103 virtnet_napi_enable(rq->vq, &rq->napi); 2104 return err; 2105 } 2106 2107 static int virtnet_tx_resize(struct virtnet_info *vi, 2108 struct send_queue *sq, u32 ring_num) 2109 { 2110 bool running = netif_running(vi->dev); 2111 struct netdev_queue *txq; 2112 int err, qindex; 2113 2114 qindex = sq - vi->sq; 2115 2116 if (running) 2117 virtnet_napi_tx_disable(&sq->napi); 2118 2119 txq = netdev_get_tx_queue(vi->dev, qindex); 2120 2121 /* 1. wait for all in-flight xmit to complete 2122 * 2. fix the race of netif_stop_subqueue() vs netif_start_subqueue() 2123 */ 2124 __netif_tx_lock_bh(txq); 2125 2126 /* Prevent rx poll from accessing sq. */ 2127 sq->reset = true; 2128 2129 /* Prevent the upper layer from trying to send packets. */ 2130 netif_stop_subqueue(vi->dev, qindex); 2131 2132 __netif_tx_unlock_bh(txq); 2133 2134 err = virtqueue_resize(sq->vq, ring_num, virtnet_sq_free_unused_buf); 2135 if (err) 2136 netdev_err(vi->dev, "resize tx fail: tx queue index: %d err: %d\n", qindex, err); 2137 2138 __netif_tx_lock_bh(txq); 2139 sq->reset = false; 2140 netif_tx_wake_queue(txq); 2141 __netif_tx_unlock_bh(txq); 2142 2143 if (running) 2144 virtnet_napi_tx_enable(vi, sq->vq, &sq->napi); 2145 return err; 2146 } 2147 2148 /* 2149 * Send command via the control virtqueue and check status. Commands 2150 * supported by the hypervisor, as indicated by feature bits, should 2151 * never fail unless improperly formatted. 2152 */ 2153 static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd, 2154 struct scatterlist *out) 2155 { 2156 struct scatterlist *sgs[4], hdr, stat; 2157 unsigned out_num = 0, tmp; 2158 int ret; 2159 2160 /* Caller should know better */ 2161 BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ)); 2162 2163 vi->ctrl->status = ~0; 2164 vi->ctrl->hdr.class = class; 2165 vi->ctrl->hdr.cmd = cmd; 2166 /* Add header */ 2167 sg_init_one(&hdr, &vi->ctrl->hdr, sizeof(vi->ctrl->hdr)); 2168 sgs[out_num++] = &hdr; 2169 2170 if (out) 2171 sgs[out_num++] = out; 2172 2173 /* Add return status.
*/ 2174 sg_init_one(&stat, &vi->ctrl->status, sizeof(vi->ctrl->status)); 2175 sgs[out_num] = &stat; 2176 2177 BUG_ON(out_num + 1 > ARRAY_SIZE(sgs)); 2178 ret = virtqueue_add_sgs(vi->cvq, sgs, out_num, 1, vi, GFP_ATOMIC); 2179 if (ret < 0) { 2180 dev_warn(&vi->vdev->dev, 2181 "Failed to add sgs for command vq: %d\n", ret); 2182 return false; 2183 } 2184 2185 if (unlikely(!virtqueue_kick(vi->cvq))) 2186 return vi->ctrl->status == VIRTIO_NET_OK; 2187 2188 /* Spin for a response; the kick causes an ioport write, trapping 2189 * into the hypervisor, so the request should be handled immediately. 2190 */ 2191 while (!virtqueue_get_buf(vi->cvq, &tmp) && 2192 !virtqueue_is_broken(vi->cvq)) 2193 cpu_relax(); 2194 2195 return vi->ctrl->status == VIRTIO_NET_OK; 2196 } 2197 2198 static int virtnet_set_mac_address(struct net_device *dev, void *p) 2199 { 2200 struct virtnet_info *vi = netdev_priv(dev); 2201 struct virtio_device *vdev = vi->vdev; 2202 int ret; 2203 struct sockaddr *addr; 2204 struct scatterlist sg; 2205 2206 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STANDBY)) 2207 return -EOPNOTSUPP; 2208 2209 addr = kmemdup(p, sizeof(*addr), GFP_KERNEL); 2210 if (!addr) 2211 return -ENOMEM; 2212 2213 ret = eth_prepare_mac_addr_change(dev, addr); 2214 if (ret) 2215 goto out; 2216 2217 if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR)) { 2218 sg_init_one(&sg, addr->sa_data, dev->addr_len); 2219 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC, 2220 VIRTIO_NET_CTRL_MAC_ADDR_SET, &sg)) { 2221 dev_warn(&vdev->dev, 2222 "Failed to set mac address by vq command.\n"); 2223 ret = -EINVAL; 2224 goto out; 2225 } 2226 } else if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC) && 2227 !virtio_has_feature(vdev, VIRTIO_F_VERSION_1)) { 2228 unsigned int i; 2229 2230 /* Naturally, this has an atomicity problem.
*/ 2231 for (i = 0; i < dev->addr_len; i++) 2232 virtio_cwrite8(vdev, 2233 offsetof(struct virtio_net_config, mac) + 2234 i, addr->sa_data[i]); 2235 } 2236 2237 eth_commit_mac_addr_change(dev, p); 2238 ret = 0; 2239 2240 out: 2241 kfree(addr); 2242 return ret; 2243 } 2244 2245 static void virtnet_stats(struct net_device *dev, 2246 struct rtnl_link_stats64 *tot) 2247 { 2248 struct virtnet_info *vi = netdev_priv(dev); 2249 unsigned int start; 2250 int i; 2251 2252 for (i = 0; i < vi->max_queue_pairs; i++) { 2253 u64 tpackets, tbytes, terrors, rpackets, rbytes, rdrops; 2254 struct receive_queue *rq = &vi->rq[i]; 2255 struct send_queue *sq = &vi->sq[i]; 2256 2257 do { 2258 start = u64_stats_fetch_begin(&sq->stats.syncp); 2259 tpackets = sq->stats.packets; 2260 tbytes = sq->stats.bytes; 2261 terrors = sq->stats.tx_timeouts; 2262 } while (u64_stats_fetch_retry(&sq->stats.syncp, start)); 2263 2264 do { 2265 start = u64_stats_fetch_begin(&rq->stats.syncp); 2266 rpackets = rq->stats.packets; 2267 rbytes = rq->stats.bytes; 2268 rdrops = rq->stats.drops; 2269 } while (u64_stats_fetch_retry(&rq->stats.syncp, start)); 2270 2271 tot->rx_packets += rpackets; 2272 tot->tx_packets += tpackets; 2273 tot->rx_bytes += rbytes; 2274 tot->tx_bytes += tbytes; 2275 tot->rx_dropped += rdrops; 2276 tot->tx_errors += terrors; 2277 } 2278 2279 tot->tx_dropped = dev->stats.tx_dropped; 2280 tot->tx_fifo_errors = dev->stats.tx_fifo_errors; 2281 tot->rx_length_errors = dev->stats.rx_length_errors; 2282 tot->rx_frame_errors = dev->stats.rx_frame_errors; 2283 } 2284 2285 static void virtnet_ack_link_announce(struct virtnet_info *vi) 2286 { 2287 rtnl_lock(); 2288 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_ANNOUNCE, 2289 VIRTIO_NET_CTRL_ANNOUNCE_ACK, NULL)) 2290 dev_warn(&vi->dev->dev, "Failed to ack link announce.\n"); 2291 rtnl_unlock(); 2292 } 2293 2294 static int _virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs) 2295 { 2296 struct scatterlist sg; 2297 struct net_device *dev = vi->dev; 2298 2299 if (!vi->has_cvq || !virtio_has_feature(vi->vdev, VIRTIO_NET_F_MQ)) 2300 return 0; 2301 2302 vi->ctrl->mq.virtqueue_pairs = cpu_to_virtio16(vi->vdev, queue_pairs); 2303 sg_init_one(&sg, &vi->ctrl->mq, sizeof(vi->ctrl->mq)); 2304 2305 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ, 2306 VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET, &sg)) { 2307 dev_warn(&dev->dev, "Fail to set num of queue pairs to %d\n", 2308 queue_pairs); 2309 return -EINVAL; 2310 } else { 2311 vi->curr_queue_pairs = queue_pairs; 2312 /* virtnet_open() will refill when device is going to up. */ 2313 if (dev->flags & IFF_UP) 2314 schedule_delayed_work(&vi->refill, 0); 2315 } 2316 2317 return 0; 2318 } 2319 2320 static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs) 2321 { 2322 int err; 2323 2324 rtnl_lock(); 2325 err = _virtnet_set_queues(vi, queue_pairs); 2326 rtnl_unlock(); 2327 return err; 2328 } 2329 2330 static int virtnet_close(struct net_device *dev) 2331 { 2332 struct virtnet_info *vi = netdev_priv(dev); 2333 int i; 2334 2335 /* Make sure NAPI doesn't schedule refill work */ 2336 disable_delayed_refill(vi); 2337 /* Make sure refill_work doesn't re-enable napi! 
*/ 2338 cancel_delayed_work_sync(&vi->refill); 2339 2340 for (i = 0; i < vi->max_queue_pairs; i++) 2341 virtnet_disable_queue_pair(vi, i); 2342 2343 return 0; 2344 } 2345 2346 static void virtnet_set_rx_mode(struct net_device *dev) 2347 { 2348 struct virtnet_info *vi = netdev_priv(dev); 2349 struct scatterlist sg[2]; 2350 struct virtio_net_ctrl_mac *mac_data; 2351 struct netdev_hw_addr *ha; 2352 int uc_count; 2353 int mc_count; 2354 void *buf; 2355 int i; 2356 2357 /* We can't dynamically set ndo_set_rx_mode, so return gracefully */ 2358 if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_RX)) 2359 return; 2360 2361 vi->ctrl->promisc = ((dev->flags & IFF_PROMISC) != 0); 2362 vi->ctrl->allmulti = ((dev->flags & IFF_ALLMULTI) != 0); 2363 2364 sg_init_one(sg, &vi->ctrl->promisc, sizeof(vi->ctrl->promisc)); 2365 2366 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX, 2367 VIRTIO_NET_CTRL_RX_PROMISC, sg)) 2368 dev_warn(&dev->dev, "Failed to %sable promisc mode.\n", 2369 vi->ctrl->promisc ? "en" : "dis"); 2370 2371 sg_init_one(sg, &vi->ctrl->allmulti, sizeof(vi->ctrl->allmulti)); 2372 2373 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX, 2374 VIRTIO_NET_CTRL_RX_ALLMULTI, sg)) 2375 dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n", 2376 vi->ctrl->allmulti ? "en" : "dis"); 2377 2378 uc_count = netdev_uc_count(dev); 2379 mc_count = netdev_mc_count(dev); 2380 /* MAC filter - use one buffer for both lists */ 2381 buf = kzalloc(((uc_count + mc_count) * ETH_ALEN) + 2382 (2 * sizeof(mac_data->entries)), GFP_ATOMIC); 2383 mac_data = buf; 2384 if (!buf) 2385 return; 2386 2387 sg_init_table(sg, 2); 2388 2389 /* Store the unicast list and count in the front of the buffer */ 2390 mac_data->entries = cpu_to_virtio32(vi->vdev, uc_count); 2391 i = 0; 2392 netdev_for_each_uc_addr(ha, dev) 2393 memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN); 2394 2395 sg_set_buf(&sg[0], mac_data, 2396 sizeof(mac_data->entries) + (uc_count * ETH_ALEN)); 2397 2398 /* multicast list and count fill the end */ 2399 mac_data = (void *)&mac_data->macs[uc_count][0]; 2400 2401 mac_data->entries = cpu_to_virtio32(vi->vdev, mc_count); 2402 i = 0; 2403 netdev_for_each_mc_addr(ha, dev) 2404 memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN); 2405 2406 sg_set_buf(&sg[1], mac_data, 2407 sizeof(mac_data->entries) + (mc_count * ETH_ALEN)); 2408 2409 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC, 2410 VIRTIO_NET_CTRL_MAC_TABLE_SET, sg)) 2411 dev_warn(&dev->dev, "Failed to set MAC filter table.\n"); 2412 2413 kfree(buf); 2414 } 2415 2416 static int virtnet_vlan_rx_add_vid(struct net_device *dev, 2417 __be16 proto, u16 vid) 2418 { 2419 struct virtnet_info *vi = netdev_priv(dev); 2420 struct scatterlist sg; 2421 2422 vi->ctrl->vid = cpu_to_virtio16(vi->vdev, vid); 2423 sg_init_one(&sg, &vi->ctrl->vid, sizeof(vi->ctrl->vid)); 2424 2425 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN, 2426 VIRTIO_NET_CTRL_VLAN_ADD, &sg)) 2427 dev_warn(&dev->dev, "Failed to add VLAN ID %d.\n", vid); 2428 return 0; 2429 } 2430 2431 static int virtnet_vlan_rx_kill_vid(struct net_device *dev, 2432 __be16 proto, u16 vid) 2433 { 2434 struct virtnet_info *vi = netdev_priv(dev); 2435 struct scatterlist sg; 2436 2437 vi->ctrl->vid = cpu_to_virtio16(vi->vdev, vid); 2438 sg_init_one(&sg, &vi->ctrl->vid, sizeof(vi->ctrl->vid)); 2439 2440 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN, 2441 VIRTIO_NET_CTRL_VLAN_DEL, &sg)) 2442 dev_warn(&dev->dev, "Failed to kill VLAN ID %d.\n", vid); 2443 return 0; 2444 } 2445 2446 static void virtnet_clean_affinity(struct 
virtnet_info *vi) 2447 { 2448 int i; 2449 2450 if (vi->affinity_hint_set) { 2451 for (i = 0; i < vi->max_queue_pairs; i++) { 2452 virtqueue_set_affinity(vi->rq[i].vq, NULL); 2453 virtqueue_set_affinity(vi->sq[i].vq, NULL); 2454 } 2455 2456 vi->affinity_hint_set = false; 2457 } 2458 } 2459 2460 static void virtnet_set_affinity(struct virtnet_info *vi) 2461 { 2462 cpumask_var_t mask; 2463 int stragglers; 2464 int group_size; 2465 int i, j, cpu; 2466 int num_cpu; 2467 int stride; 2468 2469 if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) { 2470 virtnet_clean_affinity(vi); 2471 return; 2472 } 2473 2474 num_cpu = num_online_cpus(); 2475 stride = max_t(int, num_cpu / vi->curr_queue_pairs, 1); 2476 stragglers = num_cpu >= vi->curr_queue_pairs ? 2477 num_cpu % vi->curr_queue_pairs : 2478 0; 2479 cpu = cpumask_first(cpu_online_mask); 2480 2481 for (i = 0; i < vi->curr_queue_pairs; i++) { 2482 group_size = stride + (i < stragglers ? 1 : 0); 2483 2484 for (j = 0; j < group_size; j++) { 2485 cpumask_set_cpu(cpu, mask); 2486 cpu = cpumask_next_wrap(cpu, cpu_online_mask, 2487 nr_cpu_ids, false); 2488 } 2489 virtqueue_set_affinity(vi->rq[i].vq, mask); 2490 virtqueue_set_affinity(vi->sq[i].vq, mask); 2491 __netif_set_xps_queue(vi->dev, cpumask_bits(mask), i, XPS_CPUS); 2492 cpumask_clear(mask); 2493 } 2494 2495 vi->affinity_hint_set = true; 2496 free_cpumask_var(mask); 2497 } 2498 2499 static int virtnet_cpu_online(unsigned int cpu, struct hlist_node *node) 2500 { 2501 struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info, 2502 node); 2503 virtnet_set_affinity(vi); 2504 return 0; 2505 } 2506 2507 static int virtnet_cpu_dead(unsigned int cpu, struct hlist_node *node) 2508 { 2509 struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info, 2510 node_dead); 2511 virtnet_set_affinity(vi); 2512 return 0; 2513 } 2514 2515 static int virtnet_cpu_down_prep(unsigned int cpu, struct hlist_node *node) 2516 { 2517 struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info, 2518 node); 2519 2520 virtnet_clean_affinity(vi); 2521 return 0; 2522 } 2523 2524 static enum cpuhp_state virtionet_online; 2525 2526 static int virtnet_cpu_notif_add(struct virtnet_info *vi) 2527 { 2528 int ret; 2529 2530 ret = cpuhp_state_add_instance_nocalls(virtionet_online, &vi->node); 2531 if (ret) 2532 return ret; 2533 ret = cpuhp_state_add_instance_nocalls(CPUHP_VIRT_NET_DEAD, 2534 &vi->node_dead); 2535 if (!ret) 2536 return ret; 2537 cpuhp_state_remove_instance_nocalls(virtionet_online, &vi->node); 2538 return ret; 2539 } 2540 2541 static void virtnet_cpu_notif_remove(struct virtnet_info *vi) 2542 { 2543 cpuhp_state_remove_instance_nocalls(virtionet_online, &vi->node); 2544 cpuhp_state_remove_instance_nocalls(CPUHP_VIRT_NET_DEAD, 2545 &vi->node_dead); 2546 } 2547 2548 static void virtnet_get_ringparam(struct net_device *dev, 2549 struct ethtool_ringparam *ring, 2550 struct kernel_ethtool_ringparam *kernel_ring, 2551 struct netlink_ext_ack *extack) 2552 { 2553 struct virtnet_info *vi = netdev_priv(dev); 2554 2555 ring->rx_max_pending = vi->rq[0].vq->num_max; 2556 ring->tx_max_pending = vi->sq[0].vq->num_max; 2557 ring->rx_pending = virtqueue_get_vring_size(vi->rq[0].vq); 2558 ring->tx_pending = virtqueue_get_vring_size(vi->sq[0].vq); 2559 } 2560 2561 static int virtnet_set_ringparam(struct net_device *dev, 2562 struct ethtool_ringparam *ring, 2563 struct kernel_ethtool_ringparam *kernel_ring, 2564 struct netlink_ext_ack *extack) 2565 { 2566 struct virtnet_info *vi = netdev_priv(dev); 2567 u32 rx_pending, 
tx_pending; 2568 struct receive_queue *rq; 2569 struct send_queue *sq; 2570 int i, err; 2571 2572 if (ring->rx_mini_pending || ring->rx_jumbo_pending) 2573 return -EINVAL; 2574 2575 rx_pending = virtqueue_get_vring_size(vi->rq[0].vq); 2576 tx_pending = virtqueue_get_vring_size(vi->sq[0].vq); 2577 2578 if (ring->rx_pending == rx_pending && 2579 ring->tx_pending == tx_pending) 2580 return 0; 2581 2582 if (ring->rx_pending > vi->rq[0].vq->num_max) 2583 return -EINVAL; 2584 2585 if (ring->tx_pending > vi->sq[0].vq->num_max) 2586 return -EINVAL; 2587 2588 for (i = 0; i < vi->max_queue_pairs; i++) { 2589 rq = vi->rq + i; 2590 sq = vi->sq + i; 2591 2592 if (ring->tx_pending != tx_pending) { 2593 err = virtnet_tx_resize(vi, sq, ring->tx_pending); 2594 if (err) 2595 return err; 2596 } 2597 2598 if (ring->rx_pending != rx_pending) { 2599 err = virtnet_rx_resize(vi, rq, ring->rx_pending); 2600 if (err) 2601 return err; 2602 } 2603 } 2604 2605 return 0; 2606 } 2607 2608 static bool virtnet_commit_rss_command(struct virtnet_info *vi) 2609 { 2610 struct net_device *dev = vi->dev; 2611 struct scatterlist sgs[4]; 2612 unsigned int sg_buf_size; 2613 2614 /* prepare sgs */ 2615 sg_init_table(sgs, 4); 2616 2617 sg_buf_size = offsetof(struct virtio_net_ctrl_rss, indirection_table); 2618 sg_set_buf(&sgs[0], &vi->ctrl->rss, sg_buf_size); 2619 2620 sg_buf_size = sizeof(uint16_t) * (vi->ctrl->rss.indirection_table_mask + 1); 2621 sg_set_buf(&sgs[1], vi->ctrl->rss.indirection_table, sg_buf_size); 2622 2623 sg_buf_size = offsetof(struct virtio_net_ctrl_rss, key) 2624 - offsetof(struct virtio_net_ctrl_rss, max_tx_vq); 2625 sg_set_buf(&sgs[2], &vi->ctrl->rss.max_tx_vq, sg_buf_size); 2626 2627 sg_buf_size = vi->rss_key_size; 2628 sg_set_buf(&sgs[3], vi->ctrl->rss.key, sg_buf_size); 2629 2630 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ, 2631 vi->has_rss ? VIRTIO_NET_CTRL_MQ_RSS_CONFIG 2632 : VIRTIO_NET_CTRL_MQ_HASH_CONFIG, sgs)) { 2633 dev_warn(&dev->dev, "VIRTIONET issue with committing RSS sgs\n"); 2634 return false; 2635 } 2636 return true; 2637 } 2638 2639 static void virtnet_init_default_rss(struct virtnet_info *vi) 2640 { 2641 u32 indir_val = 0; 2642 int i = 0; 2643 2644 vi->ctrl->rss.hash_types = vi->rss_hash_types_supported; 2645 vi->rss_hash_types_saved = vi->rss_hash_types_supported; 2646 vi->ctrl->rss.indirection_table_mask = vi->rss_indir_table_size 2647 ? 
vi->rss_indir_table_size - 1 : 0; 2648 vi->ctrl->rss.unclassified_queue = 0; 2649 2650 for (; i < vi->rss_indir_table_size; ++i) { 2651 indir_val = ethtool_rxfh_indir_default(i, vi->curr_queue_pairs); 2652 vi->ctrl->rss.indirection_table[i] = indir_val; 2653 } 2654 2655 vi->ctrl->rss.max_tx_vq = vi->curr_queue_pairs; 2656 vi->ctrl->rss.hash_key_length = vi->rss_key_size; 2657 2658 netdev_rss_key_fill(vi->ctrl->rss.key, vi->rss_key_size); 2659 } 2660 2661 static void virtnet_get_hashflow(const struct virtnet_info *vi, struct ethtool_rxnfc *info) 2662 { 2663 info->data = 0; 2664 switch (info->flow_type) { 2665 case TCP_V4_FLOW: 2666 if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_TCPv4) { 2667 info->data = RXH_IP_SRC | RXH_IP_DST | 2668 RXH_L4_B_0_1 | RXH_L4_B_2_3; 2669 } else if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv4) { 2670 info->data = RXH_IP_SRC | RXH_IP_DST; 2671 } 2672 break; 2673 case TCP_V6_FLOW: 2674 if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_TCPv6) { 2675 info->data = RXH_IP_SRC | RXH_IP_DST | 2676 RXH_L4_B_0_1 | RXH_L4_B_2_3; 2677 } else if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv6) { 2678 info->data = RXH_IP_SRC | RXH_IP_DST; 2679 } 2680 break; 2681 case UDP_V4_FLOW: 2682 if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_UDPv4) { 2683 info->data = RXH_IP_SRC | RXH_IP_DST | 2684 RXH_L4_B_0_1 | RXH_L4_B_2_3; 2685 } else if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv4) { 2686 info->data = RXH_IP_SRC | RXH_IP_DST; 2687 } 2688 break; 2689 case UDP_V6_FLOW: 2690 if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_UDPv6) { 2691 info->data = RXH_IP_SRC | RXH_IP_DST | 2692 RXH_L4_B_0_1 | RXH_L4_B_2_3; 2693 } else if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv6) { 2694 info->data = RXH_IP_SRC | RXH_IP_DST; 2695 } 2696 break; 2697 case IPV4_FLOW: 2698 if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv4) 2699 info->data = RXH_IP_SRC | RXH_IP_DST; 2700 2701 break; 2702 case IPV6_FLOW: 2703 if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv6) 2704 info->data = RXH_IP_SRC | RXH_IP_DST; 2705 2706 break; 2707 default: 2708 info->data = 0; 2709 break; 2710 } 2711 } 2712 2713 static bool virtnet_set_hashflow(struct virtnet_info *vi, struct ethtool_rxnfc *info) 2714 { 2715 u32 new_hashtypes = vi->rss_hash_types_saved; 2716 bool is_disable = info->data & RXH_DISCARD; 2717 bool is_l4 = info->data == (RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3); 2718 2719 /* supports only 'sd', 'sdfn' and 'r' */ 2720 if (!((info->data == (RXH_IP_SRC | RXH_IP_DST)) | is_l4 | is_disable)) 2721 return false; 2722 2723 switch (info->flow_type) { 2724 case TCP_V4_FLOW: 2725 new_hashtypes &= ~(VIRTIO_NET_RSS_HASH_TYPE_IPv4 | VIRTIO_NET_RSS_HASH_TYPE_TCPv4); 2726 if (!is_disable) 2727 new_hashtypes |= VIRTIO_NET_RSS_HASH_TYPE_IPv4 2728 | (is_l4 ? VIRTIO_NET_RSS_HASH_TYPE_TCPv4 : 0); 2729 break; 2730 case UDP_V4_FLOW: 2731 new_hashtypes &= ~(VIRTIO_NET_RSS_HASH_TYPE_IPv4 | VIRTIO_NET_RSS_HASH_TYPE_UDPv4); 2732 if (!is_disable) 2733 new_hashtypes |= VIRTIO_NET_RSS_HASH_TYPE_IPv4 2734 | (is_l4 ? VIRTIO_NET_RSS_HASH_TYPE_UDPv4 : 0); 2735 break; 2736 case IPV4_FLOW: 2737 new_hashtypes &= ~VIRTIO_NET_RSS_HASH_TYPE_IPv4; 2738 if (!is_disable) 2739 new_hashtypes = VIRTIO_NET_RSS_HASH_TYPE_IPv4; 2740 break; 2741 case TCP_V6_FLOW: 2742 new_hashtypes &= ~(VIRTIO_NET_RSS_HASH_TYPE_IPv6 | VIRTIO_NET_RSS_HASH_TYPE_TCPv6); 2743 if (!is_disable) 2744 new_hashtypes |= VIRTIO_NET_RSS_HASH_TYPE_IPv6 2745 | (is_l4 ? 
VIRTIO_NET_RSS_HASH_TYPE_TCPv6 : 0); 2746 break; 2747 case UDP_V6_FLOW: 2748 new_hashtypes &= ~(VIRTIO_NET_RSS_HASH_TYPE_IPv6 | VIRTIO_NET_RSS_HASH_TYPE_UDPv6); 2749 if (!is_disable) 2750 new_hashtypes |= VIRTIO_NET_RSS_HASH_TYPE_IPv6 2751 | (is_l4 ? VIRTIO_NET_RSS_HASH_TYPE_UDPv6 : 0); 2752 break; 2753 case IPV6_FLOW: 2754 new_hashtypes &= ~VIRTIO_NET_RSS_HASH_TYPE_IPv6; 2755 if (!is_disable) 2756 new_hashtypes = VIRTIO_NET_RSS_HASH_TYPE_IPv6; 2757 break; 2758 default: 2759 /* unsupported flow */ 2760 return false; 2761 } 2762 2763 /* if unsupported hashtype was set */ 2764 if (new_hashtypes != (new_hashtypes & vi->rss_hash_types_supported)) 2765 return false; 2766 2767 if (new_hashtypes != vi->rss_hash_types_saved) { 2768 vi->rss_hash_types_saved = new_hashtypes; 2769 vi->ctrl->rss.hash_types = vi->rss_hash_types_saved; 2770 if (vi->dev->features & NETIF_F_RXHASH) 2771 return virtnet_commit_rss_command(vi); 2772 } 2773 2774 return true; 2775 } 2776 2777 static void virtnet_get_drvinfo(struct net_device *dev, 2778 struct ethtool_drvinfo *info) 2779 { 2780 struct virtnet_info *vi = netdev_priv(dev); 2781 struct virtio_device *vdev = vi->vdev; 2782 2783 strscpy(info->driver, KBUILD_MODNAME, sizeof(info->driver)); 2784 strscpy(info->version, VIRTNET_DRIVER_VERSION, sizeof(info->version)); 2785 strscpy(info->bus_info, virtio_bus_name(vdev), sizeof(info->bus_info)); 2786 2787 } 2788 2789 /* TODO: Eliminate OOO packets during switching */ 2790 static int virtnet_set_channels(struct net_device *dev, 2791 struct ethtool_channels *channels) 2792 { 2793 struct virtnet_info *vi = netdev_priv(dev); 2794 u16 queue_pairs = channels->combined_count; 2795 int err; 2796 2797 /* We don't support separate rx/tx channels. 2798 * We don't allow setting 'other' channels. 2799 */ 2800 if (channels->rx_count || channels->tx_count || channels->other_count) 2801 return -EINVAL; 2802 2803 if (queue_pairs > vi->max_queue_pairs || queue_pairs == 0) 2804 return -EINVAL; 2805 2806 /* For now we don't support modifying channels while XDP is loaded 2807 * also when XDP is loaded all RX queues have XDP programs so we only 2808 * need to check a single RX queue. 
2809 */ 2810 if (vi->rq[0].xdp_prog) 2811 return -EINVAL; 2812 2813 cpus_read_lock(); 2814 err = _virtnet_set_queues(vi, queue_pairs); 2815 if (err) { 2816 cpus_read_unlock(); 2817 goto err; 2818 } 2819 virtnet_set_affinity(vi); 2820 cpus_read_unlock(); 2821 2822 netif_set_real_num_tx_queues(dev, queue_pairs); 2823 netif_set_real_num_rx_queues(dev, queue_pairs); 2824 err: 2825 return err; 2826 } 2827 2828 static void virtnet_get_strings(struct net_device *dev, u32 stringset, u8 *data) 2829 { 2830 struct virtnet_info *vi = netdev_priv(dev); 2831 unsigned int i, j; 2832 u8 *p = data; 2833 2834 switch (stringset) { 2835 case ETH_SS_STATS: 2836 for (i = 0; i < vi->curr_queue_pairs; i++) { 2837 for (j = 0; j < VIRTNET_RQ_STATS_LEN; j++) 2838 ethtool_sprintf(&p, "rx_queue_%u_%s", i, 2839 virtnet_rq_stats_desc[j].desc); 2840 } 2841 2842 for (i = 0; i < vi->curr_queue_pairs; i++) { 2843 for (j = 0; j < VIRTNET_SQ_STATS_LEN; j++) 2844 ethtool_sprintf(&p, "tx_queue_%u_%s", i, 2845 virtnet_sq_stats_desc[j].desc); 2846 } 2847 break; 2848 } 2849 } 2850 2851 static int virtnet_get_sset_count(struct net_device *dev, int sset) 2852 { 2853 struct virtnet_info *vi = netdev_priv(dev); 2854 2855 switch (sset) { 2856 case ETH_SS_STATS: 2857 return vi->curr_queue_pairs * (VIRTNET_RQ_STATS_LEN + 2858 VIRTNET_SQ_STATS_LEN); 2859 default: 2860 return -EOPNOTSUPP; 2861 } 2862 } 2863 2864 static void virtnet_get_ethtool_stats(struct net_device *dev, 2865 struct ethtool_stats *stats, u64 *data) 2866 { 2867 struct virtnet_info *vi = netdev_priv(dev); 2868 unsigned int idx = 0, start, i, j; 2869 const u8 *stats_base; 2870 size_t offset; 2871 2872 for (i = 0; i < vi->curr_queue_pairs; i++) { 2873 struct receive_queue *rq = &vi->rq[i]; 2874 2875 stats_base = (u8 *)&rq->stats; 2876 do { 2877 start = u64_stats_fetch_begin(&rq->stats.syncp); 2878 for (j = 0; j < VIRTNET_RQ_STATS_LEN; j++) { 2879 offset = virtnet_rq_stats_desc[j].offset; 2880 data[idx + j] = *(u64 *)(stats_base + offset); 2881 } 2882 } while (u64_stats_fetch_retry(&rq->stats.syncp, start)); 2883 idx += VIRTNET_RQ_STATS_LEN; 2884 } 2885 2886 for (i = 0; i < vi->curr_queue_pairs; i++) { 2887 struct send_queue *sq = &vi->sq[i]; 2888 2889 stats_base = (u8 *)&sq->stats; 2890 do { 2891 start = u64_stats_fetch_begin(&sq->stats.syncp); 2892 for (j = 0; j < VIRTNET_SQ_STATS_LEN; j++) { 2893 offset = virtnet_sq_stats_desc[j].offset; 2894 data[idx + j] = *(u64 *)(stats_base + offset); 2895 } 2896 } while (u64_stats_fetch_retry(&sq->stats.syncp, start)); 2897 idx += VIRTNET_SQ_STATS_LEN; 2898 } 2899 } 2900 2901 static void virtnet_get_channels(struct net_device *dev, 2902 struct ethtool_channels *channels) 2903 { 2904 struct virtnet_info *vi = netdev_priv(dev); 2905 2906 channels->combined_count = vi->curr_queue_pairs; 2907 channels->max_combined = vi->max_queue_pairs; 2908 channels->max_other = 0; 2909 channels->rx_count = 0; 2910 channels->tx_count = 0; 2911 channels->other_count = 0; 2912 } 2913 2914 static int virtnet_set_link_ksettings(struct net_device *dev, 2915 const struct ethtool_link_ksettings *cmd) 2916 { 2917 struct virtnet_info *vi = netdev_priv(dev); 2918 2919 return ethtool_virtdev_set_link_ksettings(dev, cmd, 2920 &vi->speed, &vi->duplex); 2921 } 2922 2923 static int virtnet_get_link_ksettings(struct net_device *dev, 2924 struct ethtool_link_ksettings *cmd) 2925 { 2926 struct virtnet_info *vi = netdev_priv(dev); 2927 2928 cmd->base.speed = vi->speed; 2929 cmd->base.duplex = vi->duplex; 2930 cmd->base.port = PORT_OTHER; 2931 2932 return 0; 2933 } 2934 
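/* Coalescing control path overview: virtnet_send_notf_coal_cmds() below pushes
 * ethtool's coalescing parameters to the device over the control virtqueue as
 * two VIRTIO_NET_CTRL_NOTF_COAL commands, one for TX (tx_usecs/tx_max_packets)
 * and one for RX (rx_usecs/rx_max_packets), then caches the accepted values in
 * virtnet_info. As an illustration only (eth0 is a placeholder name), a request
 * such as
 *
 *   ethtool -C eth0 rx-usecs 64 rx-frames 32 tx-usecs 64 tx-frames 32
 *
 * reaches this code via virtnet_set_coalesce() when VIRTIO_NET_F_NOTF_COAL was
 * negotiated. Without that feature, virtnet_coal_params_supported() accepts
 * only the legacy "tx-frames 0/1" toggle (rx-frames must stay 1), which is
 * used solely to switch TX NAPI polling on or off.
 */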
2935 static int virtnet_send_notf_coal_cmds(struct virtnet_info *vi, 2936 struct ethtool_coalesce *ec) 2937 { 2938 struct scatterlist sgs_tx, sgs_rx; 2939 2940 vi->ctrl->coal_tx.tx_usecs = cpu_to_le32(ec->tx_coalesce_usecs); 2941 vi->ctrl->coal_tx.tx_max_packets = cpu_to_le32(ec->tx_max_coalesced_frames); 2942 sg_init_one(&sgs_tx, &vi->ctrl->coal_tx, sizeof(vi->ctrl->coal_tx)); 2943 2944 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_NOTF_COAL, 2945 VIRTIO_NET_CTRL_NOTF_COAL_TX_SET, 2946 &sgs_tx)) 2947 return -EINVAL; 2948 2949 /* Save parameters */ 2950 vi->tx_usecs = ec->tx_coalesce_usecs; 2951 vi->tx_max_packets = ec->tx_max_coalesced_frames; 2952 2953 vi->ctrl->coal_rx.rx_usecs = cpu_to_le32(ec->rx_coalesce_usecs); 2954 vi->ctrl->coal_rx.rx_max_packets = cpu_to_le32(ec->rx_max_coalesced_frames); 2955 sg_init_one(&sgs_rx, &vi->ctrl->coal_rx, sizeof(vi->ctrl->coal_rx)); 2956 2957 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_NOTF_COAL, 2958 VIRTIO_NET_CTRL_NOTF_COAL_RX_SET, 2959 &sgs_rx)) 2960 return -EINVAL; 2961 2962 /* Save parameters */ 2963 vi->rx_usecs = ec->rx_coalesce_usecs; 2964 vi->rx_max_packets = ec->rx_max_coalesced_frames; 2965 2966 return 0; 2967 } 2968 2969 static int virtnet_coal_params_supported(struct ethtool_coalesce *ec) 2970 { 2971 /* usecs coalescing is supported only if VIRTIO_NET_F_NOTF_COAL 2972 * feature is negotiated. 2973 */ 2974 if (ec->rx_coalesce_usecs || ec->tx_coalesce_usecs) 2975 return -EOPNOTSUPP; 2976 2977 if (ec->tx_max_coalesced_frames > 1 || 2978 ec->rx_max_coalesced_frames != 1) 2979 return -EINVAL; 2980 2981 return 0; 2982 } 2983 2984 static int virtnet_set_coalesce(struct net_device *dev, 2985 struct ethtool_coalesce *ec, 2986 struct kernel_ethtool_coalesce *kernel_coal, 2987 struct netlink_ext_ack *extack) 2988 { 2989 struct virtnet_info *vi = netdev_priv(dev); 2990 int ret, i, napi_weight; 2991 bool update_napi = false; 2992 2993 /* Can't change NAPI weight if the link is up */ 2994 napi_weight = ec->tx_max_coalesced_frames ? 
NAPI_POLL_WEIGHT : 0; 2995 if (napi_weight ^ vi->sq[0].napi.weight) { 2996 if (dev->flags & IFF_UP) 2997 return -EBUSY; 2998 else 2999 update_napi = true; 3000 } 3001 3002 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_NOTF_COAL)) 3003 ret = virtnet_send_notf_coal_cmds(vi, ec); 3004 else 3005 ret = virtnet_coal_params_supported(ec); 3006 3007 if (ret) 3008 return ret; 3009 3010 if (update_napi) { 3011 for (i = 0; i < vi->max_queue_pairs; i++) 3012 vi->sq[i].napi.weight = napi_weight; 3013 } 3014 3015 return ret; 3016 } 3017 3018 static int virtnet_get_coalesce(struct net_device *dev, 3019 struct ethtool_coalesce *ec, 3020 struct kernel_ethtool_coalesce *kernel_coal, 3021 struct netlink_ext_ack *extack) 3022 { 3023 struct virtnet_info *vi = netdev_priv(dev); 3024 3025 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_NOTF_COAL)) { 3026 ec->rx_coalesce_usecs = vi->rx_usecs; 3027 ec->tx_coalesce_usecs = vi->tx_usecs; 3028 ec->tx_max_coalesced_frames = vi->tx_max_packets; 3029 ec->rx_max_coalesced_frames = vi->rx_max_packets; 3030 } else { 3031 ec->rx_max_coalesced_frames = 1; 3032 3033 if (vi->sq[0].napi.weight) 3034 ec->tx_max_coalesced_frames = 1; 3035 } 3036 3037 return 0; 3038 } 3039 3040 static void virtnet_init_settings(struct net_device *dev) 3041 { 3042 struct virtnet_info *vi = netdev_priv(dev); 3043 3044 vi->speed = SPEED_UNKNOWN; 3045 vi->duplex = DUPLEX_UNKNOWN; 3046 } 3047 3048 static void virtnet_update_settings(struct virtnet_info *vi) 3049 { 3050 u32 speed; 3051 u8 duplex; 3052 3053 if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_SPEED_DUPLEX)) 3054 return; 3055 3056 virtio_cread_le(vi->vdev, struct virtio_net_config, speed, &speed); 3057 3058 if (ethtool_validate_speed(speed)) 3059 vi->speed = speed; 3060 3061 virtio_cread_le(vi->vdev, struct virtio_net_config, duplex, &duplex); 3062 3063 if (ethtool_validate_duplex(duplex)) 3064 vi->duplex = duplex; 3065 } 3066 3067 static u32 virtnet_get_rxfh_key_size(struct net_device *dev) 3068 { 3069 return ((struct virtnet_info *)netdev_priv(dev))->rss_key_size; 3070 } 3071 3072 static u32 virtnet_get_rxfh_indir_size(struct net_device *dev) 3073 { 3074 return ((struct virtnet_info *)netdev_priv(dev))->rss_indir_table_size; 3075 } 3076 3077 static int virtnet_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, u8 *hfunc) 3078 { 3079 struct virtnet_info *vi = netdev_priv(dev); 3080 int i; 3081 3082 if (indir) { 3083 for (i = 0; i < vi->rss_indir_table_size; ++i) 3084 indir[i] = vi->ctrl->rss.indirection_table[i]; 3085 } 3086 3087 if (key) 3088 memcpy(key, vi->ctrl->rss.key, vi->rss_key_size); 3089 3090 if (hfunc) 3091 *hfunc = ETH_RSS_HASH_TOP; 3092 3093 return 0; 3094 } 3095 3096 static int virtnet_set_rxfh(struct net_device *dev, const u32 *indir, const u8 *key, const u8 hfunc) 3097 { 3098 struct virtnet_info *vi = netdev_priv(dev); 3099 int i; 3100 3101 if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP) 3102 return -EOPNOTSUPP; 3103 3104 if (indir) { 3105 for (i = 0; i < vi->rss_indir_table_size; ++i) 3106 vi->ctrl->rss.indirection_table[i] = indir[i]; 3107 } 3108 if (key) 3109 memcpy(vi->ctrl->rss.key, key, vi->rss_key_size); 3110 3111 virtnet_commit_rss_command(vi); 3112 3113 return 0; 3114 } 3115 3116 static int virtnet_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info, u32 *rule_locs) 3117 { 3118 struct virtnet_info *vi = netdev_priv(dev); 3119 int rc = 0; 3120 3121 switch (info->cmd) { 3122 case ETHTOOL_GRXRINGS: 3123 info->data = vi->curr_queue_pairs; 3124 break; 3125 case ETHTOOL_GRXFH: 3126 
virtnet_get_hashflow(vi, info); 3127 break; 3128 default: 3129 rc = -EOPNOTSUPP; 3130 } 3131 3132 return rc; 3133 } 3134 3135 static int virtnet_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info) 3136 { 3137 struct virtnet_info *vi = netdev_priv(dev); 3138 int rc = 0; 3139 3140 switch (info->cmd) { 3141 case ETHTOOL_SRXFH: 3142 if (!virtnet_set_hashflow(vi, info)) 3143 rc = -EINVAL; 3144 3145 break; 3146 default: 3147 rc = -EOPNOTSUPP; 3148 } 3149 3150 return rc; 3151 } 3152 3153 static const struct ethtool_ops virtnet_ethtool_ops = { 3154 .supported_coalesce_params = ETHTOOL_COALESCE_MAX_FRAMES | 3155 ETHTOOL_COALESCE_USECS, 3156 .get_drvinfo = virtnet_get_drvinfo, 3157 .get_link = ethtool_op_get_link, 3158 .get_ringparam = virtnet_get_ringparam, 3159 .set_ringparam = virtnet_set_ringparam, 3160 .get_strings = virtnet_get_strings, 3161 .get_sset_count = virtnet_get_sset_count, 3162 .get_ethtool_stats = virtnet_get_ethtool_stats, 3163 .set_channels = virtnet_set_channels, 3164 .get_channels = virtnet_get_channels, 3165 .get_ts_info = ethtool_op_get_ts_info, 3166 .get_link_ksettings = virtnet_get_link_ksettings, 3167 .set_link_ksettings = virtnet_set_link_ksettings, 3168 .set_coalesce = virtnet_set_coalesce, 3169 .get_coalesce = virtnet_get_coalesce, 3170 .get_rxfh_key_size = virtnet_get_rxfh_key_size, 3171 .get_rxfh_indir_size = virtnet_get_rxfh_indir_size, 3172 .get_rxfh = virtnet_get_rxfh, 3173 .set_rxfh = virtnet_set_rxfh, 3174 .get_rxnfc = virtnet_get_rxnfc, 3175 .set_rxnfc = virtnet_set_rxnfc, 3176 }; 3177 3178 static void virtnet_freeze_down(struct virtio_device *vdev) 3179 { 3180 struct virtnet_info *vi = vdev->priv; 3181 3182 /* Make sure no work handler is accessing the device */ 3183 flush_work(&vi->config_work); 3184 3185 netif_tx_lock_bh(vi->dev); 3186 netif_device_detach(vi->dev); 3187 netif_tx_unlock_bh(vi->dev); 3188 if (netif_running(vi->dev)) 3189 virtnet_close(vi->dev); 3190 } 3191 3192 static int init_vqs(struct virtnet_info *vi); 3193 3194 static int virtnet_restore_up(struct virtio_device *vdev) 3195 { 3196 struct virtnet_info *vi = vdev->priv; 3197 int err; 3198 3199 err = init_vqs(vi); 3200 if (err) 3201 return err; 3202 3203 virtio_device_ready(vdev); 3204 3205 enable_delayed_refill(vi); 3206 3207 if (netif_running(vi->dev)) { 3208 err = virtnet_open(vi->dev); 3209 if (err) 3210 return err; 3211 } 3212 3213 netif_tx_lock_bh(vi->dev); 3214 netif_device_attach(vi->dev); 3215 netif_tx_unlock_bh(vi->dev); 3216 return err; 3217 } 3218 3219 static int virtnet_set_guest_offloads(struct virtnet_info *vi, u64 offloads) 3220 { 3221 struct scatterlist sg; 3222 vi->ctrl->offloads = cpu_to_virtio64(vi->vdev, offloads); 3223 3224 sg_init_one(&sg, &vi->ctrl->offloads, sizeof(vi->ctrl->offloads)); 3225 3226 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_GUEST_OFFLOADS, 3227 VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET, &sg)) { 3228 dev_warn(&vi->dev->dev, "Fail to set guest offload.\n"); 3229 return -EINVAL; 3230 } 3231 3232 return 0; 3233 } 3234 3235 static int virtnet_clear_guest_offloads(struct virtnet_info *vi) 3236 { 3237 u64 offloads = 0; 3238 3239 if (!vi->guest_offloads) 3240 return 0; 3241 3242 return virtnet_set_guest_offloads(vi, offloads); 3243 } 3244 3245 static int virtnet_restore_guest_offloads(struct virtnet_info *vi) 3246 { 3247 u64 offloads = vi->guest_offloads; 3248 3249 if (!vi->guest_offloads) 3250 return 0; 3251 3252 return virtnet_set_guest_offloads(vi, offloads); 3253 } 3254 3255 static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog, 3256 
struct netlink_ext_ack *extack) 3257 { 3258 unsigned int room = SKB_DATA_ALIGN(VIRTIO_XDP_HEADROOM + 3259 sizeof(struct skb_shared_info)); 3260 unsigned int max_sz = PAGE_SIZE - room - ETH_HLEN; 3261 struct virtnet_info *vi = netdev_priv(dev); 3262 struct bpf_prog *old_prog; 3263 u16 xdp_qp = 0, curr_qp; 3264 int i, err; 3265 3266 if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS) 3267 && (virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO4) || 3268 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO6) || 3269 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_ECN) || 3270 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_UFO) || 3271 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_CSUM) || 3272 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_USO4) || 3273 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_USO6))) { 3274 NL_SET_ERR_MSG_MOD(extack, "Can't set XDP while host is implementing GRO_HW/CSUM, disable GRO_HW/CSUM first"); 3275 return -EOPNOTSUPP; 3276 } 3277 3278 if (vi->mergeable_rx_bufs && !vi->any_header_sg) { 3279 NL_SET_ERR_MSG_MOD(extack, "XDP expects header/data in single page, any_header_sg required"); 3280 return -EINVAL; 3281 } 3282 3283 if (prog && !prog->aux->xdp_has_frags && dev->mtu > max_sz) { 3284 NL_SET_ERR_MSG_MOD(extack, "MTU too large to enable XDP without frags"); 3285 netdev_warn(dev, "single-buffer XDP requires MTU less than %u\n", max_sz); 3286 return -EINVAL; 3287 } 3288 3289 curr_qp = vi->curr_queue_pairs - vi->xdp_queue_pairs; 3290 if (prog) 3291 xdp_qp = nr_cpu_ids; 3292 3293 /* XDP requires extra queues for XDP_TX */ 3294 if (curr_qp + xdp_qp > vi->max_queue_pairs) { 3295 netdev_warn_once(dev, "XDP request %i queues but max is %i. XDP_TX and XDP_REDIRECT will operate in a slower locked tx mode.\n", 3296 curr_qp + xdp_qp, vi->max_queue_pairs); 3297 xdp_qp = 0; 3298 } 3299 3300 old_prog = rtnl_dereference(vi->rq[0].xdp_prog); 3301 if (!prog && !old_prog) 3302 return 0; 3303 3304 if (prog) 3305 bpf_prog_add(prog, vi->max_queue_pairs - 1); 3306 3307 /* Make sure NAPI is not using any XDP TX queues for RX. 
*/ 3308 if (netif_running(dev)) { 3309 for (i = 0; i < vi->max_queue_pairs; i++) { 3310 napi_disable(&vi->rq[i].napi); 3311 virtnet_napi_tx_disable(&vi->sq[i].napi); 3312 } 3313 } 3314 3315 if (!prog) { 3316 for (i = 0; i < vi->max_queue_pairs; i++) { 3317 rcu_assign_pointer(vi->rq[i].xdp_prog, prog); 3318 if (i == 0) 3319 virtnet_restore_guest_offloads(vi); 3320 } 3321 synchronize_net(); 3322 } 3323 3324 err = _virtnet_set_queues(vi, curr_qp + xdp_qp); 3325 if (err) 3326 goto err; 3327 netif_set_real_num_rx_queues(dev, curr_qp + xdp_qp); 3328 vi->xdp_queue_pairs = xdp_qp; 3329 3330 if (prog) { 3331 vi->xdp_enabled = true; 3332 for (i = 0; i < vi->max_queue_pairs; i++) { 3333 rcu_assign_pointer(vi->rq[i].xdp_prog, prog); 3334 if (i == 0 && !old_prog) 3335 virtnet_clear_guest_offloads(vi); 3336 } 3337 if (!old_prog) 3338 xdp_features_set_redirect_target(dev, true); 3339 } else { 3340 xdp_features_clear_redirect_target(dev); 3341 vi->xdp_enabled = false; 3342 } 3343 3344 for (i = 0; i < vi->max_queue_pairs; i++) { 3345 if (old_prog) 3346 bpf_prog_put(old_prog); 3347 if (netif_running(dev)) { 3348 virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi); 3349 virtnet_napi_tx_enable(vi, vi->sq[i].vq, 3350 &vi->sq[i].napi); 3351 } 3352 } 3353 3354 return 0; 3355 3356 err: 3357 if (!prog) { 3358 virtnet_clear_guest_offloads(vi); 3359 for (i = 0; i < vi->max_queue_pairs; i++) 3360 rcu_assign_pointer(vi->rq[i].xdp_prog, old_prog); 3361 } 3362 3363 if (netif_running(dev)) { 3364 for (i = 0; i < vi->max_queue_pairs; i++) { 3365 virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi); 3366 virtnet_napi_tx_enable(vi, vi->sq[i].vq, 3367 &vi->sq[i].napi); 3368 } 3369 } 3370 if (prog) 3371 bpf_prog_sub(prog, vi->max_queue_pairs - 1); 3372 return err; 3373 } 3374 3375 static int virtnet_xdp(struct net_device *dev, struct netdev_bpf *xdp) 3376 { 3377 switch (xdp->command) { 3378 case XDP_SETUP_PROG: 3379 return virtnet_xdp_set(dev, xdp->prog, xdp->extack); 3380 default: 3381 return -EINVAL; 3382 } 3383 } 3384 3385 static int virtnet_get_phys_port_name(struct net_device *dev, char *buf, 3386 size_t len) 3387 { 3388 struct virtnet_info *vi = netdev_priv(dev); 3389 int ret; 3390 3391 if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_STANDBY)) 3392 return -EOPNOTSUPP; 3393 3394 ret = snprintf(buf, len, "sby"); 3395 if (ret >= len) 3396 return -EOPNOTSUPP; 3397 3398 return 0; 3399 } 3400 3401 static int virtnet_set_features(struct net_device *dev, 3402 netdev_features_t features) 3403 { 3404 struct virtnet_info *vi = netdev_priv(dev); 3405 u64 offloads; 3406 int err; 3407 3408 if ((dev->features ^ features) & NETIF_F_GRO_HW) { 3409 if (vi->xdp_enabled) 3410 return -EBUSY; 3411 3412 if (features & NETIF_F_GRO_HW) 3413 offloads = vi->guest_offloads_capable; 3414 else 3415 offloads = vi->guest_offloads_capable & 3416 ~GUEST_OFFLOAD_GRO_HW_MASK; 3417 3418 err = virtnet_set_guest_offloads(vi, offloads); 3419 if (err) 3420 return err; 3421 vi->guest_offloads = offloads; 3422 } 3423 3424 if ((dev->features ^ features) & NETIF_F_RXHASH) { 3425 if (features & NETIF_F_RXHASH) 3426 vi->ctrl->rss.hash_types = vi->rss_hash_types_saved; 3427 else 3428 vi->ctrl->rss.hash_types = VIRTIO_NET_HASH_REPORT_NONE; 3429 3430 if (!virtnet_commit_rss_command(vi)) 3431 return -EINVAL; 3432 } 3433 3434 return 0; 3435 } 3436 3437 static void virtnet_tx_timeout(struct net_device *dev, unsigned int txqueue) 3438 { 3439 struct virtnet_info *priv = netdev_priv(dev); 3440 struct send_queue *sq = &priv->sq[txqueue]; 3441 struct netdev_queue *txq = 
netdev_get_tx_queue(dev, txqueue); 3442 3443 u64_stats_update_begin(&sq->stats.syncp); 3444 sq->stats.tx_timeouts++; 3445 u64_stats_update_end(&sq->stats.syncp); 3446 3447 netdev_err(dev, "TX timeout on queue: %u, sq: %s, vq: 0x%x, name: %s, %u usecs ago\n", 3448 txqueue, sq->name, sq->vq->index, sq->vq->name, 3449 jiffies_to_usecs(jiffies - READ_ONCE(txq->trans_start))); 3450 } 3451 3452 static const struct net_device_ops virtnet_netdev = { 3453 .ndo_open = virtnet_open, 3454 .ndo_stop = virtnet_close, 3455 .ndo_start_xmit = start_xmit, 3456 .ndo_validate_addr = eth_validate_addr, 3457 .ndo_set_mac_address = virtnet_set_mac_address, 3458 .ndo_set_rx_mode = virtnet_set_rx_mode, 3459 .ndo_get_stats64 = virtnet_stats, 3460 .ndo_vlan_rx_add_vid = virtnet_vlan_rx_add_vid, 3461 .ndo_vlan_rx_kill_vid = virtnet_vlan_rx_kill_vid, 3462 .ndo_bpf = virtnet_xdp, 3463 .ndo_xdp_xmit = virtnet_xdp_xmit, 3464 .ndo_features_check = passthru_features_check, 3465 .ndo_get_phys_port_name = virtnet_get_phys_port_name, 3466 .ndo_set_features = virtnet_set_features, 3467 .ndo_tx_timeout = virtnet_tx_timeout, 3468 }; 3469 3470 static void virtnet_config_changed_work(struct work_struct *work) 3471 { 3472 struct virtnet_info *vi = 3473 container_of(work, struct virtnet_info, config_work); 3474 u16 v; 3475 3476 if (virtio_cread_feature(vi->vdev, VIRTIO_NET_F_STATUS, 3477 struct virtio_net_config, status, &v) < 0) 3478 return; 3479 3480 if (v & VIRTIO_NET_S_ANNOUNCE) { 3481 netdev_notify_peers(vi->dev); 3482 virtnet_ack_link_announce(vi); 3483 } 3484 3485 /* Ignore unknown (future) status bits */ 3486 v &= VIRTIO_NET_S_LINK_UP; 3487 3488 if (vi->status == v) 3489 return; 3490 3491 vi->status = v; 3492 3493 if (vi->status & VIRTIO_NET_S_LINK_UP) { 3494 virtnet_update_settings(vi); 3495 netif_carrier_on(vi->dev); 3496 netif_tx_wake_all_queues(vi->dev); 3497 } else { 3498 netif_carrier_off(vi->dev); 3499 netif_tx_stop_all_queues(vi->dev); 3500 } 3501 } 3502 3503 static void virtnet_config_changed(struct virtio_device *vdev) 3504 { 3505 struct virtnet_info *vi = vdev->priv; 3506 3507 schedule_work(&vi->config_work); 3508 } 3509 3510 static void virtnet_free_queues(struct virtnet_info *vi) 3511 { 3512 int i; 3513 3514 for (i = 0; i < vi->max_queue_pairs; i++) { 3515 __netif_napi_del(&vi->rq[i].napi); 3516 __netif_napi_del(&vi->sq[i].napi); 3517 } 3518 3519 /* We called __netif_napi_del(), 3520 * we need to respect an RCU grace period before freeing vi->rq 3521 */ 3522 synchronize_net(); 3523 3524 kfree(vi->rq); 3525 kfree(vi->sq); 3526 kfree(vi->ctrl); 3527 } 3528 3529 static void _free_receive_bufs(struct virtnet_info *vi) 3530 { 3531 struct bpf_prog *old_prog; 3532 int i; 3533 3534 for (i = 0; i < vi->max_queue_pairs; i++) { 3535 while (vi->rq[i].pages) 3536 __free_pages(get_a_page(&vi->rq[i], GFP_KERNEL), 0); 3537 3538 old_prog = rtnl_dereference(vi->rq[i].xdp_prog); 3539 RCU_INIT_POINTER(vi->rq[i].xdp_prog, NULL); 3540 if (old_prog) 3541 bpf_prog_put(old_prog); 3542 } 3543 } 3544 3545 static void free_receive_bufs(struct virtnet_info *vi) 3546 { 3547 rtnl_lock(); 3548 _free_receive_bufs(vi); 3549 rtnl_unlock(); 3550 } 3551 3552 static void free_receive_page_frags(struct virtnet_info *vi) 3553 { 3554 int i; 3555 for (i = 0; i < vi->max_queue_pairs; i++) 3556 if (vi->rq[i].alloc_frag.page) 3557 put_page(vi->rq[i].alloc_frag.page); 3558 } 3559 3560 static void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf) 3561 { 3562 if (!is_xdp_frame(buf)) 3563 dev_kfree_skb(buf); 3564 else 3565 
xdp_return_frame(ptr_to_xdp(buf)); 3566 } 3567 3568 static void virtnet_rq_free_unused_buf(struct virtqueue *vq, void *buf) 3569 { 3570 struct virtnet_info *vi = vq->vdev->priv; 3571 int i = vq2rxq(vq); 3572 3573 if (vi->mergeable_rx_bufs) 3574 put_page(virt_to_head_page(buf)); 3575 else if (vi->big_packets) 3576 give_pages(&vi->rq[i], buf); 3577 else 3578 put_page(virt_to_head_page(buf)); 3579 } 3580 3581 static void free_unused_bufs(struct virtnet_info *vi) 3582 { 3583 void *buf; 3584 int i; 3585 3586 for (i = 0; i < vi->max_queue_pairs; i++) { 3587 struct virtqueue *vq = vi->sq[i].vq; 3588 while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) 3589 virtnet_sq_free_unused_buf(vq, buf); 3590 cond_resched(); 3591 } 3592 3593 for (i = 0; i < vi->max_queue_pairs; i++) { 3594 struct virtqueue *vq = vi->rq[i].vq; 3595 while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) 3596 virtnet_rq_free_unused_buf(vq, buf); 3597 cond_resched(); 3598 } 3599 } 3600 3601 static void virtnet_del_vqs(struct virtnet_info *vi) 3602 { 3603 struct virtio_device *vdev = vi->vdev; 3604 3605 virtnet_clean_affinity(vi); 3606 3607 vdev->config->del_vqs(vdev); 3608 3609 virtnet_free_queues(vi); 3610 } 3611 3612 /* How large should a single buffer be so a queue full of these can fit at 3613 * least one full packet? 3614 * Logic below assumes the mergeable buffer header is used. 3615 */ 3616 static unsigned int mergeable_min_buf_len(struct virtnet_info *vi, struct virtqueue *vq) 3617 { 3618 const unsigned int hdr_len = vi->hdr_len; 3619 unsigned int rq_size = virtqueue_get_vring_size(vq); 3620 unsigned int packet_len = vi->big_packets ? IP_MAX_MTU : vi->dev->max_mtu; 3621 unsigned int buf_len = hdr_len + ETH_HLEN + VLAN_HLEN + packet_len; 3622 unsigned int min_buf_len = DIV_ROUND_UP(buf_len, rq_size); 3623 3624 return max(max(min_buf_len, hdr_len) - hdr_len, 3625 (unsigned int)GOOD_PACKET_LEN); 3626 } 3627 3628 static int virtnet_find_vqs(struct virtnet_info *vi) 3629 { 3630 vq_callback_t **callbacks; 3631 struct virtqueue **vqs; 3632 int ret = -ENOMEM; 3633 int i, total_vqs; 3634 const char **names; 3635 bool *ctx; 3636 3637 /* We expect 1 RX virtqueue followed by 1 TX virtqueue, followed by 3638 * possible N-1 RX/TX queue pairs used in multiqueue mode, followed by 3639 * possible control vq. 
3640 */ 3641 total_vqs = vi->max_queue_pairs * 2 + 3642 virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ); 3643 3644 /* Allocate space for find_vqs parameters */ 3645 vqs = kcalloc(total_vqs, sizeof(*vqs), GFP_KERNEL); 3646 if (!vqs) 3647 goto err_vq; 3648 callbacks = kmalloc_array(total_vqs, sizeof(*callbacks), GFP_KERNEL); 3649 if (!callbacks) 3650 goto err_callback; 3651 names = kmalloc_array(total_vqs, sizeof(*names), GFP_KERNEL); 3652 if (!names) 3653 goto err_names; 3654 if (!vi->big_packets || vi->mergeable_rx_bufs) { 3655 ctx = kcalloc(total_vqs, sizeof(*ctx), GFP_KERNEL); 3656 if (!ctx) 3657 goto err_ctx; 3658 } else { 3659 ctx = NULL; 3660 } 3661 3662 /* Parameters for control virtqueue, if any */ 3663 if (vi->has_cvq) { 3664 callbacks[total_vqs - 1] = NULL; 3665 names[total_vqs - 1] = "control"; 3666 } 3667 3668 /* Allocate/initialize parameters for send/receive virtqueues */ 3669 for (i = 0; i < vi->max_queue_pairs; i++) { 3670 callbacks[rxq2vq(i)] = skb_recv_done; 3671 callbacks[txq2vq(i)] = skb_xmit_done; 3672 sprintf(vi->rq[i].name, "input.%d", i); 3673 sprintf(vi->sq[i].name, "output.%d", i); 3674 names[rxq2vq(i)] = vi->rq[i].name; 3675 names[txq2vq(i)] = vi->sq[i].name; 3676 if (ctx) 3677 ctx[rxq2vq(i)] = true; 3678 } 3679 3680 ret = virtio_find_vqs_ctx(vi->vdev, total_vqs, vqs, callbacks, 3681 names, ctx, NULL); 3682 if (ret) 3683 goto err_find; 3684 3685 if (vi->has_cvq) { 3686 vi->cvq = vqs[total_vqs - 1]; 3687 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VLAN)) 3688 vi->dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; 3689 } 3690 3691 for (i = 0; i < vi->max_queue_pairs; i++) { 3692 vi->rq[i].vq = vqs[rxq2vq(i)]; 3693 vi->rq[i].min_buf_len = mergeable_min_buf_len(vi, vi->rq[i].vq); 3694 vi->sq[i].vq = vqs[txq2vq(i)]; 3695 } 3696 3697 /* run here: ret == 0. */ 3698 3699 3700 err_find: 3701 kfree(ctx); 3702 err_ctx: 3703 kfree(names); 3704 err_names: 3705 kfree(callbacks); 3706 err_callback: 3707 kfree(vqs); 3708 err_vq: 3709 return ret; 3710 } 3711 3712 static int virtnet_alloc_queues(struct virtnet_info *vi) 3713 { 3714 int i; 3715 3716 if (vi->has_cvq) { 3717 vi->ctrl = kzalloc(sizeof(*vi->ctrl), GFP_KERNEL); 3718 if (!vi->ctrl) 3719 goto err_ctrl; 3720 } else { 3721 vi->ctrl = NULL; 3722 } 3723 vi->sq = kcalloc(vi->max_queue_pairs, sizeof(*vi->sq), GFP_KERNEL); 3724 if (!vi->sq) 3725 goto err_sq; 3726 vi->rq = kcalloc(vi->max_queue_pairs, sizeof(*vi->rq), GFP_KERNEL); 3727 if (!vi->rq) 3728 goto err_rq; 3729 3730 INIT_DELAYED_WORK(&vi->refill, refill_work); 3731 for (i = 0; i < vi->max_queue_pairs; i++) { 3732 vi->rq[i].pages = NULL; 3733 netif_napi_add_weight(vi->dev, &vi->rq[i].napi, virtnet_poll, 3734 napi_weight); 3735 netif_napi_add_tx_weight(vi->dev, &vi->sq[i].napi, 3736 virtnet_poll_tx, 3737 napi_tx ? 
napi_weight : 0); 3738 3739 sg_init_table(vi->rq[i].sg, ARRAY_SIZE(vi->rq[i].sg)); 3740 ewma_pkt_len_init(&vi->rq[i].mrg_avg_pkt_len); 3741 sg_init_table(vi->sq[i].sg, ARRAY_SIZE(vi->sq[i].sg)); 3742 3743 u64_stats_init(&vi->rq[i].stats.syncp); 3744 u64_stats_init(&vi->sq[i].stats.syncp); 3745 } 3746 3747 return 0; 3748 3749 err_rq: 3750 kfree(vi->sq); 3751 err_sq: 3752 kfree(vi->ctrl); 3753 err_ctrl: 3754 return -ENOMEM; 3755 } 3756 3757 static int init_vqs(struct virtnet_info *vi) 3758 { 3759 int ret; 3760 3761 /* Allocate send & receive queues */ 3762 ret = virtnet_alloc_queues(vi); 3763 if (ret) 3764 goto err; 3765 3766 ret = virtnet_find_vqs(vi); 3767 if (ret) 3768 goto err_free; 3769 3770 cpus_read_lock(); 3771 virtnet_set_affinity(vi); 3772 cpus_read_unlock(); 3773 3774 return 0; 3775 3776 err_free: 3777 virtnet_free_queues(vi); 3778 err: 3779 return ret; 3780 } 3781 3782 #ifdef CONFIG_SYSFS 3783 static ssize_t mergeable_rx_buffer_size_show(struct netdev_rx_queue *queue, 3784 char *buf) 3785 { 3786 struct virtnet_info *vi = netdev_priv(queue->dev); 3787 unsigned int queue_index = get_netdev_rx_queue_index(queue); 3788 unsigned int headroom = virtnet_get_headroom(vi); 3789 unsigned int tailroom = headroom ? sizeof(struct skb_shared_info) : 0; 3790 struct ewma_pkt_len *avg; 3791 3792 BUG_ON(queue_index >= vi->max_queue_pairs); 3793 avg = &vi->rq[queue_index].mrg_avg_pkt_len; 3794 return sprintf(buf, "%u\n", 3795 get_mergeable_buf_len(&vi->rq[queue_index], avg, 3796 SKB_DATA_ALIGN(headroom + tailroom))); 3797 } 3798 3799 static struct rx_queue_attribute mergeable_rx_buffer_size_attribute = 3800 __ATTR_RO(mergeable_rx_buffer_size); 3801 3802 static struct attribute *virtio_net_mrg_rx_attrs[] = { 3803 &mergeable_rx_buffer_size_attribute.attr, 3804 NULL 3805 }; 3806 3807 static const struct attribute_group virtio_net_mrg_rx_group = { 3808 .name = "virtio_net", 3809 .attrs = virtio_net_mrg_rx_attrs 3810 }; 3811 #endif 3812 3813 static bool virtnet_fail_on_feature(struct virtio_device *vdev, 3814 unsigned int fbit, 3815 const char *fname, const char *dname) 3816 { 3817 if (!virtio_has_feature(vdev, fbit)) 3818 return false; 3819 3820 dev_err(&vdev->dev, "device advertises feature %s but not %s", 3821 fname, dname); 3822 3823 return true; 3824 } 3825 3826 #define VIRTNET_FAIL_ON(vdev, fbit, dbit) \ 3827 virtnet_fail_on_feature(vdev, fbit, #fbit, dbit) 3828 3829 static bool virtnet_validate_features(struct virtio_device *vdev) 3830 { 3831 if (!virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ) && 3832 (VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_CTRL_RX, 3833 "VIRTIO_NET_F_CTRL_VQ") || 3834 VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_CTRL_VLAN, 3835 "VIRTIO_NET_F_CTRL_VQ") || 3836 VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_GUEST_ANNOUNCE, 3837 "VIRTIO_NET_F_CTRL_VQ") || 3838 VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_MQ, "VIRTIO_NET_F_CTRL_VQ") || 3839 VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR, 3840 "VIRTIO_NET_F_CTRL_VQ") || 3841 VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_RSS, 3842 "VIRTIO_NET_F_CTRL_VQ") || 3843 VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_HASH_REPORT, 3844 "VIRTIO_NET_F_CTRL_VQ") || 3845 VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_NOTF_COAL, 3846 "VIRTIO_NET_F_CTRL_VQ"))) { 3847 return false; 3848 } 3849 3850 return true; 3851 } 3852 3853 #define MIN_MTU ETH_MIN_MTU 3854 #define MAX_MTU ETH_MAX_MTU 3855 3856 static int virtnet_validate(struct virtio_device *vdev) 3857 { 3858 if (!vdev->config->get) { 3859 dev_err(&vdev->dev, "%s failure: config access disabled\n", 3860 __func__); 3861 return -EINVAL; 3862 } 3863 3864 if 
(!virtnet_validate_features(vdev)) 3865 return -EINVAL; 3866 3867 if (virtio_has_feature(vdev, VIRTIO_NET_F_MTU)) { 3868 int mtu = virtio_cread16(vdev, 3869 offsetof(struct virtio_net_config, 3870 mtu)); 3871 if (mtu < MIN_MTU) 3872 __virtio_clear_bit(vdev, VIRTIO_NET_F_MTU); 3873 } 3874 3875 if (virtio_has_feature(vdev, VIRTIO_NET_F_STANDBY) && 3876 !virtio_has_feature(vdev, VIRTIO_NET_F_MAC)) { 3877 dev_warn(&vdev->dev, "device advertises feature VIRTIO_NET_F_STANDBY but not VIRTIO_NET_F_MAC, disabling standby"); 3878 __virtio_clear_bit(vdev, VIRTIO_NET_F_STANDBY); 3879 } 3880 3881 return 0; 3882 } 3883 3884 static bool virtnet_check_guest_gso(const struct virtnet_info *vi) 3885 { 3886 return virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO4) || 3887 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO6) || 3888 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_ECN) || 3889 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_UFO) || 3890 (virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_USO4) && 3891 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_USO6)); 3892 } 3893 3894 static void virtnet_set_big_packets(struct virtnet_info *vi, const int mtu) 3895 { 3896 bool guest_gso = virtnet_check_guest_gso(vi); 3897 3898 /* If device can receive ANY guest GSO packets, regardless of mtu, 3899 * allocate packets of maximum size, otherwise limit them to 3900 * mtu-sized packets only. 3901 */ 3902 if (mtu > ETH_DATA_LEN || guest_gso) { 3903 vi->big_packets = true; 3904 vi->big_packets_num_skbfrags = guest_gso ? MAX_SKB_FRAGS : DIV_ROUND_UP(mtu, PAGE_SIZE); 3905 } 3906 } 3907 3908 static int virtnet_probe(struct virtio_device *vdev) 3909 { 3910 int i, err = -ENOMEM; 3911 struct net_device *dev; 3912 struct virtnet_info *vi; 3913 u16 max_queue_pairs; 3914 int mtu = 0; 3915 3916 /* Find if host supports multiqueue/rss virtio_net device */ 3917 max_queue_pairs = 1; 3918 if (virtio_has_feature(vdev, VIRTIO_NET_F_MQ) || virtio_has_feature(vdev, VIRTIO_NET_F_RSS)) 3919 max_queue_pairs = 3920 virtio_cread16(vdev, offsetof(struct virtio_net_config, max_virtqueue_pairs)); 3921 3922 /* We need at least 2 queues */ 3923 if (max_queue_pairs < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN || 3924 max_queue_pairs > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX || 3925 !virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ)) 3926 max_queue_pairs = 1; 3927 3928 /* Allocate ourselves a network device with room for our info */ 3929 dev = alloc_etherdev_mq(sizeof(struct virtnet_info), max_queue_pairs); 3930 if (!dev) 3931 return -ENOMEM; 3932 3933 /* Set up network device as normal. */ 3934 dev->priv_flags |= IFF_UNICAST_FLT | IFF_LIVE_ADDR_CHANGE | 3935 IFF_TX_SKB_NO_LINEAR; 3936 dev->netdev_ops = &virtnet_netdev; 3937 dev->features = NETIF_F_HIGHDMA; 3938 3939 dev->ethtool_ops = &virtnet_ethtool_ops; 3940 SET_NETDEV_DEV(dev, &vdev->dev); 3941 3942 /* Do we support "hardware" checksums? */ 3943 if (virtio_has_feature(vdev, VIRTIO_NET_F_CSUM)) { 3944 /* This opens up the world of extra features. */ 3945 dev->hw_features |= NETIF_F_HW_CSUM | NETIF_F_SG; 3946 if (csum) 3947 dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG; 3948 3949 if (virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) { 3950 dev->hw_features |= NETIF_F_TSO 3951 | NETIF_F_TSO_ECN | NETIF_F_TSO6; 3952 } 3953 /* Individual feature bits: what can host handle?
static int virtnet_probe(struct virtio_device *vdev)
{
	int i, err = -ENOMEM;
	struct net_device *dev;
	struct virtnet_info *vi;
	u16 max_queue_pairs;
	int mtu = 0;

	/* Find out whether the host supports a multiqueue/RSS virtio_net device */
	max_queue_pairs = 1;
	if (virtio_has_feature(vdev, VIRTIO_NET_F_MQ) || virtio_has_feature(vdev, VIRTIO_NET_F_RSS))
		max_queue_pairs =
		     virtio_cread16(vdev, offsetof(struct virtio_net_config, max_virtqueue_pairs));

	/* We need at least 2 queues */
	if (max_queue_pairs < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN ||
	    max_queue_pairs > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX ||
	    !virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ))
		max_queue_pairs = 1;

	/* Allocate ourselves a network device with room for our info */
	dev = alloc_etherdev_mq(sizeof(struct virtnet_info), max_queue_pairs);
	if (!dev)
		return -ENOMEM;

	/* Set up network device as normal. */
	dev->priv_flags |= IFF_UNICAST_FLT | IFF_LIVE_ADDR_CHANGE |
			   IFF_TX_SKB_NO_LINEAR;
	dev->netdev_ops = &virtnet_netdev;
	dev->features = NETIF_F_HIGHDMA;

	dev->ethtool_ops = &virtnet_ethtool_ops;
	SET_NETDEV_DEV(dev, &vdev->dev);

	/* Do we support "hardware" checksums? */
	if (virtio_has_feature(vdev, VIRTIO_NET_F_CSUM)) {
		/* This opens up the world of extra features. */
		dev->hw_features |= NETIF_F_HW_CSUM | NETIF_F_SG;
		if (csum)
			dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG;

		if (virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) {
			dev->hw_features |= NETIF_F_TSO
				| NETIF_F_TSO_ECN | NETIF_F_TSO6;
		}
		/* Individual feature bits: what can host handle? */
		if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO4))
			dev->hw_features |= NETIF_F_TSO;
		if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO6))
			dev->hw_features |= NETIF_F_TSO6;
		if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_ECN))
			dev->hw_features |= NETIF_F_TSO_ECN;
		if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_USO))
			dev->hw_features |= NETIF_F_GSO_UDP_L4;

		dev->features |= NETIF_F_GSO_ROBUST;

		if (gso)
			dev->features |= dev->hw_features & NETIF_F_ALL_TSO;
		/* (!csum && gso) case will be fixed by register_netdev() */
	}
	if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_CSUM))
		dev->features |= NETIF_F_RXCSUM;
	if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) ||
	    virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6))
		dev->features |= NETIF_F_GRO_HW;
	if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS))
		dev->hw_features |= NETIF_F_GRO_HW;

	dev->vlan_features = dev->features;
	dev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT;

	/* MTU range: 68 - 65535 */
	dev->min_mtu = MIN_MTU;
	dev->max_mtu = MAX_MTU;

	/* Configuration may specify what MAC to use.  Otherwise random. */
	if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC)) {
		u8 addr[ETH_ALEN];

		virtio_cread_bytes(vdev,
				   offsetof(struct virtio_net_config, mac),
				   addr, ETH_ALEN);
		eth_hw_addr_set(dev, addr);
	} else {
		eth_hw_addr_random(dev);
		dev_info(&vdev->dev, "Assigned random MAC address %pM\n",
			 dev->dev_addr);
	}

	/* Set up our device-specific information */
	vi = netdev_priv(dev);
	vi->dev = dev;
	vi->vdev = vdev;
	vdev->priv = vi;

	INIT_WORK(&vi->config_work, virtnet_config_changed_work);
	spin_lock_init(&vi->refill_lock);

	if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF)) {
		vi->mergeable_rx_bufs = true;
		dev->xdp_features |= NETDEV_XDP_ACT_RX_SG;
	}

	if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_NOTF_COAL)) {
		vi->rx_usecs = 0;
		vi->tx_usecs = 0;
		vi->tx_max_packets = 0;
		vi->rx_max_packets = 0;
	}

	if (virtio_has_feature(vdev, VIRTIO_NET_F_HASH_REPORT))
		vi->has_rss_hash_report = true;

	if (virtio_has_feature(vdev, VIRTIO_NET_F_RSS))
		vi->has_rss = true;

	if (vi->has_rss || vi->has_rss_hash_report) {
		vi->rss_indir_table_size =
			virtio_cread16(vdev, offsetof(struct virtio_net_config,
				rss_max_indirection_table_length));
		vi->rss_key_size =
			virtio_cread8(vdev, offsetof(struct virtio_net_config, rss_max_key_size));

		vi->rss_hash_types_supported =
		    virtio_cread32(vdev, offsetof(struct virtio_net_config, supported_hash_types));
		vi->rss_hash_types_supported &=
				~(VIRTIO_NET_RSS_HASH_TYPE_IP_EX |
				  VIRTIO_NET_RSS_HASH_TYPE_TCP_EX |
				  VIRTIO_NET_RSS_HASH_TYPE_UDP_EX);

		dev->hw_features |= NETIF_F_RXHASH;
	}
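
	/* Pick the virtio-net header layout for this device: the hash-report
	 * header is the largest, mergeable-rxbuf and VIRTIO 1.0 devices use
	 * the header that carries num_buffers, and legacy devices use the
	 * short header without it.
	 */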
	if (vi->has_rss_hash_report)
		vi->hdr_len = sizeof(struct virtio_net_hdr_v1_hash);
	else if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF) ||
		 virtio_has_feature(vdev, VIRTIO_F_VERSION_1))
		vi->hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf);
	else
		vi->hdr_len = sizeof(struct virtio_net_hdr);

	if (virtio_has_feature(vdev, VIRTIO_F_ANY_LAYOUT) ||
	    virtio_has_feature(vdev, VIRTIO_F_VERSION_1))
		vi->any_header_sg = true;

	if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ))
		vi->has_cvq = true;

	if (virtio_has_feature(vdev, VIRTIO_NET_F_MTU)) {
		mtu = virtio_cread16(vdev,
				     offsetof(struct virtio_net_config,
					      mtu));
		if (mtu < dev->min_mtu) {
			/* Should never trigger: MTU was previously validated
			 * in virtnet_validate.
			 */
			dev_err(&vdev->dev,
				"device MTU appears to have changed it is now %d < %d",
				mtu, dev->min_mtu);
			err = -EINVAL;
			goto free;
		}

		dev->mtu = mtu;
		dev->max_mtu = mtu;
	}

	virtnet_set_big_packets(vi, mtu);

	if (vi->any_header_sg)
		dev->needed_headroom = vi->hdr_len;

	/* Enable multiqueue by default */
	if (num_online_cpus() >= max_queue_pairs)
		vi->curr_queue_pairs = max_queue_pairs;
	else
		vi->curr_queue_pairs = num_online_cpus();
	vi->max_queue_pairs = max_queue_pairs;

	/* Allocate/initialize the rx/tx queues, and invoke find_vqs */
	err = init_vqs(vi);
	if (err)
		goto free;

#ifdef CONFIG_SYSFS
	if (vi->mergeable_rx_bufs)
		dev->sysfs_rx_queue_group = &virtio_net_mrg_rx_group;
#endif
	netif_set_real_num_tx_queues(dev, vi->curr_queue_pairs);
	netif_set_real_num_rx_queues(dev, vi->curr_queue_pairs);

	virtnet_init_settings(dev);

	if (virtio_has_feature(vdev, VIRTIO_NET_F_STANDBY)) {
		vi->failover = net_failover_create(vi->dev);
		if (IS_ERR(vi->failover)) {
			err = PTR_ERR(vi->failover);
			goto free_vqs;
		}
	}

	if (vi->has_rss || vi->has_rss_hash_report)
		virtnet_init_default_rss(vi);

	/* serialize netdev register + virtio_device_ready() with ndo_open() */
	rtnl_lock();

	err = register_netdevice(dev);
	if (err) {
		pr_debug("virtio_net: registering device failed\n");
		rtnl_unlock();
		goto free_failover;
	}

	virtio_device_ready(vdev);

	/* A random MAC address has been assigned; notify the device.
	 * We don't fail probe if VIRTIO_NET_F_CTRL_MAC_ADDR is absent because
	 * many devices work fine without the MAC being set explicitly.
	 */
	if (!virtio_has_feature(vdev, VIRTIO_NET_F_MAC) &&
	    virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_MAC_ADDR)) {
		struct scatterlist sg;

		sg_init_one(&sg, dev->dev_addr, dev->addr_len);
		if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
					  VIRTIO_NET_CTRL_MAC_ADDR_SET, &sg)) {
			pr_debug("virtio_net: setting MAC address failed\n");
			rtnl_unlock();
			err = -EINVAL;
			goto free_unregister_netdev;
		}
	}

	rtnl_unlock();

	err = virtnet_cpu_notif_add(vi);
	if (err) {
		pr_debug("virtio_net: registering cpu notifier failed\n");
		goto free_unregister_netdev;
	}

	virtnet_set_queues(vi, vi->curr_queue_pairs);

	/* Assume link up if device can't report link status,
	 * otherwise get link status from config.
	 */
	netif_carrier_off(dev);
	if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) {
		schedule_work(&vi->config_work);
	} else {
		vi->status = VIRTIO_NET_S_LINK_UP;
		virtnet_update_settings(vi);
		netif_carrier_on(dev);
	}

	for (i = 0; i < ARRAY_SIZE(guest_offloads); i++)
		if (virtio_has_feature(vi->vdev, guest_offloads[i]))
			set_bit(guest_offloads[i], &vi->guest_offloads);
	vi->guest_offloads_capable = vi->guest_offloads;

	pr_debug("virtnet: registered device %s with %d RX and TX vq's\n",
		 dev->name, max_queue_pairs);

	return 0;

free_unregister_netdev:
	unregister_netdev(dev);
free_failover:
	net_failover_destroy(vi->failover);
free_vqs:
	virtio_reset_device(vdev);
	cancel_delayed_work_sync(&vi->refill);
	free_receive_page_frags(vi);
	virtnet_del_vqs(vi);
free:
	free_netdev(dev);
	return err;
}

static void remove_vq_common(struct virtnet_info *vi)
{
	virtio_reset_device(vi->vdev);

	/* Free unused buffers in both send and recv, if any. */
	free_unused_bufs(vi);

	free_receive_bufs(vi);

	free_receive_page_frags(vi);

	virtnet_del_vqs(vi);
}

static void virtnet_remove(struct virtio_device *vdev)
{
	struct virtnet_info *vi = vdev->priv;

	virtnet_cpu_notif_remove(vi);

	/* Make sure no work handler is accessing the device. */
	flush_work(&vi->config_work);

	unregister_netdev(vi->dev);

	net_failover_destroy(vi->failover);

	remove_vq_common(vi);

	free_netdev(vi->dev);
}

static __maybe_unused int virtnet_freeze(struct virtio_device *vdev)
{
	struct virtnet_info *vi = vdev->priv;

	virtnet_cpu_notif_remove(vi);
	virtnet_freeze_down(vdev);
	remove_vq_common(vi);

	return 0;
}

static __maybe_unused int virtnet_restore(struct virtio_device *vdev)
{
	struct virtnet_info *vi = vdev->priv;
	int err;

	err = virtnet_restore_up(vdev);
	if (err)
		return err;
	virtnet_set_queues(vi, vi->curr_queue_pairs);

	err = virtnet_cpu_notif_add(vi);
	if (err) {
		virtnet_freeze_down(vdev);
		remove_vq_common(vi);
		return err;
	}

	return 0;
}

static struct virtio_device_id id_table[] = {
	{ VIRTIO_ID_NET, VIRTIO_DEV_ANY_ID },
	{ 0 },
};

#define VIRTNET_FEATURES \
	VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GUEST_CSUM, \
	VIRTIO_NET_F_MAC, \
	VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6, \
	VIRTIO_NET_F_HOST_ECN, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6, \
	VIRTIO_NET_F_GUEST_ECN, VIRTIO_NET_F_GUEST_UFO, \
	VIRTIO_NET_F_HOST_USO, VIRTIO_NET_F_GUEST_USO4, VIRTIO_NET_F_GUEST_USO6, \
	VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ, \
	VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN, \
	VIRTIO_NET_F_GUEST_ANNOUNCE, VIRTIO_NET_F_MQ, \
	VIRTIO_NET_F_CTRL_MAC_ADDR, \
	VIRTIO_NET_F_MTU, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS, \
	VIRTIO_NET_F_SPEED_DUPLEX, VIRTIO_NET_F_STANDBY, \
	VIRTIO_NET_F_RSS, VIRTIO_NET_F_HASH_REPORT, VIRTIO_NET_F_NOTF_COAL, \
	VIRTIO_NET_F_GUEST_HDRLEN

static unsigned int features[] = {
	VIRTNET_FEATURES,
};

static unsigned int features_legacy[] = {
	VIRTNET_FEATURES,
	VIRTIO_NET_F_GSO,
	VIRTIO_F_ANY_LAYOUT,
};
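
/* Feature tables consumed by the virtio core: features_legacy, used for
 * legacy (transitional) devices, additionally accepts VIRTIO_NET_F_GSO and
 * VIRTIO_F_ANY_LAYOUT on top of VIRTNET_FEATURES.
 */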
static struct virtio_driver virtio_net_driver = {
	.feature_table = features,
	.feature_table_size = ARRAY_SIZE(features),
	.feature_table_legacy = features_legacy,
	.feature_table_size_legacy = ARRAY_SIZE(features_legacy),
	.driver.name = KBUILD_MODNAME,
	.driver.owner = THIS_MODULE,
	.id_table = id_table,
	.validate = virtnet_validate,
	.probe = virtnet_probe,
	.remove = virtnet_remove,
	.config_changed = virtnet_config_changed,
#ifdef CONFIG_PM_SLEEP
	.freeze = virtnet_freeze,
	.restore = virtnet_restore,
#endif
};

static __init int virtio_net_driver_init(void)
{
	int ret;

	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "virtio/net:online",
				      virtnet_cpu_online,
				      virtnet_cpu_down_prep);
	if (ret < 0)
		goto out;
	virtionet_online = ret;
	ret = cpuhp_setup_state_multi(CPUHP_VIRT_NET_DEAD, "virtio/net:dead",
				      NULL, virtnet_cpu_dead);
	if (ret)
		goto err_dead;
	ret = register_virtio_driver(&virtio_net_driver);
	if (ret)
		goto err_virtio;
	return 0;
err_virtio:
	cpuhp_remove_multi_state(CPUHP_VIRT_NET_DEAD);
err_dead:
	cpuhp_remove_multi_state(virtionet_online);
out:
	return ret;
}
module_init(virtio_net_driver_init);

static __exit void virtio_net_driver_exit(void)
{
	unregister_virtio_driver(&virtio_net_driver);
	cpuhp_remove_multi_state(CPUHP_VIRT_NET_DEAD);
	cpuhp_remove_multi_state(virtionet_online);
}
module_exit(virtio_net_driver_exit);

MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_DESCRIPTION("Virtio network driver");
MODULE_LICENSE("GPL");