// SPDX-License-Identifier: GPL-2.0-or-later
/* A network driver using virtio.
 *
 * Copyright 2007 Rusty Russell <rusty@rustcorp.com.au> IBM Corporation
 */
//#define DEBUG
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/module.h>
#include <linux/virtio.h>
#include <linux/virtio_net.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/scatterlist.h>
#include <linux/if_vlan.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/average.h>
#include <linux/filter.h>
#include <linux/kernel.h>
#include <net/route.h>
#include <net/xdp.h>
#include <net/net_failover.h>

static int napi_weight = NAPI_POLL_WEIGHT;
module_param(napi_weight, int, 0444);

static bool csum = true, gso = true, napi_tx = true;
module_param(csum, bool, 0444);
module_param(gso, bool, 0444);
module_param(napi_tx, bool, 0644);

/* FIXME: MTU in config. */
#define GOOD_PACKET_LEN (ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN)
#define GOOD_COPY_LEN   128

#define VIRTNET_RX_PAD (NET_IP_ALIGN + NET_SKB_PAD)

/* Amount of XDP headroom to prepend to packets for use by xdp_adjust_head */
#define VIRTIO_XDP_HEADROOM 256

/* Separating two types of XDP xmit */
#define VIRTIO_XDP_TX           BIT(0)
#define VIRTIO_XDP_REDIR        BIT(1)

#define VIRTIO_XDP_FLAG         BIT(0)

/* RX packet size EWMA. The average packet size is used to determine the packet
 * buffer size when refilling RX rings. As the entire RX ring may be refilled
 * at once, the weight is chosen so that the EWMA will be insensitive to short-
 * term, transient changes in packet size.
 */
DECLARE_EWMA(pkt_len, 0, 64)
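/*
 * Illustration of the parameters (a sketch based on the DECLARE_EWMA()
 * semantics in <linux/average.h>): precision 0 means no extra fractional
 * bits, and a weight reciprocal of 64 gives roughly
 * avg += (sample - avg) / 64, so a single outlier packet shifts the
 * average by only ~1/64 of its deviation.
 */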
#define VIRTNET_DRIVER_VERSION "1.0.0"

static const unsigned long guest_offloads[] = {
        VIRTIO_NET_F_GUEST_TSO4,
        VIRTIO_NET_F_GUEST_TSO6,
        VIRTIO_NET_F_GUEST_ECN,
        VIRTIO_NET_F_GUEST_UFO,
        VIRTIO_NET_F_GUEST_CSUM
};

#define GUEST_OFFLOAD_GRO_HW_MASK ((1ULL << VIRTIO_NET_F_GUEST_TSO4) | \
                                   (1ULL << VIRTIO_NET_F_GUEST_TSO6) | \
                                   (1ULL << VIRTIO_NET_F_GUEST_ECN)  | \
                                   (1ULL << VIRTIO_NET_F_GUEST_UFO))

struct virtnet_stat_desc {
        char desc[ETH_GSTRING_LEN];
        size_t offset;
};

struct virtnet_sq_stats {
        struct u64_stats_sync syncp;
        u64 packets;
        u64 bytes;
        u64 xdp_tx;
        u64 xdp_tx_drops;
        u64 kicks;
        u64 tx_timeouts;
};

struct virtnet_rq_stats {
        struct u64_stats_sync syncp;
        u64 packets;
        u64 bytes;
        u64 drops;
        u64 xdp_packets;
        u64 xdp_tx;
        u64 xdp_redirects;
        u64 xdp_drops;
        u64 kicks;
};

#define VIRTNET_SQ_STAT(m)      offsetof(struct virtnet_sq_stats, m)
#define VIRTNET_RQ_STAT(m)      offsetof(struct virtnet_rq_stats, m)

static const struct virtnet_stat_desc virtnet_sq_stats_desc[] = {
        { "packets",            VIRTNET_SQ_STAT(packets) },
        { "bytes",              VIRTNET_SQ_STAT(bytes) },
        { "xdp_tx",             VIRTNET_SQ_STAT(xdp_tx) },
        { "xdp_tx_drops",       VIRTNET_SQ_STAT(xdp_tx_drops) },
        { "kicks",              VIRTNET_SQ_STAT(kicks) },
        { "tx_timeouts",        VIRTNET_SQ_STAT(tx_timeouts) },
};

static const struct virtnet_stat_desc virtnet_rq_stats_desc[] = {
        { "packets",            VIRTNET_RQ_STAT(packets) },
        { "bytes",              VIRTNET_RQ_STAT(bytes) },
        { "drops",              VIRTNET_RQ_STAT(drops) },
        { "xdp_packets",        VIRTNET_RQ_STAT(xdp_packets) },
        { "xdp_tx",             VIRTNET_RQ_STAT(xdp_tx) },
        { "xdp_redirects",      VIRTNET_RQ_STAT(xdp_redirects) },
        { "xdp_drops",          VIRTNET_RQ_STAT(xdp_drops) },
        { "kicks",              VIRTNET_RQ_STAT(kicks) },
};

#define VIRTNET_SQ_STATS_LEN    ARRAY_SIZE(virtnet_sq_stats_desc)
#define VIRTNET_RQ_STATS_LEN    ARRAY_SIZE(virtnet_rq_stats_desc)

/* Internal representation of a send virtqueue */
struct send_queue {
        /* Virtqueue associated with this send_queue */
        struct virtqueue *vq;

        /* TX: fragments + linear part + virtio header */
        struct scatterlist sg[MAX_SKB_FRAGS + 2];

        /* Name of the send queue: output.$index */
        char name[40];

        struct virtnet_sq_stats stats;

        struct napi_struct napi;
};

/* Internal representation of a receive virtqueue */
struct receive_queue {
        /* Virtqueue associated with this receive_queue */
        struct virtqueue *vq;

        struct napi_struct napi;

        struct bpf_prog __rcu *xdp_prog;

        struct virtnet_rq_stats stats;

        /* Chain pages by the private ptr. */
        struct page *pages;

        /* Average packet length for mergeable receive buffers. */
        struct ewma_pkt_len mrg_avg_pkt_len;

        /* Page frag for packet buffer allocation. */
        struct page_frag alloc_frag;

        /* RX: fragments + linear part + virtio header */
        struct scatterlist sg[MAX_SKB_FRAGS + 2];

        /* Min single buffer size for mergeable buffers case. */
        unsigned int min_buf_len;

        /* Name of this receive queue: input.$index */
        char name[40];

        struct xdp_rxq_info xdp_rxq;
};

/* This structure can hold an RSS message with the maximum settings for the
 * indirection table and key size. Note that the default structure describing
 * the RSS configuration, virtio_net_rss_config, carries the same info but
 * can't hold the table values. In any case, the structure is passed to the
 * virtio hw through sg_buf split into parts, because table sizes may differ
 * according to the device configuration.
 */
#define VIRTIO_NET_RSS_MAX_KEY_SIZE     40
#define VIRTIO_NET_RSS_MAX_TABLE_LEN    128
struct virtio_net_ctrl_rss {
        u32 hash_types;
        u16 indirection_table_mask;
        u16 unclassified_queue;
        u16 indirection_table[VIRTIO_NET_RSS_MAX_TABLE_LEN];
        u16 max_tx_vq;
        u8 hash_key_length;
        u8 key[VIRTIO_NET_RSS_MAX_KEY_SIZE];
};
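/*
 * Example of how this table is typically filled in (a sketch, not the
 * only valid layout): with an indirection table of 128 entries and four
 * receive queues, indirection_table_mask would be 127 and
 * indirection_table[] could cycle 0,1,2,3,0,1,... so hashed flows are
 * spread evenly across the queues.
 */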
/* Control VQ buffers: protected by the rtnl lock */
struct control_buf {
        struct virtio_net_ctrl_hdr hdr;
        virtio_net_ctrl_ack status;
        struct virtio_net_ctrl_mq mq;
        u8 promisc;
        u8 allmulti;
        __virtio16 vid;
        __virtio64 offloads;
        struct virtio_net_ctrl_rss rss;
};

struct virtnet_info {
        struct virtio_device *vdev;
        struct virtqueue *cvq;
        struct net_device *dev;
        struct send_queue *sq;
        struct receive_queue *rq;
        unsigned int status;

        /* Max # of queue pairs supported by the device */
        u16 max_queue_pairs;

        /* # of queue pairs currently used by the driver */
        u16 curr_queue_pairs;

        /* # of XDP queue pairs currently used by the driver */
        u16 xdp_queue_pairs;

        /* xdp_queue_pairs may be 0, when xdp is already loaded. So add this. */
        bool xdp_enabled;

        /* I like... big packets and I cannot lie! */
        bool big_packets;

        /* Host will merge rx buffers for big packets (shake it! shake it!) */
        bool mergeable_rx_bufs;

        /* Host supports rss and/or hash report */
        bool has_rss;
        bool has_rss_hash_report;
        u8 rss_key_size;
        u16 rss_indir_table_size;
        u32 rss_hash_types_supported;
        u32 rss_hash_types_saved;

        /* Has control virtqueue */
        bool has_cvq;

        /* Host can handle any s/g split between our header and packet data */
        bool any_header_sg;

        /* Packet virtio header size */
        u8 hdr_len;

        /* Work struct for delayed refilling if we run low on memory. */
        struct delayed_work refill;

        /* Is delayed refill enabled? */
        bool refill_enabled;

        /* The lock to synchronize the access to refill_enabled */
        spinlock_t refill_lock;

        /* Work struct for config space updates */
        struct work_struct config_work;

        /* Is the affinity hint set for virtqueues? */
        bool affinity_hint_set;

        /* CPU hotplug instances for online & dead */
        struct hlist_node node;
        struct hlist_node node_dead;

        struct control_buf *ctrl;

        /* Ethtool settings */
        u8 duplex;
        u32 speed;

        unsigned long guest_offloads;
        unsigned long guest_offloads_capable;

        /* failover when STANDBY feature enabled */
        struct failover *failover;
};

struct padded_vnet_hdr {
        struct virtio_net_hdr_v1_hash hdr;
        /*
         * hdr is in a separate sg buffer, and the data sg buffer shares the
         * same page with this header sg. This padding makes the next sg
         * 16 byte aligned after the header.
         */
        char padding[12];
};

static bool is_xdp_frame(void *ptr)
{
        return (unsigned long)ptr & VIRTIO_XDP_FLAG;
}

static void *xdp_to_ptr(struct xdp_frame *ptr)
{
        return (void *)((unsigned long)ptr | VIRTIO_XDP_FLAG);
}

static struct xdp_frame *ptr_to_xdp(void *ptr)
{
        return (struct xdp_frame *)((unsigned long)ptr & ~VIRTIO_XDP_FLAG);
}
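/*
 * The tagging above works because virtqueue tokens are pointers with
 * alignment greater than one, so bit 0 is always clear. A sketch of a
 * round trip:
 *
 *      void *tok = xdp_to_ptr(xdpf);    // tok has bit 0 set
 *      if (is_xdp_frame(tok))           // true for tagged frames
 *              frame = ptr_to_xdp(tok); // recovers the original pointer
 *
 * Plain sk_buff pointers are queued untagged, so is_xdp_frame() is
 * false for them.
 */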
/* Converting between virtqueue no. and kernel tx/rx queue no.
 * 0:rx0 1:tx0 2:rx1 3:tx1 ... 2N:rxN 2N+1:txN 2N+2:cvq
 */
static int vq2txq(struct virtqueue *vq)
{
        return (vq->index - 1) / 2;
}

static int txq2vq(int txq)
{
        return txq * 2 + 1;
}

static int vq2rxq(struct virtqueue *vq)
{
        return vq->index / 2;
}

static int rxq2vq(int rxq)
{
        return rxq * 2;
}
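/*
 * Worked example of the mapping: vq index 4 is rxq 2 (4 / 2) and vq
 * index 5 is txq 2 ((5 - 1) / 2); conversely rxq2vq(2) == 4 and
 * txq2vq(2) == 5. The control vq, if present, sits at index 2N+2 and
 * is never passed through these helpers.
 */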
static inline struct virtio_net_hdr_mrg_rxbuf *skb_vnet_hdr(struct sk_buff *skb)
{
        return (struct virtio_net_hdr_mrg_rxbuf *)skb->cb;
}

/*
 * private is used to chain pages for big packets; put the whole
 * most recently used list at the beginning for reuse
 */
static void give_pages(struct receive_queue *rq, struct page *page)
{
        struct page *end;

        /* Find end of list, sew whole thing into vi->rq.pages. */
        for (end = page; end->private; end = (struct page *)end->private);
        end->private = (unsigned long)rq->pages;
        rq->pages = page;
}

static struct page *get_a_page(struct receive_queue *rq, gfp_t gfp_mask)
{
        struct page *p = rq->pages;

        if (p) {
                rq->pages = (struct page *)p->private;
                /* clear private here, it is used to chain pages */
                p->private = 0;
        } else
                p = alloc_page(gfp_mask);
        return p;
}

static void enable_delayed_refill(struct virtnet_info *vi)
{
        spin_lock_bh(&vi->refill_lock);
        vi->refill_enabled = true;
        spin_unlock_bh(&vi->refill_lock);
}

static void disable_delayed_refill(struct virtnet_info *vi)
{
        spin_lock_bh(&vi->refill_lock);
        vi->refill_enabled = false;
        spin_unlock_bh(&vi->refill_lock);
}

static void virtqueue_napi_schedule(struct napi_struct *napi,
                                    struct virtqueue *vq)
{
        if (napi_schedule_prep(napi)) {
                virtqueue_disable_cb(vq);
                __napi_schedule(napi);
        }
}

static void virtqueue_napi_complete(struct napi_struct *napi,
                                    struct virtqueue *vq, int processed)
{
        int opaque;

        opaque = virtqueue_enable_cb_prepare(vq);
        if (napi_complete_done(napi, processed)) {
                if (unlikely(virtqueue_poll(vq, opaque)))
                        virtqueue_napi_schedule(napi, vq);
        } else {
                virtqueue_disable_cb(vq);
        }
}

static void skb_xmit_done(struct virtqueue *vq)
{
        struct virtnet_info *vi = vq->vdev->priv;
        struct napi_struct *napi = &vi->sq[vq2txq(vq)].napi;

        /* Suppress further interrupts. */
        virtqueue_disable_cb(vq);

        if (napi->weight)
                virtqueue_napi_schedule(napi, vq);
        else
                /* We were probably waiting for more output buffers. */
                netif_wake_subqueue(vi->dev, vq2txq(vq));
}

#define MRG_CTX_HEADER_SHIFT 22
static void *mergeable_len_to_ctx(unsigned int truesize,
                                  unsigned int headroom)
{
        return (void *)(unsigned long)((headroom << MRG_CTX_HEADER_SHIFT) | truesize);
}

static unsigned int mergeable_ctx_to_headroom(void *mrg_ctx)
{
        return (unsigned long)mrg_ctx >> MRG_CTX_HEADER_SHIFT;
}

static unsigned int mergeable_ctx_to_truesize(void *mrg_ctx)
{
        return (unsigned long)mrg_ctx & ((1 << MRG_CTX_HEADER_SHIFT) - 1);
}
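/*
 * Packing sketch: truesize lives in the low 22 bits of the context,
 * headroom in the bits above. E.g. mergeable_len_to_ctx(1536, 256)
 * yields (256 << 22) | 1536, and the two accessors above recover 256
 * and 1536 from that value; 22 bits comfortably cover any truesize
 * below 4 MB.
 */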
/* Called from bottom half context */
static struct sk_buff *page_to_skb(struct virtnet_info *vi,
                                   struct receive_queue *rq,
                                   struct page *page, unsigned int offset,
                                   unsigned int len, unsigned int truesize,
                                   bool hdr_valid, unsigned int metasize,
                                   unsigned int headroom)
{
        struct sk_buff *skb;
        struct virtio_net_hdr_mrg_rxbuf *hdr;
        unsigned int copy, hdr_len, hdr_padded_len;
        struct page *page_to_free = NULL;
        int tailroom, shinfo_size;
        char *p, *hdr_p, *buf;

        p = page_address(page) + offset;
        hdr_p = p;

        hdr_len = vi->hdr_len;
        if (vi->mergeable_rx_bufs)
                hdr_padded_len = hdr_len;
        else
                hdr_padded_len = sizeof(struct padded_vnet_hdr);

        /* If headroom is not 0, there is an offset between the beginning of the
         * data and the allocated space, otherwise the data and the allocated
         * space are aligned.
         *
         * Buffers with headroom use PAGE_SIZE as alloc size, see
         * add_recvbuf_mergeable() + get_mergeable_buf_len()
         */
        truesize = headroom ? PAGE_SIZE : truesize;
        tailroom = truesize - headroom;
        buf = p - headroom;

        len -= hdr_len;
        offset += hdr_padded_len;
        p += hdr_padded_len;
        tailroom -= hdr_padded_len + len;

        shinfo_size = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

        /* Build the skb around the buffer in place when the packet is big
         * enough and there is room for the shared info, so the page is
         * reused without copying.
         */
        if (!NET_IP_ALIGN && len > GOOD_COPY_LEN && tailroom >= shinfo_size) {
                skb = build_skb(buf, truesize);
                if (unlikely(!skb))
                        return NULL;

                skb_reserve(skb, p - buf);
                skb_put(skb, len);

                page = (struct page *)page->private;
                if (page)
                        give_pages(rq, page);
                goto ok;
        }

        /* copy small packet so we can reuse these pages for small data */
        skb = napi_alloc_skb(&rq->napi, GOOD_COPY_LEN);
        if (unlikely(!skb))
                return NULL;

        /* Copy all frame if it fits skb->head, otherwise
         * we let virtio_net_hdr_to_skb() and GRO pull headers as needed.
         */
        if (len <= skb_tailroom(skb))
                copy = len;
        else
                copy = ETH_HLEN + metasize;
        skb_put_data(skb, p, copy);

        len -= copy;
        offset += copy;

        if (vi->mergeable_rx_bufs) {
                if (len)
                        skb_add_rx_frag(skb, 0, page, offset, len, truesize);
                else
                        page_to_free = page;
                goto ok;
        }

        /*
         * Verify that we can indeed put this data into a skb.
         * This is here to handle cases when the device erroneously
         * tries to receive more than is possible. This is usually
         * the case with a broken device.
         */
        if (unlikely(len > MAX_SKB_FRAGS * PAGE_SIZE)) {
                net_dbg_ratelimited("%s: too much data\n", skb->dev->name);
                dev_kfree_skb(skb);
                return NULL;
        }
        BUG_ON(offset >= PAGE_SIZE);
        while (len) {
                unsigned int frag_size = min((unsigned)PAGE_SIZE - offset, len);
                skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, offset,
                                frag_size, truesize);
                len -= frag_size;
                page = (struct page *)page->private;
                offset = 0;
        }

        if (page)
                give_pages(rq, page);

ok:
        /* hdr_valid means no XDP, so we can copy the vnet header */
        if (hdr_valid) {
                hdr = skb_vnet_hdr(skb);
                memcpy(hdr, hdr_p, hdr_len);
        }
        if (page_to_free)
                put_page(page_to_free);

        if (metasize) {
                __skb_pull(skb, metasize);
                skb_metadata_set(skb, metasize);
        }

        return skb;
}

static int __virtnet_xdp_xmit_one(struct virtnet_info *vi,
                                  struct send_queue *sq,
                                  struct xdp_frame *xdpf)
{
        struct virtio_net_hdr_mrg_rxbuf *hdr;
        int err;

        if (unlikely(xdpf->headroom < vi->hdr_len))
                return -EOVERFLOW;

        /* Make room for virtqueue hdr (also change xdpf->headroom?) */
        xdpf->data -= vi->hdr_len;
        /* Zero header and leave csum up to XDP layers */
        hdr = xdpf->data;
        memset(hdr, 0, vi->hdr_len);
        xdpf->len += vi->hdr_len;

        sg_init_one(sq->sg, xdpf->data, xdpf->len);

        err = virtqueue_add_outbuf(sq->vq, sq->sg, 1, xdp_to_ptr(xdpf),
                                   GFP_ATOMIC);
        if (unlikely(err))
                return -ENOSPC; /* Caller handle free/refcnt */

        return 0;
}
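/*
 * Resulting layout of the single sg entry handed to the device, assuming
 * the frame had at least vi->hdr_len bytes of headroom:
 *
 *      xdpf->data -> [ zeroed virtio-net header | original XDP frame data ]
 *      xdpf->len  == original length + vi->hdr_len
 */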
/* when vi->curr_queue_pairs > nr_cpu_ids, the txq/sq is only used for xdp tx on
 * the current cpu, so it does not need to be locked.
 *
 * Here we use macros instead of inline functions because we have to deal with
 * three issues at the same time: 1. the choice of sq. 2. judge and execute the
 * lock/unlock of txq 3. make sparse happy. It is difficult for two inline
 * functions to perfectly solve these three problems at the same time.
 */
#define virtnet_xdp_get_sq(vi) ({                                       \
        int cpu = smp_processor_id();                                   \
        struct netdev_queue *txq;                                       \
        typeof(vi) v = (vi);                                            \
        unsigned int qp;                                                \
                                                                        \
        if (v->curr_queue_pairs > nr_cpu_ids) {                         \
                qp = v->curr_queue_pairs - v->xdp_queue_pairs;          \
                qp += cpu;                                              \
                txq = netdev_get_tx_queue(v->dev, qp);                  \
                __netif_tx_acquire(txq);                                \
        } else {                                                        \
                qp = cpu % v->curr_queue_pairs;                         \
                txq = netdev_get_tx_queue(v->dev, qp);                  \
                __netif_tx_lock(txq, cpu);                              \
        }                                                               \
        v->sq + qp;                                                     \
})

#define virtnet_xdp_put_sq(vi, q) {                                     \
        struct netdev_queue *txq;                                       \
        typeof(vi) v = (vi);                                            \
                                                                        \
        txq = netdev_get_tx_queue(v->dev, (q) - v->sq);                 \
        if (v->curr_queue_pairs > nr_cpu_ids)                           \
                __netif_tx_release(txq);                                \
        else                                                            \
                __netif_tx_unlock(txq);                                 \
}
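/*
 * Typical pairing, mirroring virtnet_xdp_xmit() below (a sketch):
 *
 *      struct send_queue *sq = virtnet_xdp_get_sq(vi);
 *      // ...queue XDP frames on sq->vq...
 *      virtnet_xdp_put_sq(vi, sq);
 *
 * With enough queue pairs each cpu owns a dedicated XDP sq and no lock
 * is taken; otherwise a tx queue is shared and held under the txq lock.
 */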
static int virtnet_xdp_xmit(struct net_device *dev,
                            int n, struct xdp_frame **frames, u32 flags)
{
        struct virtnet_info *vi = netdev_priv(dev);
        struct receive_queue *rq = vi->rq;
        struct bpf_prog *xdp_prog;
        struct send_queue *sq;
        unsigned int len;
        int packets = 0;
        int bytes = 0;
        int nxmit = 0;
        int kicks = 0;
        void *ptr;
        int ret;
        int i;

        /* Only allow ndo_xdp_xmit if XDP is loaded on dev, as this
         * indicates XDP resources have been successfully allocated.
         */
        xdp_prog = rcu_access_pointer(rq->xdp_prog);
        if (!xdp_prog)
                return -ENXIO;

        sq = virtnet_xdp_get_sq(vi);

        if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) {
                ret = -EINVAL;
                goto out;
        }

        /* Free up any pending old buffers before queueing new ones. */
        while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) {
                if (likely(is_xdp_frame(ptr))) {
                        struct xdp_frame *frame = ptr_to_xdp(ptr);

                        bytes += frame->len;
                        xdp_return_frame(frame);
                } else {
                        struct sk_buff *skb = ptr;

                        bytes += skb->len;
                        napi_consume_skb(skb, false);
                }
                packets++;
        }

        for (i = 0; i < n; i++) {
                struct xdp_frame *xdpf = frames[i];

                if (__virtnet_xdp_xmit_one(vi, sq, xdpf))
                        break;
                nxmit++;
        }
        ret = nxmit;

        if (flags & XDP_XMIT_FLUSH) {
                if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq))
                        kicks = 1;
        }
out:
        u64_stats_update_begin(&sq->stats.syncp);
        sq->stats.bytes += bytes;
        sq->stats.packets += packets;
        sq->stats.xdp_tx += n;
        sq->stats.xdp_tx_drops += n - nxmit;
        sq->stats.kicks += kicks;
        u64_stats_update_end(&sq->stats.syncp);

        virtnet_xdp_put_sq(vi, sq);
        return ret;
}

static unsigned int virtnet_get_headroom(struct virtnet_info *vi)
{
        return vi->xdp_enabled ? VIRTIO_XDP_HEADROOM : 0;
}
/* We copy the packet for XDP in the following cases:
 *
 * 1) Packet is scattered across multiple rx buffers.
 * 2) Headroom space is insufficient.
 *
 * This is inefficient but it's a temporary condition that
 * we hit right after XDP is enabled and until the queue is refilled
 * with large buffers with sufficient headroom - so it should affect
 * at most queue size packets. Afterwards, the conditions to enable
 * XDP should preclude the underlying device from sending packets
 * across multiple buffers (num_buf > 1), and we make sure buffers
 * have enough headroom.
 */
static struct page *xdp_linearize_page(struct receive_queue *rq,
                                       u16 *num_buf,
                                       struct page *p,
                                       int offset,
                                       int page_off,
                                       unsigned int *len)
{
        struct page *page = alloc_page(GFP_ATOMIC);

        if (!page)
                return NULL;

        memcpy(page_address(page) + page_off, page_address(p) + offset, *len);
        page_off += *len;

        while (--*num_buf) {
                int tailroom = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
                unsigned int buflen;
                void *buf;
                int off;

                buf = virtqueue_get_buf(rq->vq, &buflen);
                if (unlikely(!buf))
                        goto err_buf;

                p = virt_to_head_page(buf);
                off = buf - page_address(p);

                /* guard against a misconfigured or uncooperative backend that
                 * is sending packet larger than the MTU.
                 */
                if ((page_off + buflen + tailroom) > PAGE_SIZE) {
                        put_page(p);
                        goto err_buf;
                }

                memcpy(page_address(page) + page_off,
                       page_address(p) + off, buflen);
                page_off += buflen;
                put_page(p);
        }

        /* Headroom does not contribute to packet length */
        *len = page_off - VIRTIO_XDP_HEADROOM;
        return page;
err_buf:
        __free_pages(page, 0);
        return NULL;
}
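/*
 * Sizing note: the copy loop above refuses to grow past a single page,
 * counting the skb_shared_info tailroom, so with 4 KiB pages and the
 * 256 byte XDP headroom the linearized frame stays well under
 * PAGE_SIZE; anything larger is dropped rather than merged.
 */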
static struct sk_buff *receive_small(struct net_device *dev,
                                     struct virtnet_info *vi,
                                     struct receive_queue *rq,
                                     void *buf, void *ctx,
                                     unsigned int len,
                                     unsigned int *xdp_xmit,
                                     struct virtnet_rq_stats *stats)
{
        struct sk_buff *skb;
        struct bpf_prog *xdp_prog;
        unsigned int xdp_headroom = (unsigned long)ctx;
        unsigned int header_offset = VIRTNET_RX_PAD + xdp_headroom;
        unsigned int headroom = vi->hdr_len + header_offset;
        unsigned int buflen = SKB_DATA_ALIGN(GOOD_PACKET_LEN + headroom) +
                              SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
        struct page *page = virt_to_head_page(buf);
        unsigned int delta = 0;
        struct page *xdp_page;
        int err;
        unsigned int metasize = 0;

        len -= vi->hdr_len;
        stats->bytes += len;

        if (unlikely(len > GOOD_PACKET_LEN)) {
                pr_debug("%s: rx error: len %u exceeds max size %d\n",
                         dev->name, len, GOOD_PACKET_LEN);
                dev->stats.rx_length_errors++;
                goto err;
        }

        if (likely(!vi->xdp_enabled)) {
                xdp_prog = NULL;
                goto skip_xdp;
        }

        rcu_read_lock();
        xdp_prog = rcu_dereference(rq->xdp_prog);
        if (xdp_prog) {
                struct virtio_net_hdr_mrg_rxbuf *hdr = buf + header_offset;
                struct xdp_frame *xdpf;
                struct xdp_buff xdp;
                void *orig_data;
                u32 act;

                if (unlikely(hdr->hdr.gso_type))
                        goto err_xdp;

                if (unlikely(xdp_headroom < virtnet_get_headroom(vi))) {
                        int offset = buf - page_address(page) + header_offset;
                        unsigned int tlen = len + vi->hdr_len;
                        u16 num_buf = 1;

                        xdp_headroom = virtnet_get_headroom(vi);
                        header_offset = VIRTNET_RX_PAD + xdp_headroom;
                        headroom = vi->hdr_len + header_offset;
                        buflen = SKB_DATA_ALIGN(GOOD_PACKET_LEN + headroom) +
                                 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
                        xdp_page = xdp_linearize_page(rq, &num_buf, page,
                                                      offset, header_offset,
                                                      &tlen);
                        if (!xdp_page)
                                goto err_xdp;

                        buf = page_address(xdp_page);
                        put_page(page);
                        page = xdp_page;
                }

                xdp_init_buff(&xdp, buflen, &rq->xdp_rxq);
                xdp_prepare_buff(&xdp, buf + VIRTNET_RX_PAD + vi->hdr_len,
                                 xdp_headroom, len, true);
                orig_data = xdp.data;
                act = bpf_prog_run_xdp(xdp_prog, &xdp);
                stats->xdp_packets++;

                switch (act) {
                case XDP_PASS:
                        /* Recalculate length in case bpf program changed it */
                        delta = orig_data - xdp.data;
                        len = xdp.data_end - xdp.data;
                        metasize = xdp.data - xdp.data_meta;
                        break;
                case XDP_TX:
                        stats->xdp_tx++;
                        xdpf = xdp_convert_buff_to_frame(&xdp);
                        if (unlikely(!xdpf))
                                goto err_xdp;
                        err = virtnet_xdp_xmit(dev, 1, &xdpf, 0);
                        if (unlikely(!err)) {
                                xdp_return_frame_rx_napi(xdpf);
                        } else if (unlikely(err < 0)) {
                                trace_xdp_exception(vi->dev, xdp_prog, act);
                                goto err_xdp;
                        }
                        *xdp_xmit |= VIRTIO_XDP_TX;
                        rcu_read_unlock();
                        goto xdp_xmit;
                case XDP_REDIRECT:
                        stats->xdp_redirects++;
                        err = xdp_do_redirect(dev, &xdp, xdp_prog);
                        if (err)
                                goto err_xdp;
                        *xdp_xmit |= VIRTIO_XDP_REDIR;
                        rcu_read_unlock();
                        goto xdp_xmit;
                default:
                        bpf_warn_invalid_xdp_action(vi->dev, xdp_prog, act);
                        fallthrough;
                case XDP_ABORTED:
                        trace_xdp_exception(vi->dev, xdp_prog, act);
                        goto err_xdp;
                case XDP_DROP:
                        goto err_xdp;
                }
        }
        rcu_read_unlock();

skip_xdp:
        skb = build_skb(buf, buflen);
        if (!skb)
                goto err;
        skb_reserve(skb, headroom - delta);
        skb_put(skb, len);
        if (!xdp_prog) {
                buf += header_offset;
                memcpy(skb_vnet_hdr(skb), buf, vi->hdr_len);
        } /* keep zeroed vnet hdr since XDP is loaded */

        if (metasize)
                skb_metadata_set(skb, metasize);

        return skb;

err_xdp:
        rcu_read_unlock();
        stats->xdp_drops++;
err:
        stats->drops++;
        put_page(page);
xdp_xmit:
        return NULL;
}

static struct sk_buff *receive_big(struct net_device *dev,
                                   struct virtnet_info *vi,
                                   struct receive_queue *rq,
                                   void *buf,
                                   unsigned int len,
                                   struct virtnet_rq_stats *stats)
{
        struct page *page = buf;
        struct sk_buff *skb =
                page_to_skb(vi, rq, page, 0, len, PAGE_SIZE, true, 0, 0);

        stats->bytes += len - vi->hdr_len;
        if (unlikely(!skb))
                goto err;

        return skb;

err:
        stats->drops++;
        give_pages(rq, page);
        return NULL;
}
static struct sk_buff *receive_mergeable(struct net_device *dev,
                                         struct virtnet_info *vi,
                                         struct receive_queue *rq,
                                         void *buf,
                                         void *ctx,
                                         unsigned int len,
                                         unsigned int *xdp_xmit,
                                         struct virtnet_rq_stats *stats)
{
        struct virtio_net_hdr_mrg_rxbuf *hdr = buf;
        u16 num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers);
        struct page *page = virt_to_head_page(buf);
        int offset = buf - page_address(page);
        struct sk_buff *head_skb, *curr_skb;
        struct bpf_prog *xdp_prog;
        unsigned int truesize = mergeable_ctx_to_truesize(ctx);
        unsigned int headroom = mergeable_ctx_to_headroom(ctx);
        unsigned int metasize = 0;
        unsigned int frame_sz;
        int err;

        head_skb = NULL;
        stats->bytes += len - vi->hdr_len;

        if (unlikely(len > truesize)) {
                pr_debug("%s: rx error: len %u exceeds truesize %lu\n",
                         dev->name, len, (unsigned long)ctx);
                dev->stats.rx_length_errors++;
                goto err_skb;
        }

        if (likely(!vi->xdp_enabled)) {
                xdp_prog = NULL;
                goto skip_xdp;
        }

        rcu_read_lock();
        xdp_prog = rcu_dereference(rq->xdp_prog);
        if (xdp_prog) {
                struct xdp_frame *xdpf;
                struct page *xdp_page;
                struct xdp_buff xdp;
                void *data;
                u32 act;

                /* Transient failure which in theory could occur if
                 * in-flight packets from before XDP was enabled reach
                 * the receive path after XDP is loaded.
                 */
                if (unlikely(hdr->hdr.gso_type))
                        goto err_xdp;

                /* Buffers with headroom use PAGE_SIZE as alloc size,
                 * see add_recvbuf_mergeable() + get_mergeable_buf_len()
                 */
                frame_sz = headroom ? PAGE_SIZE : truesize;

                /* This happens when the rx buffer size is underestimated
                 * or the headroom is not enough because the buffer was
                 * refilled before XDP was set. This should only happen
                 * for the first several packets, so we don't care much
                 * about its performance.
                 */
                if (unlikely(num_buf > 1 ||
                             headroom < virtnet_get_headroom(vi))) {
                        /* linearize data for XDP */
                        xdp_page = xdp_linearize_page(rq, &num_buf,
                                                      page, offset,
                                                      VIRTIO_XDP_HEADROOM,
                                                      &len);
                        frame_sz = PAGE_SIZE;

                        if (!xdp_page)
                                goto err_xdp;
                        offset = VIRTIO_XDP_HEADROOM;
                } else {
                        xdp_page = page;
                }

                /* Allow consuming headroom but reserve enough space to push
                 * the descriptor on if we get an XDP_TX return code.
                 */
                data = page_address(xdp_page) + offset;
                xdp_init_buff(&xdp, frame_sz - vi->hdr_len, &rq->xdp_rxq);
                xdp_prepare_buff(&xdp, data - VIRTIO_XDP_HEADROOM + vi->hdr_len,
                                 VIRTIO_XDP_HEADROOM, len - vi->hdr_len, true);

                act = bpf_prog_run_xdp(xdp_prog, &xdp);
                stats->xdp_packets++;

                switch (act) {
                case XDP_PASS:
                        metasize = xdp.data - xdp.data_meta;

                        /* recalculate offset to account for any header
                         * adjustments and minus the metasize to copy the
                         * metadata in page_to_skb(). Note other cases do not
                         * build an skb and avoid using offset
                         */
                        offset = xdp.data - page_address(xdp_page) -
                                 vi->hdr_len - metasize;

                        /* recalculate len if xdp.data, xdp.data_end or
                         * xdp.data_meta were adjusted
                         */
                        len = xdp.data_end - xdp.data + vi->hdr_len + metasize;

                        /* recalculate headroom if xdp.data or xdp_data_meta
                         * were adjusted, note that offset should always point
                         * to the start of the reserved bytes for virtio_net
                         * header which are followed by xdp.data, that means
                         * that offset is equal to the headroom (when buf is
                         * starting at the beginning of the page, otherwise
                         * there is a base offset inside the page) but it's used
                         * with a different starting point (buf start) than
                         * xdp.data (buf start + vnet hdr size). If xdp.data or
                         * data_meta were adjusted by the xdp prog then the
                         * headroom size has changed and so has the offset, we
                         * can use data_hard_start, which points at buf start +
                         * vnet hdr size, to calculate the new headroom and use
                         * it later to compute buf start in page_to_skb()
                         */
                        headroom = xdp.data - xdp.data_hard_start - metasize;

                        /* We can only create skb based on xdp_page. */
                        if (unlikely(xdp_page != page)) {
                                rcu_read_unlock();
                                put_page(page);
                                head_skb = page_to_skb(vi, rq, xdp_page, offset,
                                                       len, PAGE_SIZE, false,
                                                       metasize,
                                                       headroom);
                                return head_skb;
                        }
                        break;
                case XDP_TX:
                        stats->xdp_tx++;
                        xdpf = xdp_convert_buff_to_frame(&xdp);
                        if (unlikely(!xdpf)) {
                                if (unlikely(xdp_page != page))
                                        put_page(xdp_page);
                                goto err_xdp;
                        }
                        err = virtnet_xdp_xmit(dev, 1, &xdpf, 0);
                        if (unlikely(!err)) {
                                xdp_return_frame_rx_napi(xdpf);
                        } else if (unlikely(err < 0)) {
                                trace_xdp_exception(vi->dev, xdp_prog, act);
                                if (unlikely(xdp_page != page))
                                        put_page(xdp_page);
                                goto err_xdp;
                        }
                        *xdp_xmit |= VIRTIO_XDP_TX;
                        if (unlikely(xdp_page != page))
                                put_page(page);
                        rcu_read_unlock();
                        goto xdp_xmit;
                case XDP_REDIRECT:
                        stats->xdp_redirects++;
                        err = xdp_do_redirect(dev, &xdp, xdp_prog);
                        if (err) {
                                if (unlikely(xdp_page != page))
                                        put_page(xdp_page);
                                goto err_xdp;
                        }
                        *xdp_xmit |= VIRTIO_XDP_REDIR;
                        if (unlikely(xdp_page != page))
                                put_page(page);
                        rcu_read_unlock();
                        goto xdp_xmit;
                default:
                        bpf_warn_invalid_xdp_action(vi->dev, xdp_prog, act);
                        fallthrough;
                case XDP_ABORTED:
                        trace_xdp_exception(vi->dev, xdp_prog, act);
                        fallthrough;
                case XDP_DROP:
                        if (unlikely(xdp_page != page))
                                __free_pages(xdp_page, 0);
                        goto err_xdp;
                }
        }
        rcu_read_unlock();

skip_xdp:
        head_skb = page_to_skb(vi, rq, page, offset, len, truesize, !xdp_prog,
                               metasize, headroom);
        curr_skb = head_skb;

        if (unlikely(!curr_skb))
                goto err_skb;
        while (--num_buf) {
                int num_skb_frags;

                buf = virtqueue_get_buf_ctx(rq->vq, &len, &ctx);
                if (unlikely(!buf)) {
                        pr_debug("%s: rx error: %d buffers out of %d missing\n",
                                 dev->name, num_buf,
                                 virtio16_to_cpu(vi->vdev,
                                                 hdr->num_buffers));
                        dev->stats.rx_length_errors++;
                        goto err_buf;
                }

                stats->bytes += len;
                page = virt_to_head_page(buf);

                truesize = mergeable_ctx_to_truesize(ctx);
                if (unlikely(len > truesize)) {
                        pr_debug("%s: rx error: len %u exceeds truesize %lu\n",
                                 dev->name, len, (unsigned long)ctx);
                        dev->stats.rx_length_errors++;
                        goto err_skb;
                }

                num_skb_frags = skb_shinfo(curr_skb)->nr_frags;
                if (unlikely(num_skb_frags == MAX_SKB_FRAGS)) {
                        struct sk_buff *nskb = alloc_skb(0, GFP_ATOMIC);

                        if (unlikely(!nskb))
                                goto err_skb;
                        if (curr_skb == head_skb)
                                skb_shinfo(curr_skb)->frag_list = nskb;
                        else
                                curr_skb->next = nskb;
                        curr_skb = nskb;
                        head_skb->truesize += nskb->truesize;
                        num_skb_frags = 0;
                }
                if (curr_skb != head_skb) {
                        head_skb->data_len += len;
                        head_skb->len += len;
                        head_skb->truesize += truesize;
                }
                offset = buf - page_address(page);
                if (skb_can_coalesce(curr_skb, num_skb_frags, page, offset)) {
                        put_page(page);
                        skb_coalesce_rx_frag(curr_skb, num_skb_frags - 1,
                                             len, truesize);
                } else {
                        skb_add_rx_frag(curr_skb, num_skb_frags, page,
                                        offset, len, truesize);
                }
        }

        ewma_pkt_len_add(&rq->mrg_avg_pkt_len, head_skb->len);
        return head_skb;

err_xdp:
        rcu_read_unlock();
        stats->xdp_drops++;
err_skb:
        put_page(page);
        while (num_buf-- > 1) {
                buf = virtqueue_get_buf(rq->vq, &len);
                if (unlikely(!buf)) {
                        pr_debug("%s: rx error: %d buffers missing\n",
                                 dev->name, num_buf);
                        dev->stats.rx_length_errors++;
                        break;
                }
                stats->bytes += len;
                page = virt_to_head_page(buf);
                put_page(page);
        }
err_buf:
        stats->drops++;
        dev_kfree_skb(head_skb);
xdp_xmit:
        return NULL;
}

static void virtio_skb_set_hash(const struct virtio_net_hdr_v1_hash *hdr_hash,
                                struct sk_buff *skb)
{
        enum pkt_hash_types rss_hash_type;

        if (!hdr_hash || !skb)
                return;

        switch ((int)hdr_hash->hash_report) {
        case VIRTIO_NET_HASH_REPORT_TCPv4:
        case VIRTIO_NET_HASH_REPORT_UDPv4:
        case VIRTIO_NET_HASH_REPORT_TCPv6:
        case VIRTIO_NET_HASH_REPORT_UDPv6:
        case VIRTIO_NET_HASH_REPORT_TCPv6_EX:
        case VIRTIO_NET_HASH_REPORT_UDPv6_EX:
                rss_hash_type = PKT_HASH_TYPE_L4;
                break;
        case VIRTIO_NET_HASH_REPORT_IPv4:
        case VIRTIO_NET_HASH_REPORT_IPv6:
        case VIRTIO_NET_HASH_REPORT_IPv6_EX:
                rss_hash_type = PKT_HASH_TYPE_L3;
                break;
        case VIRTIO_NET_HASH_REPORT_NONE:
        default:
                rss_hash_type = PKT_HASH_TYPE_NONE;
        }
        skb_set_hash(skb, (unsigned int)hdr_hash->hash_value, rss_hash_type);
}
Tsirkin else 1251a0929a44SToshiaki Makita skb = receive_small(dev, vi, rq, buf, ctx, len, xdp_xmit, stats); 1252f121159dSMichael S. Tsirkin 12538fc3b9e9SMichael S. Tsirkin if (unlikely(!skb)) 12547d9d60fdSToshiaki Makita return; 12553f2c31d9SMark McLoughlin 12569ab86bbcSShirley Ma hdr = skb_vnet_hdr(skb); 125791f41f01SAndrew Melnychenko if (dev->features & NETIF_F_RXHASH && vi->has_rss_hash_report) 125891f41f01SAndrew Melnychenko virtio_skb_set_hash((const struct virtio_net_hdr_v1_hash *)hdr, skb); 12593fa2a1dfSstephen hemminger 1260e858fae2SMike Rapoport if (hdr->hdr.flags & VIRTIO_NET_HDR_F_DATA_VALID) 126110a8d94aSJason Wang skb->ip_summed = CHECKSUM_UNNECESSARY; 1262296f96fcSRusty Russell 1263e858fae2SMike Rapoport if (virtio_net_hdr_to_skb(skb, &hdr->hdr, 1264e858fae2SMike Rapoport virtio_is_little_endian(vi->vdev))) { 1265e858fae2SMike Rapoport net_warn_ratelimited("%s: bad gso: type: %u, size: %u\n", 1266e858fae2SMike Rapoport dev->name, hdr->hdr.gso_type, 1267fdd819b2SMichael S. Tsirkin hdr->hdr.gso_size); 1268296f96fcSRusty Russell goto frame_err; 1269296f96fcSRusty Russell } 1270296f96fcSRusty Russell 1271133bbb18SWillem de Bruijn skb_record_rx_queue(skb, vq2rxq(rq->vq)); 1272d1dc06dcSMike Rapoport skb->protocol = eth_type_trans(skb, dev); 1273d1dc06dcSMike Rapoport pr_debug("Receiving skb proto 0x%04x len %i type %i\n", 1274d1dc06dcSMike Rapoport ntohs(skb->protocol), skb->len, skb->pkt_type); 1275d1dc06dcSMike Rapoport 12760fbd050aSEric Dumazet napi_gro_receive(&rq->napi, skb); 12777d9d60fdSToshiaki Makita return; 1278296f96fcSRusty Russell 1279296f96fcSRusty Russell frame_err: 1280296f96fcSRusty Russell dev->stats.rx_frame_errors++; 1281296f96fcSRusty Russell dev_kfree_skb(skb); 1282296f96fcSRusty Russell } 1283296f96fcSRusty Russell 1284192f68cfSJason Wang /* Unlike mergeable buffers, all buffers are allocated to the 1285192f68cfSJason Wang * same size, except for the headroom. For this reason we do 1286192f68cfSJason Wang * not need to use mergeable_len_to_ctx here - it is enough 1287192f68cfSJason Wang * to store the headroom as the context ignoring the truesize. 1288192f68cfSJason Wang */ 1289946fa564SMichael S. Tsirkin static int add_recvbuf_small(struct virtnet_info *vi, struct receive_queue *rq, 1290946fa564SMichael S. 
Tsirkin gfp_t gfp) 1291296f96fcSRusty Russell { 1292f6b10209SJason Wang struct page_frag *alloc_frag = &rq->alloc_frag; 1293f6b10209SJason Wang char *buf; 12942de2f7f4SJohn Fastabend unsigned int xdp_headroom = virtnet_get_headroom(vi); 1295192f68cfSJason Wang void *ctx = (void *)(unsigned long)xdp_headroom; 1296f6b10209SJason Wang int len = vi->hdr_len + VIRTNET_RX_PAD + GOOD_PACKET_LEN + xdp_headroom; 12979ab86bbcSShirley Ma int err; 12983f2c31d9SMark McLoughlin 1299f6b10209SJason Wang len = SKB_DATA_ALIGN(len) + 1300f6b10209SJason Wang SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); 1301f6b10209SJason Wang if (unlikely(!skb_page_frag_refill(len, alloc_frag, gfp))) 13029ab86bbcSShirley Ma return -ENOMEM; 1303296f96fcSRusty Russell 1304f6b10209SJason Wang buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset; 1305f6b10209SJason Wang get_page(alloc_frag->page); 1306f6b10209SJason Wang alloc_frag->offset += len; 1307f6b10209SJason Wang sg_init_one(rq->sg, buf + VIRTNET_RX_PAD + xdp_headroom, 1308f6b10209SJason Wang vi->hdr_len + GOOD_PACKET_LEN); 1309192f68cfSJason Wang err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp); 13109ab86bbcSShirley Ma if (err < 0) 1311f6b10209SJason Wang put_page(virt_to_head_page(buf)); 13129ab86bbcSShirley Ma return err; 131397402b96SHerbert Xu } 131497402b96SHerbert Xu 1315012873d0SMichael S. Tsirkin static int add_recvbuf_big(struct virtnet_info *vi, struct receive_queue *rq, 1316012873d0SMichael S. Tsirkin gfp_t gfp) 13179ab86bbcSShirley Ma { 13189ab86bbcSShirley Ma struct page *first, *list = NULL; 13199ab86bbcSShirley Ma char *p; 13209ab86bbcSShirley Ma int i, err, offset; 1321296f96fcSRusty Russell 1322a5835440SRusty Russell sg_init_table(rq->sg, MAX_SKB_FRAGS + 2); 1323a5835440SRusty Russell 1324e9d7417bSJason Wang /* page in rq->sg[MAX_SKB_FRAGS + 1] is list tail */ 13259ab86bbcSShirley Ma for (i = MAX_SKB_FRAGS + 1; i > 1; --i) { 1326e9d7417bSJason Wang first = get_a_page(rq, gfp); 13279ab86bbcSShirley Ma if (!first) { 13289ab86bbcSShirley Ma if (list) 1329e9d7417bSJason Wang give_pages(rq, list); 13309ab86bbcSShirley Ma return -ENOMEM; 1331296f96fcSRusty Russell } 1332e9d7417bSJason Wang sg_set_buf(&rq->sg[i], page_address(first), PAGE_SIZE); 13339ab86bbcSShirley Ma 13349ab86bbcSShirley Ma /* chain new page in list head to match sg */ 13359ab86bbcSShirley Ma first->private = (unsigned long)list; 13369ab86bbcSShirley Ma list = first; 13379ab86bbcSShirley Ma } 13389ab86bbcSShirley Ma 1339e9d7417bSJason Wang first = get_a_page(rq, gfp); 13409ab86bbcSShirley Ma if (!first) { 1341e9d7417bSJason Wang give_pages(rq, list); 13429ab86bbcSShirley Ma return -ENOMEM; 13439ab86bbcSShirley Ma } 13449ab86bbcSShirley Ma p = page_address(first); 13459ab86bbcSShirley Ma 1346e9d7417bSJason Wang /* rq->sg[0], rq->sg[1] share the same page */ 1347012873d0SMichael S. Tsirkin /* a separated rq->sg[0] for header - required in case !any_header_sg */ 1348012873d0SMichael S. Tsirkin sg_set_buf(&rq->sg[0], p, vi->hdr_len); 13499ab86bbcSShirley Ma 1350e9d7417bSJason Wang /* rq->sg[1] for data packet, from offset */ 13519ab86bbcSShirley Ma offset = sizeof(struct padded_vnet_hdr); 1352e9d7417bSJason Wang sg_set_buf(&rq->sg[1], p + offset, PAGE_SIZE - offset); 13539ab86bbcSShirley Ma 13549ab86bbcSShirley Ma /* chain first in list head */ 13559ab86bbcSShirley Ma first->private = (unsigned long)list; 13569dc7b9e4SRusty Russell err = virtqueue_add_inbuf(rq->vq, rq->sg, MAX_SKB_FRAGS + 2, 1357aa989f5eSMichael S. 
Tsirkin first, gfp); 13589ab86bbcSShirley Ma if (err < 0) 1359e9d7417bSJason Wang give_pages(rq, first); 13609ab86bbcSShirley Ma 13619ab86bbcSShirley Ma return err; 13629ab86bbcSShirley Ma } 13639ab86bbcSShirley Ma 1364d85b758fSMichael S. Tsirkin static unsigned int get_mergeable_buf_len(struct receive_queue *rq, 13653cc81a9aSJason Wang struct ewma_pkt_len *avg_pkt_len, 13663cc81a9aSJason Wang unsigned int room) 13679ab86bbcSShirley Ma { 1368c1ddc42dSAndrew Melnychenko struct virtnet_info *vi = rq->vq->vdev->priv; 1369c1ddc42dSAndrew Melnychenko const size_t hdr_len = vi->hdr_len; 1370fbf28d78SMichael Dalton unsigned int len; 1371fbf28d78SMichael Dalton 13723cc81a9aSJason Wang if (room) 13733cc81a9aSJason Wang return PAGE_SIZE - room; 13743cc81a9aSJason Wang 13755377d758SJohannes Berg len = hdr_len + clamp_t(unsigned int, ewma_pkt_len_read(avg_pkt_len), 1376f0c3192cSMichael S. Tsirkin rq->min_buf_len, PAGE_SIZE - hdr_len); 13773cc81a9aSJason Wang 1378e377fcc8SMichael S. Tsirkin return ALIGN(len, L1_CACHE_BYTES); 1379fbf28d78SMichael Dalton } 1380fbf28d78SMichael Dalton 13812de2f7f4SJohn Fastabend static int add_recvbuf_mergeable(struct virtnet_info *vi, 13822de2f7f4SJohn Fastabend struct receive_queue *rq, gfp_t gfp) 1383fbf28d78SMichael Dalton { 1384fb51879dSMichael Dalton struct page_frag *alloc_frag = &rq->alloc_frag; 13852de2f7f4SJohn Fastabend unsigned int headroom = virtnet_get_headroom(vi); 13863cc81a9aSJason Wang unsigned int tailroom = headroom ? sizeof(struct skb_shared_info) : 0; 13873cc81a9aSJason Wang unsigned int room = SKB_DATA_ALIGN(headroom + tailroom); 1388fb51879dSMichael Dalton char *buf; 1389680557cfSMichael S. Tsirkin void *ctx; 13909ab86bbcSShirley Ma int err; 1391fb51879dSMichael Dalton unsigned int len, hole; 13929ab86bbcSShirley Ma 13933cc81a9aSJason Wang /* Extra tailroom is needed to satisfy XDP's assumption. This 13943cc81a9aSJason Wang * means rx frags coalescing won't work, but since we've 13953cc81a9aSJason Wang * disabled GSO for XDP, it won't be a big issue. 13963cc81a9aSJason Wang */ 13973cc81a9aSJason Wang len = get_mergeable_buf_len(rq, &rq->mrg_avg_pkt_len, room); 13983cc81a9aSJason Wang if (unlikely(!skb_page_frag_refill(len + room, alloc_frag, gfp))) 13999ab86bbcSShirley Ma return -ENOMEM; 1400ab7db917SMichael Dalton 1401fb51879dSMichael Dalton buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset; 14022de2f7f4SJohn Fastabend buf += headroom; /* advance address leaving hole at front of pkt */ 1403fb51879dSMichael Dalton get_page(alloc_frag->page); 14043cc81a9aSJason Wang alloc_frag->offset += len + room; 1405fb51879dSMichael Dalton hole = alloc_frag->size - alloc_frag->offset; 14063cc81a9aSJason Wang if (hole < len + room) { 1407ab7db917SMichael Dalton /* To avoid internal fragmentation, if there is very likely not 1408ab7db917SMichael Dalton * enough space for another buffer, add the remaining space to 14091daa8790SMichael S. Tsirkin * the current buffer. 1410ab7db917SMichael Dalton */ 1411fb51879dSMichael Dalton len += hole; 1412fb51879dSMichael Dalton alloc_frag->offset += hole; 1413fb51879dSMichael Dalton } 14149ab86bbcSShirley Ma 1415fb51879dSMichael Dalton sg_init_one(rq->sg, buf, len); 141629fda25aSDavid S. Miller ctx = mergeable_len_to_ctx(len, headroom); 1417680557cfSMichael S.
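/* Worked example for get_mergeable_buf_len() above (numbers are
 * illustrative, not measured): with ewma_pkt_len_read() at 620 bytes,
 * hdr_len 12, rq->min_buf_len 328, PAGE_SIZE 4096 and no XDP (room == 0):
 *
 *	len = 12 + clamp(620, 328, 4096 - 12) = 632
 *	ALIGN(632, 64) = 640
 *
 * so roughly six buffers fit per page instead of one page-sized buffer,
 * while the slow-moving packet-length average keeps a short burst of
 * large packets from inflating every subsequent refill.
 */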
Tsirkin err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp); 14189ab86bbcSShirley Ma if (err < 0) 14192613af0eSMichael Dalton put_page(virt_to_head_page(buf)); 14209ab86bbcSShirley Ma 14219ab86bbcSShirley Ma return err; 1422296f96fcSRusty Russell } 1423296f96fcSRusty Russell 1424b2baed69SRusty Russell /* 1425b2baed69SRusty Russell * Returns false if we couldn't fill entirely (OOM). 1426b2baed69SRusty Russell * 1427b2baed69SRusty Russell * Normally run in the receive path, but can also be run from ndo_open 1428b2baed69SRusty Russell * before we're receiving packets, or from refill_work which is 1429b2baed69SRusty Russell * careful to disable receiving (using napi_disable). 1430b2baed69SRusty Russell */ 1431946fa564SMichael S. Tsirkin static bool try_fill_recv(struct virtnet_info *vi, struct receive_queue *rq, 1432946fa564SMichael S. Tsirkin gfp_t gfp) 14333f2c31d9SMark McLoughlin { 14343f2c31d9SMark McLoughlin int err; 14351788f495SMichael S. Tsirkin bool oom; 14363f2c31d9SMark McLoughlin 14370aea51c3SAmit Shah do { 14389ab86bbcSShirley Ma if (vi->mergeable_rx_bufs) 14392de2f7f4SJohn Fastabend err = add_recvbuf_mergeable(vi, rq, gfp); 14409ab86bbcSShirley Ma else if (vi->big_packets) 1441012873d0SMichael S. Tsirkin err = add_recvbuf_big(vi, rq, gfp); 14429ab86bbcSShirley Ma else 1443946fa564SMichael S. Tsirkin err = add_recvbuf_small(vi, rq, gfp); 14443f2c31d9SMark McLoughlin 14451788f495SMichael S. Tsirkin oom = err == -ENOMEM; 14469ed4cb07SRusty Russell if (err) 14473f2c31d9SMark McLoughlin break; 1448b7dfde95SLinus Torvalds } while (rq->vq->num_free); 1449461f03dcSToshiaki Makita if (virtqueue_kick_prepare(rq->vq) && virtqueue_notify(rq->vq)) { 145001c32598SMichael S. Tsirkin unsigned long flags; 145101c32598SMichael S. Tsirkin 145201c32598SMichael S. Tsirkin flags = u64_stats_update_begin_irqsave(&rq->stats.syncp); 1453d46eeeafSJason Wang rq->stats.kicks++; 145401c32598SMichael S. Tsirkin u64_stats_update_end_irqrestore(&rq->stats.syncp, flags); 1455461f03dcSToshiaki Makita } 1456461f03dcSToshiaki Makita 14573161e453SRusty Russell return !oom; 14583f2c31d9SMark McLoughlin } 14593f2c31d9SMark McLoughlin 146018445c4dSRusty Russell static void skb_recv_done(struct virtqueue *rvq) 1461296f96fcSRusty Russell { 1462296f96fcSRusty Russell struct virtnet_info *vi = rvq->vdev->priv; 1463986a4f4dSJason Wang struct receive_queue *rq = &vi->rq[vq2rxq(rvq)]; 1464e9d7417bSJason Wang 1465e4e8452aSWillem de Bruijn virtqueue_napi_schedule(&rq->napi, rvq); 1466296f96fcSRusty Russell } 1467296f96fcSRusty Russell 1468e4e8452aSWillem de Bruijn static void virtnet_napi_enable(struct virtqueue *vq, struct napi_struct *napi) 14693e9d08ecSBruce Rogers { 1470e4e8452aSWillem de Bruijn napi_enable(napi); 14713e9d08ecSBruce Rogers 14723e9d08ecSBruce Rogers /* If all buffers were filled by other side before we napi_enabled, we 1473e4e8452aSWillem de Bruijn * won't get another interrupt, so process any outstanding packets now. 1474e4e8452aSWillem de Bruijn * Call local_bh_enable after to trigger softIRQ processing. 1475e4e8452aSWillem de Bruijn */ 1476ec13ee80SMichael S. Tsirkin local_bh_disable(); 1477e4e8452aSWillem de Bruijn virtqueue_napi_schedule(napi, vq); 1478ec13ee80SMichael S. 
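/* try_fill_recv() above deliberately uses the two-step
 * virtqueue_kick_prepare()/virtqueue_notify() pair instead of
 * virtqueue_kick(): prepare consults the device's event suppression
 * state, so the (potentially VM-exiting) doorbell is rung at most once
 * per refill batch and can be counted. A minimal sketch of the same
 * pattern, assuming a hypothetical add_one_buf() helper:
 */
#if 0	/* illustrative sketch only, not built */
static bool refill_batch_and_kick(struct receive_queue *rq, gfp_t gfp)
{
	int err = 0;

	/* queue buffers until the ring is full or allocation fails */
	while (rq->vq->num_free) {
		err = add_one_buf(rq, gfp);	/* hypothetical helper */
		if (err)
			break;
	}

	/* one doorbell for the whole batch, skipped if suppressed */
	if (virtqueue_kick_prepare(rq->vq))
		virtqueue_notify(rq->vq);

	return err != -ENOMEM;
}
#endif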
Tsirkin local_bh_enable(); 14793e9d08ecSBruce Rogers } 14803e9d08ecSBruce Rogers 1481b92f1e67SWillem de Bruijn static void virtnet_napi_tx_enable(struct virtnet_info *vi, 1482b92f1e67SWillem de Bruijn struct virtqueue *vq, 1483b92f1e67SWillem de Bruijn struct napi_struct *napi) 1484b92f1e67SWillem de Bruijn { 1485b92f1e67SWillem de Bruijn if (!napi->weight) 1486b92f1e67SWillem de Bruijn return; 1487b92f1e67SWillem de Bruijn 1488b92f1e67SWillem de Bruijn /* Tx napi touches cachelines on the cpu handling tx interrupts. Only 1489b92f1e67SWillem de Bruijn * enable the feature if this is likely affine with the transmit path. 1490b92f1e67SWillem de Bruijn */ 1491b92f1e67SWillem de Bruijn if (!vi->affinity_hint_set) { 1492b92f1e67SWillem de Bruijn napi->weight = 0; 1493b92f1e67SWillem de Bruijn return; 1494b92f1e67SWillem de Bruijn } 1495b92f1e67SWillem de Bruijn 1496b92f1e67SWillem de Bruijn return virtnet_napi_enable(vq, napi); 1497b92f1e67SWillem de Bruijn } 1498b92f1e67SWillem de Bruijn 149978a57b48SWillem de Bruijn static void virtnet_napi_tx_disable(struct napi_struct *napi) 150078a57b48SWillem de Bruijn { 150178a57b48SWillem de Bruijn if (napi->weight) 150278a57b48SWillem de Bruijn napi_disable(napi); 150378a57b48SWillem de Bruijn } 150478a57b48SWillem de Bruijn 15053161e453SRusty Russell static void refill_work(struct work_struct *work) 15063161e453SRusty Russell { 1507e9d7417bSJason Wang struct virtnet_info *vi = 1508e9d7417bSJason Wang container_of(work, struct virtnet_info, refill.work); 15093161e453SRusty Russell bool still_empty; 1510986a4f4dSJason Wang int i; 15113161e453SRusty Russell 151255257d72SSasha Levin for (i = 0; i < vi->curr_queue_pairs; i++) { 1513986a4f4dSJason Wang struct receive_queue *rq = &vi->rq[i]; 1514986a4f4dSJason Wang 1515986a4f4dSJason Wang napi_disable(&rq->napi); 1516946fa564SMichael S. Tsirkin still_empty = !try_fill_recv(vi, rq, GFP_KERNEL); 1517e4e8452aSWillem de Bruijn virtnet_napi_enable(rq->vq, &rq->napi); 15183161e453SRusty Russell 15193161e453SRusty Russell /* In theory, this can happen: if we don't get any buffers in 1520986a4f4dSJason Wang * we will *never* try to fill again. 1521986a4f4dSJason Wang */ 15223161e453SRusty Russell if (still_empty) 15233b07e9caSTejun Heo schedule_delayed_work(&vi->refill, HZ/2); 15243161e453SRusty Russell } 1525986a4f4dSJason Wang } 15263161e453SRusty Russell 15272471c75eSJesper Dangaard Brouer static int virtnet_receive(struct receive_queue *rq, int budget, 15282471c75eSJesper Dangaard Brouer unsigned int *xdp_xmit) 1529296f96fcSRusty Russell { 1530e9d7417bSJason Wang struct virtnet_info *vi = rq->vq->vdev->priv; 1531d46eeeafSJason Wang struct virtnet_rq_stats stats = {}; 1532a0929a44SToshiaki Makita unsigned int len; 15339ab86bbcSShirley Ma void *buf; 1534a0929a44SToshiaki Makita int i; 1535296f96fcSRusty Russell 1536192f68cfSJason Wang if (!vi->big_packets || vi->mergeable_rx_bufs) { 1537680557cfSMichael S. Tsirkin void *ctx; 1538680557cfSMichael S. Tsirkin 1539d46eeeafSJason Wang while (stats.packets < budget && 1540680557cfSMichael S. Tsirkin (buf = virtqueue_get_buf_ctx(rq->vq, &len, &ctx))) { 1541a0929a44SToshiaki Makita receive_buf(vi, rq, buf, len, ctx, xdp_xmit, &stats); 1542d46eeeafSJason Wang stats.packets++; 1543680557cfSMichael S. Tsirkin } 1544680557cfSMichael S. 
Tsirkin } else { 1545d46eeeafSJason Wang while (stats.packets < budget && 1546e9d7417bSJason Wang (buf = virtqueue_get_buf(rq->vq, &len)) != NULL) { 1547a0929a44SToshiaki Makita receive_buf(vi, rq, buf, len, NULL, xdp_xmit, &stats); 1548d46eeeafSJason Wang stats.packets++; 1549296f96fcSRusty Russell } 1550680557cfSMichael S. Tsirkin } 1551296f96fcSRusty Russell 1552718be6baS? jiang if (rq->vq->num_free > min((unsigned int)budget, virtqueue_get_vring_size(rq->vq)) / 2) { 15535a159128SJason Wang if (!try_fill_recv(vi, rq, GFP_ATOMIC)) { 15545a159128SJason Wang spin_lock(&vi->refill_lock); 15555a159128SJason Wang if (vi->refill_enabled) 15563b07e9caSTejun Heo schedule_delayed_work(&vi->refill, 0); 15575a159128SJason Wang spin_unlock(&vi->refill_lock); 15585a159128SJason Wang } 15593161e453SRusty Russell } 1560296f96fcSRusty Russell 1561d7dfc5cfSToshiaki Makita u64_stats_update_begin(&rq->stats.syncp); 1562a0929a44SToshiaki Makita for (i = 0; i < VIRTNET_RQ_STATS_LEN; i++) { 1563a0929a44SToshiaki Makita size_t offset = virtnet_rq_stats_desc[i].offset; 1564a0929a44SToshiaki Makita u64 *item; 1565a0929a44SToshiaki Makita 1566d46eeeafSJason Wang item = (u64 *)((u8 *)&rq->stats + offset); 1567d46eeeafSJason Wang *item += *(u64 *)((u8 *)&stats + offset); 1568a0929a44SToshiaki Makita } 1569d7dfc5cfSToshiaki Makita u64_stats_update_end(&rq->stats.syncp); 157061845d20SJason Wang 1571d46eeeafSJason Wang return stats.packets; 15722ffa7598SJason Wang } 15732ffa7598SJason Wang 1574df133f3fSMichael S. Tsirkin static void free_old_xmit_skbs(struct send_queue *sq, bool in_napi) 1575ea7735d9SWillem de Bruijn { 1576ea7735d9SWillem de Bruijn unsigned int len; 1577ea7735d9SWillem de Bruijn unsigned int packets = 0; 1578ea7735d9SWillem de Bruijn unsigned int bytes = 0; 15795050471dSToshiaki Makita void *ptr; 1580ea7735d9SWillem de Bruijn 15815050471dSToshiaki Makita while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) { 15825050471dSToshiaki Makita if (likely(!is_xdp_frame(ptr))) { 15835050471dSToshiaki Makita struct sk_buff *skb = ptr; 15845050471dSToshiaki Makita 1585ea7735d9SWillem de Bruijn pr_debug("Sent skb %p\n", skb); 1586ea7735d9SWillem de Bruijn 1587ea7735d9SWillem de Bruijn bytes += skb->len; 1588df133f3fSMichael S. Tsirkin napi_consume_skb(skb, in_napi); 15895050471dSToshiaki Makita } else { 15905050471dSToshiaki Makita struct xdp_frame *frame = ptr_to_xdp(ptr); 15915050471dSToshiaki Makita 15925050471dSToshiaki Makita bytes += frame->len; 15935050471dSToshiaki Makita xdp_return_frame(frame); 15945050471dSToshiaki Makita } 15955050471dSToshiaki Makita packets++; 1596ea7735d9SWillem de Bruijn } 1597ea7735d9SWillem de Bruijn 1598ea7735d9SWillem de Bruijn /* Avoid overhead when no packets have been processed; this 1599ea7735d9SWillem de Bruijn * happens when called speculatively from start_xmit.
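/* free_old_xmit_skbs() above tells skbs and XDP frames apart because the
 * XDP transmit path tags its ring tokens with VIRTIO_XDP_FLAG in the low
 * pointer bit (both object types are at least word aligned, so bit 0 is
 * free). The helpers are defined earlier in this file; roughly:
 */
#if 0	/* illustrative sketch only, not built */
static bool is_xdp_frame(void *ptr)
{
	return (unsigned long)ptr & VIRTIO_XDP_FLAG;
}

static void *xdp_to_ptr(struct xdp_frame *ptr)
{
	return (void *)((unsigned long)ptr | VIRTIO_XDP_FLAG);
}

static struct xdp_frame *ptr_to_xdp(void *ptr)
{
	return (struct xdp_frame *)((unsigned long)ptr & ~VIRTIO_XDP_FLAG);
}
#endif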
1600ea7735d9SWillem de Bruijn */ 1601ea7735d9SWillem de Bruijn if (!packets) 1602ea7735d9SWillem de Bruijn return; 1603ea7735d9SWillem de Bruijn 1604d7dfc5cfSToshiaki Makita u64_stats_update_begin(&sq->stats.syncp); 1605d7dfc5cfSToshiaki Makita sq->stats.bytes += bytes; 1606d7dfc5cfSToshiaki Makita sq->stats.packets += packets; 1607d7dfc5cfSToshiaki Makita u64_stats_update_end(&sq->stats.syncp); 1608ea7735d9SWillem de Bruijn } 1609ea7735d9SWillem de Bruijn 1610534da5e8SToshiaki Makita static bool is_xdp_raw_buffer_queue(struct virtnet_info *vi, int q) 1611534da5e8SToshiaki Makita { 1612534da5e8SToshiaki Makita if (q < (vi->curr_queue_pairs - vi->xdp_queue_pairs)) 1613534da5e8SToshiaki Makita return false; 1614534da5e8SToshiaki Makita else if (q < vi->curr_queue_pairs) 1615534da5e8SToshiaki Makita return true; 1616534da5e8SToshiaki Makita else 1617534da5e8SToshiaki Makita return false; 1618534da5e8SToshiaki Makita } 1619534da5e8SToshiaki Makita 16207b0411efSWillem de Bruijn static void virtnet_poll_cleantx(struct receive_queue *rq) 16217b0411efSWillem de Bruijn { 16227b0411efSWillem de Bruijn struct virtnet_info *vi = rq->vq->vdev->priv; 16237b0411efSWillem de Bruijn unsigned int index = vq2rxq(rq->vq); 16247b0411efSWillem de Bruijn struct send_queue *sq = &vi->sq[index]; 16257b0411efSWillem de Bruijn struct netdev_queue *txq = netdev_get_tx_queue(vi->dev, index); 16267b0411efSWillem de Bruijn 1627534da5e8SToshiaki Makita if (!sq->napi.weight || is_xdp_raw_buffer_queue(vi, index)) 16287b0411efSWillem de Bruijn return; 16297b0411efSWillem de Bruijn 16307b0411efSWillem de Bruijn if (__netif_tx_trylock(txq)) { 1631a7766ef1SMichael S. Tsirkin do { 1632a7766ef1SMichael S. Tsirkin virtqueue_disable_cb(sq->vq); 1633df133f3fSMichael S. Tsirkin free_old_xmit_skbs(sq, true); 1634a7766ef1SMichael S. Tsirkin } while (unlikely(!virtqueue_enable_cb_delayed(sq->vq))); 16357b0411efSWillem de Bruijn 16367b0411efSWillem de Bruijn if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS) 16377b0411efSWillem de Bruijn netif_tx_wake_queue(txq); 163822bc63c5SMichael S. Tsirkin 163922bc63c5SMichael S. Tsirkin __netif_tx_unlock(txq); 164022bc63c5SMichael S. Tsirkin } 16417b0411efSWillem de Bruijn } 16427b0411efSWillem de Bruijn 16432ffa7598SJason Wang static int virtnet_poll(struct napi_struct *napi, int budget) 16442ffa7598SJason Wang { 16452ffa7598SJason Wang struct receive_queue *rq = 16462ffa7598SJason Wang container_of(napi, struct receive_queue, napi); 16479267c430SJason Wang struct virtnet_info *vi = rq->vq->vdev->priv; 16489267c430SJason Wang struct send_queue *sq; 16492a43565cSToshiaki Makita unsigned int received; 16502471c75eSJesper Dangaard Brouer unsigned int xdp_xmit = 0; 16512ffa7598SJason Wang 16527b0411efSWillem de Bruijn virtnet_poll_cleantx(rq); 16537b0411efSWillem de Bruijn 1654186b3c99SJason Wang received = virtnet_receive(rq, budget, &xdp_xmit); 16552ffa7598SJason Wang 16568329d98eSRusty Russell /* Out of packets? 
*/ 1657e4e8452aSWillem de Bruijn if (received < budget) 1658e4e8452aSWillem de Bruijn virtqueue_napi_complete(napi, rq->vq, received); 1659296f96fcSRusty Russell 16602471c75eSJesper Dangaard Brouer if (xdp_xmit & VIRTIO_XDP_REDIR) 16611d233886SToke Høiland-Jørgensen xdp_do_flush(); 16622471c75eSJesper Dangaard Brouer 16632471c75eSJesper Dangaard Brouer if (xdp_xmit & VIRTIO_XDP_TX) { 166497c2c69eSXuan Zhuo sq = virtnet_xdp_get_sq(vi); 1665461f03dcSToshiaki Makita if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq)) { 1666461f03dcSToshiaki Makita u64_stats_update_begin(&sq->stats.syncp); 1667461f03dcSToshiaki Makita sq->stats.kicks++; 1668461f03dcSToshiaki Makita u64_stats_update_end(&sq->stats.syncp); 1669461f03dcSToshiaki Makita } 167097c2c69eSXuan Zhuo virtnet_xdp_put_sq(vi, sq); 16719267c430SJason Wang } 1672186b3c99SJason Wang 1673296f96fcSRusty Russell return received; 1674296f96fcSRusty Russell } 1675296f96fcSRusty Russell 1676986a4f4dSJason Wang static int virtnet_open(struct net_device *dev) 1677986a4f4dSJason Wang { 1678986a4f4dSJason Wang struct virtnet_info *vi = netdev_priv(dev); 1679754b8a21SJesper Dangaard Brouer int i, err; 1680986a4f4dSJason Wang 16815a159128SJason Wang enable_delayed_refill(vi); 16825a159128SJason Wang 1683e4166625SJason Wang for (i = 0; i < vi->max_queue_pairs; i++) { 1684e4166625SJason Wang if (i < vi->curr_queue_pairs) 1685986a4f4dSJason Wang /* Make sure we have some buffers: if oom use wq. */ 1686946fa564SMichael S. Tsirkin if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL)) 1687986a4f4dSJason Wang schedule_delayed_work(&vi->refill, 0); 1688754b8a21SJesper Dangaard Brouer 1689b02e5a0eSBjörn Töpel err = xdp_rxq_info_reg(&vi->rq[i].xdp_rxq, dev, i, vi->rq[i].napi.napi_id); 1690754b8a21SJesper Dangaard Brouer if (err < 0) 1691754b8a21SJesper Dangaard Brouer return err; 1692754b8a21SJesper Dangaard Brouer 16938d5d8852SJesper Dangaard Brouer err = xdp_rxq_info_reg_mem_model(&vi->rq[i].xdp_rxq, 16948d5d8852SJesper Dangaard Brouer MEM_TYPE_PAGE_SHARED, NULL); 16958d5d8852SJesper Dangaard Brouer if (err < 0) { 16968d5d8852SJesper Dangaard Brouer xdp_rxq_info_unreg(&vi->rq[i].xdp_rxq); 16978d5d8852SJesper Dangaard Brouer return err; 16988d5d8852SJesper Dangaard Brouer } 16998d5d8852SJesper Dangaard Brouer 1700e4e8452aSWillem de Bruijn virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi); 1701b92f1e67SWillem de Bruijn virtnet_napi_tx_enable(vi, vi->sq[i].vq, &vi->sq[i].napi); 1702986a4f4dSJason Wang } 1703986a4f4dSJason Wang 1704986a4f4dSJason Wang return 0; 1705986a4f4dSJason Wang } 1706986a4f4dSJason Wang 1707b92f1e67SWillem de Bruijn static int virtnet_poll_tx(struct napi_struct *napi, int budget) 1708b92f1e67SWillem de Bruijn { 1709b92f1e67SWillem de Bruijn struct send_queue *sq = container_of(napi, struct send_queue, napi); 1710b92f1e67SWillem de Bruijn struct virtnet_info *vi = sq->vq->vdev->priv; 1711534da5e8SToshiaki Makita unsigned int index = vq2txq(sq->vq); 1712534da5e8SToshiaki Makita struct netdev_queue *txq; 17135a2f966dSMichael S. Tsirkin int opaque; 17145a2f966dSMichael S. 
Tsirkin bool done; 1715b92f1e67SWillem de Bruijn 1716534da5e8SToshiaki Makita if (unlikely(is_xdp_raw_buffer_queue(vi, index))) { 1717534da5e8SToshiaki Makita /* We don't need to enable cb for XDP */ 1718534da5e8SToshiaki Makita napi_complete_done(napi, 0); 1719534da5e8SToshiaki Makita return 0; 1720534da5e8SToshiaki Makita } 1721534da5e8SToshiaki Makita 1722534da5e8SToshiaki Makita txq = netdev_get_tx_queue(vi->dev, index); 1723b92f1e67SWillem de Bruijn __netif_tx_lock(txq, raw_smp_processor_id()); 17245a2f966dSMichael S. Tsirkin virtqueue_disable_cb(sq->vq); 1725df133f3fSMichael S. Tsirkin free_old_xmit_skbs(sq, true); 17265a2f966dSMichael S. Tsirkin 172722bc63c5SMichael S. Tsirkin if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS) 172822bc63c5SMichael S. Tsirkin netif_tx_wake_queue(txq); 172922bc63c5SMichael S. Tsirkin 17305a2f966dSMichael S. Tsirkin opaque = virtqueue_enable_cb_prepare(sq->vq); 17315a2f966dSMichael S. Tsirkin 17325a2f966dSMichael S. Tsirkin done = napi_complete_done(napi, 0); 17335a2f966dSMichael S. Tsirkin 17345a2f966dSMichael S. Tsirkin if (!done) 17355a2f966dSMichael S. Tsirkin virtqueue_disable_cb(sq->vq); 17365a2f966dSMichael S. Tsirkin 1737b92f1e67SWillem de Bruijn __netif_tx_unlock(txq); 1738b92f1e67SWillem de Bruijn 17395a2f966dSMichael S. Tsirkin if (done) { 17405a2f966dSMichael S. Tsirkin if (unlikely(virtqueue_poll(sq->vq, opaque))) { 17415a2f966dSMichael S. Tsirkin if (napi_schedule_prep(napi)) { 17425a2f966dSMichael S. Tsirkin __netif_tx_lock(txq, raw_smp_processor_id()); 17435a2f966dSMichael S. Tsirkin virtqueue_disable_cb(sq->vq); 17445a2f966dSMichael S. Tsirkin __netif_tx_unlock(txq); 17455a2f966dSMichael S. Tsirkin __napi_schedule(napi); 17465a2f966dSMichael S. Tsirkin } 17475a2f966dSMichael S. Tsirkin } 17485a2f966dSMichael S. Tsirkin } 1749b92f1e67SWillem de Bruijn 1750b92f1e67SWillem de Bruijn return 0; 1751b92f1e67SWillem de Bruijn } 1752b92f1e67SWillem de Bruijn 1753e9d7417bSJason Wang static int xmit_skb(struct send_queue *sq, struct sk_buff *skb) 1754296f96fcSRusty Russell { 1755012873d0SMichael S. Tsirkin struct virtio_net_hdr_mrg_rxbuf *hdr; 1756296f96fcSRusty Russell const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest; 1757e9d7417bSJason Wang struct virtnet_info *vi = sq->vq->vdev->priv; 1758e2fcad58SJason A. Donenfeld int num_sg; 1759012873d0SMichael S. Tsirkin unsigned hdr_len = vi->hdr_len; 1760e7428e95SMichael S. Tsirkin bool can_push; 1761296f96fcSRusty Russell 1762e174961cSJohannes Berg pr_debug("%s: xmit %p %pM\n", vi->dev->name, skb, dest); 1763e7428e95SMichael S. Tsirkin 1764e7428e95SMichael S. Tsirkin can_push = vi->any_header_sg && 1765e7428e95SMichael S. Tsirkin !((unsigned long)skb->data & (__alignof__(*hdr) - 1)) && 1766e7428e95SMichael S. Tsirkin !skb_header_cloned(skb) && skb_headroom(skb) >= hdr_len; 1767e7428e95SMichael S. Tsirkin /* Even if we can, don't push here yet as this would skew 1768e7428e95SMichael S. Tsirkin * csum_start offset below. */ 1769e7428e95SMichael S. Tsirkin if (can_push) 1770012873d0SMichael S. Tsirkin hdr = (struct virtio_net_hdr_mrg_rxbuf *)(skb->data - hdr_len); 1771e7428e95SMichael S. Tsirkin else 1772e7428e95SMichael S. Tsirkin hdr = skb_vnet_hdr(skb); 1773296f96fcSRusty Russell 1774e858fae2SMike Rapoport if (virtio_net_hdr_from_skb(skb, &hdr->hdr, 1775fd3a8862SWillem de Bruijn virtio_is_little_endian(vi->vdev), false, 1776fd3a8862SWillem de Bruijn 0)) 177785eb1389SXianting Tian return -EPROTO; 1778296f96fcSRusty Russell 1779e7428e95SMichael S. 
Tsirkin if (vi->mergeable_rx_bufs) 1780012873d0SMichael S. Tsirkin hdr->num_buffers = 0; 17813f2c31d9SMark McLoughlin 1782547c890cSJason Wang sg_init_table(sq->sg, skb_shinfo(skb)->nr_frags + (can_push ? 1 : 2)); 1783e7428e95SMichael S. Tsirkin if (can_push) { 1784e7428e95SMichael S. Tsirkin __skb_push(skb, hdr_len); 1785e7428e95SMichael S. Tsirkin num_sg = skb_to_sgvec(skb, sq->sg, 0, skb->len); 1786e2fcad58SJason A. Donenfeld if (unlikely(num_sg < 0)) 1787e2fcad58SJason A. Donenfeld return num_sg; 1788e7428e95SMichael S. Tsirkin /* Pull header back to avoid skew in tx bytes calculations. */ 1789e7428e95SMichael S. Tsirkin __skb_pull(skb, hdr_len); 1790e7428e95SMichael S. Tsirkin } else { 1791e7428e95SMichael S. Tsirkin sg_set_buf(sq->sg, hdr, hdr_len); 1792e2fcad58SJason A. Donenfeld num_sg = skb_to_sgvec(skb, sq->sg + 1, 0, skb->len); 1793e2fcad58SJason A. Donenfeld if (unlikely(num_sg < 0)) 1794e2fcad58SJason A. Donenfeld return num_sg; 1795e2fcad58SJason A. Donenfeld num_sg++; 1796e7428e95SMichael S. Tsirkin } 17979dc7b9e4SRusty Russell return virtqueue_add_outbuf(sq->vq, sq->sg, num_sg, skb, GFP_ATOMIC); 179811a3a154SRusty Russell } 179911a3a154SRusty Russell 1800424efe9cSStephen Hemminger static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev) 180199ffc696SRusty Russell { 180299ffc696SRusty Russell struct virtnet_info *vi = netdev_priv(dev); 1803986a4f4dSJason Wang int qnum = skb_get_queue_mapping(skb); 1804986a4f4dSJason Wang struct send_queue *sq = &vi->sq[qnum]; 18059ed4cb07SRusty Russell int err; 18064b7fd2e6SMichael S. Tsirkin struct netdev_queue *txq = netdev_get_tx_queue(dev, qnum); 18076b16f9eeSFlorian Westphal bool kick = !netdev_xmit_more(); 1808b92f1e67SWillem de Bruijn bool use_napi = sq->napi.weight; 18092cb9c6baSRusty Russell 18102cb9c6baSRusty Russell /* Free up any pending old buffers before queueing new ones. */ 1811a7766ef1SMichael S. Tsirkin do { 1812a7766ef1SMichael S. Tsirkin if (use_napi) 1813a7766ef1SMichael S. Tsirkin virtqueue_disable_cb(sq->vq); 1814a7766ef1SMichael S. Tsirkin 1815df133f3fSMichael S. Tsirkin free_old_xmit_skbs(sq, false); 181699ffc696SRusty Russell 1817a7766ef1SMichael S. Tsirkin } while (use_napi && kick && 1818a7766ef1SMichael S. Tsirkin unlikely(!virtqueue_enable_cb_delayed(sq->vq))); 1819bdb12e0dSWillem de Bruijn 1820074c3582SJacob Keller /* timestamp packet in software */ 1821074c3582SJacob Keller skb_tx_timestamp(skb); 1822074c3582SJacob Keller 182303f191baSMichael S. Tsirkin /* Try to transmit */ 1824b7dfde95SLinus Torvalds err = xmit_skb(sq, skb); 182599ffc696SRusty Russell 18269ed4cb07SRusty Russell /* This should not happen! */ 1827681daee2SJason Wang if (unlikely(err)) { 182858eba97dSRusty Russell dev->stats.tx_fifo_errors++; 18292e57b79cSRick Jones if (net_ratelimit()) 183058eba97dSRusty Russell dev_warn(&dev->dev, 18317934b481SYuval Shaia "Unexpected TXQ (%d) queue failure: %d\n", 18327934b481SYuval Shaia qnum, err); 183358eba97dSRusty Russell dev->stats.tx_dropped++; 183485e94525SEric W. Biederman dev_kfree_skb_any(skb); 183558eba97dSRusty Russell return NETDEV_TX_OK; 1836296f96fcSRusty Russell } 183703f191baSMichael S. Tsirkin 183848925e37SRusty Russell /* Don't wait up for transmitted skbs to be freed. */ 1839b92f1e67SWillem de Bruijn if (!use_napi) { 184048925e37SRusty Russell skb_orphan(skb); 1841895b5c9fSFlorian Westphal nf_reset_ct(skb); 1842b92f1e67SWillem de Bruijn } 184348925e37SRusty Russell 184460302ff6SMichael S. 
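/* Layouts produced by xmit_skb() above, as a sketch rather than an exact
 * memory map: when can_push holds, the virtio header is written into the
 * skb's own headroom, so header and frame form one contiguous region and
 * the header needs no scatterlist entry of its own; otherwise the header
 * is built out of line (skb_vnet_hdr() points into the skb's cb area) and
 * occupies sq->sg[0] by itself.
 *
 *	can_push:  [ virtio hdr | ethernet frame ... ]  -> sg[0..n-1]
 *	otherwise: sg[0] -> hdr,  sg[1..n] -> linear part + frags
 */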
Tsirkin /* If running out of space, stop queue to avoid getting packets that we 184560302ff6SMichael S. Tsirkin * are then unable to transmit. 184660302ff6SMichael S. Tsirkin * An alternative would be to force queuing layer to requeue the skb by 184760302ff6SMichael S. Tsirkin * returning NETDEV_TX_BUSY. However, NETDEV_TX_BUSY should not be 184860302ff6SMichael S. Tsirkin * returned in a normal path of operation: it means that driver is not 184960302ff6SMichael S. Tsirkin * maintaining the TX queue stop/start state properly, and causes 185060302ff6SMichael S. Tsirkin * the stack to do a non-trivial amount of useless work. 185160302ff6SMichael S. Tsirkin * Since most packets only take 1 or 2 ring slots, stopping the queue 185260302ff6SMichael S. Tsirkin * early means 16 slots are typically wasted. 1853d631b94eSstephen hemminger */ 1854b7dfde95SLinus Torvalds if (sq->vq->num_free < 2+MAX_SKB_FRAGS) { 1855986a4f4dSJason Wang netif_stop_subqueue(dev, qnum); 1856b92f1e67SWillem de Bruijn if (!use_napi && 1857b92f1e67SWillem de Bruijn unlikely(!virtqueue_enable_cb_delayed(sq->vq))) { 185848925e37SRusty Russell /* More just got used, free them then recheck. */ 1859df133f3fSMichael S. Tsirkin free_old_xmit_skbs(sq, false); 1860b7dfde95SLinus Torvalds if (sq->vq->num_free >= 2+MAX_SKB_FRAGS) { 1861986a4f4dSJason Wang netif_start_subqueue(dev, qnum); 1862e9d7417bSJason Wang virtqueue_disable_cb(sq->vq); 186348925e37SRusty Russell } 186448925e37SRusty Russell } 186548925e37SRusty Russell } 186648925e37SRusty Russell 1867461f03dcSToshiaki Makita if (kick || netif_xmit_stopped(txq)) { 1868461f03dcSToshiaki Makita if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq)) { 1869461f03dcSToshiaki Makita u64_stats_update_begin(&sq->stats.syncp); 1870461f03dcSToshiaki Makita sq->stats.kicks++; 1871461f03dcSToshiaki Makita u64_stats_update_end(&sq->stats.syncp); 1872461f03dcSToshiaki Makita } 1873461f03dcSToshiaki Makita } 18740b725a2cSDavid S. Miller 18750b725a2cSDavid S. Miller return NETDEV_TX_OK; 1876c223a078SDavid S. Miller } 1877c223a078SDavid S. Miller 187840cbfc37SAmos Kong /* 187940cbfc37SAmos Kong * Send command via the control virtqueue and check status. Commands 188040cbfc37SAmos Kong * supported by the hypervisor, as indicated by feature bits, should 1881788a8b6dSstephen hemminger * never fail unless improperly formatted. 188240cbfc37SAmos Kong */ 188340cbfc37SAmos Kong static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd, 1884d24bae32Sstephen hemminger struct scatterlist *out) 188540cbfc37SAmos Kong { 1886f7bc9594SRusty Russell struct scatterlist *sgs[4], hdr, stat; 1887d24bae32Sstephen hemminger unsigned out_num = 0, tmp; 1888222722bcSYunjian Wang int ret; 188940cbfc37SAmos Kong 189040cbfc37SAmos Kong /* Caller should know better */ 1891f7bc9594SRusty Russell BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ)); 189240cbfc37SAmos Kong 189312e57169SMichael S. Tsirkin vi->ctrl->status = ~0; 189412e57169SMichael S. Tsirkin vi->ctrl->hdr.class = class; 189512e57169SMichael S. Tsirkin vi->ctrl->hdr.cmd = cmd; 1896f7bc9594SRusty Russell /* Add header */ 189712e57169SMichael S. Tsirkin sg_init_one(&hdr, &vi->ctrl->hdr, sizeof(vi->ctrl->hdr)); 1898f7bc9594SRusty Russell sgs[out_num++] = &hdr; 189940cbfc37SAmos Kong 1900f7bc9594SRusty Russell if (out) 1901f7bc9594SRusty Russell sgs[out_num++] = out; 190240cbfc37SAmos Kong 1903f7bc9594SRusty Russell /* Add return status. */ 190412e57169SMichael S. 
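/* Worked numbers for the 2 + MAX_SKB_FRAGS low-water mark used in
 * start_xmit() above (assuming 4 KiB pages, where MAX_SKB_FRAGS == 17):
 * one descriptor for the out-of-line virtio header, one for the linear
 * part and up to 17 for page fragments gives a worst case of 19 slots per
 * skb, so the queue is stopped once fewer than 19 remain. Typical packets
 * need only one or two slots, which is the handful of idle slots the
 * comment above accepts in exchange for never returning NETDEV_TX_BUSY.
 */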
Tsirkin sg_init_one(&stat, &vi->ctrl->status, sizeof(vi->ctrl->status)); 1905d24bae32Sstephen hemminger sgs[out_num] = &stat; 190640cbfc37SAmos Kong 1907d24bae32Sstephen hemminger BUG_ON(out_num + 1 > ARRAY_SIZE(sgs)); 1908222722bcSYunjian Wang ret = virtqueue_add_sgs(vi->cvq, sgs, out_num, 1, vi, GFP_ATOMIC); 1909222722bcSYunjian Wang if (ret < 0) { 1910222722bcSYunjian Wang dev_warn(&vi->vdev->dev, 1911222722bcSYunjian Wang "Failed to add sgs for command vq: %d.\n", ret); 1912222722bcSYunjian Wang return false; 1913222722bcSYunjian Wang } 191440cbfc37SAmos Kong 191567975901SHeinz Graalfs if (unlikely(!virtqueue_kick(vi->cvq))) 191612e57169SMichael S. Tsirkin return vi->ctrl->status == VIRTIO_NET_OK; 191740cbfc37SAmos Kong 191840cbfc37SAmos Kong /* Spin for a response, the kick causes an ioport write, trapping 191940cbfc37SAmos Kong * into the hypervisor, so the request should be handled immediately. 192040cbfc37SAmos Kong */ 1921047b9b94SHeinz Graalfs while (!virtqueue_get_buf(vi->cvq, &tmp) && 1922047b9b94SHeinz Graalfs !virtqueue_is_broken(vi->cvq)) 192340cbfc37SAmos Kong cpu_relax(); 192440cbfc37SAmos Kong 192512e57169SMichael S. Tsirkin return vi->ctrl->status == VIRTIO_NET_OK; 192640cbfc37SAmos Kong } 192740cbfc37SAmos Kong 19289c46f6d4SAlex Williamson static int virtnet_set_mac_address(struct net_device *dev, void *p) 19299c46f6d4SAlex Williamson { 19309c46f6d4SAlex Williamson struct virtnet_info *vi = netdev_priv(dev); 19319c46f6d4SAlex Williamson struct virtio_device *vdev = vi->vdev; 1932f2f2c8b4SJiri Pirko int ret; 1933e37e2ff3SAndy Lutomirski struct sockaddr *addr; 19347e58d5aeSAmos Kong struct scatterlist sg; 19359c46f6d4SAlex Williamson 1936ba5e4426SSridhar Samudrala if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STANDBY)) 1937ba5e4426SSridhar Samudrala return -EOPNOTSUPP; 1938ba5e4426SSridhar Samudrala 1939801822d1SShyam Saini addr = kmemdup(p, sizeof(*addr), GFP_KERNEL); 1940e37e2ff3SAndy Lutomirski if (!addr) 1941e37e2ff3SAndy Lutomirski return -ENOMEM; 1942e37e2ff3SAndy Lutomirski 1943e37e2ff3SAndy Lutomirski ret = eth_prepare_mac_addr_change(dev, addr); 1944f2f2c8b4SJiri Pirko if (ret) 1945e37e2ff3SAndy Lutomirski goto out; 19469c46f6d4SAlex Williamson 19477e58d5aeSAmos Kong if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR)) { 19487e58d5aeSAmos Kong sg_init_one(&sg, addr->sa_data, dev->addr_len); 19497e58d5aeSAmos Kong if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC, 1950d24bae32Sstephen hemminger VIRTIO_NET_CTRL_MAC_ADDR_SET, &sg)) { 19517e58d5aeSAmos Kong dev_warn(&vdev->dev, 19527e58d5aeSAmos Kong "Failed to set mac address by vq command.\n"); 1953e37e2ff3SAndy Lutomirski ret = -EINVAL; 1954e37e2ff3SAndy Lutomirski goto out; 19557e58d5aeSAmos Kong } 19567e93a02fSMichael S. Tsirkin } else if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC) && 19577e93a02fSMichael S. Tsirkin !virtio_has_feature(vdev, VIRTIO_F_VERSION_1)) { 1958855e0c52SRusty Russell unsigned int i; 1959855e0c52SRusty Russell 1960855e0c52SRusty Russell /* Naturally, this has an atomicity problem.
*/ 1961855e0c52SRusty Russell for (i = 0; i < dev->addr_len; i++) 1962855e0c52SRusty Russell virtio_cwrite8(vdev, 1963855e0c52SRusty Russell offsetof(struct virtio_net_config, mac) + 1964855e0c52SRusty Russell i, addr->sa_data[i]); 19657e58d5aeSAmos Kong } 19667e58d5aeSAmos Kong 19677e58d5aeSAmos Kong eth_commit_mac_addr_change(dev, p); 1968e37e2ff3SAndy Lutomirski ret = 0; 19699c46f6d4SAlex Williamson 1970e37e2ff3SAndy Lutomirski out: 1971e37e2ff3SAndy Lutomirski kfree(addr); 1972e37e2ff3SAndy Lutomirski return ret; 19739c46f6d4SAlex Williamson } 19749c46f6d4SAlex Williamson 1975bc1f4470Sstephen hemminger static void virtnet_stats(struct net_device *dev, 19763fa2a1dfSstephen hemminger struct rtnl_link_stats64 *tot) 19773fa2a1dfSstephen hemminger { 19783fa2a1dfSstephen hemminger struct virtnet_info *vi = netdev_priv(dev); 19793fa2a1dfSstephen hemminger unsigned int start; 1980d7dfc5cfSToshiaki Makita int i; 19813fa2a1dfSstephen hemminger 1982d7dfc5cfSToshiaki Makita for (i = 0; i < vi->max_queue_pairs; i++) { 1983a520794bSTony Lu u64 tpackets, tbytes, terrors, rpackets, rbytes, rdrops; 1984d7dfc5cfSToshiaki Makita struct receive_queue *rq = &vi->rq[i]; 1985d7dfc5cfSToshiaki Makita struct send_queue *sq = &vi->sq[i]; 19863fa2a1dfSstephen hemminger 19873fa2a1dfSstephen hemminger do { 1988d7dfc5cfSToshiaki Makita start = u64_stats_fetch_begin_irq(&sq->stats.syncp); 1989d7dfc5cfSToshiaki Makita tpackets = sq->stats.packets; 1990d7dfc5cfSToshiaki Makita tbytes = sq->stats.bytes; 1991a520794bSTony Lu terrors = sq->stats.tx_timeouts; 1992d7dfc5cfSToshiaki Makita } while (u64_stats_fetch_retry_irq(&sq->stats.syncp, start)); 199383a27052SEric Dumazet 199483a27052SEric Dumazet do { 1995d7dfc5cfSToshiaki Makita start = u64_stats_fetch_begin_irq(&rq->stats.syncp); 1996d46eeeafSJason Wang rpackets = rq->stats.packets; 1997d46eeeafSJason Wang rbytes = rq->stats.bytes; 1998d46eeeafSJason Wang rdrops = rq->stats.drops; 1999d7dfc5cfSToshiaki Makita } while (u64_stats_fetch_retry_irq(&rq->stats.syncp, start)); 20003fa2a1dfSstephen hemminger 20013fa2a1dfSstephen hemminger tot->rx_packets += rpackets; 20023fa2a1dfSstephen hemminger tot->tx_packets += tpackets; 20033fa2a1dfSstephen hemminger tot->rx_bytes += rbytes; 20043fa2a1dfSstephen hemminger tot->tx_bytes += tbytes; 20052c4a2f7dSToshiaki Makita tot->rx_dropped += rdrops; 2006a520794bSTony Lu tot->tx_errors += terrors; 20073fa2a1dfSstephen hemminger } 20083fa2a1dfSstephen hemminger 20093fa2a1dfSstephen hemminger tot->tx_dropped = dev->stats.tx_dropped; 2010021ac8d3SRick Jones tot->tx_fifo_errors = dev->stats.tx_fifo_errors; 20113fa2a1dfSstephen hemminger tot->rx_length_errors = dev->stats.rx_length_errors; 20123fa2a1dfSstephen hemminger tot->rx_frame_errors = dev->stats.rx_frame_errors; 20133fa2a1dfSstephen hemminger } 20143fa2a1dfSstephen hemminger 2015586d17c5SJason Wang static void virtnet_ack_link_announce(struct virtnet_info *vi) 2016586d17c5SJason Wang { 2017586d17c5SJason Wang rtnl_lock(); 2018586d17c5SJason Wang if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_ANNOUNCE, 2019d24bae32Sstephen hemminger VIRTIO_NET_CTRL_ANNOUNCE_ACK, NULL)) 2020586d17c5SJason Wang dev_warn(&vi->dev->dev, "Failed to ack link announce.\n"); 2021586d17c5SJason Wang rtnl_unlock(); 2022586d17c5SJason Wang } 2023586d17c5SJason Wang 202447315329SJohn Fastabend static int _virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs) 2025986a4f4dSJason Wang { 2026986a4f4dSJason Wang struct scatterlist sg; 2027986a4f4dSJason Wang struct net_device *dev = vi->dev; 2028986a4f4dSJason 
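/* virtnet_stats() above uses the u64_stats seqcount so the 64-bit
 * counters read consistently even on 32-bit machines, retrying if a
 * writer raced with the snapshot. The reader side, reduced to a sketch
 * (snapshot_sq is a hypothetical name, not part of this driver):
 */
#if 0	/* illustrative sketch only, not built */
static void snapshot_sq(struct send_queue *sq, u64 *packets, u64 *bytes)
{
	unsigned int start;

	do {
		start = u64_stats_fetch_begin_irq(&sq->stats.syncp);
		*packets = sq->stats.packets;
		*bytes = sq->stats.bytes;
		/* retry if the counters changed under us */
	} while (u64_stats_fetch_retry_irq(&sq->stats.syncp, start));
}
#endif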
Wang 2029986a4f4dSJason Wang if (!vi->has_cvq || !virtio_has_feature(vi->vdev, VIRTIO_NET_F_MQ)) 2030986a4f4dSJason Wang return 0; 2031986a4f4dSJason Wang 203212e57169SMichael S. Tsirkin vi->ctrl->mq.virtqueue_pairs = cpu_to_virtio16(vi->vdev, queue_pairs); 203312e57169SMichael S. Tsirkin sg_init_one(&sg, &vi->ctrl->mq, sizeof(vi->ctrl->mq)); 2034986a4f4dSJason Wang 2035986a4f4dSJason Wang if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ, 2036d24bae32Sstephen hemminger VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET, &sg)) { 2037986a4f4dSJason Wang dev_warn(&dev->dev, "Failed to set the number of queue pairs to %d\n", 2038986a4f4dSJason Wang queue_pairs); 2039986a4f4dSJason Wang return -EINVAL; 204055257d72SSasha Levin } else { 2041986a4f4dSJason Wang vi->curr_queue_pairs = queue_pairs; 204235ed159bSJason Wang /* virtnet_open() will refill when the device goes up. */ 204335ed159bSJason Wang if (dev->flags & IFF_UP) 20449b9cd802SJason Wang schedule_delayed_work(&vi->refill, 0); 204555257d72SSasha Levin } 2046986a4f4dSJason Wang 2047986a4f4dSJason Wang return 0; 2048986a4f4dSJason Wang } 2049986a4f4dSJason Wang 205047315329SJohn Fastabend static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs) 205147315329SJohn Fastabend { 205247315329SJohn Fastabend int err; 205347315329SJohn Fastabend 205447315329SJohn Fastabend rtnl_lock(); 205547315329SJohn Fastabend err = _virtnet_set_queues(vi, queue_pairs); 205647315329SJohn Fastabend rtnl_unlock(); 205747315329SJohn Fastabend return err; 205847315329SJohn Fastabend } 205947315329SJohn Fastabend 2060296f96fcSRusty Russell static int virtnet_close(struct net_device *dev) 2061296f96fcSRusty Russell { 2062296f96fcSRusty Russell struct virtnet_info *vi = netdev_priv(dev); 2063986a4f4dSJason Wang int i; 2064296f96fcSRusty Russell 20655a159128SJason Wang /* Make sure NAPI doesn't schedule refill work */ 20665a159128SJason Wang disable_delayed_refill(vi); 2067b2baed69SRusty Russell /* Make sure refill_work doesn't re-enable napi! */ 2068b2baed69SRusty Russell cancel_delayed_work_sync(&vi->refill); 2069986a4f4dSJason Wang 2070b92f1e67SWillem de Bruijn for (i = 0; i < vi->max_queue_pairs; i++) { 2071754b8a21SJesper Dangaard Brouer xdp_rxq_info_unreg(&vi->rq[i].xdp_rxq); 2072986a4f4dSJason Wang napi_disable(&vi->rq[i].napi); 207378a57b48SWillem de Bruijn virtnet_napi_tx_disable(&vi->sq[i].napi); 2074b92f1e67SWillem de Bruijn } 2075296f96fcSRusty Russell 2076296f96fcSRusty Russell return 0; 2077296f96fcSRusty Russell } 2078296f96fcSRusty Russell 20792af7698eSAlex Williamson static void virtnet_set_rx_mode(struct net_device *dev) 20802af7698eSAlex Williamson { 20812af7698eSAlex Williamson struct virtnet_info *vi = netdev_priv(dev); 2082f565a7c2SAlex Williamson struct scatterlist sg[2]; 2083f565a7c2SAlex Williamson struct virtio_net_ctrl_mac *mac_data; 2084ccffad25SJiri Pirko struct netdev_hw_addr *ha; 208532e7bfc4SJiri Pirko int uc_count; 20864cd24eafSJiri Pirko int mc_count; 2087f565a7c2SAlex Williamson void *buf; 2088f565a7c2SAlex Williamson int i; 20892af7698eSAlex Williamson 2090788a8b6dSstephen hemminger /* We can't dynamically set ndo_set_rx_mode, so return gracefully */ 20912af7698eSAlex Williamson if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_RX)) 20922af7698eSAlex Williamson return; 20932af7698eSAlex Williamson 209412e57169SMichael S. Tsirkin vi->ctrl->promisc = ((dev->flags & IFF_PROMISC) != 0); 209512e57169SMichael S. Tsirkin vi->ctrl->allmulti = ((dev->flags & IFF_ALLMULTI) != 0); 20962af7698eSAlex Williamson 209712e57169SMichael S.
Tsirkin sg_init_one(sg, &vi->ctrl->promisc, sizeof(vi->ctrl->promisc)); 20982af7698eSAlex Williamson 20992af7698eSAlex Williamson if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX, 2100d24bae32Sstephen hemminger VIRTIO_NET_CTRL_RX_PROMISC, sg)) 21012af7698eSAlex Williamson dev_warn(&dev->dev, "Failed to %sable promisc mode.\n", 210212e57169SMichael S. Tsirkin vi->ctrl->promisc ? "en" : "dis"); 21032af7698eSAlex Williamson 210412e57169SMichael S. Tsirkin sg_init_one(sg, &vi->ctrl->allmulti, sizeof(vi->ctrl->allmulti)); 21052af7698eSAlex Williamson 21062af7698eSAlex Williamson if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX, 2107d24bae32Sstephen hemminger VIRTIO_NET_CTRL_RX_ALLMULTI, sg)) 21082af7698eSAlex Williamson dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n", 210912e57169SMichael S. Tsirkin vi->ctrl->allmulti ? "en" : "dis"); 2110f565a7c2SAlex Williamson 211132e7bfc4SJiri Pirko uc_count = netdev_uc_count(dev); 21124cd24eafSJiri Pirko mc_count = netdev_mc_count(dev); 2113f565a7c2SAlex Williamson /* MAC filter - use one buffer for both lists */ 21144cd24eafSJiri Pirko buf = kzalloc(((uc_count + mc_count) * ETH_ALEN) + 2115f565a7c2SAlex Williamson (2 * sizeof(mac_data->entries)), GFP_ATOMIC); 21164cd24eafSJiri Pirko mac_data = buf; 2117e68ed8f0SJoe Perches if (!buf) 2118f565a7c2SAlex Williamson return; 2119f565a7c2SAlex Williamson 212023e258e1SAlex Williamson sg_init_table(sg, 2); 212123e258e1SAlex Williamson 2122f565a7c2SAlex Williamson /* Store the unicast list and count in the front of the buffer */ 2123fdd819b2SMichael S. Tsirkin mac_data->entries = cpu_to_virtio32(vi->vdev, uc_count); 2124ccffad25SJiri Pirko i = 0; 212532e7bfc4SJiri Pirko netdev_for_each_uc_addr(ha, dev) 2126ccffad25SJiri Pirko memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN); 2127f565a7c2SAlex Williamson 2128f565a7c2SAlex Williamson sg_set_buf(&sg[0], mac_data, 212932e7bfc4SJiri Pirko sizeof(mac_data->entries) + (uc_count * ETH_ALEN)); 2130f565a7c2SAlex Williamson 2131f565a7c2SAlex Williamson /* multicast list and count fill the end */ 213232e7bfc4SJiri Pirko mac_data = (void *)&mac_data->macs[uc_count][0]; 2133f565a7c2SAlex Williamson 2134fdd819b2SMichael S. Tsirkin mac_data->entries = cpu_to_virtio32(vi->vdev, mc_count); 2135567ec874SJiri Pirko i = 0; 213622bedad3SJiri Pirko netdev_for_each_mc_addr(ha, dev) 213722bedad3SJiri Pirko memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN); 2138f565a7c2SAlex Williamson 2139f565a7c2SAlex Williamson sg_set_buf(&sg[1], mac_data, 21404cd24eafSJiri Pirko sizeof(mac_data->entries) + (mc_count * ETH_ALEN)); 2141f565a7c2SAlex Williamson 2142f565a7c2SAlex Williamson if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC, 2143d24bae32Sstephen hemminger VIRTIO_NET_CTRL_MAC_TABLE_SET, sg)) 214499e872aeSThomas Huth dev_warn(&dev->dev, "Failed to set MAC filter table.\n"); 2145f565a7c2SAlex Williamson 2146f565a7c2SAlex Williamson kfree(buf); 21472af7698eSAlex Williamson } 21482af7698eSAlex Williamson 214980d5c368SPatrick McHardy static int virtnet_vlan_rx_add_vid(struct net_device *dev, 215080d5c368SPatrick McHardy __be16 proto, u16 vid) 21510bde9569SAlex Williamson { 21520bde9569SAlex Williamson struct virtnet_info *vi = netdev_priv(dev); 21530bde9569SAlex Williamson struct scatterlist sg; 21540bde9569SAlex Williamson 2155d7fad4c8SMichael S. Tsirkin vi->ctrl->vid = cpu_to_virtio16(vi->vdev, vid); 215612e57169SMichael S. 
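/* Buffer layout built by virtnet_set_rx_mode() above: both address lists
 * share one allocation, each headed by its little-endian entry count, and
 * the two halves are handed to the device as separate scatterlist entries
 * (field widths below are schematic, not to scale):
 *
 *	+------------+---------------------+------------+---------------------+
 *	| uc entries | uc_count * ETH_ALEN | mc entries | mc_count * ETH_ALEN |
 *	+------------+---------------------+------------+---------------------+
 *	\__________________ sg[0] _________/\__________________ sg[1] ________/
 */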
Tsirkin sg_init_one(&sg, &vi->ctrl->vid, sizeof(vi->ctrl->vid)); 21570bde9569SAlex Williamson 21580bde9569SAlex Williamson if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN, 2159d24bae32Sstephen hemminger VIRTIO_NET_CTRL_VLAN_ADD, &sg)) 21600bde9569SAlex Williamson dev_warn(&dev->dev, "Failed to add VLAN ID %d.\n", vid); 21618e586137SJiri Pirko return 0; 21620bde9569SAlex Williamson } 21630bde9569SAlex Williamson 216480d5c368SPatrick McHardy static int virtnet_vlan_rx_kill_vid(struct net_device *dev, 216580d5c368SPatrick McHardy __be16 proto, u16 vid) 21660bde9569SAlex Williamson { 21670bde9569SAlex Williamson struct virtnet_info *vi = netdev_priv(dev); 21680bde9569SAlex Williamson struct scatterlist sg; 21690bde9569SAlex Williamson 2170d7fad4c8SMichael S. Tsirkin vi->ctrl->vid = cpu_to_virtio16(vi->vdev, vid); 217112e57169SMichael S. Tsirkin sg_init_one(&sg, &vi->ctrl->vid, sizeof(vi->ctrl->vid)); 21720bde9569SAlex Williamson 21730bde9569SAlex Williamson if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN, 2174d24bae32Sstephen hemminger VIRTIO_NET_CTRL_VLAN_DEL, &sg)) 21750bde9569SAlex Williamson dev_warn(&dev->dev, "Failed to kill VLAN ID %d.\n", vid); 21768e586137SJiri Pirko return 0; 21770bde9569SAlex Williamson } 21780bde9569SAlex Williamson 2179310974faSPeter Xu static void virtnet_clean_affinity(struct virtnet_info *vi) 2180986a4f4dSJason Wang { 2181986a4f4dSJason Wang int i; 21828898c21cSWanlong Gao 21838898c21cSWanlong Gao if (vi->affinity_hint_set) { 21848898c21cSWanlong Gao for (i = 0; i < vi->max_queue_pairs; i++) { 218519e226e8SCaleb Raitto virtqueue_set_affinity(vi->rq[i].vq, NULL); 218619e226e8SCaleb Raitto virtqueue_set_affinity(vi->sq[i].vq, NULL); 21878898c21cSWanlong Gao } 21888898c21cSWanlong Gao 21898898c21cSWanlong Gao vi->affinity_hint_set = false; 21908898c21cSWanlong Gao } 21918898c21cSWanlong Gao } 21928898c21cSWanlong Gao 21938898c21cSWanlong Gao static void virtnet_set_affinity(struct virtnet_info *vi) 2194986a4f4dSJason Wang { 21952ca653d6SCaleb Raitto cpumask_var_t mask; 21962ca653d6SCaleb Raitto int stragglers; 21972ca653d6SCaleb Raitto int group_size; 21982ca653d6SCaleb Raitto int i, j, cpu; 21992ca653d6SCaleb Raitto int num_cpu; 22002ca653d6SCaleb Raitto int stride; 2201986a4f4dSJason Wang 22022ca653d6SCaleb Raitto if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) { 2203310974faSPeter Xu virtnet_clean_affinity(vi); 2204986a4f4dSJason Wang return; 2205986a4f4dSJason Wang } 2206986a4f4dSJason Wang 22072ca653d6SCaleb Raitto num_cpu = num_online_cpus(); 22082ca653d6SCaleb Raitto stride = max_t(int, num_cpu / vi->curr_queue_pairs, 1); 22092ca653d6SCaleb Raitto stragglers = num_cpu >= vi->curr_queue_pairs ? 22102ca653d6SCaleb Raitto num_cpu % vi->curr_queue_pairs : 22112ca653d6SCaleb Raitto 0; 22129b51d9d8SYury Norov cpu = cpumask_first(cpu_online_mask); 22134d99f660SAndrei Vagin 22142ca653d6SCaleb Raitto for (i = 0; i < vi->curr_queue_pairs; i++) { 22152ca653d6SCaleb Raitto group_size = stride + (i < stragglers ? 
1 : 0); 22162ca653d6SCaleb Raitto 22172ca653d6SCaleb Raitto for (j = 0; j < group_size; j++) { 22182ca653d6SCaleb Raitto cpumask_set_cpu(cpu, mask); 22192ca653d6SCaleb Raitto cpu = cpumask_next_wrap(cpu, cpu_online_mask, 22202ca653d6SCaleb Raitto nr_cpu_ids, false); 22212ca653d6SCaleb Raitto } 22222ca653d6SCaleb Raitto virtqueue_set_affinity(vi->rq[i].vq, mask); 22232ca653d6SCaleb Raitto virtqueue_set_affinity(vi->sq[i].vq, mask); 2224044ab86dSAntoine Tenart __netif_set_xps_queue(vi->dev, cpumask_bits(mask), i, XPS_CPUS); 22252ca653d6SCaleb Raitto cpumask_clear(mask); 2226986a4f4dSJason Wang } 2227986a4f4dSJason Wang 2228986a4f4dSJason Wang vi->affinity_hint_set = true; 22292ca653d6SCaleb Raitto free_cpumask_var(mask); 223047be2479SWanlong Gao } 2231986a4f4dSJason Wang 22328017c279SSebastian Andrzej Siewior static int virtnet_cpu_online(unsigned int cpu, struct hlist_node *node) 22338de4b2f3SWanlong Gao { 22348017c279SSebastian Andrzej Siewior struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info, 22358017c279SSebastian Andrzej Siewior node); 22368de4b2f3SWanlong Gao virtnet_set_affinity(vi); 22378017c279SSebastian Andrzej Siewior return 0; 22388de4b2f3SWanlong Gao } 22393ab098dfSJason Wang 22408017c279SSebastian Andrzej Siewior static int virtnet_cpu_dead(unsigned int cpu, struct hlist_node *node) 22418017c279SSebastian Andrzej Siewior { 22428017c279SSebastian Andrzej Siewior struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info, 22438017c279SSebastian Andrzej Siewior node_dead); 22448017c279SSebastian Andrzej Siewior virtnet_set_affinity(vi); 22458017c279SSebastian Andrzej Siewior return 0; 22468017c279SSebastian Andrzej Siewior } 22478017c279SSebastian Andrzej Siewior 22488017c279SSebastian Andrzej Siewior static int virtnet_cpu_down_prep(unsigned int cpu, struct hlist_node *node) 22498017c279SSebastian Andrzej Siewior { 22508017c279SSebastian Andrzej Siewior struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info, 22518017c279SSebastian Andrzej Siewior node); 22528017c279SSebastian Andrzej Siewior 2253310974faSPeter Xu virtnet_clean_affinity(vi); 22548017c279SSebastian Andrzej Siewior return 0; 22558017c279SSebastian Andrzej Siewior } 22568017c279SSebastian Andrzej Siewior 22578017c279SSebastian Andrzej Siewior static enum cpuhp_state virtionet_online; 22588017c279SSebastian Andrzej Siewior 22598017c279SSebastian Andrzej Siewior static int virtnet_cpu_notif_add(struct virtnet_info *vi) 22608017c279SSebastian Andrzej Siewior { 22618017c279SSebastian Andrzej Siewior int ret; 22628017c279SSebastian Andrzej Siewior 22638017c279SSebastian Andrzej Siewior ret = cpuhp_state_add_instance_nocalls(virtionet_online, &vi->node); 22648017c279SSebastian Andrzej Siewior if (ret) 22658017c279SSebastian Andrzej Siewior return ret; 22668017c279SSebastian Andrzej Siewior ret = cpuhp_state_add_instance_nocalls(CPUHP_VIRT_NET_DEAD, 22678017c279SSebastian Andrzej Siewior &vi->node_dead); 22688017c279SSebastian Andrzej Siewior if (!ret) 22698017c279SSebastian Andrzej Siewior return ret; 22708017c279SSebastian Andrzej Siewior cpuhp_state_remove_instance_nocalls(virtionet_online, &vi->node); 22718017c279SSebastian Andrzej Siewior return ret; 22728017c279SSebastian Andrzej Siewior } 22738017c279SSebastian Andrzej Siewior 22748017c279SSebastian Andrzej Siewior static void virtnet_cpu_notif_remove(struct virtnet_info *vi) 22758017c279SSebastian Andrzej Siewior { 22768017c279SSebastian Andrzej Siewior cpuhp_state_remove_instance_nocalls(virtionet_online, &vi->node); 
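/* Worked example for the striping in virtnet_set_affinity() above
 * (CPU counts are illustrative): with 8 online CPUs and 3 queue pairs,
 * stride = max(8 / 3, 1) = 2 and stragglers = 8 % 3 = 2, so the first two
 * queues each get one extra CPU:
 *
 *	queue 0 -> CPUs {0, 1, 2}
 *	queue 1 -> CPUs {3, 4, 5}
 *	queue 2 -> CPUs {6, 7}
 *
 * and each mask is also published as that queue's XPS map.
 */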
22778017c279SSebastian Andrzej Siewior cpuhp_state_remove_instance_nocalls(CPUHP_VIRT_NET_DEAD, 22788017c279SSebastian Andrzej Siewior &vi->node_dead); 2279a9ea3fc6SHerbert Xu } 2280a9ea3fc6SHerbert Xu 22818f9f4668SRick Jones static void virtnet_get_ringparam(struct net_device *dev, 228274624944SHao Chen struct ethtool_ringparam *ring, 228374624944SHao Chen struct kernel_ethtool_ringparam *kernel_ring, 228474624944SHao Chen struct netlink_ext_ack *extack) 22858f9f4668SRick Jones { 22868f9f4668SRick Jones struct virtnet_info *vi = netdev_priv(dev); 22878f9f4668SRick Jones 2288986a4f4dSJason Wang ring->rx_max_pending = virtqueue_get_vring_size(vi->rq[0].vq); 2289986a4f4dSJason Wang ring->tx_max_pending = virtqueue_get_vring_size(vi->sq[0].vq); 22908f9f4668SRick Jones ring->rx_pending = ring->rx_max_pending; 22918f9f4668SRick Jones ring->tx_pending = ring->tx_max_pending; 22928f9f4668SRick Jones } 22938f9f4668SRick Jones 2294c7114b12SAndrew Melnychenko static bool virtnet_commit_rss_command(struct virtnet_info *vi) 2295c7114b12SAndrew Melnychenko { 2296c7114b12SAndrew Melnychenko struct net_device *dev = vi->dev; 2297c7114b12SAndrew Melnychenko struct scatterlist sgs[4]; 2298c7114b12SAndrew Melnychenko unsigned int sg_buf_size; 2299c7114b12SAndrew Melnychenko 2300c7114b12SAndrew Melnychenko /* prepare sgs */ 2301c7114b12SAndrew Melnychenko sg_init_table(sgs, 4); 2302c7114b12SAndrew Melnychenko 2303c7114b12SAndrew Melnychenko sg_buf_size = offsetof(struct virtio_net_ctrl_rss, indirection_table); 2304c7114b12SAndrew Melnychenko sg_set_buf(&sgs[0], &vi->ctrl->rss, sg_buf_size); 2305c7114b12SAndrew Melnychenko 2306c7114b12SAndrew Melnychenko sg_buf_size = sizeof(uint16_t) * (vi->ctrl->rss.indirection_table_mask + 1); 2307c7114b12SAndrew Melnychenko sg_set_buf(&sgs[1], vi->ctrl->rss.indirection_table, sg_buf_size); 2308c7114b12SAndrew Melnychenko 2309c7114b12SAndrew Melnychenko sg_buf_size = offsetof(struct virtio_net_ctrl_rss, key) 2310c7114b12SAndrew Melnychenko - offsetof(struct virtio_net_ctrl_rss, max_tx_vq); 2311c7114b12SAndrew Melnychenko sg_set_buf(&sgs[2], &vi->ctrl->rss.max_tx_vq, sg_buf_size); 2312c7114b12SAndrew Melnychenko 2313c7114b12SAndrew Melnychenko sg_buf_size = vi->rss_key_size; 2314c7114b12SAndrew Melnychenko sg_set_buf(&sgs[3], vi->ctrl->rss.key, sg_buf_size); 2315c7114b12SAndrew Melnychenko 2316c7114b12SAndrew Melnychenko if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ, 231791f41f01SAndrew Melnychenko vi->has_rss ? VIRTIO_NET_CTRL_MQ_RSS_CONFIG 231891f41f01SAndrew Melnychenko : VIRTIO_NET_CTRL_MQ_HASH_CONFIG, sgs)) { 2319c7114b12SAndrew Melnychenko dev_warn(&dev->dev, "VIRTIONET issue with committing RSS sgs\n"); 2320c7114b12SAndrew Melnychenko return false; 2321c7114b12SAndrew Melnychenko } 2322c7114b12SAndrew Melnychenko return true; 2323c7114b12SAndrew Melnychenko } 2324c7114b12SAndrew Melnychenko 2325c7114b12SAndrew Melnychenko static void virtnet_init_default_rss(struct virtnet_info *vi) 2326c7114b12SAndrew Melnychenko { 2327c7114b12SAndrew Melnychenko u32 indir_val = 0; 2328c7114b12SAndrew Melnychenko int i = 0; 2329c7114b12SAndrew Melnychenko 2330c7114b12SAndrew Melnychenko vi->ctrl->rss.hash_types = vi->rss_hash_types_supported; 2331c1170820SAndrew Melnychenko vi->rss_hash_types_saved = vi->rss_hash_types_supported; 2332c7114b12SAndrew Melnychenko vi->ctrl->rss.indirection_table_mask = vi->rss_indir_table_size 2333c7114b12SAndrew Melnychenko ? 
vi->rss_indir_table_size - 1 : 0; 2334c7114b12SAndrew Melnychenko vi->ctrl->rss.unclassified_queue = 0; 2335c7114b12SAndrew Melnychenko 2336c7114b12SAndrew Melnychenko for (; i < vi->rss_indir_table_size; ++i) { 2337c7114b12SAndrew Melnychenko indir_val = ethtool_rxfh_indir_default(i, vi->curr_queue_pairs); 2338c7114b12SAndrew Melnychenko vi->ctrl->rss.indirection_table[i] = indir_val; 2339c7114b12SAndrew Melnychenko } 2340c7114b12SAndrew Melnychenko 2341c7114b12SAndrew Melnychenko vi->ctrl->rss.max_tx_vq = vi->curr_queue_pairs; 2342c7114b12SAndrew Melnychenko vi->ctrl->rss.hash_key_length = vi->rss_key_size; 2343c7114b12SAndrew Melnychenko 2344c7114b12SAndrew Melnychenko netdev_rss_key_fill(vi->ctrl->rss.key, vi->rss_key_size); 2345c7114b12SAndrew Melnychenko } 2346c7114b12SAndrew Melnychenko 2347c1170820SAndrew Melnychenko static void virtnet_get_hashflow(const struct virtnet_info *vi, struct ethtool_rxnfc *info) 2348c1170820SAndrew Melnychenko { 2349c1170820SAndrew Melnychenko info->data = 0; 2350c1170820SAndrew Melnychenko switch (info->flow_type) { 2351c1170820SAndrew Melnychenko case TCP_V4_FLOW: 2352c1170820SAndrew Melnychenko if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_TCPv4) { 2353c1170820SAndrew Melnychenko info->data = RXH_IP_SRC | RXH_IP_DST | 2354c1170820SAndrew Melnychenko RXH_L4_B_0_1 | RXH_L4_B_2_3; 2355c1170820SAndrew Melnychenko } else if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv4) { 2356c1170820SAndrew Melnychenko info->data = RXH_IP_SRC | RXH_IP_DST; 2357c1170820SAndrew Melnychenko } 2358c1170820SAndrew Melnychenko break; 2359c1170820SAndrew Melnychenko case TCP_V6_FLOW: 2360c1170820SAndrew Melnychenko if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_TCPv6) { 2361c1170820SAndrew Melnychenko info->data = RXH_IP_SRC | RXH_IP_DST | 2362c1170820SAndrew Melnychenko RXH_L4_B_0_1 | RXH_L4_B_2_3; 2363c1170820SAndrew Melnychenko } else if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv6) { 2364c1170820SAndrew Melnychenko info->data = RXH_IP_SRC | RXH_IP_DST; 2365c1170820SAndrew Melnychenko } 2366c1170820SAndrew Melnychenko break; 2367c1170820SAndrew Melnychenko case UDP_V4_FLOW: 2368c1170820SAndrew Melnychenko if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_UDPv4) { 2369c1170820SAndrew Melnychenko info->data = RXH_IP_SRC | RXH_IP_DST | 2370c1170820SAndrew Melnychenko RXH_L4_B_0_1 | RXH_L4_B_2_3; 2371c1170820SAndrew Melnychenko } else if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv4) { 2372c1170820SAndrew Melnychenko info->data = RXH_IP_SRC | RXH_IP_DST; 2373c1170820SAndrew Melnychenko } 2374c1170820SAndrew Melnychenko break; 2375c1170820SAndrew Melnychenko case UDP_V6_FLOW: 2376c1170820SAndrew Melnychenko if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_UDPv6) { 2377c1170820SAndrew Melnychenko info->data = RXH_IP_SRC | RXH_IP_DST | 2378c1170820SAndrew Melnychenko RXH_L4_B_0_1 | RXH_L4_B_2_3; 2379c1170820SAndrew Melnychenko } else if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv6) { 2380c1170820SAndrew Melnychenko info->data = RXH_IP_SRC | RXH_IP_DST; 2381c1170820SAndrew Melnychenko } 2382c1170820SAndrew Melnychenko break; 2383c1170820SAndrew Melnychenko case IPV4_FLOW: 2384c1170820SAndrew Melnychenko if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv4) 2385c1170820SAndrew Melnychenko info->data = RXH_IP_SRC | RXH_IP_DST; 2386c1170820SAndrew Melnychenko 2387c1170820SAndrew Melnychenko break; 2388c1170820SAndrew Melnychenko case IPV6_FLOW: 2389c1170820SAndrew Melnychenko if 
(vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv6) 2390c1170820SAndrew Melnychenko info->data = RXH_IP_SRC | RXH_IP_DST; 2391c1170820SAndrew Melnychenko 2392c1170820SAndrew Melnychenko break; 2393c1170820SAndrew Melnychenko default: 2394c1170820SAndrew Melnychenko info->data = 0; 2395c1170820SAndrew Melnychenko break; 2396c1170820SAndrew Melnychenko } 2397c1170820SAndrew Melnychenko } 2398c1170820SAndrew Melnychenko 2399c1170820SAndrew Melnychenko static bool virtnet_set_hashflow(struct virtnet_info *vi, struct ethtool_rxnfc *info) 2400c1170820SAndrew Melnychenko { 2401c1170820SAndrew Melnychenko u32 new_hashtypes = vi->rss_hash_types_saved; 2402c1170820SAndrew Melnychenko bool is_disable = info->data & RXH_DISCARD; 2403c1170820SAndrew Melnychenko bool is_l4 = info->data == (RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3); 2404c1170820SAndrew Melnychenko 2405c1170820SAndrew Melnychenko /* supports only 'sd', 'sdfn' and 'r' */ 2406c1170820SAndrew Melnychenko if (!((info->data == (RXH_IP_SRC | RXH_IP_DST)) | is_l4 | is_disable)) 2407c1170820SAndrew Melnychenko return false; 2408c1170820SAndrew Melnychenko 2409c1170820SAndrew Melnychenko switch (info->flow_type) { 2410c1170820SAndrew Melnychenko case TCP_V4_FLOW: 2411c1170820SAndrew Melnychenko new_hashtypes &= ~(VIRTIO_NET_RSS_HASH_TYPE_IPv4 | VIRTIO_NET_RSS_HASH_TYPE_TCPv4); 2412c1170820SAndrew Melnychenko if (!is_disable) 2413c1170820SAndrew Melnychenko new_hashtypes |= VIRTIO_NET_RSS_HASH_TYPE_IPv4 2414c1170820SAndrew Melnychenko | (is_l4 ? VIRTIO_NET_RSS_HASH_TYPE_TCPv4 : 0); 2415c1170820SAndrew Melnychenko break; 2416c1170820SAndrew Melnychenko case UDP_V4_FLOW: 2417c1170820SAndrew Melnychenko new_hashtypes &= ~(VIRTIO_NET_RSS_HASH_TYPE_IPv4 | VIRTIO_NET_RSS_HASH_TYPE_UDPv4); 2418c1170820SAndrew Melnychenko if (!is_disable) 2419c1170820SAndrew Melnychenko new_hashtypes |= VIRTIO_NET_RSS_HASH_TYPE_IPv4 2420c1170820SAndrew Melnychenko | (is_l4 ? VIRTIO_NET_RSS_HASH_TYPE_UDPv4 : 0); 2421c1170820SAndrew Melnychenko break; 2422c1170820SAndrew Melnychenko case IPV4_FLOW: 2423c1170820SAndrew Melnychenko new_hashtypes &= ~VIRTIO_NET_RSS_HASH_TYPE_IPv4; 2424c1170820SAndrew Melnychenko if (!is_disable) 2425c1170820SAndrew Melnychenko new_hashtypes = VIRTIO_NET_RSS_HASH_TYPE_IPv4; 2426c1170820SAndrew Melnychenko break; 2427c1170820SAndrew Melnychenko case TCP_V6_FLOW: 2428c1170820SAndrew Melnychenko new_hashtypes &= ~(VIRTIO_NET_RSS_HASH_TYPE_IPv6 | VIRTIO_NET_RSS_HASH_TYPE_TCPv6); 2429c1170820SAndrew Melnychenko if (!is_disable) 2430c1170820SAndrew Melnychenko new_hashtypes |= VIRTIO_NET_RSS_HASH_TYPE_IPv6 2431c1170820SAndrew Melnychenko | (is_l4 ? VIRTIO_NET_RSS_HASH_TYPE_TCPv6 : 0); 2432c1170820SAndrew Melnychenko break; 2433c1170820SAndrew Melnychenko case UDP_V6_FLOW: 2434c1170820SAndrew Melnychenko new_hashtypes &= ~(VIRTIO_NET_RSS_HASH_TYPE_IPv6 | VIRTIO_NET_RSS_HASH_TYPE_UDPv6); 2435c1170820SAndrew Melnychenko if (!is_disable) 2436c1170820SAndrew Melnychenko new_hashtypes |= VIRTIO_NET_RSS_HASH_TYPE_IPv6 2437c1170820SAndrew Melnychenko | (is_l4 ? 
VIRTIO_NET_RSS_HASH_TYPE_UDPv6 : 0);
2438c1170820SAndrew Melnychenko break;
2439c1170820SAndrew Melnychenko case IPV6_FLOW:
2440c1170820SAndrew Melnychenko new_hashtypes &= ~VIRTIO_NET_RSS_HASH_TYPE_IPv6;
2441c1170820SAndrew Melnychenko if (!is_disable)
2442c1170820SAndrew Melnychenko new_hashtypes = VIRTIO_NET_RSS_HASH_TYPE_IPv6;
2443c1170820SAndrew Melnychenko break;
2444c1170820SAndrew Melnychenko default:
2445c1170820SAndrew Melnychenko /* unsupported flow */
2446c1170820SAndrew Melnychenko return false;
2447c1170820SAndrew Melnychenko }
2448c1170820SAndrew Melnychenko
2449c1170820SAndrew Melnychenko /* reject the request if an unsupported hash type was set */
2450c1170820SAndrew Melnychenko if (new_hashtypes != (new_hashtypes & vi->rss_hash_types_supported))
2451c1170820SAndrew Melnychenko return false;
2452c1170820SAndrew Melnychenko
2453c1170820SAndrew Melnychenko if (new_hashtypes != vi->rss_hash_types_saved) {
2454c1170820SAndrew Melnychenko vi->rss_hash_types_saved = new_hashtypes;
2455c1170820SAndrew Melnychenko vi->ctrl->rss.hash_types = vi->rss_hash_types_saved;
2456c1170820SAndrew Melnychenko if (vi->dev->features & NETIF_F_RXHASH)
2457c1170820SAndrew Melnychenko return virtnet_commit_rss_command(vi);
2458c1170820SAndrew Melnychenko }
2459c1170820SAndrew Melnychenko
2460c1170820SAndrew Melnychenko return true;
2461c1170820SAndrew Melnychenko }
246266846048SRick Jones
246366846048SRick Jones static void virtnet_get_drvinfo(struct net_device *dev,
246466846048SRick Jones struct ethtool_drvinfo *info)
246566846048SRick Jones {
246666846048SRick Jones struct virtnet_info *vi = netdev_priv(dev);
246766846048SRick Jones struct virtio_device *vdev = vi->vdev;
246866846048SRick Jones
246966846048SRick Jones strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
247066846048SRick Jones strlcpy(info->version, VIRTNET_DRIVER_VERSION, sizeof(info->version));
247166846048SRick Jones strlcpy(info->bus_info, virtio_bus_name(vdev), sizeof(info->bus_info));
247266846048SRick Jones
247366846048SRick Jones }
247466846048SRick Jones
2475d73bcd2cSJason Wang /* TODO: Eliminate OOO packets during switching */
2476d73bcd2cSJason Wang static int virtnet_set_channels(struct net_device *dev,
2477d73bcd2cSJason Wang struct ethtool_channels *channels)
2478d73bcd2cSJason Wang {
2479d73bcd2cSJason Wang struct virtnet_info *vi = netdev_priv(dev);
2480d73bcd2cSJason Wang u16 queue_pairs = channels->combined_count;
2481d73bcd2cSJason Wang int err;
2482d73bcd2cSJason Wang
2483d73bcd2cSJason Wang /* We don't support separate rx/tx channels.
2484d73bcd2cSJason Wang * We don't allow setting 'other' channels.
2485d73bcd2cSJason Wang */
2486d73bcd2cSJason Wang if (channels->rx_count || channels->tx_count || channels->other_count)
2487d73bcd2cSJason Wang return -EINVAL;
2488d73bcd2cSJason Wang
2489c18e9cd6SAmos Kong if (queue_pairs > vi->max_queue_pairs || queue_pairs == 0)
2490d73bcd2cSJason Wang return -EINVAL;
2491d73bcd2cSJason Wang
2492f600b690SJohn Fastabend /* For now we don't support modifying channels while XDP is loaded.
2493f600b690SJohn Fastabend * Also, when XDP is loaded all RX queues have XDP programs, so we only
2494f600b690SJohn Fastabend * need to check a single RX queue.
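 * (Checking rq[0] is sufficient because virtnet_xdp_set() installs or
 * removes the program on every RX queue under rtnl, so all queues agree.)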
2495f600b690SJohn Fastabend */ 2496f600b690SJohn Fastabend if (vi->rq[0].xdp_prog) 2497f600b690SJohn Fastabend return -EINVAL; 2498f600b690SJohn Fastabend 2499a0d1d0f4SSebastian Andrzej Siewior cpus_read_lock(); 250047315329SJohn Fastabend err = _virtnet_set_queues(vi, queue_pairs); 2501de33212fSJeff Dike if (err) { 2502a0d1d0f4SSebastian Andrzej Siewior cpus_read_unlock(); 2503de33212fSJeff Dike goto err; 2504d73bcd2cSJason Wang } 2505de33212fSJeff Dike virtnet_set_affinity(vi); 2506a0d1d0f4SSebastian Andrzej Siewior cpus_read_unlock(); 2507d73bcd2cSJason Wang 2508de33212fSJeff Dike netif_set_real_num_tx_queues(dev, queue_pairs); 2509de33212fSJeff Dike netif_set_real_num_rx_queues(dev, queue_pairs); 2510de33212fSJeff Dike err: 2511d73bcd2cSJason Wang return err; 2512d73bcd2cSJason Wang } 2513d73bcd2cSJason Wang 2514d7dfc5cfSToshiaki Makita static void virtnet_get_strings(struct net_device *dev, u32 stringset, u8 *data) 2515d7dfc5cfSToshiaki Makita { 2516d7dfc5cfSToshiaki Makita struct virtnet_info *vi = netdev_priv(dev); 2517d7dfc5cfSToshiaki Makita unsigned int i, j; 2518d7a9a01bSAlexander Duyck u8 *p = data; 2519d7dfc5cfSToshiaki Makita 2520d7dfc5cfSToshiaki Makita switch (stringset) { 2521d7dfc5cfSToshiaki Makita case ETH_SS_STATS: 2522d7dfc5cfSToshiaki Makita for (i = 0; i < vi->curr_queue_pairs; i++) { 2523d7a9a01bSAlexander Duyck for (j = 0; j < VIRTNET_RQ_STATS_LEN; j++) 2524d7a9a01bSAlexander Duyck ethtool_sprintf(&p, "rx_queue_%u_%s", i, 2525d7a9a01bSAlexander Duyck virtnet_rq_stats_desc[j].desc); 2526d7dfc5cfSToshiaki Makita } 2527d7dfc5cfSToshiaki Makita 2528d7dfc5cfSToshiaki Makita for (i = 0; i < vi->curr_queue_pairs; i++) { 2529d7a9a01bSAlexander Duyck for (j = 0; j < VIRTNET_SQ_STATS_LEN; j++) 2530d7a9a01bSAlexander Duyck ethtool_sprintf(&p, "tx_queue_%u_%s", i, 2531d7a9a01bSAlexander Duyck virtnet_sq_stats_desc[j].desc); 2532d7dfc5cfSToshiaki Makita } 2533d7dfc5cfSToshiaki Makita break; 2534d7dfc5cfSToshiaki Makita } 2535d7dfc5cfSToshiaki Makita } 2536d7dfc5cfSToshiaki Makita 2537d7dfc5cfSToshiaki Makita static int virtnet_get_sset_count(struct net_device *dev, int sset) 2538d7dfc5cfSToshiaki Makita { 2539d7dfc5cfSToshiaki Makita struct virtnet_info *vi = netdev_priv(dev); 2540d7dfc5cfSToshiaki Makita 2541d7dfc5cfSToshiaki Makita switch (sset) { 2542d7dfc5cfSToshiaki Makita case ETH_SS_STATS: 2543d7dfc5cfSToshiaki Makita return vi->curr_queue_pairs * (VIRTNET_RQ_STATS_LEN + 2544d7dfc5cfSToshiaki Makita VIRTNET_SQ_STATS_LEN); 2545d7dfc5cfSToshiaki Makita default: 2546d7dfc5cfSToshiaki Makita return -EOPNOTSUPP; 2547d7dfc5cfSToshiaki Makita } 2548d7dfc5cfSToshiaki Makita } 2549d7dfc5cfSToshiaki Makita 2550d7dfc5cfSToshiaki Makita static void virtnet_get_ethtool_stats(struct net_device *dev, 2551d7dfc5cfSToshiaki Makita struct ethtool_stats *stats, u64 *data) 2552d7dfc5cfSToshiaki Makita { 2553d7dfc5cfSToshiaki Makita struct virtnet_info *vi = netdev_priv(dev); 2554d7dfc5cfSToshiaki Makita unsigned int idx = 0, start, i, j; 2555d7dfc5cfSToshiaki Makita const u8 *stats_base; 2556d7dfc5cfSToshiaki Makita size_t offset; 2557d7dfc5cfSToshiaki Makita 2558d7dfc5cfSToshiaki Makita for (i = 0; i < vi->curr_queue_pairs; i++) { 2559d7dfc5cfSToshiaki Makita struct receive_queue *rq = &vi->rq[i]; 2560d7dfc5cfSToshiaki Makita 2561d46eeeafSJason Wang stats_base = (u8 *)&rq->stats; 2562d7dfc5cfSToshiaki Makita do { 2563d7dfc5cfSToshiaki Makita start = u64_stats_fetch_begin_irq(&rq->stats.syncp); 2564d7dfc5cfSToshiaki Makita for (j = 0; j < VIRTNET_RQ_STATS_LEN; j++) { 2565d7dfc5cfSToshiaki 
Makita offset = virtnet_rq_stats_desc[j].offset; 2566d7dfc5cfSToshiaki Makita data[idx + j] = *(u64 *)(stats_base + offset); 2567d7dfc5cfSToshiaki Makita } 2568d7dfc5cfSToshiaki Makita } while (u64_stats_fetch_retry_irq(&rq->stats.syncp, start)); 2569d7dfc5cfSToshiaki Makita idx += VIRTNET_RQ_STATS_LEN; 2570d7dfc5cfSToshiaki Makita } 2571d7dfc5cfSToshiaki Makita 2572d7dfc5cfSToshiaki Makita for (i = 0; i < vi->curr_queue_pairs; i++) { 2573d7dfc5cfSToshiaki Makita struct send_queue *sq = &vi->sq[i]; 2574d7dfc5cfSToshiaki Makita 2575d7dfc5cfSToshiaki Makita stats_base = (u8 *)&sq->stats; 2576d7dfc5cfSToshiaki Makita do { 2577d7dfc5cfSToshiaki Makita start = u64_stats_fetch_begin_irq(&sq->stats.syncp); 2578d7dfc5cfSToshiaki Makita for (j = 0; j < VIRTNET_SQ_STATS_LEN; j++) { 2579d7dfc5cfSToshiaki Makita offset = virtnet_sq_stats_desc[j].offset; 2580d7dfc5cfSToshiaki Makita data[idx + j] = *(u64 *)(stats_base + offset); 2581d7dfc5cfSToshiaki Makita } 2582d7dfc5cfSToshiaki Makita } while (u64_stats_fetch_retry_irq(&sq->stats.syncp, start)); 2583d7dfc5cfSToshiaki Makita idx += VIRTNET_SQ_STATS_LEN; 2584d7dfc5cfSToshiaki Makita } 2585d7dfc5cfSToshiaki Makita } 2586d7dfc5cfSToshiaki Makita 2587d73bcd2cSJason Wang static void virtnet_get_channels(struct net_device *dev, 2588d73bcd2cSJason Wang struct ethtool_channels *channels) 2589d73bcd2cSJason Wang { 2590d73bcd2cSJason Wang struct virtnet_info *vi = netdev_priv(dev); 2591d73bcd2cSJason Wang 2592d73bcd2cSJason Wang channels->combined_count = vi->curr_queue_pairs; 2593d73bcd2cSJason Wang channels->max_combined = vi->max_queue_pairs; 2594d73bcd2cSJason Wang channels->max_other = 0; 2595d73bcd2cSJason Wang channels->rx_count = 0; 2596d73bcd2cSJason Wang channels->tx_count = 0; 2597d73bcd2cSJason Wang channels->other_count = 0; 2598d73bcd2cSJason Wang } 2599d73bcd2cSJason Wang 2600ebb6b4b1SPhilippe Reynes static int virtnet_set_link_ksettings(struct net_device *dev, 2601ebb6b4b1SPhilippe Reynes const struct ethtool_link_ksettings *cmd) 260216032be5SNikolay Aleksandrov { 260316032be5SNikolay Aleksandrov struct virtnet_info *vi = netdev_priv(dev); 260416032be5SNikolay Aleksandrov 26059aedc6e2SCris Forno return ethtool_virtdev_set_link_ksettings(dev, cmd, 26069aedc6e2SCris Forno &vi->speed, &vi->duplex); 260716032be5SNikolay Aleksandrov } 260816032be5SNikolay Aleksandrov 2609ebb6b4b1SPhilippe Reynes static int virtnet_get_link_ksettings(struct net_device *dev, 2610ebb6b4b1SPhilippe Reynes struct ethtool_link_ksettings *cmd) 261116032be5SNikolay Aleksandrov { 261216032be5SNikolay Aleksandrov struct virtnet_info *vi = netdev_priv(dev); 261316032be5SNikolay Aleksandrov 2614ebb6b4b1SPhilippe Reynes cmd->base.speed = vi->speed; 2615ebb6b4b1SPhilippe Reynes cmd->base.duplex = vi->duplex; 2616ebb6b4b1SPhilippe Reynes cmd->base.port = PORT_OTHER; 261716032be5SNikolay Aleksandrov 261816032be5SNikolay Aleksandrov return 0; 261916032be5SNikolay Aleksandrov } 262016032be5SNikolay Aleksandrov 26210c465be1SJason Wang static int virtnet_set_coalesce(struct net_device *dev, 2622f3ccfda1SYufeng Mo struct ethtool_coalesce *ec, 2623f3ccfda1SYufeng Mo struct kernel_ethtool_coalesce *kernel_coal, 2624f3ccfda1SYufeng Mo struct netlink_ext_ack *extack) 26250c465be1SJason Wang { 26260c465be1SJason Wang struct virtnet_info *vi = netdev_priv(dev); 26270c465be1SJason Wang int i, napi_weight; 26280c465be1SJason Wang 2629a51e5206SJakub Kicinski if (ec->tx_max_coalesced_frames > 1 || 2630a51e5206SJakub Kicinski ec->rx_max_coalesced_frames != 1) 26310c465be1SJason Wang return 
-EINVAL; 26320c465be1SJason Wang 26330c465be1SJason Wang napi_weight = ec->tx_max_coalesced_frames ? NAPI_POLL_WEIGHT : 0; 26340c465be1SJason Wang if (napi_weight ^ vi->sq[0].napi.weight) { 26350c465be1SJason Wang if (dev->flags & IFF_UP) 26360c465be1SJason Wang return -EBUSY; 26370c465be1SJason Wang for (i = 0; i < vi->max_queue_pairs; i++) 26380c465be1SJason Wang vi->sq[i].napi.weight = napi_weight; 26390c465be1SJason Wang } 26400c465be1SJason Wang 26410c465be1SJason Wang return 0; 26420c465be1SJason Wang } 26430c465be1SJason Wang 26440c465be1SJason Wang static int virtnet_get_coalesce(struct net_device *dev, 2645f3ccfda1SYufeng Mo struct ethtool_coalesce *ec, 2646f3ccfda1SYufeng Mo struct kernel_ethtool_coalesce *kernel_coal, 2647f3ccfda1SYufeng Mo struct netlink_ext_ack *extack) 26480c465be1SJason Wang { 26490c465be1SJason Wang struct ethtool_coalesce ec_default = { 26500c465be1SJason Wang .cmd = ETHTOOL_GCOALESCE, 26510c465be1SJason Wang .rx_max_coalesced_frames = 1, 26520c465be1SJason Wang }; 26530c465be1SJason Wang struct virtnet_info *vi = netdev_priv(dev); 26540c465be1SJason Wang 26550c465be1SJason Wang memcpy(ec, &ec_default, sizeof(ec_default)); 26560c465be1SJason Wang 26570c465be1SJason Wang if (vi->sq[0].napi.weight) 26580c465be1SJason Wang ec->tx_max_coalesced_frames = 1; 26590c465be1SJason Wang 26600c465be1SJason Wang return 0; 26610c465be1SJason Wang } 26620c465be1SJason Wang 266316032be5SNikolay Aleksandrov static void virtnet_init_settings(struct net_device *dev) 266416032be5SNikolay Aleksandrov { 266516032be5SNikolay Aleksandrov struct virtnet_info *vi = netdev_priv(dev); 266616032be5SNikolay Aleksandrov 266716032be5SNikolay Aleksandrov vi->speed = SPEED_UNKNOWN; 266816032be5SNikolay Aleksandrov vi->duplex = DUPLEX_UNKNOWN; 266916032be5SNikolay Aleksandrov } 267016032be5SNikolay Aleksandrov 2671faa9b39fSJason Baron static void virtnet_update_settings(struct virtnet_info *vi) 2672faa9b39fSJason Baron { 2673faa9b39fSJason Baron u32 speed; 2674faa9b39fSJason Baron u8 duplex; 2675faa9b39fSJason Baron 2676faa9b39fSJason Baron if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_SPEED_DUPLEX)) 2677faa9b39fSJason Baron return; 2678faa9b39fSJason Baron 267964ffa39dSMichael S. Tsirkin virtio_cread_le(vi->vdev, struct virtio_net_config, speed, &speed); 268064ffa39dSMichael S. Tsirkin 2681faa9b39fSJason Baron if (ethtool_validate_speed(speed)) 2682faa9b39fSJason Baron vi->speed = speed; 268364ffa39dSMichael S. Tsirkin 268464ffa39dSMichael S. Tsirkin virtio_cread_le(vi->vdev, struct virtio_net_config, duplex, &duplex); 268564ffa39dSMichael S. 
Tsirkin 2686faa9b39fSJason Baron if (ethtool_validate_duplex(duplex)) 2687faa9b39fSJason Baron vi->duplex = duplex; 2688faa9b39fSJason Baron } 2689faa9b39fSJason Baron 2690c7114b12SAndrew Melnychenko static u32 virtnet_get_rxfh_key_size(struct net_device *dev) 2691c7114b12SAndrew Melnychenko { 2692c7114b12SAndrew Melnychenko return ((struct virtnet_info *)netdev_priv(dev))->rss_key_size; 2693c7114b12SAndrew Melnychenko } 2694c7114b12SAndrew Melnychenko 2695c7114b12SAndrew Melnychenko static u32 virtnet_get_rxfh_indir_size(struct net_device *dev) 2696c7114b12SAndrew Melnychenko { 2697c7114b12SAndrew Melnychenko return ((struct virtnet_info *)netdev_priv(dev))->rss_indir_table_size; 2698c7114b12SAndrew Melnychenko } 2699c7114b12SAndrew Melnychenko 2700c7114b12SAndrew Melnychenko static int virtnet_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, u8 *hfunc) 2701c7114b12SAndrew Melnychenko { 2702c7114b12SAndrew Melnychenko struct virtnet_info *vi = netdev_priv(dev); 2703c7114b12SAndrew Melnychenko int i; 2704c7114b12SAndrew Melnychenko 2705c7114b12SAndrew Melnychenko if (indir) { 2706c7114b12SAndrew Melnychenko for (i = 0; i < vi->rss_indir_table_size; ++i) 2707c7114b12SAndrew Melnychenko indir[i] = vi->ctrl->rss.indirection_table[i]; 2708c7114b12SAndrew Melnychenko } 2709c7114b12SAndrew Melnychenko 2710c7114b12SAndrew Melnychenko if (key) 2711c7114b12SAndrew Melnychenko memcpy(key, vi->ctrl->rss.key, vi->rss_key_size); 2712c7114b12SAndrew Melnychenko 2713c7114b12SAndrew Melnychenko if (hfunc) 2714c7114b12SAndrew Melnychenko *hfunc = ETH_RSS_HASH_TOP; 2715c7114b12SAndrew Melnychenko 2716c7114b12SAndrew Melnychenko return 0; 2717c7114b12SAndrew Melnychenko } 2718c7114b12SAndrew Melnychenko 2719c7114b12SAndrew Melnychenko static int virtnet_set_rxfh(struct net_device *dev, const u32 *indir, const u8 *key, const u8 hfunc) 2720c7114b12SAndrew Melnychenko { 2721c7114b12SAndrew Melnychenko struct virtnet_info *vi = netdev_priv(dev); 2722c7114b12SAndrew Melnychenko int i; 2723c7114b12SAndrew Melnychenko 2724c7114b12SAndrew Melnychenko if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP) 2725c7114b12SAndrew Melnychenko return -EOPNOTSUPP; 2726c7114b12SAndrew Melnychenko 2727c7114b12SAndrew Melnychenko if (indir) { 2728c7114b12SAndrew Melnychenko for (i = 0; i < vi->rss_indir_table_size; ++i) 2729c7114b12SAndrew Melnychenko vi->ctrl->rss.indirection_table[i] = indir[i]; 2730c7114b12SAndrew Melnychenko } 2731c7114b12SAndrew Melnychenko if (key) 2732c7114b12SAndrew Melnychenko memcpy(vi->ctrl->rss.key, key, vi->rss_key_size); 2733c7114b12SAndrew Melnychenko 2734c7114b12SAndrew Melnychenko virtnet_commit_rss_command(vi); 2735c7114b12SAndrew Melnychenko 2736c7114b12SAndrew Melnychenko return 0; 2737c7114b12SAndrew Melnychenko } 2738c7114b12SAndrew Melnychenko 2739c7114b12SAndrew Melnychenko static int virtnet_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info, u32 *rule_locs) 2740c7114b12SAndrew Melnychenko { 2741c7114b12SAndrew Melnychenko struct virtnet_info *vi = netdev_priv(dev); 2742c7114b12SAndrew Melnychenko int rc = 0; 2743c7114b12SAndrew Melnychenko 2744c7114b12SAndrew Melnychenko switch (info->cmd) { 2745c7114b12SAndrew Melnychenko case ETHTOOL_GRXRINGS: 2746c7114b12SAndrew Melnychenko info->data = vi->curr_queue_pairs; 2747c7114b12SAndrew Melnychenko break; 2748c1170820SAndrew Melnychenko case ETHTOOL_GRXFH: 2749c1170820SAndrew Melnychenko virtnet_get_hashflow(vi, info); 2750c1170820SAndrew Melnychenko break; 2751c1170820SAndrew Melnychenko default: 
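		/* Only ETHTOOL_GRXRINGS and ETHTOOL_GRXFH are implemented;
		 * any other ethtool -n request lands here.
		 */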
2752c1170820SAndrew Melnychenko rc = -EOPNOTSUPP; 2753c1170820SAndrew Melnychenko } 2754c1170820SAndrew Melnychenko 2755c1170820SAndrew Melnychenko return rc; 2756c1170820SAndrew Melnychenko } 2757c1170820SAndrew Melnychenko 2758c1170820SAndrew Melnychenko static int virtnet_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info) 2759c1170820SAndrew Melnychenko { 2760c1170820SAndrew Melnychenko struct virtnet_info *vi = netdev_priv(dev); 2761c1170820SAndrew Melnychenko int rc = 0; 2762c1170820SAndrew Melnychenko 2763c1170820SAndrew Melnychenko switch (info->cmd) { 2764c1170820SAndrew Melnychenko case ETHTOOL_SRXFH: 2765c1170820SAndrew Melnychenko if (!virtnet_set_hashflow(vi, info)) 2766c1170820SAndrew Melnychenko rc = -EINVAL; 2767c1170820SAndrew Melnychenko 2768c1170820SAndrew Melnychenko break; 2769c7114b12SAndrew Melnychenko default: 2770c7114b12SAndrew Melnychenko rc = -EOPNOTSUPP; 2771c7114b12SAndrew Melnychenko } 2772c7114b12SAndrew Melnychenko 2773c7114b12SAndrew Melnychenko return rc; 2774c7114b12SAndrew Melnychenko } 2775c7114b12SAndrew Melnychenko 27760fc0b732SStephen Hemminger static const struct ethtool_ops virtnet_ethtool_ops = { 2777a51e5206SJakub Kicinski .supported_coalesce_params = ETHTOOL_COALESCE_MAX_FRAMES, 277866846048SRick Jones .get_drvinfo = virtnet_get_drvinfo, 27799f4d26d0SMark McLoughlin .get_link = ethtool_op_get_link, 27808f9f4668SRick Jones .get_ringparam = virtnet_get_ringparam, 2781d7dfc5cfSToshiaki Makita .get_strings = virtnet_get_strings, 2782d7dfc5cfSToshiaki Makita .get_sset_count = virtnet_get_sset_count, 2783d7dfc5cfSToshiaki Makita .get_ethtool_stats = virtnet_get_ethtool_stats, 2784d73bcd2cSJason Wang .set_channels = virtnet_set_channels, 2785d73bcd2cSJason Wang .get_channels = virtnet_get_channels, 2786074c3582SJacob Keller .get_ts_info = ethtool_op_get_ts_info, 2787ebb6b4b1SPhilippe Reynes .get_link_ksettings = virtnet_get_link_ksettings, 2788ebb6b4b1SPhilippe Reynes .set_link_ksettings = virtnet_set_link_ksettings, 27890c465be1SJason Wang .set_coalesce = virtnet_set_coalesce, 27900c465be1SJason Wang .get_coalesce = virtnet_get_coalesce, 2791c7114b12SAndrew Melnychenko .get_rxfh_key_size = virtnet_get_rxfh_key_size, 2792c7114b12SAndrew Melnychenko .get_rxfh_indir_size = virtnet_get_rxfh_indir_size, 2793c7114b12SAndrew Melnychenko .get_rxfh = virtnet_get_rxfh, 2794c7114b12SAndrew Melnychenko .set_rxfh = virtnet_set_rxfh, 2795c7114b12SAndrew Melnychenko .get_rxnfc = virtnet_get_rxnfc, 2796c1170820SAndrew Melnychenko .set_rxnfc = virtnet_set_rxnfc, 2797a9ea3fc6SHerbert Xu }; 2798a9ea3fc6SHerbert Xu 27999fe7bfceSJohn Fastabend static void virtnet_freeze_down(struct virtio_device *vdev) 28009fe7bfceSJohn Fastabend { 28019fe7bfceSJohn Fastabend struct virtnet_info *vi = vdev->priv; 28029fe7bfceSJohn Fastabend 28039fe7bfceSJohn Fastabend /* Make sure no work handler is accessing the device */ 28049fe7bfceSJohn Fastabend flush_work(&vi->config_work); 28059fe7bfceSJohn Fastabend 280605c998b7SAke Koomsin netif_tx_lock_bh(vi->dev); 28079fe7bfceSJohn Fastabend netif_device_detach(vi->dev); 280805c998b7SAke Koomsin netif_tx_unlock_bh(vi->dev); 28098af52fe9SStephan Gerhold if (netif_running(vi->dev)) 28108af52fe9SStephan Gerhold virtnet_close(vi->dev); 28119fe7bfceSJohn Fastabend } 28129fe7bfceSJohn Fastabend 28139fe7bfceSJohn Fastabend static int init_vqs(struct virtnet_info *vi); 28149fe7bfceSJohn Fastabend 28159fe7bfceSJohn Fastabend static int virtnet_restore_up(struct virtio_device *vdev) 28169fe7bfceSJohn Fastabend { 28179fe7bfceSJohn Fastabend 
struct virtnet_info *vi = vdev->priv;
28188af52fe9SStephan Gerhold int err;
28199fe7bfceSJohn Fastabend
28209fe7bfceSJohn Fastabend err = init_vqs(vi);
28219fe7bfceSJohn Fastabend if (err)
28229fe7bfceSJohn Fastabend return err;
28239fe7bfceSJohn Fastabend
28249fe7bfceSJohn Fastabend virtio_device_ready(vdev);
28259fe7bfceSJohn Fastabend
28265a159128SJason Wang enable_delayed_refill(vi);
28275a159128SJason Wang
28289fe7bfceSJohn Fastabend if (netif_running(vi->dev)) {
28298af52fe9SStephan Gerhold err = virtnet_open(vi->dev);
28308af52fe9SStephan Gerhold if (err)
28318af52fe9SStephan Gerhold return err;
28329fe7bfceSJohn Fastabend }
28339fe7bfceSJohn Fastabend
283405c998b7SAke Koomsin netif_tx_lock_bh(vi->dev);
28359fe7bfceSJohn Fastabend netif_device_attach(vi->dev);
283605c998b7SAke Koomsin netif_tx_unlock_bh(vi->dev);
28379fe7bfceSJohn Fastabend return err;
28389fe7bfceSJohn Fastabend }
28399fe7bfceSJohn Fastabend
28403f93522fSJason Wang static int virtnet_set_guest_offloads(struct virtnet_info *vi, u64 offloads)
28413f93522fSJason Wang {
28423f93522fSJason Wang struct scatterlist sg;
284312e57169SMichael S. Tsirkin vi->ctrl->offloads = cpu_to_virtio64(vi->vdev, offloads);
28443f93522fSJason Wang
284512e57169SMichael S. Tsirkin sg_init_one(&sg, &vi->ctrl->offloads, sizeof(vi->ctrl->offloads));
28463f93522fSJason Wang
28473f93522fSJason Wang if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_GUEST_OFFLOADS,
28483f93522fSJason Wang VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET, &sg)) {
28493f93522fSJason Wang dev_warn(&vi->dev->dev, "Failed to set guest offloads.\n");
28503f93522fSJason Wang return -EINVAL;
28513f93522fSJason Wang }
28523f93522fSJason Wang
28533f93522fSJason Wang return 0;
28543f93522fSJason Wang }
28553f93522fSJason Wang
28563f93522fSJason Wang static int virtnet_clear_guest_offloads(struct virtnet_info *vi)
28573f93522fSJason Wang {
28583f93522fSJason Wang u64 offloads = 0;
28593f93522fSJason Wang
28603f93522fSJason Wang if (!vi->guest_offloads)
28613f93522fSJason Wang return 0;
28623f93522fSJason Wang
28633f93522fSJason Wang return virtnet_set_guest_offloads(vi, offloads);
28643f93522fSJason Wang }
28653f93522fSJason Wang
28663f93522fSJason Wang static int virtnet_restore_guest_offloads(struct virtnet_info *vi)
28673f93522fSJason Wang {
28683f93522fSJason Wang u64 offloads = vi->guest_offloads;
28693f93522fSJason Wang
28703f93522fSJason Wang if (!vi->guest_offloads)
28713f93522fSJason Wang return 0;
28723f93522fSJason Wang
28733f93522fSJason Wang return virtnet_set_guest_offloads(vi, offloads);
28743f93522fSJason Wang }
28753f93522fSJason Wang
28769861ce03SJakub Kicinski static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
28779861ce03SJakub Kicinski struct netlink_ext_ack *extack)
2878f600b690SJohn Fastabend {
2879f600b690SJohn Fastabend unsigned long int max_sz = PAGE_SIZE - sizeof(struct padded_vnet_hdr);
2880f600b690SJohn Fastabend struct virtnet_info *vi = netdev_priv(dev);
2881f600b690SJohn Fastabend struct bpf_prog *old_prog;
2882017b29c3SJason Wang u16 xdp_qp = 0, curr_qp;
2883672aafd5SJohn Fastabend int i, err;
2884f600b690SJohn Fastabend
28853f93522fSJason Wang if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS)
28863f93522fSJason Wang && (virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO4) ||
288792502fe8SJason Wang virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO6) ||
288892502fe8SJason Wang virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_ECN) ||
288918ba58e1SJason Wang virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_UFO) ||
289018ba58e1SJason Wang virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_CSUM))) { 2891dbcf24d1SJason Wang NL_SET_ERR_MSG_MOD(extack, "Can't set XDP while host is implementing GRO_HW/CSUM, disable GRO_HW/CSUM first"); 2892f600b690SJohn Fastabend return -EOPNOTSUPP; 2893f600b690SJohn Fastabend } 2894f600b690SJohn Fastabend 2895f600b690SJohn Fastabend if (vi->mergeable_rx_bufs && !vi->any_header_sg) { 28964d463c4dSDaniel Borkmann NL_SET_ERR_MSG_MOD(extack, "XDP expects header/data in single page, any_header_sg required"); 2897f600b690SJohn Fastabend return -EINVAL; 2898f600b690SJohn Fastabend } 2899f600b690SJohn Fastabend 2900f600b690SJohn Fastabend if (dev->mtu > max_sz) { 29014d463c4dSDaniel Borkmann NL_SET_ERR_MSG_MOD(extack, "MTU too large to enable XDP"); 2902f600b690SJohn Fastabend netdev_warn(dev, "XDP requires MTU less than %lu\n", max_sz); 2903f600b690SJohn Fastabend return -EINVAL; 2904f600b690SJohn Fastabend } 2905f600b690SJohn Fastabend 2906672aafd5SJohn Fastabend curr_qp = vi->curr_queue_pairs - vi->xdp_queue_pairs; 2907672aafd5SJohn Fastabend if (prog) 2908672aafd5SJohn Fastabend xdp_qp = nr_cpu_ids; 2909672aafd5SJohn Fastabend 2910672aafd5SJohn Fastabend /* XDP requires extra queues for XDP_TX */ 2911672aafd5SJohn Fastabend if (curr_qp + xdp_qp > vi->max_queue_pairs) { 29129ce4e3d6SXuan Zhuo netdev_warn_once(dev, "XDP request %i queues but max is %i. XDP_TX and XDP_REDIRECT will operate in a slower locked tx mode.\n", 2913672aafd5SJohn Fastabend curr_qp + xdp_qp, vi->max_queue_pairs); 291497c2c69eSXuan Zhuo xdp_qp = 0; 2915672aafd5SJohn Fastabend } 2916672aafd5SJohn Fastabend 291703aa6d34SToshiaki Makita old_prog = rtnl_dereference(vi->rq[0].xdp_prog); 291803aa6d34SToshiaki Makita if (!prog && !old_prog) 291903aa6d34SToshiaki Makita return 0; 292003aa6d34SToshiaki Makita 292185192dbfSAndrii Nakryiko if (prog) 292285192dbfSAndrii Nakryiko bpf_prog_add(prog, vi->max_queue_pairs - 1); 29232de2f7f4SJohn Fastabend 29244941d472SJason Wang /* Make sure NAPI is not using any XDP TX queues for RX. 
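 * (Quiescing both the RX NAPI and the TX NAPI of every queue pair means
 * no CPU can still be inside an XDP_TX path while the program pointers
 * and queue bookkeeping are swapped below.)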
*/ 2925534da5e8SToshiaki Makita if (netif_running(dev)) { 2926534da5e8SToshiaki Makita for (i = 0; i < vi->max_queue_pairs; i++) { 29274941d472SJason Wang napi_disable(&vi->rq[i].napi); 2928534da5e8SToshiaki Makita virtnet_napi_tx_disable(&vi->sq[i].napi); 2929534da5e8SToshiaki Makita } 2930534da5e8SToshiaki Makita } 29312de2f7f4SJohn Fastabend 293203aa6d34SToshiaki Makita if (!prog) { 293303aa6d34SToshiaki Makita for (i = 0; i < vi->max_queue_pairs; i++) { 293403aa6d34SToshiaki Makita rcu_assign_pointer(vi->rq[i].xdp_prog, prog); 293503aa6d34SToshiaki Makita if (i == 0) 293603aa6d34SToshiaki Makita virtnet_restore_guest_offloads(vi); 293703aa6d34SToshiaki Makita } 293803aa6d34SToshiaki Makita synchronize_net(); 293903aa6d34SToshiaki Makita } 294003aa6d34SToshiaki Makita 29414941d472SJason Wang err = _virtnet_set_queues(vi, curr_qp + xdp_qp); 29424941d472SJason Wang if (err) 29434941d472SJason Wang goto err; 2944188313c1SToshiaki Makita netif_set_real_num_rx_queues(dev, curr_qp + xdp_qp); 29454941d472SJason Wang vi->xdp_queue_pairs = xdp_qp; 2946f600b690SJohn Fastabend 294703aa6d34SToshiaki Makita if (prog) { 294897c2c69eSXuan Zhuo vi->xdp_enabled = true; 2949f600b690SJohn Fastabend for (i = 0; i < vi->max_queue_pairs; i++) { 2950f600b690SJohn Fastabend rcu_assign_pointer(vi->rq[i].xdp_prog, prog); 295103aa6d34SToshiaki Makita if (i == 0 && !old_prog) 29523f93522fSJason Wang virtnet_clear_guest_offloads(vi); 29533f93522fSJason Wang } 295497c2c69eSXuan Zhuo } else { 295597c2c69eSXuan Zhuo vi->xdp_enabled = false; 295603aa6d34SToshiaki Makita } 295703aa6d34SToshiaki Makita 295803aa6d34SToshiaki Makita for (i = 0; i < vi->max_queue_pairs; i++) { 2959f600b690SJohn Fastabend if (old_prog) 2960f600b690SJohn Fastabend bpf_prog_put(old_prog); 2961534da5e8SToshiaki Makita if (netif_running(dev)) { 29624941d472SJason Wang virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi); 2963534da5e8SToshiaki Makita virtnet_napi_tx_enable(vi, vi->sq[i].vq, 2964534da5e8SToshiaki Makita &vi->sq[i].napi); 2965534da5e8SToshiaki Makita } 2966f600b690SJohn Fastabend } 2967f600b690SJohn Fastabend 2968f600b690SJohn Fastabend return 0; 29692de2f7f4SJohn Fastabend 29704941d472SJason Wang err: 297103aa6d34SToshiaki Makita if (!prog) { 297203aa6d34SToshiaki Makita virtnet_clear_guest_offloads(vi); 29734941d472SJason Wang for (i = 0; i < vi->max_queue_pairs; i++) 297403aa6d34SToshiaki Makita rcu_assign_pointer(vi->rq[i].xdp_prog, old_prog); 297503aa6d34SToshiaki Makita } 297603aa6d34SToshiaki Makita 29778be4d9a4SToshiaki Makita if (netif_running(dev)) { 2978534da5e8SToshiaki Makita for (i = 0; i < vi->max_queue_pairs; i++) { 29794941d472SJason Wang virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi); 2980534da5e8SToshiaki Makita virtnet_napi_tx_enable(vi, vi->sq[i].vq, 2981534da5e8SToshiaki Makita &vi->sq[i].napi); 2982534da5e8SToshiaki Makita } 29838be4d9a4SToshiaki Makita } 29842de2f7f4SJohn Fastabend if (prog) 29852de2f7f4SJohn Fastabend bpf_prog_sub(prog, vi->max_queue_pairs - 1); 29862de2f7f4SJohn Fastabend return err; 2987f600b690SJohn Fastabend } 2988f600b690SJohn Fastabend 2989f4e63525SJakub Kicinski static int virtnet_xdp(struct net_device *dev, struct netdev_bpf *xdp) 2990f600b690SJohn Fastabend { 2991f600b690SJohn Fastabend switch (xdp->command) { 2992f600b690SJohn Fastabend case XDP_SETUP_PROG: 29939861ce03SJakub Kicinski return virtnet_xdp_set(dev, xdp->prog, xdp->extack); 2994f600b690SJohn Fastabend default: 2995f600b690SJohn Fastabend return -EINVAL; 2996f600b690SJohn Fastabend } 2997f600b690SJohn Fastabend } 
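/* Worked example (editorial sketch; the concrete numbers are assumed):
 * virtnet_xdp_set() above reserves one TX queue per possible CPU so the
 * XDP_TX path can transmit without locking.  With nr_cpu_ids = 4, a
 * device offering max_queue_pairs = 4 and curr_qp = 2 in use:
 *
 *	xdp_qp = nr_cpu_ids = 4;
 *	curr_qp + xdp_qp = 2 + 4 = 6 > max_queue_pairs = 4
 *
 * so xdp_qp is reset to 0 and XDP_TX/XDP_REDIRECT share the regular TX
 * queues in the slower, locked mode that netdev_warn_once() announces.
 */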
2998f600b690SJohn Fastabend 2999ba5e4426SSridhar Samudrala static int virtnet_get_phys_port_name(struct net_device *dev, char *buf, 3000ba5e4426SSridhar Samudrala size_t len) 3001ba5e4426SSridhar Samudrala { 3002ba5e4426SSridhar Samudrala struct virtnet_info *vi = netdev_priv(dev); 3003ba5e4426SSridhar Samudrala int ret; 3004ba5e4426SSridhar Samudrala 3005ba5e4426SSridhar Samudrala if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_STANDBY)) 3006ba5e4426SSridhar Samudrala return -EOPNOTSUPP; 3007ba5e4426SSridhar Samudrala 3008ba5e4426SSridhar Samudrala ret = snprintf(buf, len, "sby"); 3009ba5e4426SSridhar Samudrala if (ret >= len) 3010ba5e4426SSridhar Samudrala return -EOPNOTSUPP; 3011ba5e4426SSridhar Samudrala 3012ba5e4426SSridhar Samudrala return 0; 3013ba5e4426SSridhar Samudrala } 3014ba5e4426SSridhar Samudrala 3015a02e8964SWillem de Bruijn static int virtnet_set_features(struct net_device *dev, 3016a02e8964SWillem de Bruijn netdev_features_t features) 3017a02e8964SWillem de Bruijn { 3018a02e8964SWillem de Bruijn struct virtnet_info *vi = netdev_priv(dev); 3019cf8691cbSMichael S. Tsirkin u64 offloads; 3020a02e8964SWillem de Bruijn int err; 3021a02e8964SWillem de Bruijn 3022dbcf24d1SJason Wang if ((dev->features ^ features) & NETIF_F_GRO_HW) { 302397c2c69eSXuan Zhuo if (vi->xdp_enabled) 3024a02e8964SWillem de Bruijn return -EBUSY; 3025a02e8964SWillem de Bruijn 3026dbcf24d1SJason Wang if (features & NETIF_F_GRO_HW) 3027cf8691cbSMichael S. Tsirkin offloads = vi->guest_offloads_capable; 3028a02e8964SWillem de Bruijn else 3029cf8691cbSMichael S. Tsirkin offloads = vi->guest_offloads_capable & 3030dbcf24d1SJason Wang ~GUEST_OFFLOAD_GRO_HW_MASK; 3031a02e8964SWillem de Bruijn 3032a02e8964SWillem de Bruijn err = virtnet_set_guest_offloads(vi, offloads); 3033a02e8964SWillem de Bruijn if (err) 3034a02e8964SWillem de Bruijn return err; 30353618ad2aSTonghao Zhang vi->guest_offloads = offloads; 3036cf8691cbSMichael S. Tsirkin } 3037cf8691cbSMichael S. 
Tsirkin 3038c7114b12SAndrew Melnychenko if ((dev->features ^ features) & NETIF_F_RXHASH) { 3039c7114b12SAndrew Melnychenko if (features & NETIF_F_RXHASH) 3040c1170820SAndrew Melnychenko vi->ctrl->rss.hash_types = vi->rss_hash_types_saved; 3041c7114b12SAndrew Melnychenko else 3042c7114b12SAndrew Melnychenko vi->ctrl->rss.hash_types = VIRTIO_NET_HASH_REPORT_NONE; 3043c7114b12SAndrew Melnychenko 3044c7114b12SAndrew Melnychenko if (!virtnet_commit_rss_command(vi)) 3045c7114b12SAndrew Melnychenko return -EINVAL; 3046c7114b12SAndrew Melnychenko } 3047c7114b12SAndrew Melnychenko 3048a02e8964SWillem de Bruijn return 0; 3049a02e8964SWillem de Bruijn } 3050a02e8964SWillem de Bruijn 3051a520794bSTony Lu static void virtnet_tx_timeout(struct net_device *dev, unsigned int txqueue) 3052a520794bSTony Lu { 3053a520794bSTony Lu struct virtnet_info *priv = netdev_priv(dev); 3054a520794bSTony Lu struct send_queue *sq = &priv->sq[txqueue]; 3055a520794bSTony Lu struct netdev_queue *txq = netdev_get_tx_queue(dev, txqueue); 3056a520794bSTony Lu 3057a520794bSTony Lu u64_stats_update_begin(&sq->stats.syncp); 3058a520794bSTony Lu sq->stats.tx_timeouts++; 3059a520794bSTony Lu u64_stats_update_end(&sq->stats.syncp); 3060a520794bSTony Lu 3061a520794bSTony Lu netdev_err(dev, "TX timeout on queue: %u, sq: %s, vq: 0x%x, name: %s, %u usecs ago\n", 3062a520794bSTony Lu txqueue, sq->name, sq->vq->index, sq->vq->name, 30635337824fSEric Dumazet jiffies_to_usecs(jiffies - READ_ONCE(txq->trans_start))); 3064a520794bSTony Lu } 3065a520794bSTony Lu 306676288b4eSStephen Hemminger static const struct net_device_ops virtnet_netdev = { 306776288b4eSStephen Hemminger .ndo_open = virtnet_open, 306876288b4eSStephen Hemminger .ndo_stop = virtnet_close, 306976288b4eSStephen Hemminger .ndo_start_xmit = start_xmit, 307076288b4eSStephen Hemminger .ndo_validate_addr = eth_validate_addr, 30719c46f6d4SAlex Williamson .ndo_set_mac_address = virtnet_set_mac_address, 30722af7698eSAlex Williamson .ndo_set_rx_mode = virtnet_set_rx_mode, 30733fa2a1dfSstephen hemminger .ndo_get_stats64 = virtnet_stats, 30741824a989SAlex Williamson .ndo_vlan_rx_add_vid = virtnet_vlan_rx_add_vid, 30751824a989SAlex Williamson .ndo_vlan_rx_kill_vid = virtnet_vlan_rx_kill_vid, 3076f4e63525SJakub Kicinski .ndo_bpf = virtnet_xdp, 3077186b3c99SJason Wang .ndo_xdp_xmit = virtnet_xdp_xmit, 30782836b4f2SVlad Yasevich .ndo_features_check = passthru_features_check, 3079ba5e4426SSridhar Samudrala .ndo_get_phys_port_name = virtnet_get_phys_port_name, 3080a02e8964SWillem de Bruijn .ndo_set_features = virtnet_set_features, 3081a520794bSTony Lu .ndo_tx_timeout = virtnet_tx_timeout, 308276288b4eSStephen Hemminger }; 308376288b4eSStephen Hemminger 3084586d17c5SJason Wang static void virtnet_config_changed_work(struct work_struct *work) 30859f4d26d0SMark McLoughlin { 3086586d17c5SJason Wang struct virtnet_info *vi = 3087586d17c5SJason Wang container_of(work, struct virtnet_info, config_work); 30889f4d26d0SMark McLoughlin u16 v; 30899f4d26d0SMark McLoughlin 3090855e0c52SRusty Russell if (virtio_cread_feature(vi->vdev, VIRTIO_NET_F_STATUS, 3091855e0c52SRusty Russell struct virtio_net_config, status, &v) < 0) 3092507613bfSMichael S. 
Tsirkin return; 3093586d17c5SJason Wang 3094586d17c5SJason Wang if (v & VIRTIO_NET_S_ANNOUNCE) { 3095ee89bab1SAmerigo Wang netdev_notify_peers(vi->dev); 3096586d17c5SJason Wang virtnet_ack_link_announce(vi); 3097586d17c5SJason Wang } 30989f4d26d0SMark McLoughlin 30999f4d26d0SMark McLoughlin /* Ignore unknown (future) status bits */ 31009f4d26d0SMark McLoughlin v &= VIRTIO_NET_S_LINK_UP; 31019f4d26d0SMark McLoughlin 31029f4d26d0SMark McLoughlin if (vi->status == v) 3103507613bfSMichael S. Tsirkin return; 31049f4d26d0SMark McLoughlin 31059f4d26d0SMark McLoughlin vi->status = v; 31069f4d26d0SMark McLoughlin 31079f4d26d0SMark McLoughlin if (vi->status & VIRTIO_NET_S_LINK_UP) { 3108faa9b39fSJason Baron virtnet_update_settings(vi); 31099f4d26d0SMark McLoughlin netif_carrier_on(vi->dev); 3110986a4f4dSJason Wang netif_tx_wake_all_queues(vi->dev); 31119f4d26d0SMark McLoughlin } else { 31129f4d26d0SMark McLoughlin netif_carrier_off(vi->dev); 3113986a4f4dSJason Wang netif_tx_stop_all_queues(vi->dev); 31149f4d26d0SMark McLoughlin } 31159f4d26d0SMark McLoughlin } 31169f4d26d0SMark McLoughlin 31179f4d26d0SMark McLoughlin static void virtnet_config_changed(struct virtio_device *vdev) 31189f4d26d0SMark McLoughlin { 31199f4d26d0SMark McLoughlin struct virtnet_info *vi = vdev->priv; 31209f4d26d0SMark McLoughlin 31213b07e9caSTejun Heo schedule_work(&vi->config_work); 31229f4d26d0SMark McLoughlin } 31239f4d26d0SMark McLoughlin 3124986a4f4dSJason Wang static void virtnet_free_queues(struct virtnet_info *vi) 3125986a4f4dSJason Wang { 3126d4fb84eeSAndrey Vagin int i; 3127d4fb84eeSAndrey Vagin 3128ab3971b1SJason Wang for (i = 0; i < vi->max_queue_pairs; i++) { 31295198d545SJakub Kicinski __netif_napi_del(&vi->rq[i].napi); 31305198d545SJakub Kicinski __netif_napi_del(&vi->sq[i].napi); 3131ab3971b1SJason Wang } 3132d4fb84eeSAndrey Vagin 31335198d545SJakub Kicinski /* We called __netif_napi_del(), 3134963abe5cSEric Dumazet * we need to respect an RCU grace period before freeing vi->rq 3135963abe5cSEric Dumazet */ 3136963abe5cSEric Dumazet synchronize_net(); 3137963abe5cSEric Dumazet 3138986a4f4dSJason Wang kfree(vi->rq); 3139986a4f4dSJason Wang kfree(vi->sq); 314012e57169SMichael S. 
Tsirkin kfree(vi->ctrl); 3141986a4f4dSJason Wang } 3142986a4f4dSJason Wang 314347315329SJohn Fastabend static void _free_receive_bufs(struct virtnet_info *vi) 3144986a4f4dSJason Wang { 3145f600b690SJohn Fastabend struct bpf_prog *old_prog; 3146986a4f4dSJason Wang int i; 3147986a4f4dSJason Wang 3148986a4f4dSJason Wang for (i = 0; i < vi->max_queue_pairs; i++) { 3149986a4f4dSJason Wang while (vi->rq[i].pages) 3150986a4f4dSJason Wang __free_pages(get_a_page(&vi->rq[i], GFP_KERNEL), 0); 3151f600b690SJohn Fastabend 3152f600b690SJohn Fastabend old_prog = rtnl_dereference(vi->rq[i].xdp_prog); 3153f600b690SJohn Fastabend RCU_INIT_POINTER(vi->rq[i].xdp_prog, NULL); 3154f600b690SJohn Fastabend if (old_prog) 3155f600b690SJohn Fastabend bpf_prog_put(old_prog); 3156986a4f4dSJason Wang } 315747315329SJohn Fastabend } 315847315329SJohn Fastabend 315947315329SJohn Fastabend static void free_receive_bufs(struct virtnet_info *vi) 316047315329SJohn Fastabend { 316147315329SJohn Fastabend rtnl_lock(); 316247315329SJohn Fastabend _free_receive_bufs(vi); 3163f600b690SJohn Fastabend rtnl_unlock(); 3164986a4f4dSJason Wang } 3165986a4f4dSJason Wang 3166fb51879dSMichael Dalton static void free_receive_page_frags(struct virtnet_info *vi) 3167fb51879dSMichael Dalton { 3168fb51879dSMichael Dalton int i; 3169fb51879dSMichael Dalton for (i = 0; i < vi->max_queue_pairs; i++) 3170fb51879dSMichael Dalton if (vi->rq[i].alloc_frag.page) 3171fb51879dSMichael Dalton put_page(vi->rq[i].alloc_frag.page); 3172fb51879dSMichael Dalton } 3173fb51879dSMichael Dalton 3174986a4f4dSJason Wang static void free_unused_bufs(struct virtnet_info *vi) 3175986a4f4dSJason Wang { 3176986a4f4dSJason Wang void *buf; 3177986a4f4dSJason Wang int i; 3178986a4f4dSJason Wang 3179986a4f4dSJason Wang for (i = 0; i < vi->max_queue_pairs; i++) { 3180986a4f4dSJason Wang struct virtqueue *vq = vi->sq[i].vq; 318156434a01SJohn Fastabend while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) { 31825050471dSToshiaki Makita if (!is_xdp_frame(buf)) 3183986a4f4dSJason Wang dev_kfree_skb(buf); 318456434a01SJohn Fastabend else 31855050471dSToshiaki Makita xdp_return_frame(ptr_to_xdp(buf)); 318656434a01SJohn Fastabend } 3187986a4f4dSJason Wang } 3188986a4f4dSJason Wang 3189986a4f4dSJason Wang for (i = 0; i < vi->max_queue_pairs; i++) { 3190986a4f4dSJason Wang struct virtqueue *vq = vi->rq[i].vq; 3191986a4f4dSJason Wang 3192986a4f4dSJason Wang while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) { 3193ab7db917SMichael Dalton if (vi->mergeable_rx_bufs) { 3194680557cfSMichael S. Tsirkin put_page(virt_to_head_page(buf)); 3195ab7db917SMichael Dalton } else if (vi->big_packets) { 3196fa9fac17SAndrey Vagin give_pages(&vi->rq[i], buf); 3197ab7db917SMichael Dalton } else { 3198f6b10209SJason Wang put_page(virt_to_head_page(buf)); 3199986a4f4dSJason Wang } 3200986a4f4dSJason Wang } 3201986a4f4dSJason Wang } 3202ab7db917SMichael Dalton } 3203986a4f4dSJason Wang 3204e9d7417bSJason Wang static void virtnet_del_vqs(struct virtnet_info *vi) 3205e9d7417bSJason Wang { 3206e9d7417bSJason Wang struct virtio_device *vdev = vi->vdev; 3207e9d7417bSJason Wang 3208310974faSPeter Xu virtnet_clean_affinity(vi); 3209986a4f4dSJason Wang 3210e9d7417bSJason Wang vdev->config->del_vqs(vdev); 3211986a4f4dSJason Wang 3212986a4f4dSJason Wang virtnet_free_queues(vi); 3213986a4f4dSJason Wang } 3214986a4f4dSJason Wang 3215d85b758fSMichael S. Tsirkin /* How large should a single buffer be so a queue full of these can fit at 3216d85b758fSMichael S. Tsirkin * least one full packet? 
3217d85b758fSMichael S. Tsirkin * Logic below assumes the mergeable buffer header is used. 3218d85b758fSMichael S. Tsirkin */ 3219d85b758fSMichael S. Tsirkin static unsigned int mergeable_min_buf_len(struct virtnet_info *vi, struct virtqueue *vq) 3220d85b758fSMichael S. Tsirkin { 3221c1ddc42dSAndrew Melnychenko const unsigned int hdr_len = vi->hdr_len; 3222d85b758fSMichael S. Tsirkin unsigned int rq_size = virtqueue_get_vring_size(vq); 3223d85b758fSMichael S. Tsirkin unsigned int packet_len = vi->big_packets ? IP_MAX_MTU : vi->dev->max_mtu; 3224d85b758fSMichael S. Tsirkin unsigned int buf_len = hdr_len + ETH_HLEN + VLAN_HLEN + packet_len; 3225d85b758fSMichael S. Tsirkin unsigned int min_buf_len = DIV_ROUND_UP(buf_len, rq_size); 3226d85b758fSMichael S. Tsirkin 3227f0c3192cSMichael S. Tsirkin return max(max(min_buf_len, hdr_len) - hdr_len, 3228f0c3192cSMichael S. Tsirkin (unsigned int)GOOD_PACKET_LEN); 3229d85b758fSMichael S. Tsirkin } 3230d85b758fSMichael S. Tsirkin 3231986a4f4dSJason Wang static int virtnet_find_vqs(struct virtnet_info *vi) 3232986a4f4dSJason Wang { 3233986a4f4dSJason Wang vq_callback_t **callbacks; 3234986a4f4dSJason Wang struct virtqueue **vqs; 3235986a4f4dSJason Wang int ret = -ENOMEM; 3236986a4f4dSJason Wang int i, total_vqs; 3237986a4f4dSJason Wang const char **names; 3238d45b897bSMichael S. Tsirkin bool *ctx; 3239986a4f4dSJason Wang 3240986a4f4dSJason Wang /* We expect 1 RX virtqueue followed by 1 TX virtqueue, followed by 3241986a4f4dSJason Wang * possible N-1 RX/TX queue pairs used in multiqueue mode, followed by 3242986a4f4dSJason Wang * possible control vq. 3243986a4f4dSJason Wang */ 3244986a4f4dSJason Wang total_vqs = vi->max_queue_pairs * 2 + 3245986a4f4dSJason Wang virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ); 3246986a4f4dSJason Wang 3247986a4f4dSJason Wang /* Allocate space for find_vqs parameters */ 32486396bb22SKees Cook vqs = kcalloc(total_vqs, sizeof(*vqs), GFP_KERNEL); 3249986a4f4dSJason Wang if (!vqs) 3250986a4f4dSJason Wang goto err_vq; 32516da2ec56SKees Cook callbacks = kmalloc_array(total_vqs, sizeof(*callbacks), GFP_KERNEL); 3252986a4f4dSJason Wang if (!callbacks) 3253986a4f4dSJason Wang goto err_callback; 32546da2ec56SKees Cook names = kmalloc_array(total_vqs, sizeof(*names), GFP_KERNEL); 3255986a4f4dSJason Wang if (!names) 3256986a4f4dSJason Wang goto err_names; 3257192f68cfSJason Wang if (!vi->big_packets || vi->mergeable_rx_bufs) { 32586396bb22SKees Cook ctx = kcalloc(total_vqs, sizeof(*ctx), GFP_KERNEL); 3259d45b897bSMichael S. Tsirkin if (!ctx) 3260d45b897bSMichael S. Tsirkin goto err_ctx; 3261d45b897bSMichael S. Tsirkin } else { 3262d45b897bSMichael S. Tsirkin ctx = NULL; 3263d45b897bSMichael S. 
Tsirkin } 3264986a4f4dSJason Wang 3265986a4f4dSJason Wang /* Parameters for control virtqueue, if any */ 3266986a4f4dSJason Wang if (vi->has_cvq) { 3267986a4f4dSJason Wang callbacks[total_vqs - 1] = NULL; 3268986a4f4dSJason Wang names[total_vqs - 1] = "control"; 3269986a4f4dSJason Wang } 3270986a4f4dSJason Wang 3271986a4f4dSJason Wang /* Allocate/initialize parameters for send/receive virtqueues */ 3272986a4f4dSJason Wang for (i = 0; i < vi->max_queue_pairs; i++) { 3273986a4f4dSJason Wang callbacks[rxq2vq(i)] = skb_recv_done; 3274986a4f4dSJason Wang callbacks[txq2vq(i)] = skb_xmit_done; 3275986a4f4dSJason Wang sprintf(vi->rq[i].name, "input.%d", i); 3276986a4f4dSJason Wang sprintf(vi->sq[i].name, "output.%d", i); 3277986a4f4dSJason Wang names[rxq2vq(i)] = vi->rq[i].name; 3278986a4f4dSJason Wang names[txq2vq(i)] = vi->sq[i].name; 3279d45b897bSMichael S. Tsirkin if (ctx) 3280d45b897bSMichael S. Tsirkin ctx[rxq2vq(i)] = true; 3281986a4f4dSJason Wang } 3282986a4f4dSJason Wang 3283a2f7dc00SXianting Tian ret = virtio_find_vqs_ctx(vi->vdev, total_vqs, vqs, callbacks, 3284d45b897bSMichael S. Tsirkin names, ctx, NULL); 3285986a4f4dSJason Wang if (ret) 3286986a4f4dSJason Wang goto err_find; 3287986a4f4dSJason Wang 3288986a4f4dSJason Wang if (vi->has_cvq) { 3289986a4f4dSJason Wang vi->cvq = vqs[total_vqs - 1]; 3290986a4f4dSJason Wang if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VLAN)) 3291f646968fSPatrick McHardy vi->dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; 3292986a4f4dSJason Wang } 3293986a4f4dSJason Wang 3294986a4f4dSJason Wang for (i = 0; i < vi->max_queue_pairs; i++) { 3295986a4f4dSJason Wang vi->rq[i].vq = vqs[rxq2vq(i)]; 3296d85b758fSMichael S. Tsirkin vi->rq[i].min_buf_len = mergeable_min_buf_len(vi, vi->rq[i].vq); 3297986a4f4dSJason Wang vi->sq[i].vq = vqs[txq2vq(i)]; 3298986a4f4dSJason Wang } 3299986a4f4dSJason Wang 33002fa3c8a8STonghao Zhang /* run here: ret == 0. */ 3301986a4f4dSJason Wang 3302986a4f4dSJason Wang 3303986a4f4dSJason Wang err_find: 3304d45b897bSMichael S. Tsirkin kfree(ctx); 3305d45b897bSMichael S. Tsirkin err_ctx: 3306986a4f4dSJason Wang kfree(names); 3307986a4f4dSJason Wang err_names: 3308986a4f4dSJason Wang kfree(callbacks); 3309986a4f4dSJason Wang err_callback: 3310986a4f4dSJason Wang kfree(vqs); 3311986a4f4dSJason Wang err_vq: 3312986a4f4dSJason Wang return ret; 3313986a4f4dSJason Wang } 3314986a4f4dSJason Wang 3315986a4f4dSJason Wang static int virtnet_alloc_queues(struct virtnet_info *vi) 3316986a4f4dSJason Wang { 3317986a4f4dSJason Wang int i; 3318986a4f4dSJason Wang 3319122b84a1SMax Gurtovoy if (vi->has_cvq) { 332012e57169SMichael S. Tsirkin vi->ctrl = kzalloc(sizeof(*vi->ctrl), GFP_KERNEL); 332112e57169SMichael S. Tsirkin if (!vi->ctrl) 332212e57169SMichael S. 
Tsirkin goto err_ctrl; 3323122b84a1SMax Gurtovoy } else { 3324122b84a1SMax Gurtovoy vi->ctrl = NULL; 3325122b84a1SMax Gurtovoy } 33266396bb22SKees Cook vi->sq = kcalloc(vi->max_queue_pairs, sizeof(*vi->sq), GFP_KERNEL); 3327986a4f4dSJason Wang if (!vi->sq) 3328986a4f4dSJason Wang goto err_sq; 33296396bb22SKees Cook vi->rq = kcalloc(vi->max_queue_pairs, sizeof(*vi->rq), GFP_KERNEL); 3330008d4278SAmerigo Wang if (!vi->rq) 3331986a4f4dSJason Wang goto err_rq; 3332986a4f4dSJason Wang 3333986a4f4dSJason Wang INIT_DELAYED_WORK(&vi->refill, refill_work); 3334986a4f4dSJason Wang for (i = 0; i < vi->max_queue_pairs; i++) { 3335986a4f4dSJason Wang vi->rq[i].pages = NULL; 3336d484735dSJakub Kicinski netif_napi_add_weight(vi->dev, &vi->rq[i].napi, virtnet_poll, 3337986a4f4dSJason Wang napi_weight); 33388d602e1aSJakub Kicinski netif_napi_add_tx_weight(vi->dev, &vi->sq[i].napi, 33398d602e1aSJakub Kicinski virtnet_poll_tx, 3340b92f1e67SWillem de Bruijn napi_tx ? napi_weight : 0); 3341986a4f4dSJason Wang 3342986a4f4dSJason Wang sg_init_table(vi->rq[i].sg, ARRAY_SIZE(vi->rq[i].sg)); 33435377d758SJohannes Berg ewma_pkt_len_init(&vi->rq[i].mrg_avg_pkt_len); 3344986a4f4dSJason Wang sg_init_table(vi->sq[i].sg, ARRAY_SIZE(vi->sq[i].sg)); 3345d7dfc5cfSToshiaki Makita 3346d7dfc5cfSToshiaki Makita u64_stats_init(&vi->rq[i].stats.syncp); 3347d7dfc5cfSToshiaki Makita u64_stats_init(&vi->sq[i].stats.syncp); 3348986a4f4dSJason Wang } 3349986a4f4dSJason Wang 3350986a4f4dSJason Wang return 0; 3351986a4f4dSJason Wang 3352986a4f4dSJason Wang err_rq: 3353986a4f4dSJason Wang kfree(vi->sq); 3354986a4f4dSJason Wang err_sq: 335512e57169SMichael S. Tsirkin kfree(vi->ctrl); 335612e57169SMichael S. Tsirkin err_ctrl: 3357986a4f4dSJason Wang return -ENOMEM; 3358e9d7417bSJason Wang } 3359e9d7417bSJason Wang 33603f9c10b0SAmit Shah static int init_vqs(struct virtnet_info *vi) 33613f9c10b0SAmit Shah { 3362986a4f4dSJason Wang int ret; 33633f9c10b0SAmit Shah 3364986a4f4dSJason Wang /* Allocate send & receive queues */ 3365986a4f4dSJason Wang ret = virtnet_alloc_queues(vi); 3366986a4f4dSJason Wang if (ret) 3367986a4f4dSJason Wang goto err; 33683f9c10b0SAmit Shah 3369986a4f4dSJason Wang ret = virtnet_find_vqs(vi); 3370986a4f4dSJason Wang if (ret) 3371986a4f4dSJason Wang goto err_free; 33723f9c10b0SAmit Shah 3373a0d1d0f4SSebastian Andrzej Siewior cpus_read_lock(); 33748898c21cSWanlong Gao virtnet_set_affinity(vi); 3375a0d1d0f4SSebastian Andrzej Siewior cpus_read_unlock(); 337647be2479SWanlong Gao 33773f9c10b0SAmit Shah return 0; 3378986a4f4dSJason Wang 3379986a4f4dSJason Wang err_free: 3380986a4f4dSJason Wang virtnet_free_queues(vi); 3381986a4f4dSJason Wang err: 3382986a4f4dSJason Wang return ret; 33833f9c10b0SAmit Shah } 33843f9c10b0SAmit Shah 3385fbf28d78SMichael Dalton #ifdef CONFIG_SYSFS 3386fbf28d78SMichael Dalton static ssize_t mergeable_rx_buffer_size_show(struct netdev_rx_queue *queue, 3387718ad681Sstephen hemminger char *buf) 3388fbf28d78SMichael Dalton { 3389fbf28d78SMichael Dalton struct virtnet_info *vi = netdev_priv(queue->dev); 3390fbf28d78SMichael Dalton unsigned int queue_index = get_netdev_rx_queue_index(queue); 33913cc81a9aSJason Wang unsigned int headroom = virtnet_get_headroom(vi); 33923cc81a9aSJason Wang unsigned int tailroom = headroom ? sizeof(struct skb_shared_info) : 0; 33935377d758SJohannes Berg struct ewma_pkt_len *avg; 3394fbf28d78SMichael Dalton 3395fbf28d78SMichael Dalton BUG_ON(queue_index >= vi->max_queue_pairs); 3396fbf28d78SMichael Dalton avg = &vi->rq[queue_index].mrg_avg_pkt_len; 3397d85b758fSMichael S. 
3385fbf28d78SMichael Dalton #ifdef CONFIG_SYSFS
3386fbf28d78SMichael Dalton static ssize_t mergeable_rx_buffer_size_show(struct netdev_rx_queue *queue,
3387718ad681Sstephen hemminger 					     char *buf)
3388fbf28d78SMichael Dalton {
3389fbf28d78SMichael Dalton 	struct virtnet_info *vi = netdev_priv(queue->dev);
3390fbf28d78SMichael Dalton 	unsigned int queue_index = get_netdev_rx_queue_index(queue);
33913cc81a9aSJason Wang 	unsigned int headroom = virtnet_get_headroom(vi);
33923cc81a9aSJason Wang 	unsigned int tailroom = headroom ? sizeof(struct skb_shared_info) : 0;
33935377d758SJohannes Berg 	struct ewma_pkt_len *avg;
3394fbf28d78SMichael Dalton
3395fbf28d78SMichael Dalton 	BUG_ON(queue_index >= vi->max_queue_pairs);
3396fbf28d78SMichael Dalton 	avg = &vi->rq[queue_index].mrg_avg_pkt_len;
3397d85b758fSMichael S. Tsirkin 	return sprintf(buf, "%u\n",
33983cc81a9aSJason Wang 		       get_mergeable_buf_len(&vi->rq[queue_index], avg,
33993cc81a9aSJason Wang 				       SKB_DATA_ALIGN(headroom + tailroom)));
3400fbf28d78SMichael Dalton }
3401fbf28d78SMichael Dalton
3402fbf28d78SMichael Dalton static struct rx_queue_attribute mergeable_rx_buffer_size_attribute =
3403fbf28d78SMichael Dalton 	__ATTR_RO(mergeable_rx_buffer_size);
3404fbf28d78SMichael Dalton
3405fbf28d78SMichael Dalton static struct attribute *virtio_net_mrg_rx_attrs[] = {
3406fbf28d78SMichael Dalton 	&mergeable_rx_buffer_size_attribute.attr,
3407fbf28d78SMichael Dalton 	NULL
3408fbf28d78SMichael Dalton };
3409fbf28d78SMichael Dalton
3410fbf28d78SMichael Dalton static const struct attribute_group virtio_net_mrg_rx_group = {
3411fbf28d78SMichael Dalton 	.name = "virtio_net",
3412fbf28d78SMichael Dalton 	.attrs = virtio_net_mrg_rx_attrs
3413fbf28d78SMichael Dalton };
3414fbf28d78SMichael Dalton #endif
3415fbf28d78SMichael Dalton
3416892d6eb1SJason Wang static bool virtnet_fail_on_feature(struct virtio_device *vdev,
3417892d6eb1SJason Wang 				    unsigned int fbit,
3418892d6eb1SJason Wang 				    const char *fname, const char *dname)
3419892d6eb1SJason Wang {
3420892d6eb1SJason Wang 	if (!virtio_has_feature(vdev, fbit))
3421892d6eb1SJason Wang 		return false;
3422892d6eb1SJason Wang
3423892d6eb1SJason Wang 	dev_err(&vdev->dev, "device advertises feature %s but not %s",
3424892d6eb1SJason Wang 		fname, dname);
3425892d6eb1SJason Wang
3426892d6eb1SJason Wang 	return true;
3427892d6eb1SJason Wang }
3428892d6eb1SJason Wang
3429892d6eb1SJason Wang #define VIRTNET_FAIL_ON(vdev, fbit, dbit)			\
3430892d6eb1SJason Wang 	virtnet_fail_on_feature(vdev, fbit, #fbit, dbit)
3431892d6eb1SJason Wang
3432892d6eb1SJason Wang static bool virtnet_validate_features(struct virtio_device *vdev)
3433892d6eb1SJason Wang {
3434892d6eb1SJason Wang 	if (!virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ) &&
3435892d6eb1SJason Wang 	    (VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_CTRL_RX,
3436892d6eb1SJason Wang 			     "VIRTIO_NET_F_CTRL_VQ") ||
3437892d6eb1SJason Wang 	     VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_CTRL_VLAN,
3438892d6eb1SJason Wang 			     "VIRTIO_NET_F_CTRL_VQ") ||
3439892d6eb1SJason Wang 	     VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_GUEST_ANNOUNCE,
3440892d6eb1SJason Wang 			     "VIRTIO_NET_F_CTRL_VQ") ||
3441892d6eb1SJason Wang 	     VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_MQ, "VIRTIO_NET_F_CTRL_VQ") ||
3442892d6eb1SJason Wang 	     VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR,
3443c7114b12SAndrew Melnychenko 			     "VIRTIO_NET_F_CTRL_VQ") ||
3444c7114b12SAndrew Melnychenko 	     VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_RSS,
344591f41f01SAndrew Melnychenko 			     "VIRTIO_NET_F_CTRL_VQ") ||
344691f41f01SAndrew Melnychenko 	     VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_HASH_REPORT,
3447892d6eb1SJason Wang 			     "VIRTIO_NET_F_CTRL_VQ"))) {
3448892d6eb1SJason Wang 		return false;
3449892d6eb1SJason Wang 	}
3450892d6eb1SJason Wang
3451892d6eb1SJason Wang 	return true;
3452892d6eb1SJason Wang }
3453892d6eb1SJason Wang
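/*
 * The MTU bounds below are the generic Ethernet limits.  virtnet_validate()
 * runs before probe: it rejects devices without config space access, fails
 * feature combinations that depend on a missing control virtqueue, and
 * clears VIRTIO_NET_F_MTU when the advertised MTU is below the minimum.
 */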
3454d0c2c997SJarod Wilson #define MIN_MTU ETH_MIN_MTU
3455d0c2c997SJarod Wilson #define MAX_MTU ETH_MAX_MTU
3456d0c2c997SJarod Wilson
3457fe36cbe0SMichael S. Tsirkin static int virtnet_validate(struct virtio_device *vdev)
3458296f96fcSRusty Russell {
34596ba42248SMichael S. Tsirkin 	if (!vdev->config->get) {
34606ba42248SMichael S. Tsirkin 		dev_err(&vdev->dev, "%s failure: config access disabled\n",
34616ba42248SMichael S. Tsirkin 			__func__);
34626ba42248SMichael S. Tsirkin 		return -EINVAL;
34636ba42248SMichael S. Tsirkin 	}
34646ba42248SMichael S. Tsirkin
3465892d6eb1SJason Wang 	if (!virtnet_validate_features(vdev))
3466892d6eb1SJason Wang 		return -EINVAL;
3467892d6eb1SJason Wang
3468fe36cbe0SMichael S. Tsirkin 	if (virtio_has_feature(vdev, VIRTIO_NET_F_MTU)) {
3469fe36cbe0SMichael S. Tsirkin 		int mtu = virtio_cread16(vdev,
3470fe36cbe0SMichael S. Tsirkin 					 offsetof(struct virtio_net_config,
3471fe36cbe0SMichael S. Tsirkin 						  mtu));
3472fe36cbe0SMichael S. Tsirkin 		if (mtu < MIN_MTU)
3473fe36cbe0SMichael S. Tsirkin 			__virtio_clear_bit(vdev, VIRTIO_NET_F_MTU);
3474fe36cbe0SMichael S. Tsirkin 	}
3475fe36cbe0SMichael S. Tsirkin
3476fe36cbe0SMichael S. Tsirkin 	return 0;
3477fe36cbe0SMichael S. Tsirkin }
3478fe36cbe0SMichael S. Tsirkin
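/*
 * virtnet_probe() performs the bulk of device bring-up: it sizes the
 * queue pairs, allocates the netdev, translates the negotiated
 * VIRTIO_NET_F_* bits into netdev feature flags, sets up the
 * virtqueues and finally registers the device under the rtnl lock.
 */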
3479fe36cbe0SMichael S. Tsirkin static int virtnet_probe(struct virtio_device *vdev)
3480fe36cbe0SMichael S. Tsirkin {
3481d7dfc5cfSToshiaki Makita 	int i, err = -ENOMEM;
3482fe36cbe0SMichael S. Tsirkin 	struct net_device *dev;
3483fe36cbe0SMichael S. Tsirkin 	struct virtnet_info *vi;
3484fe36cbe0SMichael S. Tsirkin 	u16 max_queue_pairs;
3485fe36cbe0SMichael S. Tsirkin 	int mtu;
3486fe36cbe0SMichael S. Tsirkin
3487c7114b12SAndrew Melnychenko 	/* Find if host supports multiqueue/rss virtio_net device */
3488c7114b12SAndrew Melnychenko 	max_queue_pairs = 1;
3489c7114b12SAndrew Melnychenko 	if (virtio_has_feature(vdev, VIRTIO_NET_F_MQ) || virtio_has_feature(vdev, VIRTIO_NET_F_RSS))
3490c7114b12SAndrew Melnychenko 		max_queue_pairs =
3491c7114b12SAndrew Melnychenko 		     virtio_cread16(vdev, offsetof(struct virtio_net_config, max_virtqueue_pairs));
3492986a4f4dSJason Wang
3493986a4f4dSJason Wang 	/* We need at least 2 queues */
3494c7114b12SAndrew Melnychenko 	if (max_queue_pairs < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN ||
3495986a4f4dSJason Wang 	    max_queue_pairs > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX ||
3496986a4f4dSJason Wang 	    !virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ))
3497986a4f4dSJason Wang 		max_queue_pairs = 1;
3498296f96fcSRusty Russell
3499296f96fcSRusty Russell 	/* Allocate ourselves a network device with room for our info */
3500986a4f4dSJason Wang 	dev = alloc_etherdev_mq(sizeof(struct virtnet_info), max_queue_pairs);
3501296f96fcSRusty Russell 	if (!dev)
3502296f96fcSRusty Russell 		return -ENOMEM;
3503296f96fcSRusty Russell
3504296f96fcSRusty Russell 	/* Set up network device as normal. */
3505ab5bd583SXuan Zhuo 	dev->priv_flags |= IFF_UNICAST_FLT | IFF_LIVE_ADDR_CHANGE |
3506ab5bd583SXuan Zhuo 			   IFF_TX_SKB_NO_LINEAR;
350776288b4eSStephen Hemminger 	dev->netdev_ops = &virtnet_netdev;
3508296f96fcSRusty Russell 	dev->features = NETIF_F_HIGHDMA;
35093fa2a1dfSstephen hemminger
35107ad24ea4SWilfried Klaebe 	dev->ethtool_ops = &virtnet_ethtool_ops;
3511296f96fcSRusty Russell 	SET_NETDEV_DEV(dev, &vdev->dev);
3512296f96fcSRusty Russell
3513296f96fcSRusty Russell 	/* Do we support "hardware" checksums? */
351498e778c9SMichał Mirosław 	if (virtio_has_feature(vdev, VIRTIO_NET_F_CSUM)) {
3515296f96fcSRusty Russell 		/* This opens up the world of extra features. */
351648900cb6SJason Wang 		dev->hw_features |= NETIF_F_HW_CSUM | NETIF_F_SG;
351798e778c9SMichał Mirosław 		if (csum)
351848900cb6SJason Wang 			dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG;
351998e778c9SMichał Mirosław
352098e778c9SMichał Mirosław 		if (virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) {
3521e078de03SDavid S. Miller 			dev->hw_features |= NETIF_F_TSO
352234a48579SRusty Russell 				| NETIF_F_TSO_ECN | NETIF_F_TSO6;
352334a48579SRusty Russell 		}
35245539ae96SRusty Russell 		/* Individual feature bits: what can host handle? */
352598e778c9SMichał Mirosław 		if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO4))
352698e778c9SMichał Mirosław 			dev->hw_features |= NETIF_F_TSO;
352798e778c9SMichał Mirosław 		if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO6))
352898e778c9SMichał Mirosław 			dev->hw_features |= NETIF_F_TSO6;
352998e778c9SMichał Mirosław 		if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_ECN))
353098e778c9SMichał Mirosław 			dev->hw_features |= NETIF_F_TSO_ECN;
353198e778c9SMichał Mirosław
353241f2f127SJason Wang 		dev->features |= NETIF_F_GSO_ROBUST;
353341f2f127SJason Wang
353498e778c9SMichał Mirosław 		if (gso)
3535e078de03SDavid S. Miller 			dev->features |= dev->hw_features & NETIF_F_ALL_TSO;
353698e778c9SMichał Mirosław 		/* (!csum && gso) case will be fixed by register_netdev() */
3537296f96fcSRusty Russell 	}
35384f49129bSThomas Huth 	if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_CSUM))
35394f49129bSThomas Huth 		dev->features |= NETIF_F_RXCSUM;
3540a02e8964SWillem de Bruijn 	if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) ||
3541a02e8964SWillem de Bruijn 	    virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6))
3542dbcf24d1SJason Wang 		dev->features |= NETIF_F_GRO_HW;
3543cf8691cbSMichael S. Tsirkin 	if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS))
3544dbcf24d1SJason Wang 		dev->hw_features |= NETIF_F_GRO_HW;
3545296f96fcSRusty Russell
35464fda8302SJason Wang 	dev->vlan_features = dev->features;
35474fda8302SJason Wang
3548d0c2c997SJarod Wilson 	/* MTU range: 68 - 65535 */
3549d0c2c997SJarod Wilson 	dev->min_mtu = MIN_MTU;
3550d0c2c997SJarod Wilson 	dev->max_mtu = MAX_MTU;
3551d0c2c997SJarod Wilson
3552296f96fcSRusty Russell 	/* Configuration may specify what MAC to use. Otherwise random. */
3553f2edaa4aSJakub Kicinski 	if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC)) {
3554f2edaa4aSJakub Kicinski 		u8 addr[ETH_ALEN];
3555f2edaa4aSJakub Kicinski
3556855e0c52SRusty Russell 		virtio_cread_bytes(vdev,
3557a586d4f6SRusty Russell 				   offsetof(struct virtio_net_config, mac),
3558f2edaa4aSJakub Kicinski 				   addr, ETH_ALEN);
3559f2edaa4aSJakub Kicinski 		eth_hw_addr_set(dev, addr);
3560f2edaa4aSJakub Kicinski 	} else {
3561f2cedb63SDanny Kukawka 		eth_hw_addr_random(dev);
3562f2edaa4aSJakub Kicinski 	}
3563296f96fcSRusty Russell
3564296f96fcSRusty Russell 	/* Set up our device-specific information */
3565296f96fcSRusty Russell 	vi = netdev_priv(dev);
3566296f96fcSRusty Russell 	vi->dev = dev;
3567296f96fcSRusty Russell 	vi->vdev = vdev;
3568d9d5dcc8SChristian Borntraeger 	vdev->priv = vi;
3569827da44cSJohn Stultz
3570586d17c5SJason Wang 	INIT_WORK(&vi->config_work, virtnet_config_changed_work);
35715a159128SJason Wang 	spin_lock_init(&vi->refill_lock);
3572296f96fcSRusty Russell
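/*
 * Next, choose the receive buffer strategy: GSO-capable guests need
 * big (multi-page) buffers, VIRTIO_NET_F_MRG_RXBUF selects mergeable
 * buffers, and the RSS/hash-report features determine both the RSS
 * configuration and the virtio-net header layout (vi->hdr_len).
 */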
357397402b96SHerbert Xu 	/* If we can receive ANY GSO packets, we must allocate large ones. */
35748e95a202SJoe Perches 	if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) ||
35758e95a202SJoe Perches 	    virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6) ||
3576e3e3c423SVlad Yasevich 	    virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ECN) ||
3577e3e3c423SVlad Yasevich 	    virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_UFO))
357897402b96SHerbert Xu 		vi->big_packets = true;
357997402b96SHerbert Xu
35803f2c31d9SMark McLoughlin 	if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF))
35813f2c31d9SMark McLoughlin 		vi->mergeable_rx_bufs = true;
35823f2c31d9SMark McLoughlin
358391f41f01SAndrew Melnychenko 	if (virtio_has_feature(vdev, VIRTIO_NET_F_HASH_REPORT))
358491f41f01SAndrew Melnychenko 		vi->has_rss_hash_report = true;
358591f41f01SAndrew Melnychenko
358691f41f01SAndrew Melnychenko 	if (virtio_has_feature(vdev, VIRTIO_NET_F_RSS))
3587c7114b12SAndrew Melnychenko 		vi->has_rss = true;
358891f41f01SAndrew Melnychenko
358991f41f01SAndrew Melnychenko 	if (vi->has_rss || vi->has_rss_hash_report) {
3590c7114b12SAndrew Melnychenko 		vi->rss_indir_table_size =
3591c7114b12SAndrew Melnychenko 			virtio_cread16(vdev, offsetof(struct virtio_net_config,
3592c7114b12SAndrew Melnychenko 				rss_max_indirection_table_length));
3593c7114b12SAndrew Melnychenko 		vi->rss_key_size =
3594c7114b12SAndrew Melnychenko 			virtio_cread8(vdev, offsetof(struct virtio_net_config, rss_max_key_size));
3595c7114b12SAndrew Melnychenko
3596c7114b12SAndrew Melnychenko 		vi->rss_hash_types_supported =
3597c7114b12SAndrew Melnychenko 		    virtio_cread32(vdev, offsetof(struct virtio_net_config, supported_hash_types));
3598c7114b12SAndrew Melnychenko 		vi->rss_hash_types_supported &=
3599c7114b12SAndrew Melnychenko 				~(VIRTIO_NET_RSS_HASH_TYPE_IP_EX |
3600c7114b12SAndrew Melnychenko 				  VIRTIO_NET_RSS_HASH_TYPE_TCP_EX |
3601c7114b12SAndrew Melnychenko 				  VIRTIO_NET_RSS_HASH_TYPE_UDP_EX);
3602c7114b12SAndrew Melnychenko
3603c7114b12SAndrew Melnychenko 		dev->hw_features |= NETIF_F_RXHASH;
3604c7114b12SAndrew Melnychenko 	}
360591f41f01SAndrew Melnychenko
360691f41f01SAndrew Melnychenko 	if (vi->has_rss_hash_report)
360791f41f01SAndrew Melnychenko 		vi->hdr_len = sizeof(struct virtio_net_hdr_v1_hash);
360891f41f01SAndrew Melnychenko 	else if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF) ||
3609d04302b3SMichael S. Tsirkin 		 virtio_has_feature(vdev, VIRTIO_F_VERSION_1))
3610012873d0SMichael S. Tsirkin 		vi->hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf);
3611012873d0SMichael S. Tsirkin 	else
3612012873d0SMichael S. Tsirkin 		vi->hdr_len = sizeof(struct virtio_net_hdr);
3613012873d0SMichael S. Tsirkin
361475993300SMichael S. Tsirkin 	if (virtio_has_feature(vdev, VIRTIO_F_ANY_LAYOUT) ||
361575993300SMichael S. Tsirkin 	    virtio_has_feature(vdev, VIRTIO_F_VERSION_1))
3616e7428e95SMichael S. Tsirkin 		vi->any_header_sg = true;
3617e7428e95SMichael S. Tsirkin
3618986a4f4dSJason Wang 	if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ))
3619986a4f4dSJason Wang 		vi->has_cvq = true;
3620986a4f4dSJason Wang
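/*
 * Honour a device-supplied MTU.  virtnet_validate() already cleared
 * the feature bit for an MTU below the minimum, so a value that fails
 * the check here must have changed between validate and probe.
 */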
362114de9d11SAaron Conole 	if (virtio_has_feature(vdev, VIRTIO_NET_F_MTU)) {
362214de9d11SAaron Conole 		mtu = virtio_cread16(vdev,
362314de9d11SAaron Conole 				     offsetof(struct virtio_net_config,
362414de9d11SAaron Conole 					      mtu));
362593a205eeSAaron Conole 		if (mtu < dev->min_mtu) {
3626fe36cbe0SMichael S. Tsirkin 			/* Should never trigger: MTU was previously validated
3627fe36cbe0SMichael S. Tsirkin 			 * in virtnet_validate.
3628fe36cbe0SMichael S. Tsirkin 			 */
36297934b481SYuval Shaia 			dev_err(&vdev->dev,
36307934b481SYuval Shaia 				"device MTU appears to have changed, it is now %d < %d",
36317934b481SYuval Shaia 				mtu, dev->min_mtu);
3632411ea23aSDan Carpenter 			err = -EINVAL;
3633d7dfc5cfSToshiaki Makita 			goto free;
3634fe36cbe0SMichael S. Tsirkin 		}
3635fe36cbe0SMichael S. Tsirkin
3636d0c2c997SJarod Wilson 		dev->mtu = mtu;
363793a205eeSAaron Conole 		dev->max_mtu = mtu;
36382e123b44SMichael S. Tsirkin
36392e123b44SMichael S. Tsirkin 		/* TODO: size buffers correctly in this case. */
36402e123b44SMichael S. Tsirkin 		if (dev->mtu > ETH_DATA_LEN)
36412e123b44SMichael S. Tsirkin 			vi->big_packets = true;
364214de9d11SAaron Conole 	}
364314de9d11SAaron Conole
3644012873d0SMichael S. Tsirkin 	if (vi->any_header_sg)
3645012873d0SMichael S. Tsirkin 		dev->needed_headroom = vi->hdr_len;
36466ebbc1a6SZhangjie \(HZ\)
364744900010SJason Wang 	/* Enable multiqueue by default */
364844900010SJason Wang 	if (num_online_cpus() >= max_queue_pairs)
364944900010SJason Wang 		vi->curr_queue_pairs = max_queue_pairs;
365044900010SJason Wang 	else
365144900010SJason Wang 		vi->curr_queue_pairs = num_online_cpus();
3652986a4f4dSJason Wang 	vi->max_queue_pairs = max_queue_pairs;
3653986a4f4dSJason Wang
3654986a4f4dSJason Wang 	/* Allocate/initialize the rx/tx queues, and invoke find_vqs */
36553f9c10b0SAmit Shah 	err = init_vqs(vi);
3656d2a7dddaSMichael S. Tsirkin 	if (err)
3657d7dfc5cfSToshiaki Makita 		goto free;
3658d2a7dddaSMichael S. Tsirkin
3659fbf28d78SMichael Dalton #ifdef CONFIG_SYSFS
3660fbf28d78SMichael Dalton 	if (vi->mergeable_rx_bufs)
3661fbf28d78SMichael Dalton 		dev->sysfs_rx_queue_group = &virtio_net_mrg_rx_group;
3662fbf28d78SMichael Dalton #endif
36630f13b66bSZhi Yong Wu 	netif_set_real_num_tx_queues(dev, vi->curr_queue_pairs);
36640f13b66bSZhi Yong Wu 	netif_set_real_num_rx_queues(dev, vi->curr_queue_pairs);
3665986a4f4dSJason Wang
366616032be5SNikolay Aleksandrov 	virtnet_init_settings(dev);
366716032be5SNikolay Aleksandrov
3668ba5e4426SSridhar Samudrala 	if (virtio_has_feature(vdev, VIRTIO_NET_F_STANDBY)) {
3669ba5e4426SSridhar Samudrala 		vi->failover = net_failover_create(vi->dev);
36704b8e6ac4SWei Yongjun 		if (IS_ERR(vi->failover)) {
36714b8e6ac4SWei Yongjun 			err = PTR_ERR(vi->failover);
3672ba5e4426SSridhar Samudrala 			goto free_vqs;
3673ba5e4426SSridhar Samudrala 		}
36744b8e6ac4SWei Yongjun 	}
3675ba5e4426SSridhar Samudrala
367691f41f01SAndrew Melnychenko 	if (vi->has_rss || vi->has_rss_hash_report)
3677c7114b12SAndrew Melnychenko 		virtnet_init_default_rss(vi);
3678c7114b12SAndrew Melnychenko
367950c0ada6SJason Wang 	/* serialize netdev register + virtio_device_ready() with ndo_open() */
368050c0ada6SJason Wang 	rtnl_lock();
368150c0ada6SJason Wang
368250c0ada6SJason Wang 	err = register_netdevice(dev);
3683296f96fcSRusty Russell 	if (err) {
3684296f96fcSRusty Russell 		pr_debug("virtio_net: registering device failed\n");
368550c0ada6SJason Wang 		rtnl_unlock();
3686ba5e4426SSridhar Samudrala 		goto free_failover;
3687296f96fcSRusty Russell 	}
3688b3369c1fSRusty Russell
36894baf1e33SMichael S. Tsirkin 	virtio_device_ready(vdev);
36904baf1e33SMichael S. Tsirkin
369150c0ada6SJason Wang 	rtnl_unlock();
369250c0ada6SJason Wang
36938017c279SSebastian Andrzej Siewior 	err = virtnet_cpu_notif_add(vi);
36948de4b2f3SWanlong Gao 	if (err) {
36958de4b2f3SWanlong Gao 		pr_debug("virtio_net: registering cpu notifier failed\n");
3696f00e35e2Swangyunjian 		goto free_unregister_netdev;
36978de4b2f3SWanlong Gao 	}
36988de4b2f3SWanlong Gao
3699a220871bSJason Wang 	virtnet_set_queues(vi, vi->curr_queue_pairs);
370044900010SJason Wang
3701167c25e4SJason Wang 	/* Assume link up if device can't report link status,
3702167c25e4SJason Wang 	   otherwise get link status from config. */
3703167c25e4SJason Wang 	netif_carrier_off(dev);
3704bda7fab5SJay Vosburgh 	if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) {
37053b07e9caSTejun Heo 		schedule_work(&vi->config_work);
3706167c25e4SJason Wang 	} else {
3707167c25e4SJason Wang 		vi->status = VIRTIO_NET_S_LINK_UP;
3708faa9b39fSJason Baron 		virtnet_update_settings(vi);
37094783256eSPantelis Koukousoulas 		netif_carrier_on(dev);
3710167c25e4SJason Wang 	}
37119f4d26d0SMark McLoughlin
37123f93522fSJason Wang 	for (i = 0; i < ARRAY_SIZE(guest_offloads); i++)
37133f93522fSJason Wang 		if (virtio_has_feature(vi->vdev, guest_offloads[i]))
37143f93522fSJason Wang 			set_bit(guest_offloads[i], &vi->guest_offloads);
3715a02e8964SWillem de Bruijn 	vi->guest_offloads_capable = vi->guest_offloads;
37163f93522fSJason Wang
3717986a4f4dSJason Wang 	pr_debug("virtnet: registered device %s with %d RX and TX vqs\n",
3718986a4f4dSJason Wang 		 dev->name, max_queue_pairs);
3719986a4f4dSJason Wang
3720296f96fcSRusty Russell 	return 0;
3721296f96fcSRusty Russell
3722f00e35e2Swangyunjian free_unregister_netdev:
3723d9679d00SMichael S. Tsirkin 	virtio_reset_device(vdev);
372402465555SMichael S. Tsirkin
3725b3369c1fSRusty Russell 	unregister_netdev(dev);
3726ba5e4426SSridhar Samudrala free_failover:
3727ba5e4426SSridhar Samudrala 	net_failover_destroy(vi->failover);
3728d2a7dddaSMichael S. Tsirkin free_vqs:
3729986a4f4dSJason Wang 	cancel_delayed_work_sync(&vi->refill);
3730fb51879dSMichael Dalton 	free_receive_page_frags(vi);
3731e9d7417bSJason Wang 	virtnet_del_vqs(vi);
3732296f96fcSRusty Russell free:
3733296f96fcSRusty Russell 	free_netdev(dev);
3734296f96fcSRusty Russell 	return err;
3735296f96fcSRusty Russell }
3736296f96fcSRusty Russell
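/*
 * Common virtqueue teardown, shared by remove, freeze and the restore
 * error path: reset the device first so no virtqueue is live while
 * the remaining buffers are reclaimed.
 */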
373704486ed0SAmit Shah static void remove_vq_common(struct virtnet_info *vi)
3738296f96fcSRusty Russell {
3739d9679d00SMichael S. Tsirkin 	virtio_reset_device(vi->vdev);
3740830a8a97SShirley Ma
3741830a8a97SShirley Ma 	/* Free unused buffers in both send and recv, if any. */
37429ab86bbcSShirley Ma 	free_unused_bufs(vi);
3743fb6813f4SRusty Russell
3744986a4f4dSJason Wang 	free_receive_bufs(vi);
3745d2a7dddaSMichael S. Tsirkin
3746fb51879dSMichael Dalton 	free_receive_page_frags(vi);
3747fb51879dSMichael Dalton
3748986a4f4dSJason Wang 	virtnet_del_vqs(vi);
374904486ed0SAmit Shah }
375004486ed0SAmit Shah
37518cc085d6SBill Pemberton static void virtnet_remove(struct virtio_device *vdev)
375204486ed0SAmit Shah {
375304486ed0SAmit Shah 	struct virtnet_info *vi = vdev->priv;
375404486ed0SAmit Shah
37558017c279SSebastian Andrzej Siewior 	virtnet_cpu_notif_remove(vi);
37568de4b2f3SWanlong Gao
3757102a2786SMichael S. Tsirkin 	/* Make sure no work handler is accessing the device. */
3758102a2786SMichael S. Tsirkin 	flush_work(&vi->config_work);
3759586d17c5SJason Wang
376004486ed0SAmit Shah 	unregister_netdev(vi->dev);
376104486ed0SAmit Shah
3762ba5e4426SSridhar Samudrala 	net_failover_destroy(vi->failover);
3763ba5e4426SSridhar Samudrala
376404486ed0SAmit Shah 	remove_vq_common(vi);
3765fb6813f4SRusty Russell
376674b2553fSRusty Russell 	free_netdev(vi->dev);
3767296f96fcSRusty Russell }
3768296f96fcSRusty Russell
376967a75194SArnd Bergmann static __maybe_unused int virtnet_freeze(struct virtio_device *vdev)
37700741bcb5SAmit Shah {
37710741bcb5SAmit Shah 	struct virtnet_info *vi = vdev->priv;
37720741bcb5SAmit Shah
37738017c279SSebastian Andrzej Siewior 	virtnet_cpu_notif_remove(vi);
37749fe7bfceSJohn Fastabend 	virtnet_freeze_down(vdev);
37750741bcb5SAmit Shah 	remove_vq_common(vi);
37760741bcb5SAmit Shah
37770741bcb5SAmit Shah 	return 0;
37780741bcb5SAmit Shah }
37790741bcb5SAmit Shah
378067a75194SArnd Bergmann static __maybe_unused int virtnet_restore(struct virtio_device *vdev)
37810741bcb5SAmit Shah {
37820741bcb5SAmit Shah 	struct virtnet_info *vi = vdev->priv;
37839fe7bfceSJohn Fastabend 	int err;
37840741bcb5SAmit Shah
37859fe7bfceSJohn Fastabend 	err = virtnet_restore_up(vdev);
37860741bcb5SAmit Shah 	if (err)
37870741bcb5SAmit Shah 		return err;
3788986a4f4dSJason Wang 	virtnet_set_queues(vi, vi->curr_queue_pairs);
3789986a4f4dSJason Wang
37908017c279SSebastian Andrzej Siewior 	err = virtnet_cpu_notif_add(vi);
37913f2869caSXie Yongji 	if (err) {
37923f2869caSXie Yongji 		virtnet_freeze_down(vdev);
37933f2869caSXie Yongji 		remove_vq_common(vi);
3794ec9debbdSJason Wang 		return err;
37953f2869caSXie Yongji 	}
3796ec9debbdSJason Wang
37970741bcb5SAmit Shah 	return 0;
37980741bcb5SAmit Shah }
37990741bcb5SAmit Shah
3800296f96fcSRusty Russell static struct virtio_device_id id_table[] = {
3801296f96fcSRusty Russell 	{ VIRTIO_ID_NET, VIRTIO_DEV_ANY_ID },
3802296f96fcSRusty Russell 	{ 0 },
3803296f96fcSRusty Russell };
3804296f96fcSRusty Russell
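/*
 * Feature tables: modern (VERSION_1) devices negotiate VIRTNET_FEATURES
 * only; the legacy table additionally offers VIRTIO_NET_F_GSO and
 * VIRTIO_F_ANY_LAYOUT for pre-1.0 transports.
 */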
3805f3358507SMichael S. Tsirkin #define VIRTNET_FEATURES \
3806f3358507SMichael S. Tsirkin 	VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GUEST_CSUM, \
3807f3358507SMichael S. Tsirkin 	VIRTIO_NET_F_MAC, \
3808f3358507SMichael S. Tsirkin 	VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6, \
3809f3358507SMichael S. Tsirkin 	VIRTIO_NET_F_HOST_ECN, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6, \
3810f3358507SMichael S. Tsirkin 	VIRTIO_NET_F_GUEST_ECN, VIRTIO_NET_F_GUEST_UFO, \
3811f3358507SMichael S. Tsirkin 	VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ, \
3812f3358507SMichael S. Tsirkin 	VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN, \
3813f3358507SMichael S. Tsirkin 	VIRTIO_NET_F_GUEST_ANNOUNCE, VIRTIO_NET_F_MQ, \
3814f3358507SMichael S. Tsirkin 	VIRTIO_NET_F_CTRL_MAC_ADDR, \
3815faa9b39fSJason Baron 	VIRTIO_NET_F_MTU, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS, \
3816c7114b12SAndrew Melnychenko 	VIRTIO_NET_F_SPEED_DUPLEX, VIRTIO_NET_F_STANDBY, \
381791f41f01SAndrew Melnychenko 	VIRTIO_NET_F_RSS, VIRTIO_NET_F_HASH_REPORT
3818f3358507SMichael S. Tsirkin
3819c45a6816SRusty Russell static unsigned int features[] = {
3820f3358507SMichael S. Tsirkin 	VIRTNET_FEATURES,
3821f3358507SMichael S. Tsirkin };
3822f3358507SMichael S. Tsirkin
3823f3358507SMichael S. Tsirkin static unsigned int features_legacy[] = {
3824f3358507SMichael S. Tsirkin 	VIRTNET_FEATURES,
3825f3358507SMichael S. Tsirkin 	VIRTIO_NET_F_GSO,
3826e7428e95SMichael S. Tsirkin 	VIRTIO_F_ANY_LAYOUT,
3827c45a6816SRusty Russell };
3828c45a6816SRusty Russell
382922402529SUwe Kleine-König static struct virtio_driver virtio_net_driver = {
3830c45a6816SRusty Russell 	.feature_table = features,
3831c45a6816SRusty Russell 	.feature_table_size = ARRAY_SIZE(features),
3832f3358507SMichael S. Tsirkin 	.feature_table_legacy = features_legacy,
3833f3358507SMichael S. Tsirkin 	.feature_table_size_legacy = ARRAY_SIZE(features_legacy),
3834296f96fcSRusty Russell 	.driver.name = KBUILD_MODNAME,
3835296f96fcSRusty Russell 	.driver.owner = THIS_MODULE,
3836296f96fcSRusty Russell 	.id_table = id_table,
3837fe36cbe0SMichael S. Tsirkin 	.validate = virtnet_validate,
3838296f96fcSRusty Russell 	.probe = virtnet_probe,
38398cc085d6SBill Pemberton 	.remove = virtnet_remove,
38409f4d26d0SMark McLoughlin 	.config_changed = virtnet_config_changed,
384189107000SAaron Lu #ifdef CONFIG_PM_SLEEP
38420741bcb5SAmit Shah 	.freeze = virtnet_freeze,
38430741bcb5SAmit Shah 	.restore = virtnet_restore,
38440741bcb5SAmit Shah #endif
3845296f96fcSRusty Russell };
3846296f96fcSRusty Russell
38478017c279SSebastian Andrzej Siewior static __init int virtio_net_driver_init(void)
38488017c279SSebastian Andrzej Siewior {
38498017c279SSebastian Andrzej Siewior 	int ret;
38508017c279SSebastian Andrzej Siewior
385173c1b41eSThomas Gleixner 	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "virtio/net:online",
38528017c279SSebastian Andrzej Siewior 				      virtnet_cpu_online,
38538017c279SSebastian Andrzej Siewior 				      virtnet_cpu_down_prep);
38548017c279SSebastian Andrzej Siewior 	if (ret < 0)
38558017c279SSebastian Andrzej Siewior 		goto out;
38568017c279SSebastian Andrzej Siewior 	virtionet_online = ret;
385773c1b41eSThomas Gleixner 	ret = cpuhp_setup_state_multi(CPUHP_VIRT_NET_DEAD, "virtio/net:dead",
38588017c279SSebastian Andrzej Siewior 				      NULL, virtnet_cpu_dead);
38598017c279SSebastian Andrzej Siewior 	if (ret)
38608017c279SSebastian Andrzej Siewior 		goto err_dead;
38618017c279SSebastian Andrzej Siewior 	ret = register_virtio_driver(&virtio_net_driver);
38628017c279SSebastian Andrzej Siewior 	if (ret)
38638017c279SSebastian Andrzej Siewior 		goto err_virtio;
38648017c279SSebastian Andrzej Siewior 	return 0;
38658017c279SSebastian Andrzej Siewior err_virtio:
38668017c279SSebastian Andrzej Siewior 	cpuhp_remove_multi_state(CPUHP_VIRT_NET_DEAD);
38678017c279SSebastian Andrzej Siewior err_dead:
38688017c279SSebastian Andrzej Siewior 	cpuhp_remove_multi_state(virtionet_online);
38698017c279SSebastian Andrzej Siewior out:
38708017c279SSebastian Andrzej Siewior 	return ret;
38718017c279SSebastian Andrzej Siewior }
38728017c279SSebastian Andrzej Siewior module_init(virtio_net_driver_init);
38738017c279SSebastian Andrzej Siewior
38748017c279SSebastian Andrzej Siewior static __exit void virtio_net_driver_exit(void)
38758017c279SSebastian Andrzej Siewior {
3876cfa0ebc9SAndrew Jones 	unregister_virtio_driver(&virtio_net_driver);
38778017c279SSebastian Andrzej Siewior 	cpuhp_remove_multi_state(CPUHP_VIRT_NET_DEAD);
38788017c279SSebastian Andrzej Siewior 	cpuhp_remove_multi_state(virtionet_online);
38798017c279SSebastian Andrzej Siewior }
38808017c279SSebastian Andrzej Siewior module_exit(virtio_net_driver_exit);
3881296f96fcSRusty Russell
3882296f96fcSRusty Russell MODULE_DEVICE_TABLE(virtio, id_table);
3883296f96fcSRusty Russell MODULE_DESCRIPTION("Virtio network driver");
3884296f96fcSRusty Russell MODULE_LICENSE("GPL");
3885