xref: /openbmc/linux/drivers/net/virtio_net.c (revision 829cce76cf2d96d4291fa1a3f15f40604f783089)
11ccea77eSThomas Gleixner // SPDX-License-Identifier: GPL-2.0-or-later
248925e37SRusty Russell /* A network driver using virtio.
3296f96fcSRusty Russell  *
4296f96fcSRusty Russell  * Copyright 2007 Rusty Russell <rusty@rustcorp.com.au> IBM Corporation
5296f96fcSRusty Russell  */
6296f96fcSRusty Russell //#define DEBUG
7296f96fcSRusty Russell #include <linux/netdevice.h>
8296f96fcSRusty Russell #include <linux/etherdevice.h>
9a9ea3fc6SHerbert Xu #include <linux/ethtool.h>
10296f96fcSRusty Russell #include <linux/module.h>
11296f96fcSRusty Russell #include <linux/virtio.h>
12296f96fcSRusty Russell #include <linux/virtio_net.h>
13f600b690SJohn Fastabend #include <linux/bpf.h>
14a67edbf4SDaniel Borkmann #include <linux/bpf_trace.h>
15296f96fcSRusty Russell #include <linux/scatterlist.h>
16e918085aSAlex Williamson #include <linux/if_vlan.h>
175a0e3ad6STejun Heo #include <linux/slab.h>
188de4b2f3SWanlong Gao #include <linux/cpu.h>
19ab7db917SMichael Dalton #include <linux/average.h>
20186b3c99SJason Wang #include <linux/filter.h>
212ca653d6SCaleb Raitto #include <linux/kernel.h>
22d85b758fSMichael S. Tsirkin #include <net/route.h>
23754b8a21SJesper Dangaard Brouer #include <net/xdp.h>
24ba5e4426SSridhar Samudrala #include <net/net_failover.h>
2549e47a5bSJakub Kicinski #include <net/netdev_rx_queue.h>
26296f96fcSRusty Russell 
27d34710e3SAmerigo Wang static int napi_weight = NAPI_POLL_WEIGHT;
286c0cd7c0SDor Laor module_param(napi_weight, int, 0444);
296c0cd7c0SDor Laor 
3031c03aefSWillem de Bruijn static bool csum = true, gso = true, napi_tx = true;
3134a48579SRusty Russell module_param(csum, bool, 0444);
3234a48579SRusty Russell module_param(gso, bool, 0444);
33b92f1e67SWillem de Bruijn module_param(napi_tx, bool, 0644);
3434a48579SRusty Russell 
35296f96fcSRusty Russell /* FIXME: MTU in config. */
365061de36SMichael Dalton #define GOOD_PACKET_LEN (ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN)
373f2c31d9SMark McLoughlin #define GOOD_COPY_LEN	128
38296f96fcSRusty Russell 
39f6b10209SJason Wang #define VIRTNET_RX_PAD (NET_IP_ALIGN + NET_SKB_PAD)
40f6b10209SJason Wang 
412de2f7f4SJohn Fastabend /* Amount of XDP headroom to prepend to packets for use by xdp_adjust_head */
422de2f7f4SJohn Fastabend #define VIRTIO_XDP_HEADROOM 256
432de2f7f4SJohn Fastabend 
442471c75eSJesper Dangaard Brouer /* Separating two types of XDP xmit */
452471c75eSJesper Dangaard Brouer #define VIRTIO_XDP_TX		BIT(0)
462471c75eSJesper Dangaard Brouer #define VIRTIO_XDP_REDIR	BIT(1)
472471c75eSJesper Dangaard Brouer 
485050471dSToshiaki Makita #define VIRTIO_XDP_FLAG	BIT(0)
495050471dSToshiaki Makita 
505377d758SJohannes Berg /* RX packet size EWMA. The average packet size is used to determine the packet
515377d758SJohannes Berg  * buffer size when refilling RX rings. As the entire RX ring may be refilled
525377d758SJohannes Berg  * at once, the weight is chosen so that the EWMA will be insensitive to short-
535377d758SJohannes Berg  * term, transient changes in packet size.
54ab7db917SMichael Dalton  */
55eb1e011aSJohannes Berg DECLARE_EWMA(pkt_len, 0, 64)
56ab7db917SMichael Dalton 
5766846048SRick Jones #define VIRTNET_DRIVER_VERSION "1.0.0"
582a41f71dSAlex Williamson 
597acd4329SColin Ian King static const unsigned long guest_offloads[] = {
607acd4329SColin Ian King 	VIRTIO_NET_F_GUEST_TSO4,
613f93522fSJason Wang 	VIRTIO_NET_F_GUEST_TSO6,
623f93522fSJason Wang 	VIRTIO_NET_F_GUEST_ECN,
63e59ff2c4SJason Wang 	VIRTIO_NET_F_GUEST_UFO,
64418044e1SAndrew Melnychenko 	VIRTIO_NET_F_GUEST_CSUM,
65418044e1SAndrew Melnychenko 	VIRTIO_NET_F_GUEST_USO4,
66be50da3eSJiri Pirko 	VIRTIO_NET_F_GUEST_USO6,
67be50da3eSJiri Pirko 	VIRTIO_NET_F_GUEST_HDRLEN
687acd4329SColin Ian King };
693f93522fSJason Wang 
70dbcf24d1SJason Wang #define GUEST_OFFLOAD_GRO_HW_MASK ((1ULL << VIRTIO_NET_F_GUEST_TSO4) | \
711a03b8a3STonghao Zhang 				(1ULL << VIRTIO_NET_F_GUEST_TSO6) | \
721a03b8a3STonghao Zhang 				(1ULL << VIRTIO_NET_F_GUEST_ECN)  | \
73418044e1SAndrew Melnychenko 				(1ULL << VIRTIO_NET_F_GUEST_UFO)  | \
74418044e1SAndrew Melnychenko 				(1ULL << VIRTIO_NET_F_GUEST_USO4) | \
75418044e1SAndrew Melnychenko 				(1ULL << VIRTIO_NET_F_GUEST_USO6))
761a03b8a3STonghao Zhang 
77d7dfc5cfSToshiaki Makita struct virtnet_stat_desc {
78d7dfc5cfSToshiaki Makita 	char desc[ETH_GSTRING_LEN];
79d7dfc5cfSToshiaki Makita 	size_t offset;
803fa2a1dfSstephen hemminger };
813fa2a1dfSstephen hemminger 
82d7dfc5cfSToshiaki Makita struct virtnet_sq_stats {
83d7dfc5cfSToshiaki Makita 	struct u64_stats_sync syncp;
84d7dfc5cfSToshiaki Makita 	u64 packets;
85d7dfc5cfSToshiaki Makita 	u64 bytes;
865b8f3c8dSToshiaki Makita 	u64 xdp_tx;
875b8f3c8dSToshiaki Makita 	u64 xdp_tx_drops;
88461f03dcSToshiaki Makita 	u64 kicks;
89a520794bSTony Lu 	u64 tx_timeouts;
90d7dfc5cfSToshiaki Makita };
91d7dfc5cfSToshiaki Makita 
92d46eeeafSJason Wang struct virtnet_rq_stats {
93d46eeeafSJason Wang 	struct u64_stats_sync syncp;
94d7dfc5cfSToshiaki Makita 	u64 packets;
95d7dfc5cfSToshiaki Makita 	u64 bytes;
962c4a2f7dSToshiaki Makita 	u64 drops;
975b8f3c8dSToshiaki Makita 	u64 xdp_packets;
985b8f3c8dSToshiaki Makita 	u64 xdp_tx;
995b8f3c8dSToshiaki Makita 	u64 xdp_redirects;
1005b8f3c8dSToshiaki Makita 	u64 xdp_drops;
101461f03dcSToshiaki Makita 	u64 kicks;
102d7dfc5cfSToshiaki Makita };
103d7dfc5cfSToshiaki Makita 
104d7dfc5cfSToshiaki Makita #define VIRTNET_SQ_STAT(m)	offsetof(struct virtnet_sq_stats, m)
105d46eeeafSJason Wang #define VIRTNET_RQ_STAT(m)	offsetof(struct virtnet_rq_stats, m)
106d7dfc5cfSToshiaki Makita 
107d7dfc5cfSToshiaki Makita static const struct virtnet_stat_desc virtnet_sq_stats_desc[] = {
108d7dfc5cfSToshiaki Makita 	{ "packets",		VIRTNET_SQ_STAT(packets) },
109d7dfc5cfSToshiaki Makita 	{ "bytes",		VIRTNET_SQ_STAT(bytes) },
1105b8f3c8dSToshiaki Makita 	{ "xdp_tx",		VIRTNET_SQ_STAT(xdp_tx) },
1115b8f3c8dSToshiaki Makita 	{ "xdp_tx_drops",	VIRTNET_SQ_STAT(xdp_tx_drops) },
112461f03dcSToshiaki Makita 	{ "kicks",		VIRTNET_SQ_STAT(kicks) },
113a520794bSTony Lu 	{ "tx_timeouts",	VIRTNET_SQ_STAT(tx_timeouts) },
114d7dfc5cfSToshiaki Makita };
115d7dfc5cfSToshiaki Makita 
116d7dfc5cfSToshiaki Makita static const struct virtnet_stat_desc virtnet_rq_stats_desc[] = {
117d7dfc5cfSToshiaki Makita 	{ "packets",		VIRTNET_RQ_STAT(packets) },
118d7dfc5cfSToshiaki Makita 	{ "bytes",		VIRTNET_RQ_STAT(bytes) },
1192c4a2f7dSToshiaki Makita 	{ "drops",		VIRTNET_RQ_STAT(drops) },
1205b8f3c8dSToshiaki Makita 	{ "xdp_packets",	VIRTNET_RQ_STAT(xdp_packets) },
1215b8f3c8dSToshiaki Makita 	{ "xdp_tx",		VIRTNET_RQ_STAT(xdp_tx) },
1225b8f3c8dSToshiaki Makita 	{ "xdp_redirects",	VIRTNET_RQ_STAT(xdp_redirects) },
1235b8f3c8dSToshiaki Makita 	{ "xdp_drops",		VIRTNET_RQ_STAT(xdp_drops) },
124461f03dcSToshiaki Makita 	{ "kicks",		VIRTNET_RQ_STAT(kicks) },
125d7dfc5cfSToshiaki Makita };
126d7dfc5cfSToshiaki Makita 
127d7dfc5cfSToshiaki Makita #define VIRTNET_SQ_STATS_LEN	ARRAY_SIZE(virtnet_sq_stats_desc)
128d7dfc5cfSToshiaki Makita #define VIRTNET_RQ_STATS_LEN	ARRAY_SIZE(virtnet_rq_stats_desc)
129d7dfc5cfSToshiaki Makita 
130308d7982SGavin Li struct virtnet_interrupt_coalesce {
131308d7982SGavin Li 	u32 max_packets;
132308d7982SGavin Li 	u32 max_usecs;
133308d7982SGavin Li };
134308d7982SGavin Li 
135295525e2SXuan Zhuo /* The dma information of pages allocated at a time. */
136295525e2SXuan Zhuo struct virtnet_rq_dma {
137295525e2SXuan Zhuo 	dma_addr_t addr;
138295525e2SXuan Zhuo 	u32 ref;
139295525e2SXuan Zhuo 	u16 len;
140295525e2SXuan Zhuo 	u16 need_sync;
141295525e2SXuan Zhuo };
142295525e2SXuan Zhuo 
143e9d7417bSJason Wang /* Internal representation of a send virtqueue */
144e9d7417bSJason Wang struct send_queue {
145e9d7417bSJason Wang 	/* Virtqueue associated with this send _queue */
146e9d7417bSJason Wang 	struct virtqueue *vq;
147e9d7417bSJason Wang 
148e9d7417bSJason Wang 	/* TX: fragments + linear part + virtio header */
149e9d7417bSJason Wang 	struct scatterlist sg[MAX_SKB_FRAGS + 2];
150986a4f4dSJason Wang 
151986a4f4dSJason Wang 	/* Name of the send queue: output.$index */
152d0671115SParav Pandit 	char name[16];
153b92f1e67SWillem de Bruijn 
154d7dfc5cfSToshiaki Makita 	struct virtnet_sq_stats stats;
155d7dfc5cfSToshiaki Makita 
156394bd877SGavin Li 	struct virtnet_interrupt_coalesce intr_coal;
157394bd877SGavin Li 
158b92f1e67SWillem de Bruijn 	struct napi_struct napi;
159ebcce492SXuan Zhuo 
160ebcce492SXuan Zhuo 	/* Record whether sq is in reset state. */
161ebcce492SXuan Zhuo 	bool reset;
162e9d7417bSJason Wang };
163e9d7417bSJason Wang 
164e9d7417bSJason Wang /* Internal representation of a receive virtqueue */
165e9d7417bSJason Wang struct receive_queue {
166e9d7417bSJason Wang 	/* Virtqueue associated with this receive_queue */
167e9d7417bSJason Wang 	struct virtqueue *vq;
168e9d7417bSJason Wang 
169296f96fcSRusty Russell 	struct napi_struct napi;
170296f96fcSRusty Russell 
171f600b690SJohn Fastabend 	struct bpf_prog __rcu *xdp_prog;
172f600b690SJohn Fastabend 
173d7dfc5cfSToshiaki Makita 	struct virtnet_rq_stats stats;
174d7dfc5cfSToshiaki Makita 
175394bd877SGavin Li 	struct virtnet_interrupt_coalesce intr_coal;
176394bd877SGavin Li 
177e9d7417bSJason Wang 	/* Chain pages by the private ptr. */
178e9d7417bSJason Wang 	struct page *pages;
179e9d7417bSJason Wang 
180ab7db917SMichael Dalton 	/* Average packet length for mergeable receive buffers. */
1815377d758SJohannes Berg 	struct ewma_pkt_len mrg_avg_pkt_len;
182ab7db917SMichael Dalton 
183fb51879dSMichael Dalton 	/* Page frag for packet buffer allocation. */
184fb51879dSMichael Dalton 	struct page_frag alloc_frag;
185fb51879dSMichael Dalton 
186e9d7417bSJason Wang 	/* RX: fragments + linear part + virtio header */
187e9d7417bSJason Wang 	struct scatterlist sg[MAX_SKB_FRAGS + 2];
188986a4f4dSJason Wang 
189d85b758fSMichael S. Tsirkin 	/* Min single buffer size for mergeable buffers case. */
190d85b758fSMichael S. Tsirkin 	unsigned int min_buf_len;
191d85b758fSMichael S. Tsirkin 
192986a4f4dSJason Wang 	/* Name of this receive queue: input.$index */
193d0671115SParav Pandit 	char name[16];
194754b8a21SJesper Dangaard Brouer 
195754b8a21SJesper Dangaard Brouer 	struct xdp_rxq_info xdp_rxq;
196295525e2SXuan Zhuo 
197295525e2SXuan Zhuo 	/* Record the last dma info to free after new pages is allocated. */
198295525e2SXuan Zhuo 	struct virtnet_rq_dma *last_dma;
199295525e2SXuan Zhuo 
200295525e2SXuan Zhuo 	/* Do dma by self */
201295525e2SXuan Zhuo 	bool do_dma;
202e9d7417bSJason Wang };
203e9d7417bSJason Wang 
204c7114b12SAndrew Melnychenko /* This structure can contain rss message with maximum settings for indirection table and keysize
205c7114b12SAndrew Melnychenko  * Note, that default structure that describes RSS configuration virtio_net_rss_config
206c7114b12SAndrew Melnychenko  * contains same info but can't handle table values.
207c7114b12SAndrew Melnychenko  * In any case, structure would be passed to virtio hw through sg_buf split by parts
208c7114b12SAndrew Melnychenko  * because table sizes may be differ according to the device configuration.
209c7114b12SAndrew Melnychenko  */
210c7114b12SAndrew Melnychenko #define VIRTIO_NET_RSS_MAX_KEY_SIZE     40
211c7114b12SAndrew Melnychenko #define VIRTIO_NET_RSS_MAX_TABLE_LEN    128
212c7114b12SAndrew Melnychenko struct virtio_net_ctrl_rss {
213c7114b12SAndrew Melnychenko 	u32 hash_types;
214c7114b12SAndrew Melnychenko 	u16 indirection_table_mask;
215c7114b12SAndrew Melnychenko 	u16 unclassified_queue;
216c7114b12SAndrew Melnychenko 	u16 indirection_table[VIRTIO_NET_RSS_MAX_TABLE_LEN];
217c7114b12SAndrew Melnychenko 	u16 max_tx_vq;
218c7114b12SAndrew Melnychenko 	u8 hash_key_length;
219c7114b12SAndrew Melnychenko 	u8 key[VIRTIO_NET_RSS_MAX_KEY_SIZE];
220c7114b12SAndrew Melnychenko };
221c7114b12SAndrew Melnychenko 
22212e57169SMichael S. Tsirkin /* Control VQ buffers: protected by the rtnl lock */
22312e57169SMichael S. Tsirkin struct control_buf {
22412e57169SMichael S. Tsirkin 	struct virtio_net_ctrl_hdr hdr;
22512e57169SMichael S. Tsirkin 	virtio_net_ctrl_ack status;
22612e57169SMichael S. Tsirkin 	struct virtio_net_ctrl_mq mq;
22712e57169SMichael S. Tsirkin 	u8 promisc;
22812e57169SMichael S. Tsirkin 	u8 allmulti;
229d7fad4c8SMichael S. Tsirkin 	__virtio16 vid;
230f4ee703aSMichael S. Tsirkin 	__virtio64 offloads;
231c7114b12SAndrew Melnychenko 	struct virtio_net_ctrl_rss rss;
232accc1bf2SBrett Creeley 	struct virtio_net_ctrl_coal_tx coal_tx;
233accc1bf2SBrett Creeley 	struct virtio_net_ctrl_coal_rx coal_rx;
234394bd877SGavin Li 	struct virtio_net_ctrl_coal_vq coal_vq;
23512e57169SMichael S. Tsirkin };
23612e57169SMichael S. Tsirkin 
237e9d7417bSJason Wang struct virtnet_info {
238e9d7417bSJason Wang 	struct virtio_device *vdev;
239e9d7417bSJason Wang 	struct virtqueue *cvq;
240e9d7417bSJason Wang 	struct net_device *dev;
241986a4f4dSJason Wang 	struct send_queue *sq;
242986a4f4dSJason Wang 	struct receive_queue *rq;
243e9d7417bSJason Wang 	unsigned int status;
244e9d7417bSJason Wang 
245986a4f4dSJason Wang 	/* Max # of queue pairs supported by the device */
246986a4f4dSJason Wang 	u16 max_queue_pairs;
247986a4f4dSJason Wang 
248986a4f4dSJason Wang 	/* # of queue pairs currently used by the driver */
249986a4f4dSJason Wang 	u16 curr_queue_pairs;
250986a4f4dSJason Wang 
251672aafd5SJohn Fastabend 	/* # of XDP queue pairs currently used by the driver */
252672aafd5SJohn Fastabend 	u16 xdp_queue_pairs;
253672aafd5SJohn Fastabend 
25497c2c69eSXuan Zhuo 	/* xdp_queue_pairs may be 0, when xdp is already loaded. So add this. */
25597c2c69eSXuan Zhuo 	bool xdp_enabled;
25697c2c69eSXuan Zhuo 
25797402b96SHerbert Xu 	/* I like... big packets and I cannot lie! */
25897402b96SHerbert Xu 	bool big_packets;
25997402b96SHerbert Xu 
2604959aebbSGavin Li 	/* number of sg entries allocated for big packets */
2614959aebbSGavin Li 	unsigned int big_packets_num_skbfrags;
2624959aebbSGavin Li 
2633f2c31d9SMark McLoughlin 	/* Host will merge rx buffers for big packets (shake it! shake it!) */
2643f2c31d9SMark McLoughlin 	bool mergeable_rx_bufs;
2653f2c31d9SMark McLoughlin 
266c7114b12SAndrew Melnychenko 	/* Host supports rss and/or hash report */
267c7114b12SAndrew Melnychenko 	bool has_rss;
26891f41f01SAndrew Melnychenko 	bool has_rss_hash_report;
269c7114b12SAndrew Melnychenko 	u8 rss_key_size;
270c7114b12SAndrew Melnychenko 	u16 rss_indir_table_size;
271c7114b12SAndrew Melnychenko 	u32 rss_hash_types_supported;
272c1170820SAndrew Melnychenko 	u32 rss_hash_types_saved;
273c7114b12SAndrew Melnychenko 
274986a4f4dSJason Wang 	/* Has control virtqueue */
275986a4f4dSJason Wang 	bool has_cvq;
276986a4f4dSJason Wang 
277e7428e95SMichael S. Tsirkin 	/* Host can handle any s/g split between our header and packet data */
278e7428e95SMichael S. Tsirkin 	bool any_header_sg;
279e7428e95SMichael S. Tsirkin 
280012873d0SMichael S. Tsirkin 	/* Packet virtio header size */
281012873d0SMichael S. Tsirkin 	u8 hdr_len;
282012873d0SMichael S. Tsirkin 
2835a159128SJason Wang 	/* Work struct for delayed refilling if we run low on memory. */
2843161e453SRusty Russell 	struct delayed_work refill;
2853161e453SRusty Russell 
2865a159128SJason Wang 	/* Is delayed refill enabled? */
2875a159128SJason Wang 	bool refill_enabled;
2885a159128SJason Wang 
2895a159128SJason Wang 	/* The lock to synchronize the access to refill_enabled */
2905a159128SJason Wang 	spinlock_t refill_lock;
2915a159128SJason Wang 
292586d17c5SJason Wang 	/* Work struct for config space updates */
293586d17c5SJason Wang 	struct work_struct config_work;
294586d17c5SJason Wang 
295986a4f4dSJason Wang 	/* Does the affinity hint is set for virtqueues? */
296986a4f4dSJason Wang 	bool affinity_hint_set;
29747be2479SWanlong Gao 
2988017c279SSebastian Andrzej Siewior 	/* CPU hotplug instances for online & dead */
2998017c279SSebastian Andrzej Siewior 	struct hlist_node node;
3008017c279SSebastian Andrzej Siewior 	struct hlist_node node_dead;
3012ac46030SMichael S. Tsirkin 
30212e57169SMichael S. Tsirkin 	struct control_buf *ctrl;
30316032be5SNikolay Aleksandrov 
30416032be5SNikolay Aleksandrov 	/* Ethtool settings */
30516032be5SNikolay Aleksandrov 	u8 duplex;
30616032be5SNikolay Aleksandrov 	u32 speed;
3073f93522fSJason Wang 
308699b045aSAlvaro Karsz 	/* Interrupt coalescing settings */
309308d7982SGavin Li 	struct virtnet_interrupt_coalesce intr_coal_tx;
310308d7982SGavin Li 	struct virtnet_interrupt_coalesce intr_coal_rx;
311699b045aSAlvaro Karsz 
3123f93522fSJason Wang 	unsigned long guest_offloads;
313a02e8964SWillem de Bruijn 	unsigned long guest_offloads_capable;
314ba5e4426SSridhar Samudrala 
315ba5e4426SSridhar Samudrala 	/* failover when STANDBY feature enabled */
316ba5e4426SSridhar Samudrala 	struct failover *failover;
317296f96fcSRusty Russell };
318296f96fcSRusty Russell 
3199ab86bbcSShirley Ma struct padded_vnet_hdr {
320c1ddc42dSAndrew Melnychenko 	struct virtio_net_hdr_v1_hash hdr;
3219ab86bbcSShirley Ma 	/*
322012873d0SMichael S. Tsirkin 	 * hdr is in a separate sg buffer, and data sg buffer shares same page
323012873d0SMichael S. Tsirkin 	 * with this header sg. This padding makes next sg 16 byte aligned
324012873d0SMichael S. Tsirkin 	 * after the header.
3259ab86bbcSShirley Ma 	 */
326c1ddc42dSAndrew Melnychenko 	char padding[12];
3279ab86bbcSShirley Ma };
3289ab86bbcSShirley Ma 
329dae64749SFeng Liu struct virtio_net_common_hdr {
330dae64749SFeng Liu 	union {
331dae64749SFeng Liu 		struct virtio_net_hdr hdr;
332dae64749SFeng Liu 		struct virtio_net_hdr_mrg_rxbuf	mrg_hdr;
333dae64749SFeng Liu 		struct virtio_net_hdr_v1_hash hash_v1_hdr;
334dae64749SFeng Liu 	};
335dae64749SFeng Liu };
336dae64749SFeng Liu 
3376a4763e2SXuan Zhuo static void virtnet_rq_free_unused_buf(struct virtqueue *vq, void *buf);
338ebcce492SXuan Zhuo static void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf);
3396a4763e2SXuan Zhuo 
3405050471dSToshiaki Makita static bool is_xdp_frame(void *ptr)
3415050471dSToshiaki Makita {
3425050471dSToshiaki Makita 	return (unsigned long)ptr & VIRTIO_XDP_FLAG;
3435050471dSToshiaki Makita }
3445050471dSToshiaki Makita 
3455050471dSToshiaki Makita static void *xdp_to_ptr(struct xdp_frame *ptr)
3465050471dSToshiaki Makita {
3475050471dSToshiaki Makita 	return (void *)((unsigned long)ptr | VIRTIO_XDP_FLAG);
3485050471dSToshiaki Makita }
3495050471dSToshiaki Makita 
3505050471dSToshiaki Makita static struct xdp_frame *ptr_to_xdp(void *ptr)
3515050471dSToshiaki Makita {
3525050471dSToshiaki Makita 	return (struct xdp_frame *)((unsigned long)ptr & ~VIRTIO_XDP_FLAG);
3535050471dSToshiaki Makita }
3545050471dSToshiaki Makita 
355986a4f4dSJason Wang /* Converting between virtqueue no. and kernel tx/rx queue no.
356986a4f4dSJason Wang  * 0:rx0 1:tx0 2:rx1 3:tx1 ... 2N:rxN 2N+1:txN 2N+2:cvq
357986a4f4dSJason Wang  */
358986a4f4dSJason Wang static int vq2txq(struct virtqueue *vq)
359986a4f4dSJason Wang {
3609d0ca6edSRusty Russell 	return (vq->index - 1) / 2;
361986a4f4dSJason Wang }
362986a4f4dSJason Wang 
363986a4f4dSJason Wang static int txq2vq(int txq)
364986a4f4dSJason Wang {
365986a4f4dSJason Wang 	return txq * 2 + 1;
366986a4f4dSJason Wang }
367986a4f4dSJason Wang 
368986a4f4dSJason Wang static int vq2rxq(struct virtqueue *vq)
369986a4f4dSJason Wang {
3709d0ca6edSRusty Russell 	return vq->index / 2;
371986a4f4dSJason Wang }
372986a4f4dSJason Wang 
373986a4f4dSJason Wang static int rxq2vq(int rxq)
374986a4f4dSJason Wang {
375986a4f4dSJason Wang 	return rxq * 2;
376986a4f4dSJason Wang }
377986a4f4dSJason Wang 
378dae64749SFeng Liu static inline struct virtio_net_common_hdr *
379dae64749SFeng Liu skb_vnet_common_hdr(struct sk_buff *skb)
380296f96fcSRusty Russell {
381dae64749SFeng Liu 	return (struct virtio_net_common_hdr *)skb->cb;
382296f96fcSRusty Russell }
383296f96fcSRusty Russell 
3849ab86bbcSShirley Ma /*
3859ab86bbcSShirley Ma  * private is used to chain pages for big packets, put the whole
3869ab86bbcSShirley Ma  * most recent used list in the beginning for reuse
3879ab86bbcSShirley Ma  */
388e9d7417bSJason Wang static void give_pages(struct receive_queue *rq, struct page *page)
389fb6813f4SRusty Russell {
3909ab86bbcSShirley Ma 	struct page *end;
3919ab86bbcSShirley Ma 
392e9d7417bSJason Wang 	/* Find end of list, sew whole thing into vi->rq.pages. */
3939ab86bbcSShirley Ma 	for (end = page; end->private; end = (struct page *)end->private);
394e9d7417bSJason Wang 	end->private = (unsigned long)rq->pages;
395e9d7417bSJason Wang 	rq->pages = page;
396fb6813f4SRusty Russell }
397fb6813f4SRusty Russell 
398e9d7417bSJason Wang static struct page *get_a_page(struct receive_queue *rq, gfp_t gfp_mask)
399fb6813f4SRusty Russell {
400e9d7417bSJason Wang 	struct page *p = rq->pages;
401fb6813f4SRusty Russell 
4029ab86bbcSShirley Ma 	if (p) {
403e9d7417bSJason Wang 		rq->pages = (struct page *)p->private;
4049ab86bbcSShirley Ma 		/* clear private here, it is used to chain pages */
4059ab86bbcSShirley Ma 		p->private = 0;
4069ab86bbcSShirley Ma 	} else
407fb6813f4SRusty Russell 		p = alloc_page(gfp_mask);
408fb6813f4SRusty Russell 	return p;
409fb6813f4SRusty Russell }
410fb6813f4SRusty Russell 
4115a159128SJason Wang static void enable_delayed_refill(struct virtnet_info *vi)
4125a159128SJason Wang {
4135a159128SJason Wang 	spin_lock_bh(&vi->refill_lock);
4145a159128SJason Wang 	vi->refill_enabled = true;
4155a159128SJason Wang 	spin_unlock_bh(&vi->refill_lock);
4165a159128SJason Wang }
4175a159128SJason Wang 
4185a159128SJason Wang static void disable_delayed_refill(struct virtnet_info *vi)
4195a159128SJason Wang {
4205a159128SJason Wang 	spin_lock_bh(&vi->refill_lock);
4215a159128SJason Wang 	vi->refill_enabled = false;
4225a159128SJason Wang 	spin_unlock_bh(&vi->refill_lock);
4235a159128SJason Wang }
4245a159128SJason Wang 
425e4e8452aSWillem de Bruijn static void virtqueue_napi_schedule(struct napi_struct *napi,
426e4e8452aSWillem de Bruijn 				    struct virtqueue *vq)
427e4e8452aSWillem de Bruijn {
428e4e8452aSWillem de Bruijn 	if (napi_schedule_prep(napi)) {
429e4e8452aSWillem de Bruijn 		virtqueue_disable_cb(vq);
430e4e8452aSWillem de Bruijn 		__napi_schedule(napi);
431e4e8452aSWillem de Bruijn 	}
432e4e8452aSWillem de Bruijn }
433e4e8452aSWillem de Bruijn 
434e4e8452aSWillem de Bruijn static void virtqueue_napi_complete(struct napi_struct *napi,
435e4e8452aSWillem de Bruijn 				    struct virtqueue *vq, int processed)
436e4e8452aSWillem de Bruijn {
437e4e8452aSWillem de Bruijn 	int opaque;
438e4e8452aSWillem de Bruijn 
439e4e8452aSWillem de Bruijn 	opaque = virtqueue_enable_cb_prepare(vq);
440fdaa767aSToshiaki Makita 	if (napi_complete_done(napi, processed)) {
441fdaa767aSToshiaki Makita 		if (unlikely(virtqueue_poll(vq, opaque)))
442e4e8452aSWillem de Bruijn 			virtqueue_napi_schedule(napi, vq);
443fdaa767aSToshiaki Makita 	} else {
444fdaa767aSToshiaki Makita 		virtqueue_disable_cb(vq);
445fdaa767aSToshiaki Makita 	}
446e4e8452aSWillem de Bruijn }
447e4e8452aSWillem de Bruijn 
448e9d7417bSJason Wang static void skb_xmit_done(struct virtqueue *vq)
449296f96fcSRusty Russell {
450e9d7417bSJason Wang 	struct virtnet_info *vi = vq->vdev->priv;
451b92f1e67SWillem de Bruijn 	struct napi_struct *napi = &vi->sq[vq2txq(vq)].napi;
452296f96fcSRusty Russell 
4532cb9c6baSRusty Russell 	/* Suppress further interrupts. */
454e9d7417bSJason Wang 	virtqueue_disable_cb(vq);
45511a3a154SRusty Russell 
456b92f1e67SWillem de Bruijn 	if (napi->weight)
457b92f1e67SWillem de Bruijn 		virtqueue_napi_schedule(napi, vq);
458b92f1e67SWillem de Bruijn 	else
459363f1514SRusty Russell 		/* We were probably waiting for more output buffers. */
460986a4f4dSJason Wang 		netif_wake_subqueue(vi->dev, vq2txq(vq));
461296f96fcSRusty Russell }
462296f96fcSRusty Russell 
46328b39bc7SJason Wang #define MRG_CTX_HEADER_SHIFT 22
46428b39bc7SJason Wang static void *mergeable_len_to_ctx(unsigned int truesize,
46528b39bc7SJason Wang 				  unsigned int headroom)
46628b39bc7SJason Wang {
46728b39bc7SJason Wang 	return (void *)(unsigned long)((headroom << MRG_CTX_HEADER_SHIFT) | truesize);
46828b39bc7SJason Wang }
46928b39bc7SJason Wang 
47028b39bc7SJason Wang static unsigned int mergeable_ctx_to_headroom(void *mrg_ctx)
47128b39bc7SJason Wang {
47228b39bc7SJason Wang 	return (unsigned long)mrg_ctx >> MRG_CTX_HEADER_SHIFT;
47328b39bc7SJason Wang }
47428b39bc7SJason Wang 
47528b39bc7SJason Wang static unsigned int mergeable_ctx_to_truesize(void *mrg_ctx)
47628b39bc7SJason Wang {
47728b39bc7SJason Wang 	return (unsigned long)mrg_ctx & ((1 << MRG_CTX_HEADER_SHIFT) - 1);
47828b39bc7SJason Wang }
47928b39bc7SJason Wang 
48021e26a71SXuan Zhuo static struct sk_buff *virtnet_build_skb(void *buf, unsigned int buflen,
48121e26a71SXuan Zhuo 					 unsigned int headroom,
48221e26a71SXuan Zhuo 					 unsigned int len)
48321e26a71SXuan Zhuo {
48421e26a71SXuan Zhuo 	struct sk_buff *skb;
48521e26a71SXuan Zhuo 
48621e26a71SXuan Zhuo 	skb = build_skb(buf, buflen);
48721e26a71SXuan Zhuo 	if (unlikely(!skb))
48821e26a71SXuan Zhuo 		return NULL;
48921e26a71SXuan Zhuo 
49021e26a71SXuan Zhuo 	skb_reserve(skb, headroom);
49121e26a71SXuan Zhuo 	skb_put(skb, len);
49221e26a71SXuan Zhuo 
49321e26a71SXuan Zhuo 	return skb;
49421e26a71SXuan Zhuo }
49521e26a71SXuan Zhuo 
4963464645aSMike Waychison /* Called from bottom half context */
497946fa564SMichael S. Tsirkin static struct sk_buff *page_to_skb(struct virtnet_info *vi,
498946fa564SMichael S. Tsirkin 				   struct receive_queue *rq,
4992613af0eSMichael Dalton 				   struct page *page, unsigned int offset,
500fa0f1ba7SXuan Zhuo 				   unsigned int len, unsigned int truesize,
501fa0f1ba7SXuan Zhuo 				   unsigned int headroom)
5029ab86bbcSShirley Ma {
5039ab86bbcSShirley Ma 	struct sk_buff *skb;
504dae64749SFeng Liu 	struct virtio_net_common_hdr *hdr;
5052613af0eSMichael Dalton 	unsigned int copy, hdr_len, hdr_padded_len;
506af39c8f7SEric Dumazet 	struct page *page_to_free = NULL;
507fb32856bSXuan Zhuo 	int tailroom, shinfo_size;
508f80bd740SXuan Zhuo 	char *p, *hdr_p, *buf;
5099ab86bbcSShirley Ma 
5102613af0eSMichael Dalton 	p = page_address(page) + offset;
511fb32856bSXuan Zhuo 	hdr_p = p;
5129ab86bbcSShirley Ma 
513012873d0SMichael S. Tsirkin 	hdr_len = vi->hdr_len;
514012873d0SMichael S. Tsirkin 	if (vi->mergeable_rx_bufs)
515c1ddc42dSAndrew Melnychenko 		hdr_padded_len = hdr_len;
516012873d0SMichael S. Tsirkin 	else
5172613af0eSMichael Dalton 		hdr_padded_len = sizeof(struct padded_vnet_hdr);
5183f2c31d9SMark McLoughlin 
519fa0f1ba7SXuan Zhuo 	buf = p - headroom;
5209ab86bbcSShirley Ma 	len -= hdr_len;
5212613af0eSMichael Dalton 	offset += hdr_padded_len;
5222613af0eSMichael Dalton 	p += hdr_padded_len;
523fa0f1ba7SXuan Zhuo 	tailroom = truesize - headroom  - hdr_padded_len - len;
5243f2c31d9SMark McLoughlin 
525fb32856bSXuan Zhuo 	shinfo_size = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
526fb32856bSXuan Zhuo 
527f80bd740SXuan Zhuo 	/* copy small packet so we can reuse these pages */
528f5d7872aSEric Dumazet 	if (!NET_IP_ALIGN && len > GOOD_COPY_LEN && tailroom >= shinfo_size) {
52921e26a71SXuan Zhuo 		skb = virtnet_build_skb(buf, truesize, p - buf, len);
530fb32856bSXuan Zhuo 		if (unlikely(!skb))
531fb32856bSXuan Zhuo 			return NULL;
532fb32856bSXuan Zhuo 
533afd92d82SJason Wang 		page = (struct page *)page->private;
534afd92d82SJason Wang 		if (page)
535afd92d82SJason Wang 			give_pages(rq, page);
536fb32856bSXuan Zhuo 		goto ok;
537fb32856bSXuan Zhuo 	}
538fb32856bSXuan Zhuo 
539fb32856bSXuan Zhuo 	/* copy small packet so we can reuse these pages for small data */
540fb32856bSXuan Zhuo 	skb = napi_alloc_skb(&rq->napi, GOOD_COPY_LEN);
541fb32856bSXuan Zhuo 	if (unlikely(!skb))
542fb32856bSXuan Zhuo 		return NULL;
543fb32856bSXuan Zhuo 
5440f6925b3SEric Dumazet 	/* Copy all frame if it fits skb->head, otherwise
5450f6925b3SEric Dumazet 	 * we let virtio_net_hdr_to_skb() and GRO pull headers as needed.
5460f6925b3SEric Dumazet 	 */
5470f6925b3SEric Dumazet 	if (len <= skb_tailroom(skb))
5483f2c31d9SMark McLoughlin 		copy = len;
5490f6925b3SEric Dumazet 	else
55018117a84SHeng Qi 		copy = ETH_HLEN;
55159ae1d12SJohannes Berg 	skb_put_data(skb, p, copy);
5523f2c31d9SMark McLoughlin 
5533f2c31d9SMark McLoughlin 	len -= copy;
5549ab86bbcSShirley Ma 	offset += copy;
5553f2c31d9SMark McLoughlin 
5562613af0eSMichael Dalton 	if (vi->mergeable_rx_bufs) {
5572613af0eSMichael Dalton 		if (len)
5582613af0eSMichael Dalton 			skb_add_rx_frag(skb, 0, page, offset, len, truesize);
5592613af0eSMichael Dalton 		else
560af39c8f7SEric Dumazet 			page_to_free = page;
561fb32856bSXuan Zhuo 		goto ok;
5622613af0eSMichael Dalton 	}
5632613af0eSMichael Dalton 
564e878d78bSSasha Levin 	/*
565e878d78bSSasha Levin 	 * Verify that we can indeed put this data into a skb.
566e878d78bSSasha Levin 	 * This is here to handle cases when the device erroneously
567e878d78bSSasha Levin 	 * tries to receive more than is possible. This is usually
568e878d78bSSasha Levin 	 * the case of a broken device.
569e878d78bSSasha Levin 	 */
570e878d78bSSasha Levin 	if (unlikely(len > MAX_SKB_FRAGS * PAGE_SIZE)) {
571be443899SAmerigo Wang 		net_dbg_ratelimited("%s: too much data\n", skb->dev->name);
572e878d78bSSasha Levin 		dev_kfree_skb(skb);
573e878d78bSSasha Levin 		return NULL;
574e878d78bSSasha Levin 	}
5752613af0eSMichael Dalton 	BUG_ON(offset >= PAGE_SIZE);
5769ab86bbcSShirley Ma 	while (len) {
5772613af0eSMichael Dalton 		unsigned int frag_size = min((unsigned)PAGE_SIZE - offset, len);
5782613af0eSMichael Dalton 		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, offset,
5792613af0eSMichael Dalton 				frag_size, truesize);
5802613af0eSMichael Dalton 		len -= frag_size;
5819ab86bbcSShirley Ma 		page = (struct page *)page->private;
5829ab86bbcSShirley Ma 		offset = 0;
5833f2c31d9SMark McLoughlin 	}
5843f2c31d9SMark McLoughlin 
5859ab86bbcSShirley Ma 	if (page)
586e9d7417bSJason Wang 		give_pages(rq, page);
5873f2c31d9SMark McLoughlin 
588fb32856bSXuan Zhuo ok:
589dae64749SFeng Liu 	hdr = skb_vnet_common_hdr(skb);
590fb32856bSXuan Zhuo 	memcpy(hdr, hdr_p, hdr_len);
591af39c8f7SEric Dumazet 	if (page_to_free)
592af39c8f7SEric Dumazet 		put_page(page_to_free);
593fb32856bSXuan Zhuo 
5949ab86bbcSShirley Ma 	return skb;
5959ab86bbcSShirley Ma }
5969ab86bbcSShirley Ma 
597295525e2SXuan Zhuo static void virtnet_rq_unmap(struct receive_queue *rq, void *buf, u32 len)
598295525e2SXuan Zhuo {
599295525e2SXuan Zhuo 	struct page *page = virt_to_head_page(buf);
600295525e2SXuan Zhuo 	struct virtnet_rq_dma *dma;
601295525e2SXuan Zhuo 	void *head;
602295525e2SXuan Zhuo 	int offset;
603295525e2SXuan Zhuo 
604295525e2SXuan Zhuo 	head = page_address(page);
605295525e2SXuan Zhuo 
606295525e2SXuan Zhuo 	dma = head;
607295525e2SXuan Zhuo 
608295525e2SXuan Zhuo 	--dma->ref;
609295525e2SXuan Zhuo 
610295525e2SXuan Zhuo 	if (dma->need_sync && len) {
611295525e2SXuan Zhuo 		offset = buf - (head + sizeof(*dma));
612295525e2SXuan Zhuo 
6135720c43dSXuan Zhuo 		virtqueue_dma_sync_single_range_for_cpu(rq->vq, dma->addr,
6145720c43dSXuan Zhuo 							offset, len,
6155720c43dSXuan Zhuo 							DMA_FROM_DEVICE);
616295525e2SXuan Zhuo 	}
617295525e2SXuan Zhuo 
6185720c43dSXuan Zhuo 	if (dma->ref)
619295525e2SXuan Zhuo 		return;
620295525e2SXuan Zhuo 
621295525e2SXuan Zhuo 	virtqueue_dma_unmap_single_attrs(rq->vq, dma->addr, dma->len,
622295525e2SXuan Zhuo 					 DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
623295525e2SXuan Zhuo 	put_page(page);
624295525e2SXuan Zhuo }
625295525e2SXuan Zhuo 
626295525e2SXuan Zhuo static void *virtnet_rq_get_buf(struct receive_queue *rq, u32 *len, void **ctx)
627295525e2SXuan Zhuo {
628295525e2SXuan Zhuo 	void *buf;
629295525e2SXuan Zhuo 
630295525e2SXuan Zhuo 	buf = virtqueue_get_buf_ctx(rq->vq, len, ctx);
631295525e2SXuan Zhuo 	if (buf && rq->do_dma)
632295525e2SXuan Zhuo 		virtnet_rq_unmap(rq, buf, *len);
633295525e2SXuan Zhuo 
634295525e2SXuan Zhuo 	return buf;
635295525e2SXuan Zhuo }
636295525e2SXuan Zhuo 
637295525e2SXuan Zhuo static void *virtnet_rq_detach_unused_buf(struct receive_queue *rq)
638295525e2SXuan Zhuo {
639295525e2SXuan Zhuo 	void *buf;
640295525e2SXuan Zhuo 
641295525e2SXuan Zhuo 	buf = virtqueue_detach_unused_buf(rq->vq);
642295525e2SXuan Zhuo 	if (buf && rq->do_dma)
643295525e2SXuan Zhuo 		virtnet_rq_unmap(rq, buf, 0);
644295525e2SXuan Zhuo 
645295525e2SXuan Zhuo 	return buf;
646295525e2SXuan Zhuo }
647295525e2SXuan Zhuo 
648295525e2SXuan Zhuo static void virtnet_rq_init_one_sg(struct receive_queue *rq, void *buf, u32 len)
649295525e2SXuan Zhuo {
650295525e2SXuan Zhuo 	struct virtnet_rq_dma *dma;
651295525e2SXuan Zhuo 	dma_addr_t addr;
652295525e2SXuan Zhuo 	u32 offset;
653295525e2SXuan Zhuo 	void *head;
654295525e2SXuan Zhuo 
655295525e2SXuan Zhuo 	if (!rq->do_dma) {
656295525e2SXuan Zhuo 		sg_init_one(rq->sg, buf, len);
657295525e2SXuan Zhuo 		return;
658295525e2SXuan Zhuo 	}
659295525e2SXuan Zhuo 
660295525e2SXuan Zhuo 	head = page_address(rq->alloc_frag.page);
661295525e2SXuan Zhuo 
662295525e2SXuan Zhuo 	offset = buf - head;
663295525e2SXuan Zhuo 
664295525e2SXuan Zhuo 	dma = head;
665295525e2SXuan Zhuo 
666295525e2SXuan Zhuo 	addr = dma->addr - sizeof(*dma) + offset;
667295525e2SXuan Zhuo 
668295525e2SXuan Zhuo 	sg_init_table(rq->sg, 1);
669295525e2SXuan Zhuo 	rq->sg[0].dma_address = addr;
670295525e2SXuan Zhuo 	rq->sg[0].length = len;
671295525e2SXuan Zhuo }
672295525e2SXuan Zhuo 
673295525e2SXuan Zhuo static void *virtnet_rq_alloc(struct receive_queue *rq, u32 size, gfp_t gfp)
674295525e2SXuan Zhuo {
675295525e2SXuan Zhuo 	struct page_frag *alloc_frag = &rq->alloc_frag;
676295525e2SXuan Zhuo 	struct virtnet_rq_dma *dma;
677295525e2SXuan Zhuo 	void *buf, *head;
678295525e2SXuan Zhuo 	dma_addr_t addr;
679295525e2SXuan Zhuo 
680295525e2SXuan Zhuo 	if (unlikely(!skb_page_frag_refill(size, alloc_frag, gfp)))
681295525e2SXuan Zhuo 		return NULL;
682295525e2SXuan Zhuo 
683295525e2SXuan Zhuo 	head = page_address(alloc_frag->page);
684295525e2SXuan Zhuo 
685295525e2SXuan Zhuo 	if (rq->do_dma) {
686295525e2SXuan Zhuo 		dma = head;
687295525e2SXuan Zhuo 
688295525e2SXuan Zhuo 		/* new pages */
689295525e2SXuan Zhuo 		if (!alloc_frag->offset) {
690295525e2SXuan Zhuo 			if (rq->last_dma) {
691295525e2SXuan Zhuo 				/* Now, the new page is allocated, the last dma
692295525e2SXuan Zhuo 				 * will not be used. So the dma can be unmapped
693295525e2SXuan Zhuo 				 * if the ref is 0.
694295525e2SXuan Zhuo 				 */
695295525e2SXuan Zhuo 				virtnet_rq_unmap(rq, rq->last_dma, 0);
696295525e2SXuan Zhuo 				rq->last_dma = NULL;
697295525e2SXuan Zhuo 			}
698295525e2SXuan Zhuo 
699295525e2SXuan Zhuo 			dma->len = alloc_frag->size - sizeof(*dma);
700295525e2SXuan Zhuo 
701295525e2SXuan Zhuo 			addr = virtqueue_dma_map_single_attrs(rq->vq, dma + 1,
702295525e2SXuan Zhuo 							      dma->len, DMA_FROM_DEVICE, 0);
703295525e2SXuan Zhuo 			if (virtqueue_dma_mapping_error(rq->vq, addr))
704295525e2SXuan Zhuo 				return NULL;
705295525e2SXuan Zhuo 
706295525e2SXuan Zhuo 			dma->addr = addr;
707295525e2SXuan Zhuo 			dma->need_sync = virtqueue_dma_need_sync(rq->vq, addr);
708295525e2SXuan Zhuo 
709295525e2SXuan Zhuo 			/* Add a reference to dma to prevent the entire dma from
710295525e2SXuan Zhuo 			 * being released during error handling. This reference
711295525e2SXuan Zhuo 			 * will be freed after the pages are no longer used.
712295525e2SXuan Zhuo 			 */
713295525e2SXuan Zhuo 			get_page(alloc_frag->page);
714295525e2SXuan Zhuo 			dma->ref = 1;
715295525e2SXuan Zhuo 			alloc_frag->offset = sizeof(*dma);
716295525e2SXuan Zhuo 
717295525e2SXuan Zhuo 			rq->last_dma = dma;
718295525e2SXuan Zhuo 		}
719295525e2SXuan Zhuo 
720295525e2SXuan Zhuo 		++dma->ref;
721295525e2SXuan Zhuo 	}
722295525e2SXuan Zhuo 
723295525e2SXuan Zhuo 	buf = head + alloc_frag->offset;
724295525e2SXuan Zhuo 
725295525e2SXuan Zhuo 	get_page(alloc_frag->page);
726295525e2SXuan Zhuo 	alloc_frag->offset += size;
727295525e2SXuan Zhuo 
728295525e2SXuan Zhuo 	return buf;
729295525e2SXuan Zhuo }
730295525e2SXuan Zhuo 
731295525e2SXuan Zhuo static void virtnet_rq_set_premapped(struct virtnet_info *vi)
732295525e2SXuan Zhuo {
733295525e2SXuan Zhuo 	int i;
734295525e2SXuan Zhuo 
735295525e2SXuan Zhuo 	/* disable for big mode */
736295525e2SXuan Zhuo 	if (!vi->mergeable_rx_bufs && vi->big_packets)
737295525e2SXuan Zhuo 		return;
738295525e2SXuan Zhuo 
739295525e2SXuan Zhuo 	for (i = 0; i < vi->max_queue_pairs; i++) {
740295525e2SXuan Zhuo 		if (virtqueue_set_dma_premapped(vi->rq[i].vq))
741295525e2SXuan Zhuo 			continue;
742295525e2SXuan Zhuo 
743295525e2SXuan Zhuo 		vi->rq[i].do_dma = true;
744295525e2SXuan Zhuo 	}
745295525e2SXuan Zhuo }
746295525e2SXuan Zhuo 
74725074a44SXuan Zhuo static void free_old_xmit_skbs(struct send_queue *sq, bool in_napi)
74825074a44SXuan Zhuo {
74925074a44SXuan Zhuo 	unsigned int len;
75025074a44SXuan Zhuo 	unsigned int packets = 0;
75125074a44SXuan Zhuo 	unsigned int bytes = 0;
75225074a44SXuan Zhuo 	void *ptr;
75325074a44SXuan Zhuo 
75425074a44SXuan Zhuo 	while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) {
75525074a44SXuan Zhuo 		if (likely(!is_xdp_frame(ptr))) {
75625074a44SXuan Zhuo 			struct sk_buff *skb = ptr;
75725074a44SXuan Zhuo 
75825074a44SXuan Zhuo 			pr_debug("Sent skb %p\n", skb);
75925074a44SXuan Zhuo 
76025074a44SXuan Zhuo 			bytes += skb->len;
76125074a44SXuan Zhuo 			napi_consume_skb(skb, in_napi);
76225074a44SXuan Zhuo 		} else {
76325074a44SXuan Zhuo 			struct xdp_frame *frame = ptr_to_xdp(ptr);
76425074a44SXuan Zhuo 
76525074a44SXuan Zhuo 			bytes += xdp_get_frame_len(frame);
76625074a44SXuan Zhuo 			xdp_return_frame(frame);
76725074a44SXuan Zhuo 		}
76825074a44SXuan Zhuo 		packets++;
76925074a44SXuan Zhuo 	}
77025074a44SXuan Zhuo 
77125074a44SXuan Zhuo 	/* Avoid overhead when no packets have been processed
77225074a44SXuan Zhuo 	 * happens when called speculatively from start_xmit.
77325074a44SXuan Zhuo 	 */
77425074a44SXuan Zhuo 	if (!packets)
77525074a44SXuan Zhuo 		return;
77625074a44SXuan Zhuo 
77725074a44SXuan Zhuo 	u64_stats_update_begin(&sq->stats.syncp);
77825074a44SXuan Zhuo 	sq->stats.bytes += bytes;
77925074a44SXuan Zhuo 	sq->stats.packets += packets;
78025074a44SXuan Zhuo 	u64_stats_update_end(&sq->stats.syncp);
78125074a44SXuan Zhuo }
78225074a44SXuan Zhuo 
78325074a44SXuan Zhuo static bool is_xdp_raw_buffer_queue(struct virtnet_info *vi, int q)
78425074a44SXuan Zhuo {
78525074a44SXuan Zhuo 	if (q < (vi->curr_queue_pairs - vi->xdp_queue_pairs))
78625074a44SXuan Zhuo 		return false;
78725074a44SXuan Zhuo 	else if (q < vi->curr_queue_pairs)
78825074a44SXuan Zhuo 		return true;
78925074a44SXuan Zhuo 	else
79025074a44SXuan Zhuo 		return false;
79125074a44SXuan Zhuo }
79225074a44SXuan Zhuo 
793b8ef4809SXuan Zhuo static void check_sq_full_and_disable(struct virtnet_info *vi,
794b8ef4809SXuan Zhuo 				      struct net_device *dev,
795b8ef4809SXuan Zhuo 				      struct send_queue *sq)
796b8ef4809SXuan Zhuo {
797b8ef4809SXuan Zhuo 	bool use_napi = sq->napi.weight;
798b8ef4809SXuan Zhuo 	int qnum;
799b8ef4809SXuan Zhuo 
800b8ef4809SXuan Zhuo 	qnum = sq - vi->sq;
801b8ef4809SXuan Zhuo 
802b8ef4809SXuan Zhuo 	/* If running out of space, stop queue to avoid getting packets that we
803b8ef4809SXuan Zhuo 	 * are then unable to transmit.
804b8ef4809SXuan Zhuo 	 * An alternative would be to force queuing layer to requeue the skb by
805b8ef4809SXuan Zhuo 	 * returning NETDEV_TX_BUSY. However, NETDEV_TX_BUSY should not be
806b8ef4809SXuan Zhuo 	 * returned in a normal path of operation: it means that driver is not
807b8ef4809SXuan Zhuo 	 * maintaining the TX queue stop/start state properly, and causes
808b8ef4809SXuan Zhuo 	 * the stack to do a non-trivial amount of useless work.
809b8ef4809SXuan Zhuo 	 * Since most packets only take 1 or 2 ring slots, stopping the queue
810b8ef4809SXuan Zhuo 	 * early means 16 slots are typically wasted.
811b8ef4809SXuan Zhuo 	 */
812b8ef4809SXuan Zhuo 	if (sq->vq->num_free < 2+MAX_SKB_FRAGS) {
813b8ef4809SXuan Zhuo 		netif_stop_subqueue(dev, qnum);
814b8ef4809SXuan Zhuo 		if (use_napi) {
815b8ef4809SXuan Zhuo 			if (unlikely(!virtqueue_enable_cb_delayed(sq->vq)))
816b8ef4809SXuan Zhuo 				virtqueue_napi_schedule(&sq->napi, sq->vq);
817b8ef4809SXuan Zhuo 		} else if (unlikely(!virtqueue_enable_cb_delayed(sq->vq))) {
818b8ef4809SXuan Zhuo 			/* More just got used, free them then recheck. */
819b8ef4809SXuan Zhuo 			free_old_xmit_skbs(sq, false);
820b8ef4809SXuan Zhuo 			if (sq->vq->num_free >= 2+MAX_SKB_FRAGS) {
821b8ef4809SXuan Zhuo 				netif_start_subqueue(dev, qnum);
822b8ef4809SXuan Zhuo 				virtqueue_disable_cb(sq->vq);
823b8ef4809SXuan Zhuo 			}
824b8ef4809SXuan Zhuo 		}
825b8ef4809SXuan Zhuo 	}
826b8ef4809SXuan Zhuo }
827b8ef4809SXuan Zhuo 
828735fc405SJesper Dangaard Brouer static int __virtnet_xdp_xmit_one(struct virtnet_info *vi,
829735fc405SJesper Dangaard Brouer 				   struct send_queue *sq,
83044fa2dbdSJesper Dangaard Brouer 				   struct xdp_frame *xdpf)
83156434a01SJohn Fastabend {
83256434a01SJohn Fastabend 	struct virtio_net_hdr_mrg_rxbuf *hdr;
83397717e8dSHeng Qi 	struct skb_shared_info *shinfo;
83497717e8dSHeng Qi 	u8 nr_frags = 0;
83597717e8dSHeng Qi 	int err, i;
83656434a01SJohn Fastabend 
837cac320c8SJesper Dangaard Brouer 	if (unlikely(xdpf->headroom < vi->hdr_len))
838cac320c8SJesper Dangaard Brouer 		return -EOVERFLOW;
839cac320c8SJesper Dangaard Brouer 
84097717e8dSHeng Qi 	if (unlikely(xdp_frame_has_frags(xdpf))) {
84197717e8dSHeng Qi 		shinfo = xdp_get_shared_info_from_frame(xdpf);
84297717e8dSHeng Qi 		nr_frags = shinfo->nr_frags;
84397717e8dSHeng Qi 	}
84497717e8dSHeng Qi 
84597717e8dSHeng Qi 	/* In wrapping function virtnet_xdp_xmit(), we need to free
84697717e8dSHeng Qi 	 * up the pending old buffers, where we need to calculate the
84797717e8dSHeng Qi 	 * position of skb_shared_info in xdp_get_frame_len() and
84897717e8dSHeng Qi 	 * xdp_return_frame(), which will involve to xdpf->data and
84997717e8dSHeng Qi 	 * xdpf->headroom. Therefore, we need to update the value of
85097717e8dSHeng Qi 	 * headroom synchronously here.
85197717e8dSHeng Qi 	 */
85297717e8dSHeng Qi 	xdpf->headroom -= vi->hdr_len;
853cac320c8SJesper Dangaard Brouer 	xdpf->data -= vi->hdr_len;
85456434a01SJohn Fastabend 	/* Zero header and leave csum up to XDP layers */
855cac320c8SJesper Dangaard Brouer 	hdr = xdpf->data;
85656434a01SJohn Fastabend 	memset(hdr, 0, vi->hdr_len);
857cac320c8SJesper Dangaard Brouer 	xdpf->len   += vi->hdr_len;
85856434a01SJohn Fastabend 
85997717e8dSHeng Qi 	sg_init_table(sq->sg, nr_frags + 1);
86097717e8dSHeng Qi 	sg_set_buf(sq->sg, xdpf->data, xdpf->len);
86197717e8dSHeng Qi 	for (i = 0; i < nr_frags; i++) {
86297717e8dSHeng Qi 		skb_frag_t *frag = &shinfo->frags[i];
863bb91accfSJason Wang 
86497717e8dSHeng Qi 		sg_set_page(&sq->sg[i + 1], skb_frag_page(frag),
86597717e8dSHeng Qi 			    skb_frag_size(frag), skb_frag_off(frag));
86697717e8dSHeng Qi 	}
86797717e8dSHeng Qi 
86897717e8dSHeng Qi 	err = virtqueue_add_outbuf(sq->vq, sq->sg, nr_frags + 1,
86997717e8dSHeng Qi 				   xdp_to_ptr(xdpf), GFP_ATOMIC);
87011b7d897SJesper Dangaard Brouer 	if (unlikely(err))
871cac320c8SJesper Dangaard Brouer 		return -ENOSPC; /* Caller handle free/refcnt */
87256434a01SJohn Fastabend 
873cac320c8SJesper Dangaard Brouer 	return 0;
87456434a01SJohn Fastabend }
87556434a01SJohn Fastabend 
87697c2c69eSXuan Zhuo /* when vi->curr_queue_pairs > nr_cpu_ids, the txq/sq is only used for xdp tx on
87797c2c69eSXuan Zhuo  * the current cpu, so it does not need to be locked.
87897c2c69eSXuan Zhuo  *
87997c2c69eSXuan Zhuo  * Here we use marco instead of inline functions because we have to deal with
88097c2c69eSXuan Zhuo  * three issues at the same time: 1. the choice of sq. 2. judge and execute the
88197c2c69eSXuan Zhuo  * lock/unlock of txq 3. make sparse happy. It is difficult for two inline
88297c2c69eSXuan Zhuo  * functions to perfectly solve these three problems at the same time.
88397c2c69eSXuan Zhuo  */
88497c2c69eSXuan Zhuo #define virtnet_xdp_get_sq(vi) ({                                       \
8853dcc1edcSLi RongQing 	int cpu = smp_processor_id();                                   \
88697c2c69eSXuan Zhuo 	struct netdev_queue *txq;                                       \
88797c2c69eSXuan Zhuo 	typeof(vi) v = (vi);                                            \
88897c2c69eSXuan Zhuo 	unsigned int qp;                                                \
88997c2c69eSXuan Zhuo 									\
89097c2c69eSXuan Zhuo 	if (v->curr_queue_pairs > nr_cpu_ids) {                         \
89197c2c69eSXuan Zhuo 		qp = v->curr_queue_pairs - v->xdp_queue_pairs;          \
8923dcc1edcSLi RongQing 		qp += cpu;                                              \
89397c2c69eSXuan Zhuo 		txq = netdev_get_tx_queue(v->dev, qp);                  \
89497c2c69eSXuan Zhuo 		__netif_tx_acquire(txq);                                \
89597c2c69eSXuan Zhuo 	} else {                                                        \
8963dcc1edcSLi RongQing 		qp = cpu % v->curr_queue_pairs;                         \
89797c2c69eSXuan Zhuo 		txq = netdev_get_tx_queue(v->dev, qp);                  \
8983dcc1edcSLi RongQing 		__netif_tx_lock(txq, cpu);                              \
89997c2c69eSXuan Zhuo 	}                                                               \
90097c2c69eSXuan Zhuo 	v->sq + qp;                                                     \
90197c2c69eSXuan Zhuo })
9022a43565cSToshiaki Makita 
90397c2c69eSXuan Zhuo #define virtnet_xdp_put_sq(vi, q) {                                     \
90497c2c69eSXuan Zhuo 	struct netdev_queue *txq;                                       \
90597c2c69eSXuan Zhuo 	typeof(vi) v = (vi);                                            \
90697c2c69eSXuan Zhuo 									\
90797c2c69eSXuan Zhuo 	txq = netdev_get_tx_queue(v->dev, (q) - v->sq);                 \
90897c2c69eSXuan Zhuo 	if (v->curr_queue_pairs > nr_cpu_ids)                           \
90997c2c69eSXuan Zhuo 		__netif_tx_release(txq);                                \
91097c2c69eSXuan Zhuo 	else                                                            \
91197c2c69eSXuan Zhuo 		__netif_tx_unlock(txq);                                 \
9122a43565cSToshiaki Makita }
9132a43565cSToshiaki Makita 
914735fc405SJesper Dangaard Brouer static int virtnet_xdp_xmit(struct net_device *dev,
91542b33468SJesper Dangaard Brouer 			    int n, struct xdp_frame **frames, u32 flags)
916186b3c99SJason Wang {
917186b3c99SJason Wang 	struct virtnet_info *vi = netdev_priv(dev);
9188dcc5b0aSJesper Dangaard Brouer 	struct receive_queue *rq = vi->rq;
9198dcc5b0aSJesper Dangaard Brouer 	struct bpf_prog *xdp_prog;
920735fc405SJesper Dangaard Brouer 	struct send_queue *sq;
921735fc405SJesper Dangaard Brouer 	unsigned int len;
922546f2897SToshiaki Makita 	int packets = 0;
923546f2897SToshiaki Makita 	int bytes = 0;
924fdc13979SLorenzo Bianconi 	int nxmit = 0;
925461f03dcSToshiaki Makita 	int kicks = 0;
9265050471dSToshiaki Makita 	void *ptr;
927fdc13979SLorenzo Bianconi 	int ret;
928735fc405SJesper Dangaard Brouer 	int i;
929735fc405SJesper Dangaard Brouer 
9308dcc5b0aSJesper Dangaard Brouer 	/* Only allow ndo_xdp_xmit if XDP is loaded on dev, as this
9318dcc5b0aSJesper Dangaard Brouer 	 * indicate XDP resources have been successfully allocated.
9328dcc5b0aSJesper Dangaard Brouer 	 */
9339719c6b9SJohn Fastabend 	xdp_prog = rcu_access_pointer(rq->xdp_prog);
9341667c08aSToshiaki Makita 	if (!xdp_prog)
9351667c08aSToshiaki Makita 		return -ENXIO;
9361667c08aSToshiaki Makita 
93797c2c69eSXuan Zhuo 	sq = virtnet_xdp_get_sq(vi);
9389ab86bbcSShirley Ma 
9399ab86bbcSShirley Ma 	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) {
940186b3c99SJason Wang 		ret = -EINVAL;
941186b3c99SJason Wang 		goto out;
942186b3c99SJason Wang 	}
943186b3c99SJason Wang 
944735fc405SJesper Dangaard Brouer 	/* Free up any pending old buffers before queueing new ones. */
9455050471dSToshiaki Makita 	while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) {
946546f2897SToshiaki Makita 		if (likely(is_xdp_frame(ptr))) {
947546f2897SToshiaki Makita 			struct xdp_frame *frame = ptr_to_xdp(ptr);
948546f2897SToshiaki Makita 
94950bd14bcSHeng Qi 			bytes += xdp_get_frame_len(frame);
950546f2897SToshiaki Makita 			xdp_return_frame(frame);
951546f2897SToshiaki Makita 		} else {
952546f2897SToshiaki Makita 			struct sk_buff *skb = ptr;
953546f2897SToshiaki Makita 
954546f2897SToshiaki Makita 			bytes += skb->len;
955546f2897SToshiaki Makita 			napi_consume_skb(skb, false);
956546f2897SToshiaki Makita 		}
957546f2897SToshiaki Makita 		packets++;
9585050471dSToshiaki Makita 	}
959735fc405SJesper Dangaard Brouer 
960735fc405SJesper Dangaard Brouer 	for (i = 0; i < n; i++) {
961735fc405SJesper Dangaard Brouer 		struct xdp_frame *xdpf = frames[i];
962735fc405SJesper Dangaard Brouer 
963fdc13979SLorenzo Bianconi 		if (__virtnet_xdp_xmit_one(vi, sq, xdpf))
964fdc13979SLorenzo Bianconi 			break;
965fdc13979SLorenzo Bianconi 		nxmit++;
966735fc405SJesper Dangaard Brouer 	}
967fdc13979SLorenzo Bianconi 	ret = nxmit;
9685d274cb4SJesper Dangaard Brouer 
969cd1c604aSXuan Zhuo 	if (!is_xdp_raw_buffer_queue(vi, sq - vi->sq))
970cd1c604aSXuan Zhuo 		check_sq_full_and_disable(vi, dev, sq);
971cd1c604aSXuan Zhuo 
972461f03dcSToshiaki Makita 	if (flags & XDP_XMIT_FLUSH) {
973461f03dcSToshiaki Makita 		if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq))
974461f03dcSToshiaki Makita 			kicks = 1;
975461f03dcSToshiaki Makita 	}
9765b8f3c8dSToshiaki Makita out:
9775b8f3c8dSToshiaki Makita 	u64_stats_update_begin(&sq->stats.syncp);
978546f2897SToshiaki Makita 	sq->stats.bytes += bytes;
979546f2897SToshiaki Makita 	sq->stats.packets += packets;
9805b8f3c8dSToshiaki Makita 	sq->stats.xdp_tx += n;
981fdc13979SLorenzo Bianconi 	sq->stats.xdp_tx_drops += n - nxmit;
982461f03dcSToshiaki Makita 	sq->stats.kicks += kicks;
9835b8f3c8dSToshiaki Makita 	u64_stats_update_end(&sq->stats.syncp);
9845d274cb4SJesper Dangaard Brouer 
98597c2c69eSXuan Zhuo 	virtnet_xdp_put_sq(vi, sq);
9865b8f3c8dSToshiaki Makita 	return ret;
987186b3c99SJason Wang }
988186b3c99SJason Wang 
989bb2c1e9eSXuan Zhuo static void put_xdp_frags(struct xdp_buff *xdp)
990bb2c1e9eSXuan Zhuo {
991bb2c1e9eSXuan Zhuo 	struct skb_shared_info *shinfo;
992bb2c1e9eSXuan Zhuo 	struct page *xdp_page;
993bb2c1e9eSXuan Zhuo 	int i;
994bb2c1e9eSXuan Zhuo 
995bb2c1e9eSXuan Zhuo 	if (xdp_buff_has_frags(xdp)) {
996bb2c1e9eSXuan Zhuo 		shinfo = xdp_get_shared_info_from_buff(xdp);
997bb2c1e9eSXuan Zhuo 		for (i = 0; i < shinfo->nr_frags; i++) {
998bb2c1e9eSXuan Zhuo 			xdp_page = skb_frag_page(&shinfo->frags[i]);
999bb2c1e9eSXuan Zhuo 			put_page(xdp_page);
1000bb2c1e9eSXuan Zhuo 		}
1001bb2c1e9eSXuan Zhuo 	}
1002bb2c1e9eSXuan Zhuo }
1003bb2c1e9eSXuan Zhuo 
100400765f8eSXuan Zhuo static int virtnet_xdp_handler(struct bpf_prog *xdp_prog, struct xdp_buff *xdp,
100500765f8eSXuan Zhuo 			       struct net_device *dev,
100600765f8eSXuan Zhuo 			       unsigned int *xdp_xmit,
100700765f8eSXuan Zhuo 			       struct virtnet_rq_stats *stats)
100800765f8eSXuan Zhuo {
100900765f8eSXuan Zhuo 	struct xdp_frame *xdpf;
101000765f8eSXuan Zhuo 	int err;
101100765f8eSXuan Zhuo 	u32 act;
101200765f8eSXuan Zhuo 
101300765f8eSXuan Zhuo 	act = bpf_prog_run_xdp(xdp_prog, xdp);
101400765f8eSXuan Zhuo 	stats->xdp_packets++;
101500765f8eSXuan Zhuo 
101600765f8eSXuan Zhuo 	switch (act) {
101700765f8eSXuan Zhuo 	case XDP_PASS:
101800765f8eSXuan Zhuo 		return act;
101900765f8eSXuan Zhuo 
102000765f8eSXuan Zhuo 	case XDP_TX:
102100765f8eSXuan Zhuo 		stats->xdp_tx++;
102200765f8eSXuan Zhuo 		xdpf = xdp_convert_buff_to_frame(xdp);
102300765f8eSXuan Zhuo 		if (unlikely(!xdpf)) {
102400765f8eSXuan Zhuo 			netdev_dbg(dev, "convert buff to frame failed for xdp\n");
102500765f8eSXuan Zhuo 			return XDP_DROP;
102600765f8eSXuan Zhuo 		}
102700765f8eSXuan Zhuo 
102800765f8eSXuan Zhuo 		err = virtnet_xdp_xmit(dev, 1, &xdpf, 0);
102900765f8eSXuan Zhuo 		if (unlikely(!err)) {
103000765f8eSXuan Zhuo 			xdp_return_frame_rx_napi(xdpf);
103100765f8eSXuan Zhuo 		} else if (unlikely(err < 0)) {
103200765f8eSXuan Zhuo 			trace_xdp_exception(dev, xdp_prog, act);
103300765f8eSXuan Zhuo 			return XDP_DROP;
103400765f8eSXuan Zhuo 		}
103500765f8eSXuan Zhuo 		*xdp_xmit |= VIRTIO_XDP_TX;
103600765f8eSXuan Zhuo 		return act;
103700765f8eSXuan Zhuo 
103800765f8eSXuan Zhuo 	case XDP_REDIRECT:
103900765f8eSXuan Zhuo 		stats->xdp_redirects++;
104000765f8eSXuan Zhuo 		err = xdp_do_redirect(dev, xdp, xdp_prog);
104100765f8eSXuan Zhuo 		if (err)
104200765f8eSXuan Zhuo 			return XDP_DROP;
104300765f8eSXuan Zhuo 
104400765f8eSXuan Zhuo 		*xdp_xmit |= VIRTIO_XDP_REDIR;
104500765f8eSXuan Zhuo 		return act;
104600765f8eSXuan Zhuo 
104700765f8eSXuan Zhuo 	default:
104800765f8eSXuan Zhuo 		bpf_warn_invalid_xdp_action(dev, xdp_prog, act);
104900765f8eSXuan Zhuo 		fallthrough;
105000765f8eSXuan Zhuo 	case XDP_ABORTED:
105100765f8eSXuan Zhuo 		trace_xdp_exception(dev, xdp_prog, act);
105200765f8eSXuan Zhuo 		fallthrough;
105300765f8eSXuan Zhuo 	case XDP_DROP:
105400765f8eSXuan Zhuo 		return XDP_DROP;
105500765f8eSXuan Zhuo 	}
105600765f8eSXuan Zhuo }
105700765f8eSXuan Zhuo 
1058f6b10209SJason Wang static unsigned int virtnet_get_headroom(struct virtnet_info *vi)
1059f6b10209SJason Wang {
106097c2c69eSXuan Zhuo 	return vi->xdp_enabled ? VIRTIO_XDP_HEADROOM : 0;
1061f6b10209SJason Wang }
1062f6b10209SJason Wang 
10634941d472SJason Wang /* We copy the packet for XDP in the following cases:
10644941d472SJason Wang  *
10654941d472SJason Wang  * 1) Packet is scattered across multiple rx buffers.
10664941d472SJason Wang  * 2) Headroom space is insufficient.
10674941d472SJason Wang  *
10684941d472SJason Wang  * This is inefficient but it's a temporary condition that
10694941d472SJason Wang  * we hit right after XDP is enabled and until queue is refilled
10704941d472SJason Wang  * with large buffers with sufficient headroom - so it should affect
10714941d472SJason Wang  * at most queue size packets.
10724941d472SJason Wang  * Afterwards, the conditions to enable
10734941d472SJason Wang  * XDP should preclude the underlying device from sending packets
10744941d472SJason Wang  * across multiple buffers (num_buf > 1), and we make sure buffers
10754941d472SJason Wang  * have enough headroom.
107672979a6cSJohn Fastabend  */
107772979a6cSJohn Fastabend static struct page *xdp_linearize_page(struct receive_queue *rq,
1078981f14d4SHeng Qi 				       int *num_buf,
107972979a6cSJohn Fastabend 				       struct page *p,
108072979a6cSJohn Fastabend 				       int offset,
10814941d472SJason Wang 				       int page_off,
108272979a6cSJohn Fastabend 				       unsigned int *len)
108372979a6cSJohn Fastabend {
1084853618d5SXuan Zhuo 	int tailroom = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
1085853618d5SXuan Zhuo 	struct page *page;
108672979a6cSJohn Fastabend 
1087853618d5SXuan Zhuo 	if (page_off + *len + tailroom > PAGE_SIZE)
1088853618d5SXuan Zhuo 		return NULL;
1089853618d5SXuan Zhuo 
1090853618d5SXuan Zhuo 	page = alloc_page(GFP_ATOMIC);
109172979a6cSJohn Fastabend 	if (!page)
109272979a6cSJohn Fastabend 		return NULL;
109372979a6cSJohn Fastabend 
109472979a6cSJohn Fastabend 	memcpy(page_address(page) + page_off, page_address(p) + offset, *len);
109572979a6cSJohn Fastabend 	page_off += *len;
109672979a6cSJohn Fastabend 
109756a86f84SJason Wang 	while (--*num_buf) {
109872979a6cSJohn Fastabend 		unsigned int buflen;
109972979a6cSJohn Fastabend 		void *buf;
110072979a6cSJohn Fastabend 		int off;
110172979a6cSJohn Fastabend 
1102295525e2SXuan Zhuo 		buf = virtnet_rq_get_buf(rq, &buflen, NULL);
1103680557cfSMichael S. Tsirkin 		if (unlikely(!buf))
110472979a6cSJohn Fastabend 			goto err_buf;
110572979a6cSJohn Fastabend 
110672979a6cSJohn Fastabend 		p = virt_to_head_page(buf);
110772979a6cSJohn Fastabend 		off = buf - page_address(p);
110872979a6cSJohn Fastabend 
110956a86f84SJason Wang 		/* guard against a misconfigured or uncooperative backend that
111056a86f84SJason Wang 		 * is sending packet larger than the MTU.
111156a86f84SJason Wang 		 */
11123cc81a9aSJason Wang 		if ((page_off + buflen + tailroom) > PAGE_SIZE) {
111356a86f84SJason Wang 			put_page(p);
111456a86f84SJason Wang 			goto err_buf;
111556a86f84SJason Wang 		}
111656a86f84SJason Wang 
111772979a6cSJohn Fastabend 		memcpy(page_address(page) + page_off,
111872979a6cSJohn Fastabend 		       page_address(p) + off, buflen);
111972979a6cSJohn Fastabend 		page_off += buflen;
112056a86f84SJason Wang 		put_page(p);
112172979a6cSJohn Fastabend 	}
112272979a6cSJohn Fastabend 
11232de2f7f4SJohn Fastabend 	/* Headroom does not contribute to packet length */
11242de2f7f4SJohn Fastabend 	*len = page_off - VIRTIO_XDP_HEADROOM;
112572979a6cSJohn Fastabend 	return page;
112672979a6cSJohn Fastabend err_buf:
112772979a6cSJohn Fastabend 	__free_pages(page, 0);
112872979a6cSJohn Fastabend 	return NULL;
112972979a6cSJohn Fastabend }
113072979a6cSJohn Fastabend 
113119e8c85eSXuan Zhuo static struct sk_buff *receive_small_build_skb(struct virtnet_info *vi,
113219e8c85eSXuan Zhuo 					       unsigned int xdp_headroom,
113319e8c85eSXuan Zhuo 					       void *buf,
113419e8c85eSXuan Zhuo 					       unsigned int len)
113519e8c85eSXuan Zhuo {
113619e8c85eSXuan Zhuo 	unsigned int header_offset;
113719e8c85eSXuan Zhuo 	unsigned int headroom;
113819e8c85eSXuan Zhuo 	unsigned int buflen;
113919e8c85eSXuan Zhuo 	struct sk_buff *skb;
114019e8c85eSXuan Zhuo 
114119e8c85eSXuan Zhuo 	header_offset = VIRTNET_RX_PAD + xdp_headroom;
114219e8c85eSXuan Zhuo 	headroom = vi->hdr_len + header_offset;
114319e8c85eSXuan Zhuo 	buflen = SKB_DATA_ALIGN(GOOD_PACKET_LEN + headroom) +
114419e8c85eSXuan Zhuo 		SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
114519e8c85eSXuan Zhuo 
114621e26a71SXuan Zhuo 	skb = virtnet_build_skb(buf, buflen, headroom, len);
114721e26a71SXuan Zhuo 	if (unlikely(!skb))
114819e8c85eSXuan Zhuo 		return NULL;
114919e8c85eSXuan Zhuo 
115019e8c85eSXuan Zhuo 	buf += header_offset;
1151dae64749SFeng Liu 	memcpy(skb_vnet_common_hdr(skb), buf, vi->hdr_len);
115219e8c85eSXuan Zhuo 
115319e8c85eSXuan Zhuo 	return skb;
115419e8c85eSXuan Zhuo }
115519e8c85eSXuan Zhuo 
1156c5f3e72fSXuan Zhuo static struct sk_buff *receive_small_xdp(struct net_device *dev,
11574941d472SJason Wang 					 struct virtnet_info *vi,
11584941d472SJason Wang 					 struct receive_queue *rq,
1159c5f3e72fSXuan Zhuo 					 struct bpf_prog *xdp_prog,
1160c5f3e72fSXuan Zhuo 					 void *buf,
1161c5f3e72fSXuan Zhuo 					 unsigned int xdp_headroom,
1162186b3c99SJason Wang 					 unsigned int len,
11637d9d60fdSToshiaki Makita 					 unsigned int *xdp_xmit,
1164d46eeeafSJason Wang 					 struct virtnet_rq_stats *stats)
11654941d472SJason Wang {
11664941d472SJason Wang 	unsigned int header_offset = VIRTNET_RX_PAD + xdp_headroom;
11674941d472SJason Wang 	unsigned int headroom = vi->hdr_len + header_offset;
11684941d472SJason Wang 	struct virtio_net_hdr_mrg_rxbuf *hdr = buf + header_offset;
1169c5f3e72fSXuan Zhuo 	struct page *page = virt_to_head_page(buf);
1170c5f3e72fSXuan Zhuo 	struct page *xdp_page;
1171c5f3e72fSXuan Zhuo 	unsigned int buflen;
11724941d472SJason Wang 	struct xdp_buff xdp;
1173c5f3e72fSXuan Zhuo 	struct sk_buff *skb;
1174c5f3e72fSXuan Zhuo 	unsigned int metasize = 0;
11754941d472SJason Wang 	u32 act;
11764941d472SJason Wang 
117795dbe9e7SJesper Dangaard Brouer 	if (unlikely(hdr->hdr.gso_type))
11784941d472SJason Wang 		goto err_xdp;
11794941d472SJason Wang 
1180c5f3e72fSXuan Zhuo 	buflen = SKB_DATA_ALIGN(GOOD_PACKET_LEN + headroom) +
1181c5f3e72fSXuan Zhuo 		SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
1182c5f3e72fSXuan Zhuo 
11834941d472SJason Wang 	if (unlikely(xdp_headroom < virtnet_get_headroom(vi))) {
11844941d472SJason Wang 		int offset = buf - page_address(page) + header_offset;
11854941d472SJason Wang 		unsigned int tlen = len + vi->hdr_len;
1186981f14d4SHeng Qi 		int num_buf = 1;
11874941d472SJason Wang 
11884941d472SJason Wang 		xdp_headroom = virtnet_get_headroom(vi);
11894941d472SJason Wang 		header_offset = VIRTNET_RX_PAD + xdp_headroom;
11904941d472SJason Wang 		headroom = vi->hdr_len + header_offset;
11914941d472SJason Wang 		buflen = SKB_DATA_ALIGN(GOOD_PACKET_LEN + headroom) +
11924941d472SJason Wang 			SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
11934941d472SJason Wang 		xdp_page = xdp_linearize_page(rq, &num_buf, page,
11944941d472SJason Wang 					      offset, header_offset,
11954941d472SJason Wang 					      &tlen);
11964941d472SJason Wang 		if (!xdp_page)
11974941d472SJason Wang 			goto err_xdp;
11984941d472SJason Wang 
11994941d472SJason Wang 		buf = page_address(xdp_page);
12004941d472SJason Wang 		put_page(page);
12014941d472SJason Wang 		page = xdp_page;
12024941d472SJason Wang 	}
12034941d472SJason Wang 
120443b5169dSLorenzo Bianconi 	xdp_init_buff(&xdp, buflen, &rq->xdp_rxq);
1205be9df4afSLorenzo Bianconi 	xdp_prepare_buff(&xdp, buf + VIRTNET_RX_PAD + vi->hdr_len,
1206be9df4afSLorenzo Bianconi 			 xdp_headroom, len, true);
120700765f8eSXuan Zhuo 
120800765f8eSXuan Zhuo 	act = virtnet_xdp_handler(xdp_prog, &xdp, dev, xdp_xmit, stats);
12094941d472SJason Wang 
12104941d472SJason Wang 	switch (act) {
12114941d472SJason Wang 	case XDP_PASS:
12124941d472SJason Wang 		/* Recalculate length in case bpf program changed it */
12136870de43SNikita V. Shirokov 		len = xdp.data_end - xdp.data;
1214503d539aSYuya Kusakabe 		metasize = xdp.data - xdp.data_meta;
12154941d472SJason Wang 		break;
1216c5f3e72fSXuan Zhuo 
12174941d472SJason Wang 	case XDP_TX:
1218186b3c99SJason Wang 	case XDP_REDIRECT:
12194941d472SJason Wang 		goto xdp_xmit;
1220c5f3e72fSXuan Zhuo 
12214941d472SJason Wang 	default:
12224941d472SJason Wang 		goto err_xdp;
12234941d472SJason Wang 	}
1224c5f3e72fSXuan Zhuo 
122521e26a71SXuan Zhuo 	skb = virtnet_build_skb(buf, buflen, xdp.data - buf, len);
122621e26a71SXuan Zhuo 	if (unlikely(!skb))
1227c5f3e72fSXuan Zhuo 		goto err;
1228c5f3e72fSXuan Zhuo 
1229c5f3e72fSXuan Zhuo 	if (metasize)
1230c5f3e72fSXuan Zhuo 		skb_metadata_set(skb, metasize);
1231c5f3e72fSXuan Zhuo 
1232c5f3e72fSXuan Zhuo 	return skb;
1233c5f3e72fSXuan Zhuo 
1234c5f3e72fSXuan Zhuo err_xdp:
1235c5f3e72fSXuan Zhuo 	stats->xdp_drops++;
1236c5f3e72fSXuan Zhuo err:
1237c5f3e72fSXuan Zhuo 	stats->drops++;
1238c5f3e72fSXuan Zhuo 	put_page(page);
1239c5f3e72fSXuan Zhuo xdp_xmit:
1240c5f3e72fSXuan Zhuo 	return NULL;
1241c5f3e72fSXuan Zhuo }
1242c5f3e72fSXuan Zhuo 
1243c5f3e72fSXuan Zhuo static struct sk_buff *receive_small(struct net_device *dev,
1244c5f3e72fSXuan Zhuo 				     struct virtnet_info *vi,
1245c5f3e72fSXuan Zhuo 				     struct receive_queue *rq,
1246c5f3e72fSXuan Zhuo 				     void *buf, void *ctx,
1247c5f3e72fSXuan Zhuo 				     unsigned int len,
1248c5f3e72fSXuan Zhuo 				     unsigned int *xdp_xmit,
1249c5f3e72fSXuan Zhuo 				     struct virtnet_rq_stats *stats)
1250c5f3e72fSXuan Zhuo {
1251c5f3e72fSXuan Zhuo 	unsigned int xdp_headroom = (unsigned long)ctx;
1252c5f3e72fSXuan Zhuo 	struct page *page = virt_to_head_page(buf);
1253aef76506SXuan Zhuo 	struct sk_buff *skb;
1254c5f3e72fSXuan Zhuo 
1255c5f3e72fSXuan Zhuo 	len -= vi->hdr_len;
1256c5f3e72fSXuan Zhuo 	stats->bytes += len;
1257c5f3e72fSXuan Zhuo 
1258c5f3e72fSXuan Zhuo 	if (unlikely(len > GOOD_PACKET_LEN)) {
1259c5f3e72fSXuan Zhuo 		pr_debug("%s: rx error: len %u exceeds max size %d\n",
1260c5f3e72fSXuan Zhuo 			 dev->name, len, GOOD_PACKET_LEN);
1261c5f3e72fSXuan Zhuo 		dev->stats.rx_length_errors++;
1262c5f3e72fSXuan Zhuo 		goto err;
1263c5f3e72fSXuan Zhuo 	}
1264c5f3e72fSXuan Zhuo 
1265aef76506SXuan Zhuo 	if (unlikely(vi->xdp_enabled)) {
1266aef76506SXuan Zhuo 		struct bpf_prog *xdp_prog;
1267c5f3e72fSXuan Zhuo 
1268c5f3e72fSXuan Zhuo 		rcu_read_lock();
1269c5f3e72fSXuan Zhuo 		xdp_prog = rcu_dereference(rq->xdp_prog);
1270c5f3e72fSXuan Zhuo 		if (xdp_prog) {
1271aef76506SXuan Zhuo 			skb = receive_small_xdp(dev, vi, rq, xdp_prog, buf,
1272aef76506SXuan Zhuo 						xdp_headroom, len, xdp_xmit,
1273aef76506SXuan Zhuo 						stats);
1274c5f3e72fSXuan Zhuo 			rcu_read_unlock();
1275c5f3e72fSXuan Zhuo 			return skb;
12764941d472SJason Wang 		}
12774941d472SJason Wang 		rcu_read_unlock();
1278aef76506SXuan Zhuo 	}
12794941d472SJason Wang 
128019e8c85eSXuan Zhuo 	skb = receive_small_build_skb(vi, xdp_headroom, buf, len);
128119e8c85eSXuan Zhuo 	if (likely(skb))
12824941d472SJason Wang 		return skb;
12834941d472SJason Wang 
1284053c9e18SWenliang Wang err:
1285d46eeeafSJason Wang 	stats->drops++;
12864941d472SJason Wang 	put_page(page);
12874941d472SJason Wang 	return NULL;
12884941d472SJason Wang }
12894941d472SJason Wang 
12904941d472SJason Wang static struct sk_buff *receive_big(struct net_device *dev,
12914941d472SJason Wang 				   struct virtnet_info *vi,
12924941d472SJason Wang 				   struct receive_queue *rq,
12934941d472SJason Wang 				   void *buf,
12947d9d60fdSToshiaki Makita 				   unsigned int len,
1295d46eeeafSJason Wang 				   struct virtnet_rq_stats *stats)
12964941d472SJason Wang {
12974941d472SJason Wang 	struct page *page = buf;
1298503d539aSYuya Kusakabe 	struct sk_buff *skb =
1299fa0f1ba7SXuan Zhuo 		page_to_skb(vi, rq, page, 0, len, PAGE_SIZE, 0);
13004941d472SJason Wang 
1301d46eeeafSJason Wang 	stats->bytes += len - vi->hdr_len;
13024941d472SJason Wang 	if (unlikely(!skb))
13034941d472SJason Wang 		goto err;
13044941d472SJason Wang 
13054941d472SJason Wang 	return skb;
13064941d472SJason Wang 
13074941d472SJason Wang err:
1308d46eeeafSJason Wang 	stats->drops++;
13094941d472SJason Wang 	give_pages(rq, page);
13104941d472SJason Wang 	return NULL;
13114941d472SJason Wang }
13124941d472SJason Wang 
131380f50f91SXuan Zhuo static void mergeable_buf_free(struct receive_queue *rq, int num_buf,
131480f50f91SXuan Zhuo 			       struct net_device *dev,
131580f50f91SXuan Zhuo 			       struct virtnet_rq_stats *stats)
131680f50f91SXuan Zhuo {
131780f50f91SXuan Zhuo 	struct page *page;
131880f50f91SXuan Zhuo 	void *buf;
131980f50f91SXuan Zhuo 	int len;
132080f50f91SXuan Zhuo 
132180f50f91SXuan Zhuo 	while (num_buf-- > 1) {
1322295525e2SXuan Zhuo 		buf = virtnet_rq_get_buf(rq, &len, NULL);
132380f50f91SXuan Zhuo 		if (unlikely(!buf)) {
132480f50f91SXuan Zhuo 			pr_debug("%s: rx error: %d buffers missing\n",
132580f50f91SXuan Zhuo 				 dev->name, num_buf);
132680f50f91SXuan Zhuo 			dev->stats.rx_length_errors++;
132780f50f91SXuan Zhuo 			break;
132880f50f91SXuan Zhuo 		}
132980f50f91SXuan Zhuo 		stats->bytes += len;
133080f50f91SXuan Zhuo 		page = virt_to_head_page(buf);
133180f50f91SXuan Zhuo 		put_page(page);
133280f50f91SXuan Zhuo 	}
133380f50f91SXuan Zhuo }
133480f50f91SXuan Zhuo 
1335b26aa481SHeng Qi /* Why not use xdp_build_skb_from_frame() ?
1336b26aa481SHeng Qi  * XDP core assumes that xdp frags are PAGE_SIZE in length, while in
1337b26aa481SHeng Qi  * virtio-net there are 2 points that do not match its requirements:
1338b26aa481SHeng Qi  *  1. The size of the prefilled buffer is not fixed before xdp is set.
1339b26aa481SHeng Qi  *  2. xdp_build_skb_from_frame() does more checks that we don't need,
1340b26aa481SHeng Qi  *     like eth_type_trans() (which virtio-net does in receive_buf()).
1341b26aa481SHeng Qi  */
1342b26aa481SHeng Qi static struct sk_buff *build_skb_from_xdp_buff(struct net_device *dev,
1343b26aa481SHeng Qi 					       struct virtnet_info *vi,
1344b26aa481SHeng Qi 					       struct xdp_buff *xdp,
1345b26aa481SHeng Qi 					       unsigned int xdp_frags_truesz)
1346b26aa481SHeng Qi {
1347b26aa481SHeng Qi 	struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
1348b26aa481SHeng Qi 	unsigned int headroom, data_len;
1349b26aa481SHeng Qi 	struct sk_buff *skb;
1350b26aa481SHeng Qi 	int metasize;
1351b26aa481SHeng Qi 	u8 nr_frags;
1352b26aa481SHeng Qi 
1353b26aa481SHeng Qi 	if (unlikely(xdp->data_end > xdp_data_hard_end(xdp))) {
1354b26aa481SHeng Qi 		pr_debug("Error building skb as missing reserved tailroom for xdp");
1355b26aa481SHeng Qi 		return NULL;
1356b26aa481SHeng Qi 	}
1357b26aa481SHeng Qi 
1358b26aa481SHeng Qi 	if (unlikely(xdp_buff_has_frags(xdp)))
1359b26aa481SHeng Qi 		nr_frags = sinfo->nr_frags;
1360b26aa481SHeng Qi 
1361b26aa481SHeng Qi 	skb = build_skb(xdp->data_hard_start, xdp->frame_sz);
1362b26aa481SHeng Qi 	if (unlikely(!skb))
1363b26aa481SHeng Qi 		return NULL;
1364b26aa481SHeng Qi 
1365b26aa481SHeng Qi 	headroom = xdp->data - xdp->data_hard_start;
1366b26aa481SHeng Qi 	data_len = xdp->data_end - xdp->data;
1367b26aa481SHeng Qi 	skb_reserve(skb, headroom);
1368b26aa481SHeng Qi 	__skb_put(skb, data_len);
1369b26aa481SHeng Qi 
1370b26aa481SHeng Qi 	metasize = xdp->data - xdp->data_meta;
1371b26aa481SHeng Qi 	metasize = metasize > 0 ? metasize : 0;
1372b26aa481SHeng Qi 	if (metasize)
1373b26aa481SHeng Qi 		skb_metadata_set(skb, metasize);
1374b26aa481SHeng Qi 
1375b26aa481SHeng Qi 	if (unlikely(xdp_buff_has_frags(xdp)))
1376b26aa481SHeng Qi 		xdp_update_skb_shared_info(skb, nr_frags,
1377b26aa481SHeng Qi 					   sinfo->xdp_frags_size,
1378b26aa481SHeng Qi 					   xdp_frags_truesz,
1379b26aa481SHeng Qi 					   xdp_buff_is_frag_pfmemalloc(xdp));
1380b26aa481SHeng Qi 
1381b26aa481SHeng Qi 	return skb;
1382b26aa481SHeng Qi }
1383b26aa481SHeng Qi 
1384ef75cb51SHeng Qi /* TODO: build xdp in big mode */
1385ef75cb51SHeng Qi static int virtnet_build_xdp_buff_mrg(struct net_device *dev,
1386ef75cb51SHeng Qi 				      struct virtnet_info *vi,
1387ef75cb51SHeng Qi 				      struct receive_queue *rq,
1388ef75cb51SHeng Qi 				      struct xdp_buff *xdp,
1389ef75cb51SHeng Qi 				      void *buf,
1390ef75cb51SHeng Qi 				      unsigned int len,
1391ef75cb51SHeng Qi 				      unsigned int frame_sz,
1392981f14d4SHeng Qi 				      int *num_buf,
1393ef75cb51SHeng Qi 				      unsigned int *xdp_frags_truesize,
1394ef75cb51SHeng Qi 				      struct virtnet_rq_stats *stats)
1395ef75cb51SHeng Qi {
1396ef75cb51SHeng Qi 	struct virtio_net_hdr_mrg_rxbuf *hdr = buf;
1397ef75cb51SHeng Qi 	unsigned int headroom, tailroom, room;
1398ef75cb51SHeng Qi 	unsigned int truesize, cur_frag_size;
1399ef75cb51SHeng Qi 	struct skb_shared_info *shinfo;
1400ef75cb51SHeng Qi 	unsigned int xdp_frags_truesz = 0;
1401ef75cb51SHeng Qi 	struct page *page;
1402ef75cb51SHeng Qi 	skb_frag_t *frag;
1403ef75cb51SHeng Qi 	int offset;
1404ef75cb51SHeng Qi 	void *ctx;
1405ef75cb51SHeng Qi 
1406ef75cb51SHeng Qi 	xdp_init_buff(xdp, frame_sz, &rq->xdp_rxq);
1407ef75cb51SHeng Qi 	xdp_prepare_buff(xdp, buf - VIRTIO_XDP_HEADROOM,
1408ef75cb51SHeng Qi 			 VIRTIO_XDP_HEADROOM + vi->hdr_len, len - vi->hdr_len, true);
1409ef75cb51SHeng Qi 
1410981f14d4SHeng Qi 	if (!*num_buf)
1411981f14d4SHeng Qi 		return 0;
1412981f14d4SHeng Qi 
1413ef75cb51SHeng Qi 	if (*num_buf > 1) {
1414ef75cb51SHeng Qi 		/* If we want to build multi-buffer xdp, we need
1415ef75cb51SHeng Qi 		 * to specify that the flags of xdp_buff have the
1416ef75cb51SHeng Qi 		 * XDP_FLAGS_HAS_FRAG bit.
1417ef75cb51SHeng Qi 		 */
1418ef75cb51SHeng Qi 		if (!xdp_buff_has_frags(xdp))
1419ef75cb51SHeng Qi 			xdp_buff_set_frags_flag(xdp);
1420ef75cb51SHeng Qi 
1421ef75cb51SHeng Qi 		shinfo = xdp_get_shared_info_from_buff(xdp);
1422ef75cb51SHeng Qi 		shinfo->nr_frags = 0;
1423ef75cb51SHeng Qi 		shinfo->xdp_frags_size = 0;
1424ef75cb51SHeng Qi 	}
1425ef75cb51SHeng Qi 
1426981f14d4SHeng Qi 	if (*num_buf > MAX_SKB_FRAGS + 1)
1427ef75cb51SHeng Qi 		return -EINVAL;
1428ef75cb51SHeng Qi 
1429981f14d4SHeng Qi 	while (--*num_buf > 0) {
1430295525e2SXuan Zhuo 		buf = virtnet_rq_get_buf(rq, &len, &ctx);
1431ef75cb51SHeng Qi 		if (unlikely(!buf)) {
1432ef75cb51SHeng Qi 			pr_debug("%s: rx error: %d buffers out of %d missing\n",
1433ef75cb51SHeng Qi 				 dev->name, *num_buf,
1434ef75cb51SHeng Qi 				 virtio16_to_cpu(vi->vdev, hdr->num_buffers));
1435ef75cb51SHeng Qi 			dev->stats.rx_length_errors++;
14364cb00b13SXuan Zhuo 			goto err;
1437ef75cb51SHeng Qi 		}
1438ef75cb51SHeng Qi 
1439ef75cb51SHeng Qi 		stats->bytes += len;
1440ef75cb51SHeng Qi 		page = virt_to_head_page(buf);
1441ef75cb51SHeng Qi 		offset = buf - page_address(page);
1442ef75cb51SHeng Qi 
1443ef75cb51SHeng Qi 		truesize = mergeable_ctx_to_truesize(ctx);
1444ef75cb51SHeng Qi 		headroom = mergeable_ctx_to_headroom(ctx);
1445ef75cb51SHeng Qi 		tailroom = headroom ? sizeof(struct skb_shared_info) : 0;
1446ef75cb51SHeng Qi 		room = SKB_DATA_ALIGN(headroom + tailroom);
1447ef75cb51SHeng Qi 
1448ef75cb51SHeng Qi 		cur_frag_size = truesize;
1449ef75cb51SHeng Qi 		xdp_frags_truesz += cur_frag_size;
1450ef75cb51SHeng Qi 		if (unlikely(len > truesize - room || cur_frag_size > PAGE_SIZE)) {
1451ef75cb51SHeng Qi 			put_page(page);
1452ef75cb51SHeng Qi 			pr_debug("%s: rx error: len %u exceeds truesize %lu\n",
1453ef75cb51SHeng Qi 				 dev->name, len, (unsigned long)(truesize - room));
1454ef75cb51SHeng Qi 			dev->stats.rx_length_errors++;
14554cb00b13SXuan Zhuo 			goto err;
1456ef75cb51SHeng Qi 		}
1457ef75cb51SHeng Qi 
1458ef75cb51SHeng Qi 		frag = &shinfo->frags[shinfo->nr_frags++];
1459b51f4113SYunsheng Lin 		skb_frag_fill_page_desc(frag, page, offset, len);
1460ef75cb51SHeng Qi 		if (page_is_pfmemalloc(page))
1461ef75cb51SHeng Qi 			xdp_buff_set_frag_pfmemalloc(xdp);
1462ef75cb51SHeng Qi 
1463ef75cb51SHeng Qi 		shinfo->xdp_frags_size += len;
1464ef75cb51SHeng Qi 	}
1465ef75cb51SHeng Qi 
1466ef75cb51SHeng Qi 	*xdp_frags_truesize = xdp_frags_truesz;
1467ef75cb51SHeng Qi 	return 0;
14684cb00b13SXuan Zhuo 
14694cb00b13SXuan Zhuo err:
14704cb00b13SXuan Zhuo 	put_xdp_frags(xdp);
14714cb00b13SXuan Zhuo 	return -EINVAL;
1472ef75cb51SHeng Qi }
1473ef75cb51SHeng Qi 
1474ad4858beSXuan Zhuo static void *mergeable_xdp_get_buf(struct virtnet_info *vi,
1475ad4858beSXuan Zhuo 				   struct receive_queue *rq,
1476ad4858beSXuan Zhuo 				   struct bpf_prog *xdp_prog,
1477ad4858beSXuan Zhuo 				   void *ctx,
1478ad4858beSXuan Zhuo 				   unsigned int *frame_sz,
1479ad4858beSXuan Zhuo 				   int *num_buf,
1480ad4858beSXuan Zhuo 				   struct page **page,
1481ad4858beSXuan Zhuo 				   int offset,
1482ad4858beSXuan Zhuo 				   unsigned int *len,
1483ad4858beSXuan Zhuo 				   struct virtio_net_hdr_mrg_rxbuf *hdr)
1484ad4858beSXuan Zhuo {
1485ad4858beSXuan Zhuo 	unsigned int truesize = mergeable_ctx_to_truesize(ctx);
1486ad4858beSXuan Zhuo 	unsigned int headroom = mergeable_ctx_to_headroom(ctx);
1487ad4858beSXuan Zhuo 	struct page *xdp_page;
1488ad4858beSXuan Zhuo 	unsigned int xdp_room;
1489ad4858beSXuan Zhuo 
1490ad4858beSXuan Zhuo 	/* Transient failure which in theory could occur if
1491ad4858beSXuan Zhuo 	 * in-flight packets from before XDP was enabled reach
1492ad4858beSXuan Zhuo 	 * the receive path after XDP is loaded.
1493ad4858beSXuan Zhuo 	 */
1494ad4858beSXuan Zhuo 	if (unlikely(hdr->hdr.gso_type))
1495ad4858beSXuan Zhuo 		return NULL;
1496ad4858beSXuan Zhuo 
1497ad4858beSXuan Zhuo 	/* Now XDP core assumes frag size is PAGE_SIZE, but buffers
1498ad4858beSXuan Zhuo 	 * with headroom may add hole in truesize, which
1499ad4858beSXuan Zhuo 	 * make their length exceed PAGE_SIZE. So we disabled the
1500ad4858beSXuan Zhuo 	 * hole mechanism for xdp. See add_recvbuf_mergeable().
1501ad4858beSXuan Zhuo 	 */
1502ad4858beSXuan Zhuo 	*frame_sz = truesize;
1503ad4858beSXuan Zhuo 
1504dbe4fec2SXuan Zhuo 	if (likely(headroom >= virtnet_get_headroom(vi) &&
1505dbe4fec2SXuan Zhuo 		   (*num_buf == 1 || xdp_prog->aux->xdp_has_frags))) {
1506dbe4fec2SXuan Zhuo 		return page_address(*page) + offset;
1507dbe4fec2SXuan Zhuo 	}
1508dbe4fec2SXuan Zhuo 
1509ad4858beSXuan Zhuo 	/* This happens when headroom is not enough because
1510ad4858beSXuan Zhuo 	 * of the buffer was prefilled before XDP is set.
1511ad4858beSXuan Zhuo 	 * This should only happen for the first several packets.
1512ad4858beSXuan Zhuo 	 * In fact, vq reset can be used here to help us clean up
1513ad4858beSXuan Zhuo 	 * the prefilled buffers, but many existing devices do not
1514ad4858beSXuan Zhuo 	 * support it, and we don't want to bother users who are
1515ad4858beSXuan Zhuo 	 * using xdp normally.
1516ad4858beSXuan Zhuo 	 */
1517dbe4fec2SXuan Zhuo 	if (!xdp_prog->aux->xdp_has_frags) {
1518ad4858beSXuan Zhuo 		/* linearize data for XDP */
1519ad4858beSXuan Zhuo 		xdp_page = xdp_linearize_page(rq, num_buf,
1520ad4858beSXuan Zhuo 					      *page, offset,
1521ad4858beSXuan Zhuo 					      VIRTIO_XDP_HEADROOM,
1522ad4858beSXuan Zhuo 					      len);
1523ad4858beSXuan Zhuo 		if (!xdp_page)
1524ad4858beSXuan Zhuo 			return NULL;
1525dbe4fec2SXuan Zhuo 	} else {
1526ad4858beSXuan Zhuo 		xdp_room = SKB_DATA_ALIGN(VIRTIO_XDP_HEADROOM +
1527ad4858beSXuan Zhuo 					  sizeof(struct skb_shared_info));
1528ad4858beSXuan Zhuo 		if (*len + xdp_room > PAGE_SIZE)
1529ad4858beSXuan Zhuo 			return NULL;
1530ad4858beSXuan Zhuo 
1531ad4858beSXuan Zhuo 		xdp_page = alloc_page(GFP_ATOMIC);
1532ad4858beSXuan Zhuo 		if (!xdp_page)
1533ad4858beSXuan Zhuo 			return NULL;
1534ad4858beSXuan Zhuo 
1535ad4858beSXuan Zhuo 		memcpy(page_address(xdp_page) + VIRTIO_XDP_HEADROOM,
1536ad4858beSXuan Zhuo 		       page_address(*page) + offset, *len);
1537ad4858beSXuan Zhuo 	}
1538ad4858beSXuan Zhuo 
1539dbe4fec2SXuan Zhuo 	*frame_sz = PAGE_SIZE;
1540dbe4fec2SXuan Zhuo 
1541dbe4fec2SXuan Zhuo 	put_page(*page);
1542dbe4fec2SXuan Zhuo 
1543dbe4fec2SXuan Zhuo 	*page = xdp_page;
1544dbe4fec2SXuan Zhuo 
1545dbe4fec2SXuan Zhuo 	return page_address(*page) + VIRTIO_XDP_HEADROOM;
1546ad4858beSXuan Zhuo }
1547ad4858beSXuan Zhuo 
1548d8f2835aSXuan Zhuo static struct sk_buff *receive_mergeable_xdp(struct net_device *dev,
1549d8f2835aSXuan Zhuo 					     struct virtnet_info *vi,
1550d8f2835aSXuan Zhuo 					     struct receive_queue *rq,
1551d8f2835aSXuan Zhuo 					     struct bpf_prog *xdp_prog,
1552d8f2835aSXuan Zhuo 					     void *buf,
1553d8f2835aSXuan Zhuo 					     void *ctx,
1554d8f2835aSXuan Zhuo 					     unsigned int len,
1555d8f2835aSXuan Zhuo 					     unsigned int *xdp_xmit,
1556d8f2835aSXuan Zhuo 					     struct virtnet_rq_stats *stats)
1557d8f2835aSXuan Zhuo {
1558d8f2835aSXuan Zhuo 	struct virtio_net_hdr_mrg_rxbuf *hdr = buf;
1559d8f2835aSXuan Zhuo 	int num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers);
1560d8f2835aSXuan Zhuo 	struct page *page = virt_to_head_page(buf);
1561d8f2835aSXuan Zhuo 	int offset = buf - page_address(page);
1562d8f2835aSXuan Zhuo 	unsigned int xdp_frags_truesz = 0;
1563d8f2835aSXuan Zhuo 	struct sk_buff *head_skb;
1564d8f2835aSXuan Zhuo 	unsigned int frame_sz;
1565d8f2835aSXuan Zhuo 	struct xdp_buff xdp;
1566d8f2835aSXuan Zhuo 	void *data;
1567d8f2835aSXuan Zhuo 	u32 act;
1568d8f2835aSXuan Zhuo 	int err;
1569d8f2835aSXuan Zhuo 
1570d8f2835aSXuan Zhuo 	data = mergeable_xdp_get_buf(vi, rq, xdp_prog, ctx, &frame_sz, &num_buf, &page,
1571d8f2835aSXuan Zhuo 				     offset, &len, hdr);
1572d8f2835aSXuan Zhuo 	if (unlikely(!data))
1573d8f2835aSXuan Zhuo 		goto err_xdp;
1574d8f2835aSXuan Zhuo 
1575d8f2835aSXuan Zhuo 	err = virtnet_build_xdp_buff_mrg(dev, vi, rq, &xdp, data, len, frame_sz,
1576d8f2835aSXuan Zhuo 					 &num_buf, &xdp_frags_truesz, stats);
1577d8f2835aSXuan Zhuo 	if (unlikely(err))
1578d8f2835aSXuan Zhuo 		goto err_xdp;
1579d8f2835aSXuan Zhuo 
1580d8f2835aSXuan Zhuo 	act = virtnet_xdp_handler(xdp_prog, &xdp, dev, xdp_xmit, stats);
1581d8f2835aSXuan Zhuo 
1582d8f2835aSXuan Zhuo 	switch (act) {
1583d8f2835aSXuan Zhuo 	case XDP_PASS:
1584d8f2835aSXuan Zhuo 		head_skb = build_skb_from_xdp_buff(dev, vi, &xdp, xdp_frags_truesz);
1585d8f2835aSXuan Zhuo 		if (unlikely(!head_skb))
1586d8f2835aSXuan Zhuo 			break;
1587d8f2835aSXuan Zhuo 		return head_skb;
1588d8f2835aSXuan Zhuo 
1589d8f2835aSXuan Zhuo 	case XDP_TX:
1590d8f2835aSXuan Zhuo 	case XDP_REDIRECT:
1591d8f2835aSXuan Zhuo 		return NULL;
1592d8f2835aSXuan Zhuo 
1593d8f2835aSXuan Zhuo 	default:
1594d8f2835aSXuan Zhuo 		break;
1595d8f2835aSXuan Zhuo 	}
1596d8f2835aSXuan Zhuo 
1597d8f2835aSXuan Zhuo 	put_xdp_frags(&xdp);
1598d8f2835aSXuan Zhuo 
1599d8f2835aSXuan Zhuo err_xdp:
1600d8f2835aSXuan Zhuo 	put_page(page);
1601d8f2835aSXuan Zhuo 	mergeable_buf_free(rq, num_buf, dev, stats);
1602d8f2835aSXuan Zhuo 
1603d8f2835aSXuan Zhuo 	stats->xdp_drops++;
1604d8f2835aSXuan Zhuo 	stats->drops++;
1605d8f2835aSXuan Zhuo 	return NULL;
1606d8f2835aSXuan Zhuo }
1607d8f2835aSXuan Zhuo 
16088fc3b9e9SMichael S. Tsirkin static struct sk_buff *receive_mergeable(struct net_device *dev,
1609fdd819b2SMichael S. Tsirkin 					 struct virtnet_info *vi,
16108fc3b9e9SMichael S. Tsirkin 					 struct receive_queue *rq,
1611680557cfSMichael S. Tsirkin 					 void *buf,
1612680557cfSMichael S. Tsirkin 					 void *ctx,
1613186b3c99SJason Wang 					 unsigned int len,
16147d9d60fdSToshiaki Makita 					 unsigned int *xdp_xmit,
1615d46eeeafSJason Wang 					 struct virtnet_rq_stats *stats)
16169ab86bbcSShirley Ma {
1617012873d0SMichael S. Tsirkin 	struct virtio_net_hdr_mrg_rxbuf *hdr = buf;
1618981f14d4SHeng Qi 	int num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers);
16198fc3b9e9SMichael S. Tsirkin 	struct page *page = virt_to_head_page(buf);
16208fc3b9e9SMichael S. Tsirkin 	int offset = buf - page_address(page);
1621f600b690SJohn Fastabend 	struct sk_buff *head_skb, *curr_skb;
16229ce6146eSJesper Dangaard Brouer 	unsigned int truesize = mergeable_ctx_to_truesize(ctx);
16234941d472SJason Wang 	unsigned int headroom = mergeable_ctx_to_headroom(ctx);
1624ef75cb51SHeng Qi 	unsigned int tailroom = headroom ? sizeof(struct skb_shared_info) : 0;
1625ef75cb51SHeng Qi 	unsigned int room = SKB_DATA_ALIGN(headroom + tailroom);
1626ab7db917SMichael Dalton 
162756434a01SJohn Fastabend 	head_skb = NULL;
1628d46eeeafSJason Wang 	stats->bytes += len - vi->hdr_len;
162956434a01SJohn Fastabend 
1630ef75cb51SHeng Qi 	if (unlikely(len > truesize - room)) {
1631ad993a95SXie Yongji 		pr_debug("%s: rx error: len %u exceeds truesize %lu\n",
1632ef75cb51SHeng Qi 			 dev->name, len, (unsigned long)(truesize - room));
1633ad993a95SXie Yongji 		dev->stats.rx_length_errors++;
1634ad993a95SXie Yongji 		goto err_skb;
1635ad993a95SXie Yongji 	}
16366213f07cSLi RongQing 
163759ba3b1aSXuan Zhuo 	if (unlikely(vi->xdp_enabled)) {
163859ba3b1aSXuan Zhuo 		struct bpf_prog *xdp_prog;
16396213f07cSLi RongQing 
1640f600b690SJohn Fastabend 		rcu_read_lock();
1641f600b690SJohn Fastabend 		xdp_prog = rcu_dereference(rq->xdp_prog);
1642f600b690SJohn Fastabend 		if (xdp_prog) {
1643d8f2835aSXuan Zhuo 			head_skb = receive_mergeable_xdp(dev, vi, rq, xdp_prog, buf, ctx,
1644d8f2835aSXuan Zhuo 							 len, xdp_xmit, stats);
1645fab89bafSHeng Qi 			rcu_read_unlock();
16461830f893SJason Wang 			return head_skb;
164756434a01SJohn Fastabend 		}
1648f600b690SJohn Fastabend 		rcu_read_unlock();
164959ba3b1aSXuan Zhuo 	}
1650f600b690SJohn Fastabend 
1651fa0f1ba7SXuan Zhuo 	head_skb = page_to_skb(vi, rq, page, offset, len, truesize, headroom);
1652f600b690SJohn Fastabend 	curr_skb = head_skb;
16539ab86bbcSShirley Ma 
16548fc3b9e9SMichael S. Tsirkin 	if (unlikely(!curr_skb))
16558fc3b9e9SMichael S. Tsirkin 		goto err_skb;
16569ab86bbcSShirley Ma 	while (--num_buf) {
16578fc3b9e9SMichael S. Tsirkin 		int num_skb_frags;
16588fc3b9e9SMichael S. Tsirkin 
1659295525e2SXuan Zhuo 		buf = virtnet_rq_get_buf(rq, &len, &ctx);
166003e9f8a0SYunjian Wang 		if (unlikely(!buf)) {
16618fc3b9e9SMichael S. Tsirkin 			pr_debug("%s: rx error: %d buffers out of %d missing\n",
1662fdd819b2SMichael S. Tsirkin 				 dev->name, num_buf,
1663012873d0SMichael S. Tsirkin 				 virtio16_to_cpu(vi->vdev,
1664012873d0SMichael S. Tsirkin 						 hdr->num_buffers));
16658fc3b9e9SMichael S. Tsirkin 			dev->stats.rx_length_errors++;
16668fc3b9e9SMichael S. Tsirkin 			goto err_buf;
16673f2c31d9SMark McLoughlin 		}
16688fc3b9e9SMichael S. Tsirkin 
1669d46eeeafSJason Wang 		stats->bytes += len;
16708fc3b9e9SMichael S. Tsirkin 		page = virt_to_head_page(buf);
167128b39bc7SJason Wang 
167228b39bc7SJason Wang 		truesize = mergeable_ctx_to_truesize(ctx);
1673ef75cb51SHeng Qi 		headroom = mergeable_ctx_to_headroom(ctx);
1674ef75cb51SHeng Qi 		tailroom = headroom ? sizeof(struct skb_shared_info) : 0;
1675ef75cb51SHeng Qi 		room = SKB_DATA_ALIGN(headroom + tailroom);
1676ef75cb51SHeng Qi 		if (unlikely(len > truesize - room)) {
167756da5fd0SDan Carpenter 			pr_debug("%s: rx error: len %u exceeds truesize %lu\n",
1678ef75cb51SHeng Qi 				 dev->name, len, (unsigned long)(truesize - room));
1679680557cfSMichael S. Tsirkin 			dev->stats.rx_length_errors++;
1680680557cfSMichael S. Tsirkin 			goto err_skb;
1681680557cfSMichael S. Tsirkin 		}
16828fc3b9e9SMichael S. Tsirkin 
16838fc3b9e9SMichael S. Tsirkin 		num_skb_frags = skb_shinfo(curr_skb)->nr_frags;
16842613af0eSMichael Dalton 		if (unlikely(num_skb_frags == MAX_SKB_FRAGS)) {
16852613af0eSMichael Dalton 			struct sk_buff *nskb = alloc_skb(0, GFP_ATOMIC);
16868fc3b9e9SMichael S. Tsirkin 
16878fc3b9e9SMichael S. Tsirkin 			if (unlikely(!nskb))
16888fc3b9e9SMichael S. Tsirkin 				goto err_skb;
16892613af0eSMichael Dalton 			if (curr_skb == head_skb)
16902613af0eSMichael Dalton 				skb_shinfo(curr_skb)->frag_list = nskb;
16912613af0eSMichael Dalton 			else
16922613af0eSMichael Dalton 				curr_skb->next = nskb;
16932613af0eSMichael Dalton 			curr_skb = nskb;
16942613af0eSMichael Dalton 			head_skb->truesize += nskb->truesize;
16952613af0eSMichael Dalton 			num_skb_frags = 0;
16962613af0eSMichael Dalton 		}
16972613af0eSMichael Dalton 		if (curr_skb != head_skb) {
16982613af0eSMichael Dalton 			head_skb->data_len += len;
16992613af0eSMichael Dalton 			head_skb->len += len;
1700fb51879dSMichael Dalton 			head_skb->truesize += truesize;
17012613af0eSMichael Dalton 		}
17028fc3b9e9SMichael S. Tsirkin 		offset = buf - page_address(page);
1703ba275241SJason Wang 		if (skb_can_coalesce(curr_skb, num_skb_frags, page, offset)) {
1704ba275241SJason Wang 			put_page(page);
1705ba275241SJason Wang 			skb_coalesce_rx_frag(curr_skb, num_skb_frags - 1,
1706fb51879dSMichael Dalton 					     len, truesize);
1707ba275241SJason Wang 		} else {
17082613af0eSMichael Dalton 			skb_add_rx_frag(curr_skb, num_skb_frags, page,
1709fb51879dSMichael Dalton 					offset, len, truesize);
1710ba275241SJason Wang 		}
17118fc3b9e9SMichael S. Tsirkin 	}
17128fc3b9e9SMichael S. Tsirkin 
17135377d758SJohannes Berg 	ewma_pkt_len_add(&rq->mrg_avg_pkt_len, head_skb->len);
17148fc3b9e9SMichael S. Tsirkin 	return head_skb;
17158fc3b9e9SMichael S. Tsirkin 
17168fc3b9e9SMichael S. Tsirkin err_skb:
17178fc3b9e9SMichael S. Tsirkin 	put_page(page);
171880f50f91SXuan Zhuo 	mergeable_buf_free(rq, num_buf, dev, stats);
171980f50f91SXuan Zhuo 
17208fc3b9e9SMichael S. Tsirkin err_buf:
1721d46eeeafSJason Wang 	stats->drops++;
17228fc3b9e9SMichael S. Tsirkin 	dev_kfree_skb(head_skb);
17238fc3b9e9SMichael S. Tsirkin 	return NULL;
17249ab86bbcSShirley Ma }
17259ab86bbcSShirley Ma 
172691f41f01SAndrew Melnychenko static void virtio_skb_set_hash(const struct virtio_net_hdr_v1_hash *hdr_hash,
172791f41f01SAndrew Melnychenko 				struct sk_buff *skb)
172891f41f01SAndrew Melnychenko {
172991f41f01SAndrew Melnychenko 	enum pkt_hash_types rss_hash_type;
173091f41f01SAndrew Melnychenko 
173191f41f01SAndrew Melnychenko 	if (!hdr_hash || !skb)
173291f41f01SAndrew Melnychenko 		return;
173391f41f01SAndrew Melnychenko 
173495bb6330SMichael S. Tsirkin 	switch (__le16_to_cpu(hdr_hash->hash_report)) {
173591f41f01SAndrew Melnychenko 	case VIRTIO_NET_HASH_REPORT_TCPv4:
173691f41f01SAndrew Melnychenko 	case VIRTIO_NET_HASH_REPORT_UDPv4:
173791f41f01SAndrew Melnychenko 	case VIRTIO_NET_HASH_REPORT_TCPv6:
173891f41f01SAndrew Melnychenko 	case VIRTIO_NET_HASH_REPORT_UDPv6:
173991f41f01SAndrew Melnychenko 	case VIRTIO_NET_HASH_REPORT_TCPv6_EX:
174091f41f01SAndrew Melnychenko 	case VIRTIO_NET_HASH_REPORT_UDPv6_EX:
174191f41f01SAndrew Melnychenko 		rss_hash_type = PKT_HASH_TYPE_L4;
174291f41f01SAndrew Melnychenko 		break;
174391f41f01SAndrew Melnychenko 	case VIRTIO_NET_HASH_REPORT_IPv4:
174491f41f01SAndrew Melnychenko 	case VIRTIO_NET_HASH_REPORT_IPv6:
174591f41f01SAndrew Melnychenko 	case VIRTIO_NET_HASH_REPORT_IPv6_EX:
174691f41f01SAndrew Melnychenko 		rss_hash_type = PKT_HASH_TYPE_L3;
174791f41f01SAndrew Melnychenko 		break;
174891f41f01SAndrew Melnychenko 	case VIRTIO_NET_HASH_REPORT_NONE:
174991f41f01SAndrew Melnychenko 	default:
175091f41f01SAndrew Melnychenko 		rss_hash_type = PKT_HASH_TYPE_NONE;
175191f41f01SAndrew Melnychenko 	}
175295bb6330SMichael S. Tsirkin 	skb_set_hash(skb, __le32_to_cpu(hdr_hash->hash_value), rss_hash_type);
175391f41f01SAndrew Melnychenko }
175491f41f01SAndrew Melnychenko 
17557d9d60fdSToshiaki Makita static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
17562471c75eSJesper Dangaard Brouer 			void *buf, unsigned int len, void **ctx,
1757a0929a44SToshiaki Makita 			unsigned int *xdp_xmit,
1758d46eeeafSJason Wang 			struct virtnet_rq_stats *stats)
17599ab86bbcSShirley Ma {
1760e9d7417bSJason Wang 	struct net_device *dev = vi->dev;
17619ab86bbcSShirley Ma 	struct sk_buff *skb;
1762dae64749SFeng Liu 	struct virtio_net_common_hdr *hdr;
17639ab86bbcSShirley Ma 
1764bcff3162SMichael S. Tsirkin 	if (unlikely(len < vi->hdr_len + ETH_HLEN)) {
17659ab86bbcSShirley Ma 		pr_debug("%s: short packet %i\n", dev->name, len);
17669ab86bbcSShirley Ma 		dev->stats.rx_length_errors++;
1767eb1d929fSParav Pandit 		virtnet_rq_free_unused_buf(rq->vq, buf);
17687d9d60fdSToshiaki Makita 		return;
17699ab86bbcSShirley Ma 	}
17709ab86bbcSShirley Ma 
1771f121159dSMichael S. Tsirkin 	if (vi->mergeable_rx_bufs)
17727d9d60fdSToshiaki Makita 		skb = receive_mergeable(dev, vi, rq, buf, ctx, len, xdp_xmit,
1773a0929a44SToshiaki Makita 					stats);
1774f121159dSMichael S. Tsirkin 	else if (vi->big_packets)
1775a0929a44SToshiaki Makita 		skb = receive_big(dev, vi, rq, buf, len, stats);
1776f121159dSMichael S. Tsirkin 	else
1777a0929a44SToshiaki Makita 		skb = receive_small(dev, vi, rq, buf, ctx, len, xdp_xmit, stats);
1778f121159dSMichael S. Tsirkin 
17798fc3b9e9SMichael S. Tsirkin 	if (unlikely(!skb))
17807d9d60fdSToshiaki Makita 		return;
17813f2c31d9SMark McLoughlin 
1782dae64749SFeng Liu 	hdr = skb_vnet_common_hdr(skb);
178391f41f01SAndrew Melnychenko 	if (dev->features & NETIF_F_RXHASH && vi->has_rss_hash_report)
1784dae64749SFeng Liu 		virtio_skb_set_hash(&hdr->hash_v1_hdr, skb);
17853fa2a1dfSstephen hemminger 
1786e858fae2SMike Rapoport 	if (hdr->hdr.flags & VIRTIO_NET_HDR_F_DATA_VALID)
178710a8d94aSJason Wang 		skb->ip_summed = CHECKSUM_UNNECESSARY;
1788296f96fcSRusty Russell 
1789e858fae2SMike Rapoport 	if (virtio_net_hdr_to_skb(skb, &hdr->hdr,
1790e858fae2SMike Rapoport 				  virtio_is_little_endian(vi->vdev))) {
1791e858fae2SMike Rapoport 		net_warn_ratelimited("%s: bad gso: type: %u, size: %u\n",
1792e858fae2SMike Rapoport 				     dev->name, hdr->hdr.gso_type,
1793fdd819b2SMichael S. Tsirkin 				     hdr->hdr.gso_size);
1794296f96fcSRusty Russell 		goto frame_err;
1795296f96fcSRusty Russell 	}
1796296f96fcSRusty Russell 
1797133bbb18SWillem de Bruijn 	skb_record_rx_queue(skb, vq2rxq(rq->vq));
1798d1dc06dcSMike Rapoport 	skb->protocol = eth_type_trans(skb, dev);
1799d1dc06dcSMike Rapoport 	pr_debug("Receiving skb proto 0x%04x len %i type %i\n",
1800d1dc06dcSMike Rapoport 		 ntohs(skb->protocol), skb->len, skb->pkt_type);
1801d1dc06dcSMike Rapoport 
18020fbd050aSEric Dumazet 	napi_gro_receive(&rq->napi, skb);
18037d9d60fdSToshiaki Makita 	return;
1804296f96fcSRusty Russell 
1805296f96fcSRusty Russell frame_err:
1806296f96fcSRusty Russell 	dev->stats.rx_frame_errors++;
1807296f96fcSRusty Russell 	dev_kfree_skb(skb);
1808296f96fcSRusty Russell }
1809296f96fcSRusty Russell 
1810192f68cfSJason Wang /* Unlike mergeable buffers, all buffers are allocated to the
1811192f68cfSJason Wang  * same size, except for the headroom. For this reason we do
1812192f68cfSJason Wang  * not need to use  mergeable_len_to_ctx here - it is enough
1813192f68cfSJason Wang  * to store the headroom as the context ignoring the truesize.
1814192f68cfSJason Wang  */
1815946fa564SMichael S. Tsirkin static int add_recvbuf_small(struct virtnet_info *vi, struct receive_queue *rq,
1816946fa564SMichael S. Tsirkin 			     gfp_t gfp)
1817296f96fcSRusty Russell {
1818f6b10209SJason Wang 	char *buf;
18192de2f7f4SJohn Fastabend 	unsigned int xdp_headroom = virtnet_get_headroom(vi);
1820192f68cfSJason Wang 	void *ctx = (void *)(unsigned long)xdp_headroom;
1821f6b10209SJason Wang 	int len = vi->hdr_len + VIRTNET_RX_PAD + GOOD_PACKET_LEN + xdp_headroom;
18229ab86bbcSShirley Ma 	int err;
18233f2c31d9SMark McLoughlin 
1824f6b10209SJason Wang 	len = SKB_DATA_ALIGN(len) +
1825f6b10209SJason Wang 	      SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
1826295525e2SXuan Zhuo 
1827295525e2SXuan Zhuo 	buf = virtnet_rq_alloc(rq, len, gfp);
1828295525e2SXuan Zhuo 	if (unlikely(!buf))
18299ab86bbcSShirley Ma 		return -ENOMEM;
1830296f96fcSRusty Russell 
1831295525e2SXuan Zhuo 	virtnet_rq_init_one_sg(rq, buf + VIRTNET_RX_PAD + xdp_headroom,
1832f6b10209SJason Wang 			       vi->hdr_len + GOOD_PACKET_LEN);
1833295525e2SXuan Zhuo 
1834192f68cfSJason Wang 	err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp);
1835295525e2SXuan Zhuo 	if (err < 0) {
1836295525e2SXuan Zhuo 		if (rq->do_dma)
1837295525e2SXuan Zhuo 			virtnet_rq_unmap(rq, buf, 0);
1838f6b10209SJason Wang 		put_page(virt_to_head_page(buf));
1839295525e2SXuan Zhuo 	}
1840295525e2SXuan Zhuo 
18419ab86bbcSShirley Ma 	return err;
184297402b96SHerbert Xu }
184397402b96SHerbert Xu 
1844012873d0SMichael S. Tsirkin static int add_recvbuf_big(struct virtnet_info *vi, struct receive_queue *rq,
1845012873d0SMichael S. Tsirkin 			   gfp_t gfp)
18469ab86bbcSShirley Ma {
18479ab86bbcSShirley Ma 	struct page *first, *list = NULL;
18489ab86bbcSShirley Ma 	char *p;
18499ab86bbcSShirley Ma 	int i, err, offset;
1850296f96fcSRusty Russell 
18514959aebbSGavin Li 	sg_init_table(rq->sg, vi->big_packets_num_skbfrags + 2);
1852a5835440SRusty Russell 
18534959aebbSGavin Li 	/* page in rq->sg[vi->big_packets_num_skbfrags + 1] is list tail */
18544959aebbSGavin Li 	for (i = vi->big_packets_num_skbfrags + 1; i > 1; --i) {
1855e9d7417bSJason Wang 		first = get_a_page(rq, gfp);
18569ab86bbcSShirley Ma 		if (!first) {
18579ab86bbcSShirley Ma 			if (list)
1858e9d7417bSJason Wang 				give_pages(rq, list);
18599ab86bbcSShirley Ma 			return -ENOMEM;
1860296f96fcSRusty Russell 		}
1861e9d7417bSJason Wang 		sg_set_buf(&rq->sg[i], page_address(first), PAGE_SIZE);
18629ab86bbcSShirley Ma 
18639ab86bbcSShirley Ma 		/* chain new page in list head to match sg */
18649ab86bbcSShirley Ma 		first->private = (unsigned long)list;
18659ab86bbcSShirley Ma 		list = first;
18669ab86bbcSShirley Ma 	}
18679ab86bbcSShirley Ma 
1868e9d7417bSJason Wang 	first = get_a_page(rq, gfp);
18699ab86bbcSShirley Ma 	if (!first) {
1870e9d7417bSJason Wang 		give_pages(rq, list);
18719ab86bbcSShirley Ma 		return -ENOMEM;
18729ab86bbcSShirley Ma 	}
18739ab86bbcSShirley Ma 	p = page_address(first);
18749ab86bbcSShirley Ma 
1875e9d7417bSJason Wang 	/* rq->sg[0], rq->sg[1] share the same page */
1876012873d0SMichael S. Tsirkin 	/* a separated rq->sg[0] for header - required in case !any_header_sg */
1877012873d0SMichael S. Tsirkin 	sg_set_buf(&rq->sg[0], p, vi->hdr_len);
18789ab86bbcSShirley Ma 
1879e9d7417bSJason Wang 	/* rq->sg[1] for data packet, from offset */
18809ab86bbcSShirley Ma 	offset = sizeof(struct padded_vnet_hdr);
1881e9d7417bSJason Wang 	sg_set_buf(&rq->sg[1], p + offset, PAGE_SIZE - offset);
18829ab86bbcSShirley Ma 
18839ab86bbcSShirley Ma 	/* chain first in list head */
18849ab86bbcSShirley Ma 	first->private = (unsigned long)list;
18854959aebbSGavin Li 	err = virtqueue_add_inbuf(rq->vq, rq->sg, vi->big_packets_num_skbfrags + 2,
1886aa989f5eSMichael S. Tsirkin 				  first, gfp);
18879ab86bbcSShirley Ma 	if (err < 0)
1888e9d7417bSJason Wang 		give_pages(rq, first);
18899ab86bbcSShirley Ma 
18909ab86bbcSShirley Ma 	return err;
18919ab86bbcSShirley Ma }
18929ab86bbcSShirley Ma 
1893d85b758fSMichael S. Tsirkin static unsigned int get_mergeable_buf_len(struct receive_queue *rq,
18943cc81a9aSJason Wang 					  struct ewma_pkt_len *avg_pkt_len,
18953cc81a9aSJason Wang 					  unsigned int room)
18969ab86bbcSShirley Ma {
1897c1ddc42dSAndrew Melnychenko 	struct virtnet_info *vi = rq->vq->vdev->priv;
1898c1ddc42dSAndrew Melnychenko 	const size_t hdr_len = vi->hdr_len;
1899fbf28d78SMichael Dalton 	unsigned int len;
1900fbf28d78SMichael Dalton 
19013cc81a9aSJason Wang 	if (room)
19023cc81a9aSJason Wang 		return PAGE_SIZE - room;
19033cc81a9aSJason Wang 
19045377d758SJohannes Berg 	len = hdr_len +	clamp_t(unsigned int, ewma_pkt_len_read(avg_pkt_len),
1905f0c3192cSMichael S. Tsirkin 				rq->min_buf_len, PAGE_SIZE - hdr_len);
19063cc81a9aSJason Wang 
1907e377fcc8SMichael S. Tsirkin 	return ALIGN(len, L1_CACHE_BYTES);
1908fbf28d78SMichael Dalton }
1909fbf28d78SMichael Dalton 
19102de2f7f4SJohn Fastabend static int add_recvbuf_mergeable(struct virtnet_info *vi,
19112de2f7f4SJohn Fastabend 				 struct receive_queue *rq, gfp_t gfp)
1912fbf28d78SMichael Dalton {
1913fb51879dSMichael Dalton 	struct page_frag *alloc_frag = &rq->alloc_frag;
19142de2f7f4SJohn Fastabend 	unsigned int headroom = virtnet_get_headroom(vi);
19153cc81a9aSJason Wang 	unsigned int tailroom = headroom ? sizeof(struct skb_shared_info) : 0;
19163cc81a9aSJason Wang 	unsigned int room = SKB_DATA_ALIGN(headroom + tailroom);
1917fb51879dSMichael Dalton 	unsigned int len, hole;
1918295525e2SXuan Zhuo 	void *ctx;
1919295525e2SXuan Zhuo 	char *buf;
1920295525e2SXuan Zhuo 	int err;
19219ab86bbcSShirley Ma 
19223cc81a9aSJason Wang 	/* Extra tailroom is needed to satisfy XDP's assumption. This
19233cc81a9aSJason Wang 	 * means rx frags coalescing won't work, but consider we've
19243cc81a9aSJason Wang 	 * disabled GSO for XDP, it won't be a big issue.
19253cc81a9aSJason Wang 	 */
19263cc81a9aSJason Wang 	len = get_mergeable_buf_len(rq, &rq->mrg_avg_pkt_len, room);
1927295525e2SXuan Zhuo 
1928295525e2SXuan Zhuo 	buf = virtnet_rq_alloc(rq, len + room, gfp);
1929295525e2SXuan Zhuo 	if (unlikely(!buf))
19309ab86bbcSShirley Ma 		return -ENOMEM;
1931ab7db917SMichael Dalton 
19322de2f7f4SJohn Fastabend 	buf += headroom; /* advance address leaving hole at front of pkt */
1933fb51879dSMichael Dalton 	hole = alloc_frag->size - alloc_frag->offset;
19343cc81a9aSJason Wang 	if (hole < len + room) {
1935ab7db917SMichael Dalton 		/* To avoid internal fragmentation, if there is very likely not
1936ab7db917SMichael Dalton 		 * enough space for another buffer, add the remaining space to
19371daa8790SMichael S. Tsirkin 		 * the current buffer.
1938484beac2SHeng Qi 		 * XDP core assumes that frame_size of xdp_buff and the length
1939484beac2SHeng Qi 		 * of the frag are PAGE_SIZE, so we disable the hole mechanism.
1940ab7db917SMichael Dalton 		 */
1941484beac2SHeng Qi 		if (!headroom)
1942fb51879dSMichael Dalton 			len += hole;
1943fb51879dSMichael Dalton 		alloc_frag->offset += hole;
1944fb51879dSMichael Dalton 	}
19459ab86bbcSShirley Ma 
1946295525e2SXuan Zhuo 	virtnet_rq_init_one_sg(rq, buf, len);
1947295525e2SXuan Zhuo 
1948ef75cb51SHeng Qi 	ctx = mergeable_len_to_ctx(len + room, headroom);
1949680557cfSMichael S. Tsirkin 	err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp);
1950295525e2SXuan Zhuo 	if (err < 0) {
1951295525e2SXuan Zhuo 		if (rq->do_dma)
1952295525e2SXuan Zhuo 			virtnet_rq_unmap(rq, buf, 0);
19532613af0eSMichael Dalton 		put_page(virt_to_head_page(buf));
1954295525e2SXuan Zhuo 	}
19559ab86bbcSShirley Ma 
19569ab86bbcSShirley Ma 	return err;
1957296f96fcSRusty Russell }
1958296f96fcSRusty Russell 
1959b2baed69SRusty Russell /*
1960b2baed69SRusty Russell  * Returns false if we couldn't fill entirely (OOM).
1961b2baed69SRusty Russell  *
1962b2baed69SRusty Russell  * Normally run in the receive path, but can also be run from ndo_open
1963b2baed69SRusty Russell  * before we're receiving packets, or from refill_work which is
1964b2baed69SRusty Russell  * careful to disable receiving (using napi_disable).
1965b2baed69SRusty Russell  */
1966946fa564SMichael S. Tsirkin static bool try_fill_recv(struct virtnet_info *vi, struct receive_queue *rq,
1967946fa564SMichael S. Tsirkin 			  gfp_t gfp)
19683f2c31d9SMark McLoughlin {
19693f2c31d9SMark McLoughlin 	int err;
19701788f495SMichael S. Tsirkin 	bool oom;
19713f2c31d9SMark McLoughlin 
19720aea51c3SAmit Shah 	do {
19739ab86bbcSShirley Ma 		if (vi->mergeable_rx_bufs)
19742de2f7f4SJohn Fastabend 			err = add_recvbuf_mergeable(vi, rq, gfp);
19759ab86bbcSShirley Ma 		else if (vi->big_packets)
1976012873d0SMichael S. Tsirkin 			err = add_recvbuf_big(vi, rq, gfp);
19779ab86bbcSShirley Ma 		else
1978946fa564SMichael S. Tsirkin 			err = add_recvbuf_small(vi, rq, gfp);
19793f2c31d9SMark McLoughlin 
19801788f495SMichael S. Tsirkin 		oom = err == -ENOMEM;
19819ed4cb07SRusty Russell 		if (err)
19823f2c31d9SMark McLoughlin 			break;
1983b7dfde95SLinus Torvalds 	} while (rq->vq->num_free);
1984461f03dcSToshiaki Makita 	if (virtqueue_kick_prepare(rq->vq) && virtqueue_notify(rq->vq)) {
198501c32598SMichael S. Tsirkin 		unsigned long flags;
198601c32598SMichael S. Tsirkin 
198701c32598SMichael S. Tsirkin 		flags = u64_stats_update_begin_irqsave(&rq->stats.syncp);
1988d46eeeafSJason Wang 		rq->stats.kicks++;
198901c32598SMichael S. Tsirkin 		u64_stats_update_end_irqrestore(&rq->stats.syncp, flags);
1990461f03dcSToshiaki Makita 	}
1991461f03dcSToshiaki Makita 
19923161e453SRusty Russell 	return !oom;
19933f2c31d9SMark McLoughlin }
19943f2c31d9SMark McLoughlin 
199518445c4dSRusty Russell static void skb_recv_done(struct virtqueue *rvq)
1996296f96fcSRusty Russell {
1997296f96fcSRusty Russell 	struct virtnet_info *vi = rvq->vdev->priv;
1998986a4f4dSJason Wang 	struct receive_queue *rq = &vi->rq[vq2rxq(rvq)];
1999e9d7417bSJason Wang 
2000e4e8452aSWillem de Bruijn 	virtqueue_napi_schedule(&rq->napi, rvq);
2001296f96fcSRusty Russell }
2002296f96fcSRusty Russell 
2003e4e8452aSWillem de Bruijn static void virtnet_napi_enable(struct virtqueue *vq, struct napi_struct *napi)
20043e9d08ecSBruce Rogers {
2005e4e8452aSWillem de Bruijn 	napi_enable(napi);
20063e9d08ecSBruce Rogers 
20073e9d08ecSBruce Rogers 	/* If all buffers were filled by other side before we napi_enabled, we
2008e4e8452aSWillem de Bruijn 	 * won't get another interrupt, so process any outstanding packets now.
2009e4e8452aSWillem de Bruijn 	 * Call local_bh_enable after to trigger softIRQ processing.
2010e4e8452aSWillem de Bruijn 	 */
2011ec13ee80SMichael S. Tsirkin 	local_bh_disable();
2012e4e8452aSWillem de Bruijn 	virtqueue_napi_schedule(napi, vq);
2013ec13ee80SMichael S. Tsirkin 	local_bh_enable();
20143e9d08ecSBruce Rogers }
20153e9d08ecSBruce Rogers 
2016b92f1e67SWillem de Bruijn static void virtnet_napi_tx_enable(struct virtnet_info *vi,
2017b92f1e67SWillem de Bruijn 				   struct virtqueue *vq,
2018b92f1e67SWillem de Bruijn 				   struct napi_struct *napi)
2019b92f1e67SWillem de Bruijn {
2020b92f1e67SWillem de Bruijn 	if (!napi->weight)
2021b92f1e67SWillem de Bruijn 		return;
2022b92f1e67SWillem de Bruijn 
2023b92f1e67SWillem de Bruijn 	/* Tx napi touches cachelines on the cpu handling tx interrupts. Only
2024b92f1e67SWillem de Bruijn 	 * enable the feature if this is likely affine with the transmit path.
2025b92f1e67SWillem de Bruijn 	 */
2026b92f1e67SWillem de Bruijn 	if (!vi->affinity_hint_set) {
2027b92f1e67SWillem de Bruijn 		napi->weight = 0;
2028b92f1e67SWillem de Bruijn 		return;
2029b92f1e67SWillem de Bruijn 	}
2030b92f1e67SWillem de Bruijn 
2031b92f1e67SWillem de Bruijn 	return virtnet_napi_enable(vq, napi);
2032b92f1e67SWillem de Bruijn }
2033b92f1e67SWillem de Bruijn 
203478a57b48SWillem de Bruijn static void virtnet_napi_tx_disable(struct napi_struct *napi)
203578a57b48SWillem de Bruijn {
203678a57b48SWillem de Bruijn 	if (napi->weight)
203778a57b48SWillem de Bruijn 		napi_disable(napi);
203878a57b48SWillem de Bruijn }
203978a57b48SWillem de Bruijn 
20403161e453SRusty Russell static void refill_work(struct work_struct *work)
20413161e453SRusty Russell {
2042e9d7417bSJason Wang 	struct virtnet_info *vi =
2043e9d7417bSJason Wang 		container_of(work, struct virtnet_info, refill.work);
20443161e453SRusty Russell 	bool still_empty;
2045986a4f4dSJason Wang 	int i;
20463161e453SRusty Russell 
204755257d72SSasha Levin 	for (i = 0; i < vi->curr_queue_pairs; i++) {
2048986a4f4dSJason Wang 		struct receive_queue *rq = &vi->rq[i];
2049986a4f4dSJason Wang 
2050986a4f4dSJason Wang 		napi_disable(&rq->napi);
2051946fa564SMichael S. Tsirkin 		still_empty = !try_fill_recv(vi, rq, GFP_KERNEL);
2052e4e8452aSWillem de Bruijn 		virtnet_napi_enable(rq->vq, &rq->napi);
20533161e453SRusty Russell 
20543161e453SRusty Russell 		/* In theory, this can happen: if we don't get any buffers in
2055986a4f4dSJason Wang 		 * we will *never* try to fill again.
2056986a4f4dSJason Wang 		 */
20573161e453SRusty Russell 		if (still_empty)
20583b07e9caSTejun Heo 			schedule_delayed_work(&vi->refill, HZ/2);
20593161e453SRusty Russell 	}
2060986a4f4dSJason Wang }
20613161e453SRusty Russell 
20622471c75eSJesper Dangaard Brouer static int virtnet_receive(struct receive_queue *rq, int budget,
20632471c75eSJesper Dangaard Brouer 			   unsigned int *xdp_xmit)
2064296f96fcSRusty Russell {
2065e9d7417bSJason Wang 	struct virtnet_info *vi = rq->vq->vdev->priv;
2066d46eeeafSJason Wang 	struct virtnet_rq_stats stats = {};
2067a0929a44SToshiaki Makita 	unsigned int len;
20689ab86bbcSShirley Ma 	void *buf;
2069a0929a44SToshiaki Makita 	int i;
2070296f96fcSRusty Russell 
2071192f68cfSJason Wang 	if (!vi->big_packets || vi->mergeable_rx_bufs) {
2072680557cfSMichael S. Tsirkin 		void *ctx;
2073680557cfSMichael S. Tsirkin 
2074d46eeeafSJason Wang 		while (stats.packets < budget &&
2075295525e2SXuan Zhuo 		       (buf = virtnet_rq_get_buf(rq, &len, &ctx))) {
2076a0929a44SToshiaki Makita 			receive_buf(vi, rq, buf, len, ctx, xdp_xmit, &stats);
2077d46eeeafSJason Wang 			stats.packets++;
2078680557cfSMichael S. Tsirkin 		}
2079680557cfSMichael S. Tsirkin 	} else {
2080d46eeeafSJason Wang 		while (stats.packets < budget &&
2081295525e2SXuan Zhuo 		       (buf = virtnet_rq_get_buf(rq, &len, NULL)) != NULL) {
2082a0929a44SToshiaki Makita 			receive_buf(vi, rq, buf, len, NULL, xdp_xmit, &stats);
2083d46eeeafSJason Wang 			stats.packets++;
2084296f96fcSRusty Russell 		}
2085680557cfSMichael S. Tsirkin 	}
2086296f96fcSRusty Russell 
2087718be6baS? jiang 	if (rq->vq->num_free > min((unsigned int)budget, virtqueue_get_vring_size(rq->vq)) / 2) {
20885a159128SJason Wang 		if (!try_fill_recv(vi, rq, GFP_ATOMIC)) {
20895a159128SJason Wang 			spin_lock(&vi->refill_lock);
20905a159128SJason Wang 			if (vi->refill_enabled)
20913b07e9caSTejun Heo 				schedule_delayed_work(&vi->refill, 0);
20925a159128SJason Wang 			spin_unlock(&vi->refill_lock);
20935a159128SJason Wang 		}
20943161e453SRusty Russell 	}
2095296f96fcSRusty Russell 
2096d7dfc5cfSToshiaki Makita 	u64_stats_update_begin(&rq->stats.syncp);
2097a0929a44SToshiaki Makita 	for (i = 0; i < VIRTNET_RQ_STATS_LEN; i++) {
2098a0929a44SToshiaki Makita 		size_t offset = virtnet_rq_stats_desc[i].offset;
2099a0929a44SToshiaki Makita 		u64 *item;
2100a0929a44SToshiaki Makita 
2101d46eeeafSJason Wang 		item = (u64 *)((u8 *)&rq->stats + offset);
2102d46eeeafSJason Wang 		*item += *(u64 *)((u8 *)&stats + offset);
2103a0929a44SToshiaki Makita 	}
2104d7dfc5cfSToshiaki Makita 	u64_stats_update_end(&rq->stats.syncp);
210561845d20SJason Wang 
2106d46eeeafSJason Wang 	return stats.packets;
21072ffa7598SJason Wang }
21082ffa7598SJason Wang 
21097b0411efSWillem de Bruijn static void virtnet_poll_cleantx(struct receive_queue *rq)
21107b0411efSWillem de Bruijn {
21117b0411efSWillem de Bruijn 	struct virtnet_info *vi = rq->vq->vdev->priv;
21127b0411efSWillem de Bruijn 	unsigned int index = vq2rxq(rq->vq);
21137b0411efSWillem de Bruijn 	struct send_queue *sq = &vi->sq[index];
21147b0411efSWillem de Bruijn 	struct netdev_queue *txq = netdev_get_tx_queue(vi->dev, index);
21157b0411efSWillem de Bruijn 
2116534da5e8SToshiaki Makita 	if (!sq->napi.weight || is_xdp_raw_buffer_queue(vi, index))
21177b0411efSWillem de Bruijn 		return;
21187b0411efSWillem de Bruijn 
21197b0411efSWillem de Bruijn 	if (__netif_tx_trylock(txq)) {
2120ebcce492SXuan Zhuo 		if (sq->reset) {
2121ebcce492SXuan Zhuo 			__netif_tx_unlock(txq);
2122ebcce492SXuan Zhuo 			return;
2123ebcce492SXuan Zhuo 		}
2124ebcce492SXuan Zhuo 
2125a7766ef1SMichael S. Tsirkin 		do {
2126a7766ef1SMichael S. Tsirkin 			virtqueue_disable_cb(sq->vq);
2127df133f3fSMichael S. Tsirkin 			free_old_xmit_skbs(sq, true);
2128a7766ef1SMichael S. Tsirkin 		} while (unlikely(!virtqueue_enable_cb_delayed(sq->vq)));
21297b0411efSWillem de Bruijn 
21307b0411efSWillem de Bruijn 		if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS)
21317b0411efSWillem de Bruijn 			netif_tx_wake_queue(txq);
213222bc63c5SMichael S. Tsirkin 
213322bc63c5SMichael S. Tsirkin 		__netif_tx_unlock(txq);
213422bc63c5SMichael S. Tsirkin 	}
21357b0411efSWillem de Bruijn }
21367b0411efSWillem de Bruijn 
21372ffa7598SJason Wang static int virtnet_poll(struct napi_struct *napi, int budget)
21382ffa7598SJason Wang {
21392ffa7598SJason Wang 	struct receive_queue *rq =
21402ffa7598SJason Wang 		container_of(napi, struct receive_queue, napi);
21419267c430SJason Wang 	struct virtnet_info *vi = rq->vq->vdev->priv;
21429267c430SJason Wang 	struct send_queue *sq;
21432a43565cSToshiaki Makita 	unsigned int received;
21442471c75eSJesper Dangaard Brouer 	unsigned int xdp_xmit = 0;
21452ffa7598SJason Wang 
21467b0411efSWillem de Bruijn 	virtnet_poll_cleantx(rq);
21477b0411efSWillem de Bruijn 
2148186b3c99SJason Wang 	received = virtnet_receive(rq, budget, &xdp_xmit);
21492ffa7598SJason Wang 
2150ad7e615fSMagnus Karlsson 	if (xdp_xmit & VIRTIO_XDP_REDIR)
2151ad7e615fSMagnus Karlsson 		xdp_do_flush();
2152ad7e615fSMagnus Karlsson 
21538329d98eSRusty Russell 	/* Out of packets? */
2154e4e8452aSWillem de Bruijn 	if (received < budget)
2155e4e8452aSWillem de Bruijn 		virtqueue_napi_complete(napi, rq->vq, received);
2156296f96fcSRusty Russell 
21572471c75eSJesper Dangaard Brouer 	if (xdp_xmit & VIRTIO_XDP_TX) {
215897c2c69eSXuan Zhuo 		sq = virtnet_xdp_get_sq(vi);
2159461f03dcSToshiaki Makita 		if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq)) {
2160461f03dcSToshiaki Makita 			u64_stats_update_begin(&sq->stats.syncp);
2161461f03dcSToshiaki Makita 			sq->stats.kicks++;
2162461f03dcSToshiaki Makita 			u64_stats_update_end(&sq->stats.syncp);
2163461f03dcSToshiaki Makita 		}
216497c2c69eSXuan Zhuo 		virtnet_xdp_put_sq(vi, sq);
21659267c430SJason Wang 	}
2166186b3c99SJason Wang 
2167296f96fcSRusty Russell 	return received;
2168296f96fcSRusty Russell }
2169296f96fcSRusty Russell 
21705306623aSFeng Liu static void virtnet_disable_queue_pair(struct virtnet_info *vi, int qp_index)
21715306623aSFeng Liu {
21725306623aSFeng Liu 	virtnet_napi_tx_disable(&vi->sq[qp_index].napi);
21735306623aSFeng Liu 	napi_disable(&vi->rq[qp_index].napi);
21745306623aSFeng Liu 	xdp_rxq_info_unreg(&vi->rq[qp_index].xdp_rxq);
21755306623aSFeng Liu }
21765306623aSFeng Liu 
21775306623aSFeng Liu static int virtnet_enable_queue_pair(struct virtnet_info *vi, int qp_index)
21785306623aSFeng Liu {
21795306623aSFeng Liu 	struct net_device *dev = vi->dev;
21805306623aSFeng Liu 	int err;
21815306623aSFeng Liu 
21825306623aSFeng Liu 	err = xdp_rxq_info_reg(&vi->rq[qp_index].xdp_rxq, dev, qp_index,
21835306623aSFeng Liu 			       vi->rq[qp_index].napi.napi_id);
21845306623aSFeng Liu 	if (err < 0)
21855306623aSFeng Liu 		return err;
21865306623aSFeng Liu 
21875306623aSFeng Liu 	err = xdp_rxq_info_reg_mem_model(&vi->rq[qp_index].xdp_rxq,
21885306623aSFeng Liu 					 MEM_TYPE_PAGE_SHARED, NULL);
21895306623aSFeng Liu 	if (err < 0)
21905306623aSFeng Liu 		goto err_xdp_reg_mem_model;
21915306623aSFeng Liu 
21925306623aSFeng Liu 	virtnet_napi_enable(vi->rq[qp_index].vq, &vi->rq[qp_index].napi);
21935306623aSFeng Liu 	virtnet_napi_tx_enable(vi, vi->sq[qp_index].vq, &vi->sq[qp_index].napi);
21945306623aSFeng Liu 
21955306623aSFeng Liu 	return 0;
21965306623aSFeng Liu 
21975306623aSFeng Liu err_xdp_reg_mem_model:
21985306623aSFeng Liu 	xdp_rxq_info_unreg(&vi->rq[qp_index].xdp_rxq);
21995306623aSFeng Liu 	return err;
22005306623aSFeng Liu }
22015306623aSFeng Liu 
2202986a4f4dSJason Wang static int virtnet_open(struct net_device *dev)
2203986a4f4dSJason Wang {
2204986a4f4dSJason Wang 	struct virtnet_info *vi = netdev_priv(dev);
2205754b8a21SJesper Dangaard Brouer 	int i, err;
2206986a4f4dSJason Wang 
22075a159128SJason Wang 	enable_delayed_refill(vi);
22085a159128SJason Wang 
2209e4166625SJason Wang 	for (i = 0; i < vi->max_queue_pairs; i++) {
2210e4166625SJason Wang 		if (i < vi->curr_queue_pairs)
2211986a4f4dSJason Wang 			/* Make sure we have some buffers: if oom use wq. */
2212946fa564SMichael S. Tsirkin 			if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL))
2213986a4f4dSJason Wang 				schedule_delayed_work(&vi->refill, 0);
2214754b8a21SJesper Dangaard Brouer 
22155306623aSFeng Liu 		err = virtnet_enable_queue_pair(vi, i);
2216754b8a21SJesper Dangaard Brouer 		if (err < 0)
22175306623aSFeng Liu 			goto err_enable_qp;
2218986a4f4dSJason Wang 	}
2219986a4f4dSJason Wang 
2220986a4f4dSJason Wang 	return 0;
22215306623aSFeng Liu 
22225306623aSFeng Liu err_enable_qp:
22235306623aSFeng Liu 	disable_delayed_refill(vi);
22245306623aSFeng Liu 	cancel_delayed_work_sync(&vi->refill);
22255306623aSFeng Liu 
22265306623aSFeng Liu 	for (i--; i >= 0; i--)
22275306623aSFeng Liu 		virtnet_disable_queue_pair(vi, i);
22285306623aSFeng Liu 	return err;
2229986a4f4dSJason Wang }
2230986a4f4dSJason Wang 
2231b92f1e67SWillem de Bruijn static int virtnet_poll_tx(struct napi_struct *napi, int budget)
2232b92f1e67SWillem de Bruijn {
2233b92f1e67SWillem de Bruijn 	struct send_queue *sq = container_of(napi, struct send_queue, napi);
2234b92f1e67SWillem de Bruijn 	struct virtnet_info *vi = sq->vq->vdev->priv;
2235534da5e8SToshiaki Makita 	unsigned int index = vq2txq(sq->vq);
2236534da5e8SToshiaki Makita 	struct netdev_queue *txq;
22375a2f966dSMichael S. Tsirkin 	int opaque;
22385a2f966dSMichael S. Tsirkin 	bool done;
2239b92f1e67SWillem de Bruijn 
2240534da5e8SToshiaki Makita 	if (unlikely(is_xdp_raw_buffer_queue(vi, index))) {
2241534da5e8SToshiaki Makita 		/* We don't need to enable cb for XDP */
2242534da5e8SToshiaki Makita 		napi_complete_done(napi, 0);
2243534da5e8SToshiaki Makita 		return 0;
2244534da5e8SToshiaki Makita 	}
2245534da5e8SToshiaki Makita 
2246534da5e8SToshiaki Makita 	txq = netdev_get_tx_queue(vi->dev, index);
2247b92f1e67SWillem de Bruijn 	__netif_tx_lock(txq, raw_smp_processor_id());
22485a2f966dSMichael S. Tsirkin 	virtqueue_disable_cb(sq->vq);
2249df133f3fSMichael S. Tsirkin 	free_old_xmit_skbs(sq, true);
22505a2f966dSMichael S. Tsirkin 
225122bc63c5SMichael S. Tsirkin 	if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS)
225222bc63c5SMichael S. Tsirkin 		netif_tx_wake_queue(txq);
225322bc63c5SMichael S. Tsirkin 
22545a2f966dSMichael S. Tsirkin 	opaque = virtqueue_enable_cb_prepare(sq->vq);
22555a2f966dSMichael S. Tsirkin 
22565a2f966dSMichael S. Tsirkin 	done = napi_complete_done(napi, 0);
22575a2f966dSMichael S. Tsirkin 
22585a2f966dSMichael S. Tsirkin 	if (!done)
22595a2f966dSMichael S. Tsirkin 		virtqueue_disable_cb(sq->vq);
22605a2f966dSMichael S. Tsirkin 
2261b92f1e67SWillem de Bruijn 	__netif_tx_unlock(txq);
2262b92f1e67SWillem de Bruijn 
22635a2f966dSMichael S. Tsirkin 	if (done) {
22645a2f966dSMichael S. Tsirkin 		if (unlikely(virtqueue_poll(sq->vq, opaque))) {
22655a2f966dSMichael S. Tsirkin 			if (napi_schedule_prep(napi)) {
22665a2f966dSMichael S. Tsirkin 				__netif_tx_lock(txq, raw_smp_processor_id());
22675a2f966dSMichael S. Tsirkin 				virtqueue_disable_cb(sq->vq);
22685a2f966dSMichael S. Tsirkin 				__netif_tx_unlock(txq);
22695a2f966dSMichael S. Tsirkin 				__napi_schedule(napi);
22705a2f966dSMichael S. Tsirkin 			}
22715a2f966dSMichael S. Tsirkin 		}
22725a2f966dSMichael S. Tsirkin 	}
2273b92f1e67SWillem de Bruijn 
2274b92f1e67SWillem de Bruijn 	return 0;
2275b92f1e67SWillem de Bruijn }
2276b92f1e67SWillem de Bruijn 
2277e9d7417bSJason Wang static int xmit_skb(struct send_queue *sq, struct sk_buff *skb)
2278296f96fcSRusty Russell {
2279012873d0SMichael S. Tsirkin 	struct virtio_net_hdr_mrg_rxbuf *hdr;
2280296f96fcSRusty Russell 	const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest;
2281e9d7417bSJason Wang 	struct virtnet_info *vi = sq->vq->vdev->priv;
2282e2fcad58SJason A. Donenfeld 	int num_sg;
2283012873d0SMichael S. Tsirkin 	unsigned hdr_len = vi->hdr_len;
2284e7428e95SMichael S. Tsirkin 	bool can_push;
2285296f96fcSRusty Russell 
2286e174961cSJohannes Berg 	pr_debug("%s: xmit %p %pM\n", vi->dev->name, skb, dest);
2287e7428e95SMichael S. Tsirkin 
2288e7428e95SMichael S. Tsirkin 	can_push = vi->any_header_sg &&
2289e7428e95SMichael S. Tsirkin 		!((unsigned long)skb->data & (__alignof__(*hdr) - 1)) &&
2290e7428e95SMichael S. Tsirkin 		!skb_header_cloned(skb) && skb_headroom(skb) >= hdr_len;
2291e7428e95SMichael S. Tsirkin 	/* Even if we can, don't push here yet as this would skew
2292e7428e95SMichael S. Tsirkin 	 * csum_start offset below. */
2293e7428e95SMichael S. Tsirkin 	if (can_push)
2294012873d0SMichael S. Tsirkin 		hdr = (struct virtio_net_hdr_mrg_rxbuf *)(skb->data - hdr_len);
2295e7428e95SMichael S. Tsirkin 	else
2296dae64749SFeng Liu 		hdr = &skb_vnet_common_hdr(skb)->mrg_hdr;
2297296f96fcSRusty Russell 
2298e858fae2SMike Rapoport 	if (virtio_net_hdr_from_skb(skb, &hdr->hdr,
2299fd3a8862SWillem de Bruijn 				    virtio_is_little_endian(vi->vdev), false,
2300fd3a8862SWillem de Bruijn 				    0))
230185eb1389SXianting Tian 		return -EPROTO;
2302296f96fcSRusty Russell 
2303e7428e95SMichael S. Tsirkin 	if (vi->mergeable_rx_bufs)
2304012873d0SMichael S. Tsirkin 		hdr->num_buffers = 0;
23053f2c31d9SMark McLoughlin 
2306547c890cSJason Wang 	sg_init_table(sq->sg, skb_shinfo(skb)->nr_frags + (can_push ? 1 : 2));
2307e7428e95SMichael S. Tsirkin 	if (can_push) {
2308e7428e95SMichael S. Tsirkin 		__skb_push(skb, hdr_len);
2309e7428e95SMichael S. Tsirkin 		num_sg = skb_to_sgvec(skb, sq->sg, 0, skb->len);
2310e2fcad58SJason A. Donenfeld 		if (unlikely(num_sg < 0))
2311e2fcad58SJason A. Donenfeld 			return num_sg;
2312e7428e95SMichael S. Tsirkin 		/* Pull header back to avoid skew in tx bytes calculations. */
2313e7428e95SMichael S. Tsirkin 		__skb_pull(skb, hdr_len);
2314e7428e95SMichael S. Tsirkin 	} else {
2315e7428e95SMichael S. Tsirkin 		sg_set_buf(sq->sg, hdr, hdr_len);
2316e2fcad58SJason A. Donenfeld 		num_sg = skb_to_sgvec(skb, sq->sg + 1, 0, skb->len);
2317e2fcad58SJason A. Donenfeld 		if (unlikely(num_sg < 0))
2318e2fcad58SJason A. Donenfeld 			return num_sg;
2319e2fcad58SJason A. Donenfeld 		num_sg++;
2320e7428e95SMichael S. Tsirkin 	}
23219dc7b9e4SRusty Russell 	return virtqueue_add_outbuf(sq->vq, sq->sg, num_sg, skb, GFP_ATOMIC);
232211a3a154SRusty Russell }
232311a3a154SRusty Russell 
2324424efe9cSStephen Hemminger static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
232599ffc696SRusty Russell {
232699ffc696SRusty Russell 	struct virtnet_info *vi = netdev_priv(dev);
2327986a4f4dSJason Wang 	int qnum = skb_get_queue_mapping(skb);
2328986a4f4dSJason Wang 	struct send_queue *sq = &vi->sq[qnum];
23299ed4cb07SRusty Russell 	int err;
23304b7fd2e6SMichael S. Tsirkin 	struct netdev_queue *txq = netdev_get_tx_queue(dev, qnum);
23316b16f9eeSFlorian Westphal 	bool kick = !netdev_xmit_more();
2332b92f1e67SWillem de Bruijn 	bool use_napi = sq->napi.weight;
23332cb9c6baSRusty Russell 
23342cb9c6baSRusty Russell 	/* Free up any pending old buffers before queueing new ones. */
2335a7766ef1SMichael S. Tsirkin 	do {
2336a7766ef1SMichael S. Tsirkin 		if (use_napi)
2337a7766ef1SMichael S. Tsirkin 			virtqueue_disable_cb(sq->vq);
2338a7766ef1SMichael S. Tsirkin 
2339df133f3fSMichael S. Tsirkin 		free_old_xmit_skbs(sq, false);
234099ffc696SRusty Russell 
2341a7766ef1SMichael S. Tsirkin 	} while (use_napi && kick &&
2342a7766ef1SMichael S. Tsirkin 	       unlikely(!virtqueue_enable_cb_delayed(sq->vq)));
2343bdb12e0dSWillem de Bruijn 
2344074c3582SJacob Keller 	/* timestamp packet in software */
2345074c3582SJacob Keller 	skb_tx_timestamp(skb);
2346074c3582SJacob Keller 
234703f191baSMichael S. Tsirkin 	/* Try to transmit */
2348b7dfde95SLinus Torvalds 	err = xmit_skb(sq, skb);
234999ffc696SRusty Russell 
23509ed4cb07SRusty Russell 	/* This should not happen! */
2351681daee2SJason Wang 	if (unlikely(err)) {
235258eba97dSRusty Russell 		dev->stats.tx_fifo_errors++;
23532e57b79cSRick Jones 		if (net_ratelimit())
235458eba97dSRusty Russell 			dev_warn(&dev->dev,
23557934b481SYuval Shaia 				 "Unexpected TXQ (%d) queue failure: %d\n",
23567934b481SYuval Shaia 				 qnum, err);
235758eba97dSRusty Russell 		dev->stats.tx_dropped++;
235885e94525SEric W. Biederman 		dev_kfree_skb_any(skb);
235958eba97dSRusty Russell 		return NETDEV_TX_OK;
2360296f96fcSRusty Russell 	}
236103f191baSMichael S. Tsirkin 
236248925e37SRusty Russell 	/* Don't wait up for transmitted skbs to be freed. */
2363b92f1e67SWillem de Bruijn 	if (!use_napi) {
236448925e37SRusty Russell 		skb_orphan(skb);
2365895b5c9fSFlorian Westphal 		nf_reset_ct(skb);
2366b92f1e67SWillem de Bruijn 	}
236748925e37SRusty Russell 
2368b8ef4809SXuan Zhuo 	check_sq_full_and_disable(vi, dev, sq);
236948925e37SRusty Russell 
2370461f03dcSToshiaki Makita 	if (kick || netif_xmit_stopped(txq)) {
2371461f03dcSToshiaki Makita 		if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq)) {
2372461f03dcSToshiaki Makita 			u64_stats_update_begin(&sq->stats.syncp);
2373461f03dcSToshiaki Makita 			sq->stats.kicks++;
2374461f03dcSToshiaki Makita 			u64_stats_update_end(&sq->stats.syncp);
2375461f03dcSToshiaki Makita 		}
2376461f03dcSToshiaki Makita 	}
23770b725a2cSDavid S. Miller 
23780b725a2cSDavid S. Miller 	return NETDEV_TX_OK;
2379c223a078SDavid S. Miller }
2380c223a078SDavid S. Miller 
23816a4763e2SXuan Zhuo static int virtnet_rx_resize(struct virtnet_info *vi,
23826a4763e2SXuan Zhuo 			     struct receive_queue *rq, u32 ring_num)
23836a4763e2SXuan Zhuo {
23846a4763e2SXuan Zhuo 	bool running = netif_running(vi->dev);
23856a4763e2SXuan Zhuo 	int err, qindex;
23866a4763e2SXuan Zhuo 
23876a4763e2SXuan Zhuo 	qindex = rq - vi->rq;
23886a4763e2SXuan Zhuo 
23896a4763e2SXuan Zhuo 	if (running)
23906a4763e2SXuan Zhuo 		napi_disable(&rq->napi);
23916a4763e2SXuan Zhuo 
23926a4763e2SXuan Zhuo 	err = virtqueue_resize(rq->vq, ring_num, virtnet_rq_free_unused_buf);
23936a4763e2SXuan Zhuo 	if (err)
23946a4763e2SXuan Zhuo 		netdev_err(vi->dev, "resize rx fail: rx queue index: %d err: %d\n", qindex, err);
23956a4763e2SXuan Zhuo 
23966a4763e2SXuan Zhuo 	if (!try_fill_recv(vi, rq, GFP_KERNEL))
23976a4763e2SXuan Zhuo 		schedule_delayed_work(&vi->refill, 0);
23986a4763e2SXuan Zhuo 
23996a4763e2SXuan Zhuo 	if (running)
24006a4763e2SXuan Zhuo 		virtnet_napi_enable(rq->vq, &rq->napi);
24016a4763e2SXuan Zhuo 	return err;
24026a4763e2SXuan Zhuo }
24036a4763e2SXuan Zhuo 
2404ebcce492SXuan Zhuo static int virtnet_tx_resize(struct virtnet_info *vi,
2405ebcce492SXuan Zhuo 			     struct send_queue *sq, u32 ring_num)
2406ebcce492SXuan Zhuo {
2407ebcce492SXuan Zhuo 	bool running = netif_running(vi->dev);
2408ebcce492SXuan Zhuo 	struct netdev_queue *txq;
2409ebcce492SXuan Zhuo 	int err, qindex;
2410ebcce492SXuan Zhuo 
2411ebcce492SXuan Zhuo 	qindex = sq - vi->sq;
2412ebcce492SXuan Zhuo 
2413ebcce492SXuan Zhuo 	if (running)
2414ebcce492SXuan Zhuo 		virtnet_napi_tx_disable(&sq->napi);
2415ebcce492SXuan Zhuo 
2416ebcce492SXuan Zhuo 	txq = netdev_get_tx_queue(vi->dev, qindex);
2417ebcce492SXuan Zhuo 
2418ebcce492SXuan Zhuo 	/* 1. wait all ximt complete
2419ebcce492SXuan Zhuo 	 * 2. fix the race of netif_stop_subqueue() vs netif_start_subqueue()
2420ebcce492SXuan Zhuo 	 */
2421ebcce492SXuan Zhuo 	__netif_tx_lock_bh(txq);
2422ebcce492SXuan Zhuo 
2423ebcce492SXuan Zhuo 	/* Prevent rx poll from accessing sq. */
2424ebcce492SXuan Zhuo 	sq->reset = true;
2425ebcce492SXuan Zhuo 
2426ebcce492SXuan Zhuo 	/* Prevent the upper layer from trying to send packets. */
2427ebcce492SXuan Zhuo 	netif_stop_subqueue(vi->dev, qindex);
2428ebcce492SXuan Zhuo 
2429ebcce492SXuan Zhuo 	__netif_tx_unlock_bh(txq);
2430ebcce492SXuan Zhuo 
2431ebcce492SXuan Zhuo 	err = virtqueue_resize(sq->vq, ring_num, virtnet_sq_free_unused_buf);
2432ebcce492SXuan Zhuo 	if (err)
2433ebcce492SXuan Zhuo 		netdev_err(vi->dev, "resize tx fail: tx queue index: %d err: %d\n", qindex, err);
2434ebcce492SXuan Zhuo 
2435ebcce492SXuan Zhuo 	__netif_tx_lock_bh(txq);
2436ebcce492SXuan Zhuo 	sq->reset = false;
2437ebcce492SXuan Zhuo 	netif_tx_wake_queue(txq);
2438ebcce492SXuan Zhuo 	__netif_tx_unlock_bh(txq);
2439ebcce492SXuan Zhuo 
2440ebcce492SXuan Zhuo 	if (running)
2441ebcce492SXuan Zhuo 		virtnet_napi_tx_enable(vi, sq->vq, &sq->napi);
2442ebcce492SXuan Zhuo 	return err;
2443ebcce492SXuan Zhuo }
2444ebcce492SXuan Zhuo 
244540cbfc37SAmos Kong /*
244640cbfc37SAmos Kong  * Send command via the control virtqueue and check status.  Commands
244740cbfc37SAmos Kong  * supported by the hypervisor, as indicated by feature bits, should
2448788a8b6dSstephen hemminger  * never fail unless improperly formatted.
244940cbfc37SAmos Kong  */
245040cbfc37SAmos Kong static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
2451d24bae32Sstephen hemminger 				 struct scatterlist *out)
245240cbfc37SAmos Kong {
2453f7bc9594SRusty Russell 	struct scatterlist *sgs[4], hdr, stat;
2454d24bae32Sstephen hemminger 	unsigned out_num = 0, tmp;
2455222722bcSYunjian Wang 	int ret;
245640cbfc37SAmos Kong 
245740cbfc37SAmos Kong 	/* Caller should know better */
2458f7bc9594SRusty Russell 	BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ));
245940cbfc37SAmos Kong 
246012e57169SMichael S. Tsirkin 	vi->ctrl->status = ~0;
246112e57169SMichael S. Tsirkin 	vi->ctrl->hdr.class = class;
246212e57169SMichael S. Tsirkin 	vi->ctrl->hdr.cmd = cmd;
2463f7bc9594SRusty Russell 	/* Add header */
246412e57169SMichael S. Tsirkin 	sg_init_one(&hdr, &vi->ctrl->hdr, sizeof(vi->ctrl->hdr));
2465f7bc9594SRusty Russell 	sgs[out_num++] = &hdr;
246640cbfc37SAmos Kong 
2467f7bc9594SRusty Russell 	if (out)
2468f7bc9594SRusty Russell 		sgs[out_num++] = out;
246940cbfc37SAmos Kong 
2470f7bc9594SRusty Russell 	/* Add return status. */
247112e57169SMichael S. Tsirkin 	sg_init_one(&stat, &vi->ctrl->status, sizeof(vi->ctrl->status));
2472d24bae32Sstephen hemminger 	sgs[out_num] = &stat;
247340cbfc37SAmos Kong 
2474d24bae32Sstephen hemminger 	BUG_ON(out_num + 1 > ARRAY_SIZE(sgs));
2475222722bcSYunjian Wang 	ret = virtqueue_add_sgs(vi->cvq, sgs, out_num, 1, vi, GFP_ATOMIC);
2476222722bcSYunjian Wang 	if (ret < 0) {
2477222722bcSYunjian Wang 		dev_warn(&vi->vdev->dev,
2478222722bcSYunjian Wang 			 "Failed to add sgs for command vq: %d\n.", ret);
2479222722bcSYunjian Wang 		return false;
2480222722bcSYunjian Wang 	}
248140cbfc37SAmos Kong 
248267975901SHeinz Graalfs 	if (unlikely(!virtqueue_kick(vi->cvq)))
248312e57169SMichael S. Tsirkin 		return vi->ctrl->status == VIRTIO_NET_OK;
248440cbfc37SAmos Kong 
248540cbfc37SAmos Kong 	/* Spin for a response, the kick causes an ioport write, trapping
248640cbfc37SAmos Kong 	 * into the hypervisor, so the request should be handled immediately.
248740cbfc37SAmos Kong 	 */
2488047b9b94SHeinz Graalfs 	while (!virtqueue_get_buf(vi->cvq, &tmp) &&
2489047b9b94SHeinz Graalfs 	       !virtqueue_is_broken(vi->cvq))
249040cbfc37SAmos Kong 		cpu_relax();
249140cbfc37SAmos Kong 
249212e57169SMichael S. Tsirkin 	return vi->ctrl->status == VIRTIO_NET_OK;
249340cbfc37SAmos Kong }
249440cbfc37SAmos Kong 
24959c46f6d4SAlex Williamson static int virtnet_set_mac_address(struct net_device *dev, void *p)
24969c46f6d4SAlex Williamson {
24979c46f6d4SAlex Williamson 	struct virtnet_info *vi = netdev_priv(dev);
24989c46f6d4SAlex Williamson 	struct virtio_device *vdev = vi->vdev;
2499f2f2c8b4SJiri Pirko 	int ret;
2500e37e2ff3SAndy Lutomirski 	struct sockaddr *addr;
25017e58d5aeSAmos Kong 	struct scatterlist sg;
25029c46f6d4SAlex Williamson 
2503ba5e4426SSridhar Samudrala 	if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STANDBY))
2504ba5e4426SSridhar Samudrala 		return -EOPNOTSUPP;
2505ba5e4426SSridhar Samudrala 
2506801822d1SShyam Saini 	addr = kmemdup(p, sizeof(*addr), GFP_KERNEL);
2507e37e2ff3SAndy Lutomirski 	if (!addr)
2508e37e2ff3SAndy Lutomirski 		return -ENOMEM;
2509e37e2ff3SAndy Lutomirski 
2510e37e2ff3SAndy Lutomirski 	ret = eth_prepare_mac_addr_change(dev, addr);
2511f2f2c8b4SJiri Pirko 	if (ret)
2512e37e2ff3SAndy Lutomirski 		goto out;
25139c46f6d4SAlex Williamson 
25147e58d5aeSAmos Kong 	if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR)) {
25157e58d5aeSAmos Kong 		sg_init_one(&sg, addr->sa_data, dev->addr_len);
25167e58d5aeSAmos Kong 		if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
2517d24bae32Sstephen hemminger 					  VIRTIO_NET_CTRL_MAC_ADDR_SET, &sg)) {
25187e58d5aeSAmos Kong 			dev_warn(&vdev->dev,
25197e58d5aeSAmos Kong 				 "Failed to set mac address by vq command.\n");
2520e37e2ff3SAndy Lutomirski 			ret = -EINVAL;
2521e37e2ff3SAndy Lutomirski 			goto out;
25227e58d5aeSAmos Kong 		}
25237e93a02fSMichael S. Tsirkin 	} else if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC) &&
25247e93a02fSMichael S. Tsirkin 		   !virtio_has_feature(vdev, VIRTIO_F_VERSION_1)) {
2525855e0c52SRusty Russell 		unsigned int i;
2526855e0c52SRusty Russell 
2527855e0c52SRusty Russell 		/* Naturally, this has an atomicity problem. */
2528855e0c52SRusty Russell 		for (i = 0; i < dev->addr_len; i++)
2529855e0c52SRusty Russell 			virtio_cwrite8(vdev,
2530855e0c52SRusty Russell 				       offsetof(struct virtio_net_config, mac) +
2531855e0c52SRusty Russell 				       i, addr->sa_data[i]);
25327e58d5aeSAmos Kong 	}
25337e58d5aeSAmos Kong 
25347e58d5aeSAmos Kong 	eth_commit_mac_addr_change(dev, p);
2535e37e2ff3SAndy Lutomirski 	ret = 0;
25369c46f6d4SAlex Williamson 
2537e37e2ff3SAndy Lutomirski out:
2538e37e2ff3SAndy Lutomirski 	kfree(addr);
2539e37e2ff3SAndy Lutomirski 	return ret;
25409c46f6d4SAlex Williamson }
25419c46f6d4SAlex Williamson 
2542bc1f4470Sstephen hemminger static void virtnet_stats(struct net_device *dev,
25433fa2a1dfSstephen hemminger 			  struct rtnl_link_stats64 *tot)
25443fa2a1dfSstephen hemminger {
25453fa2a1dfSstephen hemminger 	struct virtnet_info *vi = netdev_priv(dev);
25463fa2a1dfSstephen hemminger 	unsigned int start;
2547d7dfc5cfSToshiaki Makita 	int i;
25483fa2a1dfSstephen hemminger 
2549d7dfc5cfSToshiaki Makita 	for (i = 0; i < vi->max_queue_pairs; i++) {
2550a520794bSTony Lu 		u64 tpackets, tbytes, terrors, rpackets, rbytes, rdrops;
2551d7dfc5cfSToshiaki Makita 		struct receive_queue *rq = &vi->rq[i];
2552d7dfc5cfSToshiaki Makita 		struct send_queue *sq = &vi->sq[i];
25533fa2a1dfSstephen hemminger 
25543fa2a1dfSstephen hemminger 		do {
2555068c38adSThomas Gleixner 			start = u64_stats_fetch_begin(&sq->stats.syncp);
2556d7dfc5cfSToshiaki Makita 			tpackets = sq->stats.packets;
2557d7dfc5cfSToshiaki Makita 			tbytes   = sq->stats.bytes;
2558a520794bSTony Lu 			terrors  = sq->stats.tx_timeouts;
2559068c38adSThomas Gleixner 		} while (u64_stats_fetch_retry(&sq->stats.syncp, start));
256083a27052SEric Dumazet 
256183a27052SEric Dumazet 		do {
2562068c38adSThomas Gleixner 			start = u64_stats_fetch_begin(&rq->stats.syncp);
2563d46eeeafSJason Wang 			rpackets = rq->stats.packets;
2564d46eeeafSJason Wang 			rbytes   = rq->stats.bytes;
2565d46eeeafSJason Wang 			rdrops   = rq->stats.drops;
2566068c38adSThomas Gleixner 		} while (u64_stats_fetch_retry(&rq->stats.syncp, start));
25673fa2a1dfSstephen hemminger 
25683fa2a1dfSstephen hemminger 		tot->rx_packets += rpackets;
25693fa2a1dfSstephen hemminger 		tot->tx_packets += tpackets;
25703fa2a1dfSstephen hemminger 		tot->rx_bytes   += rbytes;
25713fa2a1dfSstephen hemminger 		tot->tx_bytes   += tbytes;
25722c4a2f7dSToshiaki Makita 		tot->rx_dropped += rdrops;
2573a520794bSTony Lu 		tot->tx_errors  += terrors;
25743fa2a1dfSstephen hemminger 	}
25753fa2a1dfSstephen hemminger 
25763fa2a1dfSstephen hemminger 	tot->tx_dropped = dev->stats.tx_dropped;
2577021ac8d3SRick Jones 	tot->tx_fifo_errors = dev->stats.tx_fifo_errors;
25783fa2a1dfSstephen hemminger 	tot->rx_length_errors = dev->stats.rx_length_errors;
25793fa2a1dfSstephen hemminger 	tot->rx_frame_errors = dev->stats.rx_frame_errors;
25803fa2a1dfSstephen hemminger }
25813fa2a1dfSstephen hemminger 
2582586d17c5SJason Wang static void virtnet_ack_link_announce(struct virtnet_info *vi)
2583586d17c5SJason Wang {
2584586d17c5SJason Wang 	rtnl_lock();
2585586d17c5SJason Wang 	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_ANNOUNCE,
2586d24bae32Sstephen hemminger 				  VIRTIO_NET_CTRL_ANNOUNCE_ACK, NULL))
2587586d17c5SJason Wang 		dev_warn(&vi->dev->dev, "Failed to ack link announce.\n");
2588586d17c5SJason Wang 	rtnl_unlock();
2589586d17c5SJason Wang }
2590586d17c5SJason Wang 
259147315329SJohn Fastabend static int _virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
2592986a4f4dSJason Wang {
2593986a4f4dSJason Wang 	struct scatterlist sg;
2594986a4f4dSJason Wang 	struct net_device *dev = vi->dev;
2595986a4f4dSJason Wang 
2596986a4f4dSJason Wang 	if (!vi->has_cvq || !virtio_has_feature(vi->vdev, VIRTIO_NET_F_MQ))
2597986a4f4dSJason Wang 		return 0;
2598986a4f4dSJason Wang 
259912e57169SMichael S. Tsirkin 	vi->ctrl->mq.virtqueue_pairs = cpu_to_virtio16(vi->vdev, queue_pairs);
260012e57169SMichael S. Tsirkin 	sg_init_one(&sg, &vi->ctrl->mq, sizeof(vi->ctrl->mq));
2601986a4f4dSJason Wang 
2602986a4f4dSJason Wang 	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ,
2603d24bae32Sstephen hemminger 				  VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET, &sg)) {
2604986a4f4dSJason Wang 		dev_warn(&dev->dev, "Fail to set num of queue pairs to %d\n",
2605986a4f4dSJason Wang 			 queue_pairs);
2606986a4f4dSJason Wang 		return -EINVAL;
260755257d72SSasha Levin 	} else {
2608986a4f4dSJason Wang 		vi->curr_queue_pairs = queue_pairs;
260935ed159bSJason Wang 		/* virtnet_open() will refill when device is going to up. */
261035ed159bSJason Wang 		if (dev->flags & IFF_UP)
26119b9cd802SJason Wang 			schedule_delayed_work(&vi->refill, 0);
261255257d72SSasha Levin 	}
2613986a4f4dSJason Wang 
2614986a4f4dSJason Wang 	return 0;
2615986a4f4dSJason Wang }
2616986a4f4dSJason Wang 
261747315329SJohn Fastabend static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
261847315329SJohn Fastabend {
261947315329SJohn Fastabend 	int err;
262047315329SJohn Fastabend 
262147315329SJohn Fastabend 	rtnl_lock();
262247315329SJohn Fastabend 	err = _virtnet_set_queues(vi, queue_pairs);
262347315329SJohn Fastabend 	rtnl_unlock();
262447315329SJohn Fastabend 	return err;
262547315329SJohn Fastabend }
262647315329SJohn Fastabend 
2627296f96fcSRusty Russell static int virtnet_close(struct net_device *dev)
2628296f96fcSRusty Russell {
2629296f96fcSRusty Russell 	struct virtnet_info *vi = netdev_priv(dev);
2630986a4f4dSJason Wang 	int i;
2631296f96fcSRusty Russell 
26325a159128SJason Wang 	/* Make sure NAPI doesn't schedule refill work */
26335a159128SJason Wang 	disable_delayed_refill(vi);
2634b2baed69SRusty Russell 	/* Make sure refill_work doesn't re-enable napi! */
2635b2baed69SRusty Russell 	cancel_delayed_work_sync(&vi->refill);
2636986a4f4dSJason Wang 
26375306623aSFeng Liu 	for (i = 0; i < vi->max_queue_pairs; i++)
26385306623aSFeng Liu 		virtnet_disable_queue_pair(vi, i);
2639296f96fcSRusty Russell 
2640296f96fcSRusty Russell 	return 0;
2641296f96fcSRusty Russell }
2642296f96fcSRusty Russell 
26432af7698eSAlex Williamson static void virtnet_set_rx_mode(struct net_device *dev)
26442af7698eSAlex Williamson {
26452af7698eSAlex Williamson 	struct virtnet_info *vi = netdev_priv(dev);
2646f565a7c2SAlex Williamson 	struct scatterlist sg[2];
2647f565a7c2SAlex Williamson 	struct virtio_net_ctrl_mac *mac_data;
2648ccffad25SJiri Pirko 	struct netdev_hw_addr *ha;
264932e7bfc4SJiri Pirko 	int uc_count;
26504cd24eafSJiri Pirko 	int mc_count;
2651f565a7c2SAlex Williamson 	void *buf;
2652f565a7c2SAlex Williamson 	int i;
26532af7698eSAlex Williamson 
2654788a8b6dSstephen hemminger 	/* We can't dynamically set ndo_set_rx_mode, so return gracefully */
26552af7698eSAlex Williamson 	if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_RX))
26562af7698eSAlex Williamson 		return;
26572af7698eSAlex Williamson 
265812e57169SMichael S. Tsirkin 	vi->ctrl->promisc = ((dev->flags & IFF_PROMISC) != 0);
265912e57169SMichael S. Tsirkin 	vi->ctrl->allmulti = ((dev->flags & IFF_ALLMULTI) != 0);
26602af7698eSAlex Williamson 
266112e57169SMichael S. Tsirkin 	sg_init_one(sg, &vi->ctrl->promisc, sizeof(vi->ctrl->promisc));
26622af7698eSAlex Williamson 
26632af7698eSAlex Williamson 	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
2664d24bae32Sstephen hemminger 				  VIRTIO_NET_CTRL_RX_PROMISC, sg))
26652af7698eSAlex Williamson 		dev_warn(&dev->dev, "Failed to %sable promisc mode.\n",
266612e57169SMichael S. Tsirkin 			 vi->ctrl->promisc ? "en" : "dis");
26672af7698eSAlex Williamson 
266812e57169SMichael S. Tsirkin 	sg_init_one(sg, &vi->ctrl->allmulti, sizeof(vi->ctrl->allmulti));
26692af7698eSAlex Williamson 
26702af7698eSAlex Williamson 	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
2671d24bae32Sstephen hemminger 				  VIRTIO_NET_CTRL_RX_ALLMULTI, sg))
26722af7698eSAlex Williamson 		dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n",
267312e57169SMichael S. Tsirkin 			 vi->ctrl->allmulti ? "en" : "dis");
2674f565a7c2SAlex Williamson 
267532e7bfc4SJiri Pirko 	uc_count = netdev_uc_count(dev);
26764cd24eafSJiri Pirko 	mc_count = netdev_mc_count(dev);
2677f565a7c2SAlex Williamson 	/* MAC filter - use one buffer for both lists */
26784cd24eafSJiri Pirko 	buf = kzalloc(((uc_count + mc_count) * ETH_ALEN) +
2679f565a7c2SAlex Williamson 		      (2 * sizeof(mac_data->entries)), GFP_ATOMIC);
26804cd24eafSJiri Pirko 	mac_data = buf;
2681e68ed8f0SJoe Perches 	if (!buf)
2682f565a7c2SAlex Williamson 		return;
2683f565a7c2SAlex Williamson 
268423e258e1SAlex Williamson 	sg_init_table(sg, 2);
268523e258e1SAlex Williamson 
2686f565a7c2SAlex Williamson 	/* Store the unicast list and count in the front of the buffer */
2687fdd819b2SMichael S. Tsirkin 	mac_data->entries = cpu_to_virtio32(vi->vdev, uc_count);
2688ccffad25SJiri Pirko 	i = 0;
268932e7bfc4SJiri Pirko 	netdev_for_each_uc_addr(ha, dev)
2690ccffad25SJiri Pirko 		memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN);
2691f565a7c2SAlex Williamson 
2692f565a7c2SAlex Williamson 	sg_set_buf(&sg[0], mac_data,
269332e7bfc4SJiri Pirko 		   sizeof(mac_data->entries) + (uc_count * ETH_ALEN));
2694f565a7c2SAlex Williamson 
2695f565a7c2SAlex Williamson 	/* multicast list and count fill the end */
269632e7bfc4SJiri Pirko 	mac_data = (void *)&mac_data->macs[uc_count][0];
2697f565a7c2SAlex Williamson 
2698fdd819b2SMichael S. Tsirkin 	mac_data->entries = cpu_to_virtio32(vi->vdev, mc_count);
2699567ec874SJiri Pirko 	i = 0;
270022bedad3SJiri Pirko 	netdev_for_each_mc_addr(ha, dev)
270122bedad3SJiri Pirko 		memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN);
2702f565a7c2SAlex Williamson 
2703f565a7c2SAlex Williamson 	sg_set_buf(&sg[1], mac_data,
27044cd24eafSJiri Pirko 		   sizeof(mac_data->entries) + (mc_count * ETH_ALEN));
2705f565a7c2SAlex Williamson 
2706f565a7c2SAlex Williamson 	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
2707d24bae32Sstephen hemminger 				  VIRTIO_NET_CTRL_MAC_TABLE_SET, sg))
270899e872aeSThomas Huth 		dev_warn(&dev->dev, "Failed to set MAC filter table.\n");
2709f565a7c2SAlex Williamson 
2710f565a7c2SAlex Williamson 	kfree(buf);
27112af7698eSAlex Williamson }
27122af7698eSAlex Williamson 
271380d5c368SPatrick McHardy static int virtnet_vlan_rx_add_vid(struct net_device *dev,
271480d5c368SPatrick McHardy 				   __be16 proto, u16 vid)
27150bde9569SAlex Williamson {
27160bde9569SAlex Williamson 	struct virtnet_info *vi = netdev_priv(dev);
27170bde9569SAlex Williamson 	struct scatterlist sg;
27180bde9569SAlex Williamson 
2719d7fad4c8SMichael S. Tsirkin 	vi->ctrl->vid = cpu_to_virtio16(vi->vdev, vid);
272012e57169SMichael S. Tsirkin 	sg_init_one(&sg, &vi->ctrl->vid, sizeof(vi->ctrl->vid));
27210bde9569SAlex Williamson 
27220bde9569SAlex Williamson 	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
2723d24bae32Sstephen hemminger 				  VIRTIO_NET_CTRL_VLAN_ADD, &sg))
27240bde9569SAlex Williamson 		dev_warn(&dev->dev, "Failed to add VLAN ID %d.\n", vid);
27258e586137SJiri Pirko 	return 0;
27260bde9569SAlex Williamson }
27270bde9569SAlex Williamson 
272880d5c368SPatrick McHardy static int virtnet_vlan_rx_kill_vid(struct net_device *dev,
272980d5c368SPatrick McHardy 				    __be16 proto, u16 vid)
27300bde9569SAlex Williamson {
27310bde9569SAlex Williamson 	struct virtnet_info *vi = netdev_priv(dev);
27320bde9569SAlex Williamson 	struct scatterlist sg;
27330bde9569SAlex Williamson 
2734d7fad4c8SMichael S. Tsirkin 	vi->ctrl->vid = cpu_to_virtio16(vi->vdev, vid);
273512e57169SMichael S. Tsirkin 	sg_init_one(&sg, &vi->ctrl->vid, sizeof(vi->ctrl->vid));
27360bde9569SAlex Williamson 
27370bde9569SAlex Williamson 	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
2738d24bae32Sstephen hemminger 				  VIRTIO_NET_CTRL_VLAN_DEL, &sg))
27390bde9569SAlex Williamson 		dev_warn(&dev->dev, "Failed to kill VLAN ID %d.\n", vid);
27408e586137SJiri Pirko 	return 0;
27410bde9569SAlex Williamson }
27420bde9569SAlex Williamson 
2743310974faSPeter Xu static void virtnet_clean_affinity(struct virtnet_info *vi)
2744986a4f4dSJason Wang {
2745986a4f4dSJason Wang 	int i;
27468898c21cSWanlong Gao 
27478898c21cSWanlong Gao 	if (vi->affinity_hint_set) {
27488898c21cSWanlong Gao 		for (i = 0; i < vi->max_queue_pairs; i++) {
274919e226e8SCaleb Raitto 			virtqueue_set_affinity(vi->rq[i].vq, NULL);
275019e226e8SCaleb Raitto 			virtqueue_set_affinity(vi->sq[i].vq, NULL);
27518898c21cSWanlong Gao 		}
27528898c21cSWanlong Gao 
27538898c21cSWanlong Gao 		vi->affinity_hint_set = false;
27548898c21cSWanlong Gao 	}
27558898c21cSWanlong Gao }
27568898c21cSWanlong Gao 
27578898c21cSWanlong Gao static void virtnet_set_affinity(struct virtnet_info *vi)
2758986a4f4dSJason Wang {
27592ca653d6SCaleb Raitto 	cpumask_var_t mask;
27602ca653d6SCaleb Raitto 	int stragglers;
27612ca653d6SCaleb Raitto 	int group_size;
27622ca653d6SCaleb Raitto 	int i, j, cpu;
27632ca653d6SCaleb Raitto 	int num_cpu;
27642ca653d6SCaleb Raitto 	int stride;
2765986a4f4dSJason Wang 
27662ca653d6SCaleb Raitto 	if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) {
2767310974faSPeter Xu 		virtnet_clean_affinity(vi);
2768986a4f4dSJason Wang 		return;
2769986a4f4dSJason Wang 	}
2770986a4f4dSJason Wang 
27712ca653d6SCaleb Raitto 	num_cpu = num_online_cpus();
27722ca653d6SCaleb Raitto 	stride = max_t(int, num_cpu / vi->curr_queue_pairs, 1);
27732ca653d6SCaleb Raitto 	stragglers = num_cpu >= vi->curr_queue_pairs ?
27742ca653d6SCaleb Raitto 			num_cpu % vi->curr_queue_pairs :
27752ca653d6SCaleb Raitto 			0;
27769b51d9d8SYury Norov 	cpu = cpumask_first(cpu_online_mask);
27774d99f660SAndrei Vagin 
27782ca653d6SCaleb Raitto 	for (i = 0; i < vi->curr_queue_pairs; i++) {
27792ca653d6SCaleb Raitto 		group_size = stride + (i < stragglers ? 1 : 0);
27802ca653d6SCaleb Raitto 
27812ca653d6SCaleb Raitto 		for (j = 0; j < group_size; j++) {
27822ca653d6SCaleb Raitto 			cpumask_set_cpu(cpu, mask);
27832ca653d6SCaleb Raitto 			cpu = cpumask_next_wrap(cpu, cpu_online_mask,
27842ca653d6SCaleb Raitto 						nr_cpu_ids, false);
27852ca653d6SCaleb Raitto 		}
27862ca653d6SCaleb Raitto 		virtqueue_set_affinity(vi->rq[i].vq, mask);
27872ca653d6SCaleb Raitto 		virtqueue_set_affinity(vi->sq[i].vq, mask);
2788044ab86dSAntoine Tenart 		__netif_set_xps_queue(vi->dev, cpumask_bits(mask), i, XPS_CPUS);
27892ca653d6SCaleb Raitto 		cpumask_clear(mask);
2790986a4f4dSJason Wang 	}
2791986a4f4dSJason Wang 
2792986a4f4dSJason Wang 	vi->affinity_hint_set = true;
27932ca653d6SCaleb Raitto 	free_cpumask_var(mask);
279447be2479SWanlong Gao }
2795986a4f4dSJason Wang 
27968017c279SSebastian Andrzej Siewior static int virtnet_cpu_online(unsigned int cpu, struct hlist_node *node)
27978de4b2f3SWanlong Gao {
27988017c279SSebastian Andrzej Siewior 	struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info,
27998017c279SSebastian Andrzej Siewior 						   node);
28008de4b2f3SWanlong Gao 	virtnet_set_affinity(vi);
28018017c279SSebastian Andrzej Siewior 	return 0;
28028de4b2f3SWanlong Gao }
28033ab098dfSJason Wang 
28048017c279SSebastian Andrzej Siewior static int virtnet_cpu_dead(unsigned int cpu, struct hlist_node *node)
28058017c279SSebastian Andrzej Siewior {
28068017c279SSebastian Andrzej Siewior 	struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info,
28078017c279SSebastian Andrzej Siewior 						   node_dead);
28088017c279SSebastian Andrzej Siewior 	virtnet_set_affinity(vi);
28098017c279SSebastian Andrzej Siewior 	return 0;
28108017c279SSebastian Andrzej Siewior }
28118017c279SSebastian Andrzej Siewior 
28128017c279SSebastian Andrzej Siewior static int virtnet_cpu_down_prep(unsigned int cpu, struct hlist_node *node)
28138017c279SSebastian Andrzej Siewior {
28148017c279SSebastian Andrzej Siewior 	struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info,
28158017c279SSebastian Andrzej Siewior 						   node);
28168017c279SSebastian Andrzej Siewior 
2817310974faSPeter Xu 	virtnet_clean_affinity(vi);
28188017c279SSebastian Andrzej Siewior 	return 0;
28198017c279SSebastian Andrzej Siewior }
28208017c279SSebastian Andrzej Siewior 
28218017c279SSebastian Andrzej Siewior static enum cpuhp_state virtionet_online;
28228017c279SSebastian Andrzej Siewior 
28238017c279SSebastian Andrzej Siewior static int virtnet_cpu_notif_add(struct virtnet_info *vi)
28248017c279SSebastian Andrzej Siewior {
28258017c279SSebastian Andrzej Siewior 	int ret;
28268017c279SSebastian Andrzej Siewior 
28278017c279SSebastian Andrzej Siewior 	ret = cpuhp_state_add_instance_nocalls(virtionet_online, &vi->node);
28288017c279SSebastian Andrzej Siewior 	if (ret)
28298017c279SSebastian Andrzej Siewior 		return ret;
28308017c279SSebastian Andrzej Siewior 	ret = cpuhp_state_add_instance_nocalls(CPUHP_VIRT_NET_DEAD,
28318017c279SSebastian Andrzej Siewior 					       &vi->node_dead);
28328017c279SSebastian Andrzej Siewior 	if (!ret)
28338017c279SSebastian Andrzej Siewior 		return ret;
28348017c279SSebastian Andrzej Siewior 	cpuhp_state_remove_instance_nocalls(virtionet_online, &vi->node);
28358017c279SSebastian Andrzej Siewior 	return ret;
28368017c279SSebastian Andrzej Siewior }
28378017c279SSebastian Andrzej Siewior 
28388017c279SSebastian Andrzej Siewior static void virtnet_cpu_notif_remove(struct virtnet_info *vi)
28398017c279SSebastian Andrzej Siewior {
28408017c279SSebastian Andrzej Siewior 	cpuhp_state_remove_instance_nocalls(virtionet_online, &vi->node);
28418017c279SSebastian Andrzej Siewior 	cpuhp_state_remove_instance_nocalls(CPUHP_VIRT_NET_DEAD,
28428017c279SSebastian Andrzej Siewior 					    &vi->node_dead);
2843a9ea3fc6SHerbert Xu }
2844a9ea3fc6SHerbert Xu 
28458f9f4668SRick Jones static void virtnet_get_ringparam(struct net_device *dev,
284674624944SHao Chen 				  struct ethtool_ringparam *ring,
284774624944SHao Chen 				  struct kernel_ethtool_ringparam *kernel_ring,
284874624944SHao Chen 				  struct netlink_ext_ack *extack)
28498f9f4668SRick Jones {
28508f9f4668SRick Jones 	struct virtnet_info *vi = netdev_priv(dev);
28518f9f4668SRick Jones 
28528597b5ddSXuan Zhuo 	ring->rx_max_pending = vi->rq[0].vq->num_max;
28538597b5ddSXuan Zhuo 	ring->tx_max_pending = vi->sq[0].vq->num_max;
28548597b5ddSXuan Zhuo 	ring->rx_pending = virtqueue_get_vring_size(vi->rq[0].vq);
28558597b5ddSXuan Zhuo 	ring->tx_pending = virtqueue_get_vring_size(vi->sq[0].vq);
28568f9f4668SRick Jones }
28578f9f4668SRick Jones 
2858a335b33fSXuan Zhuo static int virtnet_set_ringparam(struct net_device *dev,
2859a335b33fSXuan Zhuo 				 struct ethtool_ringparam *ring,
2860a335b33fSXuan Zhuo 				 struct kernel_ethtool_ringparam *kernel_ring,
2861a335b33fSXuan Zhuo 				 struct netlink_ext_ack *extack)
2862a335b33fSXuan Zhuo {
2863a335b33fSXuan Zhuo 	struct virtnet_info *vi = netdev_priv(dev);
2864a335b33fSXuan Zhuo 	u32 rx_pending, tx_pending;
2865a335b33fSXuan Zhuo 	struct receive_queue *rq;
2866a335b33fSXuan Zhuo 	struct send_queue *sq;
2867a335b33fSXuan Zhuo 	int i, err;
2868a335b33fSXuan Zhuo 
2869a335b33fSXuan Zhuo 	if (ring->rx_mini_pending || ring->rx_jumbo_pending)
2870a335b33fSXuan Zhuo 		return -EINVAL;
2871a335b33fSXuan Zhuo 
2872a335b33fSXuan Zhuo 	rx_pending = virtqueue_get_vring_size(vi->rq[0].vq);
2873a335b33fSXuan Zhuo 	tx_pending = virtqueue_get_vring_size(vi->sq[0].vq);
2874a335b33fSXuan Zhuo 
2875a335b33fSXuan Zhuo 	if (ring->rx_pending == rx_pending &&
2876a335b33fSXuan Zhuo 	    ring->tx_pending == tx_pending)
2877a335b33fSXuan Zhuo 		return 0;
2878a335b33fSXuan Zhuo 
2879a335b33fSXuan Zhuo 	if (ring->rx_pending > vi->rq[0].vq->num_max)
2880a335b33fSXuan Zhuo 		return -EINVAL;
2881a335b33fSXuan Zhuo 
2882a335b33fSXuan Zhuo 	if (ring->tx_pending > vi->sq[0].vq->num_max)
2883a335b33fSXuan Zhuo 		return -EINVAL;
2884a335b33fSXuan Zhuo 
2885a335b33fSXuan Zhuo 	for (i = 0; i < vi->max_queue_pairs; i++) {
2886a335b33fSXuan Zhuo 		rq = vi->rq + i;
2887a335b33fSXuan Zhuo 		sq = vi->sq + i;
2888a335b33fSXuan Zhuo 
2889a335b33fSXuan Zhuo 		if (ring->tx_pending != tx_pending) {
2890a335b33fSXuan Zhuo 			err = virtnet_tx_resize(vi, sq, ring->tx_pending);
2891a335b33fSXuan Zhuo 			if (err)
2892a335b33fSXuan Zhuo 				return err;
2893a335b33fSXuan Zhuo 		}
2894a335b33fSXuan Zhuo 
2895a335b33fSXuan Zhuo 		if (ring->rx_pending != rx_pending) {
2896a335b33fSXuan Zhuo 			err = virtnet_rx_resize(vi, rq, ring->rx_pending);
2897a335b33fSXuan Zhuo 			if (err)
2898a335b33fSXuan Zhuo 				return err;
2899a335b33fSXuan Zhuo 		}
2900a335b33fSXuan Zhuo 	}
2901a335b33fSXuan Zhuo 
2902a335b33fSXuan Zhuo 	return 0;
2903a9ea3fc6SHerbert Xu }
2904a9ea3fc6SHerbert Xu 
2905c7114b12SAndrew Melnychenko static bool virtnet_commit_rss_command(struct virtnet_info *vi)
2906c7114b12SAndrew Melnychenko {
2907c7114b12SAndrew Melnychenko 	struct net_device *dev = vi->dev;
2908c7114b12SAndrew Melnychenko 	struct scatterlist sgs[4];
2909c7114b12SAndrew Melnychenko 	unsigned int sg_buf_size;
2910c7114b12SAndrew Melnychenko 
2911c7114b12SAndrew Melnychenko 	/* prepare sgs */
2912c7114b12SAndrew Melnychenko 	sg_init_table(sgs, 4);
2913c7114b12SAndrew Melnychenko 
2914c7114b12SAndrew Melnychenko 	sg_buf_size = offsetof(struct virtio_net_ctrl_rss, indirection_table);
2915c7114b12SAndrew Melnychenko 	sg_set_buf(&sgs[0], &vi->ctrl->rss, sg_buf_size);
2916c7114b12SAndrew Melnychenko 
2917c7114b12SAndrew Melnychenko 	sg_buf_size = sizeof(uint16_t) * (vi->ctrl->rss.indirection_table_mask + 1);
2918c7114b12SAndrew Melnychenko 	sg_set_buf(&sgs[1], vi->ctrl->rss.indirection_table, sg_buf_size);
2919c7114b12SAndrew Melnychenko 
2920c7114b12SAndrew Melnychenko 	sg_buf_size = offsetof(struct virtio_net_ctrl_rss, key)
2921c7114b12SAndrew Melnychenko 			- offsetof(struct virtio_net_ctrl_rss, max_tx_vq);
2922c7114b12SAndrew Melnychenko 	sg_set_buf(&sgs[2], &vi->ctrl->rss.max_tx_vq, sg_buf_size);
2923c7114b12SAndrew Melnychenko 
2924c7114b12SAndrew Melnychenko 	sg_buf_size = vi->rss_key_size;
2925c7114b12SAndrew Melnychenko 	sg_set_buf(&sgs[3], vi->ctrl->rss.key, sg_buf_size);
2926c7114b12SAndrew Melnychenko 
2927c7114b12SAndrew Melnychenko 	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ,
292891f41f01SAndrew Melnychenko 				  vi->has_rss ? VIRTIO_NET_CTRL_MQ_RSS_CONFIG
292991f41f01SAndrew Melnychenko 				  : VIRTIO_NET_CTRL_MQ_HASH_CONFIG, sgs)) {
2930c7114b12SAndrew Melnychenko 		dev_warn(&dev->dev, "VIRTIONET issue with committing RSS sgs\n");
2931c7114b12SAndrew Melnychenko 		return false;
2932c7114b12SAndrew Melnychenko 	}
2933c7114b12SAndrew Melnychenko 	return true;
2934c7114b12SAndrew Melnychenko }
2935c7114b12SAndrew Melnychenko 
2936c7114b12SAndrew Melnychenko static void virtnet_init_default_rss(struct virtnet_info *vi)
2937c7114b12SAndrew Melnychenko {
2938c7114b12SAndrew Melnychenko 	u32 indir_val = 0;
2939c7114b12SAndrew Melnychenko 	int i = 0;
2940c7114b12SAndrew Melnychenko 
2941c7114b12SAndrew Melnychenko 	vi->ctrl->rss.hash_types = vi->rss_hash_types_supported;
2942c1170820SAndrew Melnychenko 	vi->rss_hash_types_saved = vi->rss_hash_types_supported;
2943c7114b12SAndrew Melnychenko 	vi->ctrl->rss.indirection_table_mask = vi->rss_indir_table_size
2944c7114b12SAndrew Melnychenko 						? vi->rss_indir_table_size - 1 : 0;
2945c7114b12SAndrew Melnychenko 	vi->ctrl->rss.unclassified_queue = 0;
2946c7114b12SAndrew Melnychenko 
2947c7114b12SAndrew Melnychenko 	for (; i < vi->rss_indir_table_size; ++i) {
2948c7114b12SAndrew Melnychenko 		indir_val = ethtool_rxfh_indir_default(i, vi->curr_queue_pairs);
2949c7114b12SAndrew Melnychenko 		vi->ctrl->rss.indirection_table[i] = indir_val;
2950c7114b12SAndrew Melnychenko 	}
2951c7114b12SAndrew Melnychenko 
29522c507ce9SHawkins Jiawei 	vi->ctrl->rss.max_tx_vq = vi->has_rss ? vi->curr_queue_pairs : 0;
2953c7114b12SAndrew Melnychenko 	vi->ctrl->rss.hash_key_length = vi->rss_key_size;
2954c7114b12SAndrew Melnychenko 
2955c7114b12SAndrew Melnychenko 	netdev_rss_key_fill(vi->ctrl->rss.key, vi->rss_key_size);
2956c7114b12SAndrew Melnychenko }
2957c7114b12SAndrew Melnychenko 
2958c1170820SAndrew Melnychenko static void virtnet_get_hashflow(const struct virtnet_info *vi, struct ethtool_rxnfc *info)
2959c1170820SAndrew Melnychenko {
2960c1170820SAndrew Melnychenko 	info->data = 0;
2961c1170820SAndrew Melnychenko 	switch (info->flow_type) {
2962c1170820SAndrew Melnychenko 	case TCP_V4_FLOW:
2963c1170820SAndrew Melnychenko 		if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_TCPv4) {
2964c1170820SAndrew Melnychenko 			info->data = RXH_IP_SRC | RXH_IP_DST |
2965c1170820SAndrew Melnychenko 						 RXH_L4_B_0_1 | RXH_L4_B_2_3;
2966c1170820SAndrew Melnychenko 		} else if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv4) {
2967c1170820SAndrew Melnychenko 			info->data = RXH_IP_SRC | RXH_IP_DST;
2968c1170820SAndrew Melnychenko 		}
2969c1170820SAndrew Melnychenko 		break;
2970c1170820SAndrew Melnychenko 	case TCP_V6_FLOW:
2971c1170820SAndrew Melnychenko 		if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_TCPv6) {
2972c1170820SAndrew Melnychenko 			info->data = RXH_IP_SRC | RXH_IP_DST |
2973c1170820SAndrew Melnychenko 						 RXH_L4_B_0_1 | RXH_L4_B_2_3;
2974c1170820SAndrew Melnychenko 		} else if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv6) {
2975c1170820SAndrew Melnychenko 			info->data = RXH_IP_SRC | RXH_IP_DST;
2976c1170820SAndrew Melnychenko 		}
2977c1170820SAndrew Melnychenko 		break;
2978c1170820SAndrew Melnychenko 	case UDP_V4_FLOW:
2979c1170820SAndrew Melnychenko 		if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_UDPv4) {
2980c1170820SAndrew Melnychenko 			info->data = RXH_IP_SRC | RXH_IP_DST |
2981c1170820SAndrew Melnychenko 						 RXH_L4_B_0_1 | RXH_L4_B_2_3;
2982c1170820SAndrew Melnychenko 		} else if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv4) {
2983c1170820SAndrew Melnychenko 			info->data = RXH_IP_SRC | RXH_IP_DST;
2984c1170820SAndrew Melnychenko 		}
2985c1170820SAndrew Melnychenko 		break;
2986c1170820SAndrew Melnychenko 	case UDP_V6_FLOW:
2987c1170820SAndrew Melnychenko 		if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_UDPv6) {
2988c1170820SAndrew Melnychenko 			info->data = RXH_IP_SRC | RXH_IP_DST |
2989c1170820SAndrew Melnychenko 						 RXH_L4_B_0_1 | RXH_L4_B_2_3;
2990c1170820SAndrew Melnychenko 		} else if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv6) {
2991c1170820SAndrew Melnychenko 			info->data = RXH_IP_SRC | RXH_IP_DST;
2992c1170820SAndrew Melnychenko 		}
2993c1170820SAndrew Melnychenko 		break;
2994c1170820SAndrew Melnychenko 	case IPV4_FLOW:
2995c1170820SAndrew Melnychenko 		if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv4)
2996c1170820SAndrew Melnychenko 			info->data = RXH_IP_SRC | RXH_IP_DST;
2997c1170820SAndrew Melnychenko 
2998c1170820SAndrew Melnychenko 		break;
2999c1170820SAndrew Melnychenko 	case IPV6_FLOW:
3000c1170820SAndrew Melnychenko 		if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv6)
3001c1170820SAndrew Melnychenko 			info->data = RXH_IP_SRC | RXH_IP_DST;
3002c1170820SAndrew Melnychenko 
3003c1170820SAndrew Melnychenko 		break;
3004c1170820SAndrew Melnychenko 	default:
3005c1170820SAndrew Melnychenko 		info->data = 0;
3006c1170820SAndrew Melnychenko 		break;
3007c1170820SAndrew Melnychenko 	}
3008c1170820SAndrew Melnychenko }
3009c1170820SAndrew Melnychenko 
3010c1170820SAndrew Melnychenko static bool virtnet_set_hashflow(struct virtnet_info *vi, struct ethtool_rxnfc *info)
3011c1170820SAndrew Melnychenko {
3012c1170820SAndrew Melnychenko 	u32 new_hashtypes = vi->rss_hash_types_saved;
3013c1170820SAndrew Melnychenko 	bool is_disable = info->data & RXH_DISCARD;
3014c1170820SAndrew Melnychenko 	bool is_l4 = info->data == (RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3);
3015c1170820SAndrew Melnychenko 
3016c1170820SAndrew Melnychenko 	/* supports only 'sd', 'sdfn' and 'r' */
3017c1170820SAndrew Melnychenko 	if (!((info->data == (RXH_IP_SRC | RXH_IP_DST)) | is_l4 | is_disable))
3018c1170820SAndrew Melnychenko 		return false;
3019c1170820SAndrew Melnychenko 
3020c1170820SAndrew Melnychenko 	switch (info->flow_type) {
3021c1170820SAndrew Melnychenko 	case TCP_V4_FLOW:
3022c1170820SAndrew Melnychenko 		new_hashtypes &= ~(VIRTIO_NET_RSS_HASH_TYPE_IPv4 | VIRTIO_NET_RSS_HASH_TYPE_TCPv4);
3023c1170820SAndrew Melnychenko 		if (!is_disable)
3024c1170820SAndrew Melnychenko 			new_hashtypes |= VIRTIO_NET_RSS_HASH_TYPE_IPv4
3025c1170820SAndrew Melnychenko 				| (is_l4 ? VIRTIO_NET_RSS_HASH_TYPE_TCPv4 : 0);
3026c1170820SAndrew Melnychenko 		break;
3027c1170820SAndrew Melnychenko 	case UDP_V4_FLOW:
3028c1170820SAndrew Melnychenko 		new_hashtypes &= ~(VIRTIO_NET_RSS_HASH_TYPE_IPv4 | VIRTIO_NET_RSS_HASH_TYPE_UDPv4);
3029c1170820SAndrew Melnychenko 		if (!is_disable)
3030c1170820SAndrew Melnychenko 			new_hashtypes |= VIRTIO_NET_RSS_HASH_TYPE_IPv4
3031c1170820SAndrew Melnychenko 				| (is_l4 ? VIRTIO_NET_RSS_HASH_TYPE_UDPv4 : 0);
3032c1170820SAndrew Melnychenko 		break;
3033c1170820SAndrew Melnychenko 	case IPV4_FLOW:
3034c1170820SAndrew Melnychenko 		new_hashtypes &= ~VIRTIO_NET_RSS_HASH_TYPE_IPv4;
3035c1170820SAndrew Melnychenko 		if (!is_disable)
3036c1170820SAndrew Melnychenko 			new_hashtypes = VIRTIO_NET_RSS_HASH_TYPE_IPv4;
3037c1170820SAndrew Melnychenko 		break;
3038c1170820SAndrew Melnychenko 	case TCP_V6_FLOW:
3039c1170820SAndrew Melnychenko 		new_hashtypes &= ~(VIRTIO_NET_RSS_HASH_TYPE_IPv6 | VIRTIO_NET_RSS_HASH_TYPE_TCPv6);
3040c1170820SAndrew Melnychenko 		if (!is_disable)
3041c1170820SAndrew Melnychenko 			new_hashtypes |= VIRTIO_NET_RSS_HASH_TYPE_IPv6
3042c1170820SAndrew Melnychenko 				| (is_l4 ? VIRTIO_NET_RSS_HASH_TYPE_TCPv6 : 0);
3043c1170820SAndrew Melnychenko 		break;
3044c1170820SAndrew Melnychenko 	case UDP_V6_FLOW:
3045c1170820SAndrew Melnychenko 		new_hashtypes &= ~(VIRTIO_NET_RSS_HASH_TYPE_IPv6 | VIRTIO_NET_RSS_HASH_TYPE_UDPv6);
3046c1170820SAndrew Melnychenko 		if (!is_disable)
3047c1170820SAndrew Melnychenko 			new_hashtypes |= VIRTIO_NET_RSS_HASH_TYPE_IPv6
3048c1170820SAndrew Melnychenko 				| (is_l4 ? VIRTIO_NET_RSS_HASH_TYPE_UDPv6 : 0);
3049c1170820SAndrew Melnychenko 		break;
3050c1170820SAndrew Melnychenko 	case IPV6_FLOW:
3051c1170820SAndrew Melnychenko 		new_hashtypes &= ~VIRTIO_NET_RSS_HASH_TYPE_IPv6;
3052c1170820SAndrew Melnychenko 		if (!is_disable)
3053c1170820SAndrew Melnychenko 			new_hashtypes = VIRTIO_NET_RSS_HASH_TYPE_IPv6;
3054c1170820SAndrew Melnychenko 		break;
3055c1170820SAndrew Melnychenko 	default:
3056c1170820SAndrew Melnychenko 		/* unsupported flow */
3057c1170820SAndrew Melnychenko 		return false;
3058c1170820SAndrew Melnychenko 	}
3059c1170820SAndrew Melnychenko 
3060c1170820SAndrew Melnychenko 	/* if unsupported hashtype was set */
3061c1170820SAndrew Melnychenko 	if (new_hashtypes != (new_hashtypes & vi->rss_hash_types_supported))
3062c1170820SAndrew Melnychenko 		return false;
3063c1170820SAndrew Melnychenko 
3064c1170820SAndrew Melnychenko 	if (new_hashtypes != vi->rss_hash_types_saved) {
3065c1170820SAndrew Melnychenko 		vi->rss_hash_types_saved = new_hashtypes;
3066c1170820SAndrew Melnychenko 		vi->ctrl->rss.hash_types = vi->rss_hash_types_saved;
3067c1170820SAndrew Melnychenko 		if (vi->dev->features & NETIF_F_RXHASH)
3068c1170820SAndrew Melnychenko 			return virtnet_commit_rss_command(vi);
3069c1170820SAndrew Melnychenko 	}
3070c1170820SAndrew Melnychenko 
3071c1170820SAndrew Melnychenko 	return true;
3072c1170820SAndrew Melnychenko }
307366846048SRick Jones 
307466846048SRick Jones static void virtnet_get_drvinfo(struct net_device *dev,
307566846048SRick Jones 				struct ethtool_drvinfo *info)
307666846048SRick Jones {
307766846048SRick Jones 	struct virtnet_info *vi = netdev_priv(dev);
307866846048SRick Jones 	struct virtio_device *vdev = vi->vdev;
307966846048SRick Jones 
3080fb3ceec1SWolfram Sang 	strscpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
3081fb3ceec1SWolfram Sang 	strscpy(info->version, VIRTNET_DRIVER_VERSION, sizeof(info->version));
3082fb3ceec1SWolfram Sang 	strscpy(info->bus_info, virtio_bus_name(vdev), sizeof(info->bus_info));
308366846048SRick Jones 
308466846048SRick Jones }
308566846048SRick Jones 
3086d73bcd2cSJason Wang /* TODO: Eliminate OOO packets during switching */
3087d73bcd2cSJason Wang static int virtnet_set_channels(struct net_device *dev,
3088d73bcd2cSJason Wang 				struct ethtool_channels *channels)
3089d73bcd2cSJason Wang {
3090d73bcd2cSJason Wang 	struct virtnet_info *vi = netdev_priv(dev);
3091d73bcd2cSJason Wang 	u16 queue_pairs = channels->combined_count;
3092d73bcd2cSJason Wang 	int err;
3093d73bcd2cSJason Wang 
3094d73bcd2cSJason Wang 	/* We don't support separate rx/tx channels.
3095d73bcd2cSJason Wang 	 * We don't allow setting 'other' channels.
3096d73bcd2cSJason Wang 	 */
3097d73bcd2cSJason Wang 	if (channels->rx_count || channels->tx_count || channels->other_count)
3098d73bcd2cSJason Wang 		return -EINVAL;
3099d73bcd2cSJason Wang 
3100c18e9cd6SAmos Kong 	if (queue_pairs > vi->max_queue_pairs || queue_pairs == 0)
3101d73bcd2cSJason Wang 		return -EINVAL;
3102d73bcd2cSJason Wang 
3103f600b690SJohn Fastabend 	/* For now we don't support modifying channels while XDP is loaded
3104f600b690SJohn Fastabend 	 * also when XDP is loaded all RX queues have XDP programs so we only
3105f600b690SJohn Fastabend 	 * need to check a single RX queue.
3106f600b690SJohn Fastabend 	 */
3107f600b690SJohn Fastabend 	if (vi->rq[0].xdp_prog)
3108f600b690SJohn Fastabend 		return -EINVAL;
3109f600b690SJohn Fastabend 
3110a0d1d0f4SSebastian Andrzej Siewior 	cpus_read_lock();
311147315329SJohn Fastabend 	err = _virtnet_set_queues(vi, queue_pairs);
3112de33212fSJeff Dike 	if (err) {
3113a0d1d0f4SSebastian Andrzej Siewior 		cpus_read_unlock();
3114de33212fSJeff Dike 		goto err;
3115d73bcd2cSJason Wang 	}
3116de33212fSJeff Dike 	virtnet_set_affinity(vi);
3117a0d1d0f4SSebastian Andrzej Siewior 	cpus_read_unlock();
3118d73bcd2cSJason Wang 
3119de33212fSJeff Dike 	netif_set_real_num_tx_queues(dev, queue_pairs);
3120de33212fSJeff Dike 	netif_set_real_num_rx_queues(dev, queue_pairs);
3121de33212fSJeff Dike  err:
3122d73bcd2cSJason Wang 	return err;
3123d73bcd2cSJason Wang }
3124d73bcd2cSJason Wang 
3125d7dfc5cfSToshiaki Makita static void virtnet_get_strings(struct net_device *dev, u32 stringset, u8 *data)
3126d7dfc5cfSToshiaki Makita {
3127d7dfc5cfSToshiaki Makita 	struct virtnet_info *vi = netdev_priv(dev);
3128d7dfc5cfSToshiaki Makita 	unsigned int i, j;
3129d7a9a01bSAlexander Duyck 	u8 *p = data;
3130d7dfc5cfSToshiaki Makita 
3131d7dfc5cfSToshiaki Makita 	switch (stringset) {
3132d7dfc5cfSToshiaki Makita 	case ETH_SS_STATS:
3133d7dfc5cfSToshiaki Makita 		for (i = 0; i < vi->curr_queue_pairs; i++) {
3134d7a9a01bSAlexander Duyck 			for (j = 0; j < VIRTNET_RQ_STATS_LEN; j++)
3135d7a9a01bSAlexander Duyck 				ethtool_sprintf(&p, "rx_queue_%u_%s", i,
3136d7a9a01bSAlexander Duyck 						virtnet_rq_stats_desc[j].desc);
3137d7dfc5cfSToshiaki Makita 		}
3138d7dfc5cfSToshiaki Makita 
3139d7dfc5cfSToshiaki Makita 		for (i = 0; i < vi->curr_queue_pairs; i++) {
3140d7a9a01bSAlexander Duyck 			for (j = 0; j < VIRTNET_SQ_STATS_LEN; j++)
3141d7a9a01bSAlexander Duyck 				ethtool_sprintf(&p, "tx_queue_%u_%s", i,
3142d7a9a01bSAlexander Duyck 						virtnet_sq_stats_desc[j].desc);
3143d7dfc5cfSToshiaki Makita 		}
3144d7dfc5cfSToshiaki Makita 		break;
3145d7dfc5cfSToshiaki Makita 	}
3146d7dfc5cfSToshiaki Makita }
3147d7dfc5cfSToshiaki Makita 
3148d7dfc5cfSToshiaki Makita static int virtnet_get_sset_count(struct net_device *dev, int sset)
3149d7dfc5cfSToshiaki Makita {
3150d7dfc5cfSToshiaki Makita 	struct virtnet_info *vi = netdev_priv(dev);
3151d7dfc5cfSToshiaki Makita 
3152d7dfc5cfSToshiaki Makita 	switch (sset) {
3153d7dfc5cfSToshiaki Makita 	case ETH_SS_STATS:
3154d7dfc5cfSToshiaki Makita 		return vi->curr_queue_pairs * (VIRTNET_RQ_STATS_LEN +
3155d7dfc5cfSToshiaki Makita 					       VIRTNET_SQ_STATS_LEN);
3156d7dfc5cfSToshiaki Makita 	default:
3157d7dfc5cfSToshiaki Makita 		return -EOPNOTSUPP;
3158d7dfc5cfSToshiaki Makita 	}
3159d7dfc5cfSToshiaki Makita }
3160d7dfc5cfSToshiaki Makita 
3161d7dfc5cfSToshiaki Makita static void virtnet_get_ethtool_stats(struct net_device *dev,
3162d7dfc5cfSToshiaki Makita 				      struct ethtool_stats *stats, u64 *data)
3163d7dfc5cfSToshiaki Makita {
3164d7dfc5cfSToshiaki Makita 	struct virtnet_info *vi = netdev_priv(dev);
3165d7dfc5cfSToshiaki Makita 	unsigned int idx = 0, start, i, j;
3166d7dfc5cfSToshiaki Makita 	const u8 *stats_base;
3167d7dfc5cfSToshiaki Makita 	size_t offset;
3168d7dfc5cfSToshiaki Makita 
3169d7dfc5cfSToshiaki Makita 	for (i = 0; i < vi->curr_queue_pairs; i++) {
3170d7dfc5cfSToshiaki Makita 		struct receive_queue *rq = &vi->rq[i];
3171d7dfc5cfSToshiaki Makita 
3172d46eeeafSJason Wang 		stats_base = (u8 *)&rq->stats;
3173d7dfc5cfSToshiaki Makita 		do {
3174068c38adSThomas Gleixner 			start = u64_stats_fetch_begin(&rq->stats.syncp);
3175d7dfc5cfSToshiaki Makita 			for (j = 0; j < VIRTNET_RQ_STATS_LEN; j++) {
3176d7dfc5cfSToshiaki Makita 				offset = virtnet_rq_stats_desc[j].offset;
3177d7dfc5cfSToshiaki Makita 				data[idx + j] = *(u64 *)(stats_base + offset);
3178d7dfc5cfSToshiaki Makita 			}
3179068c38adSThomas Gleixner 		} while (u64_stats_fetch_retry(&rq->stats.syncp, start));
3180d7dfc5cfSToshiaki Makita 		idx += VIRTNET_RQ_STATS_LEN;
3181d7dfc5cfSToshiaki Makita 	}
3182d7dfc5cfSToshiaki Makita 
3183d7dfc5cfSToshiaki Makita 	for (i = 0; i < vi->curr_queue_pairs; i++) {
3184d7dfc5cfSToshiaki Makita 		struct send_queue *sq = &vi->sq[i];
3185d7dfc5cfSToshiaki Makita 
3186d7dfc5cfSToshiaki Makita 		stats_base = (u8 *)&sq->stats;
3187d7dfc5cfSToshiaki Makita 		do {
3188068c38adSThomas Gleixner 			start = u64_stats_fetch_begin(&sq->stats.syncp);
3189d7dfc5cfSToshiaki Makita 			for (j = 0; j < VIRTNET_SQ_STATS_LEN; j++) {
3190d7dfc5cfSToshiaki Makita 				offset = virtnet_sq_stats_desc[j].offset;
3191d7dfc5cfSToshiaki Makita 				data[idx + j] = *(u64 *)(stats_base + offset);
3192d7dfc5cfSToshiaki Makita 			}
3193068c38adSThomas Gleixner 		} while (u64_stats_fetch_retry(&sq->stats.syncp, start));
3194d7dfc5cfSToshiaki Makita 		idx += VIRTNET_SQ_STATS_LEN;
3195d7dfc5cfSToshiaki Makita 	}
3196d7dfc5cfSToshiaki Makita }
3197d7dfc5cfSToshiaki Makita 
3198d73bcd2cSJason Wang static void virtnet_get_channels(struct net_device *dev,
3199d73bcd2cSJason Wang 				 struct ethtool_channels *channels)
3200d73bcd2cSJason Wang {
3201d73bcd2cSJason Wang 	struct virtnet_info *vi = netdev_priv(dev);
3202d73bcd2cSJason Wang 
3203d73bcd2cSJason Wang 	channels->combined_count = vi->curr_queue_pairs;
3204d73bcd2cSJason Wang 	channels->max_combined = vi->max_queue_pairs;
3205d73bcd2cSJason Wang 	channels->max_other = 0;
3206d73bcd2cSJason Wang 	channels->rx_count = 0;
3207d73bcd2cSJason Wang 	channels->tx_count = 0;
3208d73bcd2cSJason Wang 	channels->other_count = 0;
3209d73bcd2cSJason Wang }
3210d73bcd2cSJason Wang 
3211ebb6b4b1SPhilippe Reynes static int virtnet_set_link_ksettings(struct net_device *dev,
3212ebb6b4b1SPhilippe Reynes 				      const struct ethtool_link_ksettings *cmd)
321316032be5SNikolay Aleksandrov {
321416032be5SNikolay Aleksandrov 	struct virtnet_info *vi = netdev_priv(dev);
321516032be5SNikolay Aleksandrov 
32169aedc6e2SCris Forno 	return ethtool_virtdev_set_link_ksettings(dev, cmd,
32179aedc6e2SCris Forno 						  &vi->speed, &vi->duplex);
321816032be5SNikolay Aleksandrov }
321916032be5SNikolay Aleksandrov 
3220ebb6b4b1SPhilippe Reynes static int virtnet_get_link_ksettings(struct net_device *dev,
3221ebb6b4b1SPhilippe Reynes 				      struct ethtool_link_ksettings *cmd)
322216032be5SNikolay Aleksandrov {
322316032be5SNikolay Aleksandrov 	struct virtnet_info *vi = netdev_priv(dev);
322416032be5SNikolay Aleksandrov 
3225ebb6b4b1SPhilippe Reynes 	cmd->base.speed = vi->speed;
3226ebb6b4b1SPhilippe Reynes 	cmd->base.duplex = vi->duplex;
3227ebb6b4b1SPhilippe Reynes 	cmd->base.port = PORT_OTHER;
322816032be5SNikolay Aleksandrov 
322916032be5SNikolay Aleksandrov 	return 0;
323016032be5SNikolay Aleksandrov }
323116032be5SNikolay Aleksandrov 
3232699b045aSAlvaro Karsz static int virtnet_send_notf_coal_cmds(struct virtnet_info *vi,
3233699b045aSAlvaro Karsz 				       struct ethtool_coalesce *ec)
3234699b045aSAlvaro Karsz {
3235699b045aSAlvaro Karsz 	struct scatterlist sgs_tx, sgs_rx;
3236*829cce76SHeng Qi 	int i;
3237699b045aSAlvaro Karsz 
3238accc1bf2SBrett Creeley 	vi->ctrl->coal_tx.tx_usecs = cpu_to_le32(ec->tx_coalesce_usecs);
3239accc1bf2SBrett Creeley 	vi->ctrl->coal_tx.tx_max_packets = cpu_to_le32(ec->tx_max_coalesced_frames);
3240accc1bf2SBrett Creeley 	sg_init_one(&sgs_tx, &vi->ctrl->coal_tx, sizeof(vi->ctrl->coal_tx));
3241699b045aSAlvaro Karsz 
3242699b045aSAlvaro Karsz 	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_NOTF_COAL,
3243699b045aSAlvaro Karsz 				  VIRTIO_NET_CTRL_NOTF_COAL_TX_SET,
3244699b045aSAlvaro Karsz 				  &sgs_tx))
3245699b045aSAlvaro Karsz 		return -EINVAL;
3246699b045aSAlvaro Karsz 
3247699b045aSAlvaro Karsz 	/* Save parameters */
3248308d7982SGavin Li 	vi->intr_coal_tx.max_usecs = ec->tx_coalesce_usecs;
3249308d7982SGavin Li 	vi->intr_coal_tx.max_packets = ec->tx_max_coalesced_frames;
3250*829cce76SHeng Qi 	for (i = 0; i < vi->max_queue_pairs; i++) {
3251*829cce76SHeng Qi 		vi->sq[i].intr_coal.max_usecs = ec->tx_coalesce_usecs;
3252*829cce76SHeng Qi 		vi->sq[i].intr_coal.max_packets = ec->tx_max_coalesced_frames;
3253*829cce76SHeng Qi 	}
3254699b045aSAlvaro Karsz 
3255accc1bf2SBrett Creeley 	vi->ctrl->coal_rx.rx_usecs = cpu_to_le32(ec->rx_coalesce_usecs);
3256accc1bf2SBrett Creeley 	vi->ctrl->coal_rx.rx_max_packets = cpu_to_le32(ec->rx_max_coalesced_frames);
3257accc1bf2SBrett Creeley 	sg_init_one(&sgs_rx, &vi->ctrl->coal_rx, sizeof(vi->ctrl->coal_rx));
3258699b045aSAlvaro Karsz 
3259699b045aSAlvaro Karsz 	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_NOTF_COAL,
3260699b045aSAlvaro Karsz 				  VIRTIO_NET_CTRL_NOTF_COAL_RX_SET,
3261699b045aSAlvaro Karsz 				  &sgs_rx))
3262699b045aSAlvaro Karsz 		return -EINVAL;
3263699b045aSAlvaro Karsz 
3264699b045aSAlvaro Karsz 	/* Save parameters */
3265308d7982SGavin Li 	vi->intr_coal_rx.max_usecs = ec->rx_coalesce_usecs;
3266308d7982SGavin Li 	vi->intr_coal_rx.max_packets = ec->rx_max_coalesced_frames;
3267*829cce76SHeng Qi 	for (i = 0; i < vi->max_queue_pairs; i++) {
3268*829cce76SHeng Qi 		vi->rq[i].intr_coal.max_usecs = ec->rx_coalesce_usecs;
3269*829cce76SHeng Qi 		vi->rq[i].intr_coal.max_packets = ec->rx_max_coalesced_frames;
3270*829cce76SHeng Qi 	}
3271699b045aSAlvaro Karsz 
3272699b045aSAlvaro Karsz 	return 0;
3273699b045aSAlvaro Karsz }
3274699b045aSAlvaro Karsz 
3275394bd877SGavin Li static int virtnet_send_ctrl_coal_vq_cmd(struct virtnet_info *vi,
3276394bd877SGavin Li 					 u16 vqn, u32 max_usecs, u32 max_packets)
3277394bd877SGavin Li {
3278394bd877SGavin Li 	struct scatterlist sgs;
3279394bd877SGavin Li 
3280394bd877SGavin Li 	vi->ctrl->coal_vq.vqn = cpu_to_le16(vqn);
3281394bd877SGavin Li 	vi->ctrl->coal_vq.coal.max_usecs = cpu_to_le32(max_usecs);
3282394bd877SGavin Li 	vi->ctrl->coal_vq.coal.max_packets = cpu_to_le32(max_packets);
3283394bd877SGavin Li 	sg_init_one(&sgs, &vi->ctrl->coal_vq, sizeof(vi->ctrl->coal_vq));
3284394bd877SGavin Li 
3285394bd877SGavin Li 	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_NOTF_COAL,
3286394bd877SGavin Li 				  VIRTIO_NET_CTRL_NOTF_COAL_VQ_SET,
3287394bd877SGavin Li 				  &sgs))
3288394bd877SGavin Li 		return -EINVAL;
3289394bd877SGavin Li 
3290394bd877SGavin Li 	return 0;
3291394bd877SGavin Li }
3292394bd877SGavin Li 
3293394bd877SGavin Li static int virtnet_send_notf_coal_vq_cmds(struct virtnet_info *vi,
3294394bd877SGavin Li 					  struct ethtool_coalesce *ec,
3295394bd877SGavin Li 					  u16 queue)
3296394bd877SGavin Li {
3297394bd877SGavin Li 	int err;
3298394bd877SGavin Li 
3299394bd877SGavin Li 	if (ec->rx_coalesce_usecs || ec->rx_max_coalesced_frames) {
3300394bd877SGavin Li 		err = virtnet_send_ctrl_coal_vq_cmd(vi, rxq2vq(queue),
3301394bd877SGavin Li 						    ec->rx_coalesce_usecs,
3302394bd877SGavin Li 						    ec->rx_max_coalesced_frames);
3303394bd877SGavin Li 		if (err)
3304394bd877SGavin Li 			return err;
3305394bd877SGavin Li 		/* Save parameters */
3306394bd877SGavin Li 		vi->rq[queue].intr_coal.max_usecs = ec->rx_coalesce_usecs;
3307394bd877SGavin Li 		vi->rq[queue].intr_coal.max_packets = ec->rx_max_coalesced_frames;
3308394bd877SGavin Li 	}
3309394bd877SGavin Li 
3310394bd877SGavin Li 	if (ec->tx_coalesce_usecs || ec->tx_max_coalesced_frames) {
3311394bd877SGavin Li 		err = virtnet_send_ctrl_coal_vq_cmd(vi, txq2vq(queue),
3312394bd877SGavin Li 						    ec->tx_coalesce_usecs,
3313394bd877SGavin Li 						    ec->tx_max_coalesced_frames);
3314394bd877SGavin Li 		if (err)
3315394bd877SGavin Li 			return err;
3316394bd877SGavin Li 		/* Save parameters */
3317394bd877SGavin Li 		vi->sq[queue].intr_coal.max_usecs = ec->tx_coalesce_usecs;
3318394bd877SGavin Li 		vi->sq[queue].intr_coal.max_packets = ec->tx_max_coalesced_frames;
3319394bd877SGavin Li 	}
3320394bd877SGavin Li 
3321394bd877SGavin Li 	return 0;
3322394bd877SGavin Li }
3323394bd877SGavin Li 
3324699b045aSAlvaro Karsz static int virtnet_coal_params_supported(struct ethtool_coalesce *ec)
3325699b045aSAlvaro Karsz {
3326699b045aSAlvaro Karsz 	/* usecs coalescing is supported only if VIRTIO_NET_F_NOTF_COAL
3327699b045aSAlvaro Karsz 	 * feature is negotiated.
3328699b045aSAlvaro Karsz 	 */
3329699b045aSAlvaro Karsz 	if (ec->rx_coalesce_usecs || ec->tx_coalesce_usecs)
3330699b045aSAlvaro Karsz 		return -EOPNOTSUPP;
3331699b045aSAlvaro Karsz 
3332699b045aSAlvaro Karsz 	if (ec->tx_max_coalesced_frames > 1 ||
3333699b045aSAlvaro Karsz 	    ec->rx_max_coalesced_frames != 1)
3334699b045aSAlvaro Karsz 		return -EINVAL;
3335699b045aSAlvaro Karsz 
3336699b045aSAlvaro Karsz 	return 0;
3337699b045aSAlvaro Karsz }
3338699b045aSAlvaro Karsz 
3339394bd877SGavin Li static int virtnet_should_update_vq_weight(int dev_flags, int weight,
3340394bd877SGavin Li 					   int vq_weight, bool *should_update)
3341394bd877SGavin Li {
3342394bd877SGavin Li 	if (weight ^ vq_weight) {
3343394bd877SGavin Li 		if (dev_flags & IFF_UP)
3344394bd877SGavin Li 			return -EBUSY;
3345394bd877SGavin Li 		*should_update = true;
3346394bd877SGavin Li 	}
3347394bd877SGavin Li 
3348394bd877SGavin Li 	return 0;
3349394bd877SGavin Li }
3350394bd877SGavin Li 
33510c465be1SJason Wang static int virtnet_set_coalesce(struct net_device *dev,
3352f3ccfda1SYufeng Mo 				struct ethtool_coalesce *ec,
3353f3ccfda1SYufeng Mo 				struct kernel_ethtool_coalesce *kernel_coal,
3354f3ccfda1SYufeng Mo 				struct netlink_ext_ack *extack)
33550c465be1SJason Wang {
33560c465be1SJason Wang 	struct virtnet_info *vi = netdev_priv(dev);
3357394bd877SGavin Li 	int ret, queue_number, napi_weight;
3358699b045aSAlvaro Karsz 	bool update_napi = false;
33590c465be1SJason Wang 
3360699b045aSAlvaro Karsz 	/* Can't change NAPI weight if the link is up */
33610c465be1SJason Wang 	napi_weight = ec->tx_max_coalesced_frames ? NAPI_POLL_WEIGHT : 0;
3362394bd877SGavin Li 	for (queue_number = 0; queue_number < vi->max_queue_pairs; queue_number++) {
3363394bd877SGavin Li 		ret = virtnet_should_update_vq_weight(dev->flags, napi_weight,
3364394bd877SGavin Li 						      vi->sq[queue_number].napi.weight,
3365394bd877SGavin Li 						      &update_napi);
3366394bd877SGavin Li 		if (ret)
3367394bd877SGavin Li 			return ret;
3368394bd877SGavin Li 
3369394bd877SGavin Li 		if (update_napi) {
3370394bd877SGavin Li 			/* All queues that belong to [queue_number, vi->max_queue_pairs] will be
3371394bd877SGavin Li 			 * updated for the sake of simplicity, which might not be necessary
3372394bd877SGavin Li 			 */
3373394bd877SGavin Li 			break;
3374394bd877SGavin Li 		}
3375699b045aSAlvaro Karsz 	}
3376699b045aSAlvaro Karsz 
3377699b045aSAlvaro Karsz 	if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_NOTF_COAL))
3378699b045aSAlvaro Karsz 		ret = virtnet_send_notf_coal_cmds(vi, ec);
3379699b045aSAlvaro Karsz 	else
3380699b045aSAlvaro Karsz 		ret = virtnet_coal_params_supported(ec);
3381699b045aSAlvaro Karsz 
3382699b045aSAlvaro Karsz 	if (ret)
3383699b045aSAlvaro Karsz 		return ret;
3384699b045aSAlvaro Karsz 
3385699b045aSAlvaro Karsz 	if (update_napi) {
3386394bd877SGavin Li 		for (; queue_number < vi->max_queue_pairs; queue_number++)
3387394bd877SGavin Li 			vi->sq[queue_number].napi.weight = napi_weight;
33880c465be1SJason Wang 	}
33890c465be1SJason Wang 
3390699b045aSAlvaro Karsz 	return ret;
33910c465be1SJason Wang }
33920c465be1SJason Wang 
33930c465be1SJason Wang static int virtnet_get_coalesce(struct net_device *dev,
3394f3ccfda1SYufeng Mo 				struct ethtool_coalesce *ec,
3395f3ccfda1SYufeng Mo 				struct kernel_ethtool_coalesce *kernel_coal,
3396f3ccfda1SYufeng Mo 				struct netlink_ext_ack *extack)
33970c465be1SJason Wang {
33980c465be1SJason Wang 	struct virtnet_info *vi = netdev_priv(dev);
33990c465be1SJason Wang 
3400699b045aSAlvaro Karsz 	if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_NOTF_COAL)) {
3401308d7982SGavin Li 		ec->rx_coalesce_usecs = vi->intr_coal_rx.max_usecs;
3402308d7982SGavin Li 		ec->tx_coalesce_usecs = vi->intr_coal_tx.max_usecs;
3403308d7982SGavin Li 		ec->tx_max_coalesced_frames = vi->intr_coal_tx.max_packets;
3404308d7982SGavin Li 		ec->rx_max_coalesced_frames = vi->intr_coal_rx.max_packets;
3405699b045aSAlvaro Karsz 	} else {
3406699b045aSAlvaro Karsz 		ec->rx_max_coalesced_frames = 1;
34070c465be1SJason Wang 
34080c465be1SJason Wang 		if (vi->sq[0].napi.weight)
34090c465be1SJason Wang 			ec->tx_max_coalesced_frames = 1;
3410699b045aSAlvaro Karsz 	}
34110c465be1SJason Wang 
34120c465be1SJason Wang 	return 0;
34130c465be1SJason Wang }
34140c465be1SJason Wang 
3415394bd877SGavin Li static int virtnet_set_per_queue_coalesce(struct net_device *dev,
3416394bd877SGavin Li 					  u32 queue,
3417394bd877SGavin Li 					  struct ethtool_coalesce *ec)
3418394bd877SGavin Li {
3419394bd877SGavin Li 	struct virtnet_info *vi = netdev_priv(dev);
3420394bd877SGavin Li 	int ret, napi_weight;
3421394bd877SGavin Li 	bool update_napi = false;
3422394bd877SGavin Li 
3423394bd877SGavin Li 	if (queue >= vi->max_queue_pairs)
3424394bd877SGavin Li 		return -EINVAL;
3425394bd877SGavin Li 
3426394bd877SGavin Li 	/* Can't change NAPI weight if the link is up */
3427394bd877SGavin Li 	napi_weight = ec->tx_max_coalesced_frames ? NAPI_POLL_WEIGHT : 0;
3428394bd877SGavin Li 	ret = virtnet_should_update_vq_weight(dev->flags, napi_weight,
3429394bd877SGavin Li 					      vi->sq[queue].napi.weight,
3430394bd877SGavin Li 					      &update_napi);
3431394bd877SGavin Li 	if (ret)
3432394bd877SGavin Li 		return ret;
3433394bd877SGavin Li 
3434394bd877SGavin Li 	if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL))
3435394bd877SGavin Li 		ret = virtnet_send_notf_coal_vq_cmds(vi, ec, queue);
3436394bd877SGavin Li 	else
3437394bd877SGavin Li 		ret = virtnet_coal_params_supported(ec);
3438394bd877SGavin Li 
3439394bd877SGavin Li 	if (ret)
3440394bd877SGavin Li 		return ret;
3441394bd877SGavin Li 
3442394bd877SGavin Li 	if (update_napi)
3443394bd877SGavin Li 		vi->sq[queue].napi.weight = napi_weight;
3444394bd877SGavin Li 
3445394bd877SGavin Li 	return 0;
3446394bd877SGavin Li }
3447394bd877SGavin Li 
3448394bd877SGavin Li static int virtnet_get_per_queue_coalesce(struct net_device *dev,
3449394bd877SGavin Li 					  u32 queue,
3450394bd877SGavin Li 					  struct ethtool_coalesce *ec)
3451394bd877SGavin Li {
3452394bd877SGavin Li 	struct virtnet_info *vi = netdev_priv(dev);
3453394bd877SGavin Li 
3454394bd877SGavin Li 	if (queue >= vi->max_queue_pairs)
3455394bd877SGavin Li 		return -EINVAL;
3456394bd877SGavin Li 
3457394bd877SGavin Li 	if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL)) {
3458394bd877SGavin Li 		ec->rx_coalesce_usecs = vi->rq[queue].intr_coal.max_usecs;
3459394bd877SGavin Li 		ec->tx_coalesce_usecs = vi->sq[queue].intr_coal.max_usecs;
3460394bd877SGavin Li 		ec->tx_max_coalesced_frames = vi->sq[queue].intr_coal.max_packets;
3461394bd877SGavin Li 		ec->rx_max_coalesced_frames = vi->rq[queue].intr_coal.max_packets;
3462394bd877SGavin Li 	} else {
3463394bd877SGavin Li 		ec->rx_max_coalesced_frames = 1;
3464394bd877SGavin Li 
346584a056f7SHeng Qi 		if (vi->sq[queue].napi.weight)
3466394bd877SGavin Li 			ec->tx_max_coalesced_frames = 1;
3467394bd877SGavin Li 	}
3468394bd877SGavin Li 
3469394bd877SGavin Li 	return 0;
3470394bd877SGavin Li }
3471394bd877SGavin Li 
347216032be5SNikolay Aleksandrov static void virtnet_init_settings(struct net_device *dev)
347316032be5SNikolay Aleksandrov {
347416032be5SNikolay Aleksandrov 	struct virtnet_info *vi = netdev_priv(dev);
347516032be5SNikolay Aleksandrov 
347616032be5SNikolay Aleksandrov 	vi->speed = SPEED_UNKNOWN;
347716032be5SNikolay Aleksandrov 	vi->duplex = DUPLEX_UNKNOWN;
347816032be5SNikolay Aleksandrov }
347916032be5SNikolay Aleksandrov 
3480faa9b39fSJason Baron static void virtnet_update_settings(struct virtnet_info *vi)
3481faa9b39fSJason Baron {
3482faa9b39fSJason Baron 	u32 speed;
3483faa9b39fSJason Baron 	u8 duplex;
3484faa9b39fSJason Baron 
3485faa9b39fSJason Baron 	if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_SPEED_DUPLEX))
3486faa9b39fSJason Baron 		return;
3487faa9b39fSJason Baron 
348864ffa39dSMichael S. Tsirkin 	virtio_cread_le(vi->vdev, struct virtio_net_config, speed, &speed);
348964ffa39dSMichael S. Tsirkin 
3490faa9b39fSJason Baron 	if (ethtool_validate_speed(speed))
3491faa9b39fSJason Baron 		vi->speed = speed;
349264ffa39dSMichael S. Tsirkin 
349364ffa39dSMichael S. Tsirkin 	virtio_cread_le(vi->vdev, struct virtio_net_config, duplex, &duplex);
349464ffa39dSMichael S. Tsirkin 
3495faa9b39fSJason Baron 	if (ethtool_validate_duplex(duplex))
3496faa9b39fSJason Baron 		vi->duplex = duplex;
3497faa9b39fSJason Baron }
3498faa9b39fSJason Baron 
3499c7114b12SAndrew Melnychenko static u32 virtnet_get_rxfh_key_size(struct net_device *dev)
3500c7114b12SAndrew Melnychenko {
3501c7114b12SAndrew Melnychenko 	return ((struct virtnet_info *)netdev_priv(dev))->rss_key_size;
3502c7114b12SAndrew Melnychenko }
3503c7114b12SAndrew Melnychenko 
3504c7114b12SAndrew Melnychenko static u32 virtnet_get_rxfh_indir_size(struct net_device *dev)
3505c7114b12SAndrew Melnychenko {
3506c7114b12SAndrew Melnychenko 	return ((struct virtnet_info *)netdev_priv(dev))->rss_indir_table_size;
3507c7114b12SAndrew Melnychenko }
3508c7114b12SAndrew Melnychenko 
3509c7114b12SAndrew Melnychenko static int virtnet_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, u8 *hfunc)
3510c7114b12SAndrew Melnychenko {
3511c7114b12SAndrew Melnychenko 	struct virtnet_info *vi = netdev_priv(dev);
3512c7114b12SAndrew Melnychenko 	int i;
3513c7114b12SAndrew Melnychenko 
3514c7114b12SAndrew Melnychenko 	if (indir) {
3515c7114b12SAndrew Melnychenko 		for (i = 0; i < vi->rss_indir_table_size; ++i)
3516c7114b12SAndrew Melnychenko 			indir[i] = vi->ctrl->rss.indirection_table[i];
3517c7114b12SAndrew Melnychenko 	}
3518c7114b12SAndrew Melnychenko 
3519c7114b12SAndrew Melnychenko 	if (key)
3520c7114b12SAndrew Melnychenko 		memcpy(key, vi->ctrl->rss.key, vi->rss_key_size);
3521c7114b12SAndrew Melnychenko 
3522c7114b12SAndrew Melnychenko 	if (hfunc)
3523c7114b12SAndrew Melnychenko 		*hfunc = ETH_RSS_HASH_TOP;
3524c7114b12SAndrew Melnychenko 
3525c7114b12SAndrew Melnychenko 	return 0;
3526c7114b12SAndrew Melnychenko }
3527c7114b12SAndrew Melnychenko 
3528c7114b12SAndrew Melnychenko static int virtnet_set_rxfh(struct net_device *dev, const u32 *indir, const u8 *key, const u8 hfunc)
3529c7114b12SAndrew Melnychenko {
3530c7114b12SAndrew Melnychenko 	struct virtnet_info *vi = netdev_priv(dev);
3531c7114b12SAndrew Melnychenko 	int i;
3532c7114b12SAndrew Melnychenko 
3533c7114b12SAndrew Melnychenko 	if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
3534c7114b12SAndrew Melnychenko 		return -EOPNOTSUPP;
3535c7114b12SAndrew Melnychenko 
3536c7114b12SAndrew Melnychenko 	if (indir) {
3537c7114b12SAndrew Melnychenko 		for (i = 0; i < vi->rss_indir_table_size; ++i)
3538c7114b12SAndrew Melnychenko 			vi->ctrl->rss.indirection_table[i] = indir[i];
3539c7114b12SAndrew Melnychenko 	}
3540c7114b12SAndrew Melnychenko 	if (key)
3541c7114b12SAndrew Melnychenko 		memcpy(vi->ctrl->rss.key, key, vi->rss_key_size);
3542c7114b12SAndrew Melnychenko 
3543c7114b12SAndrew Melnychenko 	virtnet_commit_rss_command(vi);
3544c7114b12SAndrew Melnychenko 
3545c7114b12SAndrew Melnychenko 	return 0;
3546c7114b12SAndrew Melnychenko }
3547c7114b12SAndrew Melnychenko 
3548c7114b12SAndrew Melnychenko static int virtnet_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info, u32 *rule_locs)
3549c7114b12SAndrew Melnychenko {
3550c7114b12SAndrew Melnychenko 	struct virtnet_info *vi = netdev_priv(dev);
3551c7114b12SAndrew Melnychenko 	int rc = 0;
3552c7114b12SAndrew Melnychenko 
3553c7114b12SAndrew Melnychenko 	switch (info->cmd) {
3554c7114b12SAndrew Melnychenko 	case ETHTOOL_GRXRINGS:
3555c7114b12SAndrew Melnychenko 		info->data = vi->curr_queue_pairs;
3556c7114b12SAndrew Melnychenko 		break;
3557c1170820SAndrew Melnychenko 	case ETHTOOL_GRXFH:
3558c1170820SAndrew Melnychenko 		virtnet_get_hashflow(vi, info);
3559c1170820SAndrew Melnychenko 		break;
3560c1170820SAndrew Melnychenko 	default:
3561c1170820SAndrew Melnychenko 		rc = -EOPNOTSUPP;
3562c1170820SAndrew Melnychenko 	}
3563c1170820SAndrew Melnychenko 
3564c1170820SAndrew Melnychenko 	return rc;
3565c1170820SAndrew Melnychenko }
3566c1170820SAndrew Melnychenko 
3567c1170820SAndrew Melnychenko static int virtnet_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info)
3568c1170820SAndrew Melnychenko {
3569c1170820SAndrew Melnychenko 	struct virtnet_info *vi = netdev_priv(dev);
3570c1170820SAndrew Melnychenko 	int rc = 0;
3571c1170820SAndrew Melnychenko 
3572c1170820SAndrew Melnychenko 	switch (info->cmd) {
3573c1170820SAndrew Melnychenko 	case ETHTOOL_SRXFH:
3574c1170820SAndrew Melnychenko 		if (!virtnet_set_hashflow(vi, info))
3575c1170820SAndrew Melnychenko 			rc = -EINVAL;
3576c1170820SAndrew Melnychenko 
3577c1170820SAndrew Melnychenko 		break;
3578c7114b12SAndrew Melnychenko 	default:
3579c7114b12SAndrew Melnychenko 		rc = -EOPNOTSUPP;
3580c7114b12SAndrew Melnychenko 	}
3581c7114b12SAndrew Melnychenko 
3582c7114b12SAndrew Melnychenko 	return rc;
3583c7114b12SAndrew Melnychenko }
3584c7114b12SAndrew Melnychenko 
35850fc0b732SStephen Hemminger static const struct ethtool_ops virtnet_ethtool_ops = {
3586699b045aSAlvaro Karsz 	.supported_coalesce_params = ETHTOOL_COALESCE_MAX_FRAMES |
3587699b045aSAlvaro Karsz 		ETHTOOL_COALESCE_USECS,
358866846048SRick Jones 	.get_drvinfo = virtnet_get_drvinfo,
35899f4d26d0SMark McLoughlin 	.get_link = ethtool_op_get_link,
35908f9f4668SRick Jones 	.get_ringparam = virtnet_get_ringparam,
3591a335b33fSXuan Zhuo 	.set_ringparam = virtnet_set_ringparam,
3592d7dfc5cfSToshiaki Makita 	.get_strings = virtnet_get_strings,
3593d7dfc5cfSToshiaki Makita 	.get_sset_count = virtnet_get_sset_count,
3594d7dfc5cfSToshiaki Makita 	.get_ethtool_stats = virtnet_get_ethtool_stats,
3595d73bcd2cSJason Wang 	.set_channels = virtnet_set_channels,
3596d73bcd2cSJason Wang 	.get_channels = virtnet_get_channels,
3597074c3582SJacob Keller 	.get_ts_info = ethtool_op_get_ts_info,
3598ebb6b4b1SPhilippe Reynes 	.get_link_ksettings = virtnet_get_link_ksettings,
3599ebb6b4b1SPhilippe Reynes 	.set_link_ksettings = virtnet_set_link_ksettings,
36000c465be1SJason Wang 	.set_coalesce = virtnet_set_coalesce,
36010c465be1SJason Wang 	.get_coalesce = virtnet_get_coalesce,
3602394bd877SGavin Li 	.set_per_queue_coalesce = virtnet_set_per_queue_coalesce,
3603394bd877SGavin Li 	.get_per_queue_coalesce = virtnet_get_per_queue_coalesce,
3604c7114b12SAndrew Melnychenko 	.get_rxfh_key_size = virtnet_get_rxfh_key_size,
3605c7114b12SAndrew Melnychenko 	.get_rxfh_indir_size = virtnet_get_rxfh_indir_size,
3606c7114b12SAndrew Melnychenko 	.get_rxfh = virtnet_get_rxfh,
3607c7114b12SAndrew Melnychenko 	.set_rxfh = virtnet_set_rxfh,
3608c7114b12SAndrew Melnychenko 	.get_rxnfc = virtnet_get_rxnfc,
3609c1170820SAndrew Melnychenko 	.set_rxnfc = virtnet_set_rxnfc,
3610a9ea3fc6SHerbert Xu };
3611a9ea3fc6SHerbert Xu 
36129fe7bfceSJohn Fastabend static void virtnet_freeze_down(struct virtio_device *vdev)
36139fe7bfceSJohn Fastabend {
36149fe7bfceSJohn Fastabend 	struct virtnet_info *vi = vdev->priv;
36159fe7bfceSJohn Fastabend 
36169fe7bfceSJohn Fastabend 	/* Make sure no work handler is accessing the device */
36179fe7bfceSJohn Fastabend 	flush_work(&vi->config_work);
36189fe7bfceSJohn Fastabend 
361905c998b7SAke Koomsin 	netif_tx_lock_bh(vi->dev);
36209fe7bfceSJohn Fastabend 	netif_device_detach(vi->dev);
362105c998b7SAke Koomsin 	netif_tx_unlock_bh(vi->dev);
36228af52fe9SStephan Gerhold 	if (netif_running(vi->dev))
36238af52fe9SStephan Gerhold 		virtnet_close(vi->dev);
36249fe7bfceSJohn Fastabend }
36259fe7bfceSJohn Fastabend 
36269fe7bfceSJohn Fastabend static int init_vqs(struct virtnet_info *vi);
36279fe7bfceSJohn Fastabend 
36289fe7bfceSJohn Fastabend static int virtnet_restore_up(struct virtio_device *vdev)
36299fe7bfceSJohn Fastabend {
36309fe7bfceSJohn Fastabend 	struct virtnet_info *vi = vdev->priv;
36318af52fe9SStephan Gerhold 	int err;
36329fe7bfceSJohn Fastabend 
36339fe7bfceSJohn Fastabend 	err = init_vqs(vi);
36349fe7bfceSJohn Fastabend 	if (err)
36359fe7bfceSJohn Fastabend 		return err;
36369fe7bfceSJohn Fastabend 
36379fe7bfceSJohn Fastabend 	virtio_device_ready(vdev);
36389fe7bfceSJohn Fastabend 
36395a159128SJason Wang 	enable_delayed_refill(vi);
36405a159128SJason Wang 
36419fe7bfceSJohn Fastabend 	if (netif_running(vi->dev)) {
36428af52fe9SStephan Gerhold 		err = virtnet_open(vi->dev);
36438af52fe9SStephan Gerhold 		if (err)
36448af52fe9SStephan Gerhold 			return err;
36459fe7bfceSJohn Fastabend 	}
36469fe7bfceSJohn Fastabend 
364705c998b7SAke Koomsin 	netif_tx_lock_bh(vi->dev);
36489fe7bfceSJohn Fastabend 	netif_device_attach(vi->dev);
364905c998b7SAke Koomsin 	netif_tx_unlock_bh(vi->dev);
36509fe7bfceSJohn Fastabend 	return err;
36519fe7bfceSJohn Fastabend }
36529fe7bfceSJohn Fastabend 
36533f93522fSJason Wang static int virtnet_set_guest_offloads(struct virtnet_info *vi, u64 offloads)
36543f93522fSJason Wang {
36553f93522fSJason Wang 	struct scatterlist sg;
365612e57169SMichael S. Tsirkin 	vi->ctrl->offloads = cpu_to_virtio64(vi->vdev, offloads);
36573f93522fSJason Wang 
365812e57169SMichael S. Tsirkin 	sg_init_one(&sg, &vi->ctrl->offloads, sizeof(vi->ctrl->offloads));
36593f93522fSJason Wang 
36603f93522fSJason Wang 	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_GUEST_OFFLOADS,
36613f93522fSJason Wang 				  VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET, &sg)) {
36623f93522fSJason Wang 		dev_warn(&vi->dev->dev, "Fail to set guest offload.\n");
36633f93522fSJason Wang 		return -EINVAL;
36643f93522fSJason Wang 	}
36653f93522fSJason Wang 
36663f93522fSJason Wang 	return 0;
36673f93522fSJason Wang }
36683f93522fSJason Wang 
36693f93522fSJason Wang static int virtnet_clear_guest_offloads(struct virtnet_info *vi)
36703f93522fSJason Wang {
36713f93522fSJason Wang 	u64 offloads = 0;
36723f93522fSJason Wang 
36733f93522fSJason Wang 	if (!vi->guest_offloads)
36743f93522fSJason Wang 		return 0;
36753f93522fSJason Wang 
36763f93522fSJason Wang 	return virtnet_set_guest_offloads(vi, offloads);
36773f93522fSJason Wang }
36783f93522fSJason Wang 
36793f93522fSJason Wang static int virtnet_restore_guest_offloads(struct virtnet_info *vi)
36803f93522fSJason Wang {
36813f93522fSJason Wang 	u64 offloads = vi->guest_offloads;
36823f93522fSJason Wang 
36833f93522fSJason Wang 	if (!vi->guest_offloads)
36843f93522fSJason Wang 		return 0;
36853f93522fSJason Wang 
36863f93522fSJason Wang 	return virtnet_set_guest_offloads(vi, offloads);
36873f93522fSJason Wang }
36883f93522fSJason Wang 
36899861ce03SJakub Kicinski static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
36909861ce03SJakub Kicinski 			   struct netlink_ext_ack *extack)
3691f600b690SJohn Fastabend {
3692e814b958SHeng Qi 	unsigned int room = SKB_DATA_ALIGN(VIRTIO_XDP_HEADROOM +
3693e814b958SHeng Qi 					   sizeof(struct skb_shared_info));
3694e814b958SHeng Qi 	unsigned int max_sz = PAGE_SIZE - room - ETH_HLEN;
3695f600b690SJohn Fastabend 	struct virtnet_info *vi = netdev_priv(dev);
3696f600b690SJohn Fastabend 	struct bpf_prog *old_prog;
3697017b29c3SJason Wang 	u16 xdp_qp = 0, curr_qp;
3698672aafd5SJohn Fastabend 	int i, err;
3699f600b690SJohn Fastabend 
37003f93522fSJason Wang 	if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS)
37013f93522fSJason Wang 	    && (virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO4) ||
370292502fe8SJason Wang 	        virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO6) ||
370392502fe8SJason Wang 	        virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_ECN) ||
370418ba58e1SJason Wang 		virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_UFO) ||
3705418044e1SAndrew Melnychenko 		virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_CSUM) ||
3706418044e1SAndrew Melnychenko 		virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_USO4) ||
3707418044e1SAndrew Melnychenko 		virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_USO6))) {
3708dbcf24d1SJason Wang 		NL_SET_ERR_MSG_MOD(extack, "Can't set XDP while host is implementing GRO_HW/CSUM, disable GRO_HW/CSUM first");
3709f600b690SJohn Fastabend 		return -EOPNOTSUPP;
3710f600b690SJohn Fastabend 	}
3711f600b690SJohn Fastabend 
3712f600b690SJohn Fastabend 	if (vi->mergeable_rx_bufs && !vi->any_header_sg) {
37134d463c4dSDaniel Borkmann 		NL_SET_ERR_MSG_MOD(extack, "XDP expects header/data in single page, any_header_sg required");
3714f600b690SJohn Fastabend 		return -EINVAL;
3715f600b690SJohn Fastabend 	}
3716f600b690SJohn Fastabend 
37178d9bc36dSHeng Qi 	if (prog && !prog->aux->xdp_has_frags && dev->mtu > max_sz) {
37188d9bc36dSHeng Qi 		NL_SET_ERR_MSG_MOD(extack, "MTU too large to enable XDP without frags");
37198d9bc36dSHeng Qi 		netdev_warn(dev, "single-buffer XDP requires MTU less than %u\n", max_sz);
3720f600b690SJohn Fastabend 		return -EINVAL;
3721f600b690SJohn Fastabend 	}
3722f600b690SJohn Fastabend 
3723672aafd5SJohn Fastabend 	curr_qp = vi->curr_queue_pairs - vi->xdp_queue_pairs;
3724672aafd5SJohn Fastabend 	if (prog)
3725672aafd5SJohn Fastabend 		xdp_qp = nr_cpu_ids;
3726672aafd5SJohn Fastabend 
3727672aafd5SJohn Fastabend 	/* XDP requires extra queues for XDP_TX */
3728672aafd5SJohn Fastabend 	if (curr_qp + xdp_qp > vi->max_queue_pairs) {
37299ce4e3d6SXuan Zhuo 		netdev_warn_once(dev, "XDP request %i queues but max is %i. XDP_TX and XDP_REDIRECT will operate in a slower locked tx mode.\n",
3730672aafd5SJohn Fastabend 				 curr_qp + xdp_qp, vi->max_queue_pairs);
373197c2c69eSXuan Zhuo 		xdp_qp = 0;
3732672aafd5SJohn Fastabend 	}
3733672aafd5SJohn Fastabend 
373403aa6d34SToshiaki Makita 	old_prog = rtnl_dereference(vi->rq[0].xdp_prog);
373503aa6d34SToshiaki Makita 	if (!prog && !old_prog)
373603aa6d34SToshiaki Makita 		return 0;
373703aa6d34SToshiaki Makita 
373885192dbfSAndrii Nakryiko 	if (prog)
373985192dbfSAndrii Nakryiko 		bpf_prog_add(prog, vi->max_queue_pairs - 1);
37402de2f7f4SJohn Fastabend 
37414941d472SJason Wang 	/* Make sure NAPI is not using any XDP TX queues for RX. */
3742534da5e8SToshiaki Makita 	if (netif_running(dev)) {
3743534da5e8SToshiaki Makita 		for (i = 0; i < vi->max_queue_pairs; i++) {
37444941d472SJason Wang 			napi_disable(&vi->rq[i].napi);
3745534da5e8SToshiaki Makita 			virtnet_napi_tx_disable(&vi->sq[i].napi);
3746534da5e8SToshiaki Makita 		}
3747534da5e8SToshiaki Makita 	}
37482de2f7f4SJohn Fastabend 
374903aa6d34SToshiaki Makita 	if (!prog) {
375003aa6d34SToshiaki Makita 		for (i = 0; i < vi->max_queue_pairs; i++) {
375103aa6d34SToshiaki Makita 			rcu_assign_pointer(vi->rq[i].xdp_prog, prog);
375203aa6d34SToshiaki Makita 			if (i == 0)
375303aa6d34SToshiaki Makita 				virtnet_restore_guest_offloads(vi);
375403aa6d34SToshiaki Makita 		}
375503aa6d34SToshiaki Makita 		synchronize_net();
375603aa6d34SToshiaki Makita 	}
375703aa6d34SToshiaki Makita 
37584941d472SJason Wang 	err = _virtnet_set_queues(vi, curr_qp + xdp_qp);
37594941d472SJason Wang 	if (err)
37604941d472SJason Wang 		goto err;
3761188313c1SToshiaki Makita 	netif_set_real_num_rx_queues(dev, curr_qp + xdp_qp);
37624941d472SJason Wang 	vi->xdp_queue_pairs = xdp_qp;
3763f600b690SJohn Fastabend 
376403aa6d34SToshiaki Makita 	if (prog) {
376597c2c69eSXuan Zhuo 		vi->xdp_enabled = true;
3766f600b690SJohn Fastabend 		for (i = 0; i < vi->max_queue_pairs; i++) {
3767f600b690SJohn Fastabend 			rcu_assign_pointer(vi->rq[i].xdp_prog, prog);
376803aa6d34SToshiaki Makita 			if (i == 0 && !old_prog)
37693f93522fSJason Wang 				virtnet_clear_guest_offloads(vi);
37703f93522fSJason Wang 		}
377166c0e13aSMarek Majtyka 		if (!old_prog)
377230bbf891SLorenzo Bianconi 			xdp_features_set_redirect_target(dev, true);
377397c2c69eSXuan Zhuo 	} else {
377466c0e13aSMarek Majtyka 		xdp_features_clear_redirect_target(dev);
377597c2c69eSXuan Zhuo 		vi->xdp_enabled = false;
377603aa6d34SToshiaki Makita 	}
377703aa6d34SToshiaki Makita 
377803aa6d34SToshiaki Makita 	for (i = 0; i < vi->max_queue_pairs; i++) {
3779f600b690SJohn Fastabend 		if (old_prog)
3780f600b690SJohn Fastabend 			bpf_prog_put(old_prog);
3781534da5e8SToshiaki Makita 		if (netif_running(dev)) {
37824941d472SJason Wang 			virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
3783534da5e8SToshiaki Makita 			virtnet_napi_tx_enable(vi, vi->sq[i].vq,
3784534da5e8SToshiaki Makita 					       &vi->sq[i].napi);
3785534da5e8SToshiaki Makita 		}
3786f600b690SJohn Fastabend 	}
3787f600b690SJohn Fastabend 
3788f600b690SJohn Fastabend 	return 0;
37892de2f7f4SJohn Fastabend 
37904941d472SJason Wang err:
379103aa6d34SToshiaki Makita 	if (!prog) {
379203aa6d34SToshiaki Makita 		virtnet_clear_guest_offloads(vi);
37934941d472SJason Wang 		for (i = 0; i < vi->max_queue_pairs; i++)
379403aa6d34SToshiaki Makita 			rcu_assign_pointer(vi->rq[i].xdp_prog, old_prog);
379503aa6d34SToshiaki Makita 	}
379603aa6d34SToshiaki Makita 
37978be4d9a4SToshiaki Makita 	if (netif_running(dev)) {
3798534da5e8SToshiaki Makita 		for (i = 0; i < vi->max_queue_pairs; i++) {
37994941d472SJason Wang 			virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
3800534da5e8SToshiaki Makita 			virtnet_napi_tx_enable(vi, vi->sq[i].vq,
3801534da5e8SToshiaki Makita 					       &vi->sq[i].napi);
3802534da5e8SToshiaki Makita 		}
38038be4d9a4SToshiaki Makita 	}
38042de2f7f4SJohn Fastabend 	if (prog)
38052de2f7f4SJohn Fastabend 		bpf_prog_sub(prog, vi->max_queue_pairs - 1);
38062de2f7f4SJohn Fastabend 	return err;
3807f600b690SJohn Fastabend }
3808f600b690SJohn Fastabend 
3809f4e63525SJakub Kicinski static int virtnet_xdp(struct net_device *dev, struct netdev_bpf *xdp)
3810f600b690SJohn Fastabend {
3811f600b690SJohn Fastabend 	switch (xdp->command) {
3812f600b690SJohn Fastabend 	case XDP_SETUP_PROG:
38139861ce03SJakub Kicinski 		return virtnet_xdp_set(dev, xdp->prog, xdp->extack);
3814f600b690SJohn Fastabend 	default:
3815f600b690SJohn Fastabend 		return -EINVAL;
3816f600b690SJohn Fastabend 	}
3817f600b690SJohn Fastabend }
3818f600b690SJohn Fastabend 
3819ba5e4426SSridhar Samudrala static int virtnet_get_phys_port_name(struct net_device *dev, char *buf,
3820ba5e4426SSridhar Samudrala 				      size_t len)
3821ba5e4426SSridhar Samudrala {
3822ba5e4426SSridhar Samudrala 	struct virtnet_info *vi = netdev_priv(dev);
3823ba5e4426SSridhar Samudrala 	int ret;
3824ba5e4426SSridhar Samudrala 
3825ba5e4426SSridhar Samudrala 	if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_STANDBY))
3826ba5e4426SSridhar Samudrala 		return -EOPNOTSUPP;
3827ba5e4426SSridhar Samudrala 
3828ba5e4426SSridhar Samudrala 	ret = snprintf(buf, len, "sby");
3829ba5e4426SSridhar Samudrala 	if (ret >= len)
3830ba5e4426SSridhar Samudrala 		return -EOPNOTSUPP;
3831ba5e4426SSridhar Samudrala 
3832ba5e4426SSridhar Samudrala 	return 0;
3833ba5e4426SSridhar Samudrala }
3834ba5e4426SSridhar Samudrala 
3835a02e8964SWillem de Bruijn static int virtnet_set_features(struct net_device *dev,
3836a02e8964SWillem de Bruijn 				netdev_features_t features)
3837a02e8964SWillem de Bruijn {
3838a02e8964SWillem de Bruijn 	struct virtnet_info *vi = netdev_priv(dev);
3839cf8691cbSMichael S. Tsirkin 	u64 offloads;
3840a02e8964SWillem de Bruijn 	int err;
3841a02e8964SWillem de Bruijn 
3842dbcf24d1SJason Wang 	if ((dev->features ^ features) & NETIF_F_GRO_HW) {
384397c2c69eSXuan Zhuo 		if (vi->xdp_enabled)
3844a02e8964SWillem de Bruijn 			return -EBUSY;
3845a02e8964SWillem de Bruijn 
3846dbcf24d1SJason Wang 		if (features & NETIF_F_GRO_HW)
3847cf8691cbSMichael S. Tsirkin 			offloads = vi->guest_offloads_capable;
3848a02e8964SWillem de Bruijn 		else
3849cf8691cbSMichael S. Tsirkin 			offloads = vi->guest_offloads_capable &
3850dbcf24d1SJason Wang 				   ~GUEST_OFFLOAD_GRO_HW_MASK;
3851a02e8964SWillem de Bruijn 
3852a02e8964SWillem de Bruijn 		err = virtnet_set_guest_offloads(vi, offloads);
3853a02e8964SWillem de Bruijn 		if (err)
3854a02e8964SWillem de Bruijn 			return err;
38553618ad2aSTonghao Zhang 		vi->guest_offloads = offloads;
3856cf8691cbSMichael S. Tsirkin 	}
3857cf8691cbSMichael S. Tsirkin 
3858c7114b12SAndrew Melnychenko 	if ((dev->features ^ features) & NETIF_F_RXHASH) {
3859c7114b12SAndrew Melnychenko 		if (features & NETIF_F_RXHASH)
3860c1170820SAndrew Melnychenko 			vi->ctrl->rss.hash_types = vi->rss_hash_types_saved;
3861c7114b12SAndrew Melnychenko 		else
3862c7114b12SAndrew Melnychenko 			vi->ctrl->rss.hash_types = VIRTIO_NET_HASH_REPORT_NONE;
3863c7114b12SAndrew Melnychenko 
3864c7114b12SAndrew Melnychenko 		if (!virtnet_commit_rss_command(vi))
3865c7114b12SAndrew Melnychenko 			return -EINVAL;
3866c7114b12SAndrew Melnychenko 	}
3867c7114b12SAndrew Melnychenko 
3868a02e8964SWillem de Bruijn 	return 0;
3869a02e8964SWillem de Bruijn }
3870a02e8964SWillem de Bruijn 
3871a520794bSTony Lu static void virtnet_tx_timeout(struct net_device *dev, unsigned int txqueue)
3872a520794bSTony Lu {
3873a520794bSTony Lu 	struct virtnet_info *priv = netdev_priv(dev);
3874a520794bSTony Lu 	struct send_queue *sq = &priv->sq[txqueue];
3875a520794bSTony Lu 	struct netdev_queue *txq = netdev_get_tx_queue(dev, txqueue);
3876a520794bSTony Lu 
3877a520794bSTony Lu 	u64_stats_update_begin(&sq->stats.syncp);
3878a520794bSTony Lu 	sq->stats.tx_timeouts++;
3879a520794bSTony Lu 	u64_stats_update_end(&sq->stats.syncp);
3880a520794bSTony Lu 
3881a520794bSTony Lu 	netdev_err(dev, "TX timeout on queue: %u, sq: %s, vq: 0x%x, name: %s, %u usecs ago\n",
3882a520794bSTony Lu 		   txqueue, sq->name, sq->vq->index, sq->vq->name,
38835337824fSEric Dumazet 		   jiffies_to_usecs(jiffies - READ_ONCE(txq->trans_start)));
3884a520794bSTony Lu }
3885a520794bSTony Lu 
388676288b4eSStephen Hemminger static const struct net_device_ops virtnet_netdev = {
388776288b4eSStephen Hemminger 	.ndo_open            = virtnet_open,
388876288b4eSStephen Hemminger 	.ndo_stop   	     = virtnet_close,
388976288b4eSStephen Hemminger 	.ndo_start_xmit      = start_xmit,
389076288b4eSStephen Hemminger 	.ndo_validate_addr   = eth_validate_addr,
38919c46f6d4SAlex Williamson 	.ndo_set_mac_address = virtnet_set_mac_address,
38922af7698eSAlex Williamson 	.ndo_set_rx_mode     = virtnet_set_rx_mode,
38933fa2a1dfSstephen hemminger 	.ndo_get_stats64     = virtnet_stats,
38941824a989SAlex Williamson 	.ndo_vlan_rx_add_vid = virtnet_vlan_rx_add_vid,
38951824a989SAlex Williamson 	.ndo_vlan_rx_kill_vid = virtnet_vlan_rx_kill_vid,
3896f4e63525SJakub Kicinski 	.ndo_bpf		= virtnet_xdp,
3897186b3c99SJason Wang 	.ndo_xdp_xmit		= virtnet_xdp_xmit,
38982836b4f2SVlad Yasevich 	.ndo_features_check	= passthru_features_check,
3899ba5e4426SSridhar Samudrala 	.ndo_get_phys_port_name	= virtnet_get_phys_port_name,
3900a02e8964SWillem de Bruijn 	.ndo_set_features	= virtnet_set_features,
3901a520794bSTony Lu 	.ndo_tx_timeout		= virtnet_tx_timeout,
390276288b4eSStephen Hemminger };
390376288b4eSStephen Hemminger 
3904586d17c5SJason Wang static void virtnet_config_changed_work(struct work_struct *work)
39059f4d26d0SMark McLoughlin {
3906586d17c5SJason Wang 	struct virtnet_info *vi =
3907586d17c5SJason Wang 		container_of(work, struct virtnet_info, config_work);
39089f4d26d0SMark McLoughlin 	u16 v;
39099f4d26d0SMark McLoughlin 
3910855e0c52SRusty Russell 	if (virtio_cread_feature(vi->vdev, VIRTIO_NET_F_STATUS,
3911855e0c52SRusty Russell 				 struct virtio_net_config, status, &v) < 0)
3912507613bfSMichael S. Tsirkin 		return;
3913586d17c5SJason Wang 
3914586d17c5SJason Wang 	if (v & VIRTIO_NET_S_ANNOUNCE) {
3915ee89bab1SAmerigo Wang 		netdev_notify_peers(vi->dev);
3916586d17c5SJason Wang 		virtnet_ack_link_announce(vi);
3917586d17c5SJason Wang 	}
39189f4d26d0SMark McLoughlin 
39199f4d26d0SMark McLoughlin 	/* Ignore unknown (future) status bits */
39209f4d26d0SMark McLoughlin 	v &= VIRTIO_NET_S_LINK_UP;
39219f4d26d0SMark McLoughlin 
39229f4d26d0SMark McLoughlin 	if (vi->status == v)
3923507613bfSMichael S. Tsirkin 		return;
39249f4d26d0SMark McLoughlin 
39259f4d26d0SMark McLoughlin 	vi->status = v;
39269f4d26d0SMark McLoughlin 
39279f4d26d0SMark McLoughlin 	if (vi->status & VIRTIO_NET_S_LINK_UP) {
3928faa9b39fSJason Baron 		virtnet_update_settings(vi);
39299f4d26d0SMark McLoughlin 		netif_carrier_on(vi->dev);
3930986a4f4dSJason Wang 		netif_tx_wake_all_queues(vi->dev);
39319f4d26d0SMark McLoughlin 	} else {
39329f4d26d0SMark McLoughlin 		netif_carrier_off(vi->dev);
3933986a4f4dSJason Wang 		netif_tx_stop_all_queues(vi->dev);
39349f4d26d0SMark McLoughlin 	}
39359f4d26d0SMark McLoughlin }
39369f4d26d0SMark McLoughlin 
39379f4d26d0SMark McLoughlin static void virtnet_config_changed(struct virtio_device *vdev)
39389f4d26d0SMark McLoughlin {
39399f4d26d0SMark McLoughlin 	struct virtnet_info *vi = vdev->priv;
39409f4d26d0SMark McLoughlin 
39413b07e9caSTejun Heo 	schedule_work(&vi->config_work);
39429f4d26d0SMark McLoughlin }
39439f4d26d0SMark McLoughlin 
3944986a4f4dSJason Wang static void virtnet_free_queues(struct virtnet_info *vi)
3945986a4f4dSJason Wang {
3946d4fb84eeSAndrey Vagin 	int i;
3947d4fb84eeSAndrey Vagin 
3948ab3971b1SJason Wang 	for (i = 0; i < vi->max_queue_pairs; i++) {
39495198d545SJakub Kicinski 		__netif_napi_del(&vi->rq[i].napi);
39505198d545SJakub Kicinski 		__netif_napi_del(&vi->sq[i].napi);
3951ab3971b1SJason Wang 	}
3952d4fb84eeSAndrey Vagin 
39535198d545SJakub Kicinski 	/* We called __netif_napi_del(),
3954963abe5cSEric Dumazet 	 * we need to respect an RCU grace period before freeing vi->rq
3955963abe5cSEric Dumazet 	 */
3956963abe5cSEric Dumazet 	synchronize_net();
3957963abe5cSEric Dumazet 
3958986a4f4dSJason Wang 	kfree(vi->rq);
3959986a4f4dSJason Wang 	kfree(vi->sq);
396012e57169SMichael S. Tsirkin 	kfree(vi->ctrl);
3961986a4f4dSJason Wang }
3962986a4f4dSJason Wang 
396347315329SJohn Fastabend static void _free_receive_bufs(struct virtnet_info *vi)
3964986a4f4dSJason Wang {
3965f600b690SJohn Fastabend 	struct bpf_prog *old_prog;
3966986a4f4dSJason Wang 	int i;
3967986a4f4dSJason Wang 
3968986a4f4dSJason Wang 	for (i = 0; i < vi->max_queue_pairs; i++) {
3969986a4f4dSJason Wang 		while (vi->rq[i].pages)
3970986a4f4dSJason Wang 			__free_pages(get_a_page(&vi->rq[i], GFP_KERNEL), 0);
3971f600b690SJohn Fastabend 
3972f600b690SJohn Fastabend 		old_prog = rtnl_dereference(vi->rq[i].xdp_prog);
3973f600b690SJohn Fastabend 		RCU_INIT_POINTER(vi->rq[i].xdp_prog, NULL);
3974f600b690SJohn Fastabend 		if (old_prog)
3975f600b690SJohn Fastabend 			bpf_prog_put(old_prog);
3976986a4f4dSJason Wang 	}
397747315329SJohn Fastabend }
397847315329SJohn Fastabend 
397947315329SJohn Fastabend static void free_receive_bufs(struct virtnet_info *vi)
398047315329SJohn Fastabend {
398147315329SJohn Fastabend 	rtnl_lock();
398247315329SJohn Fastabend 	_free_receive_bufs(vi);
3983f600b690SJohn Fastabend 	rtnl_unlock();
3984986a4f4dSJason Wang }
3985986a4f4dSJason Wang 
3986fb51879dSMichael Dalton static void free_receive_page_frags(struct virtnet_info *vi)
3987fb51879dSMichael Dalton {
3988fb51879dSMichael Dalton 	int i;
3989fb51879dSMichael Dalton 	for (i = 0; i < vi->max_queue_pairs; i++)
3990295525e2SXuan Zhuo 		if (vi->rq[i].alloc_frag.page) {
3991295525e2SXuan Zhuo 			if (vi->rq[i].do_dma && vi->rq[i].last_dma)
3992295525e2SXuan Zhuo 				virtnet_rq_unmap(&vi->rq[i], vi->rq[i].last_dma, 0);
3993fb51879dSMichael Dalton 			put_page(vi->rq[i].alloc_frag.page);
3994fb51879dSMichael Dalton 		}
3995295525e2SXuan Zhuo }
3996fb51879dSMichael Dalton 
39976e345f8cSXuan Zhuo static void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf)
39986e345f8cSXuan Zhuo {
39996e345f8cSXuan Zhuo 	if (!is_xdp_frame(buf))
40006e345f8cSXuan Zhuo 		dev_kfree_skb(buf);
40016e345f8cSXuan Zhuo 	else
40026e345f8cSXuan Zhuo 		xdp_return_frame(ptr_to_xdp(buf));
40036e345f8cSXuan Zhuo }
40046e345f8cSXuan Zhuo 
40056e345f8cSXuan Zhuo static void virtnet_rq_free_unused_buf(struct virtqueue *vq, void *buf)
40066e345f8cSXuan Zhuo {
40076e345f8cSXuan Zhuo 	struct virtnet_info *vi = vq->vdev->priv;
40086e345f8cSXuan Zhuo 	int i = vq2rxq(vq);
40096e345f8cSXuan Zhuo 
40106e345f8cSXuan Zhuo 	if (vi->mergeable_rx_bufs)
40116e345f8cSXuan Zhuo 		put_page(virt_to_head_page(buf));
40126e345f8cSXuan Zhuo 	else if (vi->big_packets)
40136e345f8cSXuan Zhuo 		give_pages(&vi->rq[i], buf);
40146e345f8cSXuan Zhuo 	else
40156e345f8cSXuan Zhuo 		put_page(virt_to_head_page(buf));
40166e345f8cSXuan Zhuo }
40176e345f8cSXuan Zhuo 
4018986a4f4dSJason Wang static void free_unused_bufs(struct virtnet_info *vi)
4019986a4f4dSJason Wang {
4020986a4f4dSJason Wang 	void *buf;
4021986a4f4dSJason Wang 	int i;
4022986a4f4dSJason Wang 
4023986a4f4dSJason Wang 	for (i = 0; i < vi->max_queue_pairs; i++) {
4024986a4f4dSJason Wang 		struct virtqueue *vq = vi->sq[i].vq;
40256e345f8cSXuan Zhuo 		while ((buf = virtqueue_detach_unused_buf(vq)) != NULL)
40266e345f8cSXuan Zhuo 			virtnet_sq_free_unused_buf(vq, buf);
4027f8bb5104SWenliang Wang 		cond_resched();
4028986a4f4dSJason Wang 	}
4029986a4f4dSJason Wang 
4030986a4f4dSJason Wang 	for (i = 0; i < vi->max_queue_pairs; i++) {
4031295525e2SXuan Zhuo 		struct receive_queue *rq = &vi->rq[i];
4032295525e2SXuan Zhuo 
4033295525e2SXuan Zhuo 		while ((buf = virtnet_rq_detach_unused_buf(rq)) != NULL)
4034295525e2SXuan Zhuo 			virtnet_rq_free_unused_buf(rq->vq, buf);
4035f8bb5104SWenliang Wang 		cond_resched();
4036986a4f4dSJason Wang 	}
4037ab7db917SMichael Dalton }
4038986a4f4dSJason Wang 
4039e9d7417bSJason Wang static void virtnet_del_vqs(struct virtnet_info *vi)
4040e9d7417bSJason Wang {
4041e9d7417bSJason Wang 	struct virtio_device *vdev = vi->vdev;
4042e9d7417bSJason Wang 
4043310974faSPeter Xu 	virtnet_clean_affinity(vi);
4044986a4f4dSJason Wang 
4045e9d7417bSJason Wang 	vdev->config->del_vqs(vdev);
4046986a4f4dSJason Wang 
4047986a4f4dSJason Wang 	virtnet_free_queues(vi);
4048986a4f4dSJason Wang }
4049986a4f4dSJason Wang 
4050d85b758fSMichael S. Tsirkin /* How large should a single buffer be so a queue full of these can fit at
4051d85b758fSMichael S. Tsirkin  * least one full packet?
4052d85b758fSMichael S. Tsirkin  * Logic below assumes the mergeable buffer header is used.
4053d85b758fSMichael S. Tsirkin  */
4054d85b758fSMichael S. Tsirkin static unsigned int mergeable_min_buf_len(struct virtnet_info *vi, struct virtqueue *vq)
4055d85b758fSMichael S. Tsirkin {
4056c1ddc42dSAndrew Melnychenko 	const unsigned int hdr_len = vi->hdr_len;
4057d85b758fSMichael S. Tsirkin 	unsigned int rq_size = virtqueue_get_vring_size(vq);
4058d85b758fSMichael S. Tsirkin 	unsigned int packet_len = vi->big_packets ? IP_MAX_MTU : vi->dev->max_mtu;
4059d85b758fSMichael S. Tsirkin 	unsigned int buf_len = hdr_len + ETH_HLEN + VLAN_HLEN + packet_len;
4060d85b758fSMichael S. Tsirkin 	unsigned int min_buf_len = DIV_ROUND_UP(buf_len, rq_size);
4061d85b758fSMichael S. Tsirkin 
4062f0c3192cSMichael S. Tsirkin 	return max(max(min_buf_len, hdr_len) - hdr_len,
4063f0c3192cSMichael S. Tsirkin 		   (unsigned int)GOOD_PACKET_LEN);
4064d85b758fSMichael S. Tsirkin }
4065d85b758fSMichael S. Tsirkin 
4066986a4f4dSJason Wang static int virtnet_find_vqs(struct virtnet_info *vi)
4067986a4f4dSJason Wang {
4068986a4f4dSJason Wang 	vq_callback_t **callbacks;
4069986a4f4dSJason Wang 	struct virtqueue **vqs;
4070986a4f4dSJason Wang 	int ret = -ENOMEM;
4071986a4f4dSJason Wang 	int i, total_vqs;
4072986a4f4dSJason Wang 	const char **names;
4073d45b897bSMichael S. Tsirkin 	bool *ctx;
4074986a4f4dSJason Wang 
4075986a4f4dSJason Wang 	/* We expect 1 RX virtqueue followed by 1 TX virtqueue, followed by
4076986a4f4dSJason Wang 	 * possible N-1 RX/TX queue pairs used in multiqueue mode, followed by
4077986a4f4dSJason Wang 	 * possible control vq.
4078986a4f4dSJason Wang 	 */
4079986a4f4dSJason Wang 	total_vqs = vi->max_queue_pairs * 2 +
4080986a4f4dSJason Wang 		    virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ);
4081986a4f4dSJason Wang 
4082986a4f4dSJason Wang 	/* Allocate space for find_vqs parameters */
40836396bb22SKees Cook 	vqs = kcalloc(total_vqs, sizeof(*vqs), GFP_KERNEL);
4084986a4f4dSJason Wang 	if (!vqs)
4085986a4f4dSJason Wang 		goto err_vq;
40866da2ec56SKees Cook 	callbacks = kmalloc_array(total_vqs, sizeof(*callbacks), GFP_KERNEL);
4087986a4f4dSJason Wang 	if (!callbacks)
4088986a4f4dSJason Wang 		goto err_callback;
40896da2ec56SKees Cook 	names = kmalloc_array(total_vqs, sizeof(*names), GFP_KERNEL);
4090986a4f4dSJason Wang 	if (!names)
4091986a4f4dSJason Wang 		goto err_names;
4092192f68cfSJason Wang 	if (!vi->big_packets || vi->mergeable_rx_bufs) {
40936396bb22SKees Cook 		ctx = kcalloc(total_vqs, sizeof(*ctx), GFP_KERNEL);
4094d45b897bSMichael S. Tsirkin 		if (!ctx)
4095d45b897bSMichael S. Tsirkin 			goto err_ctx;
4096d45b897bSMichael S. Tsirkin 	} else {
4097d45b897bSMichael S. Tsirkin 		ctx = NULL;
4098d45b897bSMichael S. Tsirkin 	}
4099986a4f4dSJason Wang 
4100986a4f4dSJason Wang 	/* Parameters for control virtqueue, if any */
4101986a4f4dSJason Wang 	if (vi->has_cvq) {
4102986a4f4dSJason Wang 		callbacks[total_vqs - 1] = NULL;
4103986a4f4dSJason Wang 		names[total_vqs - 1] = "control";
4104986a4f4dSJason Wang 	}
4105986a4f4dSJason Wang 
4106986a4f4dSJason Wang 	/* Allocate/initialize parameters for send/receive virtqueues */
4107986a4f4dSJason Wang 	for (i = 0; i < vi->max_queue_pairs; i++) {
4108986a4f4dSJason Wang 		callbacks[rxq2vq(i)] = skb_recv_done;
4109986a4f4dSJason Wang 		callbacks[txq2vq(i)] = skb_xmit_done;
4110986a4f4dSJason Wang 		sprintf(vi->rq[i].name, "input.%d", i);
4111986a4f4dSJason Wang 		sprintf(vi->sq[i].name, "output.%d", i);
4112986a4f4dSJason Wang 		names[rxq2vq(i)] = vi->rq[i].name;
4113986a4f4dSJason Wang 		names[txq2vq(i)] = vi->sq[i].name;
4114d45b897bSMichael S. Tsirkin 		if (ctx)
4115d45b897bSMichael S. Tsirkin 			ctx[rxq2vq(i)] = true;
4116986a4f4dSJason Wang 	}
4117986a4f4dSJason Wang 
41182e9ca760SMichael S. Tsirkin 	ret = virtio_find_vqs_ctx(vi->vdev, total_vqs, vqs, callbacks,
41192e9ca760SMichael S. Tsirkin 				  names, ctx, NULL);
4120986a4f4dSJason Wang 	if (ret)
4121986a4f4dSJason Wang 		goto err_find;
4122986a4f4dSJason Wang 
4123986a4f4dSJason Wang 	if (vi->has_cvq) {
4124986a4f4dSJason Wang 		vi->cvq = vqs[total_vqs - 1];
4125986a4f4dSJason Wang 		if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VLAN))
4126f646968fSPatrick McHardy 			vi->dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
4127986a4f4dSJason Wang 	}
4128986a4f4dSJason Wang 
4129986a4f4dSJason Wang 	for (i = 0; i < vi->max_queue_pairs; i++) {
4130986a4f4dSJason Wang 		vi->rq[i].vq = vqs[rxq2vq(i)];
4131d85b758fSMichael S. Tsirkin 		vi->rq[i].min_buf_len = mergeable_min_buf_len(vi, vi->rq[i].vq);
4132986a4f4dSJason Wang 		vi->sq[i].vq = vqs[txq2vq(i)];
4133986a4f4dSJason Wang 	}
4134986a4f4dSJason Wang 
41352fa3c8a8STonghao Zhang 	/* run here: ret == 0. */
4136986a4f4dSJason Wang 
4137986a4f4dSJason Wang 
4138986a4f4dSJason Wang err_find:
4139d45b897bSMichael S. Tsirkin 	kfree(ctx);
4140d45b897bSMichael S. Tsirkin err_ctx:
4141986a4f4dSJason Wang 	kfree(names);
4142986a4f4dSJason Wang err_names:
4143986a4f4dSJason Wang 	kfree(callbacks);
4144986a4f4dSJason Wang err_callback:
4145986a4f4dSJason Wang 	kfree(vqs);
4146986a4f4dSJason Wang err_vq:
4147986a4f4dSJason Wang 	return ret;
4148986a4f4dSJason Wang }
4149986a4f4dSJason Wang 
4150986a4f4dSJason Wang static int virtnet_alloc_queues(struct virtnet_info *vi)
4151986a4f4dSJason Wang {
4152986a4f4dSJason Wang 	int i;
4153986a4f4dSJason Wang 
4154122b84a1SMax Gurtovoy 	if (vi->has_cvq) {
415512e57169SMichael S. Tsirkin 		vi->ctrl = kzalloc(sizeof(*vi->ctrl), GFP_KERNEL);
415612e57169SMichael S. Tsirkin 		if (!vi->ctrl)
415712e57169SMichael S. Tsirkin 			goto err_ctrl;
4158122b84a1SMax Gurtovoy 	} else {
4159122b84a1SMax Gurtovoy 		vi->ctrl = NULL;
4160122b84a1SMax Gurtovoy 	}
41616396bb22SKees Cook 	vi->sq = kcalloc(vi->max_queue_pairs, sizeof(*vi->sq), GFP_KERNEL);
4162986a4f4dSJason Wang 	if (!vi->sq)
4163986a4f4dSJason Wang 		goto err_sq;
41646396bb22SKees Cook 	vi->rq = kcalloc(vi->max_queue_pairs, sizeof(*vi->rq), GFP_KERNEL);
4165008d4278SAmerigo Wang 	if (!vi->rq)
4166986a4f4dSJason Wang 		goto err_rq;
4167986a4f4dSJason Wang 
4168986a4f4dSJason Wang 	INIT_DELAYED_WORK(&vi->refill, refill_work);
4169986a4f4dSJason Wang 	for (i = 0; i < vi->max_queue_pairs; i++) {
4170986a4f4dSJason Wang 		vi->rq[i].pages = NULL;
4171d484735dSJakub Kicinski 		netif_napi_add_weight(vi->dev, &vi->rq[i].napi, virtnet_poll,
4172986a4f4dSJason Wang 				      napi_weight);
41738d602e1aSJakub Kicinski 		netif_napi_add_tx_weight(vi->dev, &vi->sq[i].napi,
41748d602e1aSJakub Kicinski 					 virtnet_poll_tx,
4175b92f1e67SWillem de Bruijn 					 napi_tx ? napi_weight : 0);
4176986a4f4dSJason Wang 
4177986a4f4dSJason Wang 		sg_init_table(vi->rq[i].sg, ARRAY_SIZE(vi->rq[i].sg));
41785377d758SJohannes Berg 		ewma_pkt_len_init(&vi->rq[i].mrg_avg_pkt_len);
4179986a4f4dSJason Wang 		sg_init_table(vi->sq[i].sg, ARRAY_SIZE(vi->sq[i].sg));
4180d7dfc5cfSToshiaki Makita 
4181d7dfc5cfSToshiaki Makita 		u64_stats_init(&vi->rq[i].stats.syncp);
4182d7dfc5cfSToshiaki Makita 		u64_stats_init(&vi->sq[i].stats.syncp);
4183986a4f4dSJason Wang 	}
4184986a4f4dSJason Wang 
4185986a4f4dSJason Wang 	return 0;
4186986a4f4dSJason Wang 
4187986a4f4dSJason Wang err_rq:
4188986a4f4dSJason Wang 	kfree(vi->sq);
4189986a4f4dSJason Wang err_sq:
419012e57169SMichael S. Tsirkin 	kfree(vi->ctrl);
419112e57169SMichael S. Tsirkin err_ctrl:
4192986a4f4dSJason Wang 	return -ENOMEM;
4193e9d7417bSJason Wang }
4194e9d7417bSJason Wang 
41953f9c10b0SAmit Shah static int init_vqs(struct virtnet_info *vi)
41963f9c10b0SAmit Shah {
4197986a4f4dSJason Wang 	int ret;
41983f9c10b0SAmit Shah 
4199986a4f4dSJason Wang 	/* Allocate send & receive queues */
4200986a4f4dSJason Wang 	ret = virtnet_alloc_queues(vi);
4201986a4f4dSJason Wang 	if (ret)
4202986a4f4dSJason Wang 		goto err;
42033f9c10b0SAmit Shah 
4204986a4f4dSJason Wang 	ret = virtnet_find_vqs(vi);
4205986a4f4dSJason Wang 	if (ret)
4206986a4f4dSJason Wang 		goto err_free;
42073f9c10b0SAmit Shah 
4208295525e2SXuan Zhuo 	virtnet_rq_set_premapped(vi);
4209295525e2SXuan Zhuo 
4210a0d1d0f4SSebastian Andrzej Siewior 	cpus_read_lock();
42118898c21cSWanlong Gao 	virtnet_set_affinity(vi);
4212a0d1d0f4SSebastian Andrzej Siewior 	cpus_read_unlock();
421347be2479SWanlong Gao 
42143f9c10b0SAmit Shah 	return 0;
4215986a4f4dSJason Wang 
4216986a4f4dSJason Wang err_free:
4217986a4f4dSJason Wang 	virtnet_free_queues(vi);
4218986a4f4dSJason Wang err:
4219986a4f4dSJason Wang 	return ret;
42203f9c10b0SAmit Shah }
42213f9c10b0SAmit Shah 
4222fbf28d78SMichael Dalton #ifdef CONFIG_SYSFS
4223fbf28d78SMichael Dalton static ssize_t mergeable_rx_buffer_size_show(struct netdev_rx_queue *queue,
4224718ad681Sstephen hemminger 		char *buf)
4225fbf28d78SMichael Dalton {
4226fbf28d78SMichael Dalton 	struct virtnet_info *vi = netdev_priv(queue->dev);
4227fbf28d78SMichael Dalton 	unsigned int queue_index = get_netdev_rx_queue_index(queue);
42283cc81a9aSJason Wang 	unsigned int headroom = virtnet_get_headroom(vi);
42293cc81a9aSJason Wang 	unsigned int tailroom = headroom ? sizeof(struct skb_shared_info) : 0;
42305377d758SJohannes Berg 	struct ewma_pkt_len *avg;
4231fbf28d78SMichael Dalton 
4232fbf28d78SMichael Dalton 	BUG_ON(queue_index >= vi->max_queue_pairs);
4233fbf28d78SMichael Dalton 	avg = &vi->rq[queue_index].mrg_avg_pkt_len;
4234d85b758fSMichael S. Tsirkin 	return sprintf(buf, "%u\n",
42353cc81a9aSJason Wang 		       get_mergeable_buf_len(&vi->rq[queue_index], avg,
42363cc81a9aSJason Wang 				       SKB_DATA_ALIGN(headroom + tailroom)));
4237fbf28d78SMichael Dalton }
4238fbf28d78SMichael Dalton 
4239fbf28d78SMichael Dalton static struct rx_queue_attribute mergeable_rx_buffer_size_attribute =
4240fbf28d78SMichael Dalton 	__ATTR_RO(mergeable_rx_buffer_size);
4241fbf28d78SMichael Dalton 
4242fbf28d78SMichael Dalton static struct attribute *virtio_net_mrg_rx_attrs[] = {
4243fbf28d78SMichael Dalton 	&mergeable_rx_buffer_size_attribute.attr,
4244fbf28d78SMichael Dalton 	NULL
4245fbf28d78SMichael Dalton };
4246fbf28d78SMichael Dalton 
4247fbf28d78SMichael Dalton static const struct attribute_group virtio_net_mrg_rx_group = {
4248fbf28d78SMichael Dalton 	.name = "virtio_net",
4249fbf28d78SMichael Dalton 	.attrs = virtio_net_mrg_rx_attrs
4250fbf28d78SMichael Dalton };
4251fbf28d78SMichael Dalton #endif
4252fbf28d78SMichael Dalton 
4253892d6eb1SJason Wang static bool virtnet_fail_on_feature(struct virtio_device *vdev,
4254892d6eb1SJason Wang 				    unsigned int fbit,
4255892d6eb1SJason Wang 				    const char *fname, const char *dname)
4256892d6eb1SJason Wang {
4257892d6eb1SJason Wang 	if (!virtio_has_feature(vdev, fbit))
4258892d6eb1SJason Wang 		return false;
4259892d6eb1SJason Wang 
4260892d6eb1SJason Wang 	dev_err(&vdev->dev, "device advertises feature %s but not %s",
4261892d6eb1SJason Wang 		fname, dname);
4262892d6eb1SJason Wang 
4263892d6eb1SJason Wang 	return true;
4264892d6eb1SJason Wang }
4265892d6eb1SJason Wang 
4266892d6eb1SJason Wang #define VIRTNET_FAIL_ON(vdev, fbit, dbit)			\
4267892d6eb1SJason Wang 	virtnet_fail_on_feature(vdev, fbit, #fbit, dbit)
4268892d6eb1SJason Wang 
4269892d6eb1SJason Wang static bool virtnet_validate_features(struct virtio_device *vdev)
4270892d6eb1SJason Wang {
4271892d6eb1SJason Wang 	if (!virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ) &&
4272892d6eb1SJason Wang 	    (VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_CTRL_RX,
4273892d6eb1SJason Wang 			     "VIRTIO_NET_F_CTRL_VQ") ||
4274892d6eb1SJason Wang 	     VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_CTRL_VLAN,
4275892d6eb1SJason Wang 			     "VIRTIO_NET_F_CTRL_VQ") ||
4276892d6eb1SJason Wang 	     VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_GUEST_ANNOUNCE,
4277892d6eb1SJason Wang 			     "VIRTIO_NET_F_CTRL_VQ") ||
4278892d6eb1SJason Wang 	     VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_MQ, "VIRTIO_NET_F_CTRL_VQ") ||
4279892d6eb1SJason Wang 	     VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR,
4280c7114b12SAndrew Melnychenko 			     "VIRTIO_NET_F_CTRL_VQ") ||
4281c7114b12SAndrew Melnychenko 	     VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_RSS,
428291f41f01SAndrew Melnychenko 			     "VIRTIO_NET_F_CTRL_VQ") ||
428391f41f01SAndrew Melnychenko 	     VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_HASH_REPORT,
4284699b045aSAlvaro Karsz 			     "VIRTIO_NET_F_CTRL_VQ") ||
4285699b045aSAlvaro Karsz 	     VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_NOTF_COAL,
42868af3bf66SGavin Li 			     "VIRTIO_NET_F_CTRL_VQ") ||
42878af3bf66SGavin Li 	     VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_VQ_NOTF_COAL,
4288892d6eb1SJason Wang 			     "VIRTIO_NET_F_CTRL_VQ"))) {
4289892d6eb1SJason Wang 		return false;
4290892d6eb1SJason Wang 	}
4291892d6eb1SJason Wang 
4292892d6eb1SJason Wang 	return true;
4293892d6eb1SJason Wang }
4294892d6eb1SJason Wang 
4295d0c2c997SJarod Wilson #define MIN_MTU ETH_MIN_MTU
4296d0c2c997SJarod Wilson #define MAX_MTU ETH_MAX_MTU
4297d0c2c997SJarod Wilson 
4298fe36cbe0SMichael S. Tsirkin static int virtnet_validate(struct virtio_device *vdev)
4299296f96fcSRusty Russell {
43006ba42248SMichael S. Tsirkin 	if (!vdev->config->get) {
43016ba42248SMichael S. Tsirkin 		dev_err(&vdev->dev, "%s failure: config access disabled\n",
43026ba42248SMichael S. Tsirkin 			__func__);
43036ba42248SMichael S. Tsirkin 		return -EINVAL;
43046ba42248SMichael S. Tsirkin 	}
43056ba42248SMichael S. Tsirkin 
4306892d6eb1SJason Wang 	if (!virtnet_validate_features(vdev))
4307892d6eb1SJason Wang 		return -EINVAL;
4308892d6eb1SJason Wang 
4309fe36cbe0SMichael S. Tsirkin 	if (virtio_has_feature(vdev, VIRTIO_NET_F_MTU)) {
4310fe36cbe0SMichael S. Tsirkin 		int mtu = virtio_cread16(vdev,
4311fe36cbe0SMichael S. Tsirkin 					 offsetof(struct virtio_net_config,
4312fe36cbe0SMichael S. Tsirkin 						  mtu));
4313fe36cbe0SMichael S. Tsirkin 		if (mtu < MIN_MTU)
4314fe36cbe0SMichael S. Tsirkin 			__virtio_clear_bit(vdev, VIRTIO_NET_F_MTU);
4315fe36cbe0SMichael S. Tsirkin 	}
4316fe36cbe0SMichael S. Tsirkin 
43177c06458cSLaurent Vivier 	if (virtio_has_feature(vdev, VIRTIO_NET_F_STANDBY) &&
43187c06458cSLaurent Vivier 	    !virtio_has_feature(vdev, VIRTIO_NET_F_MAC)) {
43197c06458cSLaurent Vivier 		dev_warn(&vdev->dev, "device advertises feature VIRTIO_NET_F_STANDBY but not VIRTIO_NET_F_MAC, disabling standby");
43207c06458cSLaurent Vivier 		__virtio_clear_bit(vdev, VIRTIO_NET_F_STANDBY);
43217c06458cSLaurent Vivier 	}
43227c06458cSLaurent Vivier 
4323fe36cbe0SMichael S. Tsirkin 	return 0;
4324fe36cbe0SMichael S. Tsirkin }
4325fe36cbe0SMichael S. Tsirkin 
432646cd26f4SGavin Li static bool virtnet_check_guest_gso(const struct virtnet_info *vi)
432746cd26f4SGavin Li {
432846cd26f4SGavin Li 	return virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO4) ||
432946cd26f4SGavin Li 		virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO6) ||
433046cd26f4SGavin Li 		virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_ECN) ||
4331418044e1SAndrew Melnychenko 		virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_UFO) ||
4332418044e1SAndrew Melnychenko 		(virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_USO4) &&
4333418044e1SAndrew Melnychenko 		virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_USO6));
433446cd26f4SGavin Li }
433546cd26f4SGavin Li 
43364959aebbSGavin Li static void virtnet_set_big_packets(struct virtnet_info *vi, const int mtu)
43374959aebbSGavin Li {
43384959aebbSGavin Li 	bool guest_gso = virtnet_check_guest_gso(vi);
43394959aebbSGavin Li 
43404959aebbSGavin Li 	/* If device can receive ANY guest GSO packets, regardless of mtu,
43414959aebbSGavin Li 	 * allocate packets of maximum size, otherwise limit it to only
43424959aebbSGavin Li 	 * mtu size worth only.
43434959aebbSGavin Li 	 */
43444959aebbSGavin Li 	if (mtu > ETH_DATA_LEN || guest_gso) {
43454959aebbSGavin Li 		vi->big_packets = true;
43464959aebbSGavin Li 		vi->big_packets_num_skbfrags = guest_gso ? MAX_SKB_FRAGS : DIV_ROUND_UP(mtu, PAGE_SIZE);
43474959aebbSGavin Li 	}
43484959aebbSGavin Li }
43494959aebbSGavin Li 
4350fe36cbe0SMichael S. Tsirkin static int virtnet_probe(struct virtio_device *vdev)
4351fe36cbe0SMichael S. Tsirkin {
4352d7dfc5cfSToshiaki Makita 	int i, err = -ENOMEM;
4353fe36cbe0SMichael S. Tsirkin 	struct net_device *dev;
4354fe36cbe0SMichael S. Tsirkin 	struct virtnet_info *vi;
4355fe36cbe0SMichael S. Tsirkin 	u16 max_queue_pairs;
43564959aebbSGavin Li 	int mtu = 0;
4357fe36cbe0SMichael S. Tsirkin 
4358c7114b12SAndrew Melnychenko 	/* Find if host supports multiqueue/rss virtio_net device */
4359c7114b12SAndrew Melnychenko 	max_queue_pairs = 1;
4360c7114b12SAndrew Melnychenko 	if (virtio_has_feature(vdev, VIRTIO_NET_F_MQ) || virtio_has_feature(vdev, VIRTIO_NET_F_RSS))
4361c7114b12SAndrew Melnychenko 		max_queue_pairs =
4362c7114b12SAndrew Melnychenko 		     virtio_cread16(vdev, offsetof(struct virtio_net_config, max_virtqueue_pairs));
4363986a4f4dSJason Wang 
4364986a4f4dSJason Wang 	/* We need at least 2 queue's */
4365c7114b12SAndrew Melnychenko 	if (max_queue_pairs < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN ||
4366986a4f4dSJason Wang 	    max_queue_pairs > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX ||
4367986a4f4dSJason Wang 	    !virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ))
4368986a4f4dSJason Wang 		max_queue_pairs = 1;
4369296f96fcSRusty Russell 
4370296f96fcSRusty Russell 	/* Allocate ourselves a network device with room for our info */
4371986a4f4dSJason Wang 	dev = alloc_etherdev_mq(sizeof(struct virtnet_info), max_queue_pairs);
4372296f96fcSRusty Russell 	if (!dev)
4373296f96fcSRusty Russell 		return -ENOMEM;
4374296f96fcSRusty Russell 
4375296f96fcSRusty Russell 	/* Set up network device as normal. */
4376ab5bd583SXuan Zhuo 	dev->priv_flags |= IFF_UNICAST_FLT | IFF_LIVE_ADDR_CHANGE |
4377ab5bd583SXuan Zhuo 			   IFF_TX_SKB_NO_LINEAR;
437876288b4eSStephen Hemminger 	dev->netdev_ops = &virtnet_netdev;
4379296f96fcSRusty Russell 	dev->features = NETIF_F_HIGHDMA;
43803fa2a1dfSstephen hemminger 
43817ad24ea4SWilfried Klaebe 	dev->ethtool_ops = &virtnet_ethtool_ops;
4382296f96fcSRusty Russell 	SET_NETDEV_DEV(dev, &vdev->dev);
4383296f96fcSRusty Russell 
4384296f96fcSRusty Russell 	/* Do we support "hardware" checksums? */
438598e778c9SMichał Mirosław 	if (virtio_has_feature(vdev, VIRTIO_NET_F_CSUM)) {
4386296f96fcSRusty Russell 		/* This opens up the world of extra features. */
438748900cb6SJason Wang 		dev->hw_features |= NETIF_F_HW_CSUM | NETIF_F_SG;
438898e778c9SMichał Mirosław 		if (csum)
438948900cb6SJason Wang 			dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG;
439098e778c9SMichał Mirosław 
439198e778c9SMichał Mirosław 		if (virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) {
4392e078de03SDavid S. Miller 			dev->hw_features |= NETIF_F_TSO
439334a48579SRusty Russell 				| NETIF_F_TSO_ECN | NETIF_F_TSO6;
439434a48579SRusty Russell 		}
43955539ae96SRusty Russell 		/* Individual feature bits: what can host handle? */
439698e778c9SMichał Mirosław 		if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO4))
439798e778c9SMichał Mirosław 			dev->hw_features |= NETIF_F_TSO;
439898e778c9SMichał Mirosław 		if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO6))
439998e778c9SMichał Mirosław 			dev->hw_features |= NETIF_F_TSO6;
440098e778c9SMichał Mirosław 		if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_ECN))
440198e778c9SMichał Mirosław 			dev->hw_features |= NETIF_F_TSO_ECN;
4402418044e1SAndrew Melnychenko 		if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_USO))
4403418044e1SAndrew Melnychenko 			dev->hw_features |= NETIF_F_GSO_UDP_L4;
440498e778c9SMichał Mirosław 
440541f2f127SJason Wang 		dev->features |= NETIF_F_GSO_ROBUST;
440641f2f127SJason Wang 
440798e778c9SMichał Mirosław 		if (gso)
4408e078de03SDavid S. Miller 			dev->features |= dev->hw_features & NETIF_F_ALL_TSO;
440998e778c9SMichał Mirosław 		/* (!csum && gso) case will be fixed by register_netdev() */
4410296f96fcSRusty Russell 	}
44114f49129bSThomas Huth 	if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_CSUM))
44124f49129bSThomas Huth 		dev->features |= NETIF_F_RXCSUM;
4413a02e8964SWillem de Bruijn 	if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) ||
4414a02e8964SWillem de Bruijn 	    virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6))
4415dbcf24d1SJason Wang 		dev->features |= NETIF_F_GRO_HW;
4416cf8691cbSMichael S. Tsirkin 	if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS))
4417dbcf24d1SJason Wang 		dev->hw_features |= NETIF_F_GRO_HW;
4418296f96fcSRusty Russell 
44194fda8302SJason Wang 	dev->vlan_features = dev->features;
442066c0e13aSMarek Majtyka 	dev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT;
44214fda8302SJason Wang 
4422d0c2c997SJarod Wilson 	/* MTU range: 68 - 65535 */
4423d0c2c997SJarod Wilson 	dev->min_mtu = MIN_MTU;
4424d0c2c997SJarod Wilson 	dev->max_mtu = MAX_MTU;
4425d0c2c997SJarod Wilson 
4426296f96fcSRusty Russell 	/* Configuration may specify what MAC to use.  Otherwise random. */
4427f2edaa4aSJakub Kicinski 	if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC)) {
4428f2edaa4aSJakub Kicinski 		u8 addr[ETH_ALEN];
4429f2edaa4aSJakub Kicinski 
4430855e0c52SRusty Russell 		virtio_cread_bytes(vdev,
4431a586d4f6SRusty Russell 				   offsetof(struct virtio_net_config, mac),
4432f2edaa4aSJakub Kicinski 				   addr, ETH_ALEN);
4433f2edaa4aSJakub Kicinski 		eth_hw_addr_set(dev, addr);
4434f2edaa4aSJakub Kicinski 	} else {
4435f2cedb63SDanny Kukawka 		eth_hw_addr_random(dev);
44369f62d221SLaurent Vivier 		dev_info(&vdev->dev, "Assigned random MAC address %pM\n",
44379f62d221SLaurent Vivier 			 dev->dev_addr);
4438f2edaa4aSJakub Kicinski 	}
4439296f96fcSRusty Russell 
4440296f96fcSRusty Russell 	/* Set up our device-specific information */
4441296f96fcSRusty Russell 	vi = netdev_priv(dev);
4442296f96fcSRusty Russell 	vi->dev = dev;
4443296f96fcSRusty Russell 	vi->vdev = vdev;
4444d9d5dcc8SChristian Borntraeger 	vdev->priv = vi;
4445827da44cSJohn Stultz 
4446586d17c5SJason Wang 	INIT_WORK(&vi->config_work, virtnet_config_changed_work);
44475a159128SJason Wang 	spin_lock_init(&vi->refill_lock);
4448296f96fcSRusty Russell 
444930bbf891SLorenzo Bianconi 	if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF)) {
44503f2c31d9SMark McLoughlin 		vi->mergeable_rx_bufs = true;
445130bbf891SLorenzo Bianconi 		dev->xdp_features |= NETDEV_XDP_ACT_RX_SG;
445230bbf891SLorenzo Bianconi 	}
44533f2c31d9SMark McLoughlin 
4454699b045aSAlvaro Karsz 	if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_NOTF_COAL)) {
4455308d7982SGavin Li 		vi->intr_coal_rx.max_usecs = 0;
4456308d7982SGavin Li 		vi->intr_coal_tx.max_usecs = 0;
4457308d7982SGavin Li 		vi->intr_coal_tx.max_packets = 0;
4458308d7982SGavin Li 		vi->intr_coal_rx.max_packets = 0;
4459699b045aSAlvaro Karsz 	}
4460699b045aSAlvaro Karsz 
446191f41f01SAndrew Melnychenko 	if (virtio_has_feature(vdev, VIRTIO_NET_F_HASH_REPORT))
446291f41f01SAndrew Melnychenko 		vi->has_rss_hash_report = true;
446391f41f01SAndrew Melnychenko 
446491f41f01SAndrew Melnychenko 	if (virtio_has_feature(vdev, VIRTIO_NET_F_RSS))
4465c7114b12SAndrew Melnychenko 		vi->has_rss = true;
446691f41f01SAndrew Melnychenko 
446791f41f01SAndrew Melnychenko 	if (vi->has_rss || vi->has_rss_hash_report) {
4468c7114b12SAndrew Melnychenko 		vi->rss_indir_table_size =
4469c7114b12SAndrew Melnychenko 			virtio_cread16(vdev, offsetof(struct virtio_net_config,
4470c7114b12SAndrew Melnychenko 				rss_max_indirection_table_length));
4471c7114b12SAndrew Melnychenko 		vi->rss_key_size =
4472c7114b12SAndrew Melnychenko 			virtio_cread8(vdev, offsetof(struct virtio_net_config, rss_max_key_size));
4473c7114b12SAndrew Melnychenko 
4474c7114b12SAndrew Melnychenko 		vi->rss_hash_types_supported =
4475c7114b12SAndrew Melnychenko 		    virtio_cread32(vdev, offsetof(struct virtio_net_config, supported_hash_types));
4476c7114b12SAndrew Melnychenko 		vi->rss_hash_types_supported &=
4477c7114b12SAndrew Melnychenko 				~(VIRTIO_NET_RSS_HASH_TYPE_IP_EX |
4478c7114b12SAndrew Melnychenko 				  VIRTIO_NET_RSS_HASH_TYPE_TCP_EX |
4479c7114b12SAndrew Melnychenko 				  VIRTIO_NET_RSS_HASH_TYPE_UDP_EX);
4480c7114b12SAndrew Melnychenko 
4481c7114b12SAndrew Melnychenko 		dev->hw_features |= NETIF_F_RXHASH;
4482c7114b12SAndrew Melnychenko 	}
448391f41f01SAndrew Melnychenko 
448491f41f01SAndrew Melnychenko 	if (vi->has_rss_hash_report)
448591f41f01SAndrew Melnychenko 		vi->hdr_len = sizeof(struct virtio_net_hdr_v1_hash);
448691f41f01SAndrew Melnychenko 	else if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF) ||
4487d04302b3SMichael S. Tsirkin 		 virtio_has_feature(vdev, VIRTIO_F_VERSION_1))
4488012873d0SMichael S. Tsirkin 		vi->hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf);
4489012873d0SMichael S. Tsirkin 	else
4490012873d0SMichael S. Tsirkin 		vi->hdr_len = sizeof(struct virtio_net_hdr);
4491012873d0SMichael S. Tsirkin 
449275993300SMichael S. Tsirkin 	if (virtio_has_feature(vdev, VIRTIO_F_ANY_LAYOUT) ||
449375993300SMichael S. Tsirkin 	    virtio_has_feature(vdev, VIRTIO_F_VERSION_1))
4494e7428e95SMichael S. Tsirkin 		vi->any_header_sg = true;
4495e7428e95SMichael S. Tsirkin 
4496986a4f4dSJason Wang 	if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ))
4497986a4f4dSJason Wang 		vi->has_cvq = true;
4498986a4f4dSJason Wang 
449914de9d11SAaron Conole 	if (virtio_has_feature(vdev, VIRTIO_NET_F_MTU)) {
450014de9d11SAaron Conole 		mtu = virtio_cread16(vdev,
450114de9d11SAaron Conole 				     offsetof(struct virtio_net_config,
450214de9d11SAaron Conole 					      mtu));
450393a205eeSAaron Conole 		if (mtu < dev->min_mtu) {
4504fe36cbe0SMichael S. Tsirkin 			/* Should never trigger: MTU was previously validated
4505fe36cbe0SMichael S. Tsirkin 			 * in virtnet_validate.
4506fe36cbe0SMichael S. Tsirkin 			 */
45077934b481SYuval Shaia 			dev_err(&vdev->dev,
45087934b481SYuval Shaia 				"device MTU appears to have changed it is now %d < %d",
45097934b481SYuval Shaia 				mtu, dev->min_mtu);
4510411ea23aSDan Carpenter 			err = -EINVAL;
4511d7dfc5cfSToshiaki Makita 			goto free;
4512fe36cbe0SMichael S. Tsirkin 		}
4513fe36cbe0SMichael S. Tsirkin 
4514d0c2c997SJarod Wilson 		dev->mtu = mtu;
451593a205eeSAaron Conole 		dev->max_mtu = mtu;
451614de9d11SAaron Conole 	}
451714de9d11SAaron Conole 
45184959aebbSGavin Li 	virtnet_set_big_packets(vi, mtu);
45194959aebbSGavin Li 
4520012873d0SMichael S. Tsirkin 	if (vi->any_header_sg)
4521012873d0SMichael S. Tsirkin 		dev->needed_headroom = vi->hdr_len;
45226ebbc1a6SZhangjie \(HZ\) 
452344900010SJason Wang 	/* Enable multiqueue by default */
452444900010SJason Wang 	if (num_online_cpus() >= max_queue_pairs)
452544900010SJason Wang 		vi->curr_queue_pairs = max_queue_pairs;
452644900010SJason Wang 	else
452744900010SJason Wang 		vi->curr_queue_pairs = num_online_cpus();
4528986a4f4dSJason Wang 	vi->max_queue_pairs = max_queue_pairs;
4529986a4f4dSJason Wang 
4530986a4f4dSJason Wang 	/* Allocate/initialize the rx/tx queues, and invoke find_vqs */
45313f9c10b0SAmit Shah 	err = init_vqs(vi);
4532d2a7dddaSMichael S. Tsirkin 	if (err)
4533d7dfc5cfSToshiaki Makita 		goto free;
4534d2a7dddaSMichael S. Tsirkin 
4535fbf28d78SMichael Dalton #ifdef CONFIG_SYSFS
4536fbf28d78SMichael Dalton 	if (vi->mergeable_rx_bufs)
4537fbf28d78SMichael Dalton 		dev->sysfs_rx_queue_group = &virtio_net_mrg_rx_group;
4538fbf28d78SMichael Dalton #endif
45390f13b66bSZhi Yong Wu 	netif_set_real_num_tx_queues(dev, vi->curr_queue_pairs);
45400f13b66bSZhi Yong Wu 	netif_set_real_num_rx_queues(dev, vi->curr_queue_pairs);
4541986a4f4dSJason Wang 
45422e9ca760SMichael S. Tsirkin 	virtnet_init_settings(dev);
45432e9ca760SMichael S. Tsirkin 
4544ba5e4426SSridhar Samudrala 	if (virtio_has_feature(vdev, VIRTIO_NET_F_STANDBY)) {
4545ba5e4426SSridhar Samudrala 		vi->failover = net_failover_create(vi->dev);
45464b8e6ac4SWei Yongjun 		if (IS_ERR(vi->failover)) {
45474b8e6ac4SWei Yongjun 			err = PTR_ERR(vi->failover);
4548ba5e4426SSridhar Samudrala 			goto free_vqs;
4549ba5e4426SSridhar Samudrala 		}
45504b8e6ac4SWei Yongjun 	}
4551ba5e4426SSridhar Samudrala 
455291f41f01SAndrew Melnychenko 	if (vi->has_rss || vi->has_rss_hash_report)
4553c7114b12SAndrew Melnychenko 		virtnet_init_default_rss(vi);
4554c7114b12SAndrew Melnychenko 
455550c0ada6SJason Wang 	/* serialize netdev register + virtio_device_ready() with ndo_open() */
455650c0ada6SJason Wang 	rtnl_lock();
455750c0ada6SJason Wang 
455850c0ada6SJason Wang 	err = register_netdevice(dev);
4559296f96fcSRusty Russell 	if (err) {
4560296f96fcSRusty Russell 		pr_debug("virtio_net: registering device failed\n");
456150c0ada6SJason Wang 		rtnl_unlock();
4562ba5e4426SSridhar Samudrala 		goto free_failover;
4563296f96fcSRusty Russell 	}
4564b3369c1fSRusty Russell 
45654baf1e33SMichael S. Tsirkin 	virtio_device_ready(vdev);
45664baf1e33SMichael S. Tsirkin 
456751b81317SJason Wang 	_virtnet_set_queues(vi, vi->curr_queue_pairs);
456851b81317SJason Wang 
45699f62d221SLaurent Vivier 	/* a random MAC address has been assigned, notify the device.
45709f62d221SLaurent Vivier 	 * We don't fail probe if VIRTIO_NET_F_CTRL_MAC_ADDR is not there
45719f62d221SLaurent Vivier 	 * because many devices work fine without getting MAC explicitly
45729f62d221SLaurent Vivier 	 */
45739f62d221SLaurent Vivier 	if (!virtio_has_feature(vdev, VIRTIO_NET_F_MAC) &&
45749f62d221SLaurent Vivier 	    virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_MAC_ADDR)) {
45759f62d221SLaurent Vivier 		struct scatterlist sg;
45769f62d221SLaurent Vivier 
45779f62d221SLaurent Vivier 		sg_init_one(&sg, dev->dev_addr, dev->addr_len);
45789f62d221SLaurent Vivier 		if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
45799f62d221SLaurent Vivier 					  VIRTIO_NET_CTRL_MAC_ADDR_SET, &sg)) {
45809f62d221SLaurent Vivier 			pr_debug("virtio_net: setting MAC address failed\n");
45819f62d221SLaurent Vivier 			rtnl_unlock();
45829f62d221SLaurent Vivier 			err = -EINVAL;
45839f62d221SLaurent Vivier 			goto free_unregister_netdev;
45849f62d221SLaurent Vivier 		}
45859f62d221SLaurent Vivier 	}
45869f62d221SLaurent Vivier 
458750c0ada6SJason Wang 	rtnl_unlock();
458850c0ada6SJason Wang 
45898017c279SSebastian Andrzej Siewior 	err = virtnet_cpu_notif_add(vi);
45908de4b2f3SWanlong Gao 	if (err) {
45918de4b2f3SWanlong Gao 		pr_debug("virtio_net: registering cpu notifier failed\n");
4592f00e35e2Swangyunjian 		goto free_unregister_netdev;
45938de4b2f3SWanlong Gao 	}
45948de4b2f3SWanlong Gao 
4595167c25e4SJason Wang 	/* Assume link up if device can't report link status,
4596167c25e4SJason Wang 	   otherwise get link status from config. */
4597167c25e4SJason Wang 	netif_carrier_off(dev);
4598bda7fab5SJay Vosburgh 	if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) {
45993b07e9caSTejun Heo 		schedule_work(&vi->config_work);
4600167c25e4SJason Wang 	} else {
4601167c25e4SJason Wang 		vi->status = VIRTIO_NET_S_LINK_UP;
4602faa9b39fSJason Baron 		virtnet_update_settings(vi);
46034783256eSPantelis Koukousoulas 		netif_carrier_on(dev);
4604167c25e4SJason Wang 	}
46059f4d26d0SMark McLoughlin 
46063f93522fSJason Wang 	for (i = 0; i < ARRAY_SIZE(guest_offloads); i++)
46073f93522fSJason Wang 		if (virtio_has_feature(vi->vdev, guest_offloads[i]))
46083f93522fSJason Wang 			set_bit(guest_offloads[i], &vi->guest_offloads);
4609a02e8964SWillem de Bruijn 	vi->guest_offloads_capable = vi->guest_offloads;
46103f93522fSJason Wang 
4611986a4f4dSJason Wang 	pr_debug("virtnet: registered device %s with %d RX and TX vq's\n",
4612986a4f4dSJason Wang 		 dev->name, max_queue_pairs);
4613986a4f4dSJason Wang 
4614296f96fcSRusty Russell 	return 0;
4615296f96fcSRusty Russell 
4616f00e35e2Swangyunjian free_unregister_netdev:
4617b3369c1fSRusty Russell 	unregister_netdev(dev);
4618ba5e4426SSridhar Samudrala free_failover:
4619ba5e4426SSridhar Samudrala 	net_failover_destroy(vi->failover);
4620d2a7dddaSMichael S. Tsirkin free_vqs:
4621b0686565SLi Zetao 	virtio_reset_device(vdev);
4622986a4f4dSJason Wang 	cancel_delayed_work_sync(&vi->refill);
4623fb51879dSMichael Dalton 	free_receive_page_frags(vi);
4624e9d7417bSJason Wang 	virtnet_del_vqs(vi);
4625296f96fcSRusty Russell free:
4626296f96fcSRusty Russell 	free_netdev(dev);
4627296f96fcSRusty Russell 	return err;
4628296f96fcSRusty Russell }
4629296f96fcSRusty Russell 
463004486ed0SAmit Shah static void remove_vq_common(struct virtnet_info *vi)
4631296f96fcSRusty Russell {
4632d9679d00SMichael S. Tsirkin 	virtio_reset_device(vi->vdev);
4633830a8a97SShirley Ma 
4634830a8a97SShirley Ma 	/* Free unused buffers in both send and recv, if any. */
46359ab86bbcSShirley Ma 	free_unused_bufs(vi);
4636fb6813f4SRusty Russell 
4637986a4f4dSJason Wang 	free_receive_bufs(vi);
4638d2a7dddaSMichael S. Tsirkin 
4639fb51879dSMichael Dalton 	free_receive_page_frags(vi);
4640fb51879dSMichael Dalton 
4641986a4f4dSJason Wang 	virtnet_del_vqs(vi);
464204486ed0SAmit Shah }
464304486ed0SAmit Shah 
46448cc085d6SBill Pemberton static void virtnet_remove(struct virtio_device *vdev)
464504486ed0SAmit Shah {
464604486ed0SAmit Shah 	struct virtnet_info *vi = vdev->priv;
464704486ed0SAmit Shah 
46488017c279SSebastian Andrzej Siewior 	virtnet_cpu_notif_remove(vi);
46498de4b2f3SWanlong Gao 
4650102a2786SMichael S. Tsirkin 	/* Make sure no work handler is accessing the device. */
4651102a2786SMichael S. Tsirkin 	flush_work(&vi->config_work);
4652586d17c5SJason Wang 
465304486ed0SAmit Shah 	unregister_netdev(vi->dev);
465404486ed0SAmit Shah 
4655ba5e4426SSridhar Samudrala 	net_failover_destroy(vi->failover);
4656ba5e4426SSridhar Samudrala 
465704486ed0SAmit Shah 	remove_vq_common(vi);
4658fb6813f4SRusty Russell 
465974b2553fSRusty Russell 	free_netdev(vi->dev);
4660296f96fcSRusty Russell }
4661296f96fcSRusty Russell 
466267a75194SArnd Bergmann static __maybe_unused int virtnet_freeze(struct virtio_device *vdev)
46630741bcb5SAmit Shah {
46640741bcb5SAmit Shah 	struct virtnet_info *vi = vdev->priv;
46650741bcb5SAmit Shah 
46668017c279SSebastian Andrzej Siewior 	virtnet_cpu_notif_remove(vi);
46679fe7bfceSJohn Fastabend 	virtnet_freeze_down(vdev);
46680741bcb5SAmit Shah 	remove_vq_common(vi);
46690741bcb5SAmit Shah 
46700741bcb5SAmit Shah 	return 0;
46710741bcb5SAmit Shah }
46720741bcb5SAmit Shah 
467367a75194SArnd Bergmann static __maybe_unused int virtnet_restore(struct virtio_device *vdev)
46740741bcb5SAmit Shah {
46750741bcb5SAmit Shah 	struct virtnet_info *vi = vdev->priv;
46769fe7bfceSJohn Fastabend 	int err;
46770741bcb5SAmit Shah 
46789fe7bfceSJohn Fastabend 	err = virtnet_restore_up(vdev);
46790741bcb5SAmit Shah 	if (err)
46800741bcb5SAmit Shah 		return err;
4681986a4f4dSJason Wang 	virtnet_set_queues(vi, vi->curr_queue_pairs);
4682986a4f4dSJason Wang 
46838017c279SSebastian Andrzej Siewior 	err = virtnet_cpu_notif_add(vi);
46843f2869caSXie Yongji 	if (err) {
46853f2869caSXie Yongji 		virtnet_freeze_down(vdev);
46863f2869caSXie Yongji 		remove_vq_common(vi);
4687ec9debbdSJason Wang 		return err;
46883f2869caSXie Yongji 	}
4689ec9debbdSJason Wang 
46900741bcb5SAmit Shah 	return 0;
46910741bcb5SAmit Shah }
46920741bcb5SAmit Shah 
4693296f96fcSRusty Russell static struct virtio_device_id id_table[] = {
4694296f96fcSRusty Russell 	{ VIRTIO_ID_NET, VIRTIO_DEV_ANY_ID },
4695296f96fcSRusty Russell 	{ 0 },
4696296f96fcSRusty Russell };
4697296f96fcSRusty Russell 
4698f3358507SMichael S. Tsirkin #define VIRTNET_FEATURES \
4699f3358507SMichael S. Tsirkin 	VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GUEST_CSUM, \
4700f3358507SMichael S. Tsirkin 	VIRTIO_NET_F_MAC, \
4701f3358507SMichael S. Tsirkin 	VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6, \
4702f3358507SMichael S. Tsirkin 	VIRTIO_NET_F_HOST_ECN, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6, \
4703f3358507SMichael S. Tsirkin 	VIRTIO_NET_F_GUEST_ECN, VIRTIO_NET_F_GUEST_UFO, \
4704418044e1SAndrew Melnychenko 	VIRTIO_NET_F_HOST_USO, VIRTIO_NET_F_GUEST_USO4, VIRTIO_NET_F_GUEST_USO6, \
4705f3358507SMichael S. Tsirkin 	VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ, \
4706f3358507SMichael S. Tsirkin 	VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN, \
4707f3358507SMichael S. Tsirkin 	VIRTIO_NET_F_GUEST_ANNOUNCE, VIRTIO_NET_F_MQ, \
4708f3358507SMichael S. Tsirkin 	VIRTIO_NET_F_CTRL_MAC_ADDR, \
4709faa9b39fSJason Baron 	VIRTIO_NET_F_MTU, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS, \
4710c7114b12SAndrew Melnychenko 	VIRTIO_NET_F_SPEED_DUPLEX, VIRTIO_NET_F_STANDBY, \
4711be50da3eSJiri Pirko 	VIRTIO_NET_F_RSS, VIRTIO_NET_F_HASH_REPORT, VIRTIO_NET_F_NOTF_COAL, \
47128af3bf66SGavin Li 	VIRTIO_NET_F_VQ_NOTF_COAL, \
4713be50da3eSJiri Pirko 	VIRTIO_NET_F_GUEST_HDRLEN
4714f3358507SMichael S. Tsirkin 
4715c45a6816SRusty Russell static unsigned int features[] = {
4716f3358507SMichael S. Tsirkin 	VIRTNET_FEATURES,
4717f3358507SMichael S. Tsirkin };
4718f3358507SMichael S. Tsirkin 
4719f3358507SMichael S. Tsirkin static unsigned int features_legacy[] = {
4720f3358507SMichael S. Tsirkin 	VIRTNET_FEATURES,
4721f3358507SMichael S. Tsirkin 	VIRTIO_NET_F_GSO,
4722e7428e95SMichael S. Tsirkin 	VIRTIO_F_ANY_LAYOUT,
4723c45a6816SRusty Russell };
4724c45a6816SRusty Russell 
472522402529SUwe Kleine-König static struct virtio_driver virtio_net_driver = {
4726c45a6816SRusty Russell 	.feature_table = features,
4727c45a6816SRusty Russell 	.feature_table_size = ARRAY_SIZE(features),
4728f3358507SMichael S. Tsirkin 	.feature_table_legacy = features_legacy,
4729f3358507SMichael S. Tsirkin 	.feature_table_size_legacy = ARRAY_SIZE(features_legacy),
4730296f96fcSRusty Russell 	.driver.name =	KBUILD_MODNAME,
4731296f96fcSRusty Russell 	.driver.owner =	THIS_MODULE,
4732296f96fcSRusty Russell 	.id_table =	id_table,
4733fe36cbe0SMichael S. Tsirkin 	.validate =	virtnet_validate,
4734296f96fcSRusty Russell 	.probe =	virtnet_probe,
47358cc085d6SBill Pemberton 	.remove =	virtnet_remove,
47369f4d26d0SMark McLoughlin 	.config_changed = virtnet_config_changed,
473789107000SAaron Lu #ifdef CONFIG_PM_SLEEP
47380741bcb5SAmit Shah 	.freeze =	virtnet_freeze,
47390741bcb5SAmit Shah 	.restore =	virtnet_restore,
47400741bcb5SAmit Shah #endif
4741296f96fcSRusty Russell };
4742296f96fcSRusty Russell 
47438017c279SSebastian Andrzej Siewior static __init int virtio_net_driver_init(void)
47448017c279SSebastian Andrzej Siewior {
47458017c279SSebastian Andrzej Siewior 	int ret;
47468017c279SSebastian Andrzej Siewior 
474773c1b41eSThomas Gleixner 	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "virtio/net:online",
47488017c279SSebastian Andrzej Siewior 				      virtnet_cpu_online,
47498017c279SSebastian Andrzej Siewior 				      virtnet_cpu_down_prep);
47508017c279SSebastian Andrzej Siewior 	if (ret < 0)
47518017c279SSebastian Andrzej Siewior 		goto out;
47528017c279SSebastian Andrzej Siewior 	virtionet_online = ret;
475373c1b41eSThomas Gleixner 	ret = cpuhp_setup_state_multi(CPUHP_VIRT_NET_DEAD, "virtio/net:dead",
47548017c279SSebastian Andrzej Siewior 				      NULL, virtnet_cpu_dead);
47558017c279SSebastian Andrzej Siewior 	if (ret)
47568017c279SSebastian Andrzej Siewior 		goto err_dead;
47578017c279SSebastian Andrzej Siewior 	ret = register_virtio_driver(&virtio_net_driver);
47588017c279SSebastian Andrzej Siewior 	if (ret)
47598017c279SSebastian Andrzej Siewior 		goto err_virtio;
47608017c279SSebastian Andrzej Siewior 	return 0;
47618017c279SSebastian Andrzej Siewior err_virtio:
47628017c279SSebastian Andrzej Siewior 	cpuhp_remove_multi_state(CPUHP_VIRT_NET_DEAD);
47638017c279SSebastian Andrzej Siewior err_dead:
47648017c279SSebastian Andrzej Siewior 	cpuhp_remove_multi_state(virtionet_online);
47658017c279SSebastian Andrzej Siewior out:
47668017c279SSebastian Andrzej Siewior 	return ret;
47678017c279SSebastian Andrzej Siewior }
47688017c279SSebastian Andrzej Siewior module_init(virtio_net_driver_init);
47698017c279SSebastian Andrzej Siewior 
47708017c279SSebastian Andrzej Siewior static __exit void virtio_net_driver_exit(void)
47718017c279SSebastian Andrzej Siewior {
4772cfa0ebc9SAndrew Jones 	unregister_virtio_driver(&virtio_net_driver);
47738017c279SSebastian Andrzej Siewior 	cpuhp_remove_multi_state(CPUHP_VIRT_NET_DEAD);
47748017c279SSebastian Andrzej Siewior 	cpuhp_remove_multi_state(virtionet_online);
47758017c279SSebastian Andrzej Siewior }
47768017c279SSebastian Andrzej Siewior module_exit(virtio_net_driver_exit);
4777296f96fcSRusty Russell 
4778296f96fcSRusty Russell MODULE_DEVICE_TABLE(virtio, id_table);
4779296f96fcSRusty Russell MODULE_DESCRIPTION("Virtio network driver");
4780296f96fcSRusty Russell MODULE_LICENSE("GPL");
4781