/* xref: /openbmc/linux/drivers/net/virtio_net.c (revision ef609fd7) */
// SPDX-License-Identifier: GPL-2.0-or-later
/* A network driver using virtio.
 *
 * Copyright 2007 Rusty Russell <rusty@rustcorp.com.au> IBM Corporation
 */
//#define DEBUG
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/module.h>
#include <linux/virtio.h>
#include <linux/virtio_net.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/scatterlist.h>
#include <linux/if_vlan.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/average.h>
#include <linux/filter.h>
#include <linux/kernel.h>
#include <net/route.h>
#include <net/xdp.h>
#include <net/net_failover.h>
#include <net/netdev_rx_queue.h>

static int napi_weight = NAPI_POLL_WEIGHT;
module_param(napi_weight, int, 0444);

static bool csum = true, gso = true, napi_tx = true;
module_param(csum, bool, 0444);
module_param(gso, bool, 0444);
module_param(napi_tx, bool, 0644);

/* FIXME: MTU in config. */
#define GOOD_PACKET_LEN (ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN)
#define GOOD_COPY_LEN	128

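/* Head padding reserved at the start of small-mode receive buffers,
 * ahead of the virtio-net header (see receive_small_build_skb() below).
 */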
#define VIRTNET_RX_PAD (NET_IP_ALIGN + NET_SKB_PAD)

/* Amount of XDP headroom to prepend to packets for use by xdp_adjust_head */
#define VIRTIO_XDP_HEADROOM 256

/* Separating two types of XDP xmit */
#define VIRTIO_XDP_TX		BIT(0)
#define VIRTIO_XDP_REDIR	BIT(1)

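/* Tag for xdp_frame pointers queued on a send virtqueue. Pointers are at
 * least word-aligned, so bit 0 is free to distinguish XDP frames from
 * sk_buffs (see is_xdp_frame() below).
 */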
#define VIRTIO_XDP_FLAG	BIT(0)

/* RX packet size EWMA. The average packet size is used to determine the packet
 * buffer size when refilling RX rings. As the entire RX ring may be refilled
 * at once, the weight is chosen so that the EWMA will be insensitive to short-
 * term, transient changes in packet size.
 */
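/* DECLARE_EWMA(pkt_len, 0, 64) expands to struct ewma_pkt_len plus the
 * ewma_pkt_len_init/_add/_read helpers (precision 0, weight factor 1/64).
 */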
DECLARE_EWMA(pkt_len, 0, 64)

#define VIRTNET_DRIVER_VERSION "1.0.0"

static const unsigned long guest_offloads[] = {
	VIRTIO_NET_F_GUEST_TSO4,
	VIRTIO_NET_F_GUEST_TSO6,
	VIRTIO_NET_F_GUEST_ECN,
	VIRTIO_NET_F_GUEST_UFO,
	VIRTIO_NET_F_GUEST_CSUM,
	VIRTIO_NET_F_GUEST_USO4,
	VIRTIO_NET_F_GUEST_USO6,
	VIRTIO_NET_F_GUEST_HDRLEN
};

#define GUEST_OFFLOAD_GRO_HW_MASK ((1ULL << VIRTIO_NET_F_GUEST_TSO4) | \
				(1ULL << VIRTIO_NET_F_GUEST_TSO6) | \
				(1ULL << VIRTIO_NET_F_GUEST_ECN)  | \
				(1ULL << VIRTIO_NET_F_GUEST_UFO)  | \
				(1ULL << VIRTIO_NET_F_GUEST_USO4) | \
				(1ULL << VIRTIO_NET_F_GUEST_USO6))

struct virtnet_stat_desc {
	char desc[ETH_GSTRING_LEN];
	size_t offset;
};

struct virtnet_sq_stats {
	struct u64_stats_sync syncp;
	u64_stats_t packets;
	u64_stats_t bytes;
	u64_stats_t xdp_tx;
	u64_stats_t xdp_tx_drops;
	u64_stats_t kicks;
	u64_stats_t tx_timeouts;
};

struct virtnet_rq_stats {
	struct u64_stats_sync syncp;
	u64_stats_t packets;
	u64_stats_t bytes;
	u64_stats_t drops;
	u64_stats_t xdp_packets;
	u64_stats_t xdp_tx;
	u64_stats_t xdp_redirects;
	u64_stats_t xdp_drops;
	u64_stats_t kicks;
};

#define VIRTNET_SQ_STAT(m)	offsetof(struct virtnet_sq_stats, m)
#define VIRTNET_RQ_STAT(m)	offsetof(struct virtnet_rq_stats, m)

static const struct virtnet_stat_desc virtnet_sq_stats_desc[] = {
	{ "packets",		VIRTNET_SQ_STAT(packets) },
	{ "bytes",		VIRTNET_SQ_STAT(bytes) },
	{ "xdp_tx",		VIRTNET_SQ_STAT(xdp_tx) },
	{ "xdp_tx_drops",	VIRTNET_SQ_STAT(xdp_tx_drops) },
	{ "kicks",		VIRTNET_SQ_STAT(kicks) },
	{ "tx_timeouts",	VIRTNET_SQ_STAT(tx_timeouts) },
};

static const struct virtnet_stat_desc virtnet_rq_stats_desc[] = {
	{ "packets",		VIRTNET_RQ_STAT(packets) },
	{ "bytes",		VIRTNET_RQ_STAT(bytes) },
	{ "drops",		VIRTNET_RQ_STAT(drops) },
	{ "xdp_packets",	VIRTNET_RQ_STAT(xdp_packets) },
	{ "xdp_tx",		VIRTNET_RQ_STAT(xdp_tx) },
	{ "xdp_redirects",	VIRTNET_RQ_STAT(xdp_redirects) },
	{ "xdp_drops",		VIRTNET_RQ_STAT(xdp_drops) },
	{ "kicks",		VIRTNET_RQ_STAT(kicks) },
};

#define VIRTNET_SQ_STATS_LEN	ARRAY_SIZE(virtnet_sq_stats_desc)
#define VIRTNET_RQ_STATS_LEN	ARRAY_SIZE(virtnet_rq_stats_desc)

struct virtnet_interrupt_coalesce {
	u32 max_packets;
	u32 max_usecs;
};

/* DMA info for the buffers carved out of one page-frag page. */
struct virtnet_rq_dma {
	dma_addr_t addr;
	u32 ref;
	u16 len;
	u16 need_sync;
};

/* Internal representation of a send virtqueue */
struct send_queue {
	/* Virtqueue associated with this send_queue */
	struct virtqueue *vq;

	/* TX: fragments + linear part + virtio header */
	struct scatterlist sg[MAX_SKB_FRAGS + 2];

	/* Name of the send queue: output.$index */
	char name[16];

	struct virtnet_sq_stats stats;

	struct virtnet_interrupt_coalesce intr_coal;

	struct napi_struct napi;

	/* Record whether sq is in reset state. */
	bool reset;
};

/* Internal representation of a receive virtqueue */
struct receive_queue {
	/* Virtqueue associated with this receive_queue */
	struct virtqueue *vq;

	struct napi_struct napi;

	struct bpf_prog __rcu *xdp_prog;

	struct virtnet_rq_stats stats;

	struct virtnet_interrupt_coalesce intr_coal;

	/* Chain pages by the private ptr. */
	struct page *pages;

	/* Average packet length for mergeable receive buffers. */
	struct ewma_pkt_len mrg_avg_pkt_len;

	/* Page frag for packet buffer allocation. */
	struct page_frag alloc_frag;

	/* RX: fragments + linear part + virtio header */
	struct scatterlist sg[MAX_SKB_FRAGS + 2];

	/* Min single buffer size for mergeable buffers case. */
	unsigned int min_buf_len;

	/* Name of this receive queue: input.$index */
	char name[16];

	struct xdp_rxq_info xdp_rxq;

	/* Record the last dma info so it can be freed after new pages are
	 * allocated.
	 */
	struct virtnet_rq_dma *last_dma;

	/* Do DMA mapping ourselves (premapped mode) */
	bool do_dma;
};

/* This structure holds an RSS message sized for the maximum indirection
 * table and key size. Note that the default structure describing the RSS
 * configuration, virtio_net_rss_config, carries the same info but cannot
 * hold the table values. In any case, the structure is passed to the
 * virtio hw through sg_buf split into parts, because table sizes may
 * differ according to the device configuration.
 */
#define VIRTIO_NET_RSS_MAX_KEY_SIZE     40
#define VIRTIO_NET_RSS_MAX_TABLE_LEN    128
struct virtio_net_ctrl_rss {
	u32 hash_types;
	u16 indirection_table_mask;
	u16 unclassified_queue;
	u16 indirection_table[VIRTIO_NET_RSS_MAX_TABLE_LEN];
	u16 max_tx_vq;
	u8 hash_key_length;
	u8 key[VIRTIO_NET_RSS_MAX_KEY_SIZE];
};

/* Control VQ buffers: protected by the rtnl lock */
struct control_buf {
	struct virtio_net_ctrl_hdr hdr;
	virtio_net_ctrl_ack status;
	struct virtio_net_ctrl_mq mq;
	u8 promisc;
	u8 allmulti;
	__virtio16 vid;
	__virtio64 offloads;
	struct virtio_net_ctrl_rss rss;
	struct virtio_net_ctrl_coal_tx coal_tx;
	struct virtio_net_ctrl_coal_rx coal_rx;
	struct virtio_net_ctrl_coal_vq coal_vq;
};

struct virtnet_info {
	struct virtio_device *vdev;
	struct virtqueue *cvq;
	struct net_device *dev;
	struct send_queue *sq;
	struct receive_queue *rq;
	unsigned int status;

	/* Max # of queue pairs supported by the device */
	u16 max_queue_pairs;

	/* # of queue pairs currently used by the driver */
	u16 curr_queue_pairs;

	/* # of XDP queue pairs currently used by the driver */
	u16 xdp_queue_pairs;

	/* xdp_queue_pairs may be 0 while XDP is loaded, so track the XDP
	 * state separately.
	 */
	bool xdp_enabled;

	/* I like... big packets and I cannot lie! */
	bool big_packets;

	/* number of sg entries allocated for big packets */
	unsigned int big_packets_num_skbfrags;

	/* Host will merge rx buffers for big packets (shake it! shake it!) */
	bool mergeable_rx_bufs;

	/* Host supports rss and/or hash report */
	bool has_rss;
	bool has_rss_hash_report;
	u8 rss_key_size;
	u16 rss_indir_table_size;
	u32 rss_hash_types_supported;
	u32 rss_hash_types_saved;

	/* Has control virtqueue */
	bool has_cvq;

	/* Host can handle any s/g split between our header and packet data */
	bool any_header_sg;

	/* Packet virtio header size */
	u8 hdr_len;

	/* Work struct for delayed refilling if we run low on memory. */
	struct delayed_work refill;

	/* Is delayed refill enabled? */
	bool refill_enabled;

	/* The lock to synchronize the access to refill_enabled */
	spinlock_t refill_lock;

	/* Work struct for config space updates */
	struct work_struct config_work;

	/* Is the affinity hint set for the virtqueues? */
	bool affinity_hint_set;

	/* CPU hotplug instances for online & dead */
	struct hlist_node node;
	struct hlist_node node_dead;

	struct control_buf *ctrl;

	/* Ethtool settings */
	u8 duplex;
	u32 speed;

	/* Interrupt coalescing settings */
	struct virtnet_interrupt_coalesce intr_coal_tx;
	struct virtnet_interrupt_coalesce intr_coal_rx;

	unsigned long guest_offloads;
	unsigned long guest_offloads_capable;

	/* failover when STANDBY feature enabled */
	struct failover *failover;
};

struct padded_vnet_hdr {
	struct virtio_net_hdr_v1_hash hdr;
	/*
	 * hdr is in a separate sg buffer, and data sg buffer shares same page
	 * with this header sg. This padding makes next sg 16 byte aligned
	 * after the header.
	 */
	char padding[12];
};

struct virtio_net_common_hdr {
	union {
		struct virtio_net_hdr hdr;
		struct virtio_net_hdr_mrg_rxbuf	mrg_hdr;
		struct virtio_net_hdr_v1_hash hash_v1_hdr;
	};
};

static void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf);

static bool is_xdp_frame(void *ptr)
{
	return (unsigned long)ptr & VIRTIO_XDP_FLAG;
}

static void *xdp_to_ptr(struct xdp_frame *ptr)
{
	return (void *)((unsigned long)ptr | VIRTIO_XDP_FLAG);
}

static struct xdp_frame *ptr_to_xdp(void *ptr)
{
	return (struct xdp_frame *)((unsigned long)ptr & ~VIRTIO_XDP_FLAG);
}

/* Converting between virtqueue no. and kernel tx/rx queue no.
 * 0:rx0 1:tx0 2:rx1 3:tx1 ... 2N:rxN 2N+1:txN 2N+2:cvq
 */
static int vq2txq(struct virtqueue *vq)
{
	return (vq->index - 1) / 2;
}

static int txq2vq(int txq)
{
	return txq * 2 + 1;
}

static int vq2rxq(struct virtqueue *vq)
{
	return vq->index / 2;
}

static int rxq2vq(int rxq)
{
	return rxq * 2;
}

static inline struct virtio_net_common_hdr *
skb_vnet_common_hdr(struct sk_buff *skb)
{
	return (struct virtio_net_common_hdr *)skb->cb;
}

/*
 * private is used to chain pages for big packets; put the whole
 * most recently used list at the beginning for reuse
 */
static void give_pages(struct receive_queue *rq, struct page *page)
{
	struct page *end;

	/* Find end of list, sew whole thing into vi->rq.pages. */
	for (end = page; end->private; end = (struct page *)end->private);
	end->private = (unsigned long)rq->pages;
	rq->pages = page;
}

static struct page *get_a_page(struct receive_queue *rq, gfp_t gfp_mask)
{
	struct page *p = rq->pages;

	if (p) {
		rq->pages = (struct page *)p->private;
		/* clear private here, it is used to chain pages */
		p->private = 0;
	} else
		p = alloc_page(gfp_mask);
	return p;
}

static void virtnet_rq_free_buf(struct virtnet_info *vi,
				struct receive_queue *rq, void *buf)
{
	if (vi->mergeable_rx_bufs)
		put_page(virt_to_head_page(buf));
	else if (vi->big_packets)
		give_pages(rq, buf);
	else
		put_page(virt_to_head_page(buf));
}

static void enable_delayed_refill(struct virtnet_info *vi)
{
	spin_lock_bh(&vi->refill_lock);
	vi->refill_enabled = true;
	spin_unlock_bh(&vi->refill_lock);
}

static void disable_delayed_refill(struct virtnet_info *vi)
{
	spin_lock_bh(&vi->refill_lock);
	vi->refill_enabled = false;
	spin_unlock_bh(&vi->refill_lock);
}

static void virtqueue_napi_schedule(struct napi_struct *napi,
				    struct virtqueue *vq)
{
	if (napi_schedule_prep(napi)) {
		virtqueue_disable_cb(vq);
		__napi_schedule(napi);
	}
}

static void virtqueue_napi_complete(struct napi_struct *napi,
				    struct virtqueue *vq, int processed)
{
	int opaque;

	opaque = virtqueue_enable_cb_prepare(vq);
	if (napi_complete_done(napi, processed)) {
		if (unlikely(virtqueue_poll(vq, opaque)))
			virtqueue_napi_schedule(napi, vq);
	} else {
		virtqueue_disable_cb(vq);
	}
}

static void skb_xmit_done(struct virtqueue *vq)
{
	struct virtnet_info *vi = vq->vdev->priv;
	struct napi_struct *napi = &vi->sq[vq2txq(vq)].napi;

	/* Suppress further interrupts. */
	virtqueue_disable_cb(vq);

	if (napi->weight)
		virtqueue_napi_schedule(napi, vq);
	else
		/* We were probably waiting for more output buffers. */
		netif_wake_subqueue(vi->dev, vq2txq(vq));
}

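/* For mergeable buffers, the per-buffer headroom and truesize are packed
 * into a single pointer-sized context word: truesize in the low 22 bits,
 * headroom in the bits above (see the helpers below).
 */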
#define MRG_CTX_HEADER_SHIFT 22
static void *mergeable_len_to_ctx(unsigned int truesize,
				  unsigned int headroom)
{
	return (void *)(unsigned long)((headroom << MRG_CTX_HEADER_SHIFT) | truesize);
}

static unsigned int mergeable_ctx_to_headroom(void *mrg_ctx)
{
	return (unsigned long)mrg_ctx >> MRG_CTX_HEADER_SHIFT;
}

static unsigned int mergeable_ctx_to_truesize(void *mrg_ctx)
{
	return (unsigned long)mrg_ctx & ((1 << MRG_CTX_HEADER_SHIFT) - 1);
}

static struct sk_buff *virtnet_build_skb(void *buf, unsigned int buflen,
					 unsigned int headroom,
					 unsigned int len)
{
	struct sk_buff *skb;

	skb = build_skb(buf, buflen);
	if (unlikely(!skb))
		return NULL;

	skb_reserve(skb, headroom);
	skb_put(skb, len);

	return skb;
}

/* Called from bottom half context */
static struct sk_buff *page_to_skb(struct virtnet_info *vi,
				   struct receive_queue *rq,
				   struct page *page, unsigned int offset,
				   unsigned int len, unsigned int truesize,
				   unsigned int headroom)
{
	struct sk_buff *skb;
	struct virtio_net_common_hdr *hdr;
	unsigned int copy, hdr_len, hdr_padded_len;
	struct page *page_to_free = NULL;
	int tailroom, shinfo_size;
	char *p, *hdr_p, *buf;

	p = page_address(page) + offset;
	hdr_p = p;

	hdr_len = vi->hdr_len;
	if (vi->mergeable_rx_bufs)
		hdr_padded_len = hdr_len;
	else
		hdr_padded_len = sizeof(struct padded_vnet_hdr);

	buf = p - headroom;
	len -= hdr_len;
	offset += hdr_padded_len;
	p += hdr_padded_len;
	tailroom = truesize - headroom - hdr_padded_len - len;

	shinfo_size = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	/* Build the skb directly around the buffer when there is sufficient
	 * tailroom, so the page is reused without a copy.
	 */
	if (!NET_IP_ALIGN && len > GOOD_COPY_LEN && tailroom >= shinfo_size) {
		skb = virtnet_build_skb(buf, truesize, p - buf, len);
		if (unlikely(!skb))
			return NULL;

		page = (struct page *)page->private;
		if (page)
			give_pages(rq, page);
		goto ok;
	}

	/* copy small packet so we can reuse these pages for small data */
	skb = napi_alloc_skb(&rq->napi, GOOD_COPY_LEN);
	if (unlikely(!skb))
		return NULL;

	/* Copy the whole frame if it fits in skb->head; otherwise
	 * we let virtio_net_hdr_to_skb() and GRO pull headers as needed.
	 */
	if (len <= skb_tailroom(skb))
		copy = len;
	else
		copy = ETH_HLEN;
	skb_put_data(skb, p, copy);

	len -= copy;
	offset += copy;

	if (vi->mergeable_rx_bufs) {
		if (len)
			skb_add_rx_frag(skb, 0, page, offset, len, truesize);
		else
			page_to_free = page;
		goto ok;
	}

	/*
	 * Verify that we can indeed put this data into a skb.
	 * This is here to handle cases when the device erroneously
	 * tries to receive more than is possible. This is usually
	 * the case of a broken device.
	 */
	if (unlikely(len > MAX_SKB_FRAGS * PAGE_SIZE)) {
		net_dbg_ratelimited("%s: too much data\n", skb->dev->name);
		dev_kfree_skb(skb);
		return NULL;
	}
	BUG_ON(offset >= PAGE_SIZE);
	while (len) {
		unsigned int frag_size = min((unsigned)PAGE_SIZE - offset, len);
		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, offset,
				frag_size, truesize);
		len -= frag_size;
		page = (struct page *)page->private;
		offset = 0;
	}

	if (page)
		give_pages(rq, page);

ok:
	hdr = skb_vnet_common_hdr(skb);
	memcpy(hdr, hdr_p, hdr_len);
	if (page_to_free)
		put_page(page_to_free);

	return skb;
}

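/* The receive-path helpers below manage DMA mappings themselves when a
 * receive virtqueue runs in premapped mode (rq->do_dma). Each page taken
 * from the page_frag allocator begins with a struct virtnet_rq_dma that
 * describes the mapping of the remainder of the page; dma->ref counts the
 * buffers still outstanding, so the page is unmapped only after the last
 * buffer comes back.
 */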
static void virtnet_rq_unmap(struct receive_queue *rq, void *buf, u32 len)
{
	struct page *page = virt_to_head_page(buf);
	struct virtnet_rq_dma *dma;
	void *head;
	int offset;

	head = page_address(page);

	dma = head;

	--dma->ref;

	if (dma->need_sync && len) {
		offset = buf - (head + sizeof(*dma));

		virtqueue_dma_sync_single_range_for_cpu(rq->vq, dma->addr,
							offset, len,
							DMA_FROM_DEVICE);
	}

	if (dma->ref)
		return;

	virtqueue_dma_unmap_single_attrs(rq->vq, dma->addr, dma->len,
					 DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
	put_page(page);
}

static void *virtnet_rq_get_buf(struct receive_queue *rq, u32 *len, void **ctx)
{
	void *buf;

	buf = virtqueue_get_buf_ctx(rq->vq, len, ctx);
	if (buf && rq->do_dma)
		virtnet_rq_unmap(rq, buf, *len);

	return buf;
}

static void virtnet_rq_init_one_sg(struct receive_queue *rq, void *buf, u32 len)
{
	struct virtnet_rq_dma *dma;
	dma_addr_t addr;
	u32 offset;
	void *head;

	if (!rq->do_dma) {
		sg_init_one(rq->sg, buf, len);
		return;
	}

	head = page_address(rq->alloc_frag.page);

	offset = buf - head;

	dma = head;

	addr = dma->addr - sizeof(*dma) + offset;

	sg_init_table(rq->sg, 1);
	rq->sg[0].dma_address = addr;
	rq->sg[0].length = len;
}

static void *virtnet_rq_alloc(struct receive_queue *rq, u32 size, gfp_t gfp)
{
	struct page_frag *alloc_frag = &rq->alloc_frag;
	struct virtnet_rq_dma *dma;
	void *buf, *head;
	dma_addr_t addr;

	if (unlikely(!skb_page_frag_refill(size, alloc_frag, gfp)))
		return NULL;

	head = page_address(alloc_frag->page);

	if (rq->do_dma) {
		dma = head;

		/* a new page has been allocated */
		if (!alloc_frag->offset) {
			if (rq->last_dma) {
				/* Now that the new page is allocated, the
				 * last dma will not be used, so it can be
				 * unmapped once its ref drops to 0.
				 */
				virtnet_rq_unmap(rq, rq->last_dma, 0);
				rq->last_dma = NULL;
			}

			dma->len = alloc_frag->size - sizeof(*dma);

			addr = virtqueue_dma_map_single_attrs(rq->vq, dma + 1,
							      dma->len, DMA_FROM_DEVICE, 0);
			if (virtqueue_dma_mapping_error(rq->vq, addr))
				return NULL;

			dma->addr = addr;
			dma->need_sync = virtqueue_dma_need_sync(rq->vq, addr);

			/* Add a reference to dma to prevent the entire dma from
			 * being released during error handling. This reference
			 * will be freed after the pages are no longer used.
			 */
			get_page(alloc_frag->page);
			dma->ref = 1;
			alloc_frag->offset = sizeof(*dma);

			rq->last_dma = dma;
		}

		++dma->ref;
	}

	buf = head + alloc_frag->offset;

	get_page(alloc_frag->page);
	alloc_frag->offset += size;

	return buf;
}

static void virtnet_rq_set_premapped(struct virtnet_info *vi)
{
	int i;

	/* disable for big mode */
	if (!vi->mergeable_rx_bufs && vi->big_packets)
		return;

	for (i = 0; i < vi->max_queue_pairs; i++) {
		if (virtqueue_set_dma_premapped(vi->rq[i].vq))
			continue;

		vi->rq[i].do_dma = true;
	}
}

static void virtnet_rq_unmap_free_buf(struct virtqueue *vq, void *buf)
{
	struct virtnet_info *vi = vq->vdev->priv;
	struct receive_queue *rq;
	int i = vq2rxq(vq);

	rq = &vi->rq[i];

	if (rq->do_dma)
		virtnet_rq_unmap(rq, buf, 0);

	virtnet_rq_free_buf(vi, rq, buf);
}

static void free_old_xmit_skbs(struct send_queue *sq, bool in_napi)
{
	unsigned int len;
	unsigned int packets = 0;
	unsigned int bytes = 0;
	void *ptr;

	while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) {
		if (likely(!is_xdp_frame(ptr))) {
			struct sk_buff *skb = ptr;

			pr_debug("Sent skb %p\n", skb);

			bytes += skb->len;
			napi_consume_skb(skb, in_napi);
		} else {
			struct xdp_frame *frame = ptr_to_xdp(ptr);

			bytes += xdp_get_frame_len(frame);
			xdp_return_frame(frame);
		}
		packets++;
	}

	/* Avoid overhead when no packets have been processed; this
	 * happens when called speculatively from start_xmit.
	 */
	if (!packets)
		return;

	u64_stats_update_begin(&sq->stats.syncp);
	u64_stats_add(&sq->stats.bytes, bytes);
	u64_stats_add(&sq->stats.packets, packets);
	u64_stats_update_end(&sq->stats.syncp);
}

static bool is_xdp_raw_buffer_queue(struct virtnet_info *vi, int q)
{
	if (q < (vi->curr_queue_pairs - vi->xdp_queue_pairs))
		return false;
	else if (q < vi->curr_queue_pairs)
		return true;
	else
		return false;
}

static void check_sq_full_and_disable(struct virtnet_info *vi,
				      struct net_device *dev,
				      struct send_queue *sq)
{
	bool use_napi = sq->napi.weight;
	int qnum;

	qnum = sq - vi->sq;

	/* If running out of space, stop queue to avoid getting packets that we
	 * are then unable to transmit.
	 * An alternative would be to force queuing layer to requeue the skb by
	 * returning NETDEV_TX_BUSY. However, NETDEV_TX_BUSY should not be
	 * returned in a normal path of operation: it means that driver is not
	 * maintaining the TX queue stop/start state properly, and causes
	 * the stack to do a non-trivial amount of useless work.
	 * Since most packets only take 1 or 2 ring slots, stopping the queue
	 * early means 16 slots are typically wasted.
	 */
	if (sq->vq->num_free < 2+MAX_SKB_FRAGS) {
		netif_stop_subqueue(dev, qnum);
		if (use_napi) {
			if (unlikely(!virtqueue_enable_cb_delayed(sq->vq)))
				virtqueue_napi_schedule(&sq->napi, sq->vq);
		} else if (unlikely(!virtqueue_enable_cb_delayed(sq->vq))) {
			/* More just got used, free them then recheck. */
			free_old_xmit_skbs(sq, false);
			if (sq->vq->num_free >= 2+MAX_SKB_FRAGS) {
				netif_start_subqueue(dev, qnum);
				virtqueue_disable_cb(sq->vq);
			}
		}
	}
}

static int __virtnet_xdp_xmit_one(struct virtnet_info *vi,
				   struct send_queue *sq,
				   struct xdp_frame *xdpf)
{
	struct virtio_net_hdr_mrg_rxbuf *hdr;
	struct skb_shared_info *shinfo;
	u8 nr_frags = 0;
	int err, i;

	if (unlikely(xdpf->headroom < vi->hdr_len))
		return -EOVERFLOW;

	if (unlikely(xdp_frame_has_frags(xdpf))) {
		shinfo = xdp_get_shared_info_from_frame(xdpf);
		nr_frags = shinfo->nr_frags;
	}

	/* In the wrapping function virtnet_xdp_xmit(), we need to free
	 * up the pending old buffers, where we need to calculate the
	 * position of skb_shared_info in xdp_get_frame_len() and
	 * xdp_return_frame(), which involves xdpf->data and
	 * xdpf->headroom. Therefore, we need to update the value of
	 * headroom synchronously here.
	 */
	xdpf->headroom -= vi->hdr_len;
	xdpf->data -= vi->hdr_len;
	/* Zero header and leave csum up to XDP layers */
	hdr = xdpf->data;
	memset(hdr, 0, vi->hdr_len);
	xdpf->len   += vi->hdr_len;

	sg_init_table(sq->sg, nr_frags + 1);
	sg_set_buf(sq->sg, xdpf->data, xdpf->len);
	for (i = 0; i < nr_frags; i++) {
		skb_frag_t *frag = &shinfo->frags[i];

		sg_set_page(&sq->sg[i + 1], skb_frag_page(frag),
			    skb_frag_size(frag), skb_frag_off(frag));
	}

	err = virtqueue_add_outbuf(sq->vq, sq->sg, nr_frags + 1,
				   xdp_to_ptr(xdpf), GFP_ATOMIC);
	if (unlikely(err))
		return -ENOSPC; /* Caller handles free/refcnt */

	return 0;
}

/* When vi->curr_queue_pairs > nr_cpu_ids, the txq/sq is only used for xdp tx on
 * the current cpu, so it does not need to be locked.
 *
 * Here we use a macro instead of inline functions because we have to deal with
 * three issues at the same time: 1. the choice of sq. 2. judge and execute the
 * lock/unlock of txq 3. make sparse happy. It is difficult for two inline
 * functions to perfectly solve these three problems at the same time.
 */
#define virtnet_xdp_get_sq(vi) ({                                       \
	int cpu = smp_processor_id();                                   \
	struct netdev_queue *txq;                                       \
	typeof(vi) v = (vi);                                            \
	unsigned int qp;                                                \
									\
	if (v->curr_queue_pairs > nr_cpu_ids) {                         \
		qp = v->curr_queue_pairs - v->xdp_queue_pairs;          \
		qp += cpu;                                              \
		txq = netdev_get_tx_queue(v->dev, qp);                  \
		__netif_tx_acquire(txq);                                \
	} else {                                                        \
		qp = cpu % v->curr_queue_pairs;                         \
		txq = netdev_get_tx_queue(v->dev, qp);                  \
		__netif_tx_lock(txq, cpu);                              \
	}                                                               \
	v->sq + qp;                                                     \
})

#define virtnet_xdp_put_sq(vi, q) {                                     \
	struct netdev_queue *txq;                                       \
	typeof(vi) v = (vi);                                            \
									\
	txq = netdev_get_tx_queue(v->dev, (q) - v->sq);                 \
	if (v->curr_queue_pairs > nr_cpu_ids)                           \
		__netif_tx_release(txq);                                \
	else                                                            \
		__netif_tx_unlock(txq);                                 \
}
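
/* The two macros above pair up, as in virtnet_xdp_xmit() below:
 *
 *	sq = virtnet_xdp_get_sq(vi);
 *	... queue xdp_frames on sq ...
 *	virtnet_xdp_put_sq(vi, sq);
 */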

static int virtnet_xdp_xmit(struct net_device *dev,
			    int n, struct xdp_frame **frames, u32 flags)
{
	struct virtnet_info *vi = netdev_priv(dev);
	struct receive_queue *rq = vi->rq;
	struct bpf_prog *xdp_prog;
	struct send_queue *sq;
	unsigned int len;
	int packets = 0;
	int bytes = 0;
	int nxmit = 0;
	int kicks = 0;
	void *ptr;
	int ret;
	int i;

	/* Only allow ndo_xdp_xmit if XDP is loaded on dev, as this
	 * indicates XDP resources have been successfully allocated.
	 */
	xdp_prog = rcu_access_pointer(rq->xdp_prog);
	if (!xdp_prog)
		return -ENXIO;

	sq = virtnet_xdp_get_sq(vi);

	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) {
		ret = -EINVAL;
		goto out;
	}

	/* Free up any pending old buffers before queueing new ones. */
	while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) {
		if (likely(is_xdp_frame(ptr))) {
			struct xdp_frame *frame = ptr_to_xdp(ptr);

			bytes += xdp_get_frame_len(frame);
			xdp_return_frame(frame);
		} else {
			struct sk_buff *skb = ptr;

			bytes += skb->len;
			napi_consume_skb(skb, false);
		}
		packets++;
	}

	for (i = 0; i < n; i++) {
		struct xdp_frame *xdpf = frames[i];

		if (__virtnet_xdp_xmit_one(vi, sq, xdpf))
			break;
		nxmit++;
	}
	ret = nxmit;

	if (!is_xdp_raw_buffer_queue(vi, sq - vi->sq))
		check_sq_full_and_disable(vi, dev, sq);

	if (flags & XDP_XMIT_FLUSH) {
		if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq))
			kicks = 1;
	}
out:
	u64_stats_update_begin(&sq->stats.syncp);
	u64_stats_add(&sq->stats.bytes, bytes);
	u64_stats_add(&sq->stats.packets, packets);
	u64_stats_add(&sq->stats.xdp_tx, n);
	u64_stats_add(&sq->stats.xdp_tx_drops, n - nxmit);
	u64_stats_add(&sq->stats.kicks, kicks);
	u64_stats_update_end(&sq->stats.syncp);

	virtnet_xdp_put_sq(vi, sq);
	return ret;
}

static void put_xdp_frags(struct xdp_buff *xdp)
{
	struct skb_shared_info *shinfo;
	struct page *xdp_page;
	int i;

	if (xdp_buff_has_frags(xdp)) {
		shinfo = xdp_get_shared_info_from_buff(xdp);
		for (i = 0; i < shinfo->nr_frags; i++) {
			xdp_page = skb_frag_page(&shinfo->frags[i]);
			put_page(xdp_page);
		}
	}
}

static int virtnet_xdp_handler(struct bpf_prog *xdp_prog, struct xdp_buff *xdp,
			       struct net_device *dev,
			       unsigned int *xdp_xmit,
			       struct virtnet_rq_stats *stats)
{
	struct xdp_frame *xdpf;
	int err;
	u32 act;

	act = bpf_prog_run_xdp(xdp_prog, xdp);
	u64_stats_inc(&stats->xdp_packets);

	switch (act) {
	case XDP_PASS:
		return act;

	case XDP_TX:
		u64_stats_inc(&stats->xdp_tx);
		xdpf = xdp_convert_buff_to_frame(xdp);
		if (unlikely(!xdpf)) {
			netdev_dbg(dev, "convert buff to frame failed for xdp\n");
			return XDP_DROP;
		}

		err = virtnet_xdp_xmit(dev, 1, &xdpf, 0);
		if (unlikely(!err)) {
			xdp_return_frame_rx_napi(xdpf);
		} else if (unlikely(err < 0)) {
			trace_xdp_exception(dev, xdp_prog, act);
			return XDP_DROP;
		}
		*xdp_xmit |= VIRTIO_XDP_TX;
		return act;

	case XDP_REDIRECT:
		u64_stats_inc(&stats->xdp_redirects);
		err = xdp_do_redirect(dev, xdp, xdp_prog);
		if (err)
			return XDP_DROP;

		*xdp_xmit |= VIRTIO_XDP_REDIR;
		return act;

	default:
		bpf_warn_invalid_xdp_action(dev, xdp_prog, act);
		fallthrough;
	case XDP_ABORTED:
		trace_xdp_exception(dev, xdp_prog, act);
		fallthrough;
	case XDP_DROP:
		return XDP_DROP;
	}
}

static unsigned int virtnet_get_headroom(struct virtnet_info *vi)
{
	return vi->xdp_enabled ? VIRTIO_XDP_HEADROOM : 0;
}

10764941d472SJason Wang /* We copy the packet for XDP in the following cases:
10774941d472SJason Wang  *
10784941d472SJason Wang  * 1) Packet is scattered across multiple rx buffers.
10794941d472SJason Wang  * 2) Headroom space is insufficient.
10804941d472SJason Wang  *
10814941d472SJason Wang  * This is inefficient but it's a temporary condition that
10824941d472SJason Wang  * we hit right after XDP is enabled and until queue is refilled
10834941d472SJason Wang  * with large buffers with sufficient headroom - so it should affect
10844941d472SJason Wang  * at most queue size packets.
10854941d472SJason Wang  * Afterwards, the conditions to enable
10864941d472SJason Wang  * XDP should preclude the underlying device from sending packets
10874941d472SJason Wang  * across multiple buffers (num_buf > 1), and we make sure buffers
10884941d472SJason Wang  * have enough headroom.
108972979a6cSJohn Fastabend  */
109072979a6cSJohn Fastabend static struct page *xdp_linearize_page(struct receive_queue *rq,
1091981f14d4SHeng Qi 				       int *num_buf,
109272979a6cSJohn Fastabend 				       struct page *p,
109372979a6cSJohn Fastabend 				       int offset,
10944941d472SJason Wang 				       int page_off,
109572979a6cSJohn Fastabend 				       unsigned int *len)
109672979a6cSJohn Fastabend {
1097853618d5SXuan Zhuo 	int tailroom = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
1098853618d5SXuan Zhuo 	struct page *page;
109972979a6cSJohn Fastabend 
1100853618d5SXuan Zhuo 	if (page_off + *len + tailroom > PAGE_SIZE)
1101853618d5SXuan Zhuo 		return NULL;
1102853618d5SXuan Zhuo 
1103853618d5SXuan Zhuo 	page = alloc_page(GFP_ATOMIC);
110472979a6cSJohn Fastabend 	if (!page)
110572979a6cSJohn Fastabend 		return NULL;
110672979a6cSJohn Fastabend 
110772979a6cSJohn Fastabend 	memcpy(page_address(page) + page_off, page_address(p) + offset, *len);
110872979a6cSJohn Fastabend 	page_off += *len;
110972979a6cSJohn Fastabend 
111056a86f84SJason Wang 	while (--*num_buf) {
111172979a6cSJohn Fastabend 		unsigned int buflen;
111272979a6cSJohn Fastabend 		void *buf;
111372979a6cSJohn Fastabend 		int off;
111472979a6cSJohn Fastabend 
1115295525e2SXuan Zhuo 		buf = virtnet_rq_get_buf(rq, &buflen, NULL);
1116680557cfSMichael S. Tsirkin 		if (unlikely(!buf))
111772979a6cSJohn Fastabend 			goto err_buf;
111872979a6cSJohn Fastabend 
111972979a6cSJohn Fastabend 		p = virt_to_head_page(buf);
112072979a6cSJohn Fastabend 		off = buf - page_address(p);
112172979a6cSJohn Fastabend 
112256a86f84SJason Wang 		/* guard against a misconfigured or uncooperative backend that
112356a86f84SJason Wang 		 * is sending packet larger than the MTU.
112456a86f84SJason Wang 		 */
11253cc81a9aSJason Wang 		if ((page_off + buflen + tailroom) > PAGE_SIZE) {
112656a86f84SJason Wang 			put_page(p);
112756a86f84SJason Wang 			goto err_buf;
112856a86f84SJason Wang 		}
112956a86f84SJason Wang 
113072979a6cSJohn Fastabend 		memcpy(page_address(page) + page_off,
113172979a6cSJohn Fastabend 		       page_address(p) + off, buflen);
113272979a6cSJohn Fastabend 		page_off += buflen;
113356a86f84SJason Wang 		put_page(p);
113472979a6cSJohn Fastabend 	}
113572979a6cSJohn Fastabend 
11362de2f7f4SJohn Fastabend 	/* Headroom does not contribute to packet length */
11372de2f7f4SJohn Fastabend 	*len = page_off - VIRTIO_XDP_HEADROOM;
113872979a6cSJohn Fastabend 	return page;
113972979a6cSJohn Fastabend err_buf:
114072979a6cSJohn Fastabend 	__free_pages(page, 0);
114172979a6cSJohn Fastabend 	return NULL;
114272979a6cSJohn Fastabend }
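
/* Illustrative sketch (example only, not compiled into the driver): the
 * invariant xdp_linearize_page() maintains is that the data copied so far
 * plus the next fragment plus the skb_shared_info tailroom stay within a
 * single page.
 */
#if 0	/* example only */
static bool example_linearized_fits(unsigned int page_off, unsigned int len)
{
	unsigned int tailroom = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	/* data already copied (page_off) + next fragment (len) + tailroom */
	return page_off + len + tailroom <= PAGE_SIZE;
}
#endif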
114372979a6cSJohn Fastabend 
114419e8c85eSXuan Zhuo static struct sk_buff *receive_small_build_skb(struct virtnet_info *vi,
114519e8c85eSXuan Zhuo 					       unsigned int xdp_headroom,
114619e8c85eSXuan Zhuo 					       void *buf,
114719e8c85eSXuan Zhuo 					       unsigned int len)
114819e8c85eSXuan Zhuo {
114919e8c85eSXuan Zhuo 	unsigned int header_offset;
115019e8c85eSXuan Zhuo 	unsigned int headroom;
115119e8c85eSXuan Zhuo 	unsigned int buflen;
115219e8c85eSXuan Zhuo 	struct sk_buff *skb;
115319e8c85eSXuan Zhuo 
115419e8c85eSXuan Zhuo 	header_offset = VIRTNET_RX_PAD + xdp_headroom;
115519e8c85eSXuan Zhuo 	headroom = vi->hdr_len + header_offset;
115619e8c85eSXuan Zhuo 	buflen = SKB_DATA_ALIGN(GOOD_PACKET_LEN + headroom) +
115719e8c85eSXuan Zhuo 		SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
115819e8c85eSXuan Zhuo 
115921e26a71SXuan Zhuo 	skb = virtnet_build_skb(buf, buflen, headroom, len);
116021e26a71SXuan Zhuo 	if (unlikely(!skb))
116119e8c85eSXuan Zhuo 		return NULL;
116219e8c85eSXuan Zhuo 
116319e8c85eSXuan Zhuo 	buf += header_offset;
1164dae64749SFeng Liu 	memcpy(skb_vnet_common_hdr(skb), buf, vi->hdr_len);
116519e8c85eSXuan Zhuo 
116619e8c85eSXuan Zhuo 	return skb;
116719e8c85eSXuan Zhuo }
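
/* For reference, the small-mode buffer layout assumed above (a sketch;
 * actual offsets depend on VIRTNET_RX_PAD, the XDP headroom and the
 * negotiated vnet header size):
 *
 *   buf
 *   |<-VIRTNET_RX_PAD->|<-xdp_headroom->|<-vi->hdr_len->|<- data (len) ->|
 *                                       ^ buf + header_offset
 *
 * The skb_shared_info tailroom accounted for in buflen sits after the data.
 */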
116819e8c85eSXuan Zhuo 
1169c5f3e72fSXuan Zhuo static struct sk_buff *receive_small_xdp(struct net_device *dev,
11704941d472SJason Wang 					 struct virtnet_info *vi,
11714941d472SJason Wang 					 struct receive_queue *rq,
1172c5f3e72fSXuan Zhuo 					 struct bpf_prog *xdp_prog,
1173c5f3e72fSXuan Zhuo 					 void *buf,
1174c5f3e72fSXuan Zhuo 					 unsigned int xdp_headroom,
1175186b3c99SJason Wang 					 unsigned int len,
11767d9d60fdSToshiaki Makita 					 unsigned int *xdp_xmit,
1177d46eeeafSJason Wang 					 struct virtnet_rq_stats *stats)
11784941d472SJason Wang {
11794941d472SJason Wang 	unsigned int header_offset = VIRTNET_RX_PAD + xdp_headroom;
11804941d472SJason Wang 	unsigned int headroom = vi->hdr_len + header_offset;
11814941d472SJason Wang 	struct virtio_net_hdr_mrg_rxbuf *hdr = buf + header_offset;
1182c5f3e72fSXuan Zhuo 	struct page *page = virt_to_head_page(buf);
1183c5f3e72fSXuan Zhuo 	struct page *xdp_page;
1184c5f3e72fSXuan Zhuo 	unsigned int buflen;
11854941d472SJason Wang 	struct xdp_buff xdp;
1186c5f3e72fSXuan Zhuo 	struct sk_buff *skb;
1187c5f3e72fSXuan Zhuo 	unsigned int metasize = 0;
11884941d472SJason Wang 	u32 act;
11894941d472SJason Wang 
119095dbe9e7SJesper Dangaard Brouer 	if (unlikely(hdr->hdr.gso_type))
11914941d472SJason Wang 		goto err_xdp;
11924941d472SJason Wang 
1193c5f3e72fSXuan Zhuo 	buflen = SKB_DATA_ALIGN(GOOD_PACKET_LEN + headroom) +
1194c5f3e72fSXuan Zhuo 		SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
1195c5f3e72fSXuan Zhuo 
11964941d472SJason Wang 	if (unlikely(xdp_headroom < virtnet_get_headroom(vi))) {
11974941d472SJason Wang 		int offset = buf - page_address(page) + header_offset;
11984941d472SJason Wang 		unsigned int tlen = len + vi->hdr_len;
1199981f14d4SHeng Qi 		int num_buf = 1;
12004941d472SJason Wang 
12014941d472SJason Wang 		xdp_headroom = virtnet_get_headroom(vi);
12024941d472SJason Wang 		header_offset = VIRTNET_RX_PAD + xdp_headroom;
12034941d472SJason Wang 		headroom = vi->hdr_len + header_offset;
12044941d472SJason Wang 		buflen = SKB_DATA_ALIGN(GOOD_PACKET_LEN + headroom) +
12054941d472SJason Wang 			SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
12064941d472SJason Wang 		xdp_page = xdp_linearize_page(rq, &num_buf, page,
12074941d472SJason Wang 					      offset, header_offset,
12084941d472SJason Wang 					      &tlen);
12094941d472SJason Wang 		if (!xdp_page)
12104941d472SJason Wang 			goto err_xdp;
12114941d472SJason Wang 
12124941d472SJason Wang 		buf = page_address(xdp_page);
12134941d472SJason Wang 		put_page(page);
12144941d472SJason Wang 		page = xdp_page;
12154941d472SJason Wang 	}
12164941d472SJason Wang 
121743b5169dSLorenzo Bianconi 	xdp_init_buff(&xdp, buflen, &rq->xdp_rxq);
1218be9df4afSLorenzo Bianconi 	xdp_prepare_buff(&xdp, buf + VIRTNET_RX_PAD + vi->hdr_len,
1219be9df4afSLorenzo Bianconi 			 xdp_headroom, len, true);
122000765f8eSXuan Zhuo 
122100765f8eSXuan Zhuo 	act = virtnet_xdp_handler(xdp_prog, &xdp, dev, xdp_xmit, stats);
12224941d472SJason Wang 
12234941d472SJason Wang 	switch (act) {
12244941d472SJason Wang 	case XDP_PASS:
12254941d472SJason Wang 		/* Recalculate length in case bpf program changed it */
12266870de43SNikita V. Shirokov 		len = xdp.data_end - xdp.data;
1227503d539aSYuya Kusakabe 		metasize = xdp.data - xdp.data_meta;
12284941d472SJason Wang 		break;
1229c5f3e72fSXuan Zhuo 
12304941d472SJason Wang 	case XDP_TX:
1231186b3c99SJason Wang 	case XDP_REDIRECT:
12324941d472SJason Wang 		goto xdp_xmit;
1233c5f3e72fSXuan Zhuo 
12344941d472SJason Wang 	default:
12354941d472SJason Wang 		goto err_xdp;
12364941d472SJason Wang 	}
1237c5f3e72fSXuan Zhuo 
123821e26a71SXuan Zhuo 	skb = virtnet_build_skb(buf, buflen, xdp.data - buf, len);
123921e26a71SXuan Zhuo 	if (unlikely(!skb))
1240c5f3e72fSXuan Zhuo 		goto err;
1241c5f3e72fSXuan Zhuo 
1242c5f3e72fSXuan Zhuo 	if (metasize)
1243c5f3e72fSXuan Zhuo 		skb_metadata_set(skb, metasize);
1244c5f3e72fSXuan Zhuo 
1245c5f3e72fSXuan Zhuo 	return skb;
1246c5f3e72fSXuan Zhuo 
1247c5f3e72fSXuan Zhuo err_xdp:
124827debe3eSEric Dumazet 	u64_stats_inc(&stats->xdp_drops);
1249c5f3e72fSXuan Zhuo err:
125027debe3eSEric Dumazet 	u64_stats_inc(&stats->drops);
1251c5f3e72fSXuan Zhuo 	put_page(page);
1252c5f3e72fSXuan Zhuo xdp_xmit:
1253c5f3e72fSXuan Zhuo 	return NULL;
1254c5f3e72fSXuan Zhuo }
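
/* Exit paths of receive_small_xdp() (summary): err_xdp counts an XDP drop
 * and falls through to err, which counts a generic drop and releases the
 * page; the xdp_xmit label returns NULL *without* releasing the page,
 * because XDP_TX/XDP_REDIRECT have already taken ownership of the buffer.
 */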
1255c5f3e72fSXuan Zhuo 
1256c5f3e72fSXuan Zhuo static struct sk_buff *receive_small(struct net_device *dev,
1257c5f3e72fSXuan Zhuo 				     struct virtnet_info *vi,
1258c5f3e72fSXuan Zhuo 				     struct receive_queue *rq,
1259c5f3e72fSXuan Zhuo 				     void *buf, void *ctx,
1260c5f3e72fSXuan Zhuo 				     unsigned int len,
1261c5f3e72fSXuan Zhuo 				     unsigned int *xdp_xmit,
1262c5f3e72fSXuan Zhuo 				     struct virtnet_rq_stats *stats)
1263c5f3e72fSXuan Zhuo {
1264c5f3e72fSXuan Zhuo 	unsigned int xdp_headroom = (unsigned long)ctx;
1265c5f3e72fSXuan Zhuo 	struct page *page = virt_to_head_page(buf);
1266aef76506SXuan Zhuo 	struct sk_buff *skb;
1267c5f3e72fSXuan Zhuo 
1268c5f3e72fSXuan Zhuo 	len -= vi->hdr_len;
126927debe3eSEric Dumazet 	u64_stats_add(&stats->bytes, len);
1270c5f3e72fSXuan Zhuo 
1271c5f3e72fSXuan Zhuo 	if (unlikely(len > GOOD_PACKET_LEN)) {
1272c5f3e72fSXuan Zhuo 		pr_debug("%s: rx error: len %u exceeds max size %d\n",
1273c5f3e72fSXuan Zhuo 			 dev->name, len, GOOD_PACKET_LEN);
1274e2e5c2a3SEric Dumazet 		DEV_STATS_INC(dev, rx_length_errors);
1275c5f3e72fSXuan Zhuo 		goto err;
1276c5f3e72fSXuan Zhuo 	}
1277c5f3e72fSXuan Zhuo 
1278aef76506SXuan Zhuo 	if (unlikely(vi->xdp_enabled)) {
1279aef76506SXuan Zhuo 		struct bpf_prog *xdp_prog;
1280c5f3e72fSXuan Zhuo 
1281c5f3e72fSXuan Zhuo 		rcu_read_lock();
1282c5f3e72fSXuan Zhuo 		xdp_prog = rcu_dereference(rq->xdp_prog);
1283c5f3e72fSXuan Zhuo 		if (xdp_prog) {
1284aef76506SXuan Zhuo 			skb = receive_small_xdp(dev, vi, rq, xdp_prog, buf,
1285aef76506SXuan Zhuo 						xdp_headroom, len, xdp_xmit,
1286aef76506SXuan Zhuo 						stats);
1287c5f3e72fSXuan Zhuo 			rcu_read_unlock();
1288c5f3e72fSXuan Zhuo 			return skb;
12894941d472SJason Wang 		}
12904941d472SJason Wang 		rcu_read_unlock();
1291aef76506SXuan Zhuo 	}
12924941d472SJason Wang 
129319e8c85eSXuan Zhuo 	skb = receive_small_build_skb(vi, xdp_headroom, buf, len);
129419e8c85eSXuan Zhuo 	if (likely(skb))
12954941d472SJason Wang 		return skb;
12964941d472SJason Wang 
1297053c9e18SWenliang Wang err:
129827debe3eSEric Dumazet 	u64_stats_inc(&stats->drops);
12994941d472SJason Wang 	put_page(page);
13004941d472SJason Wang 	return NULL;
13014941d472SJason Wang }
13024941d472SJason Wang 
13034941d472SJason Wang static struct sk_buff *receive_big(struct net_device *dev,
13044941d472SJason Wang 				   struct virtnet_info *vi,
13054941d472SJason Wang 				   struct receive_queue *rq,
13064941d472SJason Wang 				   void *buf,
13077d9d60fdSToshiaki Makita 				   unsigned int len,
1308d46eeeafSJason Wang 				   struct virtnet_rq_stats *stats)
13094941d472SJason Wang {
13104941d472SJason Wang 	struct page *page = buf;
1311503d539aSYuya Kusakabe 	struct sk_buff *skb =
1312fa0f1ba7SXuan Zhuo 		page_to_skb(vi, rq, page, 0, len, PAGE_SIZE, 0);
13134941d472SJason Wang 
131427debe3eSEric Dumazet 	u64_stats_add(&stats->bytes, len - vi->hdr_len);
13154941d472SJason Wang 	if (unlikely(!skb))
13164941d472SJason Wang 		goto err;
13174941d472SJason Wang 
13184941d472SJason Wang 	return skb;
13194941d472SJason Wang 
13204941d472SJason Wang err:
132127debe3eSEric Dumazet 	u64_stats_inc(&stats->drops);
13224941d472SJason Wang 	give_pages(rq, page);
13234941d472SJason Wang 	return NULL;
13244941d472SJason Wang }
13254941d472SJason Wang 
132680f50f91SXuan Zhuo static void mergeable_buf_free(struct receive_queue *rq, int num_buf,
132780f50f91SXuan Zhuo 			       struct net_device *dev,
132880f50f91SXuan Zhuo 			       struct virtnet_rq_stats *stats)
132980f50f91SXuan Zhuo {
133080f50f91SXuan Zhuo 	struct page *page;
133180f50f91SXuan Zhuo 	void *buf;
133280f50f91SXuan Zhuo 	int len;
133380f50f91SXuan Zhuo 
133480f50f91SXuan Zhuo 	while (num_buf-- > 1) {
1335295525e2SXuan Zhuo 		buf = virtnet_rq_get_buf(rq, &len, NULL);
133680f50f91SXuan Zhuo 		if (unlikely(!buf)) {
133780f50f91SXuan Zhuo 			pr_debug("%s: rx error: %d buffers missing\n",
133880f50f91SXuan Zhuo 				 dev->name, num_buf);
1339e2e5c2a3SEric Dumazet 			DEV_STATS_INC(dev, rx_length_errors);
134080f50f91SXuan Zhuo 			break;
134180f50f91SXuan Zhuo 		}
134227debe3eSEric Dumazet 		u64_stats_add(&stats->bytes, len);
134380f50f91SXuan Zhuo 		page = virt_to_head_page(buf);
134480f50f91SXuan Zhuo 		put_page(page);
134580f50f91SXuan Zhuo 	}
134680f50f91SXuan Zhuo }
134780f50f91SXuan Zhuo 
1348b26aa481SHeng Qi /* Why not use xdp_build_skb_from_frame()?
1349b26aa481SHeng Qi  * The XDP core assumes that xdp frags are PAGE_SIZE in length, while in
1350b26aa481SHeng Qi  * virtio-net there are two points that do not match its requirements:
1351b26aa481SHeng Qi  *  1. The size of the prefilled buffer is not fixed before xdp is set.
1352b26aa481SHeng Qi  *  2. xdp_build_skb_from_frame() performs extra checks that we don't
1353b26aa481SHeng Qi  *     need, like eth_type_trans() (which virtio-net does in receive_buf()).
1354b26aa481SHeng Qi  */
1355b26aa481SHeng Qi static struct sk_buff *build_skb_from_xdp_buff(struct net_device *dev,
1356b26aa481SHeng Qi 					       struct virtnet_info *vi,
1357b26aa481SHeng Qi 					       struct xdp_buff *xdp,
1358b26aa481SHeng Qi 					       unsigned int xdp_frags_truesz)
1359b26aa481SHeng Qi {
1360b26aa481SHeng Qi 	struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
1361b26aa481SHeng Qi 	unsigned int headroom, data_len;
1362b26aa481SHeng Qi 	struct sk_buff *skb;
1363b26aa481SHeng Qi 	int metasize;
1364b26aa481SHeng Qi 	u8 nr_frags;
1365b26aa481SHeng Qi 
1366b26aa481SHeng Qi 	if (unlikely(xdp->data_end > xdp_data_hard_end(xdp))) {
1367b26aa481SHeng Qi 		pr_debug("Error building skb as missing reserved tailroom for xdp\n");
1368b26aa481SHeng Qi 		return NULL;
1369b26aa481SHeng Qi 	}
1370b26aa481SHeng Qi 
1371b26aa481SHeng Qi 	if (unlikely(xdp_buff_has_frags(xdp)))
1372b26aa481SHeng Qi 		nr_frags = sinfo->nr_frags;
1373b26aa481SHeng Qi 
1374b26aa481SHeng Qi 	skb = build_skb(xdp->data_hard_start, xdp->frame_sz);
1375b26aa481SHeng Qi 	if (unlikely(!skb))
1376b26aa481SHeng Qi 		return NULL;
1377b26aa481SHeng Qi 
1378b26aa481SHeng Qi 	headroom = xdp->data - xdp->data_hard_start;
1379b26aa481SHeng Qi 	data_len = xdp->data_end - xdp->data;
1380b26aa481SHeng Qi 	skb_reserve(skb, headroom);
1381b26aa481SHeng Qi 	__skb_put(skb, data_len);
1382b26aa481SHeng Qi 
1383b26aa481SHeng Qi 	metasize = xdp->data - xdp->data_meta;
1384b26aa481SHeng Qi 	metasize = metasize > 0 ? metasize : 0;
1385b26aa481SHeng Qi 	if (metasize)
1386b26aa481SHeng Qi 		skb_metadata_set(skb, metasize);
1387b26aa481SHeng Qi 
1388b26aa481SHeng Qi 	if (unlikely(xdp_buff_has_frags(xdp)))
1389b26aa481SHeng Qi 		xdp_update_skb_shared_info(skb, nr_frags,
1390b26aa481SHeng Qi 					   sinfo->xdp_frags_size,
1391b26aa481SHeng Qi 					   xdp_frags_truesz,
1392b26aa481SHeng Qi 					   xdp_buff_is_frag_pfmemalloc(xdp));
1393b26aa481SHeng Qi 
1394b26aa481SHeng Qi 	return skb;
1395b26aa481SHeng Qi }
1396b26aa481SHeng Qi 
1397ef75cb51SHeng Qi /* TODO: build xdp in big mode */
1398ef75cb51SHeng Qi static int virtnet_build_xdp_buff_mrg(struct net_device *dev,
1399ef75cb51SHeng Qi 				      struct virtnet_info *vi,
1400ef75cb51SHeng Qi 				      struct receive_queue *rq,
1401ef75cb51SHeng Qi 				      struct xdp_buff *xdp,
1402ef75cb51SHeng Qi 				      void *buf,
1403ef75cb51SHeng Qi 				      unsigned int len,
1404ef75cb51SHeng Qi 				      unsigned int frame_sz,
1405981f14d4SHeng Qi 				      int *num_buf,
1406ef75cb51SHeng Qi 				      unsigned int *xdp_frags_truesize,
1407ef75cb51SHeng Qi 				      struct virtnet_rq_stats *stats)
1408ef75cb51SHeng Qi {
1409ef75cb51SHeng Qi 	struct virtio_net_hdr_mrg_rxbuf *hdr = buf;
1410ef75cb51SHeng Qi 	unsigned int headroom, tailroom, room;
1411ef75cb51SHeng Qi 	unsigned int truesize, cur_frag_size;
1412ef75cb51SHeng Qi 	struct skb_shared_info *shinfo;
1413ef75cb51SHeng Qi 	unsigned int xdp_frags_truesz = 0;
1414ef75cb51SHeng Qi 	struct page *page;
1415ef75cb51SHeng Qi 	skb_frag_t *frag;
1416ef75cb51SHeng Qi 	int offset;
1417ef75cb51SHeng Qi 	void *ctx;
1418ef75cb51SHeng Qi 
1419ef75cb51SHeng Qi 	xdp_init_buff(xdp, frame_sz, &rq->xdp_rxq);
1420ef75cb51SHeng Qi 	xdp_prepare_buff(xdp, buf - VIRTIO_XDP_HEADROOM,
1421ef75cb51SHeng Qi 			 VIRTIO_XDP_HEADROOM + vi->hdr_len, len - vi->hdr_len, true);
1422ef75cb51SHeng Qi 
1423981f14d4SHeng Qi 	if (!*num_buf)
1424981f14d4SHeng Qi 		return 0;
1425981f14d4SHeng Qi 
1426ef75cb51SHeng Qi 	if (*num_buf > 1) {
1427ef75cb51SHeng Qi 		/* If we want to build a multi-buffer xdp_buff, we need
1428ef75cb51SHeng Qi 		 * to set the XDP_FLAGS_HAS_FRAGS bit in the xdp_buff
1429ef75cb51SHeng Qi 		 * flags.
1430ef75cb51SHeng Qi 		 */
1431ef75cb51SHeng Qi 		if (!xdp_buff_has_frags(xdp))
1432ef75cb51SHeng Qi 			xdp_buff_set_frags_flag(xdp);
1433ef75cb51SHeng Qi 
1434ef75cb51SHeng Qi 		shinfo = xdp_get_shared_info_from_buff(xdp);
1435ef75cb51SHeng Qi 		shinfo->nr_frags = 0;
1436ef75cb51SHeng Qi 		shinfo->xdp_frags_size = 0;
1437ef75cb51SHeng Qi 	}
1438ef75cb51SHeng Qi 
1439981f14d4SHeng Qi 	if (*num_buf > MAX_SKB_FRAGS + 1)
1440ef75cb51SHeng Qi 		return -EINVAL;
1441ef75cb51SHeng Qi 
1442981f14d4SHeng Qi 	while (--*num_buf > 0) {
1443295525e2SXuan Zhuo 		buf = virtnet_rq_get_buf(rq, &len, &ctx);
1444ef75cb51SHeng Qi 		if (unlikely(!buf)) {
1445ef75cb51SHeng Qi 			pr_debug("%s: rx error: %d buffers out of %d missing\n",
1446ef75cb51SHeng Qi 				 dev->name, *num_buf,
1447ef75cb51SHeng Qi 				 virtio16_to_cpu(vi->vdev, hdr->num_buffers));
1448e2e5c2a3SEric Dumazet 			DEV_STATS_INC(dev, rx_length_errors);
14494cb00b13SXuan Zhuo 			goto err;
1450ef75cb51SHeng Qi 		}
1451ef75cb51SHeng Qi 
145227debe3eSEric Dumazet 		u64_stats_add(&stats->bytes, len);
1453ef75cb51SHeng Qi 		page = virt_to_head_page(buf);
1454ef75cb51SHeng Qi 		offset = buf - page_address(page);
1455ef75cb51SHeng Qi 
1456ef75cb51SHeng Qi 		truesize = mergeable_ctx_to_truesize(ctx);
1457ef75cb51SHeng Qi 		headroom = mergeable_ctx_to_headroom(ctx);
1458ef75cb51SHeng Qi 		tailroom = headroom ? sizeof(struct skb_shared_info) : 0;
1459ef75cb51SHeng Qi 		room = SKB_DATA_ALIGN(headroom + tailroom);
1460ef75cb51SHeng Qi 
1461ef75cb51SHeng Qi 		cur_frag_size = truesize;
1462ef75cb51SHeng Qi 		xdp_frags_truesz += cur_frag_size;
1463ef75cb51SHeng Qi 		if (unlikely(len > truesize - room || cur_frag_size > PAGE_SIZE)) {
1464ef75cb51SHeng Qi 			put_page(page);
1465ef75cb51SHeng Qi 			pr_debug("%s: rx error: len %u exceeds truesize %lu\n",
1466ef75cb51SHeng Qi 				 dev->name, len, (unsigned long)(truesize - room));
1467e2e5c2a3SEric Dumazet 			DEV_STATS_INC(dev, rx_length_errors);
14684cb00b13SXuan Zhuo 			goto err;
1469ef75cb51SHeng Qi 		}
1470ef75cb51SHeng Qi 
1471ef75cb51SHeng Qi 		frag = &shinfo->frags[shinfo->nr_frags++];
1472b51f4113SYunsheng Lin 		skb_frag_fill_page_desc(frag, page, offset, len);
1473ef75cb51SHeng Qi 		if (page_is_pfmemalloc(page))
1474ef75cb51SHeng Qi 			xdp_buff_set_frag_pfmemalloc(xdp);
1475ef75cb51SHeng Qi 
1476ef75cb51SHeng Qi 		shinfo->xdp_frags_size += len;
1477ef75cb51SHeng Qi 	}
1478ef75cb51SHeng Qi 
1479ef75cb51SHeng Qi 	*xdp_frags_truesize = xdp_frags_truesz;
1480ef75cb51SHeng Qi 	return 0;
14814cb00b13SXuan Zhuo 
14824cb00b13SXuan Zhuo err:
14834cb00b13SXuan Zhuo 	put_xdp_frags(xdp);
14844cb00b13SXuan Zhuo 	return -EINVAL;
1485ef75cb51SHeng Qi }
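
/* Note on the *num_buf > MAX_SKB_FRAGS + 1 check above: the head buffer
 * fills the linear part of the xdp_buff and every following buffer becomes
 * one frag, so at most MAX_SKB_FRAGS + 1 descriptors can be represented
 * by a single multi-buffer xdp_buff (and later by one skb).
 */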
1486ef75cb51SHeng Qi 
1487ad4858beSXuan Zhuo static void *mergeable_xdp_get_buf(struct virtnet_info *vi,
1488ad4858beSXuan Zhuo 				   struct receive_queue *rq,
1489ad4858beSXuan Zhuo 				   struct bpf_prog *xdp_prog,
1490ad4858beSXuan Zhuo 				   void *ctx,
1491ad4858beSXuan Zhuo 				   unsigned int *frame_sz,
1492ad4858beSXuan Zhuo 				   int *num_buf,
1493ad4858beSXuan Zhuo 				   struct page **page,
1494ad4858beSXuan Zhuo 				   int offset,
1495ad4858beSXuan Zhuo 				   unsigned int *len,
1496ad4858beSXuan Zhuo 				   struct virtio_net_hdr_mrg_rxbuf *hdr)
1497ad4858beSXuan Zhuo {
1498ad4858beSXuan Zhuo 	unsigned int truesize = mergeable_ctx_to_truesize(ctx);
1499ad4858beSXuan Zhuo 	unsigned int headroom = mergeable_ctx_to_headroom(ctx);
1500ad4858beSXuan Zhuo 	struct page *xdp_page;
1501ad4858beSXuan Zhuo 	unsigned int xdp_room;
1502ad4858beSXuan Zhuo 
1503ad4858beSXuan Zhuo 	/* Transient failure which in theory could occur if
1504ad4858beSXuan Zhuo 	 * in-flight packets from before XDP was enabled reach
1505ad4858beSXuan Zhuo 	 * the receive path after XDP is loaded.
1506ad4858beSXuan Zhuo 	 */
1507ad4858beSXuan Zhuo 	if (unlikely(hdr->hdr.gso_type))
1508ad4858beSXuan Zhuo 		return NULL;
1509ad4858beSXuan Zhuo 
1510ad4858beSXuan Zhuo 	/* The XDP core assumes the frag size is PAGE_SIZE, but buffers
1511ad4858beSXuan Zhuo 	 * with headroom may add a hole in truesize, which
1512ad4858beSXuan Zhuo 	 * makes their length exceed PAGE_SIZE. So we disable the
1513ad4858beSXuan Zhuo 	 * hole mechanism for XDP. See add_recvbuf_mergeable().
1514ad4858beSXuan Zhuo 	 */
1515ad4858beSXuan Zhuo 	*frame_sz = truesize;
1516ad4858beSXuan Zhuo 
1517dbe4fec2SXuan Zhuo 	if (likely(headroom >= virtnet_get_headroom(vi) &&
1518dbe4fec2SXuan Zhuo 		   (*num_buf == 1 || xdp_prog->aux->xdp_has_frags))) {
1519dbe4fec2SXuan Zhuo 		return page_address(*page) + offset;
1520dbe4fec2SXuan Zhuo 	}
1521dbe4fec2SXuan Zhuo 
1522ad4858beSXuan Zhuo 	/* This happens when headroom is not enough because
1523ad4858beSXuan Zhuo 	 * the buffer was prefilled before XDP was set.
1524ad4858beSXuan Zhuo 	 * This should only happen for the first several packets.
1525ad4858beSXuan Zhuo 	 * In fact, vq reset can be used here to help us clean up
1526ad4858beSXuan Zhuo 	 * the prefilled buffers, but many existing devices do not
1527ad4858beSXuan Zhuo 	 * support it, and we don't want to bother users who are
1528ad4858beSXuan Zhuo 	 * using xdp normally.
1529ad4858beSXuan Zhuo 	 */
1530dbe4fec2SXuan Zhuo 	if (!xdp_prog->aux->xdp_has_frags) {
1531ad4858beSXuan Zhuo 		/* linearize data for XDP */
1532ad4858beSXuan Zhuo 		xdp_page = xdp_linearize_page(rq, num_buf,
1533ad4858beSXuan Zhuo 					      *page, offset,
1534ad4858beSXuan Zhuo 					      VIRTIO_XDP_HEADROOM,
1535ad4858beSXuan Zhuo 					      len);
1536ad4858beSXuan Zhuo 		if (!xdp_page)
1537ad4858beSXuan Zhuo 			return NULL;
1538dbe4fec2SXuan Zhuo 	} else {
1539ad4858beSXuan Zhuo 		xdp_room = SKB_DATA_ALIGN(VIRTIO_XDP_HEADROOM +
1540ad4858beSXuan Zhuo 					  sizeof(struct skb_shared_info));
1541ad4858beSXuan Zhuo 		if (*len + xdp_room > PAGE_SIZE)
1542ad4858beSXuan Zhuo 			return NULL;
1543ad4858beSXuan Zhuo 
1544ad4858beSXuan Zhuo 		xdp_page = alloc_page(GFP_ATOMIC);
1545ad4858beSXuan Zhuo 		if (!xdp_page)
1546ad4858beSXuan Zhuo 			return NULL;
1547ad4858beSXuan Zhuo 
1548ad4858beSXuan Zhuo 		memcpy(page_address(xdp_page) + VIRTIO_XDP_HEADROOM,
1549ad4858beSXuan Zhuo 		       page_address(*page) + offset, *len);
1550ad4858beSXuan Zhuo 	}
1551ad4858beSXuan Zhuo 
1552dbe4fec2SXuan Zhuo 	*frame_sz = PAGE_SIZE;
1553dbe4fec2SXuan Zhuo 
1554dbe4fec2SXuan Zhuo 	put_page(*page);
1555dbe4fec2SXuan Zhuo 
1556dbe4fec2SXuan Zhuo 	*page = xdp_page;
1557dbe4fec2SXuan Zhuo 
1558dbe4fec2SXuan Zhuo 	return page_address(*page) + VIRTIO_XDP_HEADROOM;
1559ad4858beSXuan Zhuo }
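
/* Decision summary for mergeable_xdp_get_buf() (sketch):
 *
 *   headroom sufficient && (single buffer || prog has frag support)
 *                                   -> use the buffer in place
 *   prog without frag support       -> xdp_linearize_page() into one page
 *   prog with frag support, but
 *   insufficient headroom           -> copy the head buffer to a fresh page
 *
 * Both copy paths return a page with VIRTIO_XDP_HEADROOM reserved and
 * force frame_sz to PAGE_SIZE.
 */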
1560ad4858beSXuan Zhuo 
1561d8f2835aSXuan Zhuo static struct sk_buff *receive_mergeable_xdp(struct net_device *dev,
1562d8f2835aSXuan Zhuo 					     struct virtnet_info *vi,
1563d8f2835aSXuan Zhuo 					     struct receive_queue *rq,
1564d8f2835aSXuan Zhuo 					     struct bpf_prog *xdp_prog,
1565d8f2835aSXuan Zhuo 					     void *buf,
1566d8f2835aSXuan Zhuo 					     void *ctx,
1567d8f2835aSXuan Zhuo 					     unsigned int len,
1568d8f2835aSXuan Zhuo 					     unsigned int *xdp_xmit,
1569d8f2835aSXuan Zhuo 					     struct virtnet_rq_stats *stats)
1570d8f2835aSXuan Zhuo {
1571d8f2835aSXuan Zhuo 	struct virtio_net_hdr_mrg_rxbuf *hdr = buf;
1572d8f2835aSXuan Zhuo 	int num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers);
1573d8f2835aSXuan Zhuo 	struct page *page = virt_to_head_page(buf);
1574d8f2835aSXuan Zhuo 	int offset = buf - page_address(page);
1575d8f2835aSXuan Zhuo 	unsigned int xdp_frags_truesz = 0;
1576d8f2835aSXuan Zhuo 	struct sk_buff *head_skb;
1577d8f2835aSXuan Zhuo 	unsigned int frame_sz;
1578d8f2835aSXuan Zhuo 	struct xdp_buff xdp;
1579d8f2835aSXuan Zhuo 	void *data;
1580d8f2835aSXuan Zhuo 	u32 act;
1581d8f2835aSXuan Zhuo 	int err;
1582d8f2835aSXuan Zhuo 
1583d8f2835aSXuan Zhuo 	data = mergeable_xdp_get_buf(vi, rq, xdp_prog, ctx, &frame_sz, &num_buf, &page,
1584d8f2835aSXuan Zhuo 				     offset, &len, hdr);
1585d8f2835aSXuan Zhuo 	if (unlikely(!data))
1586d8f2835aSXuan Zhuo 		goto err_xdp;
1587d8f2835aSXuan Zhuo 
1588d8f2835aSXuan Zhuo 	err = virtnet_build_xdp_buff_mrg(dev, vi, rq, &xdp, data, len, frame_sz,
1589d8f2835aSXuan Zhuo 					 &num_buf, &xdp_frags_truesz, stats);
1590d8f2835aSXuan Zhuo 	if (unlikely(err))
1591d8f2835aSXuan Zhuo 		goto err_xdp;
1592d8f2835aSXuan Zhuo 
1593d8f2835aSXuan Zhuo 	act = virtnet_xdp_handler(xdp_prog, &xdp, dev, xdp_xmit, stats);
1594d8f2835aSXuan Zhuo 
1595d8f2835aSXuan Zhuo 	switch (act) {
1596d8f2835aSXuan Zhuo 	case XDP_PASS:
1597d8f2835aSXuan Zhuo 		head_skb = build_skb_from_xdp_buff(dev, vi, &xdp, xdp_frags_truesz);
1598d8f2835aSXuan Zhuo 		if (unlikely(!head_skb))
1599d8f2835aSXuan Zhuo 			break;
1600d8f2835aSXuan Zhuo 		return head_skb;
1601d8f2835aSXuan Zhuo 
1602d8f2835aSXuan Zhuo 	case XDP_TX:
1603d8f2835aSXuan Zhuo 	case XDP_REDIRECT:
1604d8f2835aSXuan Zhuo 		return NULL;
1605d8f2835aSXuan Zhuo 
1606d8f2835aSXuan Zhuo 	default:
1607d8f2835aSXuan Zhuo 		break;
1608d8f2835aSXuan Zhuo 	}
1609d8f2835aSXuan Zhuo 
1610d8f2835aSXuan Zhuo 	put_xdp_frags(&xdp);
1611d8f2835aSXuan Zhuo 
1612d8f2835aSXuan Zhuo err_xdp:
1613d8f2835aSXuan Zhuo 	put_page(page);
1614d8f2835aSXuan Zhuo 	mergeable_buf_free(rq, num_buf, dev, stats);
1615d8f2835aSXuan Zhuo 
161627debe3eSEric Dumazet 	u64_stats_inc(&stats->xdp_drops);
161727debe3eSEric Dumazet 	u64_stats_inc(&stats->drops);
1618d8f2835aSXuan Zhuo 	return NULL;
1619d8f2835aSXuan Zhuo }
1620d8f2835aSXuan Zhuo 
16218fc3b9e9SMichael S. Tsirkin static struct sk_buff *receive_mergeable(struct net_device *dev,
1622fdd819b2SMichael S. Tsirkin 					 struct virtnet_info *vi,
16238fc3b9e9SMichael S. Tsirkin 					 struct receive_queue *rq,
1624680557cfSMichael S. Tsirkin 					 void *buf,
1625680557cfSMichael S. Tsirkin 					 void *ctx,
1626186b3c99SJason Wang 					 unsigned int len,
16277d9d60fdSToshiaki Makita 					 unsigned int *xdp_xmit,
1628d46eeeafSJason Wang 					 struct virtnet_rq_stats *stats)
16299ab86bbcSShirley Ma {
1630012873d0SMichael S. Tsirkin 	struct virtio_net_hdr_mrg_rxbuf *hdr = buf;
1631981f14d4SHeng Qi 	int num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers);
16328fc3b9e9SMichael S. Tsirkin 	struct page *page = virt_to_head_page(buf);
16338fc3b9e9SMichael S. Tsirkin 	int offset = buf - page_address(page);
1634f600b690SJohn Fastabend 	struct sk_buff *head_skb, *curr_skb;
16359ce6146eSJesper Dangaard Brouer 	unsigned int truesize = mergeable_ctx_to_truesize(ctx);
16364941d472SJason Wang 	unsigned int headroom = mergeable_ctx_to_headroom(ctx);
1637ef75cb51SHeng Qi 	unsigned int tailroom = headroom ? sizeof(struct skb_shared_info) : 0;
1638ef75cb51SHeng Qi 	unsigned int room = SKB_DATA_ALIGN(headroom + tailroom);
1639ab7db917SMichael Dalton 
164056434a01SJohn Fastabend 	head_skb = NULL;
164127debe3eSEric Dumazet 	u64_stats_add(&stats->bytes, len - vi->hdr_len);
164256434a01SJohn Fastabend 
1643ef75cb51SHeng Qi 	if (unlikely(len > truesize - room)) {
1644ad993a95SXie Yongji 		pr_debug("%s: rx error: len %u exceeds truesize %lu\n",
1645ef75cb51SHeng Qi 			 dev->name, len, (unsigned long)(truesize - room));
1646e2e5c2a3SEric Dumazet 		DEV_STATS_INC(dev, rx_length_errors);
1647ad993a95SXie Yongji 		goto err_skb;
1648ad993a95SXie Yongji 	}
16496213f07cSLi RongQing 
165059ba3b1aSXuan Zhuo 	if (unlikely(vi->xdp_enabled)) {
165159ba3b1aSXuan Zhuo 		struct bpf_prog *xdp_prog;
16526213f07cSLi RongQing 
1653f600b690SJohn Fastabend 		rcu_read_lock();
1654f600b690SJohn Fastabend 		xdp_prog = rcu_dereference(rq->xdp_prog);
1655f600b690SJohn Fastabend 		if (xdp_prog) {
1656d8f2835aSXuan Zhuo 			head_skb = receive_mergeable_xdp(dev, vi, rq, xdp_prog, buf, ctx,
1657d8f2835aSXuan Zhuo 							 len, xdp_xmit, stats);
1658fab89bafSHeng Qi 			rcu_read_unlock();
16591830f893SJason Wang 			return head_skb;
166056434a01SJohn Fastabend 		}
1661f600b690SJohn Fastabend 		rcu_read_unlock();
166259ba3b1aSXuan Zhuo 	}
1663f600b690SJohn Fastabend 
1664fa0f1ba7SXuan Zhuo 	head_skb = page_to_skb(vi, rq, page, offset, len, truesize, headroom);
1665f600b690SJohn Fastabend 	curr_skb = head_skb;
16669ab86bbcSShirley Ma 
16678fc3b9e9SMichael S. Tsirkin 	if (unlikely(!curr_skb))
16688fc3b9e9SMichael S. Tsirkin 		goto err_skb;
16699ab86bbcSShirley Ma 	while (--num_buf) {
16708fc3b9e9SMichael S. Tsirkin 		int num_skb_frags;
16718fc3b9e9SMichael S. Tsirkin 
1672295525e2SXuan Zhuo 		buf = virtnet_rq_get_buf(rq, &len, &ctx);
167303e9f8a0SYunjian Wang 		if (unlikely(!buf)) {
16748fc3b9e9SMichael S. Tsirkin 			pr_debug("%s: rx error: %d buffers out of %d missing\n",
1675fdd819b2SMichael S. Tsirkin 				 dev->name, num_buf,
1676012873d0SMichael S. Tsirkin 				 virtio16_to_cpu(vi->vdev,
1677012873d0SMichael S. Tsirkin 						 hdr->num_buffers));
1678e2e5c2a3SEric Dumazet 			DEV_STATS_INC(dev, rx_length_errors);
16798fc3b9e9SMichael S. Tsirkin 			goto err_buf;
16803f2c31d9SMark McLoughlin 		}
16818fc3b9e9SMichael S. Tsirkin 
168227debe3eSEric Dumazet 		u64_stats_add(&stats->bytes, len);
16838fc3b9e9SMichael S. Tsirkin 		page = virt_to_head_page(buf);
168428b39bc7SJason Wang 
168528b39bc7SJason Wang 		truesize = mergeable_ctx_to_truesize(ctx);
1686ef75cb51SHeng Qi 		headroom = mergeable_ctx_to_headroom(ctx);
1687ef75cb51SHeng Qi 		tailroom = headroom ? sizeof(struct skb_shared_info) : 0;
1688ef75cb51SHeng Qi 		room = SKB_DATA_ALIGN(headroom + tailroom);
1689ef75cb51SHeng Qi 		if (unlikely(len > truesize - room)) {
169056da5fd0SDan Carpenter 			pr_debug("%s: rx error: len %u exceeds truesize %lu\n",
1691ef75cb51SHeng Qi 				 dev->name, len, (unsigned long)(truesize - room));
1692e2e5c2a3SEric Dumazet 			DEV_STATS_INC(dev, rx_length_errors);
1693680557cfSMichael S. Tsirkin 			goto err_skb;
1694680557cfSMichael S. Tsirkin 		}
16958fc3b9e9SMichael S. Tsirkin 
16968fc3b9e9SMichael S. Tsirkin 		num_skb_frags = skb_shinfo(curr_skb)->nr_frags;
16972613af0eSMichael Dalton 		if (unlikely(num_skb_frags == MAX_SKB_FRAGS)) {
16982613af0eSMichael Dalton 			struct sk_buff *nskb = alloc_skb(0, GFP_ATOMIC);
16998fc3b9e9SMichael S. Tsirkin 
17008fc3b9e9SMichael S. Tsirkin 			if (unlikely(!nskb))
17018fc3b9e9SMichael S. Tsirkin 				goto err_skb;
17022613af0eSMichael Dalton 			if (curr_skb == head_skb)
17032613af0eSMichael Dalton 				skb_shinfo(curr_skb)->frag_list = nskb;
17042613af0eSMichael Dalton 			else
17052613af0eSMichael Dalton 				curr_skb->next = nskb;
17062613af0eSMichael Dalton 			curr_skb = nskb;
17072613af0eSMichael Dalton 			head_skb->truesize += nskb->truesize;
17082613af0eSMichael Dalton 			num_skb_frags = 0;
17092613af0eSMichael Dalton 		}
17102613af0eSMichael Dalton 		if (curr_skb != head_skb) {
17112613af0eSMichael Dalton 			head_skb->data_len += len;
17122613af0eSMichael Dalton 			head_skb->len += len;
1713fb51879dSMichael Dalton 			head_skb->truesize += truesize;
17142613af0eSMichael Dalton 		}
17158fc3b9e9SMichael S. Tsirkin 		offset = buf - page_address(page);
1716ba275241SJason Wang 		if (skb_can_coalesce(curr_skb, num_skb_frags, page, offset)) {
1717ba275241SJason Wang 			put_page(page);
1718ba275241SJason Wang 			skb_coalesce_rx_frag(curr_skb, num_skb_frags - 1,
1719fb51879dSMichael Dalton 					     len, truesize);
1720ba275241SJason Wang 		} else {
17212613af0eSMichael Dalton 			skb_add_rx_frag(curr_skb, num_skb_frags, page,
1722fb51879dSMichael Dalton 					offset, len, truesize);
1723ba275241SJason Wang 		}
17248fc3b9e9SMichael S. Tsirkin 	}
17258fc3b9e9SMichael S. Tsirkin 
17265377d758SJohannes Berg 	ewma_pkt_len_add(&rq->mrg_avg_pkt_len, head_skb->len);
17278fc3b9e9SMichael S. Tsirkin 	return head_skb;
17288fc3b9e9SMichael S. Tsirkin 
17298fc3b9e9SMichael S. Tsirkin err_skb:
17308fc3b9e9SMichael S. Tsirkin 	put_page(page);
173180f50f91SXuan Zhuo 	mergeable_buf_free(rq, num_buf, dev, stats);
173280f50f91SXuan Zhuo 
17338fc3b9e9SMichael S. Tsirkin err_buf:
173427debe3eSEric Dumazet 	u64_stats_inc(&stats->drops);
17358fc3b9e9SMichael S. Tsirkin 	dev_kfree_skb(head_skb);
17368fc3b9e9SMichael S. Tsirkin 	return NULL;
17379ab86bbcSShirley Ma }
17389ab86bbcSShirley Ma 
173991f41f01SAndrew Melnychenko static void virtio_skb_set_hash(const struct virtio_net_hdr_v1_hash *hdr_hash,
174091f41f01SAndrew Melnychenko 				struct sk_buff *skb)
174191f41f01SAndrew Melnychenko {
174291f41f01SAndrew Melnychenko 	enum pkt_hash_types rss_hash_type;
174391f41f01SAndrew Melnychenko 
174491f41f01SAndrew Melnychenko 	if (!hdr_hash || !skb)
174591f41f01SAndrew Melnychenko 		return;
174691f41f01SAndrew Melnychenko 
174795bb6330SMichael S. Tsirkin 	switch (__le16_to_cpu(hdr_hash->hash_report)) {
174891f41f01SAndrew Melnychenko 	case VIRTIO_NET_HASH_REPORT_TCPv4:
174991f41f01SAndrew Melnychenko 	case VIRTIO_NET_HASH_REPORT_UDPv4:
175091f41f01SAndrew Melnychenko 	case VIRTIO_NET_HASH_REPORT_TCPv6:
175191f41f01SAndrew Melnychenko 	case VIRTIO_NET_HASH_REPORT_UDPv6:
175291f41f01SAndrew Melnychenko 	case VIRTIO_NET_HASH_REPORT_TCPv6_EX:
175391f41f01SAndrew Melnychenko 	case VIRTIO_NET_HASH_REPORT_UDPv6_EX:
175491f41f01SAndrew Melnychenko 		rss_hash_type = PKT_HASH_TYPE_L4;
175591f41f01SAndrew Melnychenko 		break;
175691f41f01SAndrew Melnychenko 	case VIRTIO_NET_HASH_REPORT_IPv4:
175791f41f01SAndrew Melnychenko 	case VIRTIO_NET_HASH_REPORT_IPv6:
175891f41f01SAndrew Melnychenko 	case VIRTIO_NET_HASH_REPORT_IPv6_EX:
175991f41f01SAndrew Melnychenko 		rss_hash_type = PKT_HASH_TYPE_L3;
176091f41f01SAndrew Melnychenko 		break;
176191f41f01SAndrew Melnychenko 	case VIRTIO_NET_HASH_REPORT_NONE:
176291f41f01SAndrew Melnychenko 	default:
176391f41f01SAndrew Melnychenko 		rss_hash_type = PKT_HASH_TYPE_NONE;
176491f41f01SAndrew Melnychenko 	}
176595bb6330SMichael S. Tsirkin 	skb_set_hash(skb, __le32_to_cpu(hdr_hash->hash_value), rss_hash_type);
176691f41f01SAndrew Melnychenko }
176791f41f01SAndrew Melnychenko 
17687d9d60fdSToshiaki Makita static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
17692471c75eSJesper Dangaard Brouer 			void *buf, unsigned int len, void **ctx,
1770a0929a44SToshiaki Makita 			unsigned int *xdp_xmit,
1771d46eeeafSJason Wang 			struct virtnet_rq_stats *stats)
17729ab86bbcSShirley Ma {
1773e9d7417bSJason Wang 	struct net_device *dev = vi->dev;
17749ab86bbcSShirley Ma 	struct sk_buff *skb;
1775dae64749SFeng Liu 	struct virtio_net_common_hdr *hdr;
17769ab86bbcSShirley Ma 
1777bcff3162SMichael S. Tsirkin 	if (unlikely(len < vi->hdr_len + ETH_HLEN)) {
17789ab86bbcSShirley Ma 		pr_debug("%s: short packet %i\n", dev->name, len);
1779e2e5c2a3SEric Dumazet 		DEV_STATS_INC(dev, rx_length_errors);
17803ffd05c2SXuan Zhuo 		virtnet_rq_free_buf(vi, rq, buf);
17817d9d60fdSToshiaki Makita 		return;
17829ab86bbcSShirley Ma 	}
17839ab86bbcSShirley Ma 
1784f121159dSMichael S. Tsirkin 	if (vi->mergeable_rx_bufs)
17857d9d60fdSToshiaki Makita 		skb = receive_mergeable(dev, vi, rq, buf, ctx, len, xdp_xmit,
1786a0929a44SToshiaki Makita 					stats);
1787f121159dSMichael S. Tsirkin 	else if (vi->big_packets)
1788a0929a44SToshiaki Makita 		skb = receive_big(dev, vi, rq, buf, len, stats);
1789f121159dSMichael S. Tsirkin 	else
1790a0929a44SToshiaki Makita 		skb = receive_small(dev, vi, rq, buf, ctx, len, xdp_xmit, stats);
1791f121159dSMichael S. Tsirkin 
17928fc3b9e9SMichael S. Tsirkin 	if (unlikely(!skb))
17937d9d60fdSToshiaki Makita 		return;
17943f2c31d9SMark McLoughlin 
1795dae64749SFeng Liu 	hdr = skb_vnet_common_hdr(skb);
179691f41f01SAndrew Melnychenko 	if (dev->features & NETIF_F_RXHASH && vi->has_rss_hash_report)
1797dae64749SFeng Liu 		virtio_skb_set_hash(&hdr->hash_v1_hdr, skb);
17983fa2a1dfSstephen hemminger 
1799e858fae2SMike Rapoport 	if (hdr->hdr.flags & VIRTIO_NET_HDR_F_DATA_VALID)
180010a8d94aSJason Wang 		skb->ip_summed = CHECKSUM_UNNECESSARY;
1801296f96fcSRusty Russell 
1802e858fae2SMike Rapoport 	if (virtio_net_hdr_to_skb(skb, &hdr->hdr,
1803e858fae2SMike Rapoport 				  virtio_is_little_endian(vi->vdev))) {
1804e858fae2SMike Rapoport 		net_warn_ratelimited("%s: bad gso: type: %u, size: %u\n",
1805e858fae2SMike Rapoport 				     dev->name, hdr->hdr.gso_type,
1806fdd819b2SMichael S. Tsirkin 				     hdr->hdr.gso_size);
1807296f96fcSRusty Russell 		goto frame_err;
1808296f96fcSRusty Russell 	}
1809296f96fcSRusty Russell 
1810133bbb18SWillem de Bruijn 	skb_record_rx_queue(skb, vq2rxq(rq->vq));
1811d1dc06dcSMike Rapoport 	skb->protocol = eth_type_trans(skb, dev);
1812d1dc06dcSMike Rapoport 	pr_debug("Receiving skb proto 0x%04x len %i type %i\n",
1813d1dc06dcSMike Rapoport 		 ntohs(skb->protocol), skb->len, skb->pkt_type);
1814d1dc06dcSMike Rapoport 
18150fbd050aSEric Dumazet 	napi_gro_receive(&rq->napi, skb);
18167d9d60fdSToshiaki Makita 	return;
1817296f96fcSRusty Russell 
1818296f96fcSRusty Russell frame_err:
1819e2e5c2a3SEric Dumazet 	DEV_STATS_INC(dev, rx_frame_errors);
1820296f96fcSRusty Russell 	dev_kfree_skb(skb);
1821296f96fcSRusty Russell }
1822296f96fcSRusty Russell 
1823192f68cfSJason Wang /* Unlike mergeable buffers, all buffers are allocated to the
1824192f68cfSJason Wang  * same size, except for the headroom. For this reason we do
1825192f68cfSJason Wang  * not need to use mergeable_len_to_ctx here - it is enough
1826192f68cfSJason Wang  * to store the headroom as the context ignoring the truesize.
1827192f68cfSJason Wang  */
1828946fa564SMichael S. Tsirkin static int add_recvbuf_small(struct virtnet_info *vi, struct receive_queue *rq,
1829946fa564SMichael S. Tsirkin 			     gfp_t gfp)
1830296f96fcSRusty Russell {
1831f6b10209SJason Wang 	char *buf;
18322de2f7f4SJohn Fastabend 	unsigned int xdp_headroom = virtnet_get_headroom(vi);
1833192f68cfSJason Wang 	void *ctx = (void *)(unsigned long)xdp_headroom;
1834f6b10209SJason Wang 	int len = vi->hdr_len + VIRTNET_RX_PAD + GOOD_PACKET_LEN + xdp_headroom;
18359ab86bbcSShirley Ma 	int err;
18363f2c31d9SMark McLoughlin 
1837f6b10209SJason Wang 	len = SKB_DATA_ALIGN(len) +
1838f6b10209SJason Wang 	      SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
1839295525e2SXuan Zhuo 
1840295525e2SXuan Zhuo 	buf = virtnet_rq_alloc(rq, len, gfp);
1841295525e2SXuan Zhuo 	if (unlikely(!buf))
18429ab86bbcSShirley Ma 		return -ENOMEM;
1843296f96fcSRusty Russell 
1844295525e2SXuan Zhuo 	virtnet_rq_init_one_sg(rq, buf + VIRTNET_RX_PAD + xdp_headroom,
1845f6b10209SJason Wang 			       vi->hdr_len + GOOD_PACKET_LEN);
1846295525e2SXuan Zhuo 
1847192f68cfSJason Wang 	err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp);
1848295525e2SXuan Zhuo 	if (err < 0) {
1849295525e2SXuan Zhuo 		if (rq->do_dma)
1850295525e2SXuan Zhuo 			virtnet_rq_unmap(rq, buf, 0);
1851f6b10209SJason Wang 		put_page(virt_to_head_page(buf));
1852295525e2SXuan Zhuo 	}
1853295525e2SXuan Zhuo 
18549ab86bbcSShirley Ma 	return err;
185597402b96SHerbert Xu }
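
/* Worked sizing example for add_recvbuf_small() (a sketch; assumes
 * NET_IP_ALIGN = 2, NET_SKB_PAD = 32, a 12-byte mergeable vnet header
 * and XDP disabled, so xdp_headroom = 0):
 *
 *   len = 12 + (2 + 32) + GOOD_PACKET_LEN(1518) = 1564
 *   len = SKB_DATA_ALIGN(1564) + SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
 *
 * i.e. every small-mode buffer reserves the tailroom that
 * virtnet_build_skb() later needs for the skb_shared_info.
 */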
185697402b96SHerbert Xu 
1857012873d0SMichael S. Tsirkin static int add_recvbuf_big(struct virtnet_info *vi, struct receive_queue *rq,
1858012873d0SMichael S. Tsirkin 			   gfp_t gfp)
18599ab86bbcSShirley Ma {
18609ab86bbcSShirley Ma 	struct page *first, *list = NULL;
18619ab86bbcSShirley Ma 	char *p;
18629ab86bbcSShirley Ma 	int i, err, offset;
1863296f96fcSRusty Russell 
18644959aebbSGavin Li 	sg_init_table(rq->sg, vi->big_packets_num_skbfrags + 2);
1865a5835440SRusty Russell 
18664959aebbSGavin Li 	/* page in rq->sg[vi->big_packets_num_skbfrags + 1] is list tail */
18674959aebbSGavin Li 	for (i = vi->big_packets_num_skbfrags + 1; i > 1; --i) {
1868e9d7417bSJason Wang 		first = get_a_page(rq, gfp);
18699ab86bbcSShirley Ma 		if (!first) {
18709ab86bbcSShirley Ma 			if (list)
1871e9d7417bSJason Wang 				give_pages(rq, list);
18729ab86bbcSShirley Ma 			return -ENOMEM;
1873296f96fcSRusty Russell 		}
1874e9d7417bSJason Wang 		sg_set_buf(&rq->sg[i], page_address(first), PAGE_SIZE);
18759ab86bbcSShirley Ma 
18769ab86bbcSShirley Ma 		/* chain new page in list head to match sg */
18779ab86bbcSShirley Ma 		first->private = (unsigned long)list;
18789ab86bbcSShirley Ma 		list = first;
18799ab86bbcSShirley Ma 	}
18809ab86bbcSShirley Ma 
1881e9d7417bSJason Wang 	first = get_a_page(rq, gfp);
18829ab86bbcSShirley Ma 	if (!first) {
1883e9d7417bSJason Wang 		give_pages(rq, list);
18849ab86bbcSShirley Ma 		return -ENOMEM;
18859ab86bbcSShirley Ma 	}
18869ab86bbcSShirley Ma 	p = page_address(first);
18879ab86bbcSShirley Ma 
1888e9d7417bSJason Wang 	/* rq->sg[0], rq->sg[1] share the same page */
1889012873d0SMichael S. Tsirkin 	/* a separate rq->sg[0] for header - required in case !any_header_sg */
1890012873d0SMichael S. Tsirkin 	sg_set_buf(&rq->sg[0], p, vi->hdr_len);
18919ab86bbcSShirley Ma 
1892e9d7417bSJason Wang 	/* rq->sg[1] for data packet, from offset */
18939ab86bbcSShirley Ma 	offset = sizeof(struct padded_vnet_hdr);
1894e9d7417bSJason Wang 	sg_set_buf(&rq->sg[1], p + offset, PAGE_SIZE - offset);
18959ab86bbcSShirley Ma 
18969ab86bbcSShirley Ma 	/* chain first in list head */
18979ab86bbcSShirley Ma 	first->private = (unsigned long)list;
18984959aebbSGavin Li 	err = virtqueue_add_inbuf(rq->vq, rq->sg, vi->big_packets_num_skbfrags + 2,
1899aa989f5eSMichael S. Tsirkin 				  first, gfp);
19009ab86bbcSShirley Ma 	if (err < 0)
1901e9d7417bSJason Wang 		give_pages(rq, first);
19029ab86bbcSShirley Ma 
19039ab86bbcSShirley Ma 	return err;
19049ab86bbcSShirley Ma }
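
/* Resulting scatterlist for one big-mode receive buffer (sketch):
 *
 *   rq->sg[0] -> vnet header             (start of the first page)
 *   rq->sg[1] -> first page + offset     (start of packet data)
 *   rq->sg[2..N] -> extra pages, chained through page->private
 *
 * The whole chain is then posted with a single virtqueue_add_inbuf().
 */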
19059ab86bbcSShirley Ma 
1906d85b758fSMichael S. Tsirkin static unsigned int get_mergeable_buf_len(struct receive_queue *rq,
19073cc81a9aSJason Wang 					  struct ewma_pkt_len *avg_pkt_len,
19083cc81a9aSJason Wang 					  unsigned int room)
19099ab86bbcSShirley Ma {
1910c1ddc42dSAndrew Melnychenko 	struct virtnet_info *vi = rq->vq->vdev->priv;
1911c1ddc42dSAndrew Melnychenko 	const size_t hdr_len = vi->hdr_len;
1912fbf28d78SMichael Dalton 	unsigned int len;
1913fbf28d78SMichael Dalton 
19143cc81a9aSJason Wang 	if (room)
19153cc81a9aSJason Wang 		return PAGE_SIZE - room;
19163cc81a9aSJason Wang 
19175377d758SJohannes Berg 	len = hdr_len +	clamp_t(unsigned int, ewma_pkt_len_read(avg_pkt_len),
1918f0c3192cSMichael S. Tsirkin 				rq->min_buf_len, PAGE_SIZE - hdr_len);
19193cc81a9aSJason Wang 
1920e377fcc8SMichael S. Tsirkin 	return ALIGN(len, L1_CACHE_BYTES);
1921fbf28d78SMichael Dalton }
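
/* Worked example for get_mergeable_buf_len() (a sketch; assumes
 * hdr_len = 12, rq->min_buf_len = 128, an EWMA average packet length of
 * ~1500 bytes and no XDP room):
 *
 *   len = 12 + clamp(1500, 128, PAGE_SIZE - 12) = 1512
 *   return ALIGN(1512, L1_CACHE_BYTES);	// 1536 with 64-byte lines
 *
 * With XDP enabled (room != 0) the whole page minus the reserved room is
 * used instead, so a frag never exceeds PAGE_SIZE.
 */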
1922fbf28d78SMichael Dalton 
19232de2f7f4SJohn Fastabend static int add_recvbuf_mergeable(struct virtnet_info *vi,
19242de2f7f4SJohn Fastabend 				 struct receive_queue *rq, gfp_t gfp)
1925fbf28d78SMichael Dalton {
1926fb51879dSMichael Dalton 	struct page_frag *alloc_frag = &rq->alloc_frag;
19272de2f7f4SJohn Fastabend 	unsigned int headroom = virtnet_get_headroom(vi);
19283cc81a9aSJason Wang 	unsigned int tailroom = headroom ? sizeof(struct skb_shared_info) : 0;
19293cc81a9aSJason Wang 	unsigned int room = SKB_DATA_ALIGN(headroom + tailroom);
1930fb51879dSMichael Dalton 	unsigned int len, hole;
1931295525e2SXuan Zhuo 	void *ctx;
1932295525e2SXuan Zhuo 	char *buf;
1933295525e2SXuan Zhuo 	int err;
19349ab86bbcSShirley Ma 
19353cc81a9aSJason Wang 	/* Extra tailroom is needed to satisfy XDP's assumption. This
19363cc81a9aSJason Wang 	 * means rx frag coalescing won't work, but since we've
19373cc81a9aSJason Wang 	 * disabled GSO for XDP, it won't be a big issue.
19383cc81a9aSJason Wang 	 */
19393cc81a9aSJason Wang 	len = get_mergeable_buf_len(rq, &rq->mrg_avg_pkt_len, room);
1940295525e2SXuan Zhuo 
1941295525e2SXuan Zhuo 	buf = virtnet_rq_alloc(rq, len + room, gfp);
1942295525e2SXuan Zhuo 	if (unlikely(!buf))
19439ab86bbcSShirley Ma 		return -ENOMEM;
1944ab7db917SMichael Dalton 
19452de2f7f4SJohn Fastabend 	buf += headroom; /* advance address leaving hole at front of pkt */
1946fb51879dSMichael Dalton 	hole = alloc_frag->size - alloc_frag->offset;
19473cc81a9aSJason Wang 	if (hole < len + room) {
1948ab7db917SMichael Dalton 		/* To avoid internal fragmentation, if there is very likely not
1949ab7db917SMichael Dalton 		 * enough space for another buffer, add the remaining space to
19501daa8790SMichael S. Tsirkin 		 * the current buffer.
1951484beac2SHeng Qi 		 * The XDP core assumes that the xdp_buff frame_sz and the
1952484beac2SHeng Qi 		 * frag length are PAGE_SIZE, so we disable the hole mechanism.
1953ab7db917SMichael Dalton 		 */
1954484beac2SHeng Qi 		if (!headroom)
1955fb51879dSMichael Dalton 			len += hole;
1956fb51879dSMichael Dalton 		alloc_frag->offset += hole;
1957fb51879dSMichael Dalton 	}
19589ab86bbcSShirley Ma 
1959295525e2SXuan Zhuo 	virtnet_rq_init_one_sg(rq, buf, len);
1960295525e2SXuan Zhuo 
1961ef75cb51SHeng Qi 	ctx = mergeable_len_to_ctx(len + room, headroom);
1962680557cfSMichael S. Tsirkin 	err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp);
1963295525e2SXuan Zhuo 	if (err < 0) {
1964295525e2SXuan Zhuo 		if (rq->do_dma)
1965295525e2SXuan Zhuo 			virtnet_rq_unmap(rq, buf, 0);
19662613af0eSMichael Dalton 		put_page(virt_to_head_page(buf));
1967295525e2SXuan Zhuo 	}
19689ab86bbcSShirley Ma 
19699ab86bbcSShirley Ma 	return err;
1970296f96fcSRusty Russell }
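
/* mergeable_len_to_ctx() and its mergeable_ctx_to_*() counterparts are
 * defined earlier in this file; a minimal sketch of such a packing scheme
 * follows (the ex_* names and the 22-bit split are illustrative
 * assumptions, not the authoritative layout):
 */
#if 0	/* example only */
#define EX_MRG_CTX_HEADER_SHIFT 22

static void *ex_mergeable_len_to_ctx(unsigned int truesize,
				     unsigned int headroom)
{
	/* pack headroom into the high bits, truesize into the low bits */
	return (void *)(unsigned long)((headroom << EX_MRG_CTX_HEADER_SHIFT) |
				       truesize);
}

static unsigned int ex_mergeable_ctx_to_truesize(void *mrg_ctx)
{
	return (unsigned long)mrg_ctx & ((1 << EX_MRG_CTX_HEADER_SHIFT) - 1);
}

static unsigned int ex_mergeable_ctx_to_headroom(void *mrg_ctx)
{
	return (unsigned long)mrg_ctx >> EX_MRG_CTX_HEADER_SHIFT;
}
#endif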
1971296f96fcSRusty Russell 
1972b2baed69SRusty Russell /*
1973b2baed69SRusty Russell  * Returns false if we couldn't fill entirely (OOM).
1974b2baed69SRusty Russell  *
1975b2baed69SRusty Russell  * Normally run in the receive path, but can also be run from ndo_open
1976b2baed69SRusty Russell  * before we're receiving packets, or from refill_work which is
1977b2baed69SRusty Russell  * careful to disable receiving (using napi_disable).
1978b2baed69SRusty Russell  */
1979946fa564SMichael S. Tsirkin static bool try_fill_recv(struct virtnet_info *vi, struct receive_queue *rq,
1980946fa564SMichael S. Tsirkin 			  gfp_t gfp)
19813f2c31d9SMark McLoughlin {
19823f2c31d9SMark McLoughlin 	int err;
19831788f495SMichael S. Tsirkin 	bool oom;
19843f2c31d9SMark McLoughlin 
19850aea51c3SAmit Shah 	do {
19869ab86bbcSShirley Ma 		if (vi->mergeable_rx_bufs)
19872de2f7f4SJohn Fastabend 			err = add_recvbuf_mergeable(vi, rq, gfp);
19889ab86bbcSShirley Ma 		else if (vi->big_packets)
1989012873d0SMichael S. Tsirkin 			err = add_recvbuf_big(vi, rq, gfp);
19909ab86bbcSShirley Ma 		else
1991946fa564SMichael S. Tsirkin 			err = add_recvbuf_small(vi, rq, gfp);
19923f2c31d9SMark McLoughlin 
19931788f495SMichael S. Tsirkin 		oom = err == -ENOMEM;
19949ed4cb07SRusty Russell 		if (err)
19953f2c31d9SMark McLoughlin 			break;
1996b7dfde95SLinus Torvalds 	} while (rq->vq->num_free);
1997461f03dcSToshiaki Makita 	if (virtqueue_kick_prepare(rq->vq) && virtqueue_notify(rq->vq)) {
199801c32598SMichael S. Tsirkin 		unsigned long flags;
199901c32598SMichael S. Tsirkin 
200001c32598SMichael S. Tsirkin 		flags = u64_stats_update_begin_irqsave(&rq->stats.syncp);
200127debe3eSEric Dumazet 		u64_stats_inc(&rq->stats.kicks);
200201c32598SMichael S. Tsirkin 		u64_stats_update_end_irqrestore(&rq->stats.syncp, flags);
2003461f03dcSToshiaki Makita 	}
2004461f03dcSToshiaki Makita 
20053161e453SRusty Russell 	return !oom;
20063f2c31d9SMark McLoughlin }
20073f2c31d9SMark McLoughlin 
200818445c4dSRusty Russell static void skb_recv_done(struct virtqueue *rvq)
2009296f96fcSRusty Russell {
2010296f96fcSRusty Russell 	struct virtnet_info *vi = rvq->vdev->priv;
2011986a4f4dSJason Wang 	struct receive_queue *rq = &vi->rq[vq2rxq(rvq)];
2012e9d7417bSJason Wang 
2013e4e8452aSWillem de Bruijn 	virtqueue_napi_schedule(&rq->napi, rvq);
2014296f96fcSRusty Russell }
2015296f96fcSRusty Russell 
2016e4e8452aSWillem de Bruijn static void virtnet_napi_enable(struct virtqueue *vq, struct napi_struct *napi)
20173e9d08ecSBruce Rogers {
2018e4e8452aSWillem de Bruijn 	napi_enable(napi);
20193e9d08ecSBruce Rogers 
20203e9d08ecSBruce Rogers 	/* If all buffers were filled by the other side before we enabled napi,
2021e4e8452aSWillem de Bruijn 	 * we won't get another interrupt, so process any outstanding packets now.
2022e4e8452aSWillem de Bruijn 	 * Call local_bh_enable afterwards to trigger softIRQ processing.
2023e4e8452aSWillem de Bruijn 	 */
2024ec13ee80SMichael S. Tsirkin 	local_bh_disable();
2025e4e8452aSWillem de Bruijn 	virtqueue_napi_schedule(napi, vq);
2026ec13ee80SMichael S. Tsirkin 	local_bh_enable();
20273e9d08ecSBruce Rogers }
20283e9d08ecSBruce Rogers 
2029b92f1e67SWillem de Bruijn static void virtnet_napi_tx_enable(struct virtnet_info *vi,
2030b92f1e67SWillem de Bruijn 				   struct virtqueue *vq,
2031b92f1e67SWillem de Bruijn 				   struct napi_struct *napi)
2032b92f1e67SWillem de Bruijn {
2033b92f1e67SWillem de Bruijn 	if (!napi->weight)
2034b92f1e67SWillem de Bruijn 		return;
2035b92f1e67SWillem de Bruijn 
2036b92f1e67SWillem de Bruijn 	/* Tx napi touches cachelines on the cpu handling tx interrupts. Only
2037b92f1e67SWillem de Bruijn 	 * enable the feature if this is likely affine with the transmit path.
2038b92f1e67SWillem de Bruijn 	 */
2039b92f1e67SWillem de Bruijn 	if (!vi->affinity_hint_set) {
2040b92f1e67SWillem de Bruijn 		napi->weight = 0;
2041b92f1e67SWillem de Bruijn 		return;
2042b92f1e67SWillem de Bruijn 	}
2043b92f1e67SWillem de Bruijn 
2044b92f1e67SWillem de Bruijn 	return virtnet_napi_enable(vq, napi);
2045b92f1e67SWillem de Bruijn }
2046b92f1e67SWillem de Bruijn 
204778a57b48SWillem de Bruijn static void virtnet_napi_tx_disable(struct napi_struct *napi)
204878a57b48SWillem de Bruijn {
204978a57b48SWillem de Bruijn 	if (napi->weight)
205078a57b48SWillem de Bruijn 		napi_disable(napi);
205178a57b48SWillem de Bruijn }
205278a57b48SWillem de Bruijn 
20533161e453SRusty Russell static void refill_work(struct work_struct *work)
20543161e453SRusty Russell {
2055e9d7417bSJason Wang 	struct virtnet_info *vi =
2056e9d7417bSJason Wang 		container_of(work, struct virtnet_info, refill.work);
20573161e453SRusty Russell 	bool still_empty;
2058986a4f4dSJason Wang 	int i;
20593161e453SRusty Russell 
206055257d72SSasha Levin 	for (i = 0; i < vi->curr_queue_pairs; i++) {
2061986a4f4dSJason Wang 		struct receive_queue *rq = &vi->rq[i];
2062986a4f4dSJason Wang 
2063986a4f4dSJason Wang 		napi_disable(&rq->napi);
2064946fa564SMichael S. Tsirkin 		still_empty = !try_fill_recv(vi, rq, GFP_KERNEL);
2065e4e8452aSWillem de Bruijn 		virtnet_napi_enable(rq->vq, &rq->napi);
20663161e453SRusty Russell 
20673161e453SRusty Russell 		/* In theory, this can happen: if we don't get any buffers in,
2068986a4f4dSJason Wang 		 * we will *never* try to fill again.
2069986a4f4dSJason Wang 		 */
20703161e453SRusty Russell 		if (still_empty)
20713b07e9caSTejun Heo 			schedule_delayed_work(&vi->refill, HZ/2);
20723161e453SRusty Russell 	}
2073986a4f4dSJason Wang }
20743161e453SRusty Russell 
20752471c75eSJesper Dangaard Brouer static int virtnet_receive(struct receive_queue *rq, int budget,
20762471c75eSJesper Dangaard Brouer 			   unsigned int *xdp_xmit)
2077296f96fcSRusty Russell {
2078e9d7417bSJason Wang 	struct virtnet_info *vi = rq->vq->vdev->priv;
2079d46eeeafSJason Wang 	struct virtnet_rq_stats stats = {};
2080a0929a44SToshiaki Makita 	unsigned int len;
208127debe3eSEric Dumazet 	int packets = 0;
20829ab86bbcSShirley Ma 	void *buf;
2083a0929a44SToshiaki Makita 	int i;
2084296f96fcSRusty Russell 
2085192f68cfSJason Wang 	if (!vi->big_packets || vi->mergeable_rx_bufs) {
2086680557cfSMichael S. Tsirkin 		void *ctx;
2087680557cfSMichael S. Tsirkin 
208827debe3eSEric Dumazet 		while (packets < budget &&
2089295525e2SXuan Zhuo 		       (buf = virtnet_rq_get_buf(rq, &len, &ctx))) {
2090a0929a44SToshiaki Makita 			receive_buf(vi, rq, buf, len, ctx, xdp_xmit, &stats);
209127debe3eSEric Dumazet 			packets++;
2092680557cfSMichael S. Tsirkin 		}
2093680557cfSMichael S. Tsirkin 	} else {
209427debe3eSEric Dumazet 		while (packets < budget &&
2095295525e2SXuan Zhuo 		       (buf = virtnet_rq_get_buf(rq, &len, NULL)) != NULL) {
2096a0929a44SToshiaki Makita 			receive_buf(vi, rq, buf, len, NULL, xdp_xmit, &stats);
209727debe3eSEric Dumazet 			packets++;
2098296f96fcSRusty Russell 		}
2099680557cfSMichael S. Tsirkin 	}
2100296f96fcSRusty Russell 
2101718be6baS? jiang 	if (rq->vq->num_free > min((unsigned int)budget, virtqueue_get_vring_size(rq->vq)) / 2) {
21025a159128SJason Wang 		if (!try_fill_recv(vi, rq, GFP_ATOMIC)) {
21035a159128SJason Wang 			spin_lock(&vi->refill_lock);
21045a159128SJason Wang 			if (vi->refill_enabled)
21053b07e9caSTejun Heo 				schedule_delayed_work(&vi->refill, 0);
21065a159128SJason Wang 			spin_unlock(&vi->refill_lock);
21075a159128SJason Wang 		}
21083161e453SRusty Russell 	}
2109296f96fcSRusty Russell 
211027debe3eSEric Dumazet 	u64_stats_set(&stats.packets, packets);
2111d7dfc5cfSToshiaki Makita 	u64_stats_update_begin(&rq->stats.syncp);
2112a0929a44SToshiaki Makita 	for (i = 0; i < VIRTNET_RQ_STATS_LEN; i++) {
2113a0929a44SToshiaki Makita 		size_t offset = virtnet_rq_stats_desc[i].offset;
211427debe3eSEric Dumazet 		u64_stats_t *item, *src;
2115a0929a44SToshiaki Makita 
211627debe3eSEric Dumazet 		item = (u64_stats_t *)((u8 *)&rq->stats + offset);
211727debe3eSEric Dumazet 		src = (u64_stats_t *)((u8 *)&stats + offset);
211827debe3eSEric Dumazet 		u64_stats_add(item, u64_stats_read(src));
2119a0929a44SToshiaki Makita 	}
2120d7dfc5cfSToshiaki Makita 	u64_stats_update_end(&rq->stats.syncp);
212161845d20SJason Wang 
212227debe3eSEric Dumazet 	return packets;
21232ffa7598SJason Wang }
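
/* The stats loop above walks an offset table; a minimal sketch of that
 * descriptor idiom follows (the ex_* names are illustrative; the real
 * virtnet_rq_stats_desc table is defined earlier in this file):
 */
#if 0	/* example only */
struct ex_stat_desc {
	char desc[ETH_GSTRING_LEN];
	size_t offset;
};

#define EX_RQ_STAT(m)	{ #m, offsetof(struct virtnet_rq_stats, m) }

static const struct ex_stat_desc ex_rq_stats_desc[] = {
	EX_RQ_STAT(packets),
	EX_RQ_STAT(bytes),
	EX_RQ_STAT(drops),
	EX_RQ_STAT(kicks),
};
#endif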
21242ffa7598SJason Wang 
21257b0411efSWillem de Bruijn static void virtnet_poll_cleantx(struct receive_queue *rq)
21267b0411efSWillem de Bruijn {
21277b0411efSWillem de Bruijn 	struct virtnet_info *vi = rq->vq->vdev->priv;
21287b0411efSWillem de Bruijn 	unsigned int index = vq2rxq(rq->vq);
21297b0411efSWillem de Bruijn 	struct send_queue *sq = &vi->sq[index];
21307b0411efSWillem de Bruijn 	struct netdev_queue *txq = netdev_get_tx_queue(vi->dev, index);
21317b0411efSWillem de Bruijn 
2132534da5e8SToshiaki Makita 	if (!sq->napi.weight || is_xdp_raw_buffer_queue(vi, index))
21337b0411efSWillem de Bruijn 		return;
21347b0411efSWillem de Bruijn 
21357b0411efSWillem de Bruijn 	if (__netif_tx_trylock(txq)) {
2136ebcce492SXuan Zhuo 		if (sq->reset) {
2137ebcce492SXuan Zhuo 			__netif_tx_unlock(txq);
2138ebcce492SXuan Zhuo 			return;
2139ebcce492SXuan Zhuo 		}
2140ebcce492SXuan Zhuo 
2141a7766ef1SMichael S. Tsirkin 		do {
2142a7766ef1SMichael S. Tsirkin 			virtqueue_disable_cb(sq->vq);
2143df133f3fSMichael S. Tsirkin 			free_old_xmit_skbs(sq, true);
2144a7766ef1SMichael S. Tsirkin 		} while (unlikely(!virtqueue_enable_cb_delayed(sq->vq)));
21457b0411efSWillem de Bruijn 
21467b0411efSWillem de Bruijn 		if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS)
21477b0411efSWillem de Bruijn 			netif_tx_wake_queue(txq);
214822bc63c5SMichael S. Tsirkin 
214922bc63c5SMichael S. Tsirkin 		__netif_tx_unlock(txq);
215022bc63c5SMichael S. Tsirkin 	}
21517b0411efSWillem de Bruijn }
21527b0411efSWillem de Bruijn 
21532ffa7598SJason Wang static int virtnet_poll(struct napi_struct *napi, int budget)
21542ffa7598SJason Wang {
21552ffa7598SJason Wang 	struct receive_queue *rq =
21562ffa7598SJason Wang 		container_of(napi, struct receive_queue, napi);
21579267c430SJason Wang 	struct virtnet_info *vi = rq->vq->vdev->priv;
21589267c430SJason Wang 	struct send_queue *sq;
21592a43565cSToshiaki Makita 	unsigned int received;
21602471c75eSJesper Dangaard Brouer 	unsigned int xdp_xmit = 0;
21612ffa7598SJason Wang 
21627b0411efSWillem de Bruijn 	virtnet_poll_cleantx(rq);
21637b0411efSWillem de Bruijn 
2164186b3c99SJason Wang 	received = virtnet_receive(rq, budget, &xdp_xmit);
21652ffa7598SJason Wang 
2166ad7e615fSMagnus Karlsson 	if (xdp_xmit & VIRTIO_XDP_REDIR)
2167ad7e615fSMagnus Karlsson 		xdp_do_flush();
2168ad7e615fSMagnus Karlsson 
21698329d98eSRusty Russell 	/* Out of packets? */
2170e4e8452aSWillem de Bruijn 	if (received < budget)
2171e4e8452aSWillem de Bruijn 		virtqueue_napi_complete(napi, rq->vq, received);
2172296f96fcSRusty Russell 
21732471c75eSJesper Dangaard Brouer 	if (xdp_xmit & VIRTIO_XDP_TX) {
217497c2c69eSXuan Zhuo 		sq = virtnet_xdp_get_sq(vi);
2175461f03dcSToshiaki Makita 		if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq)) {
2176461f03dcSToshiaki Makita 			u64_stats_update_begin(&sq->stats.syncp);
217727debe3eSEric Dumazet 			u64_stats_inc(&sq->stats.kicks);
2178461f03dcSToshiaki Makita 			u64_stats_update_end(&sq->stats.syncp);
2179461f03dcSToshiaki Makita 		}
218097c2c69eSXuan Zhuo 		virtnet_xdp_put_sq(vi, sq);
21819267c430SJason Wang 	}
2182186b3c99SJason Wang 
2183296f96fcSRusty Russell 	return received;
2184296f96fcSRusty Russell }
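
/* The xdp_xmit bits collected during virtnet_receive() batch the expensive
 * operations to once per NAPI poll: XDP_REDIRECT traffic gets a single
 * xdp_do_flush() and locally transmitted XDP_TX frames a single virtqueue
 * kick, rather than one flush/kick per packet.
 */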
2185296f96fcSRusty Russell 
21865306623aSFeng Liu static void virtnet_disable_queue_pair(struct virtnet_info *vi, int qp_index)
21875306623aSFeng Liu {
21885306623aSFeng Liu 	virtnet_napi_tx_disable(&vi->sq[qp_index].napi);
21895306623aSFeng Liu 	napi_disable(&vi->rq[qp_index].napi);
21905306623aSFeng Liu 	xdp_rxq_info_unreg(&vi->rq[qp_index].xdp_rxq);
21915306623aSFeng Liu }
21925306623aSFeng Liu 
21935306623aSFeng Liu static int virtnet_enable_queue_pair(struct virtnet_info *vi, int qp_index)
21945306623aSFeng Liu {
21955306623aSFeng Liu 	struct net_device *dev = vi->dev;
21965306623aSFeng Liu 	int err;
21975306623aSFeng Liu 
21985306623aSFeng Liu 	err = xdp_rxq_info_reg(&vi->rq[qp_index].xdp_rxq, dev, qp_index,
21995306623aSFeng Liu 			       vi->rq[qp_index].napi.napi_id);
22005306623aSFeng Liu 	if (err < 0)
22015306623aSFeng Liu 		return err;
22025306623aSFeng Liu 
22035306623aSFeng Liu 	err = xdp_rxq_info_reg_mem_model(&vi->rq[qp_index].xdp_rxq,
22045306623aSFeng Liu 					 MEM_TYPE_PAGE_SHARED, NULL);
22055306623aSFeng Liu 	if (err < 0)
22065306623aSFeng Liu 		goto err_xdp_reg_mem_model;
22075306623aSFeng Liu 
22085306623aSFeng Liu 	virtnet_napi_enable(vi->rq[qp_index].vq, &vi->rq[qp_index].napi);
22095306623aSFeng Liu 	virtnet_napi_tx_enable(vi, vi->sq[qp_index].vq, &vi->sq[qp_index].napi);
22105306623aSFeng Liu 
22115306623aSFeng Liu 	return 0;
22125306623aSFeng Liu 
22135306623aSFeng Liu err_xdp_reg_mem_model:
22145306623aSFeng Liu 	xdp_rxq_info_unreg(&vi->rq[qp_index].xdp_rxq);
22155306623aSFeng Liu 	return err;
22165306623aSFeng Liu }
22175306623aSFeng Liu 
2218986a4f4dSJason Wang static int virtnet_open(struct net_device *dev)
2219986a4f4dSJason Wang {
2220986a4f4dSJason Wang 	struct virtnet_info *vi = netdev_priv(dev);
2221754b8a21SJesper Dangaard Brouer 	int i, err;
2222986a4f4dSJason Wang 
22235a159128SJason Wang 	enable_delayed_refill(vi);
22245a159128SJason Wang 
2225e4166625SJason Wang 	for (i = 0; i < vi->max_queue_pairs; i++) {
2226e4166625SJason Wang 		if (i < vi->curr_queue_pairs)
2227986a4f4dSJason Wang 			/* Make sure we have some buffers: if oom use wq. */
2228946fa564SMichael S. Tsirkin 			if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL))
2229986a4f4dSJason Wang 				schedule_delayed_work(&vi->refill, 0);
2230754b8a21SJesper Dangaard Brouer 
22315306623aSFeng Liu 		err = virtnet_enable_queue_pair(vi, i);
2232754b8a21SJesper Dangaard Brouer 		if (err < 0)
22335306623aSFeng Liu 			goto err_enable_qp;
2234986a4f4dSJason Wang 	}
2235986a4f4dSJason Wang 
2236986a4f4dSJason Wang 	return 0;
22375306623aSFeng Liu 
22385306623aSFeng Liu err_enable_qp:
22395306623aSFeng Liu 	disable_delayed_refill(vi);
22405306623aSFeng Liu 	cancel_delayed_work_sync(&vi->refill);
22415306623aSFeng Liu 
22425306623aSFeng Liu 	for (i--; i >= 0; i--)
22435306623aSFeng Liu 		virtnet_disable_queue_pair(vi, i);
22445306623aSFeng Liu 	return err;
2245986a4f4dSJason Wang }
2246986a4f4dSJason Wang 
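/* NAPI poll handler for a tx virtqueue: reclaim completed skbs under the
 * tx queue lock, wake the queue once enough descriptors are free, and
 * re-arm the virtqueue callback, rescheduling NAPI if more completions
 * raced in meanwhile.
 */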
2247b92f1e67SWillem de Bruijn static int virtnet_poll_tx(struct napi_struct *napi, int budget)
2248b92f1e67SWillem de Bruijn {
2249b92f1e67SWillem de Bruijn 	struct send_queue *sq = container_of(napi, struct send_queue, napi);
2250b92f1e67SWillem de Bruijn 	struct virtnet_info *vi = sq->vq->vdev->priv;
2251534da5e8SToshiaki Makita 	unsigned int index = vq2txq(sq->vq);
2252534da5e8SToshiaki Makita 	struct netdev_queue *txq;
22535a2f966dSMichael S. Tsirkin 	int opaque;
22545a2f966dSMichael S. Tsirkin 	bool done;
2255b92f1e67SWillem de Bruijn 
2256534da5e8SToshiaki Makita 	if (unlikely(is_xdp_raw_buffer_queue(vi, index))) {
2257534da5e8SToshiaki Makita 		/* We don't need to enable cb for XDP */
2258534da5e8SToshiaki Makita 		napi_complete_done(napi, 0);
2259534da5e8SToshiaki Makita 		return 0;
2260534da5e8SToshiaki Makita 	}
2261534da5e8SToshiaki Makita 
2262534da5e8SToshiaki Makita 	txq = netdev_get_tx_queue(vi->dev, index);
2263b92f1e67SWillem de Bruijn 	__netif_tx_lock(txq, raw_smp_processor_id());
22645a2f966dSMichael S. Tsirkin 	virtqueue_disable_cb(sq->vq);
2265df133f3fSMichael S. Tsirkin 	free_old_xmit_skbs(sq, true);
22665a2f966dSMichael S. Tsirkin 
226722bc63c5SMichael S. Tsirkin 	if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS)
226822bc63c5SMichael S. Tsirkin 		netif_tx_wake_queue(txq);
226922bc63c5SMichael S. Tsirkin 
22705a2f966dSMichael S. Tsirkin 	opaque = virtqueue_enable_cb_prepare(sq->vq);
22715a2f966dSMichael S. Tsirkin 
22725a2f966dSMichael S. Tsirkin 	done = napi_complete_done(napi, 0);
22735a2f966dSMichael S. Tsirkin 
22745a2f966dSMichael S. Tsirkin 	if (!done)
22755a2f966dSMichael S. Tsirkin 		virtqueue_disable_cb(sq->vq);
22765a2f966dSMichael S. Tsirkin 
2277b92f1e67SWillem de Bruijn 	__netif_tx_unlock(txq);
2278b92f1e67SWillem de Bruijn 
22795a2f966dSMichael S. Tsirkin 	if (done) {
22805a2f966dSMichael S. Tsirkin 		if (unlikely(virtqueue_poll(sq->vq, opaque))) {
22815a2f966dSMichael S. Tsirkin 			if (napi_schedule_prep(napi)) {
22825a2f966dSMichael S. Tsirkin 				__netif_tx_lock(txq, raw_smp_processor_id());
22835a2f966dSMichael S. Tsirkin 				virtqueue_disable_cb(sq->vq);
22845a2f966dSMichael S. Tsirkin 				__netif_tx_unlock(txq);
22855a2f966dSMichael S. Tsirkin 				__napi_schedule(napi);
22865a2f966dSMichael S. Tsirkin 			}
22875a2f966dSMichael S. Tsirkin 		}
22885a2f966dSMichael S. Tsirkin 	}
2289b92f1e67SWillem de Bruijn 
2290b92f1e67SWillem de Bruijn 	return 0;
2291b92f1e67SWillem de Bruijn }
2292b92f1e67SWillem de Bruijn 
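/* Build the scatterlist for one skb - the virtio-net header followed by
 * the packet data - and add it to the send virtqueue. When alignment and
 * headroom allow, the header is pushed into the skb's headroom so that no
 * separate sg entry is needed for it.
 */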
2293e9d7417bSJason Wang static int xmit_skb(struct send_queue *sq, struct sk_buff *skb)
2294296f96fcSRusty Russell {
2295012873d0SMichael S. Tsirkin 	struct virtio_net_hdr_mrg_rxbuf *hdr;
2296296f96fcSRusty Russell 	const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest;
2297e9d7417bSJason Wang 	struct virtnet_info *vi = sq->vq->vdev->priv;
2298e2fcad58SJason A. Donenfeld 	int num_sg;
2299012873d0SMichael S. Tsirkin 	unsigned int hdr_len = vi->hdr_len;
2300e7428e95SMichael S. Tsirkin 	bool can_push;
2301296f96fcSRusty Russell 
2302e174961cSJohannes Berg 	pr_debug("%s: xmit %p %pM\n", vi->dev->name, skb, dest);
2303e7428e95SMichael S. Tsirkin 
2304e7428e95SMichael S. Tsirkin 	can_push = vi->any_header_sg &&
2305e7428e95SMichael S. Tsirkin 		!((unsigned long)skb->data & (__alignof__(*hdr) - 1)) &&
2306e7428e95SMichael S. Tsirkin 		!skb_header_cloned(skb) && skb_headroom(skb) >= hdr_len;
2307e7428e95SMichael S. Tsirkin 	/* Even if we can, don't push here yet as this would skew
2308e7428e95SMichael S. Tsirkin 	 * csum_start offset below. */
2309e7428e95SMichael S. Tsirkin 	if (can_push)
2310012873d0SMichael S. Tsirkin 		hdr = (struct virtio_net_hdr_mrg_rxbuf *)(skb->data - hdr_len);
2311e7428e95SMichael S. Tsirkin 	else
2312dae64749SFeng Liu 		hdr = &skb_vnet_common_hdr(skb)->mrg_hdr;
2313296f96fcSRusty Russell 
2314e858fae2SMike Rapoport 	if (virtio_net_hdr_from_skb(skb, &hdr->hdr,
2315fd3a8862SWillem de Bruijn 				    virtio_is_little_endian(vi->vdev), false,
2316fd3a8862SWillem de Bruijn 				    0))
231785eb1389SXianting Tian 		return -EPROTO;
2318296f96fcSRusty Russell 
2319e7428e95SMichael S. Tsirkin 	if (vi->mergeable_rx_bufs)
2320012873d0SMichael S. Tsirkin 		hdr->num_buffers = 0;
23213f2c31d9SMark McLoughlin 
2322547c890cSJason Wang 	sg_init_table(sq->sg, skb_shinfo(skb)->nr_frags + (can_push ? 1 : 2));
2323e7428e95SMichael S. Tsirkin 	if (can_push) {
2324e7428e95SMichael S. Tsirkin 		__skb_push(skb, hdr_len);
2325e7428e95SMichael S. Tsirkin 		num_sg = skb_to_sgvec(skb, sq->sg, 0, skb->len);
2326e2fcad58SJason A. Donenfeld 		if (unlikely(num_sg < 0))
2327e2fcad58SJason A. Donenfeld 			return num_sg;
2328e7428e95SMichael S. Tsirkin 		/* Pull header back to avoid skew in tx bytes calculations. */
2329e7428e95SMichael S. Tsirkin 		__skb_pull(skb, hdr_len);
2330e7428e95SMichael S. Tsirkin 	} else {
2331e7428e95SMichael S. Tsirkin 		sg_set_buf(sq->sg, hdr, hdr_len);
2332e2fcad58SJason A. Donenfeld 		num_sg = skb_to_sgvec(skb, sq->sg + 1, 0, skb->len);
2333e2fcad58SJason A. Donenfeld 		if (unlikely(num_sg < 0))
2334e2fcad58SJason A. Donenfeld 			return num_sg;
2335e2fcad58SJason A. Donenfeld 		num_sg++;
2336e7428e95SMichael S. Tsirkin 	}
23379dc7b9e4SRusty Russell 	return virtqueue_add_outbuf(sq->vq, sq->sg, num_sg, skb, GFP_ATOMIC);
233811a3a154SRusty Russell }
233911a3a154SRusty Russell 
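/* ndo_start_xmit: reclaim completed buffers, queue the skb on the send
 * queue selected by its queue mapping, stop the queue if the ring is
 * close to full, and kick the device unless more packets are pending
 * (netdev_xmit_more()).
 */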
2340424efe9cSStephen Hemminger static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
234199ffc696SRusty Russell {
234299ffc696SRusty Russell 	struct virtnet_info *vi = netdev_priv(dev);
2343986a4f4dSJason Wang 	int qnum = skb_get_queue_mapping(skb);
2344986a4f4dSJason Wang 	struct send_queue *sq = &vi->sq[qnum];
23459ed4cb07SRusty Russell 	int err;
23464b7fd2e6SMichael S. Tsirkin 	struct netdev_queue *txq = netdev_get_tx_queue(dev, qnum);
23476b16f9eeSFlorian Westphal 	bool kick = !netdev_xmit_more();
2348b92f1e67SWillem de Bruijn 	bool use_napi = sq->napi.weight;
23492cb9c6baSRusty Russell 
23502cb9c6baSRusty Russell 	/* Free up any pending old buffers before queueing new ones. */
2351a7766ef1SMichael S. Tsirkin 	do {
2352a7766ef1SMichael S. Tsirkin 		if (use_napi)
2353a7766ef1SMichael S. Tsirkin 			virtqueue_disable_cb(sq->vq);
2354a7766ef1SMichael S. Tsirkin 
2355df133f3fSMichael S. Tsirkin 		free_old_xmit_skbs(sq, false);
235699ffc696SRusty Russell 
2357a7766ef1SMichael S. Tsirkin 	} while (use_napi && kick &&
2358a7766ef1SMichael S. Tsirkin 	       unlikely(!virtqueue_enable_cb_delayed(sq->vq)));
2359bdb12e0dSWillem de Bruijn 
2360074c3582SJacob Keller 	/* timestamp packet in software */
2361074c3582SJacob Keller 	skb_tx_timestamp(skb);
2362074c3582SJacob Keller 
236303f191baSMichael S. Tsirkin 	/* Try to transmit */
2364b7dfde95SLinus Torvalds 	err = xmit_skb(sq, skb);
236599ffc696SRusty Russell 
23669ed4cb07SRusty Russell 	/* This should not happen! */
2367681daee2SJason Wang 	if (unlikely(err)) {
2368e2e5c2a3SEric Dumazet 		DEV_STATS_INC(dev, tx_fifo_errors);
23692e57b79cSRick Jones 		if (net_ratelimit())
237058eba97dSRusty Russell 			dev_warn(&dev->dev,
23717934b481SYuval Shaia 				 "Unexpected TXQ (%d) queue failure: %d\n",
23727934b481SYuval Shaia 				 qnum, err);
2373e2e5c2a3SEric Dumazet 		DEV_STATS_INC(dev, tx_dropped);
237485e94525SEric W. Biederman 		dev_kfree_skb_any(skb);
237558eba97dSRusty Russell 		return NETDEV_TX_OK;
2376296f96fcSRusty Russell 	}
237703f191baSMichael S. Tsirkin 
237848925e37SRusty Russell 	/* Don't wait up for transmitted skbs to be freed. */
2379b92f1e67SWillem de Bruijn 	if (!use_napi) {
238048925e37SRusty Russell 		skb_orphan(skb);
2381895b5c9fSFlorian Westphal 		nf_reset_ct(skb);
2382b92f1e67SWillem de Bruijn 	}
238348925e37SRusty Russell 
2384b8ef4809SXuan Zhuo 	check_sq_full_and_disable(vi, dev, sq);
238548925e37SRusty Russell 
2386461f03dcSToshiaki Makita 	if (kick || netif_xmit_stopped(txq)) {
2387461f03dcSToshiaki Makita 		if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq)) {
2388461f03dcSToshiaki Makita 			u64_stats_update_begin(&sq->stats.syncp);
238927debe3eSEric Dumazet 			u64_stats_inc(&sq->stats.kicks);
2390461f03dcSToshiaki Makita 			u64_stats_update_end(&sq->stats.syncp);
2391461f03dcSToshiaki Makita 		}
2392461f03dcSToshiaki Makita 	}
23930b725a2cSDavid S. Miller 
23940b725a2cSDavid S. Miller 	return NETDEV_TX_OK;
2395c223a078SDavid S. Miller }
2396c223a078SDavid S. Miller 
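/* Resize one rx virtqueue. Rx NAPI is paused around the resize (if the
 * device is up) and the ring is refilled afterwards, deferring to the
 * refill work on OOM.
 */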
23976a4763e2SXuan Zhuo static int virtnet_rx_resize(struct virtnet_info *vi,
23986a4763e2SXuan Zhuo 			     struct receive_queue *rq, u32 ring_num)
23996a4763e2SXuan Zhuo {
24006a4763e2SXuan Zhuo 	bool running = netif_running(vi->dev);
24016a4763e2SXuan Zhuo 	int err, qindex;
24026a4763e2SXuan Zhuo 
24036a4763e2SXuan Zhuo 	qindex = rq - vi->rq;
24046a4763e2SXuan Zhuo 
24056a4763e2SXuan Zhuo 	if (running)
24066a4763e2SXuan Zhuo 		napi_disable(&rq->napi);
24076a4763e2SXuan Zhuo 
24083ffd05c2SXuan Zhuo 	err = virtqueue_resize(rq->vq, ring_num, virtnet_rq_unmap_free_buf);
24096a4763e2SXuan Zhuo 	if (err)
24106a4763e2SXuan Zhuo 		netdev_err(vi->dev, "failed to resize rx queue %d: err %d\n", qindex, err);
24116a4763e2SXuan Zhuo 
24126a4763e2SXuan Zhuo 	if (!try_fill_recv(vi, rq, GFP_KERNEL))
24136a4763e2SXuan Zhuo 		schedule_delayed_work(&vi->refill, 0);
24146a4763e2SXuan Zhuo 
24156a4763e2SXuan Zhuo 	if (running)
24166a4763e2SXuan Zhuo 		virtnet_napi_enable(rq->vq, &rq->napi);
24176a4763e2SXuan Zhuo 	return err;
24186a4763e2SXuan Zhuo }
24196a4763e2SXuan Zhuo 
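/* Resize one tx virtqueue. The subqueue is stopped and sq->reset is set
 * under the tx lock so that neither the transmit path nor rx-side tx
 * polling touches the ring while it is being resized.
 */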
2420ebcce492SXuan Zhuo static int virtnet_tx_resize(struct virtnet_info *vi,
2421ebcce492SXuan Zhuo 			     struct send_queue *sq, u32 ring_num)
2422ebcce492SXuan Zhuo {
2423ebcce492SXuan Zhuo 	bool running = netif_running(vi->dev);
2424ebcce492SXuan Zhuo 	struct netdev_queue *txq;
2425ebcce492SXuan Zhuo 	int err, qindex;
2426ebcce492SXuan Zhuo 
2427ebcce492SXuan Zhuo 	qindex = sq - vi->sq;
2428ebcce492SXuan Zhuo 
2429ebcce492SXuan Zhuo 	if (running)
2430ebcce492SXuan Zhuo 		virtnet_napi_tx_disable(&sq->napi);
2431ebcce492SXuan Zhuo 
2432ebcce492SXuan Zhuo 	txq = netdev_get_tx_queue(vi->dev, qindex);
2433ebcce492SXuan Zhuo 
2434ebcce492SXuan Zhuo 	/* 1. wait for all in-flight xmit to complete
2435ebcce492SXuan Zhuo 	 * 2. fix the race of netif_stop_subqueue() vs netif_start_subqueue()
2436ebcce492SXuan Zhuo 	 */
2437ebcce492SXuan Zhuo 	__netif_tx_lock_bh(txq);
2438ebcce492SXuan Zhuo 
2439ebcce492SXuan Zhuo 	/* Prevent rx poll from accessing sq. */
2440ebcce492SXuan Zhuo 	sq->reset = true;
2441ebcce492SXuan Zhuo 
2442ebcce492SXuan Zhuo 	/* Prevent the upper layer from trying to send packets. */
2443ebcce492SXuan Zhuo 	netif_stop_subqueue(vi->dev, qindex);
2444ebcce492SXuan Zhuo 
2445ebcce492SXuan Zhuo 	__netif_tx_unlock_bh(txq);
2446ebcce492SXuan Zhuo 
2447ebcce492SXuan Zhuo 	err = virtqueue_resize(sq->vq, ring_num, virtnet_sq_free_unused_buf);
2448ebcce492SXuan Zhuo 	if (err)
2449ebcce492SXuan Zhuo 		netdev_err(vi->dev, "failed to resize tx queue %d: err %d\n", qindex, err);
2450ebcce492SXuan Zhuo 
2451ebcce492SXuan Zhuo 	__netif_tx_lock_bh(txq);
2452ebcce492SXuan Zhuo 	sq->reset = false;
2453ebcce492SXuan Zhuo 	netif_tx_wake_queue(txq);
2454ebcce492SXuan Zhuo 	__netif_tx_unlock_bh(txq);
2455ebcce492SXuan Zhuo 
2456ebcce492SXuan Zhuo 	if (running)
2457ebcce492SXuan Zhuo 		virtnet_napi_tx_enable(vi, sq->vq, &sq->napi);
2458ebcce492SXuan Zhuo 	return err;
2459ebcce492SXuan Zhuo }
2460ebcce492SXuan Zhuo 
246140cbfc37SAmos Kong /*
246240cbfc37SAmos Kong  * Send command via the control virtqueue and check status.  Commands
246340cbfc37SAmos Kong  * supported by the hypervisor, as indicated by feature bits, should
2464788a8b6dSstephen hemminger  * never fail unless improperly formatted.
246540cbfc37SAmos Kong  */
246640cbfc37SAmos Kong static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
2467d24bae32Sstephen hemminger 				 struct scatterlist *out)
246840cbfc37SAmos Kong {
2469f7bc9594SRusty Russell 	struct scatterlist *sgs[4], hdr, stat;
2470d24bae32Sstephen hemminger 	unsigned int out_num = 0, tmp;
2471222722bcSYunjian Wang 	int ret;
247240cbfc37SAmos Kong 
247340cbfc37SAmos Kong 	/* Caller should know better */
2474f7bc9594SRusty Russell 	BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ));
247540cbfc37SAmos Kong 
247612e57169SMichael S. Tsirkin 	vi->ctrl->status = ~0;
247712e57169SMichael S. Tsirkin 	vi->ctrl->hdr.class = class;
247812e57169SMichael S. Tsirkin 	vi->ctrl->hdr.cmd = cmd;
2479f7bc9594SRusty Russell 	/* Add header */
248012e57169SMichael S. Tsirkin 	sg_init_one(&hdr, &vi->ctrl->hdr, sizeof(vi->ctrl->hdr));
2481f7bc9594SRusty Russell 	sgs[out_num++] = &hdr;
248240cbfc37SAmos Kong 
2483f7bc9594SRusty Russell 	if (out)
2484f7bc9594SRusty Russell 		sgs[out_num++] = out;
248540cbfc37SAmos Kong 
2486f7bc9594SRusty Russell 	/* Add return status. */
248712e57169SMichael S. Tsirkin 	sg_init_one(&stat, &vi->ctrl->status, sizeof(vi->ctrl->status));
2488d24bae32Sstephen hemminger 	sgs[out_num] = &stat;
248940cbfc37SAmos Kong 
2490d24bae32Sstephen hemminger 	BUG_ON(out_num + 1 > ARRAY_SIZE(sgs));
2491222722bcSYunjian Wang 	ret = virtqueue_add_sgs(vi->cvq, sgs, out_num, 1, vi, GFP_ATOMIC);
2492222722bcSYunjian Wang 	if (ret < 0) {
2493222722bcSYunjian Wang 		dev_warn(&vi->vdev->dev,
2494222722bcSYunjian Wang 			 "Failed to add sgs for command vq: %d.\n", ret);
2495222722bcSYunjian Wang 		return false;
2496222722bcSYunjian Wang 	}
249740cbfc37SAmos Kong 
249867975901SHeinz Graalfs 	if (unlikely(!virtqueue_kick(vi->cvq)))
249912e57169SMichael S. Tsirkin 		return vi->ctrl->status == VIRTIO_NET_OK;
250040cbfc37SAmos Kong 
250140cbfc37SAmos Kong 	/* Spin for a response; the kick causes an ioport write, trapping
250240cbfc37SAmos Kong 	 * into the hypervisor, so the request should be handled immediately.
250340cbfc37SAmos Kong 	 */
2504047b9b94SHeinz Graalfs 	while (!virtqueue_get_buf(vi->cvq, &tmp) &&
2505047b9b94SHeinz Graalfs 	       !virtqueue_is_broken(vi->cvq))
250640cbfc37SAmos Kong 		cpu_relax();
250740cbfc37SAmos Kong 
250812e57169SMichael S. Tsirkin 	return vi->ctrl->status == VIRTIO_NET_OK;
250940cbfc37SAmos Kong }
251040cbfc37SAmos Kong 
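/* ndo_set_mac_address: refused while the standby feature is negotiated;
 * otherwise prefer the VIRTIO_NET_CTRL_MAC_ADDR_SET control command and,
 * for legacy devices without VIRTIO_F_VERSION_1, fall back to writing
 * the address into config space one byte at a time.
 */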
25119c46f6d4SAlex Williamson static int virtnet_set_mac_address(struct net_device *dev, void *p)
25129c46f6d4SAlex Williamson {
25139c46f6d4SAlex Williamson 	struct virtnet_info *vi = netdev_priv(dev);
25149c46f6d4SAlex Williamson 	struct virtio_device *vdev = vi->vdev;
2515f2f2c8b4SJiri Pirko 	int ret;
2516e37e2ff3SAndy Lutomirski 	struct sockaddr *addr;
25177e58d5aeSAmos Kong 	struct scatterlist sg;
25189c46f6d4SAlex Williamson 
2519ba5e4426SSridhar Samudrala 	if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STANDBY))
2520ba5e4426SSridhar Samudrala 		return -EOPNOTSUPP;
2521ba5e4426SSridhar Samudrala 
2522801822d1SShyam Saini 	addr = kmemdup(p, sizeof(*addr), GFP_KERNEL);
2523e37e2ff3SAndy Lutomirski 	if (!addr)
2524e37e2ff3SAndy Lutomirski 		return -ENOMEM;
2525e37e2ff3SAndy Lutomirski 
2526e37e2ff3SAndy Lutomirski 	ret = eth_prepare_mac_addr_change(dev, addr);
2527f2f2c8b4SJiri Pirko 	if (ret)
2528e37e2ff3SAndy Lutomirski 		goto out;
25299c46f6d4SAlex Williamson 
25307e58d5aeSAmos Kong 	if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR)) {
25317e58d5aeSAmos Kong 		sg_init_one(&sg, addr->sa_data, dev->addr_len);
25327e58d5aeSAmos Kong 		if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
2533d24bae32Sstephen hemminger 					  VIRTIO_NET_CTRL_MAC_ADDR_SET, &sg)) {
25347e58d5aeSAmos Kong 			dev_warn(&vdev->dev,
25357e58d5aeSAmos Kong 				 "Failed to set mac address by vq command.\n");
2536e37e2ff3SAndy Lutomirski 			ret = -EINVAL;
2537e37e2ff3SAndy Lutomirski 			goto out;
25387e58d5aeSAmos Kong 		}
25397e93a02fSMichael S. Tsirkin 	} else if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC) &&
25407e93a02fSMichael S. Tsirkin 		   !virtio_has_feature(vdev, VIRTIO_F_VERSION_1)) {
2541855e0c52SRusty Russell 		unsigned int i;
2542855e0c52SRusty Russell 
2543855e0c52SRusty Russell 		/* Naturally, this has an atomicity problem. */
2544855e0c52SRusty Russell 		for (i = 0; i < dev->addr_len; i++)
2545855e0c52SRusty Russell 			virtio_cwrite8(vdev,
2546855e0c52SRusty Russell 				       offsetof(struct virtio_net_config, mac) +
2547855e0c52SRusty Russell 				       i, addr->sa_data[i]);
25487e58d5aeSAmos Kong 	}
25497e58d5aeSAmos Kong 
25507e58d5aeSAmos Kong 	eth_commit_mac_addr_change(dev, p);
2551e37e2ff3SAndy Lutomirski 	ret = 0;
25529c46f6d4SAlex Williamson 
2553e37e2ff3SAndy Lutomirski out:
2554e37e2ff3SAndy Lutomirski 	kfree(addr);
2555e37e2ff3SAndy Lutomirski 	return ret;
25569c46f6d4SAlex Williamson }
25579c46f6d4SAlex Williamson 
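/* ndo_get_stats64: sum the per-queue tx/rx counters, sampling each under
 * its u64_stats sequence lock, and fold in the device-level error
 * counters.
 */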
2558bc1f4470Sstephen hemminger static void virtnet_stats(struct net_device *dev,
25593fa2a1dfSstephen hemminger 			  struct rtnl_link_stats64 *tot)
25603fa2a1dfSstephen hemminger {
25613fa2a1dfSstephen hemminger 	struct virtnet_info *vi = netdev_priv(dev);
25623fa2a1dfSstephen hemminger 	unsigned int start;
2563d7dfc5cfSToshiaki Makita 	int i;
25643fa2a1dfSstephen hemminger 
2565d7dfc5cfSToshiaki Makita 	for (i = 0; i < vi->max_queue_pairs; i++) {
2566a520794bSTony Lu 		u64 tpackets, tbytes, terrors, rpackets, rbytes, rdrops;
2567d7dfc5cfSToshiaki Makita 		struct receive_queue *rq = &vi->rq[i];
2568d7dfc5cfSToshiaki Makita 		struct send_queue *sq = &vi->sq[i];
25693fa2a1dfSstephen hemminger 
25703fa2a1dfSstephen hemminger 		do {
2571068c38adSThomas Gleixner 			start = u64_stats_fetch_begin(&sq->stats.syncp);
257227debe3eSEric Dumazet 			tpackets = u64_stats_read(&sq->stats.packets);
257327debe3eSEric Dumazet 			tbytes   = u64_stats_read(&sq->stats.bytes);
257427debe3eSEric Dumazet 			terrors  = u64_stats_read(&sq->stats.tx_timeouts);
2575068c38adSThomas Gleixner 		} while (u64_stats_fetch_retry(&sq->stats.syncp, start));
257683a27052SEric Dumazet 
257783a27052SEric Dumazet 		do {
2578068c38adSThomas Gleixner 			start = u64_stats_fetch_begin(&rq->stats.syncp);
257927debe3eSEric Dumazet 			rpackets = u64_stats_read(&rq->stats.packets);
258027debe3eSEric Dumazet 			rbytes   = u64_stats_read(&rq->stats.bytes);
258127debe3eSEric Dumazet 			rdrops   = u64_stats_read(&rq->stats.drops);
2582068c38adSThomas Gleixner 		} while (u64_stats_fetch_retry(&rq->stats.syncp, start));
25833fa2a1dfSstephen hemminger 
25843fa2a1dfSstephen hemminger 		tot->rx_packets += rpackets;
25853fa2a1dfSstephen hemminger 		tot->tx_packets += tpackets;
25863fa2a1dfSstephen hemminger 		tot->rx_bytes   += rbytes;
25873fa2a1dfSstephen hemminger 		tot->tx_bytes   += tbytes;
25882c4a2f7dSToshiaki Makita 		tot->rx_dropped += rdrops;
2589a520794bSTony Lu 		tot->tx_errors  += terrors;
25903fa2a1dfSstephen hemminger 	}
25913fa2a1dfSstephen hemminger 
2592e2e5c2a3SEric Dumazet 	tot->tx_dropped = DEV_STATS_READ(dev, tx_dropped);
2593e2e5c2a3SEric Dumazet 	tot->tx_fifo_errors = DEV_STATS_READ(dev, tx_fifo_errors);
2594e2e5c2a3SEric Dumazet 	tot->rx_length_errors = DEV_STATS_READ(dev, rx_length_errors);
2595e2e5c2a3SEric Dumazet 	tot->rx_frame_errors = DEV_STATS_READ(dev, rx_frame_errors);
25963fa2a1dfSstephen hemminger }
25973fa2a1dfSstephen hemminger 
2598586d17c5SJason Wang static void virtnet_ack_link_announce(struct virtnet_info *vi)
2599586d17c5SJason Wang {
2600586d17c5SJason Wang 	rtnl_lock();
2601586d17c5SJason Wang 	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_ANNOUNCE,
2602d24bae32Sstephen hemminger 				  VIRTIO_NET_CTRL_ANNOUNCE_ACK, NULL))
2603586d17c5SJason Wang 		dev_warn(&vi->dev->dev, "Failed to ack link announce.\n");
2604586d17c5SJason Wang 	rtnl_unlock();
2605586d17c5SJason Wang }
2606586d17c5SJason Wang 
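/* Tell the device how many queue pairs to use via the control virtqueue.
 * virtnet_set_queues() below is the rtnl-locked wrapper around this.
 */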
260747315329SJohn Fastabend static int _virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
2608986a4f4dSJason Wang {
2609986a4f4dSJason Wang 	struct scatterlist sg;
2610986a4f4dSJason Wang 	struct net_device *dev = vi->dev;
2611986a4f4dSJason Wang 
2612986a4f4dSJason Wang 	if (!vi->has_cvq || !virtio_has_feature(vi->vdev, VIRTIO_NET_F_MQ))
2613986a4f4dSJason Wang 		return 0;
2614986a4f4dSJason Wang 
261512e57169SMichael S. Tsirkin 	vi->ctrl->mq.virtqueue_pairs = cpu_to_virtio16(vi->vdev, queue_pairs);
261612e57169SMichael S. Tsirkin 	sg_init_one(&sg, &vi->ctrl->mq, sizeof(vi->ctrl->mq));
2617986a4f4dSJason Wang 
2618986a4f4dSJason Wang 	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ,
2619d24bae32Sstephen hemminger 				  VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET, &sg)) {
2620986a4f4dSJason Wang 		dev_warn(&dev->dev, "Failed to set the number of queue pairs to %d\n",
2621986a4f4dSJason Wang 			 queue_pairs);
2622986a4f4dSJason Wang 		return -EINVAL;
262355257d72SSasha Levin 	} else {
2624986a4f4dSJason Wang 		vi->curr_queue_pairs = queue_pairs;
262535ed159bSJason Wang 		/* virtnet_open() will refill when device is going to up. */
262635ed159bSJason Wang 		/* virtnet_open() will refill when the device goes up. */
26279b9cd802SJason Wang 			schedule_delayed_work(&vi->refill, 0);
262855257d72SSasha Levin 	}
2629986a4f4dSJason Wang 
2630986a4f4dSJason Wang 	return 0;
2631986a4f4dSJason Wang }
2632986a4f4dSJason Wang 
263347315329SJohn Fastabend static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
263447315329SJohn Fastabend {
263547315329SJohn Fastabend 	int err;
263647315329SJohn Fastabend 
263747315329SJohn Fastabend 	rtnl_lock();
263847315329SJohn Fastabend 	err = _virtnet_set_queues(vi, queue_pairs);
263947315329SJohn Fastabend 	rtnl_unlock();
264047315329SJohn Fastabend 	return err;
264147315329SJohn Fastabend }
264247315329SJohn Fastabend 
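/* ndo_stop: stop the deferred refill work before touching NAPI so it
 * cannot re-enable a queue being torn down, then disable every queue
 * pair.
 */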
2643296f96fcSRusty Russell static int virtnet_close(struct net_device *dev)
2644296f96fcSRusty Russell {
2645296f96fcSRusty Russell 	struct virtnet_info *vi = netdev_priv(dev);
2646986a4f4dSJason Wang 	int i;
2647296f96fcSRusty Russell 
26485a159128SJason Wang 	/* Make sure NAPI doesn't schedule refill work */
26495a159128SJason Wang 	disable_delayed_refill(vi);
2650b2baed69SRusty Russell 	/* Make sure refill_work doesn't re-enable napi! */
2651b2baed69SRusty Russell 	cancel_delayed_work_sync(&vi->refill);
2652986a4f4dSJason Wang 
26535306623aSFeng Liu 	for (i = 0; i < vi->max_queue_pairs; i++)
26545306623aSFeng Liu 		virtnet_disable_queue_pair(vi, i);
2655296f96fcSRusty Russell 
2656296f96fcSRusty Russell 	return 0;
2657296f96fcSRusty Russell }
2658296f96fcSRusty Russell 
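/* ndo_set_rx_mode: push the promiscuous/allmulti flags and the unicast
 * and multicast MAC filter lists to the device over the control
 * virtqueue. This can run in atomic context, hence the GFP_ATOMIC
 * allocation for the filter table.
 */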
26592af7698eSAlex Williamson static void virtnet_set_rx_mode(struct net_device *dev)
26602af7698eSAlex Williamson {
26612af7698eSAlex Williamson 	struct virtnet_info *vi = netdev_priv(dev);
2662f565a7c2SAlex Williamson 	struct scatterlist sg[2];
2663f565a7c2SAlex Williamson 	struct virtio_net_ctrl_mac *mac_data;
2664ccffad25SJiri Pirko 	struct netdev_hw_addr *ha;
266532e7bfc4SJiri Pirko 	int uc_count;
26664cd24eafSJiri Pirko 	int mc_count;
2667f565a7c2SAlex Williamson 	void *buf;
2668f565a7c2SAlex Williamson 	int i;
26692af7698eSAlex Williamson 
2670788a8b6dSstephen hemminger 	/* We can't dynamically set ndo_set_rx_mode, so return gracefully */
26712af7698eSAlex Williamson 	if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_RX))
26722af7698eSAlex Williamson 		return;
26732af7698eSAlex Williamson 
267412e57169SMichael S. Tsirkin 	vi->ctrl->promisc = ((dev->flags & IFF_PROMISC) != 0);
267512e57169SMichael S. Tsirkin 	vi->ctrl->allmulti = ((dev->flags & IFF_ALLMULTI) != 0);
26762af7698eSAlex Williamson 
267712e57169SMichael S. Tsirkin 	sg_init_one(sg, &vi->ctrl->promisc, sizeof(vi->ctrl->promisc));
26782af7698eSAlex Williamson 
26792af7698eSAlex Williamson 	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
2680d24bae32Sstephen hemminger 				  VIRTIO_NET_CTRL_RX_PROMISC, sg))
26812af7698eSAlex Williamson 		dev_warn(&dev->dev, "Failed to %sable promisc mode.\n",
268212e57169SMichael S. Tsirkin 			 vi->ctrl->promisc ? "en" : "dis");
26832af7698eSAlex Williamson 
268412e57169SMichael S. Tsirkin 	sg_init_one(sg, &vi->ctrl->allmulti, sizeof(vi->ctrl->allmulti));
26852af7698eSAlex Williamson 
26862af7698eSAlex Williamson 	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
2687d24bae32Sstephen hemminger 				  VIRTIO_NET_CTRL_RX_ALLMULTI, sg))
26882af7698eSAlex Williamson 		dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n",
268912e57169SMichael S. Tsirkin 			 vi->ctrl->allmulti ? "en" : "dis");
2690f565a7c2SAlex Williamson 
269132e7bfc4SJiri Pirko 	uc_count = netdev_uc_count(dev);
26924cd24eafSJiri Pirko 	mc_count = netdev_mc_count(dev);
2693f565a7c2SAlex Williamson 	/* MAC filter - use one buffer for both lists */
26944cd24eafSJiri Pirko 	buf = kzalloc(((uc_count + mc_count) * ETH_ALEN) +
2695f565a7c2SAlex Williamson 		      (2 * sizeof(mac_data->entries)), GFP_ATOMIC);
26964cd24eafSJiri Pirko 	mac_data = buf;
2697e68ed8f0SJoe Perches 	if (!buf)
2698f565a7c2SAlex Williamson 		return;
2699f565a7c2SAlex Williamson 
270023e258e1SAlex Williamson 	sg_init_table(sg, 2);
270123e258e1SAlex Williamson 
2702f565a7c2SAlex Williamson 	/* Store the unicast list and count in the front of the buffer */
2703fdd819b2SMichael S. Tsirkin 	mac_data->entries = cpu_to_virtio32(vi->vdev, uc_count);
2704ccffad25SJiri Pirko 	i = 0;
270532e7bfc4SJiri Pirko 	netdev_for_each_uc_addr(ha, dev)
2706ccffad25SJiri Pirko 		memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN);
2707f565a7c2SAlex Williamson 
2708f565a7c2SAlex Williamson 	sg_set_buf(&sg[0], mac_data,
270932e7bfc4SJiri Pirko 		   sizeof(mac_data->entries) + (uc_count * ETH_ALEN));
2710f565a7c2SAlex Williamson 
2711f565a7c2SAlex Williamson 	/* multicast list and count fill the end */
271232e7bfc4SJiri Pirko 	mac_data = (void *)&mac_data->macs[uc_count][0];
2713f565a7c2SAlex Williamson 
2714fdd819b2SMichael S. Tsirkin 	mac_data->entries = cpu_to_virtio32(vi->vdev, mc_count);
2715567ec874SJiri Pirko 	i = 0;
271622bedad3SJiri Pirko 	netdev_for_each_mc_addr(ha, dev)
271722bedad3SJiri Pirko 		memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN);
2718f565a7c2SAlex Williamson 
2719f565a7c2SAlex Williamson 	sg_set_buf(&sg[1], mac_data,
27204cd24eafSJiri Pirko 		   sizeof(mac_data->entries) + (mc_count * ETH_ALEN));
2721f565a7c2SAlex Williamson 
2722f565a7c2SAlex Williamson 	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
2723d24bae32Sstephen hemminger 				  VIRTIO_NET_CTRL_MAC_TABLE_SET, sg))
272499e872aeSThomas Huth 		dev_warn(&dev->dev, "Failed to set MAC filter table.\n");
2725f565a7c2SAlex Williamson 
2726f565a7c2SAlex Williamson 	kfree(buf);
27272af7698eSAlex Williamson }
27282af7698eSAlex Williamson 
272980d5c368SPatrick McHardy static int virtnet_vlan_rx_add_vid(struct net_device *dev,
273080d5c368SPatrick McHardy 				   __be16 proto, u16 vid)
27310bde9569SAlex Williamson {
27320bde9569SAlex Williamson 	struct virtnet_info *vi = netdev_priv(dev);
27330bde9569SAlex Williamson 	struct scatterlist sg;
27340bde9569SAlex Williamson 
2735d7fad4c8SMichael S. Tsirkin 	vi->ctrl->vid = cpu_to_virtio16(vi->vdev, vid);
273612e57169SMichael S. Tsirkin 	sg_init_one(&sg, &vi->ctrl->vid, sizeof(vi->ctrl->vid));
27370bde9569SAlex Williamson 
27380bde9569SAlex Williamson 	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
2739d24bae32Sstephen hemminger 				  VIRTIO_NET_CTRL_VLAN_ADD, &sg))
27400bde9569SAlex Williamson 		dev_warn(&dev->dev, "Failed to add VLAN ID %d.\n", vid);
27418e586137SJiri Pirko 	return 0;
27420bde9569SAlex Williamson }
27430bde9569SAlex Williamson 
274480d5c368SPatrick McHardy static int virtnet_vlan_rx_kill_vid(struct net_device *dev,
274580d5c368SPatrick McHardy 				    __be16 proto, u16 vid)
27460bde9569SAlex Williamson {
27470bde9569SAlex Williamson 	struct virtnet_info *vi = netdev_priv(dev);
27480bde9569SAlex Williamson 	struct scatterlist sg;
27490bde9569SAlex Williamson 
2750d7fad4c8SMichael S. Tsirkin 	vi->ctrl->vid = cpu_to_virtio16(vi->vdev, vid);
275112e57169SMichael S. Tsirkin 	sg_init_one(&sg, &vi->ctrl->vid, sizeof(vi->ctrl->vid));
27520bde9569SAlex Williamson 
27530bde9569SAlex Williamson 	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
2754d24bae32Sstephen hemminger 				  VIRTIO_NET_CTRL_VLAN_DEL, &sg))
27550bde9569SAlex Williamson 		dev_warn(&dev->dev, "Failed to kill VLAN ID %d.\n", vid);
27568e586137SJiri Pirko 	return 0;
27570bde9569SAlex Williamson }
27580bde9569SAlex Williamson 
2759310974faSPeter Xu static void virtnet_clean_affinity(struct virtnet_info *vi)
2760986a4f4dSJason Wang {
2761986a4f4dSJason Wang 	int i;
27628898c21cSWanlong Gao 
27638898c21cSWanlong Gao 	if (vi->affinity_hint_set) {
27648898c21cSWanlong Gao 		for (i = 0; i < vi->max_queue_pairs; i++) {
276519e226e8SCaleb Raitto 			virtqueue_set_affinity(vi->rq[i].vq, NULL);
276619e226e8SCaleb Raitto 			virtqueue_set_affinity(vi->sq[i].vq, NULL);
27678898c21cSWanlong Gao 		}
27688898c21cSWanlong Gao 
27698898c21cSWanlong Gao 		vi->affinity_hint_set = false;
27708898c21cSWanlong Gao 	}
27718898c21cSWanlong Gao }
27728898c21cSWanlong Gao 
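/* Spread the queue pairs across the online CPUs: each pair gets a
 * contiguous group of CPUs as its virtqueue affinity hint and matching
 * XPS map, with any remainder CPUs handed out one per earlier queue.
 */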
27738898c21cSWanlong Gao static void virtnet_set_affinity(struct virtnet_info *vi)
2774986a4f4dSJason Wang {
27752ca653d6SCaleb Raitto 	cpumask_var_t mask;
27762ca653d6SCaleb Raitto 	int stragglers;
27772ca653d6SCaleb Raitto 	int group_size;
27782ca653d6SCaleb Raitto 	int i, j, cpu;
27792ca653d6SCaleb Raitto 	int num_cpu;
27802ca653d6SCaleb Raitto 	int stride;
2781986a4f4dSJason Wang 
27822ca653d6SCaleb Raitto 	if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) {
2783310974faSPeter Xu 		virtnet_clean_affinity(vi);
2784986a4f4dSJason Wang 		return;
2785986a4f4dSJason Wang 	}
2786986a4f4dSJason Wang 
27872ca653d6SCaleb Raitto 	num_cpu = num_online_cpus();
27882ca653d6SCaleb Raitto 	stride = max_t(int, num_cpu / vi->curr_queue_pairs, 1);
27892ca653d6SCaleb Raitto 	stragglers = num_cpu >= vi->curr_queue_pairs ?
27902ca653d6SCaleb Raitto 			num_cpu % vi->curr_queue_pairs :
27912ca653d6SCaleb Raitto 			0;
27929b51d9d8SYury Norov 	cpu = cpumask_first(cpu_online_mask);
27934d99f660SAndrei Vagin 
27942ca653d6SCaleb Raitto 	for (i = 0; i < vi->curr_queue_pairs; i++) {
27952ca653d6SCaleb Raitto 		group_size = stride + (i < stragglers ? 1 : 0);
27962ca653d6SCaleb Raitto 
27972ca653d6SCaleb Raitto 		for (j = 0; j < group_size; j++) {
27982ca653d6SCaleb Raitto 			cpumask_set_cpu(cpu, mask);
27992ca653d6SCaleb Raitto 			cpu = cpumask_next_wrap(cpu, cpu_online_mask,
28002ca653d6SCaleb Raitto 						nr_cpu_ids, false);
28012ca653d6SCaleb Raitto 		}
28022ca653d6SCaleb Raitto 		virtqueue_set_affinity(vi->rq[i].vq, mask);
28032ca653d6SCaleb Raitto 		virtqueue_set_affinity(vi->sq[i].vq, mask);
2804044ab86dSAntoine Tenart 		__netif_set_xps_queue(vi->dev, cpumask_bits(mask), i, XPS_CPUS);
28052ca653d6SCaleb Raitto 		cpumask_clear(mask);
2806986a4f4dSJason Wang 	}
2807986a4f4dSJason Wang 
2808986a4f4dSJason Wang 	vi->affinity_hint_set = true;
28092ca653d6SCaleb Raitto 	free_cpumask_var(mask);
281047be2479SWanlong Gao }
2811986a4f4dSJason Wang 
28128017c279SSebastian Andrzej Siewior static int virtnet_cpu_online(unsigned int cpu, struct hlist_node *node)
28138de4b2f3SWanlong Gao {
28148017c279SSebastian Andrzej Siewior 	struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info,
28158017c279SSebastian Andrzej Siewior 						   node);
28168de4b2f3SWanlong Gao 	virtnet_set_affinity(vi);
28178017c279SSebastian Andrzej Siewior 	return 0;
28188de4b2f3SWanlong Gao }
28193ab098dfSJason Wang 
28208017c279SSebastian Andrzej Siewior static int virtnet_cpu_dead(unsigned int cpu, struct hlist_node *node)
28218017c279SSebastian Andrzej Siewior {
28228017c279SSebastian Andrzej Siewior 	struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info,
28238017c279SSebastian Andrzej Siewior 						   node_dead);
28248017c279SSebastian Andrzej Siewior 	virtnet_set_affinity(vi);
28258017c279SSebastian Andrzej Siewior 	return 0;
28268017c279SSebastian Andrzej Siewior }
28278017c279SSebastian Andrzej Siewior 
28288017c279SSebastian Andrzej Siewior static int virtnet_cpu_down_prep(unsigned int cpu, struct hlist_node *node)
28298017c279SSebastian Andrzej Siewior {
28308017c279SSebastian Andrzej Siewior 	struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info,
28318017c279SSebastian Andrzej Siewior 						   node);
28328017c279SSebastian Andrzej Siewior 
2833310974faSPeter Xu 	virtnet_clean_affinity(vi);
28348017c279SSebastian Andrzej Siewior 	return 0;
28358017c279SSebastian Andrzej Siewior }
28368017c279SSebastian Andrzej Siewior 
28378017c279SSebastian Andrzej Siewior static enum cpuhp_state virtionet_online;
28388017c279SSebastian Andrzej Siewior 
28398017c279SSebastian Andrzej Siewior static int virtnet_cpu_notif_add(struct virtnet_info *vi)
28408017c279SSebastian Andrzej Siewior {
28418017c279SSebastian Andrzej Siewior 	int ret;
28428017c279SSebastian Andrzej Siewior 
28438017c279SSebastian Andrzej Siewior 	ret = cpuhp_state_add_instance_nocalls(virtionet_online, &vi->node);
28448017c279SSebastian Andrzej Siewior 	if (ret)
28458017c279SSebastian Andrzej Siewior 		return ret;
28468017c279SSebastian Andrzej Siewior 	ret = cpuhp_state_add_instance_nocalls(CPUHP_VIRT_NET_DEAD,
28478017c279SSebastian Andrzej Siewior 					       &vi->node_dead);
28488017c279SSebastian Andrzej Siewior 	if (!ret)
28498017c279SSebastian Andrzej Siewior 		return ret;
28508017c279SSebastian Andrzej Siewior 	cpuhp_state_remove_instance_nocalls(virtionet_online, &vi->node);
28518017c279SSebastian Andrzej Siewior 	return ret;
28528017c279SSebastian Andrzej Siewior }
28538017c279SSebastian Andrzej Siewior 
28548017c279SSebastian Andrzej Siewior static void virtnet_cpu_notif_remove(struct virtnet_info *vi)
28558017c279SSebastian Andrzej Siewior {
28568017c279SSebastian Andrzej Siewior 	cpuhp_state_remove_instance_nocalls(virtionet_online, &vi->node);
28578017c279SSebastian Andrzej Siewior 	cpuhp_state_remove_instance_nocalls(CPUHP_VIRT_NET_DEAD,
28588017c279SSebastian Andrzej Siewior 					    &vi->node_dead);
2859a9ea3fc6SHerbert Xu }
2860a9ea3fc6SHerbert Xu 
28618f9f4668SRick Jones static void virtnet_get_ringparam(struct net_device *dev,
286274624944SHao Chen 				  struct ethtool_ringparam *ring,
286374624944SHao Chen 				  struct kernel_ethtool_ringparam *kernel_ring,
286474624944SHao Chen 				  struct netlink_ext_ack *extack)
28658f9f4668SRick Jones {
28668f9f4668SRick Jones 	struct virtnet_info *vi = netdev_priv(dev);
28678f9f4668SRick Jones 
28688597b5ddSXuan Zhuo 	ring->rx_max_pending = vi->rq[0].vq->num_max;
28698597b5ddSXuan Zhuo 	ring->tx_max_pending = vi->sq[0].vq->num_max;
28708597b5ddSXuan Zhuo 	ring->rx_pending = virtqueue_get_vring_size(vi->rq[0].vq);
28718597b5ddSXuan Zhuo 	ring->tx_pending = virtqueue_get_vring_size(vi->sq[0].vq);
28728f9f4668SRick Jones }
28738f9f4668SRick Jones 
287439d591b0SHeng Qi static int virtnet_send_ctrl_coal_vq_cmd(struct virtnet_info *vi,
287539d591b0SHeng Qi 					 u16 vqn, u32 max_usecs, u32 max_packets);
287639d591b0SHeng Qi 
2877a335b33fSXuan Zhuo static int virtnet_set_ringparam(struct net_device *dev,
2878a335b33fSXuan Zhuo 				 struct ethtool_ringparam *ring,
2879a335b33fSXuan Zhuo 				 struct kernel_ethtool_ringparam *kernel_ring,
2880a335b33fSXuan Zhuo 				 struct netlink_ext_ack *extack)
2881a335b33fSXuan Zhuo {
2882a335b33fSXuan Zhuo 	struct virtnet_info *vi = netdev_priv(dev);
2883a335b33fSXuan Zhuo 	u32 rx_pending, tx_pending;
2884a335b33fSXuan Zhuo 	struct receive_queue *rq;
2885a335b33fSXuan Zhuo 	struct send_queue *sq;
2886a335b33fSXuan Zhuo 	int i, err;
2887a335b33fSXuan Zhuo 
2888a335b33fSXuan Zhuo 	if (ring->rx_mini_pending || ring->rx_jumbo_pending)
2889a335b33fSXuan Zhuo 		return -EINVAL;
2890a335b33fSXuan Zhuo 
2891a335b33fSXuan Zhuo 	rx_pending = virtqueue_get_vring_size(vi->rq[0].vq);
2892a335b33fSXuan Zhuo 	tx_pending = virtqueue_get_vring_size(vi->sq[0].vq);
2893a335b33fSXuan Zhuo 
2894a335b33fSXuan Zhuo 	if (ring->rx_pending == rx_pending &&
2895a335b33fSXuan Zhuo 	    ring->tx_pending == tx_pending)
2896a335b33fSXuan Zhuo 		return 0;
2897a335b33fSXuan Zhuo 
2898a335b33fSXuan Zhuo 	if (ring->rx_pending > vi->rq[0].vq->num_max)
2899a335b33fSXuan Zhuo 		return -EINVAL;
2900a335b33fSXuan Zhuo 
2901a335b33fSXuan Zhuo 	if (ring->tx_pending > vi->sq[0].vq->num_max)
2902a335b33fSXuan Zhuo 		return -EINVAL;
2903a335b33fSXuan Zhuo 
2904a335b33fSXuan Zhuo 	for (i = 0; i < vi->max_queue_pairs; i++) {
2905a335b33fSXuan Zhuo 		rq = vi->rq + i;
2906a335b33fSXuan Zhuo 		sq = vi->sq + i;
2907a335b33fSXuan Zhuo 
2908a335b33fSXuan Zhuo 		if (ring->tx_pending != tx_pending) {
2909a335b33fSXuan Zhuo 			err = virtnet_tx_resize(vi, sq, ring->tx_pending);
2910a335b33fSXuan Zhuo 			if (err)
2911a335b33fSXuan Zhuo 				return err;
291239d591b0SHeng Qi 
291339d591b0SHeng Qi 			/* Upon disabling and re-enabling a transmit virtqueue, the device must
291439d591b0SHeng Qi 			 * set the coalescing parameters of the virtqueue to those configured
291539d591b0SHeng Qi 			 * through the VIRTIO_NET_CTRL_NOTF_COAL_TX_SET command, or, if the driver
291639d591b0SHeng Qi 			 * did not set any TX coalescing parameters, to 0.
291739d591b0SHeng Qi 			 */
291839d591b0SHeng Qi 			err = virtnet_send_ctrl_coal_vq_cmd(vi, txq2vq(i),
291939d591b0SHeng Qi 							    vi->intr_coal_tx.max_usecs,
292039d591b0SHeng Qi 							    vi->intr_coal_tx.max_packets);
292139d591b0SHeng Qi 			if (err)
292239d591b0SHeng Qi 				return err;
292339d591b0SHeng Qi 
292439d591b0SHeng Qi 			vi->sq[i].intr_coal.max_usecs = vi->intr_coal_tx.max_usecs;
292539d591b0SHeng Qi 			vi->sq[i].intr_coal.max_packets = vi->intr_coal_tx.max_packets;
2926a335b33fSXuan Zhuo 		}
2927a335b33fSXuan Zhuo 
2928a335b33fSXuan Zhuo 		if (ring->rx_pending != rx_pending) {
2929a335b33fSXuan Zhuo 			err = virtnet_rx_resize(vi, rq, ring->rx_pending);
2930a335b33fSXuan Zhuo 			if (err)
2931a335b33fSXuan Zhuo 				return err;
293239d591b0SHeng Qi 
293339d591b0SHeng Qi 			/* Same reasoning as for the transmit virtqueue reset above */
293439d591b0SHeng Qi 			err = virtnet_send_ctrl_coal_vq_cmd(vi, rxq2vq(i),
293539d591b0SHeng Qi 							    vi->intr_coal_rx.max_usecs,
293639d591b0SHeng Qi 							    vi->intr_coal_rx.max_packets);
293739d591b0SHeng Qi 			if (err)
293839d591b0SHeng Qi 				return err;
293939d591b0SHeng Qi 
294039d591b0SHeng Qi 			vi->rq[i].intr_coal.max_usecs = vi->intr_coal_rx.max_usecs;
294139d591b0SHeng Qi 			vi->rq[i].intr_coal.max_packets = vi->intr_coal_rx.max_packets;
2942a335b33fSXuan Zhuo 		}
2943a335b33fSXuan Zhuo 	}
2944a335b33fSXuan Zhuo 
2945a335b33fSXuan Zhuo 	return 0;
2946a9ea3fc6SHerbert Xu }
2947a9ea3fc6SHerbert Xu 
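/* Push the current RSS/hash configuration to the device: the header,
 * indirection table, max_tx_vq field and hash key are passed as four
 * scatterlist entries in a single VIRTIO_NET_CTRL_MQ command, using
 * either the RSS or the plain hash-config variant.
 */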
2948c7114b12SAndrew Melnychenko static bool virtnet_commit_rss_command(struct virtnet_info *vi)
2949c7114b12SAndrew Melnychenko {
2950c7114b12SAndrew Melnychenko 	struct net_device *dev = vi->dev;
2951c7114b12SAndrew Melnychenko 	struct scatterlist sgs[4];
2952c7114b12SAndrew Melnychenko 	unsigned int sg_buf_size;
2953c7114b12SAndrew Melnychenko 
2954c7114b12SAndrew Melnychenko 	/* prepare sgs */
2955c7114b12SAndrew Melnychenko 	sg_init_table(sgs, 4);
2956c7114b12SAndrew Melnychenko 
2957c7114b12SAndrew Melnychenko 	sg_buf_size = offsetof(struct virtio_net_ctrl_rss, indirection_table);
2958c7114b12SAndrew Melnychenko 	sg_set_buf(&sgs[0], &vi->ctrl->rss, sg_buf_size);
2959c7114b12SAndrew Melnychenko 
2960c7114b12SAndrew Melnychenko 	sg_buf_size = sizeof(uint16_t) * (vi->ctrl->rss.indirection_table_mask + 1);
2961c7114b12SAndrew Melnychenko 	sg_set_buf(&sgs[1], vi->ctrl->rss.indirection_table, sg_buf_size);
2962c7114b12SAndrew Melnychenko 
2963c7114b12SAndrew Melnychenko 	sg_buf_size = offsetof(struct virtio_net_ctrl_rss, key)
2964c7114b12SAndrew Melnychenko 			- offsetof(struct virtio_net_ctrl_rss, max_tx_vq);
2965c7114b12SAndrew Melnychenko 	sg_set_buf(&sgs[2], &vi->ctrl->rss.max_tx_vq, sg_buf_size);
2966c7114b12SAndrew Melnychenko 
2967c7114b12SAndrew Melnychenko 	sg_buf_size = vi->rss_key_size;
2968c7114b12SAndrew Melnychenko 	sg_set_buf(&sgs[3], vi->ctrl->rss.key, sg_buf_size);
2969c7114b12SAndrew Melnychenko 
2970c7114b12SAndrew Melnychenko 	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ,
297191f41f01SAndrew Melnychenko 				  vi->has_rss ? VIRTIO_NET_CTRL_MQ_RSS_CONFIG
297291f41f01SAndrew Melnychenko 				  : VIRTIO_NET_CTRL_MQ_HASH_CONFIG, sgs)) {
2973c7114b12SAndrew Melnychenko 		dev_warn(&dev->dev, "Failed to commit RSS configuration\n");
2974c7114b12SAndrew Melnychenko 		return false;
2975c7114b12SAndrew Melnychenko 	}
2976c7114b12SAndrew Melnychenko 	return true;
2977c7114b12SAndrew Melnychenko }
2978c7114b12SAndrew Melnychenko 
2979c7114b12SAndrew Melnychenko static void virtnet_init_default_rss(struct virtnet_info *vi)
2980c7114b12SAndrew Melnychenko {
2981c7114b12SAndrew Melnychenko 	u32 indir_val = 0;
2982c7114b12SAndrew Melnychenko 	int i = 0;
2983c7114b12SAndrew Melnychenko 
2984c7114b12SAndrew Melnychenko 	vi->ctrl->rss.hash_types = vi->rss_hash_types_supported;
2985c1170820SAndrew Melnychenko 	vi->rss_hash_types_saved = vi->rss_hash_types_supported;
2986c7114b12SAndrew Melnychenko 	vi->ctrl->rss.indirection_table_mask = vi->rss_indir_table_size
2987c7114b12SAndrew Melnychenko 						? vi->rss_indir_table_size - 1 : 0;
2988c7114b12SAndrew Melnychenko 	vi->ctrl->rss.unclassified_queue = 0;
2989c7114b12SAndrew Melnychenko 
2990c7114b12SAndrew Melnychenko 	for (; i < vi->rss_indir_table_size; ++i) {
2991c7114b12SAndrew Melnychenko 		indir_val = ethtool_rxfh_indir_default(i, vi->curr_queue_pairs);
2992c7114b12SAndrew Melnychenko 		vi->ctrl->rss.indirection_table[i] = indir_val;
2993c7114b12SAndrew Melnychenko 	}
2994c7114b12SAndrew Melnychenko 
29952c507ce9SHawkins Jiawei 	vi->ctrl->rss.max_tx_vq = vi->has_rss ? vi->curr_queue_pairs : 0;
2996c7114b12SAndrew Melnychenko 	vi->ctrl->rss.hash_key_length = vi->rss_key_size;
2997c7114b12SAndrew Melnychenko 
2998c7114b12SAndrew Melnychenko 	netdev_rss_key_fill(vi->ctrl->rss.key, vi->rss_key_size);
2999c7114b12SAndrew Melnychenko }
3000c7114b12SAndrew Melnychenko 
3001c1170820SAndrew Melnychenko static void virtnet_get_hashflow(const struct virtnet_info *vi, struct ethtool_rxnfc *info)
3002c1170820SAndrew Melnychenko {
3003c1170820SAndrew Melnychenko 	info->data = 0;
3004c1170820SAndrew Melnychenko 	switch (info->flow_type) {
3005c1170820SAndrew Melnychenko 	case TCP_V4_FLOW:
3006c1170820SAndrew Melnychenko 		if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_TCPv4) {
3007c1170820SAndrew Melnychenko 			info->data = RXH_IP_SRC | RXH_IP_DST |
3008c1170820SAndrew Melnychenko 						 RXH_L4_B_0_1 | RXH_L4_B_2_3;
3009c1170820SAndrew Melnychenko 		} else if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv4) {
3010c1170820SAndrew Melnychenko 			info->data = RXH_IP_SRC | RXH_IP_DST;
3011c1170820SAndrew Melnychenko 		}
3012c1170820SAndrew Melnychenko 		break;
3013c1170820SAndrew Melnychenko 	case TCP_V6_FLOW:
3014c1170820SAndrew Melnychenko 		if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_TCPv6) {
3015c1170820SAndrew Melnychenko 			info->data = RXH_IP_SRC | RXH_IP_DST |
3016c1170820SAndrew Melnychenko 						 RXH_L4_B_0_1 | RXH_L4_B_2_3;
3017c1170820SAndrew Melnychenko 		} else if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv6) {
3018c1170820SAndrew Melnychenko 			info->data = RXH_IP_SRC | RXH_IP_DST;
3019c1170820SAndrew Melnychenko 		}
3020c1170820SAndrew Melnychenko 		break;
3021c1170820SAndrew Melnychenko 	case UDP_V4_FLOW:
3022c1170820SAndrew Melnychenko 		if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_UDPv4) {
3023c1170820SAndrew Melnychenko 			info->data = RXH_IP_SRC | RXH_IP_DST |
3024c1170820SAndrew Melnychenko 						 RXH_L4_B_0_1 | RXH_L4_B_2_3;
3025c1170820SAndrew Melnychenko 		} else if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv4) {
3026c1170820SAndrew Melnychenko 			info->data = RXH_IP_SRC | RXH_IP_DST;
3027c1170820SAndrew Melnychenko 		}
3028c1170820SAndrew Melnychenko 		break;
3029c1170820SAndrew Melnychenko 	case UDP_V6_FLOW:
3030c1170820SAndrew Melnychenko 		if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_UDPv6) {
3031c1170820SAndrew Melnychenko 			info->data = RXH_IP_SRC | RXH_IP_DST |
3032c1170820SAndrew Melnychenko 						 RXH_L4_B_0_1 | RXH_L4_B_2_3;
3033c1170820SAndrew Melnychenko 		} else if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv6) {
3034c1170820SAndrew Melnychenko 			info->data = RXH_IP_SRC | RXH_IP_DST;
3035c1170820SAndrew Melnychenko 		}
3036c1170820SAndrew Melnychenko 		break;
3037c1170820SAndrew Melnychenko 	case IPV4_FLOW:
3038c1170820SAndrew Melnychenko 		if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv4)
3039c1170820SAndrew Melnychenko 			info->data = RXH_IP_SRC | RXH_IP_DST;
3040c1170820SAndrew Melnychenko 
3041c1170820SAndrew Melnychenko 		break;
3042c1170820SAndrew Melnychenko 	case IPV6_FLOW:
3043c1170820SAndrew Melnychenko 		if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv6)
3044c1170820SAndrew Melnychenko 			info->data = RXH_IP_SRC | RXH_IP_DST;
3045c1170820SAndrew Melnychenko 
3046c1170820SAndrew Melnychenko 		break;
3047c1170820SAndrew Melnychenko 	default:
3048c1170820SAndrew Melnychenko 		info->data = 0;
3049c1170820SAndrew Melnychenko 		break;
3050c1170820SAndrew Melnychenko 	}
3051c1170820SAndrew Melnychenko }
3052c1170820SAndrew Melnychenko 
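/* Translate an ethtool RXNFC hash request ('sd', 'sdfn' or RXH_DISCARD)
 * into virtio RSS hash types for the given flow type, and commit the
 * result if RXHASH is enabled. Returns false for unsupported requests.
 */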
3053c1170820SAndrew Melnychenko static bool virtnet_set_hashflow(struct virtnet_info *vi, struct ethtool_rxnfc *info)
3054c1170820SAndrew Melnychenko {
3055c1170820SAndrew Melnychenko 	u32 new_hashtypes = vi->rss_hash_types_saved;
3056c1170820SAndrew Melnychenko 	bool is_disable = info->data & RXH_DISCARD;
3057c1170820SAndrew Melnychenko 	bool is_l4 = info->data == (RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3);
3058c1170820SAndrew Melnychenko 
3059c1170820SAndrew Melnychenko 	/* supports only 'sd', 'sdfn' and 'r' */
3060c1170820SAndrew Melnychenko 	if (!((info->data == (RXH_IP_SRC | RXH_IP_DST)) | is_l4 | is_disable))
3061c1170820SAndrew Melnychenko 		return false;
3062c1170820SAndrew Melnychenko 
3063c1170820SAndrew Melnychenko 	switch (info->flow_type) {
3064c1170820SAndrew Melnychenko 	case TCP_V4_FLOW:
3065c1170820SAndrew Melnychenko 		new_hashtypes &= ~(VIRTIO_NET_RSS_HASH_TYPE_IPv4 | VIRTIO_NET_RSS_HASH_TYPE_TCPv4);
3066c1170820SAndrew Melnychenko 		if (!is_disable)
3067c1170820SAndrew Melnychenko 			new_hashtypes |= VIRTIO_NET_RSS_HASH_TYPE_IPv4
3068c1170820SAndrew Melnychenko 				| (is_l4 ? VIRTIO_NET_RSS_HASH_TYPE_TCPv4 : 0);
3069c1170820SAndrew Melnychenko 		break;
3070c1170820SAndrew Melnychenko 	case UDP_V4_FLOW:
3071c1170820SAndrew Melnychenko 		new_hashtypes &= ~(VIRTIO_NET_RSS_HASH_TYPE_IPv4 | VIRTIO_NET_RSS_HASH_TYPE_UDPv4);
3072c1170820SAndrew Melnychenko 		if (!is_disable)
3073c1170820SAndrew Melnychenko 			new_hashtypes |= VIRTIO_NET_RSS_HASH_TYPE_IPv4
3074c1170820SAndrew Melnychenko 				| (is_l4 ? VIRTIO_NET_RSS_HASH_TYPE_UDPv4 : 0);
3075c1170820SAndrew Melnychenko 		break;
3076c1170820SAndrew Melnychenko 	case IPV4_FLOW:
3077c1170820SAndrew Melnychenko 		new_hashtypes &= ~VIRTIO_NET_RSS_HASH_TYPE_IPv4;
3078c1170820SAndrew Melnychenko 		if (!is_disable)
3079c1170820SAndrew Melnychenko 			new_hashtypes = VIRTIO_NET_RSS_HASH_TYPE_IPv4;
3080c1170820SAndrew Melnychenko 		break;
3081c1170820SAndrew Melnychenko 	case TCP_V6_FLOW:
3082c1170820SAndrew Melnychenko 		new_hashtypes &= ~(VIRTIO_NET_RSS_HASH_TYPE_IPv6 | VIRTIO_NET_RSS_HASH_TYPE_TCPv6);
3083c1170820SAndrew Melnychenko 		if (!is_disable)
3084c1170820SAndrew Melnychenko 			new_hashtypes |= VIRTIO_NET_RSS_HASH_TYPE_IPv6
3085c1170820SAndrew Melnychenko 				| (is_l4 ? VIRTIO_NET_RSS_HASH_TYPE_TCPv6 : 0);
3086c1170820SAndrew Melnychenko 		break;
3087c1170820SAndrew Melnychenko 	case UDP_V6_FLOW:
3088c1170820SAndrew Melnychenko 		new_hashtypes &= ~(VIRTIO_NET_RSS_HASH_TYPE_IPv6 | VIRTIO_NET_RSS_HASH_TYPE_UDPv6);
3089c1170820SAndrew Melnychenko 		if (!is_disable)
3090c1170820SAndrew Melnychenko 			new_hashtypes |= VIRTIO_NET_RSS_HASH_TYPE_IPv6
3091c1170820SAndrew Melnychenko 				| (is_l4 ? VIRTIO_NET_RSS_HASH_TYPE_UDPv6 : 0);
3092c1170820SAndrew Melnychenko 		break;
3093c1170820SAndrew Melnychenko 	case IPV6_FLOW:
3094c1170820SAndrew Melnychenko 		new_hashtypes &= ~VIRTIO_NET_RSS_HASH_TYPE_IPv6;
3095c1170820SAndrew Melnychenko 		if (!is_disable)
3096c1170820SAndrew Melnychenko 			new_hashtypes = VIRTIO_NET_RSS_HASH_TYPE_IPv6;
3097c1170820SAndrew Melnychenko 		break;
3098c1170820SAndrew Melnychenko 	default:
3099c1170820SAndrew Melnychenko 		/* unsupported flow */
3100c1170820SAndrew Melnychenko 		return false;
3101c1170820SAndrew Melnychenko 	}
3102c1170820SAndrew Melnychenko 
3103c1170820SAndrew Melnychenko 	/* if unsupported hashtype was set */
3104c1170820SAndrew Melnychenko 	if (new_hashtypes != (new_hashtypes & vi->rss_hash_types_supported))
3105c1170820SAndrew Melnychenko 		return false;
3106c1170820SAndrew Melnychenko 
3107c1170820SAndrew Melnychenko 	if (new_hashtypes != vi->rss_hash_types_saved) {
3108c1170820SAndrew Melnychenko 		vi->rss_hash_types_saved = new_hashtypes;
3109c1170820SAndrew Melnychenko 		vi->ctrl->rss.hash_types = vi->rss_hash_types_saved;
3110c1170820SAndrew Melnychenko 		if (vi->dev->features & NETIF_F_RXHASH)
3111c1170820SAndrew Melnychenko 			return virtnet_commit_rss_command(vi);
3112c1170820SAndrew Melnychenko 	}
3113c1170820SAndrew Melnychenko 
3114c1170820SAndrew Melnychenko 	return true;
3115c1170820SAndrew Melnychenko }
311666846048SRick Jones 
311766846048SRick Jones static void virtnet_get_drvinfo(struct net_device *dev,
311866846048SRick Jones 				struct ethtool_drvinfo *info)
311966846048SRick Jones {
312066846048SRick Jones 	struct virtnet_info *vi = netdev_priv(dev);
312166846048SRick Jones 	struct virtio_device *vdev = vi->vdev;
312266846048SRick Jones 
3123fb3ceec1SWolfram Sang 	strscpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
3124fb3ceec1SWolfram Sang 	strscpy(info->version, VIRTNET_DRIVER_VERSION, sizeof(info->version));
3125fb3ceec1SWolfram Sang 	strscpy(info->bus_info, virtio_bus_name(vdev), sizeof(info->bus_info));
312766846048SRick Jones }
312866846048SRick Jones 
3129d73bcd2cSJason Wang /* TODO: Eliminate OOO packets during switching */
3130d73bcd2cSJason Wang static int virtnet_set_channels(struct net_device *dev,
3131d73bcd2cSJason Wang 				struct ethtool_channels *channels)
3132d73bcd2cSJason Wang {
3133d73bcd2cSJason Wang 	struct virtnet_info *vi = netdev_priv(dev);
3134d73bcd2cSJason Wang 	u16 queue_pairs = channels->combined_count;
3135d73bcd2cSJason Wang 	int err;
3136d73bcd2cSJason Wang 
3137d73bcd2cSJason Wang 	/* We don't support separate rx/tx channels.
3138d73bcd2cSJason Wang 	 * We don't allow setting 'other' channels.
3139d73bcd2cSJason Wang 	 */
3140d73bcd2cSJason Wang 	if (channels->rx_count || channels->tx_count || channels->other_count)
3141d73bcd2cSJason Wang 		return -EINVAL;
3142d73bcd2cSJason Wang 
3143c18e9cd6SAmos Kong 	if (queue_pairs > vi->max_queue_pairs || queue_pairs == 0)
3144d73bcd2cSJason Wang 		return -EINVAL;
3145d73bcd2cSJason Wang 
3146f600b690SJohn Fastabend 	/* For now we don't support modifying channels while XDP is loaded.
3147f600b690SJohn Fastabend 	 * Also, when XDP is loaded all RX queues have XDP programs, so we
3148f600b690SJohn Fastabend 	 * only need to check a single RX queue.
3149f600b690SJohn Fastabend 	 */
3150f600b690SJohn Fastabend 	if (vi->rq[0].xdp_prog)
3151f600b690SJohn Fastabend 		return -EINVAL;
3152f600b690SJohn Fastabend 
3153a0d1d0f4SSebastian Andrzej Siewior 	cpus_read_lock();
315447315329SJohn Fastabend 	err = _virtnet_set_queues(vi, queue_pairs);
3155de33212fSJeff Dike 	if (err) {
3156a0d1d0f4SSebastian Andrzej Siewior 		cpus_read_unlock();
3157de33212fSJeff Dike 		goto err;
3158d73bcd2cSJason Wang 	}
3159de33212fSJeff Dike 	virtnet_set_affinity(vi);
3160a0d1d0f4SSebastian Andrzej Siewior 	cpus_read_unlock();
3161d73bcd2cSJason Wang 
3162de33212fSJeff Dike 	netif_set_real_num_tx_queues(dev, queue_pairs);
3163de33212fSJeff Dike 	netif_set_real_num_rx_queues(dev, queue_pairs);
3164de33212fSJeff Dike  err:
3165d73bcd2cSJason Wang 	return err;
3166d73bcd2cSJason Wang }
3167d73bcd2cSJason Wang 
3168d7dfc5cfSToshiaki Makita static void virtnet_get_strings(struct net_device *dev, u32 stringset, u8 *data)
3169d7dfc5cfSToshiaki Makita {
3170d7dfc5cfSToshiaki Makita 	struct virtnet_info *vi = netdev_priv(dev);
3171d7dfc5cfSToshiaki Makita 	unsigned int i, j;
3172d7a9a01bSAlexander Duyck 	u8 *p = data;
3173d7dfc5cfSToshiaki Makita 
3174d7dfc5cfSToshiaki Makita 	switch (stringset) {
3175d7dfc5cfSToshiaki Makita 	case ETH_SS_STATS:
3176d7dfc5cfSToshiaki Makita 		for (i = 0; i < vi->curr_queue_pairs; i++) {
3177d7a9a01bSAlexander Duyck 			for (j = 0; j < VIRTNET_RQ_STATS_LEN; j++)
3178d7a9a01bSAlexander Duyck 				ethtool_sprintf(&p, "rx_queue_%u_%s", i,
3179d7a9a01bSAlexander Duyck 						virtnet_rq_stats_desc[j].desc);
3180d7dfc5cfSToshiaki Makita 		}
3181d7dfc5cfSToshiaki Makita 
3182d7dfc5cfSToshiaki Makita 		for (i = 0; i < vi->curr_queue_pairs; i++) {
3183d7a9a01bSAlexander Duyck 			for (j = 0; j < VIRTNET_SQ_STATS_LEN; j++)
3184d7a9a01bSAlexander Duyck 				ethtool_sprintf(&p, "tx_queue_%u_%s", i,
3185d7a9a01bSAlexander Duyck 						virtnet_sq_stats_desc[j].desc);
3186d7dfc5cfSToshiaki Makita 		}
3187d7dfc5cfSToshiaki Makita 		break;
3188d7dfc5cfSToshiaki Makita 	}
3189d7dfc5cfSToshiaki Makita }
3190d7dfc5cfSToshiaki Makita 
3191d7dfc5cfSToshiaki Makita static int virtnet_get_sset_count(struct net_device *dev, int sset)
3192d7dfc5cfSToshiaki Makita {
3193d7dfc5cfSToshiaki Makita 	struct virtnet_info *vi = netdev_priv(dev);
3194d7dfc5cfSToshiaki Makita 
3195d7dfc5cfSToshiaki Makita 	switch (sset) {
3196d7dfc5cfSToshiaki Makita 	case ETH_SS_STATS:
3197d7dfc5cfSToshiaki Makita 		return vi->curr_queue_pairs * (VIRTNET_RQ_STATS_LEN +
3198d7dfc5cfSToshiaki Makita 					       VIRTNET_SQ_STATS_LEN);
3199d7dfc5cfSToshiaki Makita 	default:
3200d7dfc5cfSToshiaki Makita 		return -EOPNOTSUPP;
3201d7dfc5cfSToshiaki Makita 	}
3202d7dfc5cfSToshiaki Makita }
3203d7dfc5cfSToshiaki Makita 
3204d7dfc5cfSToshiaki Makita static void virtnet_get_ethtool_stats(struct net_device *dev,
3205d7dfc5cfSToshiaki Makita 				      struct ethtool_stats *stats, u64 *data)
3206d7dfc5cfSToshiaki Makita {
3207d7dfc5cfSToshiaki Makita 	struct virtnet_info *vi = netdev_priv(dev);
3208d7dfc5cfSToshiaki Makita 	unsigned int idx = 0, start, i, j;
3209d7dfc5cfSToshiaki Makita 	const u8 *stats_base;
321027debe3eSEric Dumazet 	const u64_stats_t *p;
3211d7dfc5cfSToshiaki Makita 	size_t offset;
3212d7dfc5cfSToshiaki Makita 
3213d7dfc5cfSToshiaki Makita 	for (i = 0; i < vi->curr_queue_pairs; i++) {
3214d7dfc5cfSToshiaki Makita 		struct receive_queue *rq = &vi->rq[i];
3215d7dfc5cfSToshiaki Makita 
321627debe3eSEric Dumazet 		stats_base = (const u8 *)&rq->stats;
3217d7dfc5cfSToshiaki Makita 		do {
3218068c38adSThomas Gleixner 			start = u64_stats_fetch_begin(&rq->stats.syncp);
3219d7dfc5cfSToshiaki Makita 			for (j = 0; j < VIRTNET_RQ_STATS_LEN; j++) {
3220d7dfc5cfSToshiaki Makita 				offset = virtnet_rq_stats_desc[j].offset;
322127debe3eSEric Dumazet 				p = (const u64_stats_t *)(stats_base + offset);
322227debe3eSEric Dumazet 				data[idx + j] = u64_stats_read(p);
3223d7dfc5cfSToshiaki Makita 			}
3224068c38adSThomas Gleixner 		} while (u64_stats_fetch_retry(&rq->stats.syncp, start));
3225d7dfc5cfSToshiaki Makita 		idx += VIRTNET_RQ_STATS_LEN;
3226d7dfc5cfSToshiaki Makita 	}
3227d7dfc5cfSToshiaki Makita 
3228d7dfc5cfSToshiaki Makita 	for (i = 0; i < vi->curr_queue_pairs; i++) {
3229d7dfc5cfSToshiaki Makita 		struct send_queue *sq = &vi->sq[i];
3230d7dfc5cfSToshiaki Makita 
323127debe3eSEric Dumazet 		stats_base = (const u8 *)&sq->stats;
3232d7dfc5cfSToshiaki Makita 		do {
3233068c38adSThomas Gleixner 			start = u64_stats_fetch_begin(&sq->stats.syncp);
3234d7dfc5cfSToshiaki Makita 			for (j = 0; j < VIRTNET_SQ_STATS_LEN; j++) {
3235d7dfc5cfSToshiaki Makita 				offset = virtnet_sq_stats_desc[j].offset;
323627debe3eSEric Dumazet 				p = (const u64_stats_t *)(stats_base + offset);
323727debe3eSEric Dumazet 				data[idx + j] = u64_stats_read(p);
3238d7dfc5cfSToshiaki Makita 			}
3239068c38adSThomas Gleixner 		} while (u64_stats_fetch_retry(&sq->stats.syncp, start));
3240d7dfc5cfSToshiaki Makita 		idx += VIRTNET_SQ_STATS_LEN;
3241d7dfc5cfSToshiaki Makita 	}
3242d7dfc5cfSToshiaki Makita }
3243d7dfc5cfSToshiaki Makita 
3244d73bcd2cSJason Wang static void virtnet_get_channels(struct net_device *dev,
3245d73bcd2cSJason Wang 				 struct ethtool_channels *channels)
3246d73bcd2cSJason Wang {
3247d73bcd2cSJason Wang 	struct virtnet_info *vi = netdev_priv(dev);
3248d73bcd2cSJason Wang 
3249d73bcd2cSJason Wang 	channels->combined_count = vi->curr_queue_pairs;
3250d73bcd2cSJason Wang 	channels->max_combined = vi->max_queue_pairs;
3251d73bcd2cSJason Wang 	channels->max_other = 0;
3252d73bcd2cSJason Wang 	channels->rx_count = 0;
3253d73bcd2cSJason Wang 	channels->tx_count = 0;
3254d73bcd2cSJason Wang 	channels->other_count = 0;
3255d73bcd2cSJason Wang }
3256d73bcd2cSJason Wang 
3257ebb6b4b1SPhilippe Reynes static int virtnet_set_link_ksettings(struct net_device *dev,
3258ebb6b4b1SPhilippe Reynes 				      const struct ethtool_link_ksettings *cmd)
325916032be5SNikolay Aleksandrov {
326016032be5SNikolay Aleksandrov 	struct virtnet_info *vi = netdev_priv(dev);
326116032be5SNikolay Aleksandrov 
32629aedc6e2SCris Forno 	return ethtool_virtdev_set_link_ksettings(dev, cmd,
32639aedc6e2SCris Forno 						  &vi->speed, &vi->duplex);
326416032be5SNikolay Aleksandrov }
326516032be5SNikolay Aleksandrov 
3266ebb6b4b1SPhilippe Reynes static int virtnet_get_link_ksettings(struct net_device *dev,
3267ebb6b4b1SPhilippe Reynes 				      struct ethtool_link_ksettings *cmd)
326816032be5SNikolay Aleksandrov {
326916032be5SNikolay Aleksandrov 	struct virtnet_info *vi = netdev_priv(dev);
327016032be5SNikolay Aleksandrov 
3271ebb6b4b1SPhilippe Reynes 	cmd->base.speed = vi->speed;
3272ebb6b4b1SPhilippe Reynes 	cmd->base.duplex = vi->duplex;
3273ebb6b4b1SPhilippe Reynes 	cmd->base.port = PORT_OTHER;
327416032be5SNikolay Aleksandrov 
327516032be5SNikolay Aleksandrov 	return 0;
327616032be5SNikolay Aleksandrov }
327716032be5SNikolay Aleksandrov 
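/* Device-wide interrupt coalescing: push the ethtool TX and RX
 * parameters to the device via VIRTIO_NET_CTRL_NOTF_COAL_TX_SET and
 * _RX_SET control commands, then mirror them into vi->intr_coal_tx/rx
 * and every queue's intr_coal state so later queries report what was
 * actually programmed.
 */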
3278699b045aSAlvaro Karsz static int virtnet_send_notf_coal_cmds(struct virtnet_info *vi,
3279699b045aSAlvaro Karsz 				       struct ethtool_coalesce *ec)
3280699b045aSAlvaro Karsz {
3281699b045aSAlvaro Karsz 	struct scatterlist sgs_tx, sgs_rx;
3282829cce76SHeng Qi 	int i;
3283699b045aSAlvaro Karsz 
3284accc1bf2SBrett Creeley 	vi->ctrl->coal_tx.tx_usecs = cpu_to_le32(ec->tx_coalesce_usecs);
3285accc1bf2SBrett Creeley 	vi->ctrl->coal_tx.tx_max_packets = cpu_to_le32(ec->tx_max_coalesced_frames);
3286accc1bf2SBrett Creeley 	sg_init_one(&sgs_tx, &vi->ctrl->coal_tx, sizeof(vi->ctrl->coal_tx));
3287699b045aSAlvaro Karsz 
3288699b045aSAlvaro Karsz 	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_NOTF_COAL,
3289699b045aSAlvaro Karsz 				  VIRTIO_NET_CTRL_NOTF_COAL_TX_SET,
3290699b045aSAlvaro Karsz 				  &sgs_tx))
3291699b045aSAlvaro Karsz 		return -EINVAL;
3292699b045aSAlvaro Karsz 
3293699b045aSAlvaro Karsz 	/* Save parameters */
3294308d7982SGavin Li 	vi->intr_coal_tx.max_usecs = ec->tx_coalesce_usecs;
3295308d7982SGavin Li 	vi->intr_coal_tx.max_packets = ec->tx_max_coalesced_frames;
3296829cce76SHeng Qi 	for (i = 0; i < vi->max_queue_pairs; i++) {
3297829cce76SHeng Qi 		vi->sq[i].intr_coal.max_usecs = ec->tx_coalesce_usecs;
3298829cce76SHeng Qi 		vi->sq[i].intr_coal.max_packets = ec->tx_max_coalesced_frames;
3299829cce76SHeng Qi 	}
3300699b045aSAlvaro Karsz 
3301accc1bf2SBrett Creeley 	vi->ctrl->coal_rx.rx_usecs = cpu_to_le32(ec->rx_coalesce_usecs);
3302accc1bf2SBrett Creeley 	vi->ctrl->coal_rx.rx_max_packets = cpu_to_le32(ec->rx_max_coalesced_frames);
3303accc1bf2SBrett Creeley 	sg_init_one(&sgs_rx, &vi->ctrl->coal_rx, sizeof(vi->ctrl->coal_rx));
3304699b045aSAlvaro Karsz 
3305699b045aSAlvaro Karsz 	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_NOTF_COAL,
3306699b045aSAlvaro Karsz 				  VIRTIO_NET_CTRL_NOTF_COAL_RX_SET,
3307699b045aSAlvaro Karsz 				  &sgs_rx))
3308699b045aSAlvaro Karsz 		return -EINVAL;
3309699b045aSAlvaro Karsz 
3310699b045aSAlvaro Karsz 	/* Save parameters */
3311308d7982SGavin Li 	vi->intr_coal_rx.max_usecs = ec->rx_coalesce_usecs;
3312308d7982SGavin Li 	vi->intr_coal_rx.max_packets = ec->rx_max_coalesced_frames;
3313829cce76SHeng Qi 	for (i = 0; i < vi->max_queue_pairs; i++) {
3314829cce76SHeng Qi 		vi->rq[i].intr_coal.max_usecs = ec->rx_coalesce_usecs;
3315829cce76SHeng Qi 		vi->rq[i].intr_coal.max_packets = ec->rx_max_coalesced_frames;
3316829cce76SHeng Qi 	}
3317699b045aSAlvaro Karsz 
3318699b045aSAlvaro Karsz 	return 0;
3319699b045aSAlvaro Karsz }
3320699b045aSAlvaro Karsz 
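/* Program coalescing for one virtqueue: the VQ_SET control command
 * carries the target vqn plus its max_usecs/max_packets pair, converted
 * to the little-endian layout the control buffer expects.
 */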
3321394bd877SGavin Li static int virtnet_send_ctrl_coal_vq_cmd(struct virtnet_info *vi,
3322394bd877SGavin Li 					 u16 vqn, u32 max_usecs, u32 max_packets)
3323394bd877SGavin Li {
3324394bd877SGavin Li 	struct scatterlist sgs;
3325394bd877SGavin Li 
3326394bd877SGavin Li 	vi->ctrl->coal_vq.vqn = cpu_to_le16(vqn);
3327394bd877SGavin Li 	vi->ctrl->coal_vq.coal.max_usecs = cpu_to_le32(max_usecs);
3328394bd877SGavin Li 	vi->ctrl->coal_vq.coal.max_packets = cpu_to_le32(max_packets);
3329394bd877SGavin Li 	sg_init_one(&sgs, &vi->ctrl->coal_vq, sizeof(vi->ctrl->coal_vq));
3330394bd877SGavin Li 
3331394bd877SGavin Li 	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_NOTF_COAL,
3332394bd877SGavin Li 				  VIRTIO_NET_CTRL_NOTF_COAL_VQ_SET,
3333394bd877SGavin Li 				  &sgs))
3334394bd877SGavin Li 		return -EINVAL;
3335394bd877SGavin Li 
3336394bd877SGavin Li 	return 0;
3337394bd877SGavin Li }
3338394bd877SGavin Li 
3339394bd877SGavin Li static int virtnet_send_notf_coal_vq_cmds(struct virtnet_info *vi,
3340394bd877SGavin Li 					  struct ethtool_coalesce *ec,
3341394bd877SGavin Li 					  u16 queue)
3342394bd877SGavin Li {
3343394bd877SGavin Li 	int err;
3344394bd877SGavin Li 
3345394bd877SGavin Li 	err = virtnet_send_ctrl_coal_vq_cmd(vi, rxq2vq(queue),
3346394bd877SGavin Li 					    ec->rx_coalesce_usecs,
3347394bd877SGavin Li 					    ec->rx_max_coalesced_frames);
3348394bd877SGavin Li 	if (err)
3349394bd877SGavin Li 		return err;
33507c1453abSHeng Qi 
3351394bd877SGavin Li 	vi->rq[queue].intr_coal.max_usecs = ec->rx_coalesce_usecs;
3352394bd877SGavin Li 	vi->rq[queue].intr_coal.max_packets = ec->rx_max_coalesced_frames;
3353394bd877SGavin Li 
3354394bd877SGavin Li 	err = virtnet_send_ctrl_coal_vq_cmd(vi, txq2vq(queue),
3355394bd877SGavin Li 					    ec->tx_coalesce_usecs,
3356394bd877SGavin Li 					    ec->tx_max_coalesced_frames);
3357394bd877SGavin Li 	if (err)
3358394bd877SGavin Li 		return err;
33597c1453abSHeng Qi 
3360394bd877SGavin Li 	vi->sq[queue].intr_coal.max_usecs = ec->tx_coalesce_usecs;
3361394bd877SGavin Li 	vi->sq[queue].intr_coal.max_packets = ec->tx_max_coalesced_frames;
3362394bd877SGavin Li 
3363394bd877SGavin Li 	return 0;
3364394bd877SGavin Li }
3365394bd877SGavin Li 
3366699b045aSAlvaro Karsz static int virtnet_coal_params_supported(struct ethtool_coalesce *ec)
3367699b045aSAlvaro Karsz {
3368699b045aSAlvaro Karsz 	/* usecs coalescing is supported only if the VIRTIO_NET_F_NOTF_COAL
3369699b045aSAlvaro Karsz 	 * feature is negotiated.
3370699b045aSAlvaro Karsz 	 */
3371699b045aSAlvaro Karsz 	if (ec->rx_coalesce_usecs || ec->tx_coalesce_usecs)
3372699b045aSAlvaro Karsz 		return -EOPNOTSUPP;
3373699b045aSAlvaro Karsz 
3374699b045aSAlvaro Karsz 	if (ec->tx_max_coalesced_frames > 1 ||
3375699b045aSAlvaro Karsz 	    ec->rx_max_coalesced_frames != 1)
3376699b045aSAlvaro Karsz 		return -EINVAL;
3377699b045aSAlvaro Karsz 
3378699b045aSAlvaro Karsz 	return 0;
3379699b045aSAlvaro Karsz }
3380699b045aSAlvaro Karsz 
3381394bd877SGavin Li static int virtnet_should_update_vq_weight(int dev_flags, int weight,
3382394bd877SGavin Li 					   int vq_weight, bool *should_update)
3383394bd877SGavin Li {
3384394bd877SGavin Li 	if (weight ^ vq_weight) {
3385394bd877SGavin Li 		if (dev_flags & IFF_UP)
3386394bd877SGavin Li 			return -EBUSY;
3387394bd877SGavin Li 		*should_update = true;
3388394bd877SGavin Li 	}
3389394bd877SGavin Li 
3390394bd877SGavin Li 	return 0;
3391394bd877SGavin Li }
3392394bd877SGavin Li 
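/* Note that ethtool's tx-frames value doubles as the TX NAPI switch
 * here: 0 selects a NAPI weight of 0 (TX NAPI off), anything else
 * selects NAPI_POLL_WEIGHT.  virtnet_should_update_vq_weight() refuses
 * the change with -EBUSY while the interface is up.
 */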
33930c465be1SJason Wang static int virtnet_set_coalesce(struct net_device *dev,
3394f3ccfda1SYufeng Mo 				struct ethtool_coalesce *ec,
3395f3ccfda1SYufeng Mo 				struct kernel_ethtool_coalesce *kernel_coal,
3396f3ccfda1SYufeng Mo 				struct netlink_ext_ack *extack)
33970c465be1SJason Wang {
33980c465be1SJason Wang 	struct virtnet_info *vi = netdev_priv(dev);
3399394bd877SGavin Li 	int ret, queue_number, napi_weight;
3400699b045aSAlvaro Karsz 	bool update_napi = false;
34010c465be1SJason Wang 
3402699b045aSAlvaro Karsz 	/* Can't change NAPI weight while the interface is up */
34030c465be1SJason Wang 	napi_weight = ec->tx_max_coalesced_frames ? NAPI_POLL_WEIGHT : 0;
3404394bd877SGavin Li 	for (queue_number = 0; queue_number < vi->max_queue_pairs; queue_number++) {
3405394bd877SGavin Li 		ret = virtnet_should_update_vq_weight(dev->flags, napi_weight,
3406394bd877SGavin Li 						      vi->sq[queue_number].napi.weight,
3407394bd877SGavin Li 						      &update_napi);
3408394bd877SGavin Li 		if (ret)
3409394bd877SGavin Li 			return ret;
3410394bd877SGavin Li 
3411394bd877SGavin Li 		if (update_napi) {
3412394bd877SGavin Li 			/* For simplicity, update every queue in [queue_number,
3413394bd877SGavin Li 			 * vi->max_queue_pairs), though only some may need it.
3414394bd877SGavin Li 			 */
3415394bd877SGavin Li 			break;
3416394bd877SGavin Li 		}
3417699b045aSAlvaro Karsz 	}
3418699b045aSAlvaro Karsz 
3419699b045aSAlvaro Karsz 	if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_NOTF_COAL))
3420699b045aSAlvaro Karsz 		ret = virtnet_send_notf_coal_cmds(vi, ec);
3421699b045aSAlvaro Karsz 	else
3422699b045aSAlvaro Karsz 		ret = virtnet_coal_params_supported(ec);
3423699b045aSAlvaro Karsz 
3424699b045aSAlvaro Karsz 	if (ret)
3425699b045aSAlvaro Karsz 		return ret;
3426699b045aSAlvaro Karsz 
3427699b045aSAlvaro Karsz 	if (update_napi) {
3428394bd877SGavin Li 		for (; queue_number < vi->max_queue_pairs; queue_number++)
3429394bd877SGavin Li 			vi->sq[queue_number].napi.weight = napi_weight;
34300c465be1SJason Wang 	}
34310c465be1SJason Wang 
3432699b045aSAlvaro Karsz 	return ret;
34330c465be1SJason Wang }
34340c465be1SJason Wang 
34350c465be1SJason Wang static int virtnet_get_coalesce(struct net_device *dev,
3436f3ccfda1SYufeng Mo 				struct ethtool_coalesce *ec,
3437f3ccfda1SYufeng Mo 				struct kernel_ethtool_coalesce *kernel_coal,
3438f3ccfda1SYufeng Mo 				struct netlink_ext_ack *extack)
34390c465be1SJason Wang {
34400c465be1SJason Wang 	struct virtnet_info *vi = netdev_priv(dev);
34410c465be1SJason Wang 
3442699b045aSAlvaro Karsz 	if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_NOTF_COAL)) {
3443308d7982SGavin Li 		ec->rx_coalesce_usecs = vi->intr_coal_rx.max_usecs;
3444308d7982SGavin Li 		ec->tx_coalesce_usecs = vi->intr_coal_tx.max_usecs;
3445308d7982SGavin Li 		ec->tx_max_coalesced_frames = vi->intr_coal_tx.max_packets;
3446308d7982SGavin Li 		ec->rx_max_coalesced_frames = vi->intr_coal_rx.max_packets;
3447699b045aSAlvaro Karsz 	} else {
3448699b045aSAlvaro Karsz 		ec->rx_max_coalesced_frames = 1;
34490c465be1SJason Wang 
34500c465be1SJason Wang 		if (vi->sq[0].napi.weight)
34510c465be1SJason Wang 			ec->tx_max_coalesced_frames = 1;
3452699b045aSAlvaro Karsz 	}
34530c465be1SJason Wang 
34540c465be1SJason Wang 	return 0;
34550c465be1SJason Wang }
34560c465be1SJason Wang 
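/* Per-queue counterparts of the handlers above; these require
 * VIRTIO_NET_F_VQ_NOTF_COAL so that individual virtqueues can be
 * addressed by the coalescing commands.
 */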
3457394bd877SGavin Li static int virtnet_set_per_queue_coalesce(struct net_device *dev,
3458394bd877SGavin Li 					  u32 queue,
3459394bd877SGavin Li 					  struct ethtool_coalesce *ec)
3460394bd877SGavin Li {
3461394bd877SGavin Li 	struct virtnet_info *vi = netdev_priv(dev);
3462394bd877SGavin Li 	int ret, napi_weight;
3463394bd877SGavin Li 	bool update_napi = false;
3464394bd877SGavin Li 
3465394bd877SGavin Li 	if (queue >= vi->max_queue_pairs)
3466394bd877SGavin Li 		return -EINVAL;
3467394bd877SGavin Li 
3468394bd877SGavin Li 	/* Can't change NAPI weight while the interface is up */
3469394bd877SGavin Li 	napi_weight = ec->tx_max_coalesced_frames ? NAPI_POLL_WEIGHT : 0;
3470394bd877SGavin Li 	ret = virtnet_should_update_vq_weight(dev->flags, napi_weight,
3471394bd877SGavin Li 					      vi->sq[queue].napi.weight,
3472394bd877SGavin Li 					      &update_napi);
3473394bd877SGavin Li 	if (ret)
3474394bd877SGavin Li 		return ret;
3475394bd877SGavin Li 
3476394bd877SGavin Li 	if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL))
3477394bd877SGavin Li 		ret = virtnet_send_notf_coal_vq_cmds(vi, ec, queue);
3478394bd877SGavin Li 	else
3479394bd877SGavin Li 		ret = virtnet_coal_params_supported(ec);
3480394bd877SGavin Li 
3481394bd877SGavin Li 	if (ret)
3482394bd877SGavin Li 		return ret;
3483394bd877SGavin Li 
3484394bd877SGavin Li 	if (update_napi)
3485394bd877SGavin Li 		vi->sq[queue].napi.weight = napi_weight;
3486394bd877SGavin Li 
3487394bd877SGavin Li 	return 0;
3488394bd877SGavin Li }
3489394bd877SGavin Li 
3490394bd877SGavin Li static int virtnet_get_per_queue_coalesce(struct net_device *dev,
3491394bd877SGavin Li 					  u32 queue,
3492394bd877SGavin Li 					  struct ethtool_coalesce *ec)
3493394bd877SGavin Li {
3494394bd877SGavin Li 	struct virtnet_info *vi = netdev_priv(dev);
3495394bd877SGavin Li 
3496394bd877SGavin Li 	if (queue >= vi->max_queue_pairs)
3497394bd877SGavin Li 		return -EINVAL;
3498394bd877SGavin Li 
3499394bd877SGavin Li 	if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL)) {
3500394bd877SGavin Li 		ec->rx_coalesce_usecs = vi->rq[queue].intr_coal.max_usecs;
3501394bd877SGavin Li 		ec->tx_coalesce_usecs = vi->sq[queue].intr_coal.max_usecs;
3502394bd877SGavin Li 		ec->tx_max_coalesced_frames = vi->sq[queue].intr_coal.max_packets;
3503394bd877SGavin Li 		ec->rx_max_coalesced_frames = vi->rq[queue].intr_coal.max_packets;
3504394bd877SGavin Li 	} else {
3505394bd877SGavin Li 		ec->rx_max_coalesced_frames = 1;
3506394bd877SGavin Li 
350784a056f7SHeng Qi 		if (vi->sq[queue].napi.weight)
3508394bd877SGavin Li 			ec->tx_max_coalesced_frames = 1;
3509394bd877SGavin Li 	}
3510394bd877SGavin Li 
3511394bd877SGavin Li 	return 0;
3512394bd877SGavin Li }
3513394bd877SGavin Li 
351416032be5SNikolay Aleksandrov static void virtnet_init_settings(struct net_device *dev)
351516032be5SNikolay Aleksandrov {
351616032be5SNikolay Aleksandrov 	struct virtnet_info *vi = netdev_priv(dev);
351716032be5SNikolay Aleksandrov 
351816032be5SNikolay Aleksandrov 	vi->speed = SPEED_UNKNOWN;
351916032be5SNikolay Aleksandrov 	vi->duplex = DUPLEX_UNKNOWN;
352016032be5SNikolay Aleksandrov }
352116032be5SNikolay Aleksandrov 
3522faa9b39fSJason Baron static void virtnet_update_settings(struct virtnet_info *vi)
3523faa9b39fSJason Baron {
3524faa9b39fSJason Baron 	u32 speed;
3525faa9b39fSJason Baron 	u8 duplex;
3526faa9b39fSJason Baron 
3527faa9b39fSJason Baron 	if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_SPEED_DUPLEX))
3528faa9b39fSJason Baron 		return;
3529faa9b39fSJason Baron 
353064ffa39dSMichael S. Tsirkin 	virtio_cread_le(vi->vdev, struct virtio_net_config, speed, &speed);
353164ffa39dSMichael S. Tsirkin 
3532faa9b39fSJason Baron 	if (ethtool_validate_speed(speed))
3533faa9b39fSJason Baron 		vi->speed = speed;
353464ffa39dSMichael S. Tsirkin 
353564ffa39dSMichael S. Tsirkin 	virtio_cread_le(vi->vdev, struct virtio_net_config, duplex, &duplex);
353664ffa39dSMichael S. Tsirkin 
3537faa9b39fSJason Baron 	if (ethtool_validate_duplex(duplex))
3538faa9b39fSJason Baron 		vi->duplex = duplex;
3539faa9b39fSJason Baron }
3540faa9b39fSJason Baron 
3541c7114b12SAndrew Melnychenko static u32 virtnet_get_rxfh_key_size(struct net_device *dev)
3542c7114b12SAndrew Melnychenko {
3543c7114b12SAndrew Melnychenko 	return ((struct virtnet_info *)netdev_priv(dev))->rss_key_size;
3544c7114b12SAndrew Melnychenko }
3545c7114b12SAndrew Melnychenko 
3546c7114b12SAndrew Melnychenko static u32 virtnet_get_rxfh_indir_size(struct net_device *dev)
3547c7114b12SAndrew Melnychenko {
3548c7114b12SAndrew Melnychenko 	return ((struct virtnet_info *)netdev_priv(dev))->rss_indir_table_size;
3549c7114b12SAndrew Melnychenko }
3550c7114b12SAndrew Melnychenko 
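/* ethtool RSS plumbing: the indirection table and hash key are staged
 * in vi->ctrl->rss and only committed to the device (via
 * virtnet_commit_rss_command()) when something actually changed.
 * Toeplitz (ETH_RSS_HASH_TOP) is the only hash function exposed.
 */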
3551c7114b12SAndrew Melnychenko static int virtnet_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, u8 *hfunc)
3552c7114b12SAndrew Melnychenko {
3553c7114b12SAndrew Melnychenko 	struct virtnet_info *vi = netdev_priv(dev);
3554c7114b12SAndrew Melnychenko 	int i;
3555c7114b12SAndrew Melnychenko 
3556c7114b12SAndrew Melnychenko 	if (indir) {
3557c7114b12SAndrew Melnychenko 		for (i = 0; i < vi->rss_indir_table_size; ++i)
3558c7114b12SAndrew Melnychenko 			indir[i] = vi->ctrl->rss.indirection_table[i];
3559c7114b12SAndrew Melnychenko 	}
3560c7114b12SAndrew Melnychenko 
3561c7114b12SAndrew Melnychenko 	if (key)
3562c7114b12SAndrew Melnychenko 		memcpy(key, vi->ctrl->rss.key, vi->rss_key_size);
3563c7114b12SAndrew Melnychenko 
3564c7114b12SAndrew Melnychenko 	if (hfunc)
3565c7114b12SAndrew Melnychenko 		*hfunc = ETH_RSS_HASH_TOP;
3566c7114b12SAndrew Melnychenko 
3567c7114b12SAndrew Melnychenko 	return 0;
3568c7114b12SAndrew Melnychenko }
3569c7114b12SAndrew Melnychenko 
3570c7114b12SAndrew Melnychenko static int virtnet_set_rxfh(struct net_device *dev, const u32 *indir, const u8 *key, const u8 hfunc)
3571c7114b12SAndrew Melnychenko {
3572c7114b12SAndrew Melnychenko 	struct virtnet_info *vi = netdev_priv(dev);
357343a71c1bSBreno Leitao 	bool update = false;
3574c7114b12SAndrew Melnychenko 	int i;
3575c7114b12SAndrew Melnychenko 
3576c7114b12SAndrew Melnychenko 	if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
3577c7114b12SAndrew Melnychenko 		return -EOPNOTSUPP;
3578c7114b12SAndrew Melnychenko 
3579c7114b12SAndrew Melnychenko 	if (indir) {
358043a71c1bSBreno Leitao 		if (!vi->has_rss)
358143a71c1bSBreno Leitao 			return -EOPNOTSUPP;
358243a71c1bSBreno Leitao 
3583c7114b12SAndrew Melnychenko 		for (i = 0; i < vi->rss_indir_table_size; ++i)
3584c7114b12SAndrew Melnychenko 			vi->ctrl->rss.indirection_table[i] = indir[i];
358543a71c1bSBreno Leitao 		update = true;
3586c7114b12SAndrew Melnychenko 	}
358743a71c1bSBreno Leitao 	if (key) {
358843a71c1bSBreno Leitao 		/* If either _F_HASH_REPORT or _F_RSS is negotiated, the
358943a71c1bSBreno Leitao 		 * device provides hash calculation capabilities; that is,
359043a71c1bSBreno Leitao 		 * hash_key is configured.
359143a71c1bSBreno Leitao 		 */
359243a71c1bSBreno Leitao 		if (!vi->has_rss && !vi->has_rss_hash_report)
359343a71c1bSBreno Leitao 			return -EOPNOTSUPP;
3594c7114b12SAndrew Melnychenko 
359543a71c1bSBreno Leitao 		memcpy(vi->ctrl->rss.key, key, vi->rss_key_size);
359643a71c1bSBreno Leitao 		update = true;
359743a71c1bSBreno Leitao 	}
359843a71c1bSBreno Leitao 
359943a71c1bSBreno Leitao 	if (update)
3600c7114b12SAndrew Melnychenko 		virtnet_commit_rss_command(vi);
3601c7114b12SAndrew Melnychenko 
3602c7114b12SAndrew Melnychenko 	return 0;
3603c7114b12SAndrew Melnychenko }
3604c7114b12SAndrew Melnychenko 
3605c7114b12SAndrew Melnychenko static int virtnet_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info, u32 *rule_locs)
3606c7114b12SAndrew Melnychenko {
3607c7114b12SAndrew Melnychenko 	struct virtnet_info *vi = netdev_priv(dev);
3608c7114b12SAndrew Melnychenko 	int rc = 0;
3609c7114b12SAndrew Melnychenko 
3610c7114b12SAndrew Melnychenko 	switch (info->cmd) {
3611c7114b12SAndrew Melnychenko 	case ETHTOOL_GRXRINGS:
3612c7114b12SAndrew Melnychenko 		info->data = vi->curr_queue_pairs;
3613c7114b12SAndrew Melnychenko 		break;
3614c1170820SAndrew Melnychenko 	case ETHTOOL_GRXFH:
3615c1170820SAndrew Melnychenko 		virtnet_get_hashflow(vi, info);
3616c1170820SAndrew Melnychenko 		break;
3617c1170820SAndrew Melnychenko 	default:
3618c1170820SAndrew Melnychenko 		rc = -EOPNOTSUPP;
3619c1170820SAndrew Melnychenko 	}
3620c1170820SAndrew Melnychenko 
3621c1170820SAndrew Melnychenko 	return rc;
3622c1170820SAndrew Melnychenko }
3623c1170820SAndrew Melnychenko 
3624c1170820SAndrew Melnychenko static int virtnet_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info)
3625c1170820SAndrew Melnychenko {
3626c1170820SAndrew Melnychenko 	struct virtnet_info *vi = netdev_priv(dev);
3627c1170820SAndrew Melnychenko 	int rc = 0;
3628c1170820SAndrew Melnychenko 
3629c1170820SAndrew Melnychenko 	switch (info->cmd) {
3630c1170820SAndrew Melnychenko 	case ETHTOOL_SRXFH:
3631c1170820SAndrew Melnychenko 		if (!virtnet_set_hashflow(vi, info))
3632c1170820SAndrew Melnychenko 			rc = -EINVAL;
3633c1170820SAndrew Melnychenko 
3634c1170820SAndrew Melnychenko 		break;
3635c7114b12SAndrew Melnychenko 	default:
3636c7114b12SAndrew Melnychenko 		rc = -EOPNOTSUPP;
3637c7114b12SAndrew Melnychenko 	}
3638c7114b12SAndrew Melnychenko 
3639c7114b12SAndrew Melnychenko 	return rc;
3640c7114b12SAndrew Melnychenko }
3641c7114b12SAndrew Melnychenko 
36420fc0b732SStephen Hemminger static const struct ethtool_ops virtnet_ethtool_ops = {
3643699b045aSAlvaro Karsz 	.supported_coalesce_params = ETHTOOL_COALESCE_MAX_FRAMES |
3644699b045aSAlvaro Karsz 		ETHTOOL_COALESCE_USECS,
364566846048SRick Jones 	.get_drvinfo = virtnet_get_drvinfo,
36469f4d26d0SMark McLoughlin 	.get_link = ethtool_op_get_link,
36478f9f4668SRick Jones 	.get_ringparam = virtnet_get_ringparam,
3648a335b33fSXuan Zhuo 	.set_ringparam = virtnet_set_ringparam,
3649d7dfc5cfSToshiaki Makita 	.get_strings = virtnet_get_strings,
3650d7dfc5cfSToshiaki Makita 	.get_sset_count = virtnet_get_sset_count,
3651d7dfc5cfSToshiaki Makita 	.get_ethtool_stats = virtnet_get_ethtool_stats,
3652d73bcd2cSJason Wang 	.set_channels = virtnet_set_channels,
3653d73bcd2cSJason Wang 	.get_channels = virtnet_get_channels,
3654074c3582SJacob Keller 	.get_ts_info = ethtool_op_get_ts_info,
3655ebb6b4b1SPhilippe Reynes 	.get_link_ksettings = virtnet_get_link_ksettings,
3656ebb6b4b1SPhilippe Reynes 	.set_link_ksettings = virtnet_set_link_ksettings,
36570c465be1SJason Wang 	.set_coalesce = virtnet_set_coalesce,
36580c465be1SJason Wang 	.get_coalesce = virtnet_get_coalesce,
3659394bd877SGavin Li 	.set_per_queue_coalesce = virtnet_set_per_queue_coalesce,
3660394bd877SGavin Li 	.get_per_queue_coalesce = virtnet_get_per_queue_coalesce,
3661c7114b12SAndrew Melnychenko 	.get_rxfh_key_size = virtnet_get_rxfh_key_size,
3662c7114b12SAndrew Melnychenko 	.get_rxfh_indir_size = virtnet_get_rxfh_indir_size,
3663c7114b12SAndrew Melnychenko 	.get_rxfh = virtnet_get_rxfh,
3664c7114b12SAndrew Melnychenko 	.set_rxfh = virtnet_set_rxfh,
3665c7114b12SAndrew Melnychenko 	.get_rxnfc = virtnet_get_rxnfc,
3666c1170820SAndrew Melnychenko 	.set_rxnfc = virtnet_set_rxnfc,
3667a9ea3fc6SHerbert Xu };
3668a9ea3fc6SHerbert Xu 
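/* Suspend/resume helpers: freeze quiesces the config work and detaches
 * the netdev under netif_tx_lock_bh() (serializing against the transmit
 * path) before closing it; restore rebuilds the virtqueues, marks the
 * device ready, and then reopens and reattaches in the reverse order.
 */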
36699fe7bfceSJohn Fastabend static void virtnet_freeze_down(struct virtio_device *vdev)
36709fe7bfceSJohn Fastabend {
36719fe7bfceSJohn Fastabend 	struct virtnet_info *vi = vdev->priv;
36729fe7bfceSJohn Fastabend 
36739fe7bfceSJohn Fastabend 	/* Make sure no work handler is accessing the device */
36749fe7bfceSJohn Fastabend 	flush_work(&vi->config_work);
36759fe7bfceSJohn Fastabend 
367605c998b7SAke Koomsin 	netif_tx_lock_bh(vi->dev);
36779fe7bfceSJohn Fastabend 	netif_device_detach(vi->dev);
367805c998b7SAke Koomsin 	netif_tx_unlock_bh(vi->dev);
36798af52fe9SStephan Gerhold 	if (netif_running(vi->dev))
36808af52fe9SStephan Gerhold 		virtnet_close(vi->dev);
36819fe7bfceSJohn Fastabend }
36829fe7bfceSJohn Fastabend 
36839fe7bfceSJohn Fastabend static int init_vqs(struct virtnet_info *vi);
36849fe7bfceSJohn Fastabend 
36859fe7bfceSJohn Fastabend static int virtnet_restore_up(struct virtio_device *vdev)
36869fe7bfceSJohn Fastabend {
36879fe7bfceSJohn Fastabend 	struct virtnet_info *vi = vdev->priv;
36888af52fe9SStephan Gerhold 	int err;
36899fe7bfceSJohn Fastabend 
36909fe7bfceSJohn Fastabend 	err = init_vqs(vi);
36919fe7bfceSJohn Fastabend 	if (err)
36929fe7bfceSJohn Fastabend 		return err;
36939fe7bfceSJohn Fastabend 
36949fe7bfceSJohn Fastabend 	virtio_device_ready(vdev);
36959fe7bfceSJohn Fastabend 
36965a159128SJason Wang 	enable_delayed_refill(vi);
36975a159128SJason Wang 
36989fe7bfceSJohn Fastabend 	if (netif_running(vi->dev)) {
36998af52fe9SStephan Gerhold 		err = virtnet_open(vi->dev);
37008af52fe9SStephan Gerhold 		if (err)
37018af52fe9SStephan Gerhold 			return err;
37029fe7bfceSJohn Fastabend 	}
37039fe7bfceSJohn Fastabend 
370405c998b7SAke Koomsin 	netif_tx_lock_bh(vi->dev);
37059fe7bfceSJohn Fastabend 	netif_device_attach(vi->dev);
370605c998b7SAke Koomsin 	netif_tx_unlock_bh(vi->dev);
37079fe7bfceSJohn Fastabend 	return err;
37089fe7bfceSJohn Fastabend }
37099fe7bfceSJohn Fastabend 
37103f93522fSJason Wang static int virtnet_set_guest_offloads(struct virtnet_info *vi, u64 offloads)
37113f93522fSJason Wang {
37123f93522fSJason Wang 	struct scatterlist sg;

371312e57169SMichael S. Tsirkin 	vi->ctrl->offloads = cpu_to_virtio64(vi->vdev, offloads);
37143f93522fSJason Wang 
371512e57169SMichael S. Tsirkin 	sg_init_one(&sg, &vi->ctrl->offloads, sizeof(vi->ctrl->offloads));
37163f93522fSJason Wang 
37173f93522fSJason Wang 	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_GUEST_OFFLOADS,
37183f93522fSJason Wang 				  VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET, &sg)) {
37193f93522fSJason Wang 		dev_warn(&vi->dev->dev, "Fail to set guest offload.\n");
37203f93522fSJason Wang 		dev_warn(&vi->dev->dev, "Failed to set guest offloads.\n");
37213f93522fSJason Wang 	}
37223f93522fSJason Wang 
37233f93522fSJason Wang 	return 0;
37243f93522fSJason Wang }
37253f93522fSJason Wang 
37263f93522fSJason Wang static int virtnet_clear_guest_offloads(struct virtnet_info *vi)
37273f93522fSJason Wang {
37283f93522fSJason Wang 	u64 offloads = 0;
37293f93522fSJason Wang 
37303f93522fSJason Wang 	if (!vi->guest_offloads)
37313f93522fSJason Wang 		return 0;
37323f93522fSJason Wang 
37333f93522fSJason Wang 	return virtnet_set_guest_offloads(vi, offloads);
37343f93522fSJason Wang }
37353f93522fSJason Wang 
37363f93522fSJason Wang static int virtnet_restore_guest_offloads(struct virtnet_info *vi)
37373f93522fSJason Wang {
37383f93522fSJason Wang 	u64 offloads = vi->guest_offloads;
37393f93522fSJason Wang 
37403f93522fSJason Wang 	if (!vi->guest_offloads)
37413f93522fSJason Wang 		return 0;
37423f93522fSJason Wang 
37433f93522fSJason Wang 	return virtnet_set_guest_offloads(vi, offloads);
37443f93522fSJason Wang }
37453f93522fSJason Wang 
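/* Attaching/detaching an XDP program:
 *  1. reject setups XDP cannot handle (GRO_HW offloads that cannot be
 *     toggled, header/data split, oversized MTU for single-buffer XDP);
 *  2. reserve one extra TX queue per CPU for XDP_TX, falling back to a
 *     slower shared-queue mode when the device runs out of queues;
 *  3. quiesce NAPI, swap rq->xdp_prog under RCU, adjust guest offloads,
 *     then re-enable NAPI.
 */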
37469861ce03SJakub Kicinski static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
37479861ce03SJakub Kicinski 			   struct netlink_ext_ack *extack)
3748f600b690SJohn Fastabend {
3749e814b958SHeng Qi 	unsigned int room = SKB_DATA_ALIGN(VIRTIO_XDP_HEADROOM +
3750e814b958SHeng Qi 					   sizeof(struct skb_shared_info));
3751e814b958SHeng Qi 	unsigned int max_sz = PAGE_SIZE - room - ETH_HLEN;
3752f600b690SJohn Fastabend 	struct virtnet_info *vi = netdev_priv(dev);
3753f600b690SJohn Fastabend 	struct bpf_prog *old_prog;
3754017b29c3SJason Wang 	u16 xdp_qp = 0, curr_qp;
3755672aafd5SJohn Fastabend 	int i, err;
3756f600b690SJohn Fastabend 
37573f93522fSJason Wang 	if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS)
37583f93522fSJason Wang 	    && (virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO4) ||
375992502fe8SJason Wang 	        virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO6) ||
376092502fe8SJason Wang 	        virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_ECN) ||
376118ba58e1SJason Wang 		virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_UFO) ||
3762418044e1SAndrew Melnychenko 		virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_CSUM) ||
3763418044e1SAndrew Melnychenko 		virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_USO4) ||
3764418044e1SAndrew Melnychenko 		virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_USO6))) {
3765dbcf24d1SJason Wang 		NL_SET_ERR_MSG_MOD(extack, "Can't set XDP while host is implementing GRO_HW/CSUM, disable GRO_HW/CSUM first");
3766f600b690SJohn Fastabend 		return -EOPNOTSUPP;
3767f600b690SJohn Fastabend 	}
3768f600b690SJohn Fastabend 
3769f600b690SJohn Fastabend 	if (vi->mergeable_rx_bufs && !vi->any_header_sg) {
37704d463c4dSDaniel Borkmann 		NL_SET_ERR_MSG_MOD(extack, "XDP expects header/data in single page, any_header_sg required");
3771f600b690SJohn Fastabend 		return -EINVAL;
3772f600b690SJohn Fastabend 	}
3773f600b690SJohn Fastabend 
37748d9bc36dSHeng Qi 	if (prog && !prog->aux->xdp_has_frags && dev->mtu > max_sz) {
37758d9bc36dSHeng Qi 		NL_SET_ERR_MSG_MOD(extack, "MTU too large to enable XDP without frags");
37768d9bc36dSHeng Qi 		netdev_warn(dev, "single-buffer XDP requires MTU less than %u\n", max_sz);
3777f600b690SJohn Fastabend 		return -EINVAL;
3778f600b690SJohn Fastabend 	}
3779f600b690SJohn Fastabend 
3780672aafd5SJohn Fastabend 	curr_qp = vi->curr_queue_pairs - vi->xdp_queue_pairs;
3781672aafd5SJohn Fastabend 	if (prog)
3782672aafd5SJohn Fastabend 		xdp_qp = nr_cpu_ids;
3783672aafd5SJohn Fastabend 
3784672aafd5SJohn Fastabend 	/* XDP requires extra queues for XDP_TX */
3785672aafd5SJohn Fastabend 	if (curr_qp + xdp_qp > vi->max_queue_pairs) {
37869ce4e3d6SXuan Zhuo 		netdev_warn_once(dev, "XDP requests %i queues but max is %i. XDP_TX and XDP_REDIRECT will operate in a slower locked tx mode.\n",
3787672aafd5SJohn Fastabend 				 curr_qp + xdp_qp, vi->max_queue_pairs);
378897c2c69eSXuan Zhuo 		xdp_qp = 0;
3789672aafd5SJohn Fastabend 	}
3790672aafd5SJohn Fastabend 
379103aa6d34SToshiaki Makita 	old_prog = rtnl_dereference(vi->rq[0].xdp_prog);
379203aa6d34SToshiaki Makita 	if (!prog && !old_prog)
379303aa6d34SToshiaki Makita 		return 0;
379403aa6d34SToshiaki Makita 
379585192dbfSAndrii Nakryiko 	if (prog)
379685192dbfSAndrii Nakryiko 		bpf_prog_add(prog, vi->max_queue_pairs - 1);
37972de2f7f4SJohn Fastabend 
37984941d472SJason Wang 	/* Make sure NAPI is not using any XDP TX queues for RX. */
3799534da5e8SToshiaki Makita 	if (netif_running(dev)) {
3800534da5e8SToshiaki Makita 		for (i = 0; i < vi->max_queue_pairs; i++) {
38014941d472SJason Wang 			napi_disable(&vi->rq[i].napi);
3802534da5e8SToshiaki Makita 			virtnet_napi_tx_disable(&vi->sq[i].napi);
3803534da5e8SToshiaki Makita 		}
3804534da5e8SToshiaki Makita 	}
38052de2f7f4SJohn Fastabend 
380603aa6d34SToshiaki Makita 	if (!prog) {
380703aa6d34SToshiaki Makita 		for (i = 0; i < vi->max_queue_pairs; i++) {
380803aa6d34SToshiaki Makita 			rcu_assign_pointer(vi->rq[i].xdp_prog, prog);
380903aa6d34SToshiaki Makita 			if (i == 0)
381003aa6d34SToshiaki Makita 				virtnet_restore_guest_offloads(vi);
381103aa6d34SToshiaki Makita 		}
381203aa6d34SToshiaki Makita 		synchronize_net();
381303aa6d34SToshiaki Makita 	}
381403aa6d34SToshiaki Makita 
38154941d472SJason Wang 	err = _virtnet_set_queues(vi, curr_qp + xdp_qp);
38164941d472SJason Wang 	if (err)
38174941d472SJason Wang 		goto err;
3818188313c1SToshiaki Makita 	netif_set_real_num_rx_queues(dev, curr_qp + xdp_qp);
38194941d472SJason Wang 	vi->xdp_queue_pairs = xdp_qp;
3820f600b690SJohn Fastabend 
382103aa6d34SToshiaki Makita 	if (prog) {
382297c2c69eSXuan Zhuo 		vi->xdp_enabled = true;
3823f600b690SJohn Fastabend 		for (i = 0; i < vi->max_queue_pairs; i++) {
3824f600b690SJohn Fastabend 			rcu_assign_pointer(vi->rq[i].xdp_prog, prog);
382503aa6d34SToshiaki Makita 			if (i == 0 && !old_prog)
38263f93522fSJason Wang 				virtnet_clear_guest_offloads(vi);
38273f93522fSJason Wang 		}
382866c0e13aSMarek Majtyka 		if (!old_prog)
382930bbf891SLorenzo Bianconi 			xdp_features_set_redirect_target(dev, true);
383097c2c69eSXuan Zhuo 	} else {
383166c0e13aSMarek Majtyka 		xdp_features_clear_redirect_target(dev);
383297c2c69eSXuan Zhuo 		vi->xdp_enabled = false;
383303aa6d34SToshiaki Makita 	}
383403aa6d34SToshiaki Makita 
383503aa6d34SToshiaki Makita 	for (i = 0; i < vi->max_queue_pairs; i++) {
3836f600b690SJohn Fastabend 		if (old_prog)
3837f600b690SJohn Fastabend 			bpf_prog_put(old_prog);
3838534da5e8SToshiaki Makita 		if (netif_running(dev)) {
38394941d472SJason Wang 			virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
3840534da5e8SToshiaki Makita 			virtnet_napi_tx_enable(vi, vi->sq[i].vq,
3841534da5e8SToshiaki Makita 					       &vi->sq[i].napi);
3842534da5e8SToshiaki Makita 		}
3843f600b690SJohn Fastabend 	}
3844f600b690SJohn Fastabend 
3845f600b690SJohn Fastabend 	return 0;
38462de2f7f4SJohn Fastabend 
38474941d472SJason Wang err:
384803aa6d34SToshiaki Makita 	if (!prog) {
384903aa6d34SToshiaki Makita 		virtnet_clear_guest_offloads(vi);
38504941d472SJason Wang 		for (i = 0; i < vi->max_queue_pairs; i++)
385103aa6d34SToshiaki Makita 			rcu_assign_pointer(vi->rq[i].xdp_prog, old_prog);
385203aa6d34SToshiaki Makita 	}
385303aa6d34SToshiaki Makita 
38548be4d9a4SToshiaki Makita 	if (netif_running(dev)) {
3855534da5e8SToshiaki Makita 		for (i = 0; i < vi->max_queue_pairs; i++) {
38564941d472SJason Wang 			virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
3857534da5e8SToshiaki Makita 			virtnet_napi_tx_enable(vi, vi->sq[i].vq,
3858534da5e8SToshiaki Makita 					       &vi->sq[i].napi);
3859534da5e8SToshiaki Makita 		}
38608be4d9a4SToshiaki Makita 	}
38612de2f7f4SJohn Fastabend 	if (prog)
38622de2f7f4SJohn Fastabend 		bpf_prog_sub(prog, vi->max_queue_pairs - 1);
38632de2f7f4SJohn Fastabend 	return err;
3864f600b690SJohn Fastabend }
3865f600b690SJohn Fastabend 
3866f4e63525SJakub Kicinski static int virtnet_xdp(struct net_device *dev, struct netdev_bpf *xdp)
3867f600b690SJohn Fastabend {
3868f600b690SJohn Fastabend 	switch (xdp->command) {
3869f600b690SJohn Fastabend 	case XDP_SETUP_PROG:
38709861ce03SJakub Kicinski 		return virtnet_xdp_set(dev, xdp->prog, xdp->extack);
3871f600b690SJohn Fastabend 	default:
3872f600b690SJohn Fastabend 		return -EINVAL;
3873f600b690SJohn Fastabend 	}
3874f600b690SJohn Fastabend }
3875f600b690SJohn Fastabend 
3876ba5e4426SSridhar Samudrala static int virtnet_get_phys_port_name(struct net_device *dev, char *buf,
3877ba5e4426SSridhar Samudrala 				      size_t len)
3878ba5e4426SSridhar Samudrala {
3879ba5e4426SSridhar Samudrala 	struct virtnet_info *vi = netdev_priv(dev);
3880ba5e4426SSridhar Samudrala 	int ret;
3881ba5e4426SSridhar Samudrala 
3882ba5e4426SSridhar Samudrala 	if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_STANDBY))
3883ba5e4426SSridhar Samudrala 		return -EOPNOTSUPP;
3884ba5e4426SSridhar Samudrala 
3885ba5e4426SSridhar Samudrala 	ret = snprintf(buf, len, "sby");
3886ba5e4426SSridhar Samudrala 	if (ret >= len)
3887ba5e4426SSridhar Samudrala 		return -EOPNOTSUPP;
3888ba5e4426SSridhar Samudrala 
3889ba5e4426SSridhar Samudrala 	return 0;
3890ba5e4426SSridhar Samudrala }
3891ba5e4426SSridhar Samudrala 
3892a02e8964SWillem de Bruijn static int virtnet_set_features(struct net_device *dev,
3893a02e8964SWillem de Bruijn 				netdev_features_t features)
3894a02e8964SWillem de Bruijn {
3895a02e8964SWillem de Bruijn 	struct virtnet_info *vi = netdev_priv(dev);
3896cf8691cbSMichael S. Tsirkin 	u64 offloads;
3897a02e8964SWillem de Bruijn 	int err;
3898a02e8964SWillem de Bruijn 
3899dbcf24d1SJason Wang 	if ((dev->features ^ features) & NETIF_F_GRO_HW) {
390097c2c69eSXuan Zhuo 		if (vi->xdp_enabled)
3901a02e8964SWillem de Bruijn 			return -EBUSY;
3902a02e8964SWillem de Bruijn 
3903dbcf24d1SJason Wang 		if (features & NETIF_F_GRO_HW)
3904cf8691cbSMichael S. Tsirkin 			offloads = vi->guest_offloads_capable;
3905a02e8964SWillem de Bruijn 		else
3906cf8691cbSMichael S. Tsirkin 			offloads = vi->guest_offloads_capable &
3907dbcf24d1SJason Wang 				   ~GUEST_OFFLOAD_GRO_HW_MASK;
3908a02e8964SWillem de Bruijn 
3909a02e8964SWillem de Bruijn 		err = virtnet_set_guest_offloads(vi, offloads);
3910a02e8964SWillem de Bruijn 		if (err)
3911a02e8964SWillem de Bruijn 			return err;
39123618ad2aSTonghao Zhang 		vi->guest_offloads = offloads;
3913cf8691cbSMichael S. Tsirkin 	}
3914cf8691cbSMichael S. Tsirkin 
3915c7114b12SAndrew Melnychenko 	if ((dev->features ^ features) & NETIF_F_RXHASH) {
3916c7114b12SAndrew Melnychenko 		if (features & NETIF_F_RXHASH)
3917c1170820SAndrew Melnychenko 			vi->ctrl->rss.hash_types = vi->rss_hash_types_saved;
3918c7114b12SAndrew Melnychenko 		else
3919c7114b12SAndrew Melnychenko 			vi->ctrl->rss.hash_types = VIRTIO_NET_HASH_REPORT_NONE;
3920c7114b12SAndrew Melnychenko 
3921c7114b12SAndrew Melnychenko 		if (!virtnet_commit_rss_command(vi))
3922c7114b12SAndrew Melnychenko 			return -EINVAL;
3923c7114b12SAndrew Melnychenko 	}
3924c7114b12SAndrew Melnychenko 
3925a02e8964SWillem de Bruijn 	return 0;
3926a02e8964SWillem de Bruijn }
3927a02e8964SWillem de Bruijn 
3928a520794bSTony Lu static void virtnet_tx_timeout(struct net_device *dev, unsigned int txqueue)
3929a520794bSTony Lu {
3930a520794bSTony Lu 	struct virtnet_info *priv = netdev_priv(dev);
3931a520794bSTony Lu 	struct send_queue *sq = &priv->sq[txqueue];
3932a520794bSTony Lu 	struct netdev_queue *txq = netdev_get_tx_queue(dev, txqueue);
3933a520794bSTony Lu 
3934a520794bSTony Lu 	u64_stats_update_begin(&sq->stats.syncp);
393527debe3eSEric Dumazet 	u64_stats_inc(&sq->stats.tx_timeouts);
3936a520794bSTony Lu 	u64_stats_update_end(&sq->stats.syncp);
3937a520794bSTony Lu 
3938a520794bSTony Lu 	netdev_err(dev, "TX timeout on queue: %u, sq: %s, vq: 0x%x, name: %s, %u usecs ago\n",
3939a520794bSTony Lu 		   txqueue, sq->name, sq->vq->index, sq->vq->name,
39405337824fSEric Dumazet 		   jiffies_to_usecs(jiffies - READ_ONCE(txq->trans_start)));
3941a520794bSTony Lu }
3942a520794bSTony Lu 
394376288b4eSStephen Hemminger static const struct net_device_ops virtnet_netdev = {
394476288b4eSStephen Hemminger 	.ndo_open            = virtnet_open,
394576288b4eSStephen Hemminger 	.ndo_stop   	     = virtnet_close,
394676288b4eSStephen Hemminger 	.ndo_start_xmit      = start_xmit,
394776288b4eSStephen Hemminger 	.ndo_validate_addr   = eth_validate_addr,
39489c46f6d4SAlex Williamson 	.ndo_set_mac_address = virtnet_set_mac_address,
39492af7698eSAlex Williamson 	.ndo_set_rx_mode     = virtnet_set_rx_mode,
39503fa2a1dfSstephen hemminger 	.ndo_get_stats64     = virtnet_stats,
39511824a989SAlex Williamson 	.ndo_vlan_rx_add_vid = virtnet_vlan_rx_add_vid,
39521824a989SAlex Williamson 	.ndo_vlan_rx_kill_vid = virtnet_vlan_rx_kill_vid,
3953f4e63525SJakub Kicinski 	.ndo_bpf		= virtnet_xdp,
3954186b3c99SJason Wang 	.ndo_xdp_xmit		= virtnet_xdp_xmit,
39552836b4f2SVlad Yasevich 	.ndo_features_check	= passthru_features_check,
3956ba5e4426SSridhar Samudrala 	.ndo_get_phys_port_name	= virtnet_get_phys_port_name,
3957a02e8964SWillem de Bruijn 	.ndo_set_features	= virtnet_set_features,
3958a520794bSTony Lu 	.ndo_tx_timeout		= virtnet_tx_timeout,
395976288b4eSStephen Hemminger };
396076288b4eSStephen Hemminger 
3961586d17c5SJason Wang static void virtnet_config_changed_work(struct work_struct *work)
39629f4d26d0SMark McLoughlin {
3963586d17c5SJason Wang 	struct virtnet_info *vi =
3964586d17c5SJason Wang 		container_of(work, struct virtnet_info, config_work);
39659f4d26d0SMark McLoughlin 	u16 v;
39669f4d26d0SMark McLoughlin 
3967855e0c52SRusty Russell 	if (virtio_cread_feature(vi->vdev, VIRTIO_NET_F_STATUS,
3968855e0c52SRusty Russell 				 struct virtio_net_config, status, &v) < 0)
3969507613bfSMichael S. Tsirkin 		return;
3970586d17c5SJason Wang 
3971586d17c5SJason Wang 	if (v & VIRTIO_NET_S_ANNOUNCE) {
3972ee89bab1SAmerigo Wang 		netdev_notify_peers(vi->dev);
3973586d17c5SJason Wang 		virtnet_ack_link_announce(vi);
3974586d17c5SJason Wang 	}
39759f4d26d0SMark McLoughlin 
39769f4d26d0SMark McLoughlin 	/* Ignore unknown (future) status bits */
39779f4d26d0SMark McLoughlin 	v &= VIRTIO_NET_S_LINK_UP;
39789f4d26d0SMark McLoughlin 
39799f4d26d0SMark McLoughlin 	if (vi->status == v)
3980507613bfSMichael S. Tsirkin 		return;
39819f4d26d0SMark McLoughlin 
39829f4d26d0SMark McLoughlin 	vi->status = v;
39839f4d26d0SMark McLoughlin 
39849f4d26d0SMark McLoughlin 	if (vi->status & VIRTIO_NET_S_LINK_UP) {
3985faa9b39fSJason Baron 		virtnet_update_settings(vi);
39869f4d26d0SMark McLoughlin 		netif_carrier_on(vi->dev);
3987986a4f4dSJason Wang 		netif_tx_wake_all_queues(vi->dev);
39889f4d26d0SMark McLoughlin 	} else {
39899f4d26d0SMark McLoughlin 		netif_carrier_off(vi->dev);
3990986a4f4dSJason Wang 		netif_tx_stop_all_queues(vi->dev);
39919f4d26d0SMark McLoughlin 	}
39929f4d26d0SMark McLoughlin }
39939f4d26d0SMark McLoughlin 
39949f4d26d0SMark McLoughlin static void virtnet_config_changed(struct virtio_device *vdev)
39959f4d26d0SMark McLoughlin {
39969f4d26d0SMark McLoughlin 	struct virtnet_info *vi = vdev->priv;
39979f4d26d0SMark McLoughlin 
39983b07e9caSTejun Heo 	schedule_work(&vi->config_work);
39999f4d26d0SMark McLoughlin }
40009f4d26d0SMark McLoughlin 
4001986a4f4dSJason Wang static void virtnet_free_queues(struct virtnet_info *vi)
4002986a4f4dSJason Wang {
4003d4fb84eeSAndrey Vagin 	int i;
4004d4fb84eeSAndrey Vagin 
4005ab3971b1SJason Wang 	for (i = 0; i < vi->max_queue_pairs; i++) {
40065198d545SJakub Kicinski 		__netif_napi_del(&vi->rq[i].napi);
40075198d545SJakub Kicinski 		__netif_napi_del(&vi->sq[i].napi);
4008ab3971b1SJason Wang 	}
4009d4fb84eeSAndrey Vagin 
40105198d545SJakub Kicinski 	/* We called __netif_napi_del(), so we must wait for an RCU
4011963abe5cSEric Dumazet 	 * grace period before freeing vi->rq.
4012963abe5cSEric Dumazet 	 */
4013963abe5cSEric Dumazet 	synchronize_net();
4014963abe5cSEric Dumazet 
4015986a4f4dSJason Wang 	kfree(vi->rq);
4016986a4f4dSJason Wang 	kfree(vi->sq);
401712e57169SMichael S. Tsirkin 	kfree(vi->ctrl);
4018986a4f4dSJason Wang }
4019986a4f4dSJason Wang 
402047315329SJohn Fastabend static void _free_receive_bufs(struct virtnet_info *vi)
4021986a4f4dSJason Wang {
4022f600b690SJohn Fastabend 	struct bpf_prog *old_prog;
4023986a4f4dSJason Wang 	int i;
4024986a4f4dSJason Wang 
4025986a4f4dSJason Wang 	for (i = 0; i < vi->max_queue_pairs; i++) {
4026986a4f4dSJason Wang 		while (vi->rq[i].pages)
4027986a4f4dSJason Wang 			__free_pages(get_a_page(&vi->rq[i], GFP_KERNEL), 0);
4028f600b690SJohn Fastabend 
4029f600b690SJohn Fastabend 		old_prog = rtnl_dereference(vi->rq[i].xdp_prog);
4030f600b690SJohn Fastabend 		RCU_INIT_POINTER(vi->rq[i].xdp_prog, NULL);
4031f600b690SJohn Fastabend 		if (old_prog)
4032f600b690SJohn Fastabend 			bpf_prog_put(old_prog);
4033986a4f4dSJason Wang 	}
403447315329SJohn Fastabend }
403547315329SJohn Fastabend 
403647315329SJohn Fastabend static void free_receive_bufs(struct virtnet_info *vi)
403747315329SJohn Fastabend {
403847315329SJohn Fastabend 	rtnl_lock();
403947315329SJohn Fastabend 	_free_receive_bufs(vi);
4040f600b690SJohn Fastabend 	rtnl_unlock();
4041986a4f4dSJason Wang }
4042986a4f4dSJason Wang 
4043fb51879dSMichael Dalton static void free_receive_page_frags(struct virtnet_info *vi)
4044fb51879dSMichael Dalton {
4045fb51879dSMichael Dalton 	int i;

4046fb51879dSMichael Dalton 	for (i = 0; i < vi->max_queue_pairs; i++)
4047295525e2SXuan Zhuo 		if (vi->rq[i].alloc_frag.page) {
4048295525e2SXuan Zhuo 			if (vi->rq[i].do_dma && vi->rq[i].last_dma)
4049295525e2SXuan Zhuo 				virtnet_rq_unmap(&vi->rq[i], vi->rq[i].last_dma, 0);
4050fb51879dSMichael Dalton 			put_page(vi->rq[i].alloc_frag.page);
4051fb51879dSMichael Dalton 		}
4052295525e2SXuan Zhuo }
4053fb51879dSMichael Dalton 
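/* A TX completion carries either an sk_buff or an xdp_frame; the two
 * are told apart by the VIRTIO_XDP_FLAG tag in the pointer's low bit,
 * which is_xdp_frame() tests and ptr_to_xdp() strips.
 */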
40546e345f8cSXuan Zhuo static void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf)
40556e345f8cSXuan Zhuo {
40566e345f8cSXuan Zhuo 	if (!is_xdp_frame(buf))
40576e345f8cSXuan Zhuo 		dev_kfree_skb(buf);
40586e345f8cSXuan Zhuo 	else
40596e345f8cSXuan Zhuo 		xdp_return_frame(ptr_to_xdp(buf));
40606e345f8cSXuan Zhuo }
40616e345f8cSXuan Zhuo 
4062986a4f4dSJason Wang static void free_unused_bufs(struct virtnet_info *vi)
4063986a4f4dSJason Wang {
4064986a4f4dSJason Wang 	void *buf;
4065986a4f4dSJason Wang 	int i;
4066986a4f4dSJason Wang 
4067986a4f4dSJason Wang 	for (i = 0; i < vi->max_queue_pairs; i++) {
4068986a4f4dSJason Wang 		struct virtqueue *vq = vi->sq[i].vq;
40696e345f8cSXuan Zhuo 		while ((buf = virtqueue_detach_unused_buf(vq)) != NULL)
40706e345f8cSXuan Zhuo 			virtnet_sq_free_unused_buf(vq, buf);
4071f8bb5104SWenliang Wang 		cond_resched();
4072986a4f4dSJason Wang 	}
4073986a4f4dSJason Wang 
4074986a4f4dSJason Wang 	for (i = 0; i < vi->max_queue_pairs; i++) {
40753ffd05c2SXuan Zhuo 		struct virtqueue *vq = vi->rq[i].vq;
4076295525e2SXuan Zhuo 
40773ffd05c2SXuan Zhuo 		while ((buf = virtqueue_detach_unused_buf(vq)) != NULL)
40783ffd05c2SXuan Zhuo 			virtnet_rq_unmap_free_buf(vq, buf);
4079f8bb5104SWenliang Wang 		cond_resched();
4080986a4f4dSJason Wang 	}
4081ab7db917SMichael Dalton }
4082986a4f4dSJason Wang 
4083e9d7417bSJason Wang static void virtnet_del_vqs(struct virtnet_info *vi)
4084e9d7417bSJason Wang {
4085e9d7417bSJason Wang 	struct virtio_device *vdev = vi->vdev;
4086e9d7417bSJason Wang 
4087310974faSPeter Xu 	virtnet_clean_affinity(vi);
4088986a4f4dSJason Wang 
4089e9d7417bSJason Wang 	vdev->config->del_vqs(vdev);
4090986a4f4dSJason Wang 
4091986a4f4dSJason Wang 	virtnet_free_queues(vi);
4092986a4f4dSJason Wang }
4093986a4f4dSJason Wang 
4094d85b758fSMichael S. Tsirkin /* How large should a single buffer be so a queue full of these can fit at
4095d85b758fSMichael S. Tsirkin  * least one full packet?
4096d85b758fSMichael S. Tsirkin  * Logic below assumes the mergeable buffer header is used.
4097d85b758fSMichael S. Tsirkin  */
4098d85b758fSMichael S. Tsirkin static unsigned int mergeable_min_buf_len(struct virtnet_info *vi, struct virtqueue *vq)
4099d85b758fSMichael S. Tsirkin {
4100c1ddc42dSAndrew Melnychenko 	const unsigned int hdr_len = vi->hdr_len;
4101d85b758fSMichael S. Tsirkin 	unsigned int rq_size = virtqueue_get_vring_size(vq);
4102d85b758fSMichael S. Tsirkin 	unsigned int packet_len = vi->big_packets ? IP_MAX_MTU : vi->dev->max_mtu;
4103d85b758fSMichael S. Tsirkin 	unsigned int buf_len = hdr_len + ETH_HLEN + VLAN_HLEN + packet_len;
4104d85b758fSMichael S. Tsirkin 	unsigned int min_buf_len = DIV_ROUND_UP(buf_len, rq_size);
4105d85b758fSMichael S. Tsirkin 
4106f0c3192cSMichael S. Tsirkin 	return max(max(min_buf_len, hdr_len) - hdr_len,
4107f0c3192cSMichael S. Tsirkin 		   (unsigned int)GOOD_PACKET_LEN);
4108d85b758fSMichael S. Tsirkin }
4109d85b758fSMichael S. Tsirkin 
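/* Worked example for mergeable_min_buf_len(), with illustrative numbers:
 * hdr_len = 12, a 256-entry ring and max_mtu = 1500 give
 * buf_len = 12 + 14 + 4 + 1500 = 1530 and
 * min_buf_len = DIV_ROUND_UP(1530, 256) = 6; the max() expressions then
 * lift the result to GOOD_PACKET_LEN (1518), so a tiny per-buffer
 * minimum never undercuts the sane default.
 */
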
4110986a4f4dSJason Wang static int virtnet_find_vqs(struct virtnet_info *vi)
4111986a4f4dSJason Wang {
4112986a4f4dSJason Wang 	vq_callback_t **callbacks;
4113986a4f4dSJason Wang 	struct virtqueue **vqs;
4114986a4f4dSJason Wang 	const char **names;
411520e81d2cSZhu Yanjun 	int ret = -ENOMEM;
411620e81d2cSZhu Yanjun 	int total_vqs;
4117d45b897bSMichael S. Tsirkin 	bool *ctx;
411820e81d2cSZhu Yanjun 	u16 i;
4119986a4f4dSJason Wang 
4120986a4f4dSJason Wang 	/* We expect 1 RX virtqueue followed by 1 TX virtqueue, followed by
4121986a4f4dSJason Wang 	 * up to N-1 further RX/TX queue pairs used in multiqueue mode,
4122986a4f4dSJason Wang 	 * and finally an optional control vq.
4123986a4f4dSJason Wang 	 */
4124986a4f4dSJason Wang 	total_vqs = vi->max_queue_pairs * 2 +
4125986a4f4dSJason Wang 		    virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ);
4126986a4f4dSJason Wang 
4127986a4f4dSJason Wang 	/* Allocate space for find_vqs parameters */
41286396bb22SKees Cook 	vqs = kcalloc(total_vqs, sizeof(*vqs), GFP_KERNEL);
4129986a4f4dSJason Wang 	if (!vqs)
4130986a4f4dSJason Wang 		goto err_vq;
41316da2ec56SKees Cook 	callbacks = kmalloc_array(total_vqs, sizeof(*callbacks), GFP_KERNEL);
4132986a4f4dSJason Wang 	if (!callbacks)
4133986a4f4dSJason Wang 		goto err_callback;
41346da2ec56SKees Cook 	names = kmalloc_array(total_vqs, sizeof(*names), GFP_KERNEL);
4135986a4f4dSJason Wang 	if (!names)
4136986a4f4dSJason Wang 		goto err_names;
4137192f68cfSJason Wang 	if (!vi->big_packets || vi->mergeable_rx_bufs) {
41386396bb22SKees Cook 		ctx = kcalloc(total_vqs, sizeof(*ctx), GFP_KERNEL);
4139d45b897bSMichael S. Tsirkin 		if (!ctx)
4140d45b897bSMichael S. Tsirkin 			goto err_ctx;
4141d45b897bSMichael S. Tsirkin 	} else {
4142d45b897bSMichael S. Tsirkin 		ctx = NULL;
4143d45b897bSMichael S. Tsirkin 	}
4144986a4f4dSJason Wang 
4145986a4f4dSJason Wang 	/* Parameters for control virtqueue, if any */
4146986a4f4dSJason Wang 	if (vi->has_cvq) {
4147986a4f4dSJason Wang 		callbacks[total_vqs - 1] = NULL;
4148986a4f4dSJason Wang 		names[total_vqs - 1] = "control";
4149986a4f4dSJason Wang 	}
4150986a4f4dSJason Wang 
4151986a4f4dSJason Wang 	/* Allocate/initialize parameters for send/receive virtqueues */
4152986a4f4dSJason Wang 	for (i = 0; i < vi->max_queue_pairs; i++) {
4153986a4f4dSJason Wang 		callbacks[rxq2vq(i)] = skb_recv_done;
4154986a4f4dSJason Wang 		callbacks[txq2vq(i)] = skb_xmit_done;
415520e81d2cSZhu Yanjun 		sprintf(vi->rq[i].name, "input.%u", i);
415620e81d2cSZhu Yanjun 		sprintf(vi->sq[i].name, "output.%u", i);
4157986a4f4dSJason Wang 		names[rxq2vq(i)] = vi->rq[i].name;
4158986a4f4dSJason Wang 		names[txq2vq(i)] = vi->sq[i].name;
4159d45b897bSMichael S. Tsirkin 		if (ctx)
4160d45b897bSMichael S. Tsirkin 			ctx[rxq2vq(i)] = true;
4161986a4f4dSJason Wang 	}
4162986a4f4dSJason Wang 
41632e9ca760SMichael S. Tsirkin 	ret = virtio_find_vqs_ctx(vi->vdev, total_vqs, vqs, callbacks,
41642e9ca760SMichael S. Tsirkin 				  names, ctx, NULL);
4165986a4f4dSJason Wang 	if (ret)
4166986a4f4dSJason Wang 		goto err_find;
4167986a4f4dSJason Wang 
4168986a4f4dSJason Wang 	if (vi->has_cvq) {
4169986a4f4dSJason Wang 		vi->cvq = vqs[total_vqs - 1];
4170986a4f4dSJason Wang 		if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VLAN))
4171f646968fSPatrick McHardy 			vi->dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
4172986a4f4dSJason Wang 	}
4173986a4f4dSJason Wang 
4174986a4f4dSJason Wang 	for (i = 0; i < vi->max_queue_pairs; i++) {
4175986a4f4dSJason Wang 		vi->rq[i].vq = vqs[rxq2vq(i)];
4176d85b758fSMichael S. Tsirkin 		vi->rq[i].min_buf_len = mergeable_min_buf_len(vi, vi->rq[i].vq);
4177986a4f4dSJason Wang 		vi->sq[i].vq = vqs[txq2vq(i)];
4178986a4f4dSJason Wang 	}
4179986a4f4dSJason Wang 
41802fa3c8a8STonghao Zhang 	/* Success: ret == 0; fall through to free the temporary arrays. */
4181986a4f4dSJason Wang 
4183986a4f4dSJason Wang err_find:
4184d45b897bSMichael S. Tsirkin 	kfree(ctx);
4185d45b897bSMichael S. Tsirkin err_ctx:
4186986a4f4dSJason Wang 	kfree(names);
4187986a4f4dSJason Wang err_names:
4188986a4f4dSJason Wang 	kfree(callbacks);
4189986a4f4dSJason Wang err_callback:
4190986a4f4dSJason Wang 	kfree(vqs);
4191986a4f4dSJason Wang err_vq:
4192986a4f4dSJason Wang 	return ret;
4193986a4f4dSJason Wang }
4194986a4f4dSJason Wang 
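/* Resulting virtqueue layout for N queue pairs plus a control vq:
 *
 *	vq 0: rx0, vq 1: tx0, vq 2: rx1, vq 3: tx1, ...,
 *	vq 2N: control
 *
 * which is exactly the mapping the rxq2vq()/txq2vq() helpers encode.
 */
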
4195986a4f4dSJason Wang static int virtnet_alloc_queues(struct virtnet_info *vi)
4196986a4f4dSJason Wang {
4197986a4f4dSJason Wang 	int i;
4198986a4f4dSJason Wang 
4199122b84a1SMax Gurtovoy 	if (vi->has_cvq) {
420012e57169SMichael S. Tsirkin 		vi->ctrl = kzalloc(sizeof(*vi->ctrl), GFP_KERNEL);
420112e57169SMichael S. Tsirkin 		if (!vi->ctrl)
420212e57169SMichael S. Tsirkin 			goto err_ctrl;
4203122b84a1SMax Gurtovoy 	} else {
4204122b84a1SMax Gurtovoy 		vi->ctrl = NULL;
4205122b84a1SMax Gurtovoy 	}
42066396bb22SKees Cook 	vi->sq = kcalloc(vi->max_queue_pairs, sizeof(*vi->sq), GFP_KERNEL);
4207986a4f4dSJason Wang 	if (!vi->sq)
4208986a4f4dSJason Wang 		goto err_sq;
42096396bb22SKees Cook 	vi->rq = kcalloc(vi->max_queue_pairs, sizeof(*vi->rq), GFP_KERNEL);
4210008d4278SAmerigo Wang 	if (!vi->rq)
4211986a4f4dSJason Wang 		goto err_rq;
4212986a4f4dSJason Wang 
4213986a4f4dSJason Wang 	INIT_DELAYED_WORK(&vi->refill, refill_work);
4214986a4f4dSJason Wang 	for (i = 0; i < vi->max_queue_pairs; i++) {
4215986a4f4dSJason Wang 		vi->rq[i].pages = NULL;
4216d484735dSJakub Kicinski 		netif_napi_add_weight(vi->dev, &vi->rq[i].napi, virtnet_poll,
4217986a4f4dSJason Wang 				      napi_weight);
42188d602e1aSJakub Kicinski 		netif_napi_add_tx_weight(vi->dev, &vi->sq[i].napi,
42198d602e1aSJakub Kicinski 					 virtnet_poll_tx,
4220b92f1e67SWillem de Bruijn 					 napi_tx ? napi_weight : 0);
4221986a4f4dSJason Wang 
4222986a4f4dSJason Wang 		sg_init_table(vi->rq[i].sg, ARRAY_SIZE(vi->rq[i].sg));
42235377d758SJohannes Berg 		ewma_pkt_len_init(&vi->rq[i].mrg_avg_pkt_len);
4224986a4f4dSJason Wang 		sg_init_table(vi->sq[i].sg, ARRAY_SIZE(vi->sq[i].sg));
4225d7dfc5cfSToshiaki Makita 
4226d7dfc5cfSToshiaki Makita 		u64_stats_init(&vi->rq[i].stats.syncp);
4227d7dfc5cfSToshiaki Makita 		u64_stats_init(&vi->sq[i].stats.syncp);
4228986a4f4dSJason Wang 	}
4229986a4f4dSJason Wang 
4230986a4f4dSJason Wang 	return 0;
4231986a4f4dSJason Wang 
4232986a4f4dSJason Wang err_rq:
4233986a4f4dSJason Wang 	kfree(vi->sq);
4234986a4f4dSJason Wang err_sq:
423512e57169SMichael S. Tsirkin 	kfree(vi->ctrl);
423612e57169SMichael S. Tsirkin err_ctrl:
4237986a4f4dSJason Wang 	return -ENOMEM;
4238e9d7417bSJason Wang }
4239e9d7417bSJason Wang 
42403f9c10b0SAmit Shah static int init_vqs(struct virtnet_info *vi)
42413f9c10b0SAmit Shah {
4242986a4f4dSJason Wang 	int ret;
42433f9c10b0SAmit Shah 
4244986a4f4dSJason Wang 	/* Allocate send & receive queues */
4245986a4f4dSJason Wang 	ret = virtnet_alloc_queues(vi);
4246986a4f4dSJason Wang 	if (ret)
4247986a4f4dSJason Wang 		goto err;
42483f9c10b0SAmit Shah 
4249986a4f4dSJason Wang 	ret = virtnet_find_vqs(vi);
4250986a4f4dSJason Wang 	if (ret)
4251986a4f4dSJason Wang 		goto err_free;
42523f9c10b0SAmit Shah 
4253295525e2SXuan Zhuo 	virtnet_rq_set_premapped(vi);
4254295525e2SXuan Zhuo 
4255a0d1d0f4SSebastian Andrzej Siewior 	cpus_read_lock();
42568898c21cSWanlong Gao 	virtnet_set_affinity(vi);
4257a0d1d0f4SSebastian Andrzej Siewior 	cpus_read_unlock();
425847be2479SWanlong Gao 
42593f9c10b0SAmit Shah 	return 0;
4260986a4f4dSJason Wang 
4261986a4f4dSJason Wang err_free:
4262986a4f4dSJason Wang 	virtnet_free_queues(vi);
4263986a4f4dSJason Wang err:
4264986a4f4dSJason Wang 	return ret;
42653f9c10b0SAmit Shah }
42663f9c10b0SAmit Shah 
4267fbf28d78SMichael Dalton #ifdef CONFIG_SYSFS
4268fbf28d78SMichael Dalton static ssize_t mergeable_rx_buffer_size_show(struct netdev_rx_queue *queue,
4269718ad681Sstephen hemminger 		char *buf)
4270fbf28d78SMichael Dalton {
4271fbf28d78SMichael Dalton 	struct virtnet_info *vi = netdev_priv(queue->dev);
4272fbf28d78SMichael Dalton 	unsigned int queue_index = get_netdev_rx_queue_index(queue);
42733cc81a9aSJason Wang 	unsigned int headroom = virtnet_get_headroom(vi);
42743cc81a9aSJason Wang 	unsigned int tailroom = headroom ? sizeof(struct skb_shared_info) : 0;
42755377d758SJohannes Berg 	struct ewma_pkt_len *avg;
4276fbf28d78SMichael Dalton 
4277fbf28d78SMichael Dalton 	BUG_ON(queue_index >= vi->max_queue_pairs);
4278fbf28d78SMichael Dalton 	avg = &vi->rq[queue_index].mrg_avg_pkt_len;
4279d85b758fSMichael S. Tsirkin 	return sprintf(buf, "%u\n",
42803cc81a9aSJason Wang 		       get_mergeable_buf_len(&vi->rq[queue_index], avg,
42813cc81a9aSJason Wang 				       SKB_DATA_ALIGN(headroom + tailroom)));
4282fbf28d78SMichael Dalton }
4283fbf28d78SMichael Dalton 
4284fbf28d78SMichael Dalton static struct rx_queue_attribute mergeable_rx_buffer_size_attribute =
4285fbf28d78SMichael Dalton 	__ATTR_RO(mergeable_rx_buffer_size);
4286fbf28d78SMichael Dalton 
4287fbf28d78SMichael Dalton static struct attribute *virtio_net_mrg_rx_attrs[] = {
4288fbf28d78SMichael Dalton 	&mergeable_rx_buffer_size_attribute.attr,
4289fbf28d78SMichael Dalton 	NULL
4290fbf28d78SMichael Dalton };
4291fbf28d78SMichael Dalton 
4292fbf28d78SMichael Dalton static const struct attribute_group virtio_net_mrg_rx_group = {
4293fbf28d78SMichael Dalton 	.name = "virtio_net",
4294fbf28d78SMichael Dalton 	.attrs = virtio_net_mrg_rx_attrs
4295fbf28d78SMichael Dalton };
4296fbf28d78SMichael Dalton #endif
4297fbf28d78SMichael Dalton 
4298892d6eb1SJason Wang static bool virtnet_fail_on_feature(struct virtio_device *vdev,
4299892d6eb1SJason Wang 				    unsigned int fbit,
4300892d6eb1SJason Wang 				    const char *fname, const char *dname)
4301892d6eb1SJason Wang {
4302892d6eb1SJason Wang 	if (!virtio_has_feature(vdev, fbit))
4303892d6eb1SJason Wang 		return false;
4304892d6eb1SJason Wang 
4305892d6eb1SJason Wang 	dev_err(&vdev->dev, "device advertises feature %s but not %s",
4306892d6eb1SJason Wang 		fname, dname);
4307892d6eb1SJason Wang 
4308892d6eb1SJason Wang 	return true;
4309892d6eb1SJason Wang }
4310892d6eb1SJason Wang 
4311892d6eb1SJason Wang #define VIRTNET_FAIL_ON(vdev, fbit, dbit)			\
4312892d6eb1SJason Wang 	virtnet_fail_on_feature(vdev, fbit, #fbit, dbit)
4313892d6eb1SJason Wang 
4314892d6eb1SJason Wang static bool virtnet_validate_features(struct virtio_device *vdev)
4315892d6eb1SJason Wang {
4316892d6eb1SJason Wang 	if (!virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ) &&
4317892d6eb1SJason Wang 	    (VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_CTRL_RX,
4318892d6eb1SJason Wang 			     "VIRTIO_NET_F_CTRL_VQ") ||
4319892d6eb1SJason Wang 	     VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_CTRL_VLAN,
4320892d6eb1SJason Wang 			     "VIRTIO_NET_F_CTRL_VQ") ||
4321892d6eb1SJason Wang 	     VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_GUEST_ANNOUNCE,
4322892d6eb1SJason Wang 			     "VIRTIO_NET_F_CTRL_VQ") ||
4323892d6eb1SJason Wang 	     VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_MQ, "VIRTIO_NET_F_CTRL_VQ") ||
4324892d6eb1SJason Wang 	     VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR,
4325c7114b12SAndrew Melnychenko 			     "VIRTIO_NET_F_CTRL_VQ") ||
4326c7114b12SAndrew Melnychenko 	     VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_RSS,
432791f41f01SAndrew Melnychenko 			     "VIRTIO_NET_F_CTRL_VQ") ||
432891f41f01SAndrew Melnychenko 	     VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_HASH_REPORT,
4329699b045aSAlvaro Karsz 			     "VIRTIO_NET_F_CTRL_VQ") ||
4330699b045aSAlvaro Karsz 	     VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_NOTF_COAL,
43318af3bf66SGavin Li 			     "VIRTIO_NET_F_CTRL_VQ") ||
43328af3bf66SGavin Li 	     VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_VQ_NOTF_COAL,
4333892d6eb1SJason Wang 			     "VIRTIO_NET_F_CTRL_VQ"))) {
4334892d6eb1SJason Wang 		return false;
4335892d6eb1SJason Wang 	}
4336892d6eb1SJason Wang 
4337892d6eb1SJason Wang 	return true;
4338892d6eb1SJason Wang }
4339892d6eb1SJason Wang 
4340d0c2c997SJarod Wilson #define MIN_MTU ETH_MIN_MTU
4341d0c2c997SJarod Wilson #define MAX_MTU ETH_MAX_MTU
4342d0c2c997SJarod Wilson 
4343fe36cbe0SMichael S. Tsirkin static int virtnet_validate(struct virtio_device *vdev)
4344296f96fcSRusty Russell {
43456ba42248SMichael S. Tsirkin 	if (!vdev->config->get) {
43466ba42248SMichael S. Tsirkin 		dev_err(&vdev->dev, "%s failure: config access disabled\n",
43476ba42248SMichael S. Tsirkin 			__func__);
43486ba42248SMichael S. Tsirkin 		return -EINVAL;
43496ba42248SMichael S. Tsirkin 	}
43506ba42248SMichael S. Tsirkin 
4351892d6eb1SJason Wang 	if (!virtnet_validate_features(vdev))
4352892d6eb1SJason Wang 		return -EINVAL;
4353892d6eb1SJason Wang 
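	/* A device MTU below the kernel minimum is treated as if the MTU
	 * feature had not been offered at all: clear the bit so it is never
	 * negotiated, rather than failing probe.
	 */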
4354fe36cbe0SMichael S. Tsirkin 	if (virtio_has_feature(vdev, VIRTIO_NET_F_MTU)) {
4355fe36cbe0SMichael S. Tsirkin 		int mtu = virtio_cread16(vdev,
4356fe36cbe0SMichael S. Tsirkin 					 offsetof(struct virtio_net_config,
4357fe36cbe0SMichael S. Tsirkin 						  mtu));
4358fe36cbe0SMichael S. Tsirkin 		if (mtu < MIN_MTU)
4359fe36cbe0SMichael S. Tsirkin 			__virtio_clear_bit(vdev, VIRTIO_NET_F_MTU);
4360fe36cbe0SMichael S. Tsirkin 	}
4361fe36cbe0SMichael S. Tsirkin 
43627c06458cSLaurent Vivier 	if (virtio_has_feature(vdev, VIRTIO_NET_F_STANDBY) &&
43637c06458cSLaurent Vivier 	    !virtio_has_feature(vdev, VIRTIO_NET_F_MAC)) {
43647c06458cSLaurent Vivier 	    dev_warn(&vdev->dev, "device advertises feature VIRTIO_NET_F_STANDBY but not VIRTIO_NET_F_MAC, disabling standby\n");
43657c06458cSLaurent Vivier 		__virtio_clear_bit(vdev, VIRTIO_NET_F_STANDBY);
43667c06458cSLaurent Vivier 	}
43677c06458cSLaurent Vivier 
4368fe36cbe0SMichael S. Tsirkin 	return 0;
4369fe36cbe0SMichael S. Tsirkin }
4370fe36cbe0SMichael S. Tsirkin 
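/* Big (GSO-sized) receive buffers are needed if any guest GSO feature was
 * offered; UDP segmentation (USO) counts only when both the v4 and v6
 * variants are present.
 */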
437146cd26f4SGavin Li static bool virtnet_check_guest_gso(const struct virtnet_info *vi)
437246cd26f4SGavin Li {
437346cd26f4SGavin Li 	return virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO4) ||
437446cd26f4SGavin Li 		virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO6) ||
437546cd26f4SGavin Li 		virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_ECN) ||
4376418044e1SAndrew Melnychenko 		virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_UFO) ||
4377418044e1SAndrew Melnychenko 		(virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_USO4) &&
4378418044e1SAndrew Melnychenko 		virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_USO6));
437946cd26f4SGavin Li }
438046cd26f4SGavin Li 
43814959aebbSGavin Li static void virtnet_set_big_packets(struct virtnet_info *vi, const int mtu)
43824959aebbSGavin Li {
43834959aebbSGavin Li 	bool guest_gso = virtnet_check_guest_gso(vi);
43844959aebbSGavin Li 
43854959aebbSGavin Li 	/* If the device can receive ANY guest GSO packets, regardless of MTU,
43864959aebbSGavin Li 	 * allocate buffers of maximum size; otherwise limit them to
43874959aebbSGavin Li 	 * MTU-sized buffers only.
43884959aebbSGavin Li 	 */
43894959aebbSGavin Li 	if (mtu > ETH_DATA_LEN || guest_gso) {
43904959aebbSGavin Li 		vi->big_packets = true;
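		/* A GSO packet can span up to MAX_SKB_FRAGS pages; without
		 * GSO, enough page-sized frags to hold one MTU is sufficient.
		 */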
43914959aebbSGavin Li 		vi->big_packets_num_skbfrags = guest_gso ? MAX_SKB_FRAGS : DIV_ROUND_UP(mtu, PAGE_SIZE);
43924959aebbSGavin Li 	}
43934959aebbSGavin Li }
43944959aebbSGavin Li 
4395fe36cbe0SMichael S. Tsirkin static int virtnet_probe(struct virtio_device *vdev)
4396fe36cbe0SMichael S. Tsirkin {
4397d7dfc5cfSToshiaki Makita 	int i, err = -ENOMEM;
4398fe36cbe0SMichael S. Tsirkin 	struct net_device *dev;
4399fe36cbe0SMichael S. Tsirkin 	struct virtnet_info *vi;
4400fe36cbe0SMichael S. Tsirkin 	u16 max_queue_pairs;
44014959aebbSGavin Li 	int mtu = 0;
4402fe36cbe0SMichael S. Tsirkin 
4403c7114b12SAndrew Melnychenko 	/* Find out whether the host supports a multiqueue/RSS virtio_net device */
4404c7114b12SAndrew Melnychenko 	max_queue_pairs = 1;
4405c7114b12SAndrew Melnychenko 	if (virtio_has_feature(vdev, VIRTIO_NET_F_MQ) || virtio_has_feature(vdev, VIRTIO_NET_F_RSS))
4406c7114b12SAndrew Melnychenko 		max_queue_pairs =
4407c7114b12SAndrew Melnychenko 		     virtio_cread16(vdev, offsetof(struct virtio_net_config, max_virtqueue_pairs));
4408986a4f4dSJason Wang 
4409986a4f4dSJason Wang 	/* We need at least 2 queues */
4410c7114b12SAndrew Melnychenko 	if (max_queue_pairs < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN ||
4411986a4f4dSJason Wang 	    max_queue_pairs > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX ||
4412986a4f4dSJason Wang 	    !virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ))
4413986a4f4dSJason Wang 		max_queue_pairs = 1;
4414296f96fcSRusty Russell 
4415296f96fcSRusty Russell 	/* Allocate ourselves a network device with room for our info */
4416986a4f4dSJason Wang 	dev = alloc_etherdev_mq(sizeof(struct virtnet_info), max_queue_pairs);
4417296f96fcSRusty Russell 	if (!dev)
4418296f96fcSRusty Russell 		return -ENOMEM;
4419296f96fcSRusty Russell 
4420296f96fcSRusty Russell 	/* Set up network device as normal. */
4421ab5bd583SXuan Zhuo 	dev->priv_flags |= IFF_UNICAST_FLT | IFF_LIVE_ADDR_CHANGE |
4422ab5bd583SXuan Zhuo 			   IFF_TX_SKB_NO_LINEAR;
442376288b4eSStephen Hemminger 	dev->netdev_ops = &virtnet_netdev;
4424296f96fcSRusty Russell 	dev->features = NETIF_F_HIGHDMA;
44253fa2a1dfSstephen hemminger 
44267ad24ea4SWilfried Klaebe 	dev->ethtool_ops = &virtnet_ethtool_ops;
4427296f96fcSRusty Russell 	SET_NETDEV_DEV(dev, &vdev->dev);
4428296f96fcSRusty Russell 
4429296f96fcSRusty Russell 	/* Do we support "hardware" checksums? */
443098e778c9SMichał Mirosław 	if (virtio_has_feature(vdev, VIRTIO_NET_F_CSUM)) {
4431296f96fcSRusty Russell 		/* This opens up the world of extra features. */
443248900cb6SJason Wang 		dev->hw_features |= NETIF_F_HW_CSUM | NETIF_F_SG;
443398e778c9SMichał Mirosław 		if (csum)
443448900cb6SJason Wang 			dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG;
443598e778c9SMichał Mirosław 
443698e778c9SMichał Mirosław 		if (virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) {
4437e078de03SDavid S. Miller 			dev->hw_features |= NETIF_F_TSO
443834a48579SRusty Russell 				| NETIF_F_TSO_ECN | NETIF_F_TSO6;
443934a48579SRusty Russell 		}
44405539ae96SRusty Russell 		/* Individual feature bits: what can host handle? */
444198e778c9SMichał Mirosław 		if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO4))
444298e778c9SMichał Mirosław 			dev->hw_features |= NETIF_F_TSO;
444398e778c9SMichał Mirosław 		if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO6))
444498e778c9SMichał Mirosław 			dev->hw_features |= NETIF_F_TSO6;
444598e778c9SMichał Mirosław 		if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_ECN))
444698e778c9SMichał Mirosław 			dev->hw_features |= NETIF_F_TSO_ECN;
4447418044e1SAndrew Melnychenko 		if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_USO))
4448418044e1SAndrew Melnychenko 			dev->hw_features |= NETIF_F_GSO_UDP_L4;
444998e778c9SMichał Mirosław 
445041f2f127SJason Wang 		dev->features |= NETIF_F_GSO_ROBUST;
445141f2f127SJason Wang 
445298e778c9SMichał Mirosław 		if (gso)
4453e078de03SDavid S. Miller 			dev->features |= dev->hw_features & NETIF_F_ALL_TSO;
445498e778c9SMichał Mirosław 		/* (!csum && gso) case will be fixed by register_netdev() */
4455296f96fcSRusty Russell 	}
4456*ef609fd7SHeng Qi 
4457*ef609fd7SHeng Qi 	/* 1. With VIRTIO_NET_F_GUEST_CSUM negotiation, the driver doesn't
4458*ef609fd7SHeng Qi 	 * need to calculate checksums for partially checksummed packets,
4459*ef609fd7SHeng Qi 	 * as they're considered valid by the upper layer.
4460*ef609fd7SHeng Qi 	 * 2. Without VIRTIO_NET_F_GUEST_CSUM negotiation, the driver only
4461*ef609fd7SHeng Qi 	 * receives fully checksummed packets. The device may assist in
4462*ef609fd7SHeng Qi 	 * validating these packets' checksums, so the driver won't have to.
4463*ef609fd7SHeng Qi 	 */
44644f49129bSThomas Huth 	dev->features |= NETIF_F_RXCSUM;
4465*ef609fd7SHeng Qi 
4466a02e8964SWillem de Bruijn 	if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) ||
4467a02e8964SWillem de Bruijn 	    virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6))
4468dbcf24d1SJason Wang 		dev->features |= NETIF_F_GRO_HW;
4469cf8691cbSMichael S. Tsirkin 	if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS))
4470dbcf24d1SJason Wang 		dev->hw_features |= NETIF_F_GRO_HW;
4471296f96fcSRusty Russell 
44724fda8302SJason Wang 	dev->vlan_features = dev->features;
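	/* Advertise baseline XDP support; NETDEV_XDP_ACT_RX_SG is added
	 * further down once mergeable RX buffers are confirmed.
	 */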
447366c0e13aSMarek Majtyka 	dev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT;
44744fda8302SJason Wang 
4475d0c2c997SJarod Wilson 	/* MTU range: 68 - 65535 */
4476d0c2c997SJarod Wilson 	dev->min_mtu = MIN_MTU;
4477d0c2c997SJarod Wilson 	dev->max_mtu = MAX_MTU;
4478d0c2c997SJarod Wilson 
4479296f96fcSRusty Russell 	/* Configuration may specify what MAC to use.  Otherwise random. */
4480f2edaa4aSJakub Kicinski 	if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC)) {
4481f2edaa4aSJakub Kicinski 		u8 addr[ETH_ALEN];
4482f2edaa4aSJakub Kicinski 
4483855e0c52SRusty Russell 		virtio_cread_bytes(vdev,
4484a586d4f6SRusty Russell 				   offsetof(struct virtio_net_config, mac),
4485f2edaa4aSJakub Kicinski 				   addr, ETH_ALEN);
4486f2edaa4aSJakub Kicinski 		eth_hw_addr_set(dev, addr);
4487f2edaa4aSJakub Kicinski 	} else {
4488f2cedb63SDanny Kukawka 		eth_hw_addr_random(dev);
44899f62d221SLaurent Vivier 		dev_info(&vdev->dev, "Assigned random MAC address %pM\n",
44909f62d221SLaurent Vivier 			 dev->dev_addr);
4491f2edaa4aSJakub Kicinski 	}
4492296f96fcSRusty Russell 
4493296f96fcSRusty Russell 	/* Set up our device-specific information */
4494296f96fcSRusty Russell 	vi = netdev_priv(dev);
4495296f96fcSRusty Russell 	vi->dev = dev;
4496296f96fcSRusty Russell 	vi->vdev = vdev;
4497d9d5dcc8SChristian Borntraeger 	vdev->priv = vi;
4498827da44cSJohn Stultz 
4499586d17c5SJason Wang 	INIT_WORK(&vi->config_work, virtnet_config_changed_work);
45005a159128SJason Wang 	spin_lock_init(&vi->refill_lock);
4501296f96fcSRusty Russell 
450230bbf891SLorenzo Bianconi 	if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF)) {
45033f2c31d9SMark McLoughlin 		vi->mergeable_rx_bufs = true;
450430bbf891SLorenzo Bianconi 		dev->xdp_features |= NETDEV_XDP_ACT_RX_SG;
450530bbf891SLorenzo Bianconi 	}
45063f2c31d9SMark McLoughlin 
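	/* Interrupt coalescing starts fully disabled (0 usecs, 0 packets);
	 * it can be tuned at runtime through the ethtool coalesce ops.
	 */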
4507699b045aSAlvaro Karsz 	if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_NOTF_COAL)) {
4508308d7982SGavin Li 		vi->intr_coal_rx.max_usecs = 0;
4509308d7982SGavin Li 		vi->intr_coal_tx.max_usecs = 0;
4510308d7982SGavin Li 		vi->intr_coal_tx.max_packets = 0;
4511308d7982SGavin Li 		vi->intr_coal_rx.max_packets = 0;
4512699b045aSAlvaro Karsz 	}
4513699b045aSAlvaro Karsz 
451491f41f01SAndrew Melnychenko 	if (virtio_has_feature(vdev, VIRTIO_NET_F_HASH_REPORT))
451591f41f01SAndrew Melnychenko 		vi->has_rss_hash_report = true;
451691f41f01SAndrew Melnychenko 
451743a71c1bSBreno Leitao 	if (virtio_has_feature(vdev, VIRTIO_NET_F_RSS)) {
4518c7114b12SAndrew Melnychenko 		vi->has_rss = true;
451991f41f01SAndrew Melnychenko 
4520c7114b12SAndrew Melnychenko 		vi->rss_indir_table_size =
4521c7114b12SAndrew Melnychenko 			virtio_cread16(vdev, offsetof(struct virtio_net_config,
4522c7114b12SAndrew Melnychenko 				rss_max_indirection_table_length));
452343a71c1bSBreno Leitao 	}
452443a71c1bSBreno Leitao 
452543a71c1bSBreno Leitao 	if (vi->has_rss || vi->has_rss_hash_report) {
4526c7114b12SAndrew Melnychenko 		vi->rss_key_size =
4527c7114b12SAndrew Melnychenko 			virtio_cread8(vdev, offsetof(struct virtio_net_config, rss_max_key_size));
4528c7114b12SAndrew Melnychenko 
4529c7114b12SAndrew Melnychenko 		vi->rss_hash_types_supported =
4530c7114b12SAndrew Melnychenko 		    virtio_cread32(vdev, offsetof(struct virtio_net_config, supported_hash_types));
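		/* Mask out the extension-header (_EX) hash types: hashing
		 * over IPv6 extension headers is not supported here.
		 */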
4531c7114b12SAndrew Melnychenko 		vi->rss_hash_types_supported &=
4532c7114b12SAndrew Melnychenko 				~(VIRTIO_NET_RSS_HASH_TYPE_IP_EX |
4533c7114b12SAndrew Melnychenko 				  VIRTIO_NET_RSS_HASH_TYPE_TCP_EX |
4534c7114b12SAndrew Melnychenko 				  VIRTIO_NET_RSS_HASH_TYPE_UDP_EX);
4535c7114b12SAndrew Melnychenko 
4536c7114b12SAndrew Melnychenko 		dev->hw_features |= NETIF_F_RXHASH;
4537c7114b12SAndrew Melnychenko 	}
453891f41f01SAndrew Melnychenko 
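	/* Choose the per-packet header the device will use: hash-report
	 * devices need the v1 hash header; mergeable buffers or VIRTIO 1.0
	 * imply the mrg_rxbuf header (which carries num_buffers); anything
	 * else gets the small legacy header.
	 */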
453991f41f01SAndrew Melnychenko 	if (vi->has_rss_hash_report)
454091f41f01SAndrew Melnychenko 		vi->hdr_len = sizeof(struct virtio_net_hdr_v1_hash);
454191f41f01SAndrew Melnychenko 	else if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF) ||
4542d04302b3SMichael S. Tsirkin 		 virtio_has_feature(vdev, VIRTIO_F_VERSION_1))
4543012873d0SMichael S. Tsirkin 		vi->hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf);
4544012873d0SMichael S. Tsirkin 	else
4545012873d0SMichael S. Tsirkin 		vi->hdr_len = sizeof(struct virtio_net_hdr);
4546012873d0SMichael S. Tsirkin 
454775993300SMichael S. Tsirkin 	if (virtio_has_feature(vdev, VIRTIO_F_ANY_LAYOUT) ||
454875993300SMichael S. Tsirkin 	    virtio_has_feature(vdev, VIRTIO_F_VERSION_1))
4549e7428e95SMichael S. Tsirkin 		vi->any_header_sg = true;
4550e7428e95SMichael S. Tsirkin 
4551986a4f4dSJason Wang 	if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ))
4552986a4f4dSJason Wang 		vi->has_cvq = true;
4553986a4f4dSJason Wang 
455414de9d11SAaron Conole 	if (virtio_has_feature(vdev, VIRTIO_NET_F_MTU)) {
455514de9d11SAaron Conole 		mtu = virtio_cread16(vdev,
455614de9d11SAaron Conole 				     offsetof(struct virtio_net_config,
455714de9d11SAaron Conole 					      mtu));
455893a205eeSAaron Conole 		if (mtu < dev->min_mtu) {
4559fe36cbe0SMichael S. Tsirkin 			/* Should never trigger: MTU was previously validated
4560fe36cbe0SMichael S. Tsirkin 			 * in virtnet_validate.
4561fe36cbe0SMichael S. Tsirkin 			 */
45627934b481SYuval Shaia 			dev_err(&vdev->dev,
45637934b481SYuval Shaia 				"device MTU appears to have changed, it is now %d < %d\n",
45647934b481SYuval Shaia 				mtu, dev->min_mtu);
4565411ea23aSDan Carpenter 			err = -EINVAL;
4566d7dfc5cfSToshiaki Makita 			goto free;
4567fe36cbe0SMichael S. Tsirkin 		}
4568fe36cbe0SMichael S. Tsirkin 
4569d0c2c997SJarod Wilson 		dev->mtu = mtu;
457093a205eeSAaron Conole 		dev->max_mtu = mtu;
457114de9d11SAaron Conole 	}
457214de9d11SAaron Conole 
45734959aebbSGavin Li 	virtnet_set_big_packets(vi, mtu);
45744959aebbSGavin Li 
4575012873d0SMichael S. Tsirkin 	if (vi->any_header_sg)
4576012873d0SMichael S. Tsirkin 		dev->needed_headroom = vi->hdr_len;
45776ebbc1a6SZhangjie \(HZ\) 
457844900010SJason Wang 	/* Enable multiqueue by default, capped at the number of online CPUs */
457944900010SJason Wang 	if (num_online_cpus() >= max_queue_pairs)
458044900010SJason Wang 		vi->curr_queue_pairs = max_queue_pairs;
458144900010SJason Wang 	else
458244900010SJason Wang 		vi->curr_queue_pairs = num_online_cpus();
4583986a4f4dSJason Wang 	vi->max_queue_pairs = max_queue_pairs;
4584986a4f4dSJason Wang 
4585986a4f4dSJason Wang 	/* Allocate/initialize the rx/tx queues, and invoke find_vqs */
45863f9c10b0SAmit Shah 	err = init_vqs(vi);
4587d2a7dddaSMichael S. Tsirkin 	if (err)
4588d7dfc5cfSToshiaki Makita 		goto free;
4589d2a7dddaSMichael S. Tsirkin 
4590fbf28d78SMichael Dalton #ifdef CONFIG_SYSFS
4591fbf28d78SMichael Dalton 	if (vi->mergeable_rx_bufs)
4592fbf28d78SMichael Dalton 		dev->sysfs_rx_queue_group = &virtio_net_mrg_rx_group;
4593fbf28d78SMichael Dalton #endif
45940f13b66bSZhi Yong Wu 	netif_set_real_num_tx_queues(dev, vi->curr_queue_pairs);
45950f13b66bSZhi Yong Wu 	netif_set_real_num_rx_queues(dev, vi->curr_queue_pairs);
4596986a4f4dSJason Wang 
45972e9ca760SMichael S. Tsirkin 	virtnet_init_settings(dev);
45982e9ca760SMichael S. Tsirkin 
4599ba5e4426SSridhar Samudrala 	if (virtio_has_feature(vdev, VIRTIO_NET_F_STANDBY)) {
4600ba5e4426SSridhar Samudrala 		vi->failover = net_failover_create(vi->dev);
46014b8e6ac4SWei Yongjun 		if (IS_ERR(vi->failover)) {
46024b8e6ac4SWei Yongjun 			err = PTR_ERR(vi->failover);
4603ba5e4426SSridhar Samudrala 			goto free_vqs;
4604ba5e4426SSridhar Samudrala 		}
46054b8e6ac4SWei Yongjun 	}
4606ba5e4426SSridhar Samudrala 
460791f41f01SAndrew Melnychenko 	if (vi->has_rss || vi->has_rss_hash_report)
4608c7114b12SAndrew Melnychenko 		virtnet_init_default_rss(vi);
4609c7114b12SAndrew Melnychenko 
461050c0ada6SJason Wang 	/* serialize netdev register + virtio_device_ready() with ndo_open() */
461150c0ada6SJason Wang 	rtnl_lock();
461250c0ada6SJason Wang 
461350c0ada6SJason Wang 	err = register_netdevice(dev);
4614296f96fcSRusty Russell 	if (err) {
4615296f96fcSRusty Russell 		pr_debug("virtio_net: registering device failed\n");
461650c0ada6SJason Wang 		rtnl_unlock();
4617ba5e4426SSridhar Samudrala 		goto free_failover;
4618296f96fcSRusty Russell 	}
4619b3369c1fSRusty Russell 
46204baf1e33SMichael S. Tsirkin 	virtio_device_ready(vdev);
46214baf1e33SMichael S. Tsirkin 
462251b81317SJason Wang 	_virtnet_set_queues(vi, vi->curr_queue_pairs);
462351b81317SJason Wang 
46249f62d221SLaurent Vivier 	/* A random MAC address has been assigned; notify the device.
46259f62d221SLaurent Vivier 	 * We don't fail probe if VIRTIO_NET_F_CTRL_MAC_ADDR is absent
46269f62d221SLaurent Vivier 	 * because many devices work fine without the MAC set explicitly.
46279f62d221SLaurent Vivier 	 */
46289f62d221SLaurent Vivier 	if (!virtio_has_feature(vdev, VIRTIO_NET_F_MAC) &&
46299f62d221SLaurent Vivier 	    virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_MAC_ADDR)) {
46309f62d221SLaurent Vivier 		struct scatterlist sg;
46319f62d221SLaurent Vivier 
46329f62d221SLaurent Vivier 		sg_init_one(&sg, dev->dev_addr, dev->addr_len);
46339f62d221SLaurent Vivier 		if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
46349f62d221SLaurent Vivier 					  VIRTIO_NET_CTRL_MAC_ADDR_SET, &sg)) {
46359f62d221SLaurent Vivier 			pr_debug("virtio_net: setting MAC address failed\n");
46369f62d221SLaurent Vivier 			rtnl_unlock();
46379f62d221SLaurent Vivier 			err = -EINVAL;
46389f62d221SLaurent Vivier 			goto free_unregister_netdev;
46399f62d221SLaurent Vivier 		}
46409f62d221SLaurent Vivier 	}
46419f62d221SLaurent Vivier 
464250c0ada6SJason Wang 	rtnl_unlock();
464350c0ada6SJason Wang 
46448017c279SSebastian Andrzej Siewior 	err = virtnet_cpu_notif_add(vi);
46458de4b2f3SWanlong Gao 	if (err) {
46468de4b2f3SWanlong Gao 		pr_debug("virtio_net: registering cpu notifier failed\n");
4647f00e35e2Swangyunjian 		goto free_unregister_netdev;
46488de4b2f3SWanlong Gao 	}
46498de4b2f3SWanlong Gao 
4650167c25e4SJason Wang 	/* Assume link up if device can't report link status,
4651167c25e4SJason Wang 	 * otherwise get link status from config. */
4652167c25e4SJason Wang 	netif_carrier_off(dev);
4653bda7fab5SJay Vosburgh 	if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) {
46543b07e9caSTejun Heo 		schedule_work(&vi->config_work);
4655167c25e4SJason Wang 	} else {
4656167c25e4SJason Wang 		vi->status = VIRTIO_NET_S_LINK_UP;
4657faa9b39fSJason Baron 		virtnet_update_settings(vi);
46584783256eSPantelis Koukousoulas 		netif_carrier_on(dev);
4659167c25e4SJason Wang 	}
46609f4d26d0SMark McLoughlin 
46613f93522fSJason Wang 	for (i = 0; i < ARRAY_SIZE(guest_offloads); i++)
46623f93522fSJason Wang 		if (virtio_has_feature(vi->vdev, guest_offloads[i]))
46633f93522fSJason Wang 			set_bit(guest_offloads[i], &vi->guest_offloads);
4664a02e8964SWillem de Bruijn 	vi->guest_offloads_capable = vi->guest_offloads;
46653f93522fSJason Wang 
4666986a4f4dSJason Wang 	pr_debug("virtnet: registered device %s with %d RX and TX vqs\n",
4667986a4f4dSJason Wang 		 dev->name, max_queue_pairs);
4668986a4f4dSJason Wang 
4669296f96fcSRusty Russell 	return 0;
4670296f96fcSRusty Russell 
4671f00e35e2Swangyunjian free_unregister_netdev:
4672b3369c1fSRusty Russell 	unregister_netdev(dev);
4673ba5e4426SSridhar Samudrala free_failover:
4674ba5e4426SSridhar Samudrala 	net_failover_destroy(vi->failover);
4675d2a7dddaSMichael S. Tsirkin free_vqs:
4676b0686565SLi Zetao 	virtio_reset_device(vdev);
4677986a4f4dSJason Wang 	cancel_delayed_work_sync(&vi->refill);
4678fb51879dSMichael Dalton 	free_receive_page_frags(vi);
4679e9d7417bSJason Wang 	virtnet_del_vqs(vi);
4680296f96fcSRusty Russell free:
4681296f96fcSRusty Russell 	free_netdev(dev);
4682296f96fcSRusty Russell 	return err;
4683296f96fcSRusty Russell }
4684296f96fcSRusty Russell 
468504486ed0SAmit Shah static void remove_vq_common(struct virtnet_info *vi)
4686296f96fcSRusty Russell {
4687d9679d00SMichael S. Tsirkin 	virtio_reset_device(vi->vdev);
4688830a8a97SShirley Ma 
4689830a8a97SShirley Ma 	/* Free unused buffers in both send and recv, if any. */
46909ab86bbcSShirley Ma 	free_unused_bufs(vi);
4691fb6813f4SRusty Russell 
4692986a4f4dSJason Wang 	free_receive_bufs(vi);
4693d2a7dddaSMichael S. Tsirkin 
4694fb51879dSMichael Dalton 	free_receive_page_frags(vi);
4695fb51879dSMichael Dalton 
4696986a4f4dSJason Wang 	virtnet_del_vqs(vi);
469704486ed0SAmit Shah }
469804486ed0SAmit Shah 
46998cc085d6SBill Pemberton static void virtnet_remove(struct virtio_device *vdev)
470004486ed0SAmit Shah {
470104486ed0SAmit Shah 	struct virtnet_info *vi = vdev->priv;
470204486ed0SAmit Shah 
47038017c279SSebastian Andrzej Siewior 	virtnet_cpu_notif_remove(vi);
47048de4b2f3SWanlong Gao 
4705102a2786SMichael S. Tsirkin 	/* Make sure no work handler is accessing the device. */
4706102a2786SMichael S. Tsirkin 	flush_work(&vi->config_work);
4707586d17c5SJason Wang 
470804486ed0SAmit Shah 	unregister_netdev(vi->dev);
470904486ed0SAmit Shah 
4710ba5e4426SSridhar Samudrala 	net_failover_destroy(vi->failover);
4711ba5e4426SSridhar Samudrala 
471204486ed0SAmit Shah 	remove_vq_common(vi);
4713fb6813f4SRusty Russell 
471474b2553fSRusty Russell 	free_netdev(vi->dev);
4715296f96fcSRusty Russell }
4716296f96fcSRusty Russell 
471767a75194SArnd Bergmann static __maybe_unused int virtnet_freeze(struct virtio_device *vdev)
47180741bcb5SAmit Shah {
47190741bcb5SAmit Shah 	struct virtnet_info *vi = vdev->priv;
47200741bcb5SAmit Shah 
47218017c279SSebastian Andrzej Siewior 	virtnet_cpu_notif_remove(vi);
47229fe7bfceSJohn Fastabend 	virtnet_freeze_down(vdev);
47230741bcb5SAmit Shah 	remove_vq_common(vi);
47240741bcb5SAmit Shah 
47250741bcb5SAmit Shah 	return 0;
47260741bcb5SAmit Shah }
47270741bcb5SAmit Shah 
472867a75194SArnd Bergmann static __maybe_unused int virtnet_restore(struct virtio_device *vdev)
47290741bcb5SAmit Shah {
47300741bcb5SAmit Shah 	struct virtnet_info *vi = vdev->priv;
47319fe7bfceSJohn Fastabend 	int err;
47320741bcb5SAmit Shah 
47339fe7bfceSJohn Fastabend 	err = virtnet_restore_up(vdev);
47340741bcb5SAmit Shah 	if (err)
47350741bcb5SAmit Shah 		return err;
4736986a4f4dSJason Wang 	virtnet_set_queues(vi, vi->curr_queue_pairs);
4737986a4f4dSJason Wang 
47388017c279SSebastian Andrzej Siewior 	err = virtnet_cpu_notif_add(vi);
47393f2869caSXie Yongji 	if (err) {
47403f2869caSXie Yongji 		virtnet_freeze_down(vdev);
47413f2869caSXie Yongji 		remove_vq_common(vi);
4742ec9debbdSJason Wang 		return err;
47433f2869caSXie Yongji 	}
4744ec9debbdSJason Wang 
47450741bcb5SAmit Shah 	return 0;
47460741bcb5SAmit Shah }
47470741bcb5SAmit Shah 
4748296f96fcSRusty Russell static struct virtio_device_id id_table[] = {
4749296f96fcSRusty Russell 	{ VIRTIO_ID_NET, VIRTIO_DEV_ANY_ID },
4750296f96fcSRusty Russell 	{ 0 },
4751296f96fcSRusty Russell };
4752296f96fcSRusty Russell 
4753f3358507SMichael S. Tsirkin #define VIRTNET_FEATURES \
4754f3358507SMichael S. Tsirkin 	VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GUEST_CSUM, \
4755f3358507SMichael S. Tsirkin 	VIRTIO_NET_F_MAC, \
4756f3358507SMichael S. Tsirkin 	VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6, \
4757f3358507SMichael S. Tsirkin 	VIRTIO_NET_F_HOST_ECN, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6, \
4758f3358507SMichael S. Tsirkin 	VIRTIO_NET_F_GUEST_ECN, VIRTIO_NET_F_GUEST_UFO, \
4759418044e1SAndrew Melnychenko 	VIRTIO_NET_F_HOST_USO, VIRTIO_NET_F_GUEST_USO4, VIRTIO_NET_F_GUEST_USO6, \
4760f3358507SMichael S. Tsirkin 	VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ, \
4761f3358507SMichael S. Tsirkin 	VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN, \
4762f3358507SMichael S. Tsirkin 	VIRTIO_NET_F_GUEST_ANNOUNCE, VIRTIO_NET_F_MQ, \
4763f3358507SMichael S. Tsirkin 	VIRTIO_NET_F_CTRL_MAC_ADDR, \
4764faa9b39fSJason Baron 	VIRTIO_NET_F_MTU, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS, \
4765c7114b12SAndrew Melnychenko 	VIRTIO_NET_F_SPEED_DUPLEX, VIRTIO_NET_F_STANDBY, \
4766be50da3eSJiri Pirko 	VIRTIO_NET_F_RSS, VIRTIO_NET_F_HASH_REPORT, VIRTIO_NET_F_NOTF_COAL, \
47678af3bf66SGavin Li 	VIRTIO_NET_F_VQ_NOTF_COAL, \
4768be50da3eSJiri Pirko 	VIRTIO_NET_F_GUEST_HDRLEN
4769f3358507SMichael S. Tsirkin 
4770c45a6816SRusty Russell static unsigned int features[] = {
4771f3358507SMichael S. Tsirkin 	VIRTNET_FEATURES,
4772f3358507SMichael S. Tsirkin };
4773f3358507SMichael S. Tsirkin 
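/* Legacy (pre-VIRTIO 1.0) devices may additionally negotiate the combined
 * GSO feature and the relaxed ANY_LAYOUT descriptor rules.
 */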
4774f3358507SMichael S. Tsirkin static unsigned int features_legacy[] = {
4775f3358507SMichael S. Tsirkin 	VIRTNET_FEATURES,
4776f3358507SMichael S. Tsirkin 	VIRTIO_NET_F_GSO,
4777e7428e95SMichael S. Tsirkin 	VIRTIO_F_ANY_LAYOUT,
4778c45a6816SRusty Russell };
4779c45a6816SRusty Russell 
478022402529SUwe Kleine-König static struct virtio_driver virtio_net_driver = {
4781c45a6816SRusty Russell 	.feature_table = features,
4782c45a6816SRusty Russell 	.feature_table_size = ARRAY_SIZE(features),
4783f3358507SMichael S. Tsirkin 	.feature_table_legacy = features_legacy,
4784f3358507SMichael S. Tsirkin 	.feature_table_size_legacy = ARRAY_SIZE(features_legacy),
4785296f96fcSRusty Russell 	.driver.name =	KBUILD_MODNAME,
4786296f96fcSRusty Russell 	.driver.owner =	THIS_MODULE,
4787296f96fcSRusty Russell 	.id_table =	id_table,
4788fe36cbe0SMichael S. Tsirkin 	.validate =	virtnet_validate,
4789296f96fcSRusty Russell 	.probe =	virtnet_probe,
47908cc085d6SBill Pemberton 	.remove =	virtnet_remove,
47919f4d26d0SMark McLoughlin 	.config_changed = virtnet_config_changed,
479289107000SAaron Lu #ifdef CONFIG_PM_SLEEP
47930741bcb5SAmit Shah 	.freeze =	virtnet_freeze,
47940741bcb5SAmit Shah 	.restore =	virtnet_restore,
47950741bcb5SAmit Shah #endif
4796296f96fcSRusty Russell };
4797296f96fcSRusty Russell 
47988017c279SSebastian Andrzej Siewior static __init int virtio_net_driver_init(void)
47998017c279SSebastian Andrzej Siewior {
48008017c279SSebastian Andrzej Siewior 	int ret;
48018017c279SSebastian Andrzej Siewior 
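	/* Two hotplug states keep virtqueue CPU affinity current: a dynamic
	 * "online" state invoked as CPUs come and go, and a "dead" state
	 * for cleanup after a CPU is gone.
	 */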
480273c1b41eSThomas Gleixner 	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "virtio/net:online",
48038017c279SSebastian Andrzej Siewior 				      virtnet_cpu_online,
48048017c279SSebastian Andrzej Siewior 				      virtnet_cpu_down_prep);
48058017c279SSebastian Andrzej Siewior 	if (ret < 0)
48068017c279SSebastian Andrzej Siewior 		goto out;
48078017c279SSebastian Andrzej Siewior 	virtionet_online = ret;
480873c1b41eSThomas Gleixner 	ret = cpuhp_setup_state_multi(CPUHP_VIRT_NET_DEAD, "virtio/net:dead",
48098017c279SSebastian Andrzej Siewior 				      NULL, virtnet_cpu_dead);
48108017c279SSebastian Andrzej Siewior 	if (ret)
48118017c279SSebastian Andrzej Siewior 		goto err_dead;
48128017c279SSebastian Andrzej Siewior 	ret = register_virtio_driver(&virtio_net_driver);
48138017c279SSebastian Andrzej Siewior 	if (ret)
48148017c279SSebastian Andrzej Siewior 		goto err_virtio;
48158017c279SSebastian Andrzej Siewior 	return 0;
48168017c279SSebastian Andrzej Siewior err_virtio:
48178017c279SSebastian Andrzej Siewior 	cpuhp_remove_multi_state(CPUHP_VIRT_NET_DEAD);
48188017c279SSebastian Andrzej Siewior err_dead:
48198017c279SSebastian Andrzej Siewior 	cpuhp_remove_multi_state(virtionet_online);
48208017c279SSebastian Andrzej Siewior out:
48218017c279SSebastian Andrzej Siewior 	return ret;
48228017c279SSebastian Andrzej Siewior }
48238017c279SSebastian Andrzej Siewior module_init(virtio_net_driver_init);
48248017c279SSebastian Andrzej Siewior 
48258017c279SSebastian Andrzej Siewior static __exit void virtio_net_driver_exit(void)
48268017c279SSebastian Andrzej Siewior {
4827cfa0ebc9SAndrew Jones 	unregister_virtio_driver(&virtio_net_driver);
48288017c279SSebastian Andrzej Siewior 	cpuhp_remove_multi_state(CPUHP_VIRT_NET_DEAD);
48298017c279SSebastian Andrzej Siewior 	cpuhp_remove_multi_state(virtionet_online);
48308017c279SSebastian Andrzej Siewior }
48318017c279SSebastian Andrzej Siewior module_exit(virtio_net_driver_exit);
4832296f96fcSRusty Russell 
4833296f96fcSRusty Russell MODULE_DEVICE_TABLE(virtio, id_table);
4834296f96fcSRusty Russell MODULE_DESCRIPTION("Virtio network driver");
4835296f96fcSRusty Russell MODULE_LICENSE("GPL");
4836