// SPDX-License-Identifier: GPL-2.0-or-later
/* A network driver using virtio.
 *
 * Copyright 2007 Rusty Russell <rusty@rustcorp.com.au> IBM Corporation
 */
//#define DEBUG
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/module.h>
#include <linux/virtio.h>
#include <linux/virtio_net.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/scatterlist.h>
#include <linux/if_vlan.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/average.h>
#include <linux/filter.h>
#include <linux/kernel.h>
#include <net/route.h>
#include <net/xdp.h>
#include <net/net_failover.h>
#include <net/netdev_rx_queue.h>

static int napi_weight = NAPI_POLL_WEIGHT;
module_param(napi_weight, int, 0444);

static bool csum = true, gso = true, napi_tx = true;
module_param(csum, bool, 0444);
module_param(gso, bool, 0444);
module_param(napi_tx, bool, 0644);

/* FIXME: MTU in config. */
#define GOOD_PACKET_LEN (ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN)
#define GOOD_COPY_LEN	128

#define VIRTNET_RX_PAD (NET_IP_ALIGN + NET_SKB_PAD)

/* Amount of XDP headroom to prepend to packets for use by xdp_adjust_head */
#define VIRTIO_XDP_HEADROOM 256

/* Separating two types of XDP xmit */
#define VIRTIO_XDP_TX		BIT(0)
#define VIRTIO_XDP_REDIR	BIT(1)

#define VIRTIO_XDP_FLAG	BIT(0)

/* RX packet size EWMA. The average packet size is used to determine the packet
 * buffer size when refilling RX rings. As the entire RX ring may be refilled
 * at once, the weight is chosen so that the EWMA will be insensitive to short-
 * term, transient changes in packet size.
 */
DECLARE_EWMA(pkt_len, 0, 64)
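
/* A note on the EWMA parameters above (precision 0, weight reciprocal 64):
 * each ewma_pkt_len_add() folds the new sample in with a weight of 1/64,
 * i.e. roughly avg = (63 * avg + sample) / 64, so a short burst of unusually
 * small or large packets shifts the average only slightly.
 */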

#define VIRTNET_DRIVER_VERSION "1.0.0"

static const unsigned long guest_offloads[] = {
	VIRTIO_NET_F_GUEST_TSO4,
	VIRTIO_NET_F_GUEST_TSO6,
	VIRTIO_NET_F_GUEST_ECN,
	VIRTIO_NET_F_GUEST_UFO,
	VIRTIO_NET_F_GUEST_CSUM,
	VIRTIO_NET_F_GUEST_USO4,
	VIRTIO_NET_F_GUEST_USO6,
	VIRTIO_NET_F_GUEST_HDRLEN
};

#define GUEST_OFFLOAD_GRO_HW_MASK ((1ULL << VIRTIO_NET_F_GUEST_TSO4) | \
				   (1ULL << VIRTIO_NET_F_GUEST_TSO6) | \
				   (1ULL << VIRTIO_NET_F_GUEST_ECN)  | \
				   (1ULL << VIRTIO_NET_F_GUEST_UFO)  | \
				   (1ULL << VIRTIO_NET_F_GUEST_USO4) | \
				   (1ULL << VIRTIO_NET_F_GUEST_USO6))

struct virtnet_stat_desc {
	char desc[ETH_GSTRING_LEN];
	size_t offset;
};

struct virtnet_sq_stats {
	struct u64_stats_sync syncp;
	u64_stats_t packets;
	u64_stats_t bytes;
	u64_stats_t xdp_tx;
	u64_stats_t xdp_tx_drops;
	u64_stats_t kicks;
	u64_stats_t tx_timeouts;
};

struct virtnet_rq_stats {
	struct u64_stats_sync syncp;
	u64_stats_t packets;
	u64_stats_t bytes;
	u64_stats_t drops;
	u64_stats_t xdp_packets;
	u64_stats_t xdp_tx;
	u64_stats_t xdp_redirects;
	u64_stats_t xdp_drops;
	u64_stats_t kicks;
};

#define VIRTNET_SQ_STAT(m)	offsetof(struct virtnet_sq_stats, m)
#define VIRTNET_RQ_STAT(m)	offsetof(struct virtnet_rq_stats, m)

static const struct virtnet_stat_desc virtnet_sq_stats_desc[] = {
	{ "packets",		VIRTNET_SQ_STAT(packets) },
	{ "bytes",		VIRTNET_SQ_STAT(bytes) },
	{ "xdp_tx",		VIRTNET_SQ_STAT(xdp_tx) },
	{ "xdp_tx_drops",	VIRTNET_SQ_STAT(xdp_tx_drops) },
	{ "kicks",		VIRTNET_SQ_STAT(kicks) },
	{ "tx_timeouts",	VIRTNET_SQ_STAT(tx_timeouts) },
};

static const struct virtnet_stat_desc virtnet_rq_stats_desc[] = {
	{ "packets",		VIRTNET_RQ_STAT(packets) },
	{ "bytes",		VIRTNET_RQ_STAT(bytes) },
	{ "drops",		VIRTNET_RQ_STAT(drops) },
	{ "xdp_packets",	VIRTNET_RQ_STAT(xdp_packets) },
	{ "xdp_tx",		VIRTNET_RQ_STAT(xdp_tx) },
	{ "xdp_redirects",	VIRTNET_RQ_STAT(xdp_redirects) },
	{ "xdp_drops",		VIRTNET_RQ_STAT(xdp_drops) },
	{ "kicks",		VIRTNET_RQ_STAT(kicks) },
};

#define VIRTNET_SQ_STATS_LEN	ARRAY_SIZE(virtnet_sq_stats_desc)
#define VIRTNET_RQ_STATS_LEN	ARRAY_SIZE(virtnet_rq_stats_desc)

struct virtnet_interrupt_coalesce {
	u32 max_packets;
	u32 max_usecs;
};

/* The DMA information for the pages allocated at a time. */
struct virtnet_rq_dma {
	dma_addr_t addr;
	u32 ref;
	u16 len;
	u16 need_sync;
};
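
/* A sketch of the receive buffer layout when the queue premaps its own DMA
 * (rq->do_dma, see virtnet_rq_alloc() below): the virtnet_rq_dma descriptor
 * lives at the head of each freshly refilled page frag, and packet buffers
 * are carved out of the remainder, all sharing that one mapping:
 *
 *	+-----------------------+-------+-------+-----+
 *	| struct virtnet_rq_dma |  buf  |  buf  | ... |
 *	+-----------------------+-------+-------+-----+
 *
 * @ref counts the buffers still outstanding; the mapping is torn down in
 * virtnet_rq_unmap() once it drops to zero.
 */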

/* Internal representation of a send virtqueue */
struct send_queue {
	/* Virtqueue associated with this send_queue */
	struct virtqueue *vq;

	/* TX: fragments + linear part + virtio header */
	struct scatterlist sg[MAX_SKB_FRAGS + 2];

	/* Name of the send queue: output.$index */
	char name[16];

	struct virtnet_sq_stats stats;

	struct virtnet_interrupt_coalesce intr_coal;

	struct napi_struct napi;

	/* Record whether sq is in reset state. */
	bool reset;
};

/* Internal representation of a receive virtqueue */
struct receive_queue {
	/* Virtqueue associated with this receive_queue */
	struct virtqueue *vq;

	struct napi_struct napi;

	struct bpf_prog __rcu *xdp_prog;

	struct virtnet_rq_stats stats;

	struct virtnet_interrupt_coalesce intr_coal;

	/* Chain pages by the private ptr. */
	struct page *pages;

	/* Average packet length for mergeable receive buffers. */
	struct ewma_pkt_len mrg_avg_pkt_len;

	/* Page frag for packet buffer allocation. */
	struct page_frag alloc_frag;

	/* RX: fragments + linear part + virtio header */
	struct scatterlist sg[MAX_SKB_FRAGS + 2];

	/* Min single buffer size for mergeable buffers case. */
	unsigned int min_buf_len;

	/* Name of this receive queue: input.$index */
	char name[16];

	struct xdp_rxq_info xdp_rxq;

	/* Record the last dma info to free after new pages are allocated. */
	struct virtnet_rq_dma *last_dma;

	/* Do DMA mapping ourselves */
	bool do_dma;
};

/* This structure can hold an RSS message with maximum settings for the
 * indirection table and key size.
 * Note that the default structure describing an RSS configuration,
 * virtio_net_rss_config, contains the same info but can't hold the table
 * values.
 * In any case, the structure is passed to the virtio hw through sg_buf split
 * into parts, because the table sizes may differ according to the device
 * configuration.
 */
#define VIRTIO_NET_RSS_MAX_KEY_SIZE     40
#define VIRTIO_NET_RSS_MAX_TABLE_LEN    128
struct virtio_net_ctrl_rss {
	u32 hash_types;
	u16 indirection_table_mask;
	u16 unclassified_queue;
	u16 indirection_table[VIRTIO_NET_RSS_MAX_TABLE_LEN];
	u16 max_tx_vq;
	u8 hash_key_length;
	u8 key[VIRTIO_NET_RSS_MAX_KEY_SIZE];
};

/* Control VQ buffers: protected by the rtnl lock */
struct control_buf {
	struct virtio_net_ctrl_hdr hdr;
	virtio_net_ctrl_ack status;
	struct virtio_net_ctrl_mq mq;
	u8 promisc;
	u8 allmulti;
	__virtio16 vid;
	__virtio64 offloads;
	struct virtio_net_ctrl_rss rss;
	struct virtio_net_ctrl_coal_tx coal_tx;
	struct virtio_net_ctrl_coal_rx coal_rx;
	struct virtio_net_ctrl_coal_vq coal_vq;
};

struct virtnet_info {
	struct virtio_device *vdev;
	struct virtqueue *cvq;
	struct net_device *dev;
	struct send_queue *sq;
	struct receive_queue *rq;
	unsigned int status;

	/* Max # of queue pairs supported by the device */
	u16 max_queue_pairs;

	/* # of queue pairs currently used by the driver */
	u16 curr_queue_pairs;

	/* # of XDP queue pairs currently used by the driver */
	u16 xdp_queue_pairs;

	/* xdp_queue_pairs may be 0, when xdp is already loaded. So add this. */
	bool xdp_enabled;

	/* I like... big packets and I cannot lie! */
	bool big_packets;

	/* number of sg entries allocated for big packets */
	unsigned int big_packets_num_skbfrags;

	/* Host will merge rx buffers for big packets (shake it! shake it!) */
	bool mergeable_rx_bufs;

	/* Host supports rss and/or hash report */
	bool has_rss;
	bool has_rss_hash_report;
	u8 rss_key_size;
	u16 rss_indir_table_size;
	u32 rss_hash_types_supported;
	u32 rss_hash_types_saved;

	/* Has control virtqueue */
	bool has_cvq;

	/* Host can handle any s/g split between our header and packet data */
	bool any_header_sg;

	/* Packet virtio header size */
	u8 hdr_len;

	/* Work struct for delayed refilling if we run low on memory. */
	struct delayed_work refill;

	/* Is delayed refill enabled? */
	bool refill_enabled;

	/* The lock to synchronize the access to refill_enabled */
	spinlock_t refill_lock;

	/* Work struct for config space updates */
	struct work_struct config_work;

	/* Is affinity hint set for virtqueues? */
	bool affinity_hint_set;

	/* CPU hotplug instances for online & dead */
	struct hlist_node node;
	struct hlist_node node_dead;

	struct control_buf *ctrl;

	/* Ethtool settings */
	u8 duplex;
	u32 speed;

	/* Interrupt coalescing settings */
	struct virtnet_interrupt_coalesce intr_coal_tx;
	struct virtnet_interrupt_coalesce intr_coal_rx;

	unsigned long guest_offloads;
	unsigned long guest_offloads_capable;

	/* failover when STANDBY feature enabled */
	struct failover *failover;
};

struct padded_vnet_hdr {
	struct virtio_net_hdr_v1_hash hdr;
	/*
	 * hdr is in a separate sg buffer, and data sg buffer shares same page
	 * with this header sg. This padding makes next sg 16 byte aligned
	 * after the header.
	 */
	char padding[12];
};
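
/* To make the padding above concrete: the wire format of
 * struct virtio_net_hdr_v1_hash is 20 bytes, so 12 bytes of padding round
 * the whole header up to 32, a multiple of 16; a data sg placed right after
 * it in the same page therefore stays 16-byte aligned.
 */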

struct virtio_net_common_hdr {
	union {
		struct virtio_net_hdr hdr;
		struct virtio_net_hdr_mrg_rxbuf	mrg_hdr;
		struct virtio_net_hdr_v1_hash hash_v1_hdr;
	};
};

static void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf);

static bool is_xdp_frame(void *ptr)
{
	return (unsigned long)ptr & VIRTIO_XDP_FLAG;
}

static void *xdp_to_ptr(struct xdp_frame *ptr)
{
	return (void *)((unsigned long)ptr | VIRTIO_XDP_FLAG);
}

static struct xdp_frame *ptr_to_xdp(void *ptr)
{
	return (struct xdp_frame *)((unsigned long)ptr & ~VIRTIO_XDP_FLAG);
}
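
/* The helpers above tag virtqueue tokens in bit 0 (VIRTIO_XDP_FLAG) so that
 * completion paths such as free_old_xmit_skbs() can tell an xdp_frame apart
 * from an sk_buff without extra bookkeeping. This relies on both pointer
 * types being at least 2-byte aligned: bit 0 of a genuine pointer is always
 * clear, so the round trip ptr_to_xdp(xdp_to_ptr(f)) == f holds.
 */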

/* Converting between virtqueue no. and kernel tx/rx queue no.
 * 0:rx0 1:tx0 2:rx1 3:tx1 ... 2N:rxN 2N+1:txN 2N+2:cvq
 */
static int vq2txq(struct virtqueue *vq)
{
	return (vq->index - 1) / 2;
}

static int txq2vq(int txq)
{
	return txq * 2 + 1;
}

static int vq2rxq(struct virtqueue *vq)
{
	return vq->index / 2;
}

static int rxq2vq(int rxq)
{
	return rxq * 2;
}

static inline struct virtio_net_common_hdr *
skb_vnet_common_hdr(struct sk_buff *skb)
{
	return (struct virtio_net_common_hdr *)skb->cb;
}

/*
 * private is used to chain pages for big packets, put the whole
 * most recently used list in the beginning for reuse
 */
static void give_pages(struct receive_queue *rq, struct page *page)
{
	struct page *end;

	/* Find end of list, sew whole thing into vi->rq.pages. */
	for (end = page; end->private; end = (struct page *)end->private);
	end->private = (unsigned long)rq->pages;
	rq->pages = page;
}

static struct page *get_a_page(struct receive_queue *rq, gfp_t gfp_mask)
{
	struct page *p = rq->pages;

	if (p) {
		rq->pages = (struct page *)p->private;
		/* clear private here, it is used to chain pages */
		p->private = 0;
	} else
		p = alloc_page(gfp_mask);
	return p;
}

static void virtnet_rq_free_buf(struct virtnet_info *vi,
				struct receive_queue *rq, void *buf)
{
	if (vi->mergeable_rx_bufs)
		put_page(virt_to_head_page(buf));
	else if (vi->big_packets)
		give_pages(rq, buf);
	else
		put_page(virt_to_head_page(buf));
}

static void enable_delayed_refill(struct virtnet_info *vi)
{
	spin_lock_bh(&vi->refill_lock);
	vi->refill_enabled = true;
	spin_unlock_bh(&vi->refill_lock);
}

static void disable_delayed_refill(struct virtnet_info *vi)
{
	spin_lock_bh(&vi->refill_lock);
	vi->refill_enabled = false;
	spin_unlock_bh(&vi->refill_lock);
}

static void virtqueue_napi_schedule(struct napi_struct *napi,
				    struct virtqueue *vq)
{
	if (napi_schedule_prep(napi)) {
		virtqueue_disable_cb(vq);
		__napi_schedule(napi);
	}
}

static void virtqueue_napi_complete(struct napi_struct *napi,
				    struct virtqueue *vq, int processed)
{
	int opaque;

	opaque = virtqueue_enable_cb_prepare(vq);
	if (napi_complete_done(napi, processed)) {
		if (unlikely(virtqueue_poll(vq, opaque)))
			virtqueue_napi_schedule(napi, vq);
	} else {
		virtqueue_disable_cb(vq);
	}
}
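
/* Why the two-step enable in virtqueue_napi_complete(): the device may add
 * buffers between virtqueue_enable_cb_prepare() and the end of
 * napi_complete_done(), with no further interrupt to announce them. The
 * virtqueue_poll() re-check against the opaque token catches exactly that
 * window and reschedules NAPI instead of leaving the queue stalled.
 */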

static void skb_xmit_done(struct virtqueue *vq)
{
	struct virtnet_info *vi = vq->vdev->priv;
	struct napi_struct *napi = &vi->sq[vq2txq(vq)].napi;

	/* Suppress further interrupts. */
	virtqueue_disable_cb(vq);

	if (napi->weight)
		virtqueue_napi_schedule(napi, vq);
	else
		/* We were probably waiting for more output buffers. */
		netif_wake_subqueue(vi->dev, vq2txq(vq));
}

#define MRG_CTX_HEADER_SHIFT 22
static void *mergeable_len_to_ctx(unsigned int truesize,
				  unsigned int headroom)
{
	return (void *)(unsigned long)((headroom << MRG_CTX_HEADER_SHIFT) | truesize);
}

static unsigned int mergeable_ctx_to_headroom(void *mrg_ctx)
{
	return (unsigned long)mrg_ctx >> MRG_CTX_HEADER_SHIFT;
}

static unsigned int mergeable_ctx_to_truesize(void *mrg_ctx)
{
	return (unsigned long)mrg_ctx & ((1 << MRG_CTX_HEADER_SHIFT) - 1);
}
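
/* The mergeable-buffer context packs two values into one pointer-sized
 * integer: truesize in the low 22 bits (so up to 4 MB - 1) and headroom in
 * the bits above. For example, truesize = 1536 and headroom = 256 yield
 * ctx = (256 << 22) | 1536, and the two accessors above recover each field.
 */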

static struct sk_buff *virtnet_build_skb(void *buf, unsigned int buflen,
					 unsigned int headroom,
					 unsigned int len)
{
	struct sk_buff *skb;

	skb = build_skb(buf, buflen);
	if (unlikely(!skb))
		return NULL;

	skb_reserve(skb, headroom);
	skb_put(skb, len);

	return skb;
}

/* Called from bottom half context */
static struct sk_buff *page_to_skb(struct virtnet_info *vi,
				   struct receive_queue *rq,
				   struct page *page, unsigned int offset,
				   unsigned int len, unsigned int truesize,
				   unsigned int headroom)
{
	struct sk_buff *skb;
	struct virtio_net_common_hdr *hdr;
	unsigned int copy, hdr_len, hdr_padded_len;
	struct page *page_to_free = NULL;
	int tailroom, shinfo_size;
	char *p, *hdr_p, *buf;

	p = page_address(page) + offset;
	hdr_p = p;

	hdr_len = vi->hdr_len;
	if (vi->mergeable_rx_bufs)
		hdr_padded_len = hdr_len;
	else
		hdr_padded_len = sizeof(struct padded_vnet_hdr);

	buf = p - headroom;
	len -= hdr_len;
	offset += hdr_padded_len;
	p += hdr_padded_len;
	tailroom = truesize - headroom - hdr_padded_len - len;

	shinfo_size = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	/* build the skb directly around the buffer when there is enough
	 * tailroom, avoiding a copy
	 */
	if (!NET_IP_ALIGN && len > GOOD_COPY_LEN && tailroom >= shinfo_size) {
		skb = virtnet_build_skb(buf, truesize, p - buf, len);
		if (unlikely(!skb))
			return NULL;

		page = (struct page *)page->private;
		if (page)
			give_pages(rq, page);
		goto ok;
	}

	/* copy small packet so we can reuse these pages for small data */
	skb = napi_alloc_skb(&rq->napi, GOOD_COPY_LEN);
	if (unlikely(!skb))
		return NULL;

	/* Copy the entire frame if it fits skb->head, otherwise
	 * we let virtio_net_hdr_to_skb() and GRO pull headers as needed.
	 */
	if (len <= skb_tailroom(skb))
		copy = len;
	else
		copy = ETH_HLEN;
	skb_put_data(skb, p, copy);

	len -= copy;
	offset += copy;

	if (vi->mergeable_rx_bufs) {
		if (len)
			skb_add_rx_frag(skb, 0, page, offset, len, truesize);
		else
			page_to_free = page;
		goto ok;
	}

	/*
	 * Verify that we can indeed put this data into a skb.
	 * This is here to handle cases when the device erroneously
	 * tries to receive more than is possible. This is usually
	 * the case of a broken device.
	 */
	if (unlikely(len > MAX_SKB_FRAGS * PAGE_SIZE)) {
		net_dbg_ratelimited("%s: too much data\n", skb->dev->name);
		dev_kfree_skb(skb);
		return NULL;
	}
	BUG_ON(offset >= PAGE_SIZE);
	while (len) {
		unsigned int frag_size = min((unsigned)PAGE_SIZE - offset, len);
		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, offset,
				frag_size, truesize);
		len -= frag_size;
		page = (struct page *)page->private;
		offset = 0;
	}

	if (page)
		give_pages(rq, page);

ok:
	hdr = skb_vnet_common_hdr(skb);
	memcpy(hdr, hdr_p, hdr_len);
	if (page_to_free)
		put_page(page_to_free);

	return skb;
}

static void virtnet_rq_unmap(struct receive_queue *rq, void *buf, u32 len)
{
	struct page *page = virt_to_head_page(buf);
	struct virtnet_rq_dma *dma;
	void *head;
	int offset;

	head = page_address(page);

	dma = head;

	--dma->ref;

	if (dma->need_sync && len) {
		offset = buf - (head + sizeof(*dma));

		virtqueue_dma_sync_single_range_for_cpu(rq->vq, dma->addr,
							offset, len,
							DMA_FROM_DEVICE);
	}

	if (dma->ref)
		return;

	virtqueue_dma_unmap_single_attrs(rq->vq, dma->addr, dma->len,
					 DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
	put_page(page);
}

static void *virtnet_rq_get_buf(struct receive_queue *rq, u32 *len, void **ctx)
{
	void *buf;

	buf = virtqueue_get_buf_ctx(rq->vq, len, ctx);
	if (buf && rq->do_dma)
		virtnet_rq_unmap(rq, buf, *len);

	return buf;
}

static void virtnet_rq_init_one_sg(struct receive_queue *rq, void *buf, u32 len)
{
	struct virtnet_rq_dma *dma;
	dma_addr_t addr;
	u32 offset;
	void *head;

	if (!rq->do_dma) {
		sg_init_one(rq->sg, buf, len);
		return;
	}

	head = page_address(rq->alloc_frag.page);

	offset = buf - head;

	dma = head;

	addr = dma->addr - sizeof(*dma) + offset;

	sg_init_table(rq->sg, 1);
	rq->sg[0].dma_address = addr;
	rq->sg[0].length = len;
}

static void *virtnet_rq_alloc(struct receive_queue *rq, u32 size, gfp_t gfp)
{
	struct page_frag *alloc_frag = &rq->alloc_frag;
	struct virtnet_rq_dma *dma;
	void *buf, *head;
	dma_addr_t addr;

	if (unlikely(!skb_page_frag_refill(size, alloc_frag, gfp)))
		return NULL;

	head = page_address(alloc_frag->page);

	if (rq->do_dma) {
		dma = head;

		/* new pages */
		if (!alloc_frag->offset) {
			if (rq->last_dma) {
				/* Now that the new page is allocated, the
				 * last dma will not be used. So the dma can
				 * be unmapped if the ref is 0.
				 */
				virtnet_rq_unmap(rq, rq->last_dma, 0);
				rq->last_dma = NULL;
			}

			dma->len = alloc_frag->size - sizeof(*dma);

			addr = virtqueue_dma_map_single_attrs(rq->vq, dma + 1,
							      dma->len, DMA_FROM_DEVICE, 0);
			if (virtqueue_dma_mapping_error(rq->vq, addr))
				return NULL;

			dma->addr = addr;
			dma->need_sync = virtqueue_dma_need_sync(rq->vq, addr);

			/* Add a reference to dma to prevent the entire dma from
			 * being released during error handling. This reference
			 * will be freed after the pages are no longer used.
			 */
			get_page(alloc_frag->page);
			dma->ref = 1;
			alloc_frag->offset = sizeof(*dma);

			rq->last_dma = dma;
		}

		++dma->ref;
	}

	buf = head + alloc_frag->offset;

	get_page(alloc_frag->page);
	alloc_frag->offset += size;

	return buf;
}

static void virtnet_rq_set_premapped(struct virtnet_info *vi)
{
	int i;

	/* disable for big mode */
	if (!vi->mergeable_rx_bufs && vi->big_packets)
		return;

	for (i = 0; i < vi->max_queue_pairs; i++) {
		if (virtqueue_set_dma_premapped(vi->rq[i].vq))
			continue;

		vi->rq[i].do_dma = true;
	}
}

static void virtnet_rq_unmap_free_buf(struct virtqueue *vq, void *buf)
{
	struct virtnet_info *vi = vq->vdev->priv;
	struct receive_queue *rq;
	int i = vq2rxq(vq);

	rq = &vi->rq[i];

	if (rq->do_dma)
		virtnet_rq_unmap(rq, buf, 0);

	virtnet_rq_free_buf(vi, rq, buf);
}

static void free_old_xmit_skbs(struct send_queue *sq, bool in_napi)
{
	unsigned int len;
	unsigned int packets = 0;
	unsigned int bytes = 0;
	void *ptr;

	while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) {
		if (likely(!is_xdp_frame(ptr))) {
			struct sk_buff *skb = ptr;

			pr_debug("Sent skb %p\n", skb);

			bytes += skb->len;
			napi_consume_skb(skb, in_napi);
		} else {
			struct xdp_frame *frame = ptr_to_xdp(ptr);

			bytes += xdp_get_frame_len(frame);
			xdp_return_frame(frame);
		}
		packets++;
	}

	/* Avoid overhead when no packets have been processed; this
	 * happens when called speculatively from start_xmit.
	 */
	if (!packets)
		return;

	u64_stats_update_begin(&sq->stats.syncp);
	u64_stats_add(&sq->stats.bytes, bytes);
	u64_stats_add(&sq->stats.packets, packets);
	u64_stats_update_end(&sq->stats.syncp);
}

static bool is_xdp_raw_buffer_queue(struct virtnet_info *vi, int q)
{
	if (q < (vi->curr_queue_pairs - vi->xdp_queue_pairs))
		return false;
	else if (q < vi->curr_queue_pairs)
		return true;
	else
		return false;
}
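
/* For example, with curr_queue_pairs = 8 and xdp_queue_pairs = 2, queues
 * 0..5 carry ordinary skbs while queues 6 and 7 are XDP-only: their tx
 * completion tokens are tagged xdp_frame pointers, which is what the
 * is_xdp_frame() scheme above lets the completion path distinguish.
 */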

static void check_sq_full_and_disable(struct virtnet_info *vi,
				      struct net_device *dev,
				      struct send_queue *sq)
{
	bool use_napi = sq->napi.weight;
	int qnum;

	qnum = sq - vi->sq;

	/* If running out of space, stop queue to avoid getting packets that we
	 * are then unable to transmit.
	 * An alternative would be to force queuing layer to requeue the skb by
	 * returning NETDEV_TX_BUSY. However, NETDEV_TX_BUSY should not be
	 * returned in a normal path of operation: it means that driver is not
	 * maintaining the TX queue stop/start state properly, and causes
	 * the stack to do a non-trivial amount of useless work.
	 * Since most packets only take 1 or 2 ring slots, stopping the queue
	 * early means 16 slots are typically wasted.
	 */
	if (sq->vq->num_free < 2+MAX_SKB_FRAGS) {
		netif_stop_subqueue(dev, qnum);
		if (use_napi) {
			if (unlikely(!virtqueue_enable_cb_delayed(sq->vq)))
				virtqueue_napi_schedule(&sq->napi, sq->vq);
		} else if (unlikely(!virtqueue_enable_cb_delayed(sq->vq))) {
			/* More just got used, free them then recheck. */
			free_old_xmit_skbs(sq, false);
			if (sq->vq->num_free >= 2+MAX_SKB_FRAGS) {
				netif_start_subqueue(dev, qnum);
				virtqueue_disable_cb(sq->vq);
			}
		}
	}
}

static int __virtnet_xdp_xmit_one(struct virtnet_info *vi,
				  struct send_queue *sq,
				  struct xdp_frame *xdpf)
{
	struct virtio_net_hdr_mrg_rxbuf *hdr;
	struct skb_shared_info *shinfo;
	u8 nr_frags = 0;
	int err, i;

	if (unlikely(xdpf->headroom < vi->hdr_len))
		return -EOVERFLOW;

	if (unlikely(xdp_frame_has_frags(xdpf))) {
		shinfo = xdp_get_shared_info_from_frame(xdpf);
		nr_frags = shinfo->nr_frags;
	}

	/* In the wrapping function virtnet_xdp_xmit(), we need to free
	 * up the pending old buffers, where we need to calculate the
	 * position of skb_shared_info in xdp_get_frame_len() and
	 * xdp_return_frame(), which will involve xdpf->data and
	 * xdpf->headroom. Therefore, we need to update the value of
	 * headroom synchronously here.
	 */
	xdpf->headroom -= vi->hdr_len;
	xdpf->data -= vi->hdr_len;
	/* Zero header and leave csum up to XDP layers */
	hdr = xdpf->data;
	memset(hdr, 0, vi->hdr_len);
	xdpf->len += vi->hdr_len;

	sg_init_table(sq->sg, nr_frags + 1);
	sg_set_buf(sq->sg, xdpf->data, xdpf->len);
	for (i = 0; i < nr_frags; i++) {
		skb_frag_t *frag = &shinfo->frags[i];

		sg_set_page(&sq->sg[i + 1], skb_frag_page(frag),
			    skb_frag_size(frag), skb_frag_off(frag));
	}

	err = virtqueue_add_outbuf(sq->vq, sq->sg, nr_frags + 1,
				   xdp_to_ptr(xdpf), GFP_ATOMIC);
	if (unlikely(err))
		return -ENOSPC; /* Caller handles free/refcnt */

	return 0;
}

/* When vi->curr_queue_pairs > nr_cpu_ids, the txq/sq is only used for xdp tx
 * on the current cpu, so it does not need to be locked.
 *
 * Here we use a macro instead of inline functions because we have to deal
 * with three issues at the same time: 1. the choice of sq, 2. judging and
 * executing the lock/unlock of txq, and 3. making sparse happy. It is
 * difficult for two inline functions to solve these three problems at the
 * same time.
 */
#define virtnet_xdp_get_sq(vi) ({                                       \
	int cpu = smp_processor_id();                                   \
	struct netdev_queue *txq;                                       \
	typeof(vi) v = (vi);                                            \
	unsigned int qp;                                                \
									\
	if (v->curr_queue_pairs > nr_cpu_ids) {                         \
		qp = v->curr_queue_pairs - v->xdp_queue_pairs;          \
		qp += cpu;                                              \
		txq = netdev_get_tx_queue(v->dev, qp);                  \
		__netif_tx_acquire(txq);                                \
	} else {                                                        \
		qp = cpu % v->curr_queue_pairs;                         \
		txq = netdev_get_tx_queue(v->dev, qp);                  \
		__netif_tx_lock(txq, cpu);                              \
	}                                                               \
	v->sq + qp;                                                     \
})

#define virtnet_xdp_put_sq(vi, q) {                                     \
	struct netdev_queue *txq;                                       \
	typeof(vi) v = (vi);                                            \
									\
	txq = netdev_get_tx_queue(v->dev, (q) - v->sq);                 \
	if (v->curr_queue_pairs > nr_cpu_ids)                           \
		__netif_tx_release(txq);                                \
	else                                                            \
		__netif_tx_unlock(txq);                                 \
}
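
/* The two macros pair up like a lock/unlock scope; virtnet_xdp_xmit() below
 * is the canonical user:
 *
 *	sq = virtnet_xdp_get_sq(vi);
 *	... enqueue frames on sq ...
 *	virtnet_xdp_put_sq(vi, sq);
 */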
9262a43565cSToshiaki Makita
virtnet_xdp_xmit(struct net_device * dev,int n,struct xdp_frame ** frames,u32 flags)927735fc405SJesper Dangaard Brouer static int virtnet_xdp_xmit(struct net_device *dev,
92842b33468SJesper Dangaard Brouer int n, struct xdp_frame **frames, u32 flags)
929186b3c99SJason Wang {
930186b3c99SJason Wang struct virtnet_info *vi = netdev_priv(dev);
9318dcc5b0aSJesper Dangaard Brouer struct receive_queue *rq = vi->rq;
9328dcc5b0aSJesper Dangaard Brouer struct bpf_prog *xdp_prog;
933735fc405SJesper Dangaard Brouer struct send_queue *sq;
934735fc405SJesper Dangaard Brouer unsigned int len;
935546f2897SToshiaki Makita int packets = 0;
936546f2897SToshiaki Makita int bytes = 0;
937fdc13979SLorenzo Bianconi int nxmit = 0;
938461f03dcSToshiaki Makita int kicks = 0;
9395050471dSToshiaki Makita void *ptr;
940fdc13979SLorenzo Bianconi int ret;
941735fc405SJesper Dangaard Brouer int i;
942735fc405SJesper Dangaard Brouer
9438dcc5b0aSJesper Dangaard Brouer /* Only allow ndo_xdp_xmit if XDP is loaded on dev, as this
9448dcc5b0aSJesper Dangaard Brouer * indicate XDP resources have been successfully allocated.
9458dcc5b0aSJesper Dangaard Brouer */
9469719c6b9SJohn Fastabend xdp_prog = rcu_access_pointer(rq->xdp_prog);
9471667c08aSToshiaki Makita if (!xdp_prog)
9481667c08aSToshiaki Makita return -ENXIO;
9491667c08aSToshiaki Makita
95097c2c69eSXuan Zhuo sq = virtnet_xdp_get_sq(vi);
9519ab86bbcSShirley Ma
9529ab86bbcSShirley Ma if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) {
953186b3c99SJason Wang ret = -EINVAL;
954186b3c99SJason Wang goto out;
955186b3c99SJason Wang }
956186b3c99SJason Wang
957735fc405SJesper Dangaard Brouer /* Free up any pending old buffers before queueing new ones. */
9585050471dSToshiaki Makita while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) {
959546f2897SToshiaki Makita if (likely(is_xdp_frame(ptr))) {
960546f2897SToshiaki Makita struct xdp_frame *frame = ptr_to_xdp(ptr);
961546f2897SToshiaki Makita
96250bd14bcSHeng Qi bytes += xdp_get_frame_len(frame);
963546f2897SToshiaki Makita xdp_return_frame(frame);
964546f2897SToshiaki Makita } else {
965546f2897SToshiaki Makita struct sk_buff *skb = ptr;
966546f2897SToshiaki Makita
967546f2897SToshiaki Makita bytes += skb->len;
968546f2897SToshiaki Makita napi_consume_skb(skb, false);
969546f2897SToshiaki Makita }
970546f2897SToshiaki Makita packets++;
9715050471dSToshiaki Makita }
972735fc405SJesper Dangaard Brouer
973735fc405SJesper Dangaard Brouer for (i = 0; i < n; i++) {
974735fc405SJesper Dangaard Brouer struct xdp_frame *xdpf = frames[i];
975735fc405SJesper Dangaard Brouer
976fdc13979SLorenzo Bianconi if (__virtnet_xdp_xmit_one(vi, sq, xdpf))
977fdc13979SLorenzo Bianconi break;
978fdc13979SLorenzo Bianconi nxmit++;
979735fc405SJesper Dangaard Brouer }
980fdc13979SLorenzo Bianconi ret = nxmit;
9815d274cb4SJesper Dangaard Brouer
982cd1c604aSXuan Zhuo if (!is_xdp_raw_buffer_queue(vi, sq - vi->sq))
983cd1c604aSXuan Zhuo check_sq_full_and_disable(vi, dev, sq);
984cd1c604aSXuan Zhuo
985461f03dcSToshiaki Makita if (flags & XDP_XMIT_FLUSH) {
986461f03dcSToshiaki Makita if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq))
987461f03dcSToshiaki Makita kicks = 1;
988461f03dcSToshiaki Makita }
9895b8f3c8dSToshiaki Makita out:
9905b8f3c8dSToshiaki Makita u64_stats_update_begin(&sq->stats.syncp);
99127debe3eSEric Dumazet u64_stats_add(&sq->stats.bytes, bytes);
99227debe3eSEric Dumazet u64_stats_add(&sq->stats.packets, packets);
99327debe3eSEric Dumazet u64_stats_add(&sq->stats.xdp_tx, n);
99427debe3eSEric Dumazet u64_stats_add(&sq->stats.xdp_tx_drops, n - nxmit);
99527debe3eSEric Dumazet u64_stats_add(&sq->stats.kicks, kicks);
9965b8f3c8dSToshiaki Makita u64_stats_update_end(&sq->stats.syncp);
9975d274cb4SJesper Dangaard Brouer
99897c2c69eSXuan Zhuo virtnet_xdp_put_sq(vi, sq);
9995b8f3c8dSToshiaki Makita return ret;
1000186b3c99SJason Wang }
1001186b3c99SJason Wang
put_xdp_frags(struct xdp_buff * xdp)1002bb2c1e9eSXuan Zhuo static void put_xdp_frags(struct xdp_buff *xdp)
1003bb2c1e9eSXuan Zhuo {
1004bb2c1e9eSXuan Zhuo struct skb_shared_info *shinfo;
1005bb2c1e9eSXuan Zhuo struct page *xdp_page;
1006bb2c1e9eSXuan Zhuo int i;
1007bb2c1e9eSXuan Zhuo
1008bb2c1e9eSXuan Zhuo if (xdp_buff_has_frags(xdp)) {
1009bb2c1e9eSXuan Zhuo shinfo = xdp_get_shared_info_from_buff(xdp);
1010bb2c1e9eSXuan Zhuo for (i = 0; i < shinfo->nr_frags; i++) {
1011bb2c1e9eSXuan Zhuo xdp_page = skb_frag_page(&shinfo->frags[i]);
1012bb2c1e9eSXuan Zhuo put_page(xdp_page);
1013bb2c1e9eSXuan Zhuo }
1014bb2c1e9eSXuan Zhuo }
1015bb2c1e9eSXuan Zhuo }
1016bb2c1e9eSXuan Zhuo
virtnet_xdp_handler(struct bpf_prog * xdp_prog,struct xdp_buff * xdp,struct net_device * dev,unsigned int * xdp_xmit,struct virtnet_rq_stats * stats)101700765f8eSXuan Zhuo static int virtnet_xdp_handler(struct bpf_prog *xdp_prog, struct xdp_buff *xdp,
101800765f8eSXuan Zhuo struct net_device *dev,
101900765f8eSXuan Zhuo unsigned int *xdp_xmit,
102000765f8eSXuan Zhuo struct virtnet_rq_stats *stats)
102100765f8eSXuan Zhuo {
102200765f8eSXuan Zhuo struct xdp_frame *xdpf;
102300765f8eSXuan Zhuo int err;
102400765f8eSXuan Zhuo u32 act;
102500765f8eSXuan Zhuo
102600765f8eSXuan Zhuo act = bpf_prog_run_xdp(xdp_prog, xdp);
102727debe3eSEric Dumazet u64_stats_inc(&stats->xdp_packets);
102800765f8eSXuan Zhuo
102900765f8eSXuan Zhuo switch (act) {
103000765f8eSXuan Zhuo case XDP_PASS:
103100765f8eSXuan Zhuo return act;
103200765f8eSXuan Zhuo
103300765f8eSXuan Zhuo case XDP_TX:
103427debe3eSEric Dumazet u64_stats_inc(&stats->xdp_tx);
103500765f8eSXuan Zhuo xdpf = xdp_convert_buff_to_frame(xdp);
103600765f8eSXuan Zhuo if (unlikely(!xdpf)) {
103700765f8eSXuan Zhuo netdev_dbg(dev, "convert buff to frame failed for xdp\n");
103800765f8eSXuan Zhuo return XDP_DROP;
103900765f8eSXuan Zhuo }
104000765f8eSXuan Zhuo
104100765f8eSXuan Zhuo err = virtnet_xdp_xmit(dev, 1, &xdpf, 0);
104200765f8eSXuan Zhuo if (unlikely(!err)) {
104300765f8eSXuan Zhuo xdp_return_frame_rx_napi(xdpf);
104400765f8eSXuan Zhuo } else if (unlikely(err < 0)) {
104500765f8eSXuan Zhuo trace_xdp_exception(dev, xdp_prog, act);
104600765f8eSXuan Zhuo return XDP_DROP;
104700765f8eSXuan Zhuo }
104800765f8eSXuan Zhuo *xdp_xmit |= VIRTIO_XDP_TX;
104900765f8eSXuan Zhuo return act;
105000765f8eSXuan Zhuo
105100765f8eSXuan Zhuo case XDP_REDIRECT:
105227debe3eSEric Dumazet u64_stats_inc(&stats->xdp_redirects);
105300765f8eSXuan Zhuo err = xdp_do_redirect(dev, xdp, xdp_prog);
105400765f8eSXuan Zhuo if (err)
105500765f8eSXuan Zhuo return XDP_DROP;
105600765f8eSXuan Zhuo
105700765f8eSXuan Zhuo *xdp_xmit |= VIRTIO_XDP_REDIR;
105800765f8eSXuan Zhuo return act;
105900765f8eSXuan Zhuo
106000765f8eSXuan Zhuo default:
106100765f8eSXuan Zhuo bpf_warn_invalid_xdp_action(dev, xdp_prog, act);
106200765f8eSXuan Zhuo fallthrough;
106300765f8eSXuan Zhuo case XDP_ABORTED:
106400765f8eSXuan Zhuo trace_xdp_exception(dev, xdp_prog, act);
106500765f8eSXuan Zhuo fallthrough;
106600765f8eSXuan Zhuo case XDP_DROP:
106700765f8eSXuan Zhuo return XDP_DROP;
106800765f8eSXuan Zhuo }
106900765f8eSXuan Zhuo }
107000765f8eSXuan Zhuo
virtnet_get_headroom(struct virtnet_info * vi)1071f6b10209SJason Wang static unsigned int virtnet_get_headroom(struct virtnet_info *vi)
1072f6b10209SJason Wang {
107397c2c69eSXuan Zhuo return vi->xdp_enabled ? VIRTIO_XDP_HEADROOM : 0;
1074f6b10209SJason Wang }
1075f6b10209SJason Wang
10764941d472SJason Wang /* We copy the packet for XDP in the following cases:
10774941d472SJason Wang *
10784941d472SJason Wang * 1) Packet is scattered across multiple rx buffers.
10794941d472SJason Wang * 2) Headroom space is insufficient.
10804941d472SJason Wang *
10814941d472SJason Wang * This is inefficient but it's a temporary condition that
10824941d472SJason Wang * we hit right after XDP is enabled and until queue is refilled
10834941d472SJason Wang * with large buffers with sufficient headroom - so it should affect
10844941d472SJason Wang * at most queue size packets.
10854941d472SJason Wang * Afterwards, the conditions to enable
10864941d472SJason Wang * XDP should preclude the underlying device from sending packets
10874941d472SJason Wang * across multiple buffers (num_buf > 1), and we make sure buffers
10884941d472SJason Wang * have enough headroom.
108972979a6cSJohn Fastabend */
xdp_linearize_page(struct receive_queue * rq,int * num_buf,struct page * p,int offset,int page_off,unsigned int * len)109072979a6cSJohn Fastabend static struct page *xdp_linearize_page(struct receive_queue *rq,
1091981f14d4SHeng Qi int *num_buf,
109272979a6cSJohn Fastabend struct page *p,
109372979a6cSJohn Fastabend int offset,
10944941d472SJason Wang int page_off,
109572979a6cSJohn Fastabend unsigned int *len)
109672979a6cSJohn Fastabend {
1097853618d5SXuan Zhuo int tailroom = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
1098853618d5SXuan Zhuo struct page *page;
109972979a6cSJohn Fastabend
1100853618d5SXuan Zhuo if (page_off + *len + tailroom > PAGE_SIZE)
1101853618d5SXuan Zhuo return NULL;
1102853618d5SXuan Zhuo
1103853618d5SXuan Zhuo page = alloc_page(GFP_ATOMIC);
110472979a6cSJohn Fastabend if (!page)
110572979a6cSJohn Fastabend return NULL;
110672979a6cSJohn Fastabend
110772979a6cSJohn Fastabend memcpy(page_address(page) + page_off, page_address(p) + offset, *len);
110872979a6cSJohn Fastabend page_off += *len;
110972979a6cSJohn Fastabend
111056a86f84SJason Wang while (--*num_buf) {
111172979a6cSJohn Fastabend unsigned int buflen;
111272979a6cSJohn Fastabend void *buf;
111372979a6cSJohn Fastabend int off;
111472979a6cSJohn Fastabend
1115295525e2SXuan Zhuo buf = virtnet_rq_get_buf(rq, &buflen, NULL);
1116680557cfSMichael S. Tsirkin if (unlikely(!buf))
111772979a6cSJohn Fastabend goto err_buf;
111872979a6cSJohn Fastabend
111972979a6cSJohn Fastabend p = virt_to_head_page(buf);
112072979a6cSJohn Fastabend off = buf - page_address(p);
112172979a6cSJohn Fastabend
112256a86f84SJason Wang /* guard against a misconfigured or uncooperative backend that
112356a86f84SJason Wang * is sending packet larger than the MTU.
112456a86f84SJason Wang */
11253cc81a9aSJason Wang if ((page_off + buflen + tailroom) > PAGE_SIZE) {
112656a86f84SJason Wang put_page(p);
112756a86f84SJason Wang goto err_buf;
112856a86f84SJason Wang }
112956a86f84SJason Wang
113072979a6cSJohn Fastabend memcpy(page_address(page) + page_off,
113172979a6cSJohn Fastabend page_address(p) + off, buflen);
113272979a6cSJohn Fastabend page_off += buflen;
113356a86f84SJason Wang put_page(p);
113472979a6cSJohn Fastabend }
113572979a6cSJohn Fastabend
11362de2f7f4SJohn Fastabend /* Headroom does not contribute to packet length */
11372de2f7f4SJohn Fastabend *len = page_off - VIRTIO_XDP_HEADROOM;
113872979a6cSJohn Fastabend return page;
113972979a6cSJohn Fastabend err_buf:
114072979a6cSJohn Fastabend __free_pages(page, 0);
114172979a6cSJohn Fastabend return NULL;
114272979a6cSJohn Fastabend }
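/* Worked example (illustrative sketch, not part of the driver): with
 * 4 KiB pages and 256 bytes of VIRTIO_XDP_HEADROOM, the tailroom
 * reserved above for struct skb_shared_info (roughly 320 bytes after
 * SKB_DATA_ALIGN() on a typical x86-64 config) bounds how much can be
 * linearized into one page:
 *
 *	4096 - 256 (headroom) - ~320 (tailroom) = ~3520 bytes of data
 *
 * Anything larger trips the (page_off + len + tailroom > PAGE_SIZE)
 * checks above and the packet is dropped rather than handed to XDP.
 */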
114372979a6cSJohn Fastabend
114419e8c85eSXuan Zhuo static struct sk_buff *receive_small_build_skb(struct virtnet_info *vi,
114519e8c85eSXuan Zhuo unsigned int xdp_headroom,
114619e8c85eSXuan Zhuo void *buf,
114719e8c85eSXuan Zhuo unsigned int len)
114819e8c85eSXuan Zhuo {
114919e8c85eSXuan Zhuo unsigned int header_offset;
115019e8c85eSXuan Zhuo unsigned int headroom;
115119e8c85eSXuan Zhuo unsigned int buflen;
115219e8c85eSXuan Zhuo struct sk_buff *skb;
115319e8c85eSXuan Zhuo
115419e8c85eSXuan Zhuo header_offset = VIRTNET_RX_PAD + xdp_headroom;
115519e8c85eSXuan Zhuo headroom = vi->hdr_len + header_offset;
115619e8c85eSXuan Zhuo buflen = SKB_DATA_ALIGN(GOOD_PACKET_LEN + headroom) +
115719e8c85eSXuan Zhuo SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
115819e8c85eSXuan Zhuo
115921e26a71SXuan Zhuo skb = virtnet_build_skb(buf, buflen, headroom, len);
116021e26a71SXuan Zhuo if (unlikely(!skb))
116119e8c85eSXuan Zhuo return NULL;
116219e8c85eSXuan Zhuo
116319e8c85eSXuan Zhuo buf += header_offset;
1164dae64749SFeng Liu memcpy(skb_vnet_common_hdr(skb), buf, vi->hdr_len);
116519e8c85eSXuan Zhuo
116619e8c85eSXuan Zhuo return skb;
116719e8c85eSXuan Zhuo }
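/* Buffer layout assumed above (illustrative sketch):
 *
 *	buf
 *	|<-VIRTNET_RX_PAD->|<-xdp_headroom->|<-hdr_len->|<- packet (len) ->|...|
 *	|<---------- header_offset -------->|
 *	|<----------------- headroom ------------------>|
 *	|<----------------- buflen (with shinfo tailroom) ----------------...-->|
 *
 * virtnet_build_skb() wraps the existing buffer without copying the
 * packet data; only the virtio-net header is copied out via the
 * memcpy() above.
 */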
116819e8c85eSXuan Zhuo
1169c5f3e72fSXuan Zhuo static struct sk_buff *receive_small_xdp(struct net_device *dev,
11704941d472SJason Wang struct virtnet_info *vi,
11714941d472SJason Wang struct receive_queue *rq,
1172c5f3e72fSXuan Zhuo struct bpf_prog *xdp_prog,
1173c5f3e72fSXuan Zhuo void *buf,
1174c5f3e72fSXuan Zhuo unsigned int xdp_headroom,
1175186b3c99SJason Wang unsigned int len,
11767d9d60fdSToshiaki Makita unsigned int *xdp_xmit,
1177d46eeeafSJason Wang struct virtnet_rq_stats *stats)
11784941d472SJason Wang {
11794941d472SJason Wang unsigned int header_offset = VIRTNET_RX_PAD + xdp_headroom;
11804941d472SJason Wang unsigned int headroom = vi->hdr_len + header_offset;
11814941d472SJason Wang struct virtio_net_hdr_mrg_rxbuf *hdr = buf + header_offset;
1182c5f3e72fSXuan Zhuo struct page *page = virt_to_head_page(buf);
1183c5f3e72fSXuan Zhuo struct page *xdp_page;
1184c5f3e72fSXuan Zhuo unsigned int buflen;
11854941d472SJason Wang struct xdp_buff xdp;
1186c5f3e72fSXuan Zhuo struct sk_buff *skb;
1187c5f3e72fSXuan Zhuo unsigned int metasize = 0;
11884941d472SJason Wang u32 act;
11894941d472SJason Wang
119095dbe9e7SJesper Dangaard Brouer if (unlikely(hdr->hdr.gso_type))
11914941d472SJason Wang goto err_xdp;
11924941d472SJason Wang
1193*3ef2a16aSHeng Qi /* Partially checksummed packets must be dropped. */
1194*3ef2a16aSHeng Qi if (unlikely(hdr->hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM))
1195*3ef2a16aSHeng Qi goto err_xdp;
1196*3ef2a16aSHeng Qi
1197c5f3e72fSXuan Zhuo buflen = SKB_DATA_ALIGN(GOOD_PACKET_LEN + headroom) +
1198c5f3e72fSXuan Zhuo SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
1199c5f3e72fSXuan Zhuo
12004941d472SJason Wang if (unlikely(xdp_headroom < virtnet_get_headroom(vi))) {
12014941d472SJason Wang int offset = buf - page_address(page) + header_offset;
12024941d472SJason Wang unsigned int tlen = len + vi->hdr_len;
1203981f14d4SHeng Qi int num_buf = 1;
12044941d472SJason Wang
12054941d472SJason Wang xdp_headroom = virtnet_get_headroom(vi);
12064941d472SJason Wang header_offset = VIRTNET_RX_PAD + xdp_headroom;
12074941d472SJason Wang headroom = vi->hdr_len + header_offset;
12084941d472SJason Wang buflen = SKB_DATA_ALIGN(GOOD_PACKET_LEN + headroom) +
12094941d472SJason Wang SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
12104941d472SJason Wang xdp_page = xdp_linearize_page(rq, &num_buf, page,
12114941d472SJason Wang offset, header_offset,
12124941d472SJason Wang &tlen);
12134941d472SJason Wang if (!xdp_page)
12144941d472SJason Wang goto err_xdp;
12154941d472SJason Wang
12164941d472SJason Wang buf = page_address(xdp_page);
12174941d472SJason Wang put_page(page);
12184941d472SJason Wang page = xdp_page;
12194941d472SJason Wang }
12204941d472SJason Wang
122143b5169dSLorenzo Bianconi xdp_init_buff(&xdp, buflen, &rq->xdp_rxq);
1222be9df4afSLorenzo Bianconi xdp_prepare_buff(&xdp, buf + VIRTNET_RX_PAD + vi->hdr_len,
1223be9df4afSLorenzo Bianconi xdp_headroom, len, true);
122400765f8eSXuan Zhuo
122500765f8eSXuan Zhuo act = virtnet_xdp_handler(xdp_prog, &xdp, dev, xdp_xmit, stats);
12264941d472SJason Wang
12274941d472SJason Wang switch (act) {
12284941d472SJason Wang case XDP_PASS:
12294941d472SJason Wang /* Recalculate length in case bpf program changed it */
12306870de43SNikita V. Shirokov len = xdp.data_end - xdp.data;
1231503d539aSYuya Kusakabe metasize = xdp.data - xdp.data_meta;
12324941d472SJason Wang break;
1233c5f3e72fSXuan Zhuo
12344941d472SJason Wang case XDP_TX:
1235186b3c99SJason Wang case XDP_REDIRECT:
12364941d472SJason Wang goto xdp_xmit;
1237c5f3e72fSXuan Zhuo
12384941d472SJason Wang default:
12394941d472SJason Wang goto err_xdp;
12404941d472SJason Wang }
1241c5f3e72fSXuan Zhuo
124221e26a71SXuan Zhuo skb = virtnet_build_skb(buf, buflen, xdp.data - buf, len);
124321e26a71SXuan Zhuo if (unlikely(!skb))
1244c5f3e72fSXuan Zhuo goto err;
1245c5f3e72fSXuan Zhuo
1246c5f3e72fSXuan Zhuo if (metasize)
1247c5f3e72fSXuan Zhuo skb_metadata_set(skb, metasize);
1248c5f3e72fSXuan Zhuo
1249c5f3e72fSXuan Zhuo return skb;
1250c5f3e72fSXuan Zhuo
1251c5f3e72fSXuan Zhuo err_xdp:
125227debe3eSEric Dumazet u64_stats_inc(&stats->xdp_drops);
1253c5f3e72fSXuan Zhuo err:
125427debe3eSEric Dumazet u64_stats_inc(&stats->drops);
1255c5f3e72fSXuan Zhuo put_page(page);
1256c5f3e72fSXuan Zhuo xdp_xmit:
1257c5f3e72fSXuan Zhuo return NULL;
1258c5f3e72fSXuan Zhuo }
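/* Example (illustrative sketch): an XDP program attached to this device
 * that prepends 4 bytes of metadata would make the XDP_PASS path above
 * observe:
 *
 *	// in the BPF program:
 *	bpf_xdp_adjust_meta(ctx, -4);		// grow the metadata area
 *
 *	// back in receive_small_xdp():
 *	len      = xdp.data_end - xdp.data;	// payload length (possibly
 *						// changed by adjust_head)
 *	metasize = xdp.data - xdp.data_meta;	// == 4
 *
 * so the skb is built starting at xdp.data and the metadata is preserved
 * via skb_metadata_set().
 */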
1259c5f3e72fSXuan Zhuo
1260c5f3e72fSXuan Zhuo static struct sk_buff *receive_small(struct net_device *dev,
1261c5f3e72fSXuan Zhuo struct virtnet_info *vi,
1262c5f3e72fSXuan Zhuo struct receive_queue *rq,
1263c5f3e72fSXuan Zhuo void *buf, void *ctx,
1264c5f3e72fSXuan Zhuo unsigned int len,
1265c5f3e72fSXuan Zhuo unsigned int *xdp_xmit,
1266c5f3e72fSXuan Zhuo struct virtnet_rq_stats *stats)
1267c5f3e72fSXuan Zhuo {
1268c5f3e72fSXuan Zhuo unsigned int xdp_headroom = (unsigned long)ctx;
1269c5f3e72fSXuan Zhuo struct page *page = virt_to_head_page(buf);
1270aef76506SXuan Zhuo struct sk_buff *skb;
1271c5f3e72fSXuan Zhuo
1272c5f3e72fSXuan Zhuo len -= vi->hdr_len;
127327debe3eSEric Dumazet u64_stats_add(&stats->bytes, len);
1274c5f3e72fSXuan Zhuo
1275c5f3e72fSXuan Zhuo if (unlikely(len > GOOD_PACKET_LEN)) {
1276c5f3e72fSXuan Zhuo pr_debug("%s: rx error: len %u exceeds max size %d\n",
1277c5f3e72fSXuan Zhuo dev->name, len, GOOD_PACKET_LEN);
1278e2e5c2a3SEric Dumazet DEV_STATS_INC(dev, rx_length_errors);
1279c5f3e72fSXuan Zhuo goto err;
1280c5f3e72fSXuan Zhuo }
1281c5f3e72fSXuan Zhuo
1282aef76506SXuan Zhuo if (unlikely(vi->xdp_enabled)) {
1283aef76506SXuan Zhuo struct bpf_prog *xdp_prog;
1284c5f3e72fSXuan Zhuo
1285c5f3e72fSXuan Zhuo rcu_read_lock();
1286c5f3e72fSXuan Zhuo xdp_prog = rcu_dereference(rq->xdp_prog);
1287c5f3e72fSXuan Zhuo if (xdp_prog) {
1288aef76506SXuan Zhuo skb = receive_small_xdp(dev, vi, rq, xdp_prog, buf,
1289aef76506SXuan Zhuo xdp_headroom, len, xdp_xmit,
1290aef76506SXuan Zhuo stats);
1291c5f3e72fSXuan Zhuo rcu_read_unlock();
1292c5f3e72fSXuan Zhuo return skb;
12934941d472SJason Wang }
12944941d472SJason Wang rcu_read_unlock();
1295aef76506SXuan Zhuo }
12964941d472SJason Wang
129719e8c85eSXuan Zhuo skb = receive_small_build_skb(vi, xdp_headroom, buf, len);
129819e8c85eSXuan Zhuo if (likely(skb))
12994941d472SJason Wang return skb;
13004941d472SJason Wang
1301053c9e18SWenliang Wang err:
130227debe3eSEric Dumazet u64_stats_inc(&stats->drops);
13034941d472SJason Wang put_page(page);
13044941d472SJason Wang return NULL;
13054941d472SJason Wang }
13064941d472SJason Wang
13074941d472SJason Wang static struct sk_buff *receive_big(struct net_device *dev,
13084941d472SJason Wang struct virtnet_info *vi,
13094941d472SJason Wang struct receive_queue *rq,
13104941d472SJason Wang void *buf,
13117d9d60fdSToshiaki Makita unsigned int len,
1312d46eeeafSJason Wang struct virtnet_rq_stats *stats)
13134941d472SJason Wang {
13144941d472SJason Wang struct page *page = buf;
1315503d539aSYuya Kusakabe struct sk_buff *skb =
1316fa0f1ba7SXuan Zhuo page_to_skb(vi, rq, page, 0, len, PAGE_SIZE, 0);
13174941d472SJason Wang
131827debe3eSEric Dumazet u64_stats_add(&stats->bytes, len - vi->hdr_len);
13194941d472SJason Wang if (unlikely(!skb))
13204941d472SJason Wang goto err;
13214941d472SJason Wang
13224941d472SJason Wang return skb;
13234941d472SJason Wang
13244941d472SJason Wang err:
132527debe3eSEric Dumazet u64_stats_inc(&stats->drops);
13264941d472SJason Wang give_pages(rq, page);
13274941d472SJason Wang return NULL;
13284941d472SJason Wang }
13294941d472SJason Wang
133080f50f91SXuan Zhuo static void mergeable_buf_free(struct receive_queue *rq, int num_buf,
133180f50f91SXuan Zhuo struct net_device *dev,
133280f50f91SXuan Zhuo struct virtnet_rq_stats *stats)
133380f50f91SXuan Zhuo {
133480f50f91SXuan Zhuo struct page *page;
133580f50f91SXuan Zhuo void *buf;
133680f50f91SXuan Zhuo int len;
133780f50f91SXuan Zhuo
133880f50f91SXuan Zhuo while (num_buf-- > 1) {
1339295525e2SXuan Zhuo buf = virtnet_rq_get_buf(rq, &len, NULL);
134080f50f91SXuan Zhuo if (unlikely(!buf)) {
134180f50f91SXuan Zhuo pr_debug("%s: rx error: %d buffers missing\n",
134280f50f91SXuan Zhuo dev->name, num_buf);
1343e2e5c2a3SEric Dumazet DEV_STATS_INC(dev, rx_length_errors);
134480f50f91SXuan Zhuo break;
134580f50f91SXuan Zhuo }
134627debe3eSEric Dumazet u64_stats_add(&stats->bytes, len);
134780f50f91SXuan Zhuo page = virt_to_head_page(buf);
134880f50f91SXuan Zhuo put_page(page);
134980f50f91SXuan Zhuo }
135080f50f91SXuan Zhuo }
135180f50f91SXuan Zhuo
1352b26aa481SHeng Qi /* Why not use xdp_build_skb_from_frame()?
1353b26aa481SHeng Qi * The XDP core assumes that xdp frags are PAGE_SIZE in length, while in
1354b26aa481SHeng Qi * virtio-net there are two points that do not match its requirements:
1355b26aa481SHeng Qi * 1. The size of the prefilled buffer is not fixed before XDP is set.
1356b26aa481SHeng Qi * 2. xdp_build_skb_from_frame() does extra checks that we don't need,
1357b26aa481SHeng Qi * like eth_type_trans() (which virtio-net does in receive_buf()).
1358b26aa481SHeng Qi */
1359b26aa481SHeng Qi static struct sk_buff *build_skb_from_xdp_buff(struct net_device *dev,
1360b26aa481SHeng Qi struct virtnet_info *vi,
1361b26aa481SHeng Qi struct xdp_buff *xdp,
1362b26aa481SHeng Qi unsigned int xdp_frags_truesz)
1363b26aa481SHeng Qi {
1364b26aa481SHeng Qi struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
1365b26aa481SHeng Qi unsigned int headroom, data_len;
1366b26aa481SHeng Qi struct sk_buff *skb;
1367b26aa481SHeng Qi int metasize;
1368b26aa481SHeng Qi u8 nr_frags;
1369b26aa481SHeng Qi
1370b26aa481SHeng Qi if (unlikely(xdp->data_end > xdp_data_hard_end(xdp))) {
1371b26aa481SHeng Qi pr_debug("Error building skb as missing reserved tailroom for xdp");
1372b26aa481SHeng Qi return NULL;
1373b26aa481SHeng Qi }
1374b26aa481SHeng Qi
1375b26aa481SHeng Qi if (unlikely(xdp_buff_has_frags(xdp)))
1376b26aa481SHeng Qi nr_frags = sinfo->nr_frags;
1377b26aa481SHeng Qi
1378b26aa481SHeng Qi skb = build_skb(xdp->data_hard_start, xdp->frame_sz);
1379b26aa481SHeng Qi if (unlikely(!skb))
1380b26aa481SHeng Qi return NULL;
1381b26aa481SHeng Qi
1382b26aa481SHeng Qi headroom = xdp->data - xdp->data_hard_start;
1383b26aa481SHeng Qi data_len = xdp->data_end - xdp->data;
1384b26aa481SHeng Qi skb_reserve(skb, headroom);
1385b26aa481SHeng Qi __skb_put(skb, data_len);
1386b26aa481SHeng Qi
1387b26aa481SHeng Qi metasize = xdp->data - xdp->data_meta;
1388b26aa481SHeng Qi metasize = metasize > 0 ? metasize : 0;
1389b26aa481SHeng Qi if (metasize)
1390b26aa481SHeng Qi skb_metadata_set(skb, metasize);
1391b26aa481SHeng Qi
1392b26aa481SHeng Qi if (unlikely(xdp_buff_has_frags(xdp)))
1393b26aa481SHeng Qi xdp_update_skb_shared_info(skb, nr_frags,
1394b26aa481SHeng Qi sinfo->xdp_frags_size,
1395b26aa481SHeng Qi xdp_frags_truesz,
1396b26aa481SHeng Qi xdp_buff_is_frag_pfmemalloc(xdp));
1397b26aa481SHeng Qi
1398b26aa481SHeng Qi return skb;
1399b26aa481SHeng Qi }
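/* The skb built above reuses the XDP buffer in place (a sketch of the
 * invariants, following xdp_prepare_buff()):
 *
 *	headroom = xdp->data - xdp->data_hard_start;
 *	data_len = xdp->data_end - xdp->data;
 *
 *	skb = build_skb(xdp->data_hard_start, xdp->frame_sz);
 *	skb_reserve(skb, headroom);	// skb->data == xdp->data
 *	__skb_put(skb, data_len);	// skb->tail == xdp->data_end
 *
 * No copy is made for the linear part; frags, if any, are attached via
 * xdp_update_skb_shared_info() with their accumulated truesize.
 */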
1400b26aa481SHeng Qi
1401ef75cb51SHeng Qi /* TODO: build xdp in big mode */
1402ef75cb51SHeng Qi static int virtnet_build_xdp_buff_mrg(struct net_device *dev,
1403ef75cb51SHeng Qi struct virtnet_info *vi,
1404ef75cb51SHeng Qi struct receive_queue *rq,
1405ef75cb51SHeng Qi struct xdp_buff *xdp,
1406ef75cb51SHeng Qi void *buf,
1407ef75cb51SHeng Qi unsigned int len,
1408ef75cb51SHeng Qi unsigned int frame_sz,
1409981f14d4SHeng Qi int *num_buf,
1410ef75cb51SHeng Qi unsigned int *xdp_frags_truesize,
1411ef75cb51SHeng Qi struct virtnet_rq_stats *stats)
1412ef75cb51SHeng Qi {
1413ef75cb51SHeng Qi struct virtio_net_hdr_mrg_rxbuf *hdr = buf;
1414ef75cb51SHeng Qi unsigned int headroom, tailroom, room;
1415ef75cb51SHeng Qi unsigned int truesize, cur_frag_size;
1416ef75cb51SHeng Qi struct skb_shared_info *shinfo;
1417ef75cb51SHeng Qi unsigned int xdp_frags_truesz = 0;
1418ef75cb51SHeng Qi struct page *page;
1419ef75cb51SHeng Qi skb_frag_t *frag;
1420ef75cb51SHeng Qi int offset;
1421ef75cb51SHeng Qi void *ctx;
1422ef75cb51SHeng Qi
1423ef75cb51SHeng Qi xdp_init_buff(xdp, frame_sz, &rq->xdp_rxq);
1424ef75cb51SHeng Qi xdp_prepare_buff(xdp, buf - VIRTIO_XDP_HEADROOM,
1425ef75cb51SHeng Qi VIRTIO_XDP_HEADROOM + vi->hdr_len, len - vi->hdr_len, true);
1426ef75cb51SHeng Qi
1427981f14d4SHeng Qi if (!*num_buf)
1428981f14d4SHeng Qi return 0;
1429981f14d4SHeng Qi
1430ef75cb51SHeng Qi if (*num_buf > 1) {
1431ef75cb51SHeng Qi /* If we want to build a multi-buffer xdp_buff, we need
1432ef75cb51SHeng Qi * to set the XDP_FLAGS_HAS_FRAGS bit in the xdp_buff
1433ef75cb51SHeng Qi * flags.
1434ef75cb51SHeng Qi */
1435ef75cb51SHeng Qi if (!xdp_buff_has_frags(xdp))
1436ef75cb51SHeng Qi xdp_buff_set_frags_flag(xdp);
1437ef75cb51SHeng Qi
1438ef75cb51SHeng Qi shinfo = xdp_get_shared_info_from_buff(xdp);
1439ef75cb51SHeng Qi shinfo->nr_frags = 0;
1440ef75cb51SHeng Qi shinfo->xdp_frags_size = 0;
1441ef75cb51SHeng Qi }
1442ef75cb51SHeng Qi
1443981f14d4SHeng Qi if (*num_buf > MAX_SKB_FRAGS + 1)
1444ef75cb51SHeng Qi return -EINVAL;
1445ef75cb51SHeng Qi
1446981f14d4SHeng Qi while (--*num_buf > 0) {
1447295525e2SXuan Zhuo buf = virtnet_rq_get_buf(rq, &len, &ctx);
1448ef75cb51SHeng Qi if (unlikely(!buf)) {
1449ef75cb51SHeng Qi pr_debug("%s: rx error: %d buffers out of %d missing\n",
1450ef75cb51SHeng Qi dev->name, *num_buf,
1451ef75cb51SHeng Qi virtio16_to_cpu(vi->vdev, hdr->num_buffers));
1452e2e5c2a3SEric Dumazet DEV_STATS_INC(dev, rx_length_errors);
14534cb00b13SXuan Zhuo goto err;
1454ef75cb51SHeng Qi }
1455ef75cb51SHeng Qi
145627debe3eSEric Dumazet u64_stats_add(&stats->bytes, len);
1457ef75cb51SHeng Qi page = virt_to_head_page(buf);
1458ef75cb51SHeng Qi offset = buf - page_address(page);
1459ef75cb51SHeng Qi
1460ef75cb51SHeng Qi truesize = mergeable_ctx_to_truesize(ctx);
1461ef75cb51SHeng Qi headroom = mergeable_ctx_to_headroom(ctx);
1462ef75cb51SHeng Qi tailroom = headroom ? sizeof(struct skb_shared_info) : 0;
1463ef75cb51SHeng Qi room = SKB_DATA_ALIGN(headroom + tailroom);
1464ef75cb51SHeng Qi
1465ef75cb51SHeng Qi cur_frag_size = truesize;
1466ef75cb51SHeng Qi xdp_frags_truesz += cur_frag_size;
1467ef75cb51SHeng Qi if (unlikely(len > truesize - room || cur_frag_size > PAGE_SIZE)) {
1468ef75cb51SHeng Qi put_page(page);
1469ef75cb51SHeng Qi pr_debug("%s: rx error: len %u exceeds truesize %lu\n",
1470ef75cb51SHeng Qi dev->name, len, (unsigned long)(truesize - room));
1471e2e5c2a3SEric Dumazet DEV_STATS_INC(dev, rx_length_errors);
14724cb00b13SXuan Zhuo goto err;
1473ef75cb51SHeng Qi }
1474ef75cb51SHeng Qi
1475ef75cb51SHeng Qi frag = &shinfo->frags[shinfo->nr_frags++];
1476b51f4113SYunsheng Lin skb_frag_fill_page_desc(frag, page, offset, len);
1477ef75cb51SHeng Qi if (page_is_pfmemalloc(page))
1478ef75cb51SHeng Qi xdp_buff_set_frag_pfmemalloc(xdp);
1479ef75cb51SHeng Qi
1480ef75cb51SHeng Qi shinfo->xdp_frags_size += len;
1481ef75cb51SHeng Qi }
1482ef75cb51SHeng Qi
1483ef75cb51SHeng Qi *xdp_frags_truesize = xdp_frags_truesz;
1484ef75cb51SHeng Qi return 0;
14854cb00b13SXuan Zhuo
14864cb00b13SXuan Zhuo err:
14874cb00b13SXuan Zhuo put_xdp_frags(xdp);
14884cb00b13SXuan Zhuo return -EINVAL;
1489ef75cb51SHeng Qi }
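/* mergeable_ctx_to_truesize()/mergeable_ctx_to_headroom() (defined
 * earlier in this file) unpack a pointer-sized context packed at refill
 * time. A minimal sketch of such an encoding, assuming truesize and
 * headroom each fit their bit fields (the shift value here is
 * illustrative, not the driver's actual constant):
 *
 *	void *ctx = (void *)(unsigned long)((headroom << 22) | truesize);
 *	truesize  = (unsigned long)ctx & ((1UL << 22) - 1);
 *	headroom  = (unsigned long)ctx >> 22;
 *
 * Packing both values into the token returned by the virtqueue avoids a
 * per-buffer allocation just to remember each buffer's true size.
 */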
1490ef75cb51SHeng Qi
1491ad4858beSXuan Zhuo static void *mergeable_xdp_get_buf(struct virtnet_info *vi,
1492ad4858beSXuan Zhuo struct receive_queue *rq,
1493ad4858beSXuan Zhuo struct bpf_prog *xdp_prog,
1494ad4858beSXuan Zhuo void *ctx,
1495ad4858beSXuan Zhuo unsigned int *frame_sz,
1496ad4858beSXuan Zhuo int *num_buf,
1497ad4858beSXuan Zhuo struct page **page,
1498ad4858beSXuan Zhuo int offset,
1499ad4858beSXuan Zhuo unsigned int *len,
1500ad4858beSXuan Zhuo struct virtio_net_hdr_mrg_rxbuf *hdr)
1501ad4858beSXuan Zhuo {
1502ad4858beSXuan Zhuo unsigned int truesize = mergeable_ctx_to_truesize(ctx);
1503ad4858beSXuan Zhuo unsigned int headroom = mergeable_ctx_to_headroom(ctx);
1504ad4858beSXuan Zhuo struct page *xdp_page;
1505ad4858beSXuan Zhuo unsigned int xdp_room;
1506ad4858beSXuan Zhuo
1507ad4858beSXuan Zhuo /* Transient failure which in theory could occur if
1508ad4858beSXuan Zhuo * in-flight packets from before XDP was enabled reach
1509ad4858beSXuan Zhuo * the receive path after XDP is loaded.
1510ad4858beSXuan Zhuo */
1511ad4858beSXuan Zhuo if (unlikely(hdr->hdr.gso_type))
1512ad4858beSXuan Zhuo return NULL;
1513ad4858beSXuan Zhuo
1514*3ef2a16aSHeng Qi /* Partially checksummed packets must be dropped. */
1515*3ef2a16aSHeng Qi if (unlikely(hdr->hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM))
1516*3ef2a16aSHeng Qi return NULL;
1517*3ef2a16aSHeng Qi
1518ad4858beSXuan Zhuo /* Now the XDP core assumes the frag size is PAGE_SIZE, but buffers
1519ad4858beSXuan Zhuo * with headroom may add a hole in truesize, which
1520ad4858beSXuan Zhuo * makes their length exceed PAGE_SIZE. So we disable the
1521ad4858beSXuan Zhuo * hole mechanism for XDP. See add_recvbuf_mergeable().
1522ad4858beSXuan Zhuo */
1523ad4858beSXuan Zhuo *frame_sz = truesize;
1524ad4858beSXuan Zhuo
1525dbe4fec2SXuan Zhuo if (likely(headroom >= virtnet_get_headroom(vi) &&
1526dbe4fec2SXuan Zhuo (*num_buf == 1 || xdp_prog->aux->xdp_has_frags))) {
1527dbe4fec2SXuan Zhuo return page_address(*page) + offset;
1528dbe4fec2SXuan Zhuo }
1529dbe4fec2SXuan Zhuo
1530ad4858beSXuan Zhuo /* This happens when headroom is not enough because
1531ad4858beSXuan Zhuo * the buffer was prefilled before XDP was set.
1532ad4858beSXuan Zhuo * This should only happen for the first several packets.
1533ad4858beSXuan Zhuo * In fact, vq reset could be used here to help us clean up
1534ad4858beSXuan Zhuo * the prefilled buffers, but many existing devices do not
1535ad4858beSXuan Zhuo * support it, and we don't want to bother users who are
1536ad4858beSXuan Zhuo * using XDP normally.
1537ad4858beSXuan Zhuo */
1538dbe4fec2SXuan Zhuo if (!xdp_prog->aux->xdp_has_frags) {
1539ad4858beSXuan Zhuo /* linearize data for XDP */
1540ad4858beSXuan Zhuo xdp_page = xdp_linearize_page(rq, num_buf,
1541ad4858beSXuan Zhuo *page, offset,
1542ad4858beSXuan Zhuo VIRTIO_XDP_HEADROOM,
1543ad4858beSXuan Zhuo len);
1544ad4858beSXuan Zhuo if (!xdp_page)
1545ad4858beSXuan Zhuo return NULL;
1546dbe4fec2SXuan Zhuo } else {
1547ad4858beSXuan Zhuo xdp_room = SKB_DATA_ALIGN(VIRTIO_XDP_HEADROOM +
1548ad4858beSXuan Zhuo sizeof(struct skb_shared_info));
1549ad4858beSXuan Zhuo if (*len + xdp_room > PAGE_SIZE)
1550ad4858beSXuan Zhuo return NULL;
1551ad4858beSXuan Zhuo
1552ad4858beSXuan Zhuo xdp_page = alloc_page(GFP_ATOMIC);
1553ad4858beSXuan Zhuo if (!xdp_page)
1554ad4858beSXuan Zhuo return NULL;
1555ad4858beSXuan Zhuo
1556ad4858beSXuan Zhuo memcpy(page_address(xdp_page) + VIRTIO_XDP_HEADROOM,
1557ad4858beSXuan Zhuo page_address(*page) + offset, *len);
1558ad4858beSXuan Zhuo }
1559ad4858beSXuan Zhuo
1560dbe4fec2SXuan Zhuo *frame_sz = PAGE_SIZE;
1561dbe4fec2SXuan Zhuo
1562dbe4fec2SXuan Zhuo put_page(*page);
1563dbe4fec2SXuan Zhuo
1564dbe4fec2SXuan Zhuo *page = xdp_page;
1565dbe4fec2SXuan Zhuo
1566dbe4fec2SXuan Zhuo return page_address(*page) + VIRTIO_XDP_HEADROOM;
1567ad4858beSXuan Zhuo }
1568ad4858beSXuan Zhuo
1569d8f2835aSXuan Zhuo static struct sk_buff *receive_mergeable_xdp(struct net_device *dev,
1570d8f2835aSXuan Zhuo struct virtnet_info *vi,
1571d8f2835aSXuan Zhuo struct receive_queue *rq,
1572d8f2835aSXuan Zhuo struct bpf_prog *xdp_prog,
1573d8f2835aSXuan Zhuo void *buf,
1574d8f2835aSXuan Zhuo void *ctx,
1575d8f2835aSXuan Zhuo unsigned int len,
1576d8f2835aSXuan Zhuo unsigned int *xdp_xmit,
1577d8f2835aSXuan Zhuo struct virtnet_rq_stats *stats)
1578d8f2835aSXuan Zhuo {
1579d8f2835aSXuan Zhuo struct virtio_net_hdr_mrg_rxbuf *hdr = buf;
1580d8f2835aSXuan Zhuo int num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers);
1581d8f2835aSXuan Zhuo struct page *page = virt_to_head_page(buf);
1582d8f2835aSXuan Zhuo int offset = buf - page_address(page);
1583d8f2835aSXuan Zhuo unsigned int xdp_frags_truesz = 0;
1584d8f2835aSXuan Zhuo struct sk_buff *head_skb;
1585d8f2835aSXuan Zhuo unsigned int frame_sz;
1586d8f2835aSXuan Zhuo struct xdp_buff xdp;
1587d8f2835aSXuan Zhuo void *data;
1588d8f2835aSXuan Zhuo u32 act;
1589d8f2835aSXuan Zhuo int err;
1590d8f2835aSXuan Zhuo
1591d8f2835aSXuan Zhuo data = mergeable_xdp_get_buf(vi, rq, xdp_prog, ctx, &frame_sz, &num_buf, &page,
1592d8f2835aSXuan Zhuo offset, &len, hdr);
1593d8f2835aSXuan Zhuo if (unlikely(!data))
1594d8f2835aSXuan Zhuo goto err_xdp;
1595d8f2835aSXuan Zhuo
1596d8f2835aSXuan Zhuo err = virtnet_build_xdp_buff_mrg(dev, vi, rq, &xdp, data, len, frame_sz,
1597d8f2835aSXuan Zhuo &num_buf, &xdp_frags_truesz, stats);
1598d8f2835aSXuan Zhuo if (unlikely(err))
1599d8f2835aSXuan Zhuo goto err_xdp;
1600d8f2835aSXuan Zhuo
1601d8f2835aSXuan Zhuo act = virtnet_xdp_handler(xdp_prog, &xdp, dev, xdp_xmit, stats);
1602d8f2835aSXuan Zhuo
1603d8f2835aSXuan Zhuo switch (act) {
1604d8f2835aSXuan Zhuo case XDP_PASS:
1605d8f2835aSXuan Zhuo head_skb = build_skb_from_xdp_buff(dev, vi, &xdp, xdp_frags_truesz);
1606d8f2835aSXuan Zhuo if (unlikely(!head_skb))
1607d8f2835aSXuan Zhuo break;
1608d8f2835aSXuan Zhuo return head_skb;
1609d8f2835aSXuan Zhuo
1610d8f2835aSXuan Zhuo case XDP_TX:
1611d8f2835aSXuan Zhuo case XDP_REDIRECT:
1612d8f2835aSXuan Zhuo return NULL;
1613d8f2835aSXuan Zhuo
1614d8f2835aSXuan Zhuo default:
1615d8f2835aSXuan Zhuo break;
1616d8f2835aSXuan Zhuo }
1617d8f2835aSXuan Zhuo
1618d8f2835aSXuan Zhuo put_xdp_frags(&xdp);
1619d8f2835aSXuan Zhuo
1620d8f2835aSXuan Zhuo err_xdp:
1621d8f2835aSXuan Zhuo put_page(page);
1622d8f2835aSXuan Zhuo mergeable_buf_free(rq, num_buf, dev, stats);
1623d8f2835aSXuan Zhuo
162427debe3eSEric Dumazet u64_stats_inc(&stats->xdp_drops);
162527debe3eSEric Dumazet u64_stats_inc(&stats->drops);
1626d8f2835aSXuan Zhuo return NULL;
1627d8f2835aSXuan Zhuo }
1628d8f2835aSXuan Zhuo
16298fc3b9e9SMichael S. Tsirkin static struct sk_buff *receive_mergeable(struct net_device *dev,
1630fdd819b2SMichael S. Tsirkin struct virtnet_info *vi,
16318fc3b9e9SMichael S. Tsirkin struct receive_queue *rq,
1632680557cfSMichael S. Tsirkin void *buf,
1633680557cfSMichael S. Tsirkin void *ctx,
1634186b3c99SJason Wang unsigned int len,
16357d9d60fdSToshiaki Makita unsigned int *xdp_xmit,
1636d46eeeafSJason Wang struct virtnet_rq_stats *stats)
16379ab86bbcSShirley Ma {
1638012873d0SMichael S. Tsirkin struct virtio_net_hdr_mrg_rxbuf *hdr = buf;
1639981f14d4SHeng Qi int num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers);
16408fc3b9e9SMichael S. Tsirkin struct page *page = virt_to_head_page(buf);
16418fc3b9e9SMichael S. Tsirkin int offset = buf - page_address(page);
1642f600b690SJohn Fastabend struct sk_buff *head_skb, *curr_skb;
16439ce6146eSJesper Dangaard Brouer unsigned int truesize = mergeable_ctx_to_truesize(ctx);
16444941d472SJason Wang unsigned int headroom = mergeable_ctx_to_headroom(ctx);
1645ef75cb51SHeng Qi unsigned int tailroom = headroom ? sizeof(struct skb_shared_info) : 0;
1646ef75cb51SHeng Qi unsigned int room = SKB_DATA_ALIGN(headroom + tailroom);
1647ab7db917SMichael Dalton
164856434a01SJohn Fastabend head_skb = NULL;
164927debe3eSEric Dumazet u64_stats_add(&stats->bytes, len - vi->hdr_len);
165056434a01SJohn Fastabend
1651ef75cb51SHeng Qi if (unlikely(len > truesize - room)) {
1652ad993a95SXie Yongji pr_debug("%s: rx error: len %u exceeds truesize %lu\n",
1653ef75cb51SHeng Qi dev->name, len, (unsigned long)(truesize - room));
1654e2e5c2a3SEric Dumazet DEV_STATS_INC(dev, rx_length_errors);
1655ad993a95SXie Yongji goto err_skb;
1656ad993a95SXie Yongji }
16576213f07cSLi RongQing
165859ba3b1aSXuan Zhuo if (unlikely(vi->xdp_enabled)) {
165959ba3b1aSXuan Zhuo struct bpf_prog *xdp_prog;
16606213f07cSLi RongQing
1661f600b690SJohn Fastabend rcu_read_lock();
1662f600b690SJohn Fastabend xdp_prog = rcu_dereference(rq->xdp_prog);
1663f600b690SJohn Fastabend if (xdp_prog) {
1664d8f2835aSXuan Zhuo head_skb = receive_mergeable_xdp(dev, vi, rq, xdp_prog, buf, ctx,
1665d8f2835aSXuan Zhuo len, xdp_xmit, stats);
1666fab89bafSHeng Qi rcu_read_unlock();
16671830f893SJason Wang return head_skb;
166856434a01SJohn Fastabend }
1669f600b690SJohn Fastabend rcu_read_unlock();
167059ba3b1aSXuan Zhuo }
1671f600b690SJohn Fastabend
1672fa0f1ba7SXuan Zhuo head_skb = page_to_skb(vi, rq, page, offset, len, truesize, headroom);
1673f600b690SJohn Fastabend curr_skb = head_skb;
16749ab86bbcSShirley Ma
16758fc3b9e9SMichael S. Tsirkin if (unlikely(!curr_skb))
16768fc3b9e9SMichael S. Tsirkin goto err_skb;
16779ab86bbcSShirley Ma while (--num_buf) {
16788fc3b9e9SMichael S. Tsirkin int num_skb_frags;
16798fc3b9e9SMichael S. Tsirkin
1680295525e2SXuan Zhuo buf = virtnet_rq_get_buf(rq, &len, &ctx);
168103e9f8a0SYunjian Wang if (unlikely(!buf)) {
16828fc3b9e9SMichael S. Tsirkin pr_debug("%s: rx error: %d buffers out of %d missing\n",
1683fdd819b2SMichael S. Tsirkin dev->name, num_buf,
1684012873d0SMichael S. Tsirkin virtio16_to_cpu(vi->vdev,
1685012873d0SMichael S. Tsirkin hdr->num_buffers));
1686e2e5c2a3SEric Dumazet DEV_STATS_INC(dev, rx_length_errors);
16878fc3b9e9SMichael S. Tsirkin goto err_buf;
16883f2c31d9SMark McLoughlin }
16898fc3b9e9SMichael S. Tsirkin
169027debe3eSEric Dumazet u64_stats_add(&stats->bytes, len);
16918fc3b9e9SMichael S. Tsirkin page = virt_to_head_page(buf);
169228b39bc7SJason Wang
169328b39bc7SJason Wang truesize = mergeable_ctx_to_truesize(ctx);
1694ef75cb51SHeng Qi headroom = mergeable_ctx_to_headroom(ctx);
1695ef75cb51SHeng Qi tailroom = headroom ? sizeof(struct skb_shared_info) : 0;
1696ef75cb51SHeng Qi room = SKB_DATA_ALIGN(headroom + tailroom);
1697ef75cb51SHeng Qi if (unlikely(len > truesize - room)) {
169856da5fd0SDan Carpenter pr_debug("%s: rx error: len %u exceeds truesize %lu\n",
1699ef75cb51SHeng Qi dev->name, len, (unsigned long)(truesize - room));
1700e2e5c2a3SEric Dumazet DEV_STATS_INC(dev, rx_length_errors);
1701680557cfSMichael S. Tsirkin goto err_skb;
1702680557cfSMichael S. Tsirkin }
17038fc3b9e9SMichael S. Tsirkin
17048fc3b9e9SMichael S. Tsirkin num_skb_frags = skb_shinfo(curr_skb)->nr_frags;
17052613af0eSMichael Dalton if (unlikely(num_skb_frags == MAX_SKB_FRAGS)) {
17062613af0eSMichael Dalton struct sk_buff *nskb = alloc_skb(0, GFP_ATOMIC);
17078fc3b9e9SMichael S. Tsirkin
17088fc3b9e9SMichael S. Tsirkin if (unlikely(!nskb))
17098fc3b9e9SMichael S. Tsirkin goto err_skb;
17102613af0eSMichael Dalton if (curr_skb == head_skb)
17112613af0eSMichael Dalton skb_shinfo(curr_skb)->frag_list = nskb;
17122613af0eSMichael Dalton else
17132613af0eSMichael Dalton curr_skb->next = nskb;
17142613af0eSMichael Dalton curr_skb = nskb;
17152613af0eSMichael Dalton head_skb->truesize += nskb->truesize;
17162613af0eSMichael Dalton num_skb_frags = 0;
17172613af0eSMichael Dalton }
17182613af0eSMichael Dalton if (curr_skb != head_skb) {
17192613af0eSMichael Dalton head_skb->data_len += len;
17202613af0eSMichael Dalton head_skb->len += len;
1721fb51879dSMichael Dalton head_skb->truesize += truesize;
17222613af0eSMichael Dalton }
17238fc3b9e9SMichael S. Tsirkin offset = buf - page_address(page);
1724ba275241SJason Wang if (skb_can_coalesce(curr_skb, num_skb_frags, page, offset)) {
1725ba275241SJason Wang put_page(page);
1726ba275241SJason Wang skb_coalesce_rx_frag(curr_skb, num_skb_frags - 1,
1727fb51879dSMichael Dalton len, truesize);
1728ba275241SJason Wang } else {
17292613af0eSMichael Dalton skb_add_rx_frag(curr_skb, num_skb_frags, page,
1730fb51879dSMichael Dalton offset, len, truesize);
1731ba275241SJason Wang }
17328fc3b9e9SMichael S. Tsirkin }
17338fc3b9e9SMichael S. Tsirkin
17345377d758SJohannes Berg ewma_pkt_len_add(&rq->mrg_avg_pkt_len, head_skb->len);
17358fc3b9e9SMichael S. Tsirkin return head_skb;
17368fc3b9e9SMichael S. Tsirkin
17378fc3b9e9SMichael S. Tsirkin err_skb:
17388fc3b9e9SMichael S. Tsirkin put_page(page);
173980f50f91SXuan Zhuo mergeable_buf_free(rq, num_buf, dev, stats);
174080f50f91SXuan Zhuo
17418fc3b9e9SMichael S. Tsirkin err_buf:
174227debe3eSEric Dumazet u64_stats_inc(&stats->drops);
17438fc3b9e9SMichael S. Tsirkin dev_kfree_skb(head_skb);
17448fc3b9e9SMichael S. Tsirkin return NULL;
17459ab86bbcSShirley Ma }
17469ab86bbcSShirley Ma
174791f41f01SAndrew Melnychenko static void virtio_skb_set_hash(const struct virtio_net_hdr_v1_hash *hdr_hash,
174891f41f01SAndrew Melnychenko struct sk_buff *skb)
174991f41f01SAndrew Melnychenko {
175091f41f01SAndrew Melnychenko enum pkt_hash_types rss_hash_type;
175191f41f01SAndrew Melnychenko
175291f41f01SAndrew Melnychenko if (!hdr_hash || !skb)
175391f41f01SAndrew Melnychenko return;
175491f41f01SAndrew Melnychenko
175595bb6330SMichael S. Tsirkin switch (__le16_to_cpu(hdr_hash->hash_report)) {
175691f41f01SAndrew Melnychenko case VIRTIO_NET_HASH_REPORT_TCPv4:
175791f41f01SAndrew Melnychenko case VIRTIO_NET_HASH_REPORT_UDPv4:
175891f41f01SAndrew Melnychenko case VIRTIO_NET_HASH_REPORT_TCPv6:
175991f41f01SAndrew Melnychenko case VIRTIO_NET_HASH_REPORT_UDPv6:
176091f41f01SAndrew Melnychenko case VIRTIO_NET_HASH_REPORT_TCPv6_EX:
176191f41f01SAndrew Melnychenko case VIRTIO_NET_HASH_REPORT_UDPv6_EX:
176291f41f01SAndrew Melnychenko rss_hash_type = PKT_HASH_TYPE_L4;
176391f41f01SAndrew Melnychenko break;
176491f41f01SAndrew Melnychenko case VIRTIO_NET_HASH_REPORT_IPv4:
176591f41f01SAndrew Melnychenko case VIRTIO_NET_HASH_REPORT_IPv6:
176691f41f01SAndrew Melnychenko case VIRTIO_NET_HASH_REPORT_IPv6_EX:
176791f41f01SAndrew Melnychenko rss_hash_type = PKT_HASH_TYPE_L3;
176891f41f01SAndrew Melnychenko break;
176991f41f01SAndrew Melnychenko case VIRTIO_NET_HASH_REPORT_NONE:
177091f41f01SAndrew Melnychenko default:
177191f41f01SAndrew Melnychenko rss_hash_type = PKT_HASH_TYPE_NONE;
177291f41f01SAndrew Melnychenko }
177395bb6330SMichael S. Tsirkin skb_set_hash(skb, __le32_to_cpu(hdr_hash->hash_value), rss_hash_type);
177491f41f01SAndrew Melnychenko }
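/* Usage note (illustrative): for the L4 report types above, skb->l4_hash
 * is set, so consumers get the device hash without software dissection:
 *
 *	u32 hash = skb_get_hash(skb);	// returns hdr_hash->hash_value;
 *					// no flow dissector run needed
 *
 * For the L3/NONE types the value is stored but not marked l4-valid, so
 * callers that require an L4 hash may still recompute one in software.
 */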
177591f41f01SAndrew Melnychenko
17767d9d60fdSToshiaki Makita static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
17772471c75eSJesper Dangaard Brouer void *buf, unsigned int len, void **ctx,
1778a0929a44SToshiaki Makita unsigned int *xdp_xmit,
1779d46eeeafSJason Wang struct virtnet_rq_stats *stats)
17809ab86bbcSShirley Ma {
1781e9d7417bSJason Wang struct net_device *dev = vi->dev;
17829ab86bbcSShirley Ma struct sk_buff *skb;
1783dae64749SFeng Liu struct virtio_net_common_hdr *hdr;
1784*3ef2a16aSHeng Qi u8 flags;
17859ab86bbcSShirley Ma
1786bcff3162SMichael S. Tsirkin if (unlikely(len < vi->hdr_len + ETH_HLEN)) {
17879ab86bbcSShirley Ma pr_debug("%s: short packet %i\n", dev->name, len);
1788e2e5c2a3SEric Dumazet DEV_STATS_INC(dev, rx_length_errors);
17893ffd05c2SXuan Zhuo virtnet_rq_free_buf(vi, rq, buf);
17907d9d60fdSToshiaki Makita return;
17919ab86bbcSShirley Ma }
17929ab86bbcSShirley Ma
1793*3ef2a16aSHeng Qi /* 1. Save the flags early, as the XDP program might overwrite them.
1794*3ef2a16aSHeng Qi * These flags ensure packets marked as VIRTIO_NET_HDR_F_DATA_VALID
1795*3ef2a16aSHeng Qi * stay valid after XDP processing.
1796*3ef2a16aSHeng Qi * 2. XDP doesn't work with partially checksummed packets (refer to
1797*3ef2a16aSHeng Qi * virtnet_xdp_set()), so packets marked as
1798*3ef2a16aSHeng Qi * VIRTIO_NET_HDR_F_NEEDS_CSUM get dropped during XDP processing.
1799*3ef2a16aSHeng Qi */
1800*3ef2a16aSHeng Qi flags = ((struct virtio_net_common_hdr *)buf)->hdr.flags;
1801*3ef2a16aSHeng Qi
1802f121159dSMichael S. Tsirkin if (vi->mergeable_rx_bufs)
18037d9d60fdSToshiaki Makita skb = receive_mergeable(dev, vi, rq, buf, ctx, len, xdp_xmit,
1804a0929a44SToshiaki Makita stats);
1805f121159dSMichael S. Tsirkin else if (vi->big_packets)
1806a0929a44SToshiaki Makita skb = receive_big(dev, vi, rq, buf, len, stats);
1807f121159dSMichael S. Tsirkin else
1808a0929a44SToshiaki Makita skb = receive_small(dev, vi, rq, buf, ctx, len, xdp_xmit, stats);
1809f121159dSMichael S. Tsirkin
18108fc3b9e9SMichael S. Tsirkin if (unlikely(!skb))
18117d9d60fdSToshiaki Makita return;
18123f2c31d9SMark McLoughlin
1813dae64749SFeng Liu hdr = skb_vnet_common_hdr(skb);
181491f41f01SAndrew Melnychenko if (dev->features & NETIF_F_RXHASH && vi->has_rss_hash_report)
1815dae64749SFeng Liu virtio_skb_set_hash(&hdr->hash_v1_hdr, skb);
18163fa2a1dfSstephen hemminger
1817*3ef2a16aSHeng Qi if (flags & VIRTIO_NET_HDR_F_DATA_VALID)
181810a8d94aSJason Wang skb->ip_summed = CHECKSUM_UNNECESSARY;
1819296f96fcSRusty Russell
1820e858fae2SMike Rapoport if (virtio_net_hdr_to_skb(skb, &hdr->hdr,
1821e858fae2SMike Rapoport virtio_is_little_endian(vi->vdev))) {
1822e858fae2SMike Rapoport net_warn_ratelimited("%s: bad gso: type: %u, size: %u\n",
1823e858fae2SMike Rapoport dev->name, hdr->hdr.gso_type,
1824fdd819b2SMichael S. Tsirkin hdr->hdr.gso_size);
1825296f96fcSRusty Russell goto frame_err;
1826296f96fcSRusty Russell }
1827296f96fcSRusty Russell
1828133bbb18SWillem de Bruijn skb_record_rx_queue(skb, vq2rxq(rq->vq));
1829d1dc06dcSMike Rapoport skb->protocol = eth_type_trans(skb, dev);
1830d1dc06dcSMike Rapoport pr_debug("Receiving skb proto 0x%04x len %i type %i\n",
1831d1dc06dcSMike Rapoport ntohs(skb->protocol), skb->len, skb->pkt_type);
1832d1dc06dcSMike Rapoport
18330fbd050aSEric Dumazet napi_gro_receive(&rq->napi, skb);
18347d9d60fdSToshiaki Makita return;
1835296f96fcSRusty Russell
1836296f96fcSRusty Russell frame_err:
1837e2e5c2a3SEric Dumazet DEV_STATS_INC(dev, rx_frame_errors);
1838296f96fcSRusty Russell dev_kfree_skb(skb);
1839296f96fcSRusty Russell }
1840296f96fcSRusty Russell
1841192f68cfSJason Wang /* Unlike mergeable buffers, all buffers are allocated with the
1842192f68cfSJason Wang * same size, except for the headroom. For this reason we do
1843192f68cfSJason Wang * not need to use mergeable_len_to_ctx here - it is enough
1844192f68cfSJason Wang * to store the headroom as the context, ignoring the truesize.
1845192f68cfSJason Wang */
1846946fa564SMichael S. Tsirkin static int add_recvbuf_small(struct virtnet_info *vi, struct receive_queue *rq,
1847946fa564SMichael S. Tsirkin gfp_t gfp)
1848296f96fcSRusty Russell {
1849f6b10209SJason Wang char *buf;
18502de2f7f4SJohn Fastabend unsigned int xdp_headroom = virtnet_get_headroom(vi);
1851192f68cfSJason Wang void *ctx = (void *)(unsigned long)xdp_headroom;
1852f6b10209SJason Wang int len = vi->hdr_len + VIRTNET_RX_PAD + GOOD_PACKET_LEN + xdp_headroom;
18539ab86bbcSShirley Ma int err;
18543f2c31d9SMark McLoughlin
1855f6b10209SJason Wang len = SKB_DATA_ALIGN(len) +
1856f6b10209SJason Wang SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
1857295525e2SXuan Zhuo
1858295525e2SXuan Zhuo buf = virtnet_rq_alloc(rq, len, gfp);
1859295525e2SXuan Zhuo if (unlikely(!buf))
18609ab86bbcSShirley Ma return -ENOMEM;
1861296f96fcSRusty Russell
1862295525e2SXuan Zhuo virtnet_rq_init_one_sg(rq, buf + VIRTNET_RX_PAD + xdp_headroom,
1863f6b10209SJason Wang vi->hdr_len + GOOD_PACKET_LEN);
1864295525e2SXuan Zhuo
1865192f68cfSJason Wang err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp);
1866295525e2SXuan Zhuo if (err < 0) {
1867295525e2SXuan Zhuo if (rq->do_dma)
1868295525e2SXuan Zhuo virtnet_rq_unmap(rq, buf, 0);
1869f6b10209SJason Wang put_page(virt_to_head_page(buf));
1870295525e2SXuan Zhuo }
1871295525e2SXuan Zhuo
18729ab86bbcSShirley Ma return err;
187397402b96SHerbert Xu }
187497402b96SHerbert Xu
1875012873d0SMichael S. Tsirkin static int add_recvbuf_big(struct virtnet_info *vi, struct receive_queue *rq,
1876012873d0SMichael S. Tsirkin gfp_t gfp)
18779ab86bbcSShirley Ma {
18789ab86bbcSShirley Ma struct page *first, *list = NULL;
18799ab86bbcSShirley Ma char *p;
18809ab86bbcSShirley Ma int i, err, offset;
1881296f96fcSRusty Russell
18824959aebbSGavin Li sg_init_table(rq->sg, vi->big_packets_num_skbfrags + 2);
1883a5835440SRusty Russell
18844959aebbSGavin Li /* page in rq->sg[vi->big_packets_num_skbfrags + 1] is list tail */
18854959aebbSGavin Li for (i = vi->big_packets_num_skbfrags + 1; i > 1; --i) {
1886e9d7417bSJason Wang first = get_a_page(rq, gfp);
18879ab86bbcSShirley Ma if (!first) {
18889ab86bbcSShirley Ma if (list)
1889e9d7417bSJason Wang give_pages(rq, list);
18909ab86bbcSShirley Ma return -ENOMEM;
1891296f96fcSRusty Russell }
1892e9d7417bSJason Wang sg_set_buf(&rq->sg[i], page_address(first), PAGE_SIZE);
18939ab86bbcSShirley Ma
18949ab86bbcSShirley Ma /* chain new page in list head to match sg */
18959ab86bbcSShirley Ma first->private = (unsigned long)list;
18969ab86bbcSShirley Ma list = first;
18979ab86bbcSShirley Ma }
18989ab86bbcSShirley Ma
1899e9d7417bSJason Wang first = get_a_page(rq, gfp);
19009ab86bbcSShirley Ma if (!first) {
1901e9d7417bSJason Wang give_pages(rq, list);
19029ab86bbcSShirley Ma return -ENOMEM;
19039ab86bbcSShirley Ma }
19049ab86bbcSShirley Ma p = page_address(first);
19059ab86bbcSShirley Ma
1906e9d7417bSJason Wang /* rq->sg[0], rq->sg[1] share the same page */
1907012873d0SMichael S. Tsirkin /* a separated rq->sg[0] for header - required in case !any_header_sg */
1908012873d0SMichael S. Tsirkin sg_set_buf(&rq->sg[0], p, vi->hdr_len);
19099ab86bbcSShirley Ma
1910e9d7417bSJason Wang /* rq->sg[1] for data packet, from offset */
19119ab86bbcSShirley Ma offset = sizeof(struct padded_vnet_hdr);
1912e9d7417bSJason Wang sg_set_buf(&rq->sg[1], p + offset, PAGE_SIZE - offset);
19139ab86bbcSShirley Ma
19149ab86bbcSShirley Ma /* chain first in list head */
19159ab86bbcSShirley Ma first->private = (unsigned long)list;
19164959aebbSGavin Li err = virtqueue_add_inbuf(rq->vq, rq->sg, vi->big_packets_num_skbfrags + 2,
1917aa989f5eSMichael S. Tsirkin first, gfp);
19189ab86bbcSShirley Ma if (err < 0)
1919e9d7417bSJason Wang give_pages(rq, first);
19209ab86bbcSShirley Ma
19219ab86bbcSShirley Ma return err;
19229ab86bbcSShirley Ma }
19239ab86bbcSShirley Ma
1924d85b758fSMichael S. Tsirkin static unsigned int get_mergeable_buf_len(struct receive_queue *rq,
19253cc81a9aSJason Wang struct ewma_pkt_len *avg_pkt_len,
19263cc81a9aSJason Wang unsigned int room)
19279ab86bbcSShirley Ma {
1928c1ddc42dSAndrew Melnychenko struct virtnet_info *vi = rq->vq->vdev->priv;
1929c1ddc42dSAndrew Melnychenko const size_t hdr_len = vi->hdr_len;
1930fbf28d78SMichael Dalton unsigned int len;
1931fbf28d78SMichael Dalton
19323cc81a9aSJason Wang if (room)
19333cc81a9aSJason Wang return PAGE_SIZE - room;
19343cc81a9aSJason Wang
19355377d758SJohannes Berg len = hdr_len + clamp_t(unsigned int, ewma_pkt_len_read(avg_pkt_len),
1936f0c3192cSMichael S. Tsirkin rq->min_buf_len, PAGE_SIZE - hdr_len);
19373cc81a9aSJason Wang
1938e377fcc8SMichael S. Tsirkin return ALIGN(len, L1_CACHE_BYTES);
1939fbf28d78SMichael Dalton }
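/* Worked example (illustrative, 4 KiB pages, 64-byte cache lines): with
 * the 12-byte mergeable header and an EWMA average of, say, 1400 bytes:
 *
 *	len = 12 + clamp(1400, rq->min_buf_len, 4096 - 12) = 1412
 *	ALIGN(1412, L1_CACHE_BYTES) = 1472 bytes per posted buffer
 *
 * so several average-sized packets can share one page frag. In the XDP
 * case (room != 0) coalescing is abandoned and PAGE_SIZE - room is used
 * instead, matching XDP's whole-page expectations.
 */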
1940fbf28d78SMichael Dalton
19412de2f7f4SJohn Fastabend static int add_recvbuf_mergeable(struct virtnet_info *vi,
19422de2f7f4SJohn Fastabend struct receive_queue *rq, gfp_t gfp)
1943fbf28d78SMichael Dalton {
1944fb51879dSMichael Dalton struct page_frag *alloc_frag = &rq->alloc_frag;
19452de2f7f4SJohn Fastabend unsigned int headroom = virtnet_get_headroom(vi);
19463cc81a9aSJason Wang unsigned int tailroom = headroom ? sizeof(struct skb_shared_info) : 0;
19473cc81a9aSJason Wang unsigned int room = SKB_DATA_ALIGN(headroom + tailroom);
1948fb51879dSMichael Dalton unsigned int len, hole;
1949295525e2SXuan Zhuo void *ctx;
1950295525e2SXuan Zhuo char *buf;
1951295525e2SXuan Zhuo int err;
19529ab86bbcSShirley Ma
19533cc81a9aSJason Wang /* Extra tailroom is needed to satisfy XDP's assumption. This
19543cc81a9aSJason Wang * means rx frag coalescing won't work, but since we've
19553cc81a9aSJason Wang * disabled GSO for XDP, it won't be a big issue.
19563cc81a9aSJason Wang */
19573cc81a9aSJason Wang len = get_mergeable_buf_len(rq, &rq->mrg_avg_pkt_len, room);
1958295525e2SXuan Zhuo
1959295525e2SXuan Zhuo buf = virtnet_rq_alloc(rq, len + room, gfp);
1960295525e2SXuan Zhuo if (unlikely(!buf))
19619ab86bbcSShirley Ma return -ENOMEM;
1962ab7db917SMichael Dalton
19632de2f7f4SJohn Fastabend buf += headroom; /* advance address leaving hole at front of pkt */
1964fb51879dSMichael Dalton hole = alloc_frag->size - alloc_frag->offset;
19653cc81a9aSJason Wang if (hole < len + room) {
1966ab7db917SMichael Dalton /* To avoid internal fragmentation, if it is very likely that there is
1967ab7db917SMichael Dalton * not enough space for another buffer, add the remaining space to
19681daa8790SMichael S. Tsirkin * the current buffer.
1969484beac2SHeng Qi * XDP core assumes that frame_size of xdp_buff and the length
1970484beac2SHeng Qi * of the frag are PAGE_SIZE, so we disable the hole mechanism.
1971ab7db917SMichael Dalton */
1972484beac2SHeng Qi if (!headroom)
1973fb51879dSMichael Dalton len += hole;
1974fb51879dSMichael Dalton alloc_frag->offset += hole;
1975fb51879dSMichael Dalton }
19769ab86bbcSShirley Ma
1977295525e2SXuan Zhuo virtnet_rq_init_one_sg(rq, buf, len);
1978295525e2SXuan Zhuo
1979ef75cb51SHeng Qi ctx = mergeable_len_to_ctx(len + room, headroom);
1980680557cfSMichael S. Tsirkin err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp);
1981295525e2SXuan Zhuo if (err < 0) {
1982295525e2SXuan Zhuo if (rq->do_dma)
1983295525e2SXuan Zhuo virtnet_rq_unmap(rq, buf, 0);
19842613af0eSMichael Dalton put_page(virt_to_head_page(buf));
1985295525e2SXuan Zhuo }
19869ab86bbcSShirley Ma
19879ab86bbcSShirley Ma return err;
1988296f96fcSRusty Russell }
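/* Hole example (illustrative): suppose the page frag has 1600 bytes left
 * and this refill consumed len + room = 1472 of them. The remaining
 * hole of 128 bytes is smaller than another len + room buffer, so in the
 * non-XDP case (headroom == 0) it is folded into this buffer
 * (len += hole) instead of being wasted. With XDP headroom the hole is
 * left untouched so the frag length keeps matching the PAGE_SIZE
 * assumption described above.
 */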
1989296f96fcSRusty Russell
1990b2baed69SRusty Russell /*
1991b2baed69SRusty Russell * Returns false if we couldn't fill entirely (OOM).
1992b2baed69SRusty Russell *
1993b2baed69SRusty Russell * Normally run in the receive path, but can also be run from ndo_open
1994b2baed69SRusty Russell * before we're receiving packets, or from refill_work which is
1995b2baed69SRusty Russell * careful to disable receiving (using napi_disable).
1996b2baed69SRusty Russell */
1997946fa564SMichael S. Tsirkin static bool try_fill_recv(struct virtnet_info *vi, struct receive_queue *rq,
1998946fa564SMichael S. Tsirkin gfp_t gfp)
19993f2c31d9SMark McLoughlin {
20003f2c31d9SMark McLoughlin int err;
20011788f495SMichael S. Tsirkin bool oom;
20023f2c31d9SMark McLoughlin
20030aea51c3SAmit Shah do {
20049ab86bbcSShirley Ma if (vi->mergeable_rx_bufs)
20052de2f7f4SJohn Fastabend err = add_recvbuf_mergeable(vi, rq, gfp);
20069ab86bbcSShirley Ma else if (vi->big_packets)
2007012873d0SMichael S. Tsirkin err = add_recvbuf_big(vi, rq, gfp);
20089ab86bbcSShirley Ma else
2009946fa564SMichael S. Tsirkin err = add_recvbuf_small(vi, rq, gfp);
20103f2c31d9SMark McLoughlin
20111788f495SMichael S. Tsirkin oom = err == -ENOMEM;
20129ed4cb07SRusty Russell if (err)
20133f2c31d9SMark McLoughlin break;
2014b7dfde95SLinus Torvalds } while (rq->vq->num_free);
2015461f03dcSToshiaki Makita if (virtqueue_kick_prepare(rq->vq) && virtqueue_notify(rq->vq)) {
201601c32598SMichael S. Tsirkin unsigned long flags;
201701c32598SMichael S. Tsirkin
201801c32598SMichael S. Tsirkin flags = u64_stats_update_begin_irqsave(&rq->stats.syncp);
201927debe3eSEric Dumazet u64_stats_inc(&rq->stats.kicks);
202001c32598SMichael S. Tsirkin u64_stats_update_end_irqrestore(&rq->stats.syncp, flags);
2021461f03dcSToshiaki Makita }
2022461f03dcSToshiaki Makita
20233161e453SRusty Russell return !oom;
20243f2c31d9SMark McLoughlin }
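/* The split kick above is the usual virtqueue notification pattern
 * (sketch), equivalent to virtqueue_kick() but letting the driver count
 * only the notifications that actually reached the device:
 *
 *	if (virtqueue_kick_prepare(vq))	// device asked to be notified?
 *		virtqueue_notify(vq);	// expensive exit to the host
 *
 * With VIRTIO_F_EVENT_IDX the prepare step often returns false, so the
 * stats.kicks counter also reflects real notification traffic.
 */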
20253f2c31d9SMark McLoughlin
202618445c4dSRusty Russell static void skb_recv_done(struct virtqueue *rvq)
2027296f96fcSRusty Russell {
2028296f96fcSRusty Russell struct virtnet_info *vi = rvq->vdev->priv;
2029986a4f4dSJason Wang struct receive_queue *rq = &vi->rq[vq2rxq(rvq)];
2030e9d7417bSJason Wang
2031e4e8452aSWillem de Bruijn virtqueue_napi_schedule(&rq->napi, rvq);
2032296f96fcSRusty Russell }
2033296f96fcSRusty Russell
2034e4e8452aSWillem de Bruijn static void virtnet_napi_enable(struct virtqueue *vq, struct napi_struct *napi)
20353e9d08ecSBruce Rogers {
2036e4e8452aSWillem de Bruijn napi_enable(napi);
20373e9d08ecSBruce Rogers
20383e9d08ecSBruce Rogers /* If all buffers were filled by the other side before we enabled NAPI,
2039e4e8452aSWillem de Bruijn * we won't get another interrupt, so process any outstanding packets now.
2040e4e8452aSWillem de Bruijn * Call local_bh_enable afterwards to trigger softIRQ processing.
2041e4e8452aSWillem de Bruijn */
2042ec13ee80SMichael S. Tsirkin local_bh_disable();
2043e4e8452aSWillem de Bruijn virtqueue_napi_schedule(napi, vq);
2044ec13ee80SMichael S. Tsirkin local_bh_enable();
20453e9d08ecSBruce Rogers }
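/* Race being closed above (illustrative timeline):
 *
 *	device fills the whole ring and fires its interrupt
 *	  -> skb_recv_done() runs while NAPI is still disabled: no poll
 *	napi_enable()
 *	  -> ring already full, device sends no further interrupt
 *
 * Without the explicit schedule, the full ring would never be drained.
 * Doing it inside local_bh_disable()/local_bh_enable() makes the
 * re-enable kick off softirq processing immediately.
 */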
20463e9d08ecSBruce Rogers
2047b92f1e67SWillem de Bruijn static void virtnet_napi_tx_enable(struct virtnet_info *vi,
2048b92f1e67SWillem de Bruijn struct virtqueue *vq,
2049b92f1e67SWillem de Bruijn struct napi_struct *napi)
2050b92f1e67SWillem de Bruijn {
2051b92f1e67SWillem de Bruijn if (!napi->weight)
2052b92f1e67SWillem de Bruijn return;
2053b92f1e67SWillem de Bruijn
2054b92f1e67SWillem de Bruijn /* Tx napi touches cachelines on the cpu handling tx interrupts. Only
2055b92f1e67SWillem de Bruijn * enable the feature if this is likely affine with the transmit path.
2056b92f1e67SWillem de Bruijn */
2057b92f1e67SWillem de Bruijn if (!vi->affinity_hint_set) {
2058b92f1e67SWillem de Bruijn napi->weight = 0;
2059b92f1e67SWillem de Bruijn return;
2060b92f1e67SWillem de Bruijn }
2061b92f1e67SWillem de Bruijn
2062b92f1e67SWillem de Bruijn return virtnet_napi_enable(vq, napi);
2063b92f1e67SWillem de Bruijn }
2064b92f1e67SWillem de Bruijn
206578a57b48SWillem de Bruijn static void virtnet_napi_tx_disable(struct napi_struct *napi)
206678a57b48SWillem de Bruijn {
206778a57b48SWillem de Bruijn if (napi->weight)
206878a57b48SWillem de Bruijn napi_disable(napi);
206978a57b48SWillem de Bruijn }
207078a57b48SWillem de Bruijn
20713161e453SRusty Russell static void refill_work(struct work_struct *work)
20723161e453SRusty Russell {
2073e9d7417bSJason Wang struct virtnet_info *vi =
2074e9d7417bSJason Wang container_of(work, struct virtnet_info, refill.work);
20753161e453SRusty Russell bool still_empty;
2076986a4f4dSJason Wang int i;
20773161e453SRusty Russell
207855257d72SSasha Levin for (i = 0; i < vi->curr_queue_pairs; i++) {
2079986a4f4dSJason Wang struct receive_queue *rq = &vi->rq[i];
2080986a4f4dSJason Wang
2081986a4f4dSJason Wang napi_disable(&rq->napi);
2082946fa564SMichael S. Tsirkin still_empty = !try_fill_recv(vi, rq, GFP_KERNEL);
2083e4e8452aSWillem de Bruijn virtnet_napi_enable(rq->vq, &rq->napi);
20843161e453SRusty Russell
20853161e453SRusty Russell /* In theory, this can happen: if we don't get any buffers in,
2086986a4f4dSJason Wang * we will *never* try to fill again.
2087986a4f4dSJason Wang */
20883161e453SRusty Russell if (still_empty)
20893b07e9caSTejun Heo schedule_delayed_work(&vi->refill, HZ/2);
20903161e453SRusty Russell }
2091986a4f4dSJason Wang }
20923161e453SRusty Russell
20932471c75eSJesper Dangaard Brouer static int virtnet_receive(struct receive_queue *rq, int budget,
20942471c75eSJesper Dangaard Brouer unsigned int *xdp_xmit)
2095296f96fcSRusty Russell {
2096e9d7417bSJason Wang struct virtnet_info *vi = rq->vq->vdev->priv;
2097d46eeeafSJason Wang struct virtnet_rq_stats stats = {};
2098a0929a44SToshiaki Makita unsigned int len;
209927debe3eSEric Dumazet int packets = 0;
21009ab86bbcSShirley Ma void *buf;
2101a0929a44SToshiaki Makita int i;
2102296f96fcSRusty Russell
2103192f68cfSJason Wang if (!vi->big_packets || vi->mergeable_rx_bufs) {
2104680557cfSMichael S. Tsirkin void *ctx;
2105680557cfSMichael S. Tsirkin
210627debe3eSEric Dumazet while (packets < budget &&
2107295525e2SXuan Zhuo (buf = virtnet_rq_get_buf(rq, &len, &ctx))) {
2108a0929a44SToshiaki Makita receive_buf(vi, rq, buf, len, ctx, xdp_xmit, &stats);
210927debe3eSEric Dumazet packets++;
2110680557cfSMichael S. Tsirkin }
2111680557cfSMichael S. Tsirkin } else {
211227debe3eSEric Dumazet while (packets < budget &&
2113295525e2SXuan Zhuo (buf = virtnet_rq_get_buf(rq, &len, NULL)) != NULL) {
2114a0929a44SToshiaki Makita receive_buf(vi, rq, buf, len, NULL, xdp_xmit, &stats);
211527debe3eSEric Dumazet packets++;
2116296f96fcSRusty Russell }
2117680557cfSMichael S. Tsirkin }
2118296f96fcSRusty Russell
2119718be6baS? jiang if (rq->vq->num_free > min((unsigned int)budget, virtqueue_get_vring_size(rq->vq)) / 2) {
21205a159128SJason Wang if (!try_fill_recv(vi, rq, GFP_ATOMIC)) {
21215a159128SJason Wang spin_lock(&vi->refill_lock);
21225a159128SJason Wang if (vi->refill_enabled)
21233b07e9caSTejun Heo schedule_delayed_work(&vi->refill, 0);
21245a159128SJason Wang spin_unlock(&vi->refill_lock);
21255a159128SJason Wang }
21263161e453SRusty Russell }
2127296f96fcSRusty Russell
212827debe3eSEric Dumazet u64_stats_set(&stats.packets, packets);
2129d7dfc5cfSToshiaki Makita u64_stats_update_begin(&rq->stats.syncp);
2130a0929a44SToshiaki Makita for (i = 0; i < VIRTNET_RQ_STATS_LEN; i++) {
2131a0929a44SToshiaki Makita size_t offset = virtnet_rq_stats_desc[i].offset;
213227debe3eSEric Dumazet u64_stats_t *item, *src;
2133a0929a44SToshiaki Makita
213427debe3eSEric Dumazet item = (u64_stats_t *)((u8 *)&rq->stats + offset);
213527debe3eSEric Dumazet src = (u64_stats_t *)((u8 *)&stats + offset);
213627debe3eSEric Dumazet u64_stats_add(item, u64_stats_read(src));
2137a0929a44SToshiaki Makita }
2138d7dfc5cfSToshiaki Makita u64_stats_update_end(&rq->stats.syncp);
213961845d20SJason Wang
214027debe3eSEric Dumazet return packets;
21412ffa7598SJason Wang }
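/* The u64_stats writer pattern above pairs with readers that retry on
 * torn updates (a sketch of the canonical consumer, e.g. in
 * .ndo_get_stats64):
 *
 *	unsigned int start;
 *	u64 packets;
 *
 *	do {
 *		start = u64_stats_fetch_begin(&rq->stats.syncp);
 *		packets = u64_stats_read(&rq->stats.packets);
 *	} while (u64_stats_fetch_retry(&rq->stats.syncp, start));
 *
 * On 64-bit SMP the seqcount compiles away; on 32-bit it prevents
 * observing half of a 64-bit counter update.
 */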
21422ffa7598SJason Wang
21437b0411efSWillem de Bruijn static void virtnet_poll_cleantx(struct receive_queue *rq)
21447b0411efSWillem de Bruijn {
21457b0411efSWillem de Bruijn struct virtnet_info *vi = rq->vq->vdev->priv;
21467b0411efSWillem de Bruijn unsigned int index = vq2rxq(rq->vq);
21477b0411efSWillem de Bruijn struct send_queue *sq = &vi->sq[index];
21487b0411efSWillem de Bruijn struct netdev_queue *txq = netdev_get_tx_queue(vi->dev, index);
21497b0411efSWillem de Bruijn
2150534da5e8SToshiaki Makita if (!sq->napi.weight || is_xdp_raw_buffer_queue(vi, index))
21517b0411efSWillem de Bruijn return;
21527b0411efSWillem de Bruijn
21537b0411efSWillem de Bruijn if (__netif_tx_trylock(txq)) {
2154ebcce492SXuan Zhuo if (sq->reset) {
2155ebcce492SXuan Zhuo __netif_tx_unlock(txq);
2156ebcce492SXuan Zhuo return;
2157ebcce492SXuan Zhuo }
2158ebcce492SXuan Zhuo
2159a7766ef1SMichael S. Tsirkin do {
2160a7766ef1SMichael S. Tsirkin virtqueue_disable_cb(sq->vq);
2161df133f3fSMichael S. Tsirkin free_old_xmit_skbs(sq, true);
2162a7766ef1SMichael S. Tsirkin } while (unlikely(!virtqueue_enable_cb_delayed(sq->vq)));
21637b0411efSWillem de Bruijn
21647b0411efSWillem de Bruijn if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS)
21657b0411efSWillem de Bruijn netif_tx_wake_queue(txq);
216622bc63c5SMichael S. Tsirkin
216722bc63c5SMichael S. Tsirkin __netif_tx_unlock(txq);
216822bc63c5SMichael S. Tsirkin }
21697b0411efSWillem de Bruijn }
21707b0411efSWillem de Bruijn
21712ffa7598SJason Wang static int virtnet_poll(struct napi_struct *napi, int budget)
21722ffa7598SJason Wang {
21732ffa7598SJason Wang struct receive_queue *rq =
21742ffa7598SJason Wang container_of(napi, struct receive_queue, napi);
21759267c430SJason Wang struct virtnet_info *vi = rq->vq->vdev->priv;
21769267c430SJason Wang struct send_queue *sq;
21772a43565cSToshiaki Makita unsigned int received;
21782471c75eSJesper Dangaard Brouer unsigned int xdp_xmit = 0;
21792ffa7598SJason Wang
21807b0411efSWillem de Bruijn virtnet_poll_cleantx(rq);
21817b0411efSWillem de Bruijn
2182186b3c99SJason Wang received = virtnet_receive(rq, budget, &xdp_xmit);
21832ffa7598SJason Wang
2184ad7e615fSMagnus Karlsson if (xdp_xmit & VIRTIO_XDP_REDIR)
2185ad7e615fSMagnus Karlsson xdp_do_flush();
2186ad7e615fSMagnus Karlsson
21878329d98eSRusty Russell /* Out of packets? */
2188e4e8452aSWillem de Bruijn if (received < budget)
2189e4e8452aSWillem de Bruijn virtqueue_napi_complete(napi, rq->vq, received);
2190296f96fcSRusty Russell
21912471c75eSJesper Dangaard Brouer if (xdp_xmit & VIRTIO_XDP_TX) {
219297c2c69eSXuan Zhuo sq = virtnet_xdp_get_sq(vi);
2193461f03dcSToshiaki Makita if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq)) {
2194461f03dcSToshiaki Makita u64_stats_update_begin(&sq->stats.syncp);
219527debe3eSEric Dumazet u64_stats_inc(&sq->stats.kicks);
2196461f03dcSToshiaki Makita u64_stats_update_end(&sq->stats.syncp);
2197461f03dcSToshiaki Makita }
219897c2c69eSXuan Zhuo virtnet_xdp_put_sq(vi, sq);
21999267c430SJason Wang }
2200186b3c99SJason Wang
2201296f96fcSRusty Russell return received;
2202296f96fcSRusty Russell }
2203296f96fcSRusty Russell
22045306623aSFeng Liu static void virtnet_disable_queue_pair(struct virtnet_info *vi, int qp_index)
22055306623aSFeng Liu {
22065306623aSFeng Liu virtnet_napi_tx_disable(&vi->sq[qp_index].napi);
22075306623aSFeng Liu napi_disable(&vi->rq[qp_index].napi);
22085306623aSFeng Liu xdp_rxq_info_unreg(&vi->rq[qp_index].xdp_rxq);
22095306623aSFeng Liu }
22105306623aSFeng Liu
22115306623aSFeng Liu static int virtnet_enable_queue_pair(struct virtnet_info *vi, int qp_index)
22125306623aSFeng Liu {
22135306623aSFeng Liu struct net_device *dev = vi->dev;
22145306623aSFeng Liu int err;
22155306623aSFeng Liu
22165306623aSFeng Liu err = xdp_rxq_info_reg(&vi->rq[qp_index].xdp_rxq, dev, qp_index,
22175306623aSFeng Liu vi->rq[qp_index].napi.napi_id);
22185306623aSFeng Liu if (err < 0)
22195306623aSFeng Liu return err;
22205306623aSFeng Liu
22215306623aSFeng Liu err = xdp_rxq_info_reg_mem_model(&vi->rq[qp_index].xdp_rxq,
22225306623aSFeng Liu MEM_TYPE_PAGE_SHARED, NULL);
22235306623aSFeng Liu if (err < 0)
22245306623aSFeng Liu goto err_xdp_reg_mem_model;
22255306623aSFeng Liu
22265306623aSFeng Liu virtnet_napi_enable(vi->rq[qp_index].vq, &vi->rq[qp_index].napi);
22275306623aSFeng Liu virtnet_napi_tx_enable(vi, vi->sq[qp_index].vq, &vi->sq[qp_index].napi);
22285306623aSFeng Liu
22295306623aSFeng Liu return 0;
22305306623aSFeng Liu
22315306623aSFeng Liu err_xdp_reg_mem_model:
22325306623aSFeng Liu xdp_rxq_info_unreg(&vi->rq[qp_index].xdp_rxq);
22335306623aSFeng Liu return err;
22345306623aSFeng Liu }
22355306623aSFeng Liu
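/* ndo_open: re-enable delayed refill, pre-fill each active receive ring
 * (deferring to the refill workqueue on OOM) and bring up NAPI on every
 * queue pair. On failure, the pairs that were already enabled are torn
 * down again in reverse order.
 */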
2236986a4f4dSJason Wang static int virtnet_open(struct net_device *dev)
2237986a4f4dSJason Wang {
2238986a4f4dSJason Wang struct virtnet_info *vi = netdev_priv(dev);
2239754b8a21SJesper Dangaard Brouer int i, err;
2240986a4f4dSJason Wang
22415a159128SJason Wang enable_delayed_refill(vi);
22425a159128SJason Wang
2243e4166625SJason Wang for (i = 0; i < vi->max_queue_pairs; i++) {
2244e4166625SJason Wang if (i < vi->curr_queue_pairs)
2245986a4f4dSJason Wang /* Make sure we have some buffers: if oom use wq. */
2246946fa564SMichael S. Tsirkin if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL))
2247986a4f4dSJason Wang schedule_delayed_work(&vi->refill, 0);
2248754b8a21SJesper Dangaard Brouer
22495306623aSFeng Liu err = virtnet_enable_queue_pair(vi, i);
2250754b8a21SJesper Dangaard Brouer if (err < 0)
22515306623aSFeng Liu goto err_enable_qp;
2252986a4f4dSJason Wang }
2253986a4f4dSJason Wang
2254986a4f4dSJason Wang return 0;
22555306623aSFeng Liu
22565306623aSFeng Liu err_enable_qp:
22575306623aSFeng Liu disable_delayed_refill(vi);
22585306623aSFeng Liu cancel_delayed_work_sync(&vi->refill);
22595306623aSFeng Liu
22605306623aSFeng Liu for (i--; i >= 0; i--)
22615306623aSFeng Liu virtnet_disable_queue_pair(vi, i);
22625306623aSFeng Liu return err;
2263986a4f4dSJason Wang }
2264986a4f4dSJason Wang
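/* NAPI poll handler for a send queue. The callback is re-armed with
 * virtqueue_enable_cb_prepare() before napi_complete_done(), and
 * virtqueue_poll() is consulted afterwards: if the device used more
 * buffers in between, NAPI is rescheduled so no completion is missed
 * while callbacks were disabled.
 */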
2265b92f1e67SWillem de Bruijn static int virtnet_poll_tx(struct napi_struct *napi, int budget)
2266b92f1e67SWillem de Bruijn {
2267b92f1e67SWillem de Bruijn struct send_queue *sq = container_of(napi, struct send_queue, napi);
2268b92f1e67SWillem de Bruijn struct virtnet_info *vi = sq->vq->vdev->priv;
2269534da5e8SToshiaki Makita unsigned int index = vq2txq(sq->vq);
2270534da5e8SToshiaki Makita struct netdev_queue *txq;
22715a2f966dSMichael S. Tsirkin int opaque;
22725a2f966dSMichael S. Tsirkin bool done;
2273b92f1e67SWillem de Bruijn
2274534da5e8SToshiaki Makita if (unlikely(is_xdp_raw_buffer_queue(vi, index))) {
2275534da5e8SToshiaki Makita /* We don't need to enable cb for XDP */
2276534da5e8SToshiaki Makita napi_complete_done(napi, 0);
2277534da5e8SToshiaki Makita return 0;
2278534da5e8SToshiaki Makita }
2279534da5e8SToshiaki Makita
2280534da5e8SToshiaki Makita txq = netdev_get_tx_queue(vi->dev, index);
2281b92f1e67SWillem de Bruijn __netif_tx_lock(txq, raw_smp_processor_id());
22825a2f966dSMichael S. Tsirkin virtqueue_disable_cb(sq->vq);
2283df133f3fSMichael S. Tsirkin free_old_xmit_skbs(sq, true);
22845a2f966dSMichael S. Tsirkin
228522bc63c5SMichael S. Tsirkin if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS)
228622bc63c5SMichael S. Tsirkin netif_tx_wake_queue(txq);
228722bc63c5SMichael S. Tsirkin
22885a2f966dSMichael S. Tsirkin opaque = virtqueue_enable_cb_prepare(sq->vq);
22895a2f966dSMichael S. Tsirkin
22905a2f966dSMichael S. Tsirkin done = napi_complete_done(napi, 0);
22915a2f966dSMichael S. Tsirkin
22925a2f966dSMichael S. Tsirkin if (!done)
22935a2f966dSMichael S. Tsirkin virtqueue_disable_cb(sq->vq);
22945a2f966dSMichael S. Tsirkin
2295b92f1e67SWillem de Bruijn __netif_tx_unlock(txq);
2296b92f1e67SWillem de Bruijn
22975a2f966dSMichael S. Tsirkin if (done) {
22985a2f966dSMichael S. Tsirkin if (unlikely(virtqueue_poll(sq->vq, opaque))) {
22995a2f966dSMichael S. Tsirkin if (napi_schedule_prep(napi)) {
23005a2f966dSMichael S. Tsirkin __netif_tx_lock(txq, raw_smp_processor_id());
23015a2f966dSMichael S. Tsirkin virtqueue_disable_cb(sq->vq);
23025a2f966dSMichael S. Tsirkin __netif_tx_unlock(txq);
23035a2f966dSMichael S. Tsirkin __napi_schedule(napi);
23045a2f966dSMichael S. Tsirkin }
23055a2f966dSMichael S. Tsirkin }
23065a2f966dSMichael S. Tsirkin }
2307b92f1e67SWillem de Bruijn
2308b92f1e67SWillem de Bruijn return 0;
2309b92f1e67SWillem de Bruijn }
2310b92f1e67SWillem de Bruijn
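/* Map one skb onto the send virtqueue. When the device accepts the
 * header anywhere (vi->any_header_sg) and the skb has suitably aligned
 * headroom, the virtio-net header is pushed in front of the data so both
 * share one scatterlist entry; otherwise the header occupies a separate
 * entry ahead of the data.
 */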
2311e9d7417bSJason Wang static int xmit_skb(struct send_queue *sq, struct sk_buff *skb)
2312296f96fcSRusty Russell {
2313012873d0SMichael S. Tsirkin struct virtio_net_hdr_mrg_rxbuf *hdr;
2314296f96fcSRusty Russell const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest;
2315e9d7417bSJason Wang struct virtnet_info *vi = sq->vq->vdev->priv;
2316e2fcad58SJason A. Donenfeld int num_sg;
2317012873d0SMichael S. Tsirkin unsigned hdr_len = vi->hdr_len;
2318e7428e95SMichael S. Tsirkin bool can_push;
2319296f96fcSRusty Russell
2320e174961cSJohannes Berg pr_debug("%s: xmit %p %pM\n", vi->dev->name, skb, dest);
2321e7428e95SMichael S. Tsirkin
2322e7428e95SMichael S. Tsirkin can_push = vi->any_header_sg &&
2323e7428e95SMichael S. Tsirkin !((unsigned long)skb->data & (__alignof__(*hdr) - 1)) &&
2324e7428e95SMichael S. Tsirkin !skb_header_cloned(skb) && skb_headroom(skb) >= hdr_len;
2325e7428e95SMichael S. Tsirkin /* Even if we can, don't push here yet as this would skew
2326e7428e95SMichael S. Tsirkin * csum_start offset below. */
2327e7428e95SMichael S. Tsirkin if (can_push)
2328012873d0SMichael S. Tsirkin hdr = (struct virtio_net_hdr_mrg_rxbuf *)(skb->data - hdr_len);
2329e7428e95SMichael S. Tsirkin else
2330dae64749SFeng Liu hdr = &skb_vnet_common_hdr(skb)->mrg_hdr;
2331296f96fcSRusty Russell
2332e858fae2SMike Rapoport if (virtio_net_hdr_from_skb(skb, &hdr->hdr,
2333fd3a8862SWillem de Bruijn virtio_is_little_endian(vi->vdev), false,
2334fd3a8862SWillem de Bruijn 0))
233585eb1389SXianting Tian return -EPROTO;
2336296f96fcSRusty Russell
2337e7428e95SMichael S. Tsirkin if (vi->mergeable_rx_bufs)
2338012873d0SMichael S. Tsirkin hdr->num_buffers = 0;
23393f2c31d9SMark McLoughlin
2340547c890cSJason Wang sg_init_table(sq->sg, skb_shinfo(skb)->nr_frags + (can_push ? 1 : 2));
2341e7428e95SMichael S. Tsirkin if (can_push) {
2342e7428e95SMichael S. Tsirkin __skb_push(skb, hdr_len);
2343e7428e95SMichael S. Tsirkin num_sg = skb_to_sgvec(skb, sq->sg, 0, skb->len);
2344e2fcad58SJason A. Donenfeld if (unlikely(num_sg < 0))
2345e2fcad58SJason A. Donenfeld return num_sg;
2346e7428e95SMichael S. Tsirkin /* Pull header back to avoid skew in tx bytes calculations. */
2347e7428e95SMichael S. Tsirkin __skb_pull(skb, hdr_len);
2348e7428e95SMichael S. Tsirkin } else {
2349e7428e95SMichael S. Tsirkin sg_set_buf(sq->sg, hdr, hdr_len);
2350e2fcad58SJason A. Donenfeld num_sg = skb_to_sgvec(skb, sq->sg + 1, 0, skb->len);
2351e2fcad58SJason A. Donenfeld if (unlikely(num_sg < 0))
2352e2fcad58SJason A. Donenfeld return num_sg;
2353e2fcad58SJason A. Donenfeld num_sg++;
2354e7428e95SMichael S. Tsirkin }
23559dc7b9e4SRusty Russell return virtqueue_add_outbuf(sq->vq, sq->sg, num_sg, skb, GFP_ATOMIC);
235611a3a154SRusty Russell }
235711a3a154SRusty Russell
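/* ndo_start_xmit: completed buffers are reclaimed before new ones are
 * queued so the ring does not sit full of stale entries. Without TX NAPI
 * the skb is orphaned, since nothing would otherwise free it promptly.
 * check_sq_full_and_disable() stops the queue when the ring is nearly
 * full, and the device is only kicked when netdev_xmit_more() reports no
 * follow-up packet or the queue was stopped.
 */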
2358424efe9cSStephen Hemminger static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
235999ffc696SRusty Russell {
236099ffc696SRusty Russell struct virtnet_info *vi = netdev_priv(dev);
2361986a4f4dSJason Wang int qnum = skb_get_queue_mapping(skb);
2362986a4f4dSJason Wang struct send_queue *sq = &vi->sq[qnum];
23639ed4cb07SRusty Russell int err;
23644b7fd2e6SMichael S. Tsirkin struct netdev_queue *txq = netdev_get_tx_queue(dev, qnum);
23656b16f9eeSFlorian Westphal bool kick = !netdev_xmit_more();
2366b92f1e67SWillem de Bruijn bool use_napi = sq->napi.weight;
23672cb9c6baSRusty Russell
23682cb9c6baSRusty Russell /* Free up any pending old buffers before queueing new ones. */
2369a7766ef1SMichael S. Tsirkin do {
2370a7766ef1SMichael S. Tsirkin if (use_napi)
2371a7766ef1SMichael S. Tsirkin virtqueue_disable_cb(sq->vq);
2372a7766ef1SMichael S. Tsirkin
2373df133f3fSMichael S. Tsirkin free_old_xmit_skbs(sq, false);
237499ffc696SRusty Russell
2375a7766ef1SMichael S. Tsirkin } while (use_napi && kick &&
2376a7766ef1SMichael S. Tsirkin unlikely(!virtqueue_enable_cb_delayed(sq->vq)));
2377bdb12e0dSWillem de Bruijn
2378074c3582SJacob Keller /* timestamp packet in software */
2379074c3582SJacob Keller skb_tx_timestamp(skb);
2380074c3582SJacob Keller
238103f191baSMichael S. Tsirkin /* Try to transmit */
2382b7dfde95SLinus Torvalds err = xmit_skb(sq, skb);
238399ffc696SRusty Russell
23849ed4cb07SRusty Russell /* This should not happen! */
2385681daee2SJason Wang if (unlikely(err)) {
2386e2e5c2a3SEric Dumazet DEV_STATS_INC(dev, tx_fifo_errors);
23872e57b79cSRick Jones if (net_ratelimit())
238858eba97dSRusty Russell dev_warn(&dev->dev,
23897934b481SYuval Shaia "Unexpected TXQ (%d) queue failure: %d\n",
23907934b481SYuval Shaia qnum, err);
2391e2e5c2a3SEric Dumazet DEV_STATS_INC(dev, tx_dropped);
239285e94525SEric W. Biederman dev_kfree_skb_any(skb);
239358eba97dSRusty Russell return NETDEV_TX_OK;
2394296f96fcSRusty Russell }
239503f191baSMichael S. Tsirkin
239648925e37SRusty Russell /* Don't wait up for transmitted skbs to be freed. */
2397b92f1e67SWillem de Bruijn if (!use_napi) {
239848925e37SRusty Russell skb_orphan(skb);
2399895b5c9fSFlorian Westphal nf_reset_ct(skb);
2400b92f1e67SWillem de Bruijn }
240148925e37SRusty Russell
2402b8ef4809SXuan Zhuo check_sq_full_and_disable(vi, dev, sq);
240348925e37SRusty Russell
2404461f03dcSToshiaki Makita if (kick || netif_xmit_stopped(txq)) {
2405461f03dcSToshiaki Makita if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq)) {
2406461f03dcSToshiaki Makita u64_stats_update_begin(&sq->stats.syncp);
240727debe3eSEric Dumazet u64_stats_inc(&sq->stats.kicks);
2408461f03dcSToshiaki Makita u64_stats_update_end(&sq->stats.syncp);
2409461f03dcSToshiaki Makita }
2410461f03dcSToshiaki Makita }
24110b725a2cSDavid S. Miller
24120b725a2cSDavid S. Miller return NETDEV_TX_OK;
2413c223a078SDavid S. Miller }
2414c223a078SDavid S. Miller
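/* Resize one receive virtqueue: pause NAPI, let virtqueue_resize() hand
 * back unused buffers through virtnet_rq_unmap_free_buf(), refill the
 * ring (via the workqueue on OOM) and resume NAPI.
 */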
24156a4763e2SXuan Zhuo static int virtnet_rx_resize(struct virtnet_info *vi,
24166a4763e2SXuan Zhuo struct receive_queue *rq, u32 ring_num)
24176a4763e2SXuan Zhuo {
24186a4763e2SXuan Zhuo bool running = netif_running(vi->dev);
24196a4763e2SXuan Zhuo int err, qindex;
24206a4763e2SXuan Zhuo
24216a4763e2SXuan Zhuo qindex = rq - vi->rq;
24226a4763e2SXuan Zhuo
24236a4763e2SXuan Zhuo if (running)
24246a4763e2SXuan Zhuo napi_disable(&rq->napi);
24256a4763e2SXuan Zhuo
24263ffd05c2SXuan Zhuo err = virtqueue_resize(rq->vq, ring_num, virtnet_rq_unmap_free_buf);
24276a4763e2SXuan Zhuo if (err)
24286a4763e2SXuan Zhuo netdev_err(vi->dev, "resize rx fail: rx queue index: %d err: %d\n", qindex, err);
24296a4763e2SXuan Zhuo
24306a4763e2SXuan Zhuo if (!try_fill_recv(vi, rq, GFP_KERNEL))
24316a4763e2SXuan Zhuo schedule_delayed_work(&vi->refill, 0);
24326a4763e2SXuan Zhuo
24336a4763e2SXuan Zhuo if (running)
24346a4763e2SXuan Zhuo virtnet_napi_enable(rq->vq, &rq->napi);
24356a4763e2SXuan Zhuo return err;
24366a4763e2SXuan Zhuo }
24376a4763e2SXuan Zhuo
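/* Resize one transmit virtqueue. sq->reset is raised under the tx queue
 * lock so the RX-side cleaning path (virtnet_poll_cleantx()) backs off,
 * and the subqueue is stopped so the stack cannot queue new packets while
 * the ring is rebuilt.
 */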
2438ebcce492SXuan Zhuo static int virtnet_tx_resize(struct virtnet_info *vi,
2439ebcce492SXuan Zhuo struct send_queue *sq, u32 ring_num)
2440ebcce492SXuan Zhuo {
2441ebcce492SXuan Zhuo bool running = netif_running(vi->dev);
2442ebcce492SXuan Zhuo struct netdev_queue *txq;
2443ebcce492SXuan Zhuo int err, qindex;
2444ebcce492SXuan Zhuo
2445ebcce492SXuan Zhuo qindex = sq - vi->sq;
2446ebcce492SXuan Zhuo
2447ebcce492SXuan Zhuo if (running)
2448ebcce492SXuan Zhuo virtnet_napi_tx_disable(&sq->napi);
2449ebcce492SXuan Zhuo
2450ebcce492SXuan Zhuo txq = netdev_get_tx_queue(vi->dev, qindex);
2451ebcce492SXuan Zhuo
2452ebcce492SXuan Zhuo /* 1. wait for all xmit to complete
2453ebcce492SXuan Zhuo * 2. fix the race of netif_stop_subqueue() vs netif_start_subqueue()
2454ebcce492SXuan Zhuo */
2455ebcce492SXuan Zhuo __netif_tx_lock_bh(txq);
2456ebcce492SXuan Zhuo
2457ebcce492SXuan Zhuo /* Prevent rx poll from accessing sq. */
2458ebcce492SXuan Zhuo sq->reset = true;
2459ebcce492SXuan Zhuo
2460ebcce492SXuan Zhuo /* Prevent the upper layer from trying to send packets. */
2461ebcce492SXuan Zhuo netif_stop_subqueue(vi->dev, qindex);
2462ebcce492SXuan Zhuo
2463ebcce492SXuan Zhuo __netif_tx_unlock_bh(txq);
2464ebcce492SXuan Zhuo
2465ebcce492SXuan Zhuo err = virtqueue_resize(sq->vq, ring_num, virtnet_sq_free_unused_buf);
2466ebcce492SXuan Zhuo if (err)
2467ebcce492SXuan Zhuo netdev_err(vi->dev, "resize tx fail: tx queue index: %d err: %d\n", qindex, err);
2468ebcce492SXuan Zhuo
2469ebcce492SXuan Zhuo __netif_tx_lock_bh(txq);
2470ebcce492SXuan Zhuo sq->reset = false;
2471ebcce492SXuan Zhuo netif_tx_wake_queue(txq);
2472ebcce492SXuan Zhuo __netif_tx_unlock_bh(txq);
2473ebcce492SXuan Zhuo
2474ebcce492SXuan Zhuo if (running)
2475ebcce492SXuan Zhuo virtnet_napi_tx_enable(vi, sq->vq, &sq->napi);
2476ebcce492SXuan Zhuo return err;
2477ebcce492SXuan Zhuo }
2478ebcce492SXuan Zhuo
247940cbfc37SAmos Kong /*
248040cbfc37SAmos Kong * Send command via the control virtqueue and check status. Commands
248140cbfc37SAmos Kong * supported by the hypervisor, as indicated by feature bits, should
2482788a8b6dSstephen hemminger * never fail unless improperly formatted.
248340cbfc37SAmos Kong */
248440cbfc37SAmos Kong static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
2485d24bae32Sstephen hemminger struct scatterlist *out)
248640cbfc37SAmos Kong {
2487f7bc9594SRusty Russell struct scatterlist *sgs[4], hdr, stat;
2488d24bae32Sstephen hemminger unsigned out_num = 0, tmp;
2489222722bcSYunjian Wang int ret;
249040cbfc37SAmos Kong
249140cbfc37SAmos Kong /* Caller should know better */
2492f7bc9594SRusty Russell BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ));
249340cbfc37SAmos Kong
249412e57169SMichael S. Tsirkin vi->ctrl->status = ~0;
249512e57169SMichael S. Tsirkin vi->ctrl->hdr.class = class;
249612e57169SMichael S. Tsirkin vi->ctrl->hdr.cmd = cmd;
2497f7bc9594SRusty Russell /* Add header */
249812e57169SMichael S. Tsirkin sg_init_one(&hdr, &vi->ctrl->hdr, sizeof(vi->ctrl->hdr));
2499f7bc9594SRusty Russell sgs[out_num++] = &hdr;
250040cbfc37SAmos Kong
2501f7bc9594SRusty Russell if (out)
2502f7bc9594SRusty Russell sgs[out_num++] = out;
250340cbfc37SAmos Kong
2504f7bc9594SRusty Russell /* Add return status. */
250512e57169SMichael S. Tsirkin sg_init_one(&stat, &vi->ctrl->status, sizeof(vi->ctrl->status));
2506d24bae32Sstephen hemminger sgs[out_num] = &stat;
250740cbfc37SAmos Kong
2508d24bae32Sstephen hemminger BUG_ON(out_num + 1 > ARRAY_SIZE(sgs));
2509222722bcSYunjian Wang ret = virtqueue_add_sgs(vi->cvq, sgs, out_num, 1, vi, GFP_ATOMIC);
2510222722bcSYunjian Wang if (ret < 0) {
2511222722bcSYunjian Wang dev_warn(&vi->vdev->dev,
2512222722bcSYunjian Wang "Failed to add sgs for command vq: %d.\n", ret);
2513222722bcSYunjian Wang return false;
2514222722bcSYunjian Wang }
251540cbfc37SAmos Kong
251667975901SHeinz Graalfs if (unlikely(!virtqueue_kick(vi->cvq)))
251712e57169SMichael S. Tsirkin return vi->ctrl->status == VIRTIO_NET_OK;
251840cbfc37SAmos Kong
251940cbfc37SAmos Kong /* Spin for a response; the kick causes an ioport write, trapping
252040cbfc37SAmos Kong * into the hypervisor, so the request should be handled immediately.
252140cbfc37SAmos Kong */
2522047b9b94SHeinz Graalfs while (!virtqueue_get_buf(vi->cvq, &tmp) &&
2523047b9b94SHeinz Graalfs !virtqueue_is_broken(vi->cvq))
252440cbfc37SAmos Kong cpu_relax();
252540cbfc37SAmos Kong
252612e57169SMichael S. Tsirkin return vi->ctrl->status == VIRTIO_NET_OK;
252740cbfc37SAmos Kong }
252840cbfc37SAmos Kong
25299c46f6d4SAlex Williamson static int virtnet_set_mac_address(struct net_device *dev, void *p)
25309c46f6d4SAlex Williamson {
25319c46f6d4SAlex Williamson struct virtnet_info *vi = netdev_priv(dev);
25329c46f6d4SAlex Williamson struct virtio_device *vdev = vi->vdev;
2533f2f2c8b4SJiri Pirko int ret;
2534e37e2ff3SAndy Lutomirski struct sockaddr *addr;
25357e58d5aeSAmos Kong struct scatterlist sg;
25369c46f6d4SAlex Williamson
2537ba5e4426SSridhar Samudrala if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STANDBY))
2538ba5e4426SSridhar Samudrala return -EOPNOTSUPP;
2539ba5e4426SSridhar Samudrala
2540801822d1SShyam Saini addr = kmemdup(p, sizeof(*addr), GFP_KERNEL);
2541e37e2ff3SAndy Lutomirski if (!addr)
2542e37e2ff3SAndy Lutomirski return -ENOMEM;
2543e37e2ff3SAndy Lutomirski
2544e37e2ff3SAndy Lutomirski ret = eth_prepare_mac_addr_change(dev, addr);
2545f2f2c8b4SJiri Pirko if (ret)
2546e37e2ff3SAndy Lutomirski goto out;
25479c46f6d4SAlex Williamson
25487e58d5aeSAmos Kong if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR)) {
25497e58d5aeSAmos Kong sg_init_one(&sg, addr->sa_data, dev->addr_len);
25507e58d5aeSAmos Kong if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
2551d24bae32Sstephen hemminger VIRTIO_NET_CTRL_MAC_ADDR_SET, &sg)) {
25527e58d5aeSAmos Kong dev_warn(&vdev->dev,
25537e58d5aeSAmos Kong "Failed to set mac address by vq command.\n");
2554e37e2ff3SAndy Lutomirski ret = -EINVAL;
2555e37e2ff3SAndy Lutomirski goto out;
25567e58d5aeSAmos Kong }
25577e93a02fSMichael S. Tsirkin } else if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC) &&
25587e93a02fSMichael S. Tsirkin !virtio_has_feature(vdev, VIRTIO_F_VERSION_1)) {
2559855e0c52SRusty Russell unsigned int i;
2560855e0c52SRusty Russell
2561855e0c52SRusty Russell /* Naturally, this has an atomicity problem. */
2562855e0c52SRusty Russell for (i = 0; i < dev->addr_len; i++)
2563855e0c52SRusty Russell virtio_cwrite8(vdev,
2564855e0c52SRusty Russell offsetof(struct virtio_net_config, mac) +
2565855e0c52SRusty Russell i, addr->sa_data[i]);
25667e58d5aeSAmos Kong }
25677e58d5aeSAmos Kong
25687e58d5aeSAmos Kong eth_commit_mac_addr_change(dev, p);
2569e37e2ff3SAndy Lutomirski ret = 0;
25709c46f6d4SAlex Williamson
2571e37e2ff3SAndy Lutomirski out:
2572e37e2ff3SAndy Lutomirski kfree(addr);
2573e37e2ff3SAndy Lutomirski return ret;
25749c46f6d4SAlex Williamson }
25759c46f6d4SAlex Williamson
2576bc1f4470Sstephen hemminger static void virtnet_stats(struct net_device *dev,
25773fa2a1dfSstephen hemminger struct rtnl_link_stats64 *tot)
25783fa2a1dfSstephen hemminger {
25793fa2a1dfSstephen hemminger struct virtnet_info *vi = netdev_priv(dev);
25803fa2a1dfSstephen hemminger unsigned int start;
2581d7dfc5cfSToshiaki Makita int i;
25823fa2a1dfSstephen hemminger
2583d7dfc5cfSToshiaki Makita for (i = 0; i < vi->max_queue_pairs; i++) {
2584a520794bSTony Lu u64 tpackets, tbytes, terrors, rpackets, rbytes, rdrops;
2585d7dfc5cfSToshiaki Makita struct receive_queue *rq = &vi->rq[i];
2586d7dfc5cfSToshiaki Makita struct send_queue *sq = &vi->sq[i];
25873fa2a1dfSstephen hemminger
25883fa2a1dfSstephen hemminger do {
2589068c38adSThomas Gleixner start = u64_stats_fetch_begin(&sq->stats.syncp);
259027debe3eSEric Dumazet tpackets = u64_stats_read(&sq->stats.packets);
259127debe3eSEric Dumazet tbytes = u64_stats_read(&sq->stats.bytes);
259227debe3eSEric Dumazet terrors = u64_stats_read(&sq->stats.tx_timeouts);
2593068c38adSThomas Gleixner } while (u64_stats_fetch_retry(&sq->stats.syncp, start));
259483a27052SEric Dumazet
259583a27052SEric Dumazet do {
2596068c38adSThomas Gleixner start = u64_stats_fetch_begin(&rq->stats.syncp);
259727debe3eSEric Dumazet rpackets = u64_stats_read(&rq->stats.packets);
259827debe3eSEric Dumazet rbytes = u64_stats_read(&rq->stats.bytes);
259927debe3eSEric Dumazet rdrops = u64_stats_read(&rq->stats.drops);
2600068c38adSThomas Gleixner } while (u64_stats_fetch_retry(&rq->stats.syncp, start));
26013fa2a1dfSstephen hemminger
26023fa2a1dfSstephen hemminger tot->rx_packets += rpackets;
26033fa2a1dfSstephen hemminger tot->tx_packets += tpackets;
26043fa2a1dfSstephen hemminger tot->rx_bytes += rbytes;
26053fa2a1dfSstephen hemminger tot->tx_bytes += tbytes;
26062c4a2f7dSToshiaki Makita tot->rx_dropped += rdrops;
2607a520794bSTony Lu tot->tx_errors += terrors;
26083fa2a1dfSstephen hemminger }
26093fa2a1dfSstephen hemminger
2610e2e5c2a3SEric Dumazet tot->tx_dropped = DEV_STATS_READ(dev, tx_dropped);
2611e2e5c2a3SEric Dumazet tot->tx_fifo_errors = DEV_STATS_READ(dev, tx_fifo_errors);
2612e2e5c2a3SEric Dumazet tot->rx_length_errors = DEV_STATS_READ(dev, rx_length_errors);
2613e2e5c2a3SEric Dumazet tot->rx_frame_errors = DEV_STATS_READ(dev, rx_frame_errors);
26143fa2a1dfSstephen hemminger }
26153fa2a1dfSstephen hemminger
2616586d17c5SJason Wang static void virtnet_ack_link_announce(struct virtnet_info *vi)
2617586d17c5SJason Wang {
2618586d17c5SJason Wang rtnl_lock();
2619586d17c5SJason Wang if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_ANNOUNCE,
2620d24bae32Sstephen hemminger VIRTIO_NET_CTRL_ANNOUNCE_ACK, NULL))
2621586d17c5SJason Wang dev_warn(&vi->dev->dev, "Failed to ack link announce.\n");
2622586d17c5SJason Wang rtnl_unlock();
2623586d17c5SJason Wang }
2624586d17c5SJason Wang
262547315329SJohn Fastabend static int _virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
2626986a4f4dSJason Wang {
2627986a4f4dSJason Wang struct scatterlist sg;
2628986a4f4dSJason Wang struct net_device *dev = vi->dev;
2629986a4f4dSJason Wang
2630986a4f4dSJason Wang if (!vi->has_cvq || !virtio_has_feature(vi->vdev, VIRTIO_NET_F_MQ))
2631986a4f4dSJason Wang return 0;
2632986a4f4dSJason Wang
263312e57169SMichael S. Tsirkin vi->ctrl->mq.virtqueue_pairs = cpu_to_virtio16(vi->vdev, queue_pairs);
263412e57169SMichael S. Tsirkin sg_init_one(&sg, &vi->ctrl->mq, sizeof(vi->ctrl->mq));
2635986a4f4dSJason Wang
2636986a4f4dSJason Wang if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ,
2637d24bae32Sstephen hemminger VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET, &sg)) {
2638986a4f4dSJason Wang dev_warn(&dev->dev, "Failed to set num of queue pairs to %d\n",
2639986a4f4dSJason Wang queue_pairs);
2640986a4f4dSJason Wang return -EINVAL;
264155257d72SSasha Levin } else {
2642986a4f4dSJason Wang vi->curr_queue_pairs = queue_pairs;
264335ed159bSJason Wang /* virtnet_open() will refill when the device goes up. */
264435ed159bSJason Wang if (dev->flags & IFF_UP)
26459b9cd802SJason Wang schedule_delayed_work(&vi->refill, 0);
264655257d72SSasha Levin }
2647986a4f4dSJason Wang
2648986a4f4dSJason Wang return 0;
2649986a4f4dSJason Wang }
2650986a4f4dSJason Wang
265147315329SJohn Fastabend static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
265247315329SJohn Fastabend {
265347315329SJohn Fastabend int err;
265447315329SJohn Fastabend
265547315329SJohn Fastabend rtnl_lock();
265647315329SJohn Fastabend err = _virtnet_set_queues(vi, queue_pairs);
265747315329SJohn Fastabend rtnl_unlock();
265847315329SJohn Fastabend return err;
265947315329SJohn Fastabend }
266047315329SJohn Fastabend
2661296f96fcSRusty Russell static int virtnet_close(struct net_device *dev)
2662296f96fcSRusty Russell {
2663296f96fcSRusty Russell struct virtnet_info *vi = netdev_priv(dev);
2664986a4f4dSJason Wang int i;
2665296f96fcSRusty Russell
26665a159128SJason Wang /* Make sure NAPI doesn't schedule refill work */
26675a159128SJason Wang disable_delayed_refill(vi);
2668b2baed69SRusty Russell /* Make sure refill_work doesn't re-enable napi! */
2669b2baed69SRusty Russell cancel_delayed_work_sync(&vi->refill);
2670986a4f4dSJason Wang
26715306623aSFeng Liu for (i = 0; i < vi->max_queue_pairs; i++)
26725306623aSFeng Liu virtnet_disable_queue_pair(vi, i);
2673296f96fcSRusty Russell
2674296f96fcSRusty Russell return 0;
2675296f96fcSRusty Russell }
2676296f96fcSRusty Russell
26772af7698eSAlex Williamson static void virtnet_set_rx_mode(struct net_device *dev)
26782af7698eSAlex Williamson {
26792af7698eSAlex Williamson struct virtnet_info *vi = netdev_priv(dev);
2680f565a7c2SAlex Williamson struct scatterlist sg[2];
2681f565a7c2SAlex Williamson struct virtio_net_ctrl_mac *mac_data;
2682ccffad25SJiri Pirko struct netdev_hw_addr *ha;
268332e7bfc4SJiri Pirko int uc_count;
26844cd24eafSJiri Pirko int mc_count;
2685f565a7c2SAlex Williamson void *buf;
2686f565a7c2SAlex Williamson int i;
26872af7698eSAlex Williamson
2688788a8b6dSstephen hemminger /* We can't dynamically set ndo_set_rx_mode, so return gracefully */
26892af7698eSAlex Williamson if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_RX))
26902af7698eSAlex Williamson return;
26912af7698eSAlex Williamson
269212e57169SMichael S. Tsirkin vi->ctrl->promisc = ((dev->flags & IFF_PROMISC) != 0);
269312e57169SMichael S. Tsirkin vi->ctrl->allmulti = ((dev->flags & IFF_ALLMULTI) != 0);
26942af7698eSAlex Williamson
269512e57169SMichael S. Tsirkin sg_init_one(sg, &vi->ctrl->promisc, sizeof(vi->ctrl->promisc));
26962af7698eSAlex Williamson
26972af7698eSAlex Williamson if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
2698d24bae32Sstephen hemminger VIRTIO_NET_CTRL_RX_PROMISC, sg))
26992af7698eSAlex Williamson dev_warn(&dev->dev, "Failed to %sable promisc mode.\n",
270012e57169SMichael S. Tsirkin vi->ctrl->promisc ? "en" : "dis");
27012af7698eSAlex Williamson
270212e57169SMichael S. Tsirkin sg_init_one(sg, &vi->ctrl->allmulti, sizeof(vi->ctrl->allmulti));
27032af7698eSAlex Williamson
27042af7698eSAlex Williamson if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
2705d24bae32Sstephen hemminger VIRTIO_NET_CTRL_RX_ALLMULTI, sg))
27062af7698eSAlex Williamson dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n",
270712e57169SMichael S. Tsirkin vi->ctrl->allmulti ? "en" : "dis");
2708f565a7c2SAlex Williamson
270932e7bfc4SJiri Pirko uc_count = netdev_uc_count(dev);
27104cd24eafSJiri Pirko mc_count = netdev_mc_count(dev);
2711f565a7c2SAlex Williamson /* MAC filter - use one buffer for both lists */
27124cd24eafSJiri Pirko buf = kzalloc(((uc_count + mc_count) * ETH_ALEN) +
2713f565a7c2SAlex Williamson (2 * sizeof(mac_data->entries)), GFP_ATOMIC);
27144cd24eafSJiri Pirko mac_data = buf;
2715e68ed8f0SJoe Perches if (!buf)
2716f565a7c2SAlex Williamson return;
2717f565a7c2SAlex Williamson
271823e258e1SAlex Williamson sg_init_table(sg, 2);
271923e258e1SAlex Williamson
2720f565a7c2SAlex Williamson /* Store the unicast list and count in the front of the buffer */
2721fdd819b2SMichael S. Tsirkin mac_data->entries = cpu_to_virtio32(vi->vdev, uc_count);
2722ccffad25SJiri Pirko i = 0;
272332e7bfc4SJiri Pirko netdev_for_each_uc_addr(ha, dev)
2724ccffad25SJiri Pirko memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN);
2725f565a7c2SAlex Williamson
2726f565a7c2SAlex Williamson sg_set_buf(&sg[0], mac_data,
272732e7bfc4SJiri Pirko sizeof(mac_data->entries) + (uc_count * ETH_ALEN));
2728f565a7c2SAlex Williamson
2729f565a7c2SAlex Williamson /* multicast list and count fill the end */
273032e7bfc4SJiri Pirko mac_data = (void *)&mac_data->macs[uc_count][0];
2731f565a7c2SAlex Williamson
2732fdd819b2SMichael S. Tsirkin mac_data->entries = cpu_to_virtio32(vi->vdev, mc_count);
2733567ec874SJiri Pirko i = 0;
273422bedad3SJiri Pirko netdev_for_each_mc_addr(ha, dev)
273522bedad3SJiri Pirko memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN);
2736f565a7c2SAlex Williamson
2737f565a7c2SAlex Williamson sg_set_buf(&sg[1], mac_data,
27384cd24eafSJiri Pirko sizeof(mac_data->entries) + (mc_count * ETH_ALEN));
2739f565a7c2SAlex Williamson
2740f565a7c2SAlex Williamson if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
2741d24bae32Sstephen hemminger VIRTIO_NET_CTRL_MAC_TABLE_SET, sg))
274299e872aeSThomas Huth dev_warn(&dev->dev, "Failed to set MAC filter table.\n");
2743f565a7c2SAlex Williamson
2744f565a7c2SAlex Williamson kfree(buf);
27452af7698eSAlex Williamson }
27462af7698eSAlex Williamson
274780d5c368SPatrick McHardy static int virtnet_vlan_rx_add_vid(struct net_device *dev,
274880d5c368SPatrick McHardy __be16 proto, u16 vid)
27490bde9569SAlex Williamson {
27500bde9569SAlex Williamson struct virtnet_info *vi = netdev_priv(dev);
27510bde9569SAlex Williamson struct scatterlist sg;
27520bde9569SAlex Williamson
2753d7fad4c8SMichael S. Tsirkin vi->ctrl->vid = cpu_to_virtio16(vi->vdev, vid);
275412e57169SMichael S. Tsirkin sg_init_one(&sg, &vi->ctrl->vid, sizeof(vi->ctrl->vid));
27550bde9569SAlex Williamson
27560bde9569SAlex Williamson if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
2757d24bae32Sstephen hemminger VIRTIO_NET_CTRL_VLAN_ADD, &sg))
27580bde9569SAlex Williamson dev_warn(&dev->dev, "Failed to add VLAN ID %d.\n", vid);
27598e586137SJiri Pirko return 0;
27600bde9569SAlex Williamson }
27610bde9569SAlex Williamson
276280d5c368SPatrick McHardy static int virtnet_vlan_rx_kill_vid(struct net_device *dev,
276380d5c368SPatrick McHardy __be16 proto, u16 vid)
27640bde9569SAlex Williamson {
27650bde9569SAlex Williamson struct virtnet_info *vi = netdev_priv(dev);
27660bde9569SAlex Williamson struct scatterlist sg;
27670bde9569SAlex Williamson
2768d7fad4c8SMichael S. Tsirkin vi->ctrl->vid = cpu_to_virtio16(vi->vdev, vid);
276912e57169SMichael S. Tsirkin sg_init_one(&sg, &vi->ctrl->vid, sizeof(vi->ctrl->vid));
27700bde9569SAlex Williamson
27710bde9569SAlex Williamson if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
2772d24bae32Sstephen hemminger VIRTIO_NET_CTRL_VLAN_DEL, &sg))
27730bde9569SAlex Williamson dev_warn(&dev->dev, "Failed to kill VLAN ID %d.\n", vid);
27748e586137SJiri Pirko return 0;
27750bde9569SAlex Williamson }
27760bde9569SAlex Williamson
2777310974faSPeter Xu static void virtnet_clean_affinity(struct virtnet_info *vi)
2778986a4f4dSJason Wang {
2779986a4f4dSJason Wang int i;
27808898c21cSWanlong Gao
27818898c21cSWanlong Gao if (vi->affinity_hint_set) {
27828898c21cSWanlong Gao for (i = 0; i < vi->max_queue_pairs; i++) {
278319e226e8SCaleb Raitto virtqueue_set_affinity(vi->rq[i].vq, NULL);
278419e226e8SCaleb Raitto virtqueue_set_affinity(vi->sq[i].vq, NULL);
27858898c21cSWanlong Gao }
27868898c21cSWanlong Gao
27878898c21cSWanlong Gao vi->affinity_hint_set = false;
27888898c21cSWanlong Gao }
27898898c21cSWanlong Gao }
27908898c21cSWanlong Gao
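/* Spread the queue pairs over the online CPUs: every queue gets a group
 * of roughly num_online_cpus() / curr_queue_pairs CPUs, with the first
 * (num_online_cpus() % curr_queue_pairs) queues taking one straggler CPU
 * extra. The mask is applied to both virtqueues of the pair and to the
 * XPS map of the matching tx queue.
 */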
27918898c21cSWanlong Gao static void virtnet_set_affinity(struct virtnet_info *vi)
2792986a4f4dSJason Wang {
27932ca653d6SCaleb Raitto cpumask_var_t mask;
27942ca653d6SCaleb Raitto int stragglers;
27952ca653d6SCaleb Raitto int group_size;
27962ca653d6SCaleb Raitto int i, j, cpu;
27972ca653d6SCaleb Raitto int num_cpu;
27982ca653d6SCaleb Raitto int stride;
2799986a4f4dSJason Wang
28002ca653d6SCaleb Raitto if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) {
2801310974faSPeter Xu virtnet_clean_affinity(vi);
2802986a4f4dSJason Wang return;
2803986a4f4dSJason Wang }
2804986a4f4dSJason Wang
28052ca653d6SCaleb Raitto num_cpu = num_online_cpus();
28062ca653d6SCaleb Raitto stride = max_t(int, num_cpu / vi->curr_queue_pairs, 1);
28072ca653d6SCaleb Raitto stragglers = num_cpu >= vi->curr_queue_pairs ?
28082ca653d6SCaleb Raitto num_cpu % vi->curr_queue_pairs :
28092ca653d6SCaleb Raitto 0;
28109b51d9d8SYury Norov cpu = cpumask_first(cpu_online_mask);
28114d99f660SAndrei Vagin
28122ca653d6SCaleb Raitto for (i = 0; i < vi->curr_queue_pairs; i++) {
28132ca653d6SCaleb Raitto group_size = stride + (i < stragglers ? 1 : 0);
28142ca653d6SCaleb Raitto
28152ca653d6SCaleb Raitto for (j = 0; j < group_size; j++) {
28162ca653d6SCaleb Raitto cpumask_set_cpu(cpu, mask);
28172ca653d6SCaleb Raitto cpu = cpumask_next_wrap(cpu, cpu_online_mask,
28182ca653d6SCaleb Raitto nr_cpu_ids, false);
28192ca653d6SCaleb Raitto }
28202ca653d6SCaleb Raitto virtqueue_set_affinity(vi->rq[i].vq, mask);
28212ca653d6SCaleb Raitto virtqueue_set_affinity(vi->sq[i].vq, mask);
2822044ab86dSAntoine Tenart __netif_set_xps_queue(vi->dev, cpumask_bits(mask), i, XPS_CPUS);
28232ca653d6SCaleb Raitto cpumask_clear(mask);
2824986a4f4dSJason Wang }
2825986a4f4dSJason Wang
2826986a4f4dSJason Wang vi->affinity_hint_set = true;
28272ca653d6SCaleb Raitto free_cpumask_var(mask);
282847be2479SWanlong Gao }
2829986a4f4dSJason Wang
28308017c279SSebastian Andrzej Siewior static int virtnet_cpu_online(unsigned int cpu, struct hlist_node *node)
28318de4b2f3SWanlong Gao {
28328017c279SSebastian Andrzej Siewior struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info,
28338017c279SSebastian Andrzej Siewior node);
28348de4b2f3SWanlong Gao virtnet_set_affinity(vi);
28358017c279SSebastian Andrzej Siewior return 0;
28368de4b2f3SWanlong Gao }
28373ab098dfSJason Wang
28388017c279SSebastian Andrzej Siewior static int virtnet_cpu_dead(unsigned int cpu, struct hlist_node *node)
28398017c279SSebastian Andrzej Siewior {
28408017c279SSebastian Andrzej Siewior struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info,
28418017c279SSebastian Andrzej Siewior node_dead);
28428017c279SSebastian Andrzej Siewior virtnet_set_affinity(vi);
28438017c279SSebastian Andrzej Siewior return 0;
28448017c279SSebastian Andrzej Siewior }
28458017c279SSebastian Andrzej Siewior
28468017c279SSebastian Andrzej Siewior static int virtnet_cpu_down_prep(unsigned int cpu, struct hlist_node *node)
28478017c279SSebastian Andrzej Siewior {
28488017c279SSebastian Andrzej Siewior struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info,
28498017c279SSebastian Andrzej Siewior node);
28508017c279SSebastian Andrzej Siewior
2851310974faSPeter Xu virtnet_clean_affinity(vi);
28528017c279SSebastian Andrzej Siewior return 0;
28538017c279SSebastian Andrzej Siewior }
28548017c279SSebastian Andrzej Siewior
28558017c279SSebastian Andrzej Siewior static enum cpuhp_state virtionet_online;
28568017c279SSebastian Andrzej Siewior
28578017c279SSebastian Andrzej Siewior static int virtnet_cpu_notif_add(struct virtnet_info *vi)
28588017c279SSebastian Andrzej Siewior {
28598017c279SSebastian Andrzej Siewior int ret;
28608017c279SSebastian Andrzej Siewior
28618017c279SSebastian Andrzej Siewior ret = cpuhp_state_add_instance_nocalls(virtionet_online, &vi->node);
28628017c279SSebastian Andrzej Siewior if (ret)
28638017c279SSebastian Andrzej Siewior return ret;
28648017c279SSebastian Andrzej Siewior ret = cpuhp_state_add_instance_nocalls(CPUHP_VIRT_NET_DEAD,
28658017c279SSebastian Andrzej Siewior &vi->node_dead);
28668017c279SSebastian Andrzej Siewior if (!ret)
28678017c279SSebastian Andrzej Siewior return ret;
28688017c279SSebastian Andrzej Siewior cpuhp_state_remove_instance_nocalls(virtionet_online, &vi->node);
28698017c279SSebastian Andrzej Siewior return ret;
28708017c279SSebastian Andrzej Siewior }
28718017c279SSebastian Andrzej Siewior
28728017c279SSebastian Andrzej Siewior static void virtnet_cpu_notif_remove(struct virtnet_info *vi)
28738017c279SSebastian Andrzej Siewior {
28748017c279SSebastian Andrzej Siewior cpuhp_state_remove_instance_nocalls(virtionet_online, &vi->node);
28758017c279SSebastian Andrzej Siewior cpuhp_state_remove_instance_nocalls(CPUHP_VIRT_NET_DEAD,
28768017c279SSebastian Andrzej Siewior &vi->node_dead);
2877a9ea3fc6SHerbert Xu }
2878a9ea3fc6SHerbert Xu
28798f9f4668SRick Jones static void virtnet_get_ringparam(struct net_device *dev,
288074624944SHao Chen struct ethtool_ringparam *ring,
288174624944SHao Chen struct kernel_ethtool_ringparam *kernel_ring,
288274624944SHao Chen struct netlink_ext_ack *extack)
28838f9f4668SRick Jones {
28848f9f4668SRick Jones struct virtnet_info *vi = netdev_priv(dev);
28858f9f4668SRick Jones
28868597b5ddSXuan Zhuo ring->rx_max_pending = vi->rq[0].vq->num_max;
28878597b5ddSXuan Zhuo ring->tx_max_pending = vi->sq[0].vq->num_max;
28888597b5ddSXuan Zhuo ring->rx_pending = virtqueue_get_vring_size(vi->rq[0].vq);
28898597b5ddSXuan Zhuo ring->tx_pending = virtqueue_get_vring_size(vi->sq[0].vq);
28908f9f4668SRick Jones }
28918f9f4668SRick Jones
289239d591b0SHeng Qi static int virtnet_send_ctrl_coal_vq_cmd(struct virtnet_info *vi,
289339d591b0SHeng Qi u16 vqn, u32 max_usecs, u32 max_packets);
289439d591b0SHeng Qi
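/* ethtool set_ringparam: each changed ring is resized individually, and
 * its interrupt coalescing parameters are re-sent afterwards via
 * virtnet_send_ctrl_coal_vq_cmd(), since a virtqueue comes back from a
 * reset with those parameters cleared (see the comments below).
 */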
2895a335b33fSXuan Zhuo static int virtnet_set_ringparam(struct net_device *dev,
2896a335b33fSXuan Zhuo struct ethtool_ringparam *ring,
2897a335b33fSXuan Zhuo struct kernel_ethtool_ringparam *kernel_ring,
2898a335b33fSXuan Zhuo struct netlink_ext_ack *extack)
2899a335b33fSXuan Zhuo {
2900a335b33fSXuan Zhuo struct virtnet_info *vi = netdev_priv(dev);
2901a335b33fSXuan Zhuo u32 rx_pending, tx_pending;
2902a335b33fSXuan Zhuo struct receive_queue *rq;
2903a335b33fSXuan Zhuo struct send_queue *sq;
2904a335b33fSXuan Zhuo int i, err;
2905a335b33fSXuan Zhuo
2906a335b33fSXuan Zhuo if (ring->rx_mini_pending || ring->rx_jumbo_pending)
2907a335b33fSXuan Zhuo return -EINVAL;
2908a335b33fSXuan Zhuo
2909a335b33fSXuan Zhuo rx_pending = virtqueue_get_vring_size(vi->rq[0].vq);
2910a335b33fSXuan Zhuo tx_pending = virtqueue_get_vring_size(vi->sq[0].vq);
2911a335b33fSXuan Zhuo
2912a335b33fSXuan Zhuo if (ring->rx_pending == rx_pending &&
2913a335b33fSXuan Zhuo ring->tx_pending == tx_pending)
2914a335b33fSXuan Zhuo return 0;
2915a335b33fSXuan Zhuo
2916a335b33fSXuan Zhuo if (ring->rx_pending > vi->rq[0].vq->num_max)
2917a335b33fSXuan Zhuo return -EINVAL;
2918a335b33fSXuan Zhuo
2919a335b33fSXuan Zhuo if (ring->tx_pending > vi->sq[0].vq->num_max)
2920a335b33fSXuan Zhuo return -EINVAL;
2921a335b33fSXuan Zhuo
2922a335b33fSXuan Zhuo for (i = 0; i < vi->max_queue_pairs; i++) {
2923a335b33fSXuan Zhuo rq = vi->rq + i;
2924a335b33fSXuan Zhuo sq = vi->sq + i;
2925a335b33fSXuan Zhuo
2926a335b33fSXuan Zhuo if (ring->tx_pending != tx_pending) {
2927a335b33fSXuan Zhuo err = virtnet_tx_resize(vi, sq, ring->tx_pending);
2928a335b33fSXuan Zhuo if (err)
2929a335b33fSXuan Zhuo return err;
293039d591b0SHeng Qi
293139d591b0SHeng Qi /* Upon disabling and re-enabling a transmit virtqueue, the device must
293239d591b0SHeng Qi * set the coalescing parameters of the virtqueue to those configured
293339d591b0SHeng Qi * through the VIRTIO_NET_CTRL_NOTF_COAL_TX_SET command, or, if the driver
293439d591b0SHeng Qi * did not set any TX coalescing parameters, to 0.
293539d591b0SHeng Qi */
293639d591b0SHeng Qi err = virtnet_send_ctrl_coal_vq_cmd(vi, txq2vq(i),
293739d591b0SHeng Qi vi->intr_coal_tx.max_usecs,
293839d591b0SHeng Qi vi->intr_coal_tx.max_packets);
293939d591b0SHeng Qi if (err)
294039d591b0SHeng Qi return err;
294139d591b0SHeng Qi
294239d591b0SHeng Qi vi->sq[i].intr_coal.max_usecs = vi->intr_coal_tx.max_usecs;
294339d591b0SHeng Qi vi->sq[i].intr_coal.max_packets = vi->intr_coal_tx.max_packets;
2944a335b33fSXuan Zhuo }
2945a335b33fSXuan Zhuo
2946a335b33fSXuan Zhuo if (ring->rx_pending != rx_pending) {
2947a335b33fSXuan Zhuo err = virtnet_rx_resize(vi, rq, ring->rx_pending);
2948a335b33fSXuan Zhuo if (err)
2949a335b33fSXuan Zhuo return err;
295039d591b0SHeng Qi
295139d591b0SHeng Qi /* The reason is the same as for the transmit virtqueue reset */
295239d591b0SHeng Qi err = virtnet_send_ctrl_coal_vq_cmd(vi, rxq2vq(i),
295339d591b0SHeng Qi vi->intr_coal_rx.max_usecs,
295439d591b0SHeng Qi vi->intr_coal_rx.max_packets);
295539d591b0SHeng Qi if (err)
295639d591b0SHeng Qi return err;
295739d591b0SHeng Qi
295839d591b0SHeng Qi vi->rq[i].intr_coal.max_usecs = vi->intr_coal_rx.max_usecs;
295939d591b0SHeng Qi vi->rq[i].intr_coal.max_packets = vi->intr_coal_rx.max_packets;
2960a335b33fSXuan Zhuo }
2961a335b33fSXuan Zhuo }
2962a335b33fSXuan Zhuo
2963a335b33fSXuan Zhuo return 0;
2964a9ea3fc6SHerbert Xu }
2965a9ea3fc6SHerbert Xu
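/* Commit the RSS/hash configuration. struct virtio_net_ctrl_rss is sent
 * as four scatterlist entries because the indirection table and the hash
 * key are variable length: the fixed header up to the table, the table
 * itself, the fields between table and key, and finally the key.
 */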
2966c7114b12SAndrew Melnychenko static bool virtnet_commit_rss_command(struct virtnet_info *vi)
2967c7114b12SAndrew Melnychenko {
2968c7114b12SAndrew Melnychenko struct net_device *dev = vi->dev;
2969c7114b12SAndrew Melnychenko struct scatterlist sgs[4];
2970c7114b12SAndrew Melnychenko unsigned int sg_buf_size;
2971c7114b12SAndrew Melnychenko
2972c7114b12SAndrew Melnychenko /* prepare sgs */
2973c7114b12SAndrew Melnychenko sg_init_table(sgs, 4);
2974c7114b12SAndrew Melnychenko
2975c7114b12SAndrew Melnychenko sg_buf_size = offsetof(struct virtio_net_ctrl_rss, indirection_table);
2976c7114b12SAndrew Melnychenko sg_set_buf(&sgs[0], &vi->ctrl->rss, sg_buf_size);
2977c7114b12SAndrew Melnychenko
2978c7114b12SAndrew Melnychenko sg_buf_size = sizeof(uint16_t) * (vi->ctrl->rss.indirection_table_mask + 1);
2979c7114b12SAndrew Melnychenko sg_set_buf(&sgs[1], vi->ctrl->rss.indirection_table, sg_buf_size);
2980c7114b12SAndrew Melnychenko
2981c7114b12SAndrew Melnychenko sg_buf_size = offsetof(struct virtio_net_ctrl_rss, key)
2982c7114b12SAndrew Melnychenko - offsetof(struct virtio_net_ctrl_rss, max_tx_vq);
2983c7114b12SAndrew Melnychenko sg_set_buf(&sgs[2], &vi->ctrl->rss.max_tx_vq, sg_buf_size);
2984c7114b12SAndrew Melnychenko
2985c7114b12SAndrew Melnychenko sg_buf_size = vi->rss_key_size;
2986c7114b12SAndrew Melnychenko sg_set_buf(&sgs[3], vi->ctrl->rss.key, sg_buf_size);
2987c7114b12SAndrew Melnychenko
2988c7114b12SAndrew Melnychenko if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ,
298991f41f01SAndrew Melnychenko vi->has_rss ? VIRTIO_NET_CTRL_MQ_RSS_CONFIG
299091f41f01SAndrew Melnychenko : VIRTIO_NET_CTRL_MQ_HASH_CONFIG, sgs)) {
2991c7114b12SAndrew Melnychenko dev_warn(&dev->dev, "VIRTIONET issue with committing RSS sgs\n");
2992c7114b12SAndrew Melnychenko return false;
2993c7114b12SAndrew Melnychenko }
2994c7114b12SAndrew Melnychenko return true;
2995c7114b12SAndrew Melnychenko }
2996c7114b12SAndrew Melnychenko
2997c7114b12SAndrew Melnychenko static void virtnet_init_default_rss(struct virtnet_info *vi)
2998c7114b12SAndrew Melnychenko {
2999c7114b12SAndrew Melnychenko u32 indir_val = 0;
3000c7114b12SAndrew Melnychenko int i = 0;
3001c7114b12SAndrew Melnychenko
3002c7114b12SAndrew Melnychenko vi->ctrl->rss.hash_types = vi->rss_hash_types_supported;
3003c1170820SAndrew Melnychenko vi->rss_hash_types_saved = vi->rss_hash_types_supported;
3004c7114b12SAndrew Melnychenko vi->ctrl->rss.indirection_table_mask = vi->rss_indir_table_size
3005c7114b12SAndrew Melnychenko ? vi->rss_indir_table_size - 1 : 0;
3006c7114b12SAndrew Melnychenko vi->ctrl->rss.unclassified_queue = 0;
3007c7114b12SAndrew Melnychenko
3008c7114b12SAndrew Melnychenko for (; i < vi->rss_indir_table_size; ++i) {
3009c7114b12SAndrew Melnychenko indir_val = ethtool_rxfh_indir_default(i, vi->curr_queue_pairs);
3010c7114b12SAndrew Melnychenko vi->ctrl->rss.indirection_table[i] = indir_val;
3011c7114b12SAndrew Melnychenko }
3012c7114b12SAndrew Melnychenko
30132c507ce9SHawkins Jiawei vi->ctrl->rss.max_tx_vq = vi->has_rss ? vi->curr_queue_pairs : 0;
3014c7114b12SAndrew Melnychenko vi->ctrl->rss.hash_key_length = vi->rss_key_size;
3015c7114b12SAndrew Melnychenko
3016c7114b12SAndrew Melnychenko netdev_rss_key_fill(vi->ctrl->rss.key, vi->rss_key_size);
3017c7114b12SAndrew Melnychenko }
3018c7114b12SAndrew Melnychenko
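/* Report the active hash fields for one flow type as ethtool RXH_* bits:
 * L4 hashing maps to source/destination IPs plus both port halves,
 * IP-only hashing to the addresses alone.
 */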
3019c1170820SAndrew Melnychenko static void virtnet_get_hashflow(const struct virtnet_info *vi, struct ethtool_rxnfc *info)
3020c1170820SAndrew Melnychenko {
3021c1170820SAndrew Melnychenko info->data = 0;
3022c1170820SAndrew Melnychenko switch (info->flow_type) {
3023c1170820SAndrew Melnychenko case TCP_V4_FLOW:
3024c1170820SAndrew Melnychenko if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_TCPv4) {
3025c1170820SAndrew Melnychenko info->data = RXH_IP_SRC | RXH_IP_DST |
3026c1170820SAndrew Melnychenko RXH_L4_B_0_1 | RXH_L4_B_2_3;
3027c1170820SAndrew Melnychenko } else if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv4) {
3028c1170820SAndrew Melnychenko info->data = RXH_IP_SRC | RXH_IP_DST;
3029c1170820SAndrew Melnychenko }
3030c1170820SAndrew Melnychenko break;
3031c1170820SAndrew Melnychenko case TCP_V6_FLOW:
3032c1170820SAndrew Melnychenko if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_TCPv6) {
3033c1170820SAndrew Melnychenko info->data = RXH_IP_SRC | RXH_IP_DST |
3034c1170820SAndrew Melnychenko RXH_L4_B_0_1 | RXH_L4_B_2_3;
3035c1170820SAndrew Melnychenko } else if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv6) {
3036c1170820SAndrew Melnychenko info->data = RXH_IP_SRC | RXH_IP_DST;
3037c1170820SAndrew Melnychenko }
3038c1170820SAndrew Melnychenko break;
3039c1170820SAndrew Melnychenko case UDP_V4_FLOW:
3040c1170820SAndrew Melnychenko if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_UDPv4) {
3041c1170820SAndrew Melnychenko info->data = RXH_IP_SRC | RXH_IP_DST |
3042c1170820SAndrew Melnychenko RXH_L4_B_0_1 | RXH_L4_B_2_3;
3043c1170820SAndrew Melnychenko } else if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv4) {
3044c1170820SAndrew Melnychenko info->data = RXH_IP_SRC | RXH_IP_DST;
3045c1170820SAndrew Melnychenko }
3046c1170820SAndrew Melnychenko break;
3047c1170820SAndrew Melnychenko case UDP_V6_FLOW:
3048c1170820SAndrew Melnychenko if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_UDPv6) {
3049c1170820SAndrew Melnychenko info->data = RXH_IP_SRC | RXH_IP_DST |
3050c1170820SAndrew Melnychenko RXH_L4_B_0_1 | RXH_L4_B_2_3;
3051c1170820SAndrew Melnychenko } else if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv6) {
3052c1170820SAndrew Melnychenko info->data = RXH_IP_SRC | RXH_IP_DST;
3053c1170820SAndrew Melnychenko }
3054c1170820SAndrew Melnychenko break;
3055c1170820SAndrew Melnychenko case IPV4_FLOW:
3056c1170820SAndrew Melnychenko if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv4)
3057c1170820SAndrew Melnychenko info->data = RXH_IP_SRC | RXH_IP_DST;
3058c1170820SAndrew Melnychenko
3059c1170820SAndrew Melnychenko break;
3060c1170820SAndrew Melnychenko case IPV6_FLOW:
3061c1170820SAndrew Melnychenko if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv6)
3062c1170820SAndrew Melnychenko info->data = RXH_IP_SRC | RXH_IP_DST;
3063c1170820SAndrew Melnychenko
3064c1170820SAndrew Melnychenko break;
3065c1170820SAndrew Melnychenko default:
3066c1170820SAndrew Melnychenko info->data = 0;
3067c1170820SAndrew Melnychenko break;
3068c1170820SAndrew Melnychenko }
3069c1170820SAndrew Melnychenko }
3070c1170820SAndrew Melnychenko
3071c1170820SAndrew Melnychenko static bool virtnet_set_hashflow(struct virtnet_info *vi, struct ethtool_rxnfc *info)
3072c1170820SAndrew Melnychenko {
3073c1170820SAndrew Melnychenko u32 new_hashtypes = vi->rss_hash_types_saved;
3074c1170820SAndrew Melnychenko bool is_disable = info->data & RXH_DISCARD;
3075c1170820SAndrew Melnychenko bool is_l4 = info->data == (RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3);
3076c1170820SAndrew Melnychenko
3077c1170820SAndrew Melnychenko /* supports only 'sd', 'sdfn' and 'r' */
3078c1170820SAndrew Melnychenko if (!((info->data == (RXH_IP_SRC | RXH_IP_DST)) | is_l4 | is_disable))
3079c1170820SAndrew Melnychenko return false;
3080c1170820SAndrew Melnychenko
3081c1170820SAndrew Melnychenko switch (info->flow_type) {
3082c1170820SAndrew Melnychenko case TCP_V4_FLOW:
3083c1170820SAndrew Melnychenko new_hashtypes &= ~(VIRTIO_NET_RSS_HASH_TYPE_IPv4 | VIRTIO_NET_RSS_HASH_TYPE_TCPv4);
3084c1170820SAndrew Melnychenko if (!is_disable)
3085c1170820SAndrew Melnychenko new_hashtypes |= VIRTIO_NET_RSS_HASH_TYPE_IPv4
3086c1170820SAndrew Melnychenko | (is_l4 ? VIRTIO_NET_RSS_HASH_TYPE_TCPv4 : 0);
3087c1170820SAndrew Melnychenko break;
3088c1170820SAndrew Melnychenko case UDP_V4_FLOW:
3089c1170820SAndrew Melnychenko new_hashtypes &= ~(VIRTIO_NET_RSS_HASH_TYPE_IPv4 | VIRTIO_NET_RSS_HASH_TYPE_UDPv4);
3090c1170820SAndrew Melnychenko if (!is_disable)
3091c1170820SAndrew Melnychenko new_hashtypes |= VIRTIO_NET_RSS_HASH_TYPE_IPv4
3092c1170820SAndrew Melnychenko | (is_l4 ? VIRTIO_NET_RSS_HASH_TYPE_UDPv4 : 0);
3093c1170820SAndrew Melnychenko break;
3094c1170820SAndrew Melnychenko case IPV4_FLOW:
3095c1170820SAndrew Melnychenko new_hashtypes &= ~VIRTIO_NET_RSS_HASH_TYPE_IPv4;
3096c1170820SAndrew Melnychenko if (!is_disable)
3097c1170820SAndrew Melnychenko new_hashtypes = VIRTIO_NET_RSS_HASH_TYPE_IPv4;
3098c1170820SAndrew Melnychenko break;
3099c1170820SAndrew Melnychenko case TCP_V6_FLOW:
3100c1170820SAndrew Melnychenko new_hashtypes &= ~(VIRTIO_NET_RSS_HASH_TYPE_IPv6 | VIRTIO_NET_RSS_HASH_TYPE_TCPv6);
3101c1170820SAndrew Melnychenko if (!is_disable)
3102c1170820SAndrew Melnychenko new_hashtypes |= VIRTIO_NET_RSS_HASH_TYPE_IPv6
3103c1170820SAndrew Melnychenko | (is_l4 ? VIRTIO_NET_RSS_HASH_TYPE_TCPv6 : 0);
3104c1170820SAndrew Melnychenko break;
3105c1170820SAndrew Melnychenko case UDP_V6_FLOW:
3106c1170820SAndrew Melnychenko new_hashtypes &= ~(VIRTIO_NET_RSS_HASH_TYPE_IPv6 | VIRTIO_NET_RSS_HASH_TYPE_UDPv6);
3107c1170820SAndrew Melnychenko if (!is_disable)
3108c1170820SAndrew Melnychenko new_hashtypes |= VIRTIO_NET_RSS_HASH_TYPE_IPv6
3109c1170820SAndrew Melnychenko | (is_l4 ? VIRTIO_NET_RSS_HASH_TYPE_UDPv6 : 0);
3110c1170820SAndrew Melnychenko break;
3111c1170820SAndrew Melnychenko case IPV6_FLOW:
3112c1170820SAndrew Melnychenko new_hashtypes &= ~VIRTIO_NET_RSS_HASH_TYPE_IPv6;
3113c1170820SAndrew Melnychenko if (!is_disable)
3114c1170820SAndrew Melnychenko new_hashtypes = VIRTIO_NET_RSS_HASH_TYPE_IPv6;
3115c1170820SAndrew Melnychenko break;
3116c1170820SAndrew Melnychenko default:
3117c1170820SAndrew Melnychenko /* unsupported flow */
3118c1170820SAndrew Melnychenko return false;
3119c1170820SAndrew Melnychenko }
3120c1170820SAndrew Melnychenko
3121c1170820SAndrew Melnychenko /* reject if an unsupported hash type was requested */
3122c1170820SAndrew Melnychenko if (new_hashtypes != (new_hashtypes & vi->rss_hash_types_supported))
3123c1170820SAndrew Melnychenko return false;
3124c1170820SAndrew Melnychenko
3125c1170820SAndrew Melnychenko if (new_hashtypes != vi->rss_hash_types_saved) {
3126c1170820SAndrew Melnychenko vi->rss_hash_types_saved = new_hashtypes;
3127c1170820SAndrew Melnychenko vi->ctrl->rss.hash_types = vi->rss_hash_types_saved;
3128c1170820SAndrew Melnychenko if (vi->dev->features & NETIF_F_RXHASH)
3129c1170820SAndrew Melnychenko return virtnet_commit_rss_command(vi);
3130c1170820SAndrew Melnychenko }
3131c1170820SAndrew Melnychenko
3132c1170820SAndrew Melnychenko return true;
3133c1170820SAndrew Melnychenko }
313466846048SRick Jones
313566846048SRick Jones static void virtnet_get_drvinfo(struct net_device *dev,
313666846048SRick Jones struct ethtool_drvinfo *info)
313766846048SRick Jones {
313866846048SRick Jones struct virtnet_info *vi = netdev_priv(dev);
313966846048SRick Jones struct virtio_device *vdev = vi->vdev;
314066846048SRick Jones
3141fb3ceec1SWolfram Sang strscpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
3142fb3ceec1SWolfram Sang strscpy(info->version, VIRTNET_DRIVER_VERSION, sizeof(info->version));
3143fb3ceec1SWolfram Sang strscpy(info->bus_info, virtio_bus_name(vdev), sizeof(info->bus_info));
314466846048SRick Jones
314566846048SRick Jones }
314666846048SRick Jones
3147d73bcd2cSJason Wang /* TODO: Eliminate OOO packets during switching */
3148d73bcd2cSJason Wang static int virtnet_set_channels(struct net_device *dev,
3149d73bcd2cSJason Wang struct ethtool_channels *channels)
3150d73bcd2cSJason Wang {
3151d73bcd2cSJason Wang struct virtnet_info *vi = netdev_priv(dev);
3152d73bcd2cSJason Wang u16 queue_pairs = channels->combined_count;
3153d73bcd2cSJason Wang int err;
3154d73bcd2cSJason Wang
3155d73bcd2cSJason Wang /* We don't support separate rx/tx channels.
3156d73bcd2cSJason Wang * We don't allow setting 'other' channels.
3157d73bcd2cSJason Wang */
3158d73bcd2cSJason Wang if (channels->rx_count || channels->tx_count || channels->other_count)
3159d73bcd2cSJason Wang return -EINVAL;
3160d73bcd2cSJason Wang
3161c18e9cd6SAmos Kong if (queue_pairs > vi->max_queue_pairs || queue_pairs == 0)
3162d73bcd2cSJason Wang return -EINVAL;
3163d73bcd2cSJason Wang
3164f600b690SJohn Fastabend /* For now we don't support modifying channels while XDP is loaded.
3165f600b690SJohn Fastabend * Also, when XDP is loaded all RX queues have XDP programs, so we
3166f600b690SJohn Fastabend * only need to check a single RX queue.
3167f600b690SJohn Fastabend */
3168f600b690SJohn Fastabend if (vi->rq[0].xdp_prog)
3169f600b690SJohn Fastabend return -EINVAL;
3170f600b690SJohn Fastabend
3171a0d1d0f4SSebastian Andrzej Siewior cpus_read_lock();
317247315329SJohn Fastabend err = _virtnet_set_queues(vi, queue_pairs);
3173de33212fSJeff Dike if (err) {
3174a0d1d0f4SSebastian Andrzej Siewior cpus_read_unlock();
3175de33212fSJeff Dike goto err;
3176d73bcd2cSJason Wang }
3177de33212fSJeff Dike virtnet_set_affinity(vi);
3178a0d1d0f4SSebastian Andrzej Siewior cpus_read_unlock();
3179d73bcd2cSJason Wang
3180de33212fSJeff Dike netif_set_real_num_tx_queues(dev, queue_pairs);
3181de33212fSJeff Dike netif_set_real_num_rx_queues(dev, queue_pairs);
3182de33212fSJeff Dike err:
3183d73bcd2cSJason Wang return err;
3184d73bcd2cSJason Wang }
3185d73bcd2cSJason Wang
3186d7dfc5cfSToshiaki Makita static void virtnet_get_strings(struct net_device *dev, u32 stringset, u8 *data)
3187d7dfc5cfSToshiaki Makita {
3188d7dfc5cfSToshiaki Makita struct virtnet_info *vi = netdev_priv(dev);
3189d7dfc5cfSToshiaki Makita unsigned int i, j;
3190d7a9a01bSAlexander Duyck u8 *p = data;
3191d7dfc5cfSToshiaki Makita
3192d7dfc5cfSToshiaki Makita switch (stringset) {
3193d7dfc5cfSToshiaki Makita case ETH_SS_STATS:
3194d7dfc5cfSToshiaki Makita for (i = 0; i < vi->curr_queue_pairs; i++) {
3195d7a9a01bSAlexander Duyck for (j = 0; j < VIRTNET_RQ_STATS_LEN; j++)
3196d7a9a01bSAlexander Duyck ethtool_sprintf(&p, "rx_queue_%u_%s", i,
3197d7a9a01bSAlexander Duyck virtnet_rq_stats_desc[j].desc);
3198d7dfc5cfSToshiaki Makita }
3199d7dfc5cfSToshiaki Makita
3200d7dfc5cfSToshiaki Makita for (i = 0; i < vi->curr_queue_pairs; i++) {
3201d7a9a01bSAlexander Duyck for (j = 0; j < VIRTNET_SQ_STATS_LEN; j++)
3202d7a9a01bSAlexander Duyck ethtool_sprintf(&p, "tx_queue_%u_%s", i,
3203d7a9a01bSAlexander Duyck virtnet_sq_stats_desc[j].desc);
3204d7dfc5cfSToshiaki Makita }
3205d7dfc5cfSToshiaki Makita break;
3206d7dfc5cfSToshiaki Makita }
3207d7dfc5cfSToshiaki Makita }
3208d7dfc5cfSToshiaki Makita
3209d7dfc5cfSToshiaki Makita static int virtnet_get_sset_count(struct net_device *dev, int sset)
3210d7dfc5cfSToshiaki Makita {
3211d7dfc5cfSToshiaki Makita struct virtnet_info *vi = netdev_priv(dev);
3212d7dfc5cfSToshiaki Makita
3213d7dfc5cfSToshiaki Makita switch (sset) {
3214d7dfc5cfSToshiaki Makita case ETH_SS_STATS:
3215d7dfc5cfSToshiaki Makita return vi->curr_queue_pairs * (VIRTNET_RQ_STATS_LEN +
3216d7dfc5cfSToshiaki Makita VIRTNET_SQ_STATS_LEN);
3217d7dfc5cfSToshiaki Makita default:
3218d7dfc5cfSToshiaki Makita return -EOPNOTSUPP;
3219d7dfc5cfSToshiaki Makita }
3220d7dfc5cfSToshiaki Makita }
3221d7dfc5cfSToshiaki Makita
3222d7dfc5cfSToshiaki Makita static void virtnet_get_ethtool_stats(struct net_device *dev,
3223d7dfc5cfSToshiaki Makita struct ethtool_stats *stats, u64 *data)
3224d7dfc5cfSToshiaki Makita {
3225d7dfc5cfSToshiaki Makita struct virtnet_info *vi = netdev_priv(dev);
3226d7dfc5cfSToshiaki Makita unsigned int idx = 0, start, i, j;
3227d7dfc5cfSToshiaki Makita const u8 *stats_base;
322827debe3eSEric Dumazet const u64_stats_t *p;
3229d7dfc5cfSToshiaki Makita size_t offset;
3230d7dfc5cfSToshiaki Makita
3231d7dfc5cfSToshiaki Makita for (i = 0; i < vi->curr_queue_pairs; i++) {
3232d7dfc5cfSToshiaki Makita struct receive_queue *rq = &vi->rq[i];
3233d7dfc5cfSToshiaki Makita
323427debe3eSEric Dumazet stats_base = (const u8 *)&rq->stats;
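		/* Read a consistent snapshot: u64_stats_fetch_retry() makes us
		 * re-read if a writer updated the counters concurrently.
		 */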
3235d7dfc5cfSToshiaki Makita do {
3236068c38adSThomas Gleixner start = u64_stats_fetch_begin(&rq->stats.syncp);
3237d7dfc5cfSToshiaki Makita for (j = 0; j < VIRTNET_RQ_STATS_LEN; j++) {
3238d7dfc5cfSToshiaki Makita offset = virtnet_rq_stats_desc[j].offset;
323927debe3eSEric Dumazet p = (const u64_stats_t *)(stats_base + offset);
324027debe3eSEric Dumazet data[idx + j] = u64_stats_read(p);
3241d7dfc5cfSToshiaki Makita }
3242068c38adSThomas Gleixner } while (u64_stats_fetch_retry(&rq->stats.syncp, start));
3243d7dfc5cfSToshiaki Makita idx += VIRTNET_RQ_STATS_LEN;
3244d7dfc5cfSToshiaki Makita }
3245d7dfc5cfSToshiaki Makita
3246d7dfc5cfSToshiaki Makita for (i = 0; i < vi->curr_queue_pairs; i++) {
3247d7dfc5cfSToshiaki Makita struct send_queue *sq = &vi->sq[i];
3248d7dfc5cfSToshiaki Makita
324927debe3eSEric Dumazet stats_base = (const u8 *)&sq->stats;
3250d7dfc5cfSToshiaki Makita do {
3251068c38adSThomas Gleixner start = u64_stats_fetch_begin(&sq->stats.syncp);
3252d7dfc5cfSToshiaki Makita for (j = 0; j < VIRTNET_SQ_STATS_LEN; j++) {
3253d7dfc5cfSToshiaki Makita offset = virtnet_sq_stats_desc[j].offset;
325427debe3eSEric Dumazet p = (const u64_stats_t *)(stats_base + offset);
325527debe3eSEric Dumazet data[idx + j] = u64_stats_read(p);
3256d7dfc5cfSToshiaki Makita }
3257068c38adSThomas Gleixner } while (u64_stats_fetch_retry(&sq->stats.syncp, start));
3258d7dfc5cfSToshiaki Makita idx += VIRTNET_SQ_STATS_LEN;
3259d7dfc5cfSToshiaki Makita }
3260d7dfc5cfSToshiaki Makita }
3261d7dfc5cfSToshiaki Makita
3262d73bcd2cSJason Wang static void virtnet_get_channels(struct net_device *dev,
3263d73bcd2cSJason Wang struct ethtool_channels *channels)
3264d73bcd2cSJason Wang {
3265d73bcd2cSJason Wang struct virtnet_info *vi = netdev_priv(dev);
3266d73bcd2cSJason Wang
3267d73bcd2cSJason Wang channels->combined_count = vi->curr_queue_pairs;
3268d73bcd2cSJason Wang channels->max_combined = vi->max_queue_pairs;
3269d73bcd2cSJason Wang channels->max_other = 0;
3270d73bcd2cSJason Wang channels->rx_count = 0;
3271d73bcd2cSJason Wang channels->tx_count = 0;
3272d73bcd2cSJason Wang channels->other_count = 0;
3273d73bcd2cSJason Wang }
3274d73bcd2cSJason Wang
3275ebb6b4b1SPhilippe Reynes static int virtnet_set_link_ksettings(struct net_device *dev,
3276ebb6b4b1SPhilippe Reynes const struct ethtool_link_ksettings *cmd)
327716032be5SNikolay Aleksandrov {
327816032be5SNikolay Aleksandrov struct virtnet_info *vi = netdev_priv(dev);
327916032be5SNikolay Aleksandrov
32809aedc6e2SCris Forno return ethtool_virtdev_set_link_ksettings(dev, cmd,
32819aedc6e2SCris Forno &vi->speed, &vi->duplex);
328216032be5SNikolay Aleksandrov }
328316032be5SNikolay Aleksandrov
3284ebb6b4b1SPhilippe Reynes static int virtnet_get_link_ksettings(struct net_device *dev,
3285ebb6b4b1SPhilippe Reynes struct ethtool_link_ksettings *cmd)
328616032be5SNikolay Aleksandrov {
328716032be5SNikolay Aleksandrov struct virtnet_info *vi = netdev_priv(dev);
328816032be5SNikolay Aleksandrov
3289ebb6b4b1SPhilippe Reynes cmd->base.speed = vi->speed;
3290ebb6b4b1SPhilippe Reynes cmd->base.duplex = vi->duplex;
3291ebb6b4b1SPhilippe Reynes cmd->base.port = PORT_OTHER;
329216032be5SNikolay Aleksandrov
329316032be5SNikolay Aleksandrov return 0;
329416032be5SNikolay Aleksandrov }
329516032be5SNikolay Aleksandrov
3296699b045aSAlvaro Karsz static int virtnet_send_notf_coal_cmds(struct virtnet_info *vi,
3297699b045aSAlvaro Karsz struct ethtool_coalesce *ec)
3298699b045aSAlvaro Karsz {
3299699b045aSAlvaro Karsz struct scatterlist sgs_tx, sgs_rx;
3300829cce76SHeng Qi int i;
3301699b045aSAlvaro Karsz
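	/* Program device-wide TX coalescing via the control vq; on success
	 * the values are mirrored into cached state for ethtool reporting.
	 */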
3302accc1bf2SBrett Creeley vi->ctrl->coal_tx.tx_usecs = cpu_to_le32(ec->tx_coalesce_usecs);
3303accc1bf2SBrett Creeley vi->ctrl->coal_tx.tx_max_packets = cpu_to_le32(ec->tx_max_coalesced_frames);
3304accc1bf2SBrett Creeley sg_init_one(&sgs_tx, &vi->ctrl->coal_tx, sizeof(vi->ctrl->coal_tx));
3305699b045aSAlvaro Karsz
3306699b045aSAlvaro Karsz if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_NOTF_COAL,
3307699b045aSAlvaro Karsz VIRTIO_NET_CTRL_NOTF_COAL_TX_SET,
3308699b045aSAlvaro Karsz &sgs_tx))
3309699b045aSAlvaro Karsz return -EINVAL;
3310699b045aSAlvaro Karsz
3311699b045aSAlvaro Karsz /* Save parameters */
3312308d7982SGavin Li vi->intr_coal_tx.max_usecs = ec->tx_coalesce_usecs;
3313308d7982SGavin Li vi->intr_coal_tx.max_packets = ec->tx_max_coalesced_frames;
3314829cce76SHeng Qi for (i = 0; i < vi->max_queue_pairs; i++) {
3315829cce76SHeng Qi vi->sq[i].intr_coal.max_usecs = ec->tx_coalesce_usecs;
3316829cce76SHeng Qi vi->sq[i].intr_coal.max_packets = ec->tx_max_coalesced_frames;
3317829cce76SHeng Qi }
3318699b045aSAlvaro Karsz
3319accc1bf2SBrett Creeley vi->ctrl->coal_rx.rx_usecs = cpu_to_le32(ec->rx_coalesce_usecs);
3320accc1bf2SBrett Creeley vi->ctrl->coal_rx.rx_max_packets = cpu_to_le32(ec->rx_max_coalesced_frames);
3321accc1bf2SBrett Creeley sg_init_one(&sgs_rx, &vi->ctrl->coal_rx, sizeof(vi->ctrl->coal_rx));
3322699b045aSAlvaro Karsz
3323699b045aSAlvaro Karsz if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_NOTF_COAL,
3324699b045aSAlvaro Karsz VIRTIO_NET_CTRL_NOTF_COAL_RX_SET,
3325699b045aSAlvaro Karsz &sgs_rx))
3326699b045aSAlvaro Karsz return -EINVAL;
3327699b045aSAlvaro Karsz
3328699b045aSAlvaro Karsz /* Save parameters */
3329308d7982SGavin Li vi->intr_coal_rx.max_usecs = ec->rx_coalesce_usecs;
3330308d7982SGavin Li vi->intr_coal_rx.max_packets = ec->rx_max_coalesced_frames;
3331829cce76SHeng Qi for (i = 0; i < vi->max_queue_pairs; i++) {
3332829cce76SHeng Qi vi->rq[i].intr_coal.max_usecs = ec->rx_coalesce_usecs;
3333829cce76SHeng Qi vi->rq[i].intr_coal.max_packets = ec->rx_max_coalesced_frames;
3334829cce76SHeng Qi }
3335699b045aSAlvaro Karsz
3336699b045aSAlvaro Karsz return 0;
3337699b045aSAlvaro Karsz }
3338699b045aSAlvaro Karsz
3339394bd877SGavin Li static int virtnet_send_ctrl_coal_vq_cmd(struct virtnet_info *vi,
3340394bd877SGavin Li u16 vqn, u32 max_usecs, u32 max_packets)
3341394bd877SGavin Li {
3342394bd877SGavin Li struct scatterlist sgs;
3343394bd877SGavin Li
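	/* Per-virtqueue variant of the coalescing command: vqn addresses a
	 * single RX or TX virtqueue (cf. rxq2vq()/txq2vq()).
	 */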
3344394bd877SGavin Li vi->ctrl->coal_vq.vqn = cpu_to_le16(vqn);
3345394bd877SGavin Li vi->ctrl->coal_vq.coal.max_usecs = cpu_to_le32(max_usecs);
3346394bd877SGavin Li vi->ctrl->coal_vq.coal.max_packets = cpu_to_le32(max_packets);
3347394bd877SGavin Li sg_init_one(&sgs, &vi->ctrl->coal_vq, sizeof(vi->ctrl->coal_vq));
3348394bd877SGavin Li
3349394bd877SGavin Li if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_NOTF_COAL,
3350394bd877SGavin Li VIRTIO_NET_CTRL_NOTF_COAL_VQ_SET,
3351394bd877SGavin Li &sgs))
3352394bd877SGavin Li return -EINVAL;
3353394bd877SGavin Li
3354394bd877SGavin Li return 0;
3355394bd877SGavin Li }
3356394bd877SGavin Li
3357394bd877SGavin Li static int virtnet_send_notf_coal_vq_cmds(struct virtnet_info *vi,
3358394bd877SGavin Li struct ethtool_coalesce *ec,
3359394bd877SGavin Li u16 queue)
3360394bd877SGavin Li {
3361394bd877SGavin Li int err;
3362394bd877SGavin Li
3363394bd877SGavin Li err = virtnet_send_ctrl_coal_vq_cmd(vi, rxq2vq(queue),
3364394bd877SGavin Li ec->rx_coalesce_usecs,
3365394bd877SGavin Li ec->rx_max_coalesced_frames);
3366394bd877SGavin Li if (err)
3367394bd877SGavin Li return err;
33687c1453abSHeng Qi
3369394bd877SGavin Li vi->rq[queue].intr_coal.max_usecs = ec->rx_coalesce_usecs;
3370394bd877SGavin Li vi->rq[queue].intr_coal.max_packets = ec->rx_max_coalesced_frames;
3371394bd877SGavin Li
3372394bd877SGavin Li err = virtnet_send_ctrl_coal_vq_cmd(vi, txq2vq(queue),
3373394bd877SGavin Li ec->tx_coalesce_usecs,
3374394bd877SGavin Li ec->tx_max_coalesced_frames);
3375394bd877SGavin Li if (err)
3376394bd877SGavin Li return err;
33777c1453abSHeng Qi
3378394bd877SGavin Li vi->sq[queue].intr_coal.max_usecs = ec->tx_coalesce_usecs;
3379394bd877SGavin Li vi->sq[queue].intr_coal.max_packets = ec->tx_max_coalesced_frames;
3380394bd877SGavin Li
3381394bd877SGavin Li return 0;
3382394bd877SGavin Li }
3383394bd877SGavin Li
3384699b045aSAlvaro Karsz static int virtnet_coal_params_supported(struct ethtool_coalesce *ec)
3385699b045aSAlvaro Karsz {
3386699b045aSAlvaro Karsz /* usecs coalescing is supported only if VIRTIO_NET_F_NOTF_COAL
3387699b045aSAlvaro Karsz * feature is negotiated.
3388699b045aSAlvaro Karsz */
3389699b045aSAlvaro Karsz if (ec->rx_coalesce_usecs || ec->tx_coalesce_usecs)
3390699b045aSAlvaro Karsz return -EOPNOTSUPP;
3391699b045aSAlvaro Karsz
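	/* Without the feature the device effectively coalesces one RX frame
	 * at a time, and a TX frames value of 0/1 only toggles NAPI tx
	 * polling in virtnet_set_coalesce().
	 */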
3392699b045aSAlvaro Karsz if (ec->tx_max_coalesced_frames > 1 ||
3393699b045aSAlvaro Karsz ec->rx_max_coalesced_frames != 1)
3394699b045aSAlvaro Karsz return -EINVAL;
3395699b045aSAlvaro Karsz
3396699b045aSAlvaro Karsz return 0;
3397699b045aSAlvaro Karsz }
3398699b045aSAlvaro Karsz
3399394bd877SGavin Li static int virtnet_should_update_vq_weight(int dev_flags, int weight,
3400394bd877SGavin Li int vq_weight, bool *should_update)
3401394bd877SGavin Li {
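	/* A non-zero XOR means the requested NAPI weight differs from the
	 * vq's current one; such a change is refused while the interface
	 * is up.
	 */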
3402394bd877SGavin Li if (weight ^ vq_weight) {
3403394bd877SGavin Li if (dev_flags & IFF_UP)
3404394bd877SGavin Li return -EBUSY;
3405394bd877SGavin Li *should_update = true;
3406394bd877SGavin Li }
3407394bd877SGavin Li
3408394bd877SGavin Li return 0;
3409394bd877SGavin Li }
3410394bd877SGavin Li
34110c465be1SJason Wang static int virtnet_set_coalesce(struct net_device *dev,
3412f3ccfda1SYufeng Mo struct ethtool_coalesce *ec,
3413f3ccfda1SYufeng Mo struct kernel_ethtool_coalesce *kernel_coal,
3414f3ccfda1SYufeng Mo struct netlink_ext_ack *extack)
34150c465be1SJason Wang {
34160c465be1SJason Wang struct virtnet_info *vi = netdev_priv(dev);
3417394bd877SGavin Li int ret, queue_number, napi_weight;
3418699b045aSAlvaro Karsz bool update_napi = false;
34190c465be1SJason Wang
3420699b045aSAlvaro Karsz 	/* Can't change NAPI weight while the interface is up */
34210c465be1SJason Wang napi_weight = ec->tx_max_coalesced_frames ? NAPI_POLL_WEIGHT : 0;
3422394bd877SGavin Li for (queue_number = 0; queue_number < vi->max_queue_pairs; queue_number++) {
3423394bd877SGavin Li ret = virtnet_should_update_vq_weight(dev->flags, napi_weight,
3424394bd877SGavin Li vi->sq[queue_number].napi.weight,
3425394bd877SGavin Li &update_napi);
3426394bd877SGavin Li if (ret)
3427394bd877SGavin Li return ret;
3428394bd877SGavin Li
3429394bd877SGavin Li if (update_napi) {
3430394bd877SGavin Li 			/* For simplicity, all queues in [queue_number, vi->max_queue_pairs)
3431394bd877SGavin Li 			 * are updated, even though only some of them may need it.
3432394bd877SGavin Li 			 */
3433394bd877SGavin Li break;
3434394bd877SGavin Li }
3435699b045aSAlvaro Karsz }
3436699b045aSAlvaro Karsz
3437699b045aSAlvaro Karsz if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_NOTF_COAL))
3438699b045aSAlvaro Karsz ret = virtnet_send_notf_coal_cmds(vi, ec);
3439699b045aSAlvaro Karsz else
3440699b045aSAlvaro Karsz ret = virtnet_coal_params_supported(ec);
3441699b045aSAlvaro Karsz
3442699b045aSAlvaro Karsz if (ret)
3443699b045aSAlvaro Karsz return ret;
3444699b045aSAlvaro Karsz
3445699b045aSAlvaro Karsz if (update_napi) {
3446394bd877SGavin Li for (; queue_number < vi->max_queue_pairs; queue_number++)
3447394bd877SGavin Li vi->sq[queue_number].napi.weight = napi_weight;
34480c465be1SJason Wang }
34490c465be1SJason Wang
3450699b045aSAlvaro Karsz return ret;
34510c465be1SJason Wang }
34520c465be1SJason Wang
34530c465be1SJason Wang static int virtnet_get_coalesce(struct net_device *dev,
3454f3ccfda1SYufeng Mo struct ethtool_coalesce *ec,
3455f3ccfda1SYufeng Mo struct kernel_ethtool_coalesce *kernel_coal,
3456f3ccfda1SYufeng Mo struct netlink_ext_ack *extack)
34570c465be1SJason Wang {
34580c465be1SJason Wang struct virtnet_info *vi = netdev_priv(dev);
34590c465be1SJason Wang
3460699b045aSAlvaro Karsz if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_NOTF_COAL)) {
3461308d7982SGavin Li ec->rx_coalesce_usecs = vi->intr_coal_rx.max_usecs;
3462308d7982SGavin Li ec->tx_coalesce_usecs = vi->intr_coal_tx.max_usecs;
3463308d7982SGavin Li ec->tx_max_coalesced_frames = vi->intr_coal_tx.max_packets;
3464308d7982SGavin Li ec->rx_max_coalesced_frames = vi->intr_coal_rx.max_packets;
3465699b045aSAlvaro Karsz } else {
3466699b045aSAlvaro Karsz ec->rx_max_coalesced_frames = 1;
34670c465be1SJason Wang
34680c465be1SJason Wang if (vi->sq[0].napi.weight)
34690c465be1SJason Wang ec->tx_max_coalesced_frames = 1;
3470699b045aSAlvaro Karsz }
34710c465be1SJason Wang
34720c465be1SJason Wang return 0;
34730c465be1SJason Wang }
34740c465be1SJason Wang
3475394bd877SGavin Li static int virtnet_set_per_queue_coalesce(struct net_device *dev,
3476394bd877SGavin Li u32 queue,
3477394bd877SGavin Li struct ethtool_coalesce *ec)
3478394bd877SGavin Li {
3479394bd877SGavin Li struct virtnet_info *vi = netdev_priv(dev);
3480394bd877SGavin Li int ret, napi_weight;
3481394bd877SGavin Li bool update_napi = false;
3482394bd877SGavin Li
3483394bd877SGavin Li if (queue >= vi->max_queue_pairs)
3484394bd877SGavin Li return -EINVAL;
3485394bd877SGavin Li
3486394bd877SGavin Li 	/* Can't change NAPI weight while the interface is up */
3487394bd877SGavin Li napi_weight = ec->tx_max_coalesced_frames ? NAPI_POLL_WEIGHT : 0;
3488394bd877SGavin Li ret = virtnet_should_update_vq_weight(dev->flags, napi_weight,
3489394bd877SGavin Li vi->sq[queue].napi.weight,
3490394bd877SGavin Li &update_napi);
3491394bd877SGavin Li if (ret)
3492394bd877SGavin Li return ret;
3493394bd877SGavin Li
3494394bd877SGavin Li if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL))
3495394bd877SGavin Li ret = virtnet_send_notf_coal_vq_cmds(vi, ec, queue);
3496394bd877SGavin Li else
3497394bd877SGavin Li ret = virtnet_coal_params_supported(ec);
3498394bd877SGavin Li
3499394bd877SGavin Li if (ret)
3500394bd877SGavin Li return ret;
3501394bd877SGavin Li
3502394bd877SGavin Li if (update_napi)
3503394bd877SGavin Li vi->sq[queue].napi.weight = napi_weight;
3504394bd877SGavin Li
3505394bd877SGavin Li return 0;
3506394bd877SGavin Li }
3507394bd877SGavin Li
3508394bd877SGavin Li static int virtnet_get_per_queue_coalesce(struct net_device *dev,
3509394bd877SGavin Li u32 queue,
3510394bd877SGavin Li struct ethtool_coalesce *ec)
3511394bd877SGavin Li {
3512394bd877SGavin Li struct virtnet_info *vi = netdev_priv(dev);
3513394bd877SGavin Li
3514394bd877SGavin Li if (queue >= vi->max_queue_pairs)
3515394bd877SGavin Li return -EINVAL;
3516394bd877SGavin Li
3517394bd877SGavin Li if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL)) {
3518394bd877SGavin Li ec->rx_coalesce_usecs = vi->rq[queue].intr_coal.max_usecs;
3519394bd877SGavin Li ec->tx_coalesce_usecs = vi->sq[queue].intr_coal.max_usecs;
3520394bd877SGavin Li ec->tx_max_coalesced_frames = vi->sq[queue].intr_coal.max_packets;
3521394bd877SGavin Li ec->rx_max_coalesced_frames = vi->rq[queue].intr_coal.max_packets;
3522394bd877SGavin Li } else {
3523394bd877SGavin Li ec->rx_max_coalesced_frames = 1;
3524394bd877SGavin Li
352584a056f7SHeng Qi if (vi->sq[queue].napi.weight)
3526394bd877SGavin Li ec->tx_max_coalesced_frames = 1;
3527394bd877SGavin Li }
3528394bd877SGavin Li
3529394bd877SGavin Li return 0;
3530394bd877SGavin Li }
3531394bd877SGavin Li
353216032be5SNikolay Aleksandrov static void virtnet_init_settings(struct net_device *dev)
353316032be5SNikolay Aleksandrov {
353416032be5SNikolay Aleksandrov struct virtnet_info *vi = netdev_priv(dev);
353516032be5SNikolay Aleksandrov
353616032be5SNikolay Aleksandrov vi->speed = SPEED_UNKNOWN;
353716032be5SNikolay Aleksandrov vi->duplex = DUPLEX_UNKNOWN;
353816032be5SNikolay Aleksandrov }
353916032be5SNikolay Aleksandrov
3540faa9b39fSJason Baron static void virtnet_update_settings(struct virtnet_info *vi)
3541faa9b39fSJason Baron {
3542faa9b39fSJason Baron u32 speed;
3543faa9b39fSJason Baron u8 duplex;
3544faa9b39fSJason Baron
3545faa9b39fSJason Baron if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_SPEED_DUPLEX))
3546faa9b39fSJason Baron return;
3547faa9b39fSJason Baron
354864ffa39dSMichael S. Tsirkin virtio_cread_le(vi->vdev, struct virtio_net_config, speed, &speed);
354964ffa39dSMichael S. Tsirkin
3550faa9b39fSJason Baron if (ethtool_validate_speed(speed))
3551faa9b39fSJason Baron vi->speed = speed;
355264ffa39dSMichael S. Tsirkin
355364ffa39dSMichael S. Tsirkin virtio_cread_le(vi->vdev, struct virtio_net_config, duplex, &duplex);
355464ffa39dSMichael S. Tsirkin
3555faa9b39fSJason Baron if (ethtool_validate_duplex(duplex))
3556faa9b39fSJason Baron vi->duplex = duplex;
3557faa9b39fSJason Baron }
3558faa9b39fSJason Baron
3559c7114b12SAndrew Melnychenko static u32 virtnet_get_rxfh_key_size(struct net_device *dev)
3560c7114b12SAndrew Melnychenko {
3561c7114b12SAndrew Melnychenko return ((struct virtnet_info *)netdev_priv(dev))->rss_key_size;
3562c7114b12SAndrew Melnychenko }
3563c7114b12SAndrew Melnychenko
3564c7114b12SAndrew Melnychenko static u32 virtnet_get_rxfh_indir_size(struct net_device *dev)
3565c7114b12SAndrew Melnychenko {
3566c7114b12SAndrew Melnychenko return ((struct virtnet_info *)netdev_priv(dev))->rss_indir_table_size;
3567c7114b12SAndrew Melnychenko }
3568c7114b12SAndrew Melnychenko
3569c7114b12SAndrew Melnychenko static int virtnet_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, u8 *hfunc)
3570c7114b12SAndrew Melnychenko {
3571c7114b12SAndrew Melnychenko struct virtnet_info *vi = netdev_priv(dev);
3572c7114b12SAndrew Melnychenko int i;
3573c7114b12SAndrew Melnychenko
3574c7114b12SAndrew Melnychenko if (indir) {
3575c7114b12SAndrew Melnychenko for (i = 0; i < vi->rss_indir_table_size; ++i)
3576c7114b12SAndrew Melnychenko indir[i] = vi->ctrl->rss.indirection_table[i];
3577c7114b12SAndrew Melnychenko }
3578c7114b12SAndrew Melnychenko
3579c7114b12SAndrew Melnychenko if (key)
3580c7114b12SAndrew Melnychenko memcpy(key, vi->ctrl->rss.key, vi->rss_key_size);
3581c7114b12SAndrew Melnychenko
3582c7114b12SAndrew Melnychenko if (hfunc)
3583c7114b12SAndrew Melnychenko *hfunc = ETH_RSS_HASH_TOP;
3584c7114b12SAndrew Melnychenko
3585c7114b12SAndrew Melnychenko return 0;
3586c7114b12SAndrew Melnychenko }
3587c7114b12SAndrew Melnychenko
3588c7114b12SAndrew Melnychenko static int virtnet_set_rxfh(struct net_device *dev, const u32 *indir, const u8 *key, const u8 hfunc)
3589c7114b12SAndrew Melnychenko {
3590c7114b12SAndrew Melnychenko struct virtnet_info *vi = netdev_priv(dev);
359143a71c1bSBreno Leitao bool update = false;
3592c7114b12SAndrew Melnychenko int i;
3593c7114b12SAndrew Melnychenko
3594c7114b12SAndrew Melnychenko if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
3595c7114b12SAndrew Melnychenko return -EOPNOTSUPP;
3596c7114b12SAndrew Melnychenko
3597c7114b12SAndrew Melnychenko if (indir) {
359843a71c1bSBreno Leitao if (!vi->has_rss)
359943a71c1bSBreno Leitao return -EOPNOTSUPP;
360043a71c1bSBreno Leitao
3601c7114b12SAndrew Melnychenko for (i = 0; i < vi->rss_indir_table_size; ++i)
3602c7114b12SAndrew Melnychenko vi->ctrl->rss.indirection_table[i] = indir[i];
360343a71c1bSBreno Leitao update = true;
3604c7114b12SAndrew Melnychenko }
360543a71c1bSBreno Leitao if (key) {
360643a71c1bSBreno Leitao 		/* If either _F_HASH_REPORT or _F_RSS is negotiated, the
360743a71c1bSBreno Leitao * device provides hash calculation capabilities, that is,
360843a71c1bSBreno Leitao * hash_key is configured.
360943a71c1bSBreno Leitao */
361043a71c1bSBreno Leitao if (!vi->has_rss && !vi->has_rss_hash_report)
361143a71c1bSBreno Leitao return -EOPNOTSUPP;
3612c7114b12SAndrew Melnychenko
361343a71c1bSBreno Leitao memcpy(vi->ctrl->rss.key, key, vi->rss_key_size);
361443a71c1bSBreno Leitao update = true;
361543a71c1bSBreno Leitao }
361643a71c1bSBreno Leitao
361743a71c1bSBreno Leitao if (update)
3618c7114b12SAndrew Melnychenko virtnet_commit_rss_command(vi);
3619c7114b12SAndrew Melnychenko
3620c7114b12SAndrew Melnychenko return 0;
3621c7114b12SAndrew Melnychenko }
3622c7114b12SAndrew Melnychenko
3623c7114b12SAndrew Melnychenko static int virtnet_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info, u32 *rule_locs)
3624c7114b12SAndrew Melnychenko {
3625c7114b12SAndrew Melnychenko struct virtnet_info *vi = netdev_priv(dev);
3626c7114b12SAndrew Melnychenko int rc = 0;
3627c7114b12SAndrew Melnychenko
3628c7114b12SAndrew Melnychenko switch (info->cmd) {
3629c7114b12SAndrew Melnychenko case ETHTOOL_GRXRINGS:
3630c7114b12SAndrew Melnychenko info->data = vi->curr_queue_pairs;
3631c7114b12SAndrew Melnychenko break;
3632c1170820SAndrew Melnychenko case ETHTOOL_GRXFH:
3633c1170820SAndrew Melnychenko virtnet_get_hashflow(vi, info);
3634c1170820SAndrew Melnychenko break;
3635c1170820SAndrew Melnychenko default:
3636c1170820SAndrew Melnychenko rc = -EOPNOTSUPP;
3637c1170820SAndrew Melnychenko }
3638c1170820SAndrew Melnychenko
3639c1170820SAndrew Melnychenko return rc;
3640c1170820SAndrew Melnychenko }
3641c1170820SAndrew Melnychenko
3642c1170820SAndrew Melnychenko static int virtnet_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info)
3643c1170820SAndrew Melnychenko {
3644c1170820SAndrew Melnychenko struct virtnet_info *vi = netdev_priv(dev);
3645c1170820SAndrew Melnychenko int rc = 0;
3646c1170820SAndrew Melnychenko
3647c1170820SAndrew Melnychenko switch (info->cmd) {
3648c1170820SAndrew Melnychenko case ETHTOOL_SRXFH:
3649c1170820SAndrew Melnychenko if (!virtnet_set_hashflow(vi, info))
3650c1170820SAndrew Melnychenko rc = -EINVAL;
3651c1170820SAndrew Melnychenko
3652c1170820SAndrew Melnychenko break;
3653c7114b12SAndrew Melnychenko default:
3654c7114b12SAndrew Melnychenko rc = -EOPNOTSUPP;
3655c7114b12SAndrew Melnychenko }
3656c7114b12SAndrew Melnychenko
3657c7114b12SAndrew Melnychenko return rc;
3658c7114b12SAndrew Melnychenko }
3659c7114b12SAndrew Melnychenko
36600fc0b732SStephen Hemminger static const struct ethtool_ops virtnet_ethtool_ops = {
3661699b045aSAlvaro Karsz .supported_coalesce_params = ETHTOOL_COALESCE_MAX_FRAMES |
3662699b045aSAlvaro Karsz ETHTOOL_COALESCE_USECS,
366366846048SRick Jones .get_drvinfo = virtnet_get_drvinfo,
36649f4d26d0SMark McLoughlin .get_link = ethtool_op_get_link,
36658f9f4668SRick Jones .get_ringparam = virtnet_get_ringparam,
3666a335b33fSXuan Zhuo .set_ringparam = virtnet_set_ringparam,
3667d7dfc5cfSToshiaki Makita .get_strings = virtnet_get_strings,
3668d7dfc5cfSToshiaki Makita .get_sset_count = virtnet_get_sset_count,
3669d7dfc5cfSToshiaki Makita .get_ethtool_stats = virtnet_get_ethtool_stats,
3670d73bcd2cSJason Wang .set_channels = virtnet_set_channels,
3671d73bcd2cSJason Wang .get_channels = virtnet_get_channels,
3672074c3582SJacob Keller .get_ts_info = ethtool_op_get_ts_info,
3673ebb6b4b1SPhilippe Reynes .get_link_ksettings = virtnet_get_link_ksettings,
3674ebb6b4b1SPhilippe Reynes .set_link_ksettings = virtnet_set_link_ksettings,
36750c465be1SJason Wang .set_coalesce = virtnet_set_coalesce,
36760c465be1SJason Wang .get_coalesce = virtnet_get_coalesce,
3677394bd877SGavin Li .set_per_queue_coalesce = virtnet_set_per_queue_coalesce,
3678394bd877SGavin Li .get_per_queue_coalesce = virtnet_get_per_queue_coalesce,
3679c7114b12SAndrew Melnychenko .get_rxfh_key_size = virtnet_get_rxfh_key_size,
3680c7114b12SAndrew Melnychenko .get_rxfh_indir_size = virtnet_get_rxfh_indir_size,
3681c7114b12SAndrew Melnychenko .get_rxfh = virtnet_get_rxfh,
3682c7114b12SAndrew Melnychenko .set_rxfh = virtnet_set_rxfh,
3683c7114b12SAndrew Melnychenko .get_rxnfc = virtnet_get_rxnfc,
3684c1170820SAndrew Melnychenko .set_rxnfc = virtnet_set_rxnfc,
3685a9ea3fc6SHerbert Xu };
3686a9ea3fc6SHerbert Xu
36879fe7bfceSJohn Fastabend static void virtnet_freeze_down(struct virtio_device *vdev)
36889fe7bfceSJohn Fastabend {
36899fe7bfceSJohn Fastabend struct virtnet_info *vi = vdev->priv;
36909fe7bfceSJohn Fastabend
36919fe7bfceSJohn Fastabend /* Make sure no work handler is accessing the device */
36929fe7bfceSJohn Fastabend flush_work(&vi->config_work);
36939fe7bfceSJohn Fastabend
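	/* Detach under the xmit lock so the state change cannot race with
	 * transmit-side checks of the device-present bit.
	 */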
369405c998b7SAke Koomsin netif_tx_lock_bh(vi->dev);
36959fe7bfceSJohn Fastabend netif_device_detach(vi->dev);
369605c998b7SAke Koomsin netif_tx_unlock_bh(vi->dev);
36978af52fe9SStephan Gerhold if (netif_running(vi->dev))
36988af52fe9SStephan Gerhold virtnet_close(vi->dev);
36999fe7bfceSJohn Fastabend }
37009fe7bfceSJohn Fastabend
37019fe7bfceSJohn Fastabend static int init_vqs(struct virtnet_info *vi);
37029fe7bfceSJohn Fastabend
37039fe7bfceSJohn Fastabend static int virtnet_restore_up(struct virtio_device *vdev)
37049fe7bfceSJohn Fastabend {
37059fe7bfceSJohn Fastabend struct virtnet_info *vi = vdev->priv;
37068af52fe9SStephan Gerhold int err;
37079fe7bfceSJohn Fastabend
37089fe7bfceSJohn Fastabend err = init_vqs(vi);
37099fe7bfceSJohn Fastabend if (err)
37109fe7bfceSJohn Fastabend return err;
37119fe7bfceSJohn Fastabend
37129fe7bfceSJohn Fastabend virtio_device_ready(vdev);
37139fe7bfceSJohn Fastabend
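	/* Re-enable deferred RX refills (disabled across freeze) before the
	 * queues are reopened below.
	 */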
37145a159128SJason Wang enable_delayed_refill(vi);
37155a159128SJason Wang
37169fe7bfceSJohn Fastabend if (netif_running(vi->dev)) {
37178af52fe9SStephan Gerhold err = virtnet_open(vi->dev);
37188af52fe9SStephan Gerhold if (err)
37198af52fe9SStephan Gerhold return err;
37209fe7bfceSJohn Fastabend }
37219fe7bfceSJohn Fastabend
372205c998b7SAke Koomsin netif_tx_lock_bh(vi->dev);
37239fe7bfceSJohn Fastabend netif_device_attach(vi->dev);
372405c998b7SAke Koomsin netif_tx_unlock_bh(vi->dev);
37259fe7bfceSJohn Fastabend return err;
37269fe7bfceSJohn Fastabend }
37279fe7bfceSJohn Fastabend
37283f93522fSJason Wang static int virtnet_set_guest_offloads(struct virtnet_info *vi, u64 offloads)
37293f93522fSJason Wang {
37303f93522fSJason Wang struct scatterlist sg;
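	/* Buffers handed to the control virtqueue must not live on the
	 * stack, so the offload word is staged in vi->ctrl.
	 */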
373112e57169SMichael S. Tsirkin vi->ctrl->offloads = cpu_to_virtio64(vi->vdev, offloads);
37323f93522fSJason Wang
373312e57169SMichael S. Tsirkin sg_init_one(&sg, &vi->ctrl->offloads, sizeof(vi->ctrl->offloads));
37343f93522fSJason Wang
37353f93522fSJason Wang if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_GUEST_OFFLOADS,
37363f93522fSJason Wang VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET, &sg)) {
37373f93522fSJason Wang 		dev_warn(&vi->dev->dev, "Failed to set guest offloads.\n");
37383f93522fSJason Wang return -EINVAL;
37393f93522fSJason Wang }
37403f93522fSJason Wang
37413f93522fSJason Wang return 0;
37423f93522fSJason Wang }
37433f93522fSJason Wang
37443f93522fSJason Wang static int virtnet_clear_guest_offloads(struct virtnet_info *vi)
37453f93522fSJason Wang {
37463f93522fSJason Wang u64 offloads = 0;
37473f93522fSJason Wang
37483f93522fSJason Wang if (!vi->guest_offloads)
37493f93522fSJason Wang return 0;
37503f93522fSJason Wang
37513f93522fSJason Wang return virtnet_set_guest_offloads(vi, offloads);
37523f93522fSJason Wang }
37533f93522fSJason Wang
37543f93522fSJason Wang static int virtnet_restore_guest_offloads(struct virtnet_info *vi)
37553f93522fSJason Wang {
37563f93522fSJason Wang u64 offloads = vi->guest_offloads;
37573f93522fSJason Wang
37583f93522fSJason Wang if (!vi->guest_offloads)
37593f93522fSJason Wang return 0;
37603f93522fSJason Wang
37613f93522fSJason Wang return virtnet_set_guest_offloads(vi, offloads);
37623f93522fSJason Wang }
37633f93522fSJason Wang
37649861ce03SJakub Kicinski static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
37659861ce03SJakub Kicinski struct netlink_ext_ack *extack)
3766f600b690SJohn Fastabend {
3767e814b958SHeng Qi unsigned int room = SKB_DATA_ALIGN(VIRTIO_XDP_HEADROOM +
3768e814b958SHeng Qi sizeof(struct skb_shared_info));
3769e814b958SHeng Qi unsigned int max_sz = PAGE_SIZE - room - ETH_HLEN;
3770f600b690SJohn Fastabend struct virtnet_info *vi = netdev_priv(dev);
3771f600b690SJohn Fastabend struct bpf_prog *old_prog;
3772017b29c3SJason Wang u16 xdp_qp = 0, curr_qp;
3773672aafd5SJohn Fastabend int i, err;
3774f600b690SJohn Fastabend
37753f93522fSJason Wang if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS)
37763f93522fSJason Wang && (virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO4) ||
377792502fe8SJason Wang virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO6) ||
377892502fe8SJason Wang virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_ECN) ||
377918ba58e1SJason Wang virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_UFO) ||
3780418044e1SAndrew Melnychenko virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_CSUM) ||
3781418044e1SAndrew Melnychenko virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_USO4) ||
3782418044e1SAndrew Melnychenko virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_USO6))) {
3783dbcf24d1SJason Wang NL_SET_ERR_MSG_MOD(extack, "Can't set XDP while host is implementing GRO_HW/CSUM, disable GRO_HW/CSUM first");
3784f600b690SJohn Fastabend return -EOPNOTSUPP;
3785f600b690SJohn Fastabend }
3786f600b690SJohn Fastabend
3787f600b690SJohn Fastabend if (vi->mergeable_rx_bufs && !vi->any_header_sg) {
37884d463c4dSDaniel Borkmann NL_SET_ERR_MSG_MOD(extack, "XDP expects header/data in single page, any_header_sg required");
3789f600b690SJohn Fastabend return -EINVAL;
3790f600b690SJohn Fastabend }
3791f600b690SJohn Fastabend
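	/* Single-buffer XDP (no frag support in the program) must fit
	 * MTU + headroom + skb_shared_info within one page.
	 */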
37928d9bc36dSHeng Qi if (prog && !prog->aux->xdp_has_frags && dev->mtu > max_sz) {
37938d9bc36dSHeng Qi NL_SET_ERR_MSG_MOD(extack, "MTU too large to enable XDP without frags");
37948d9bc36dSHeng Qi netdev_warn(dev, "single-buffer XDP requires MTU less than %u\n", max_sz);
3795f600b690SJohn Fastabend return -EINVAL;
3796f600b690SJohn Fastabend }
3797f600b690SJohn Fastabend
3798672aafd5SJohn Fastabend curr_qp = vi->curr_queue_pairs - vi->xdp_queue_pairs;
3799672aafd5SJohn Fastabend if (prog)
3800672aafd5SJohn Fastabend xdp_qp = nr_cpu_ids;
3801672aafd5SJohn Fastabend
3802672aafd5SJohn Fastabend /* XDP requires extra queues for XDP_TX */
3803672aafd5SJohn Fastabend if (curr_qp + xdp_qp > vi->max_queue_pairs) {
38049ce4e3d6SXuan Zhuo 		netdev_warn_once(dev, "XDP requests %i queues but max is %i. XDP_TX and XDP_REDIRECT will operate in a slower locked tx mode.\n",
3805672aafd5SJohn Fastabend curr_qp + xdp_qp, vi->max_queue_pairs);
380697c2c69eSXuan Zhuo xdp_qp = 0;
3807672aafd5SJohn Fastabend }
3808672aafd5SJohn Fastabend
380903aa6d34SToshiaki Makita old_prog = rtnl_dereference(vi->rq[0].xdp_prog);
381003aa6d34SToshiaki Makita if (!prog && !old_prog)
381103aa6d34SToshiaki Makita return 0;
381203aa6d34SToshiaki Makita
381385192dbfSAndrii Nakryiko if (prog)
381485192dbfSAndrii Nakryiko bpf_prog_add(prog, vi->max_queue_pairs - 1);
38152de2f7f4SJohn Fastabend
38164941d472SJason Wang /* Make sure NAPI is not using any XDP TX queues for RX. */
3817534da5e8SToshiaki Makita if (netif_running(dev)) {
3818534da5e8SToshiaki Makita for (i = 0; i < vi->max_queue_pairs; i++) {
38194941d472SJason Wang napi_disable(&vi->rq[i].napi);
3820534da5e8SToshiaki Makita virtnet_napi_tx_disable(&vi->sq[i].napi);
3821534da5e8SToshiaki Makita }
3822534da5e8SToshiaki Makita }
38232de2f7f4SJohn Fastabend
382403aa6d34SToshiaki Makita if (!prog) {
382503aa6d34SToshiaki Makita for (i = 0; i < vi->max_queue_pairs; i++) {
382603aa6d34SToshiaki Makita rcu_assign_pointer(vi->rq[i].xdp_prog, prog);
382703aa6d34SToshiaki Makita if (i == 0)
382803aa6d34SToshiaki Makita virtnet_restore_guest_offloads(vi);
382903aa6d34SToshiaki Makita }
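		/* Let in-flight RCU readers of the old XDP program drain
		 * before the queue count is changed below.
		 */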
383003aa6d34SToshiaki Makita synchronize_net();
383103aa6d34SToshiaki Makita }
383203aa6d34SToshiaki Makita
38334941d472SJason Wang err = _virtnet_set_queues(vi, curr_qp + xdp_qp);
38344941d472SJason Wang if (err)
38354941d472SJason Wang goto err;
3836188313c1SToshiaki Makita netif_set_real_num_rx_queues(dev, curr_qp + xdp_qp);
38374941d472SJason Wang vi->xdp_queue_pairs = xdp_qp;
3838f600b690SJohn Fastabend
383903aa6d34SToshiaki Makita if (prog) {
384097c2c69eSXuan Zhuo vi->xdp_enabled = true;
3841f600b690SJohn Fastabend for (i = 0; i < vi->max_queue_pairs; i++) {
3842f600b690SJohn Fastabend rcu_assign_pointer(vi->rq[i].xdp_prog, prog);
384303aa6d34SToshiaki Makita if (i == 0 && !old_prog)
38443f93522fSJason Wang virtnet_clear_guest_offloads(vi);
38453f93522fSJason Wang }
384666c0e13aSMarek Majtyka if (!old_prog)
384730bbf891SLorenzo Bianconi xdp_features_set_redirect_target(dev, true);
384897c2c69eSXuan Zhuo } else {
384966c0e13aSMarek Majtyka xdp_features_clear_redirect_target(dev);
385097c2c69eSXuan Zhuo vi->xdp_enabled = false;
385103aa6d34SToshiaki Makita }
385203aa6d34SToshiaki Makita
385303aa6d34SToshiaki Makita for (i = 0; i < vi->max_queue_pairs; i++) {
3854f600b690SJohn Fastabend if (old_prog)
3855f600b690SJohn Fastabend bpf_prog_put(old_prog);
3856534da5e8SToshiaki Makita if (netif_running(dev)) {
38574941d472SJason Wang virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
3858534da5e8SToshiaki Makita virtnet_napi_tx_enable(vi, vi->sq[i].vq,
3859534da5e8SToshiaki Makita &vi->sq[i].napi);
3860534da5e8SToshiaki Makita }
3861f600b690SJohn Fastabend }
3862f600b690SJohn Fastabend
3863f600b690SJohn Fastabend return 0;
38642de2f7f4SJohn Fastabend
38654941d472SJason Wang err:
386603aa6d34SToshiaki Makita if (!prog) {
386703aa6d34SToshiaki Makita virtnet_clear_guest_offloads(vi);
38684941d472SJason Wang for (i = 0; i < vi->max_queue_pairs; i++)
386903aa6d34SToshiaki Makita rcu_assign_pointer(vi->rq[i].xdp_prog, old_prog);
387003aa6d34SToshiaki Makita }
387103aa6d34SToshiaki Makita
38728be4d9a4SToshiaki Makita if (netif_running(dev)) {
3873534da5e8SToshiaki Makita for (i = 0; i < vi->max_queue_pairs; i++) {
38744941d472SJason Wang virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
3875534da5e8SToshiaki Makita virtnet_napi_tx_enable(vi, vi->sq[i].vq,
3876534da5e8SToshiaki Makita &vi->sq[i].napi);
3877534da5e8SToshiaki Makita }
38788be4d9a4SToshiaki Makita }
38792de2f7f4SJohn Fastabend if (prog)
38802de2f7f4SJohn Fastabend bpf_prog_sub(prog, vi->max_queue_pairs - 1);
38812de2f7f4SJohn Fastabend return err;
3882f600b690SJohn Fastabend }
3883f600b690SJohn Fastabend
3884f4e63525SJakub Kicinski static int virtnet_xdp(struct net_device *dev, struct netdev_bpf *xdp)
3885f600b690SJohn Fastabend {
3886f600b690SJohn Fastabend switch (xdp->command) {
3887f600b690SJohn Fastabend case XDP_SETUP_PROG:
38889861ce03SJakub Kicinski return virtnet_xdp_set(dev, xdp->prog, xdp->extack);
3889f600b690SJohn Fastabend default:
3890f600b690SJohn Fastabend return -EINVAL;
3891f600b690SJohn Fastabend }
3892f600b690SJohn Fastabend }
3893f600b690SJohn Fastabend
3894ba5e4426SSridhar Samudrala static int virtnet_get_phys_port_name(struct net_device *dev, char *buf,
3895ba5e4426SSridhar Samudrala size_t len)
3896ba5e4426SSridhar Samudrala {
3897ba5e4426SSridhar Samudrala struct virtnet_info *vi = netdev_priv(dev);
3898ba5e4426SSridhar Samudrala int ret;
3899ba5e4426SSridhar Samudrala
3900ba5e4426SSridhar Samudrala if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_STANDBY))
3901ba5e4426SSridhar Samudrala return -EOPNOTSUPP;
3902ba5e4426SSridhar Samudrala
3903ba5e4426SSridhar Samudrala ret = snprintf(buf, len, "sby");
3904ba5e4426SSridhar Samudrala if (ret >= len)
3905ba5e4426SSridhar Samudrala return -EOPNOTSUPP;
3906ba5e4426SSridhar Samudrala
3907ba5e4426SSridhar Samudrala return 0;
3908ba5e4426SSridhar Samudrala }
3909ba5e4426SSridhar Samudrala
3910a02e8964SWillem de Bruijn static int virtnet_set_features(struct net_device *dev,
3911a02e8964SWillem de Bruijn netdev_features_t features)
3912a02e8964SWillem de Bruijn {
3913a02e8964SWillem de Bruijn struct virtnet_info *vi = netdev_priv(dev);
3914cf8691cbSMichael S. Tsirkin u64 offloads;
3915a02e8964SWillem de Bruijn int err;
3916a02e8964SWillem de Bruijn
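	/* NETIF_F_GRO_HW is implemented via guest offloads, which an
	 * attached XDP program owns; hence the -EBUSY below.
	 */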
3917dbcf24d1SJason Wang if ((dev->features ^ features) & NETIF_F_GRO_HW) {
391897c2c69eSXuan Zhuo if (vi->xdp_enabled)
3919a02e8964SWillem de Bruijn return -EBUSY;
3920a02e8964SWillem de Bruijn
3921dbcf24d1SJason Wang if (features & NETIF_F_GRO_HW)
3922cf8691cbSMichael S. Tsirkin offloads = vi->guest_offloads_capable;
3923a02e8964SWillem de Bruijn else
3924cf8691cbSMichael S. Tsirkin offloads = vi->guest_offloads_capable &
3925dbcf24d1SJason Wang ~GUEST_OFFLOAD_GRO_HW_MASK;
3926a02e8964SWillem de Bruijn
3927a02e8964SWillem de Bruijn err = virtnet_set_guest_offloads(vi, offloads);
3928a02e8964SWillem de Bruijn if (err)
3929a02e8964SWillem de Bruijn return err;
39303618ad2aSTonghao Zhang vi->guest_offloads = offloads;
3931cf8691cbSMichael S. Tsirkin }
3932cf8691cbSMichael S. Tsirkin
3933c7114b12SAndrew Melnychenko if ((dev->features ^ features) & NETIF_F_RXHASH) {
3934c7114b12SAndrew Melnychenko if (features & NETIF_F_RXHASH)
3935c1170820SAndrew Melnychenko vi->ctrl->rss.hash_types = vi->rss_hash_types_saved;
3936c7114b12SAndrew Melnychenko else
3937c7114b12SAndrew Melnychenko vi->ctrl->rss.hash_types = VIRTIO_NET_HASH_REPORT_NONE;
3938c7114b12SAndrew Melnychenko
3939c7114b12SAndrew Melnychenko if (!virtnet_commit_rss_command(vi))
3940c7114b12SAndrew Melnychenko return -EINVAL;
3941c7114b12SAndrew Melnychenko }
3942c7114b12SAndrew Melnychenko
3943a02e8964SWillem de Bruijn return 0;
3944a02e8964SWillem de Bruijn }
3945a02e8964SWillem de Bruijn
3946a520794bSTony Lu static void virtnet_tx_timeout(struct net_device *dev, unsigned int txqueue)
3947a520794bSTony Lu {
3948a520794bSTony Lu struct virtnet_info *priv = netdev_priv(dev);
3949a520794bSTony Lu struct send_queue *sq = &priv->sq[txqueue];
3950a520794bSTony Lu struct netdev_queue *txq = netdev_get_tx_queue(dev, txqueue);
3951a520794bSTony Lu
3952a520794bSTony Lu u64_stats_update_begin(&sq->stats.syncp);
395327debe3eSEric Dumazet u64_stats_inc(&sq->stats.tx_timeouts);
3954a520794bSTony Lu u64_stats_update_end(&sq->stats.syncp);
3955a520794bSTony Lu
3956a520794bSTony Lu netdev_err(dev, "TX timeout on queue: %u, sq: %s, vq: 0x%x, name: %s, %u usecs ago\n",
3957a520794bSTony Lu txqueue, sq->name, sq->vq->index, sq->vq->name,
39585337824fSEric Dumazet jiffies_to_usecs(jiffies - READ_ONCE(txq->trans_start)));
3959a520794bSTony Lu }
3960a520794bSTony Lu
396176288b4eSStephen Hemminger static const struct net_device_ops virtnet_netdev = {
396276288b4eSStephen Hemminger .ndo_open = virtnet_open,
396376288b4eSStephen Hemminger .ndo_stop = virtnet_close,
396476288b4eSStephen Hemminger .ndo_start_xmit = start_xmit,
396576288b4eSStephen Hemminger .ndo_validate_addr = eth_validate_addr,
39669c46f6d4SAlex Williamson .ndo_set_mac_address = virtnet_set_mac_address,
39672af7698eSAlex Williamson .ndo_set_rx_mode = virtnet_set_rx_mode,
39683fa2a1dfSstephen hemminger .ndo_get_stats64 = virtnet_stats,
39691824a989SAlex Williamson .ndo_vlan_rx_add_vid = virtnet_vlan_rx_add_vid,
39701824a989SAlex Williamson .ndo_vlan_rx_kill_vid = virtnet_vlan_rx_kill_vid,
3971f4e63525SJakub Kicinski .ndo_bpf = virtnet_xdp,
3972186b3c99SJason Wang .ndo_xdp_xmit = virtnet_xdp_xmit,
39732836b4f2SVlad Yasevich .ndo_features_check = passthru_features_check,
3974ba5e4426SSridhar Samudrala .ndo_get_phys_port_name = virtnet_get_phys_port_name,
3975a02e8964SWillem de Bruijn .ndo_set_features = virtnet_set_features,
3976a520794bSTony Lu .ndo_tx_timeout = virtnet_tx_timeout,
397776288b4eSStephen Hemminger };
397876288b4eSStephen Hemminger
3979586d17c5SJason Wang static void virtnet_config_changed_work(struct work_struct *work)
39809f4d26d0SMark McLoughlin {
3981586d17c5SJason Wang struct virtnet_info *vi =
3982586d17c5SJason Wang container_of(work, struct virtnet_info, config_work);
39839f4d26d0SMark McLoughlin u16 v;
39849f4d26d0SMark McLoughlin
3985855e0c52SRusty Russell if (virtio_cread_feature(vi->vdev, VIRTIO_NET_F_STATUS,
3986855e0c52SRusty Russell struct virtio_net_config, status, &v) < 0)
3987507613bfSMichael S. Tsirkin return;
3988586d17c5SJason Wang
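	/* VIRTIO_NET_S_ANNOUNCE asks the guest to advertise its presence
	 * (e.g. gratuitous ARP), typically after live migration.
	 */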
3989586d17c5SJason Wang if (v & VIRTIO_NET_S_ANNOUNCE) {
3990ee89bab1SAmerigo Wang netdev_notify_peers(vi->dev);
3991586d17c5SJason Wang virtnet_ack_link_announce(vi);
3992586d17c5SJason Wang }
39939f4d26d0SMark McLoughlin
39949f4d26d0SMark McLoughlin /* Ignore unknown (future) status bits */
39959f4d26d0SMark McLoughlin v &= VIRTIO_NET_S_LINK_UP;
39969f4d26d0SMark McLoughlin
39979f4d26d0SMark McLoughlin if (vi->status == v)
3998507613bfSMichael S. Tsirkin return;
39999f4d26d0SMark McLoughlin
40009f4d26d0SMark McLoughlin vi->status = v;
40019f4d26d0SMark McLoughlin
40029f4d26d0SMark McLoughlin if (vi->status & VIRTIO_NET_S_LINK_UP) {
4003faa9b39fSJason Baron virtnet_update_settings(vi);
40049f4d26d0SMark McLoughlin netif_carrier_on(vi->dev);
4005986a4f4dSJason Wang netif_tx_wake_all_queues(vi->dev);
40069f4d26d0SMark McLoughlin } else {
40079f4d26d0SMark McLoughlin netif_carrier_off(vi->dev);
4008986a4f4dSJason Wang netif_tx_stop_all_queues(vi->dev);
40099f4d26d0SMark McLoughlin }
40109f4d26d0SMark McLoughlin }
40119f4d26d0SMark McLoughlin
40129f4d26d0SMark McLoughlin static void virtnet_config_changed(struct virtio_device *vdev)
40139f4d26d0SMark McLoughlin {
40149f4d26d0SMark McLoughlin struct virtnet_info *vi = vdev->priv;
40159f4d26d0SMark McLoughlin
40163b07e9caSTejun Heo schedule_work(&vi->config_work);
40179f4d26d0SMark McLoughlin }
40189f4d26d0SMark McLoughlin
4019986a4f4dSJason Wang static void virtnet_free_queues(struct virtnet_info *vi)
4020986a4f4dSJason Wang {
4021d4fb84eeSAndrey Vagin int i;
4022d4fb84eeSAndrey Vagin
4023ab3971b1SJason Wang for (i = 0; i < vi->max_queue_pairs; i++) {
40245198d545SJakub Kicinski __netif_napi_del(&vi->rq[i].napi);
40255198d545SJakub Kicinski __netif_napi_del(&vi->sq[i].napi);
4026ab3971b1SJason Wang }
4027d4fb84eeSAndrey Vagin
40285198d545SJakub Kicinski 	/* We called __netif_napi_del(), so we must respect
4029963abe5cSEric Dumazet 	 * an RCU grace period before freeing vi->rq.
4030963abe5cSEric Dumazet 	 */
4031963abe5cSEric Dumazet synchronize_net();
4032963abe5cSEric Dumazet
4033986a4f4dSJason Wang kfree(vi->rq);
4034986a4f4dSJason Wang kfree(vi->sq);
403512e57169SMichael S. Tsirkin kfree(vi->ctrl);
4036986a4f4dSJason Wang }
4037986a4f4dSJason Wang
403847315329SJohn Fastabend static void _free_receive_bufs(struct virtnet_info *vi)
4039986a4f4dSJason Wang {
4040f600b690SJohn Fastabend struct bpf_prog *old_prog;
4041986a4f4dSJason Wang int i;
4042986a4f4dSJason Wang
4043986a4f4dSJason Wang for (i = 0; i < vi->max_queue_pairs; i++) {
4044986a4f4dSJason Wang while (vi->rq[i].pages)
4045986a4f4dSJason Wang __free_pages(get_a_page(&vi->rq[i], GFP_KERNEL), 0);
4046f600b690SJohn Fastabend
4047f600b690SJohn Fastabend old_prog = rtnl_dereference(vi->rq[i].xdp_prog);
4048f600b690SJohn Fastabend RCU_INIT_POINTER(vi->rq[i].xdp_prog, NULL);
4049f600b690SJohn Fastabend if (old_prog)
4050f600b690SJohn Fastabend bpf_prog_put(old_prog);
4051986a4f4dSJason Wang }
405247315329SJohn Fastabend }
405347315329SJohn Fastabend
405447315329SJohn Fastabend static void free_receive_bufs(struct virtnet_info *vi)
405547315329SJohn Fastabend {
405647315329SJohn Fastabend rtnl_lock();
405747315329SJohn Fastabend _free_receive_bufs(vi);
4058f600b690SJohn Fastabend rtnl_unlock();
4059986a4f4dSJason Wang }
4060986a4f4dSJason Wang
4061fb51879dSMichael Dalton static void free_receive_page_frags(struct virtnet_info *vi)
4062fb51879dSMichael Dalton {
4063fb51879dSMichael Dalton int i;
4064fb51879dSMichael Dalton for (i = 0; i < vi->max_queue_pairs; i++)
4065295525e2SXuan Zhuo if (vi->rq[i].alloc_frag.page) {
4066295525e2SXuan Zhuo if (vi->rq[i].do_dma && vi->rq[i].last_dma)
4067295525e2SXuan Zhuo virtnet_rq_unmap(&vi->rq[i], vi->rq[i].last_dma, 0);
4068fb51879dSMichael Dalton put_page(vi->rq[i].alloc_frag.page);
4069fb51879dSMichael Dalton }
4070295525e2SXuan Zhuo }
4071fb51879dSMichael Dalton
40726e345f8cSXuan Zhuo static void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf)
40736e345f8cSXuan Zhuo {
40746e345f8cSXuan Zhuo if (!is_xdp_frame(buf))
40756e345f8cSXuan Zhuo dev_kfree_skb(buf);
40766e345f8cSXuan Zhuo else
40776e345f8cSXuan Zhuo xdp_return_frame(ptr_to_xdp(buf));
40786e345f8cSXuan Zhuo }
40796e345f8cSXuan Zhuo
4080986a4f4dSJason Wang static void free_unused_bufs(struct virtnet_info *vi)
4081986a4f4dSJason Wang {
4082986a4f4dSJason Wang void *buf;
4083986a4f4dSJason Wang int i;
4084986a4f4dSJason Wang
4085986a4f4dSJason Wang for (i = 0; i < vi->max_queue_pairs; i++) {
4086986a4f4dSJason Wang struct virtqueue *vq = vi->sq[i].vq;
40876e345f8cSXuan Zhuo while ((buf = virtqueue_detach_unused_buf(vq)) != NULL)
40886e345f8cSXuan Zhuo virtnet_sq_free_unused_buf(vq, buf);
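		/* Rings can hold thousands of buffers; yield between queues
		 * to keep this loop preemption-friendly.
		 */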
4089f8bb5104SWenliang Wang cond_resched();
4090986a4f4dSJason Wang }
4091986a4f4dSJason Wang
4092986a4f4dSJason Wang for (i = 0; i < vi->max_queue_pairs; i++) {
40933ffd05c2SXuan Zhuo struct virtqueue *vq = vi->rq[i].vq;
4094295525e2SXuan Zhuo
40953ffd05c2SXuan Zhuo while ((buf = virtqueue_detach_unused_buf(vq)) != NULL)
40963ffd05c2SXuan Zhuo virtnet_rq_unmap_free_buf(vq, buf);
4097f8bb5104SWenliang Wang cond_resched();
4098986a4f4dSJason Wang }
4099ab7db917SMichael Dalton }
4100986a4f4dSJason Wang
4101e9d7417bSJason Wang static void virtnet_del_vqs(struct virtnet_info *vi)
4102e9d7417bSJason Wang {
4103e9d7417bSJason Wang struct virtio_device *vdev = vi->vdev;
4104e9d7417bSJason Wang
4105310974faSPeter Xu virtnet_clean_affinity(vi);
4106986a4f4dSJason Wang
4107e9d7417bSJason Wang vdev->config->del_vqs(vdev);
4108986a4f4dSJason Wang
4109986a4f4dSJason Wang virtnet_free_queues(vi);
4110986a4f4dSJason Wang }
4111986a4f4dSJason Wang
4112d85b758fSMichael S. Tsirkin /* How large should a single buffer be so a queue full of these can fit at
4113d85b758fSMichael S. Tsirkin * least one full packet?
4114d85b758fSMichael S. Tsirkin * Logic below assumes the mergeable buffer header is used.
4115d85b758fSMichael S. Tsirkin */
4116d85b758fSMichael S. Tsirkin static unsigned int mergeable_min_buf_len(struct virtnet_info *vi, struct virtqueue *vq)
4117d85b758fSMichael S. Tsirkin {
4118c1ddc42dSAndrew Melnychenko const unsigned int hdr_len = vi->hdr_len;
4119d85b758fSMichael S. Tsirkin unsigned int rq_size = virtqueue_get_vring_size(vq);
4120d85b758fSMichael S. Tsirkin unsigned int packet_len = vi->big_packets ? IP_MAX_MTU : vi->dev->max_mtu;
4121d85b758fSMichael S. Tsirkin unsigned int buf_len = hdr_len + ETH_HLEN + VLAN_HLEN + packet_len;
4122d85b758fSMichael S. Tsirkin unsigned int min_buf_len = DIV_ROUND_UP(buf_len, rq_size);
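	/* Example (assuming a 1500-byte MTU and a 256-entry ring): buf_len
	 * is roughly 1530, min_buf_len computes to 6, and the result below
	 * is clamped up to GOOD_PACKET_LEN.
	 */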
4123d85b758fSMichael S. Tsirkin
4124f0c3192cSMichael S. Tsirkin return max(max(min_buf_len, hdr_len) - hdr_len,
4125f0c3192cSMichael S. Tsirkin (unsigned int)GOOD_PACKET_LEN);
4126d85b758fSMichael S. Tsirkin }
4127d85b758fSMichael S. Tsirkin
4128986a4f4dSJason Wang static int virtnet_find_vqs(struct virtnet_info *vi)
4129986a4f4dSJason Wang {
4130986a4f4dSJason Wang vq_callback_t **callbacks;
4131986a4f4dSJason Wang struct virtqueue **vqs;
4132986a4f4dSJason Wang const char **names;
413320e81d2cSZhu Yanjun int ret = -ENOMEM;
413420e81d2cSZhu Yanjun int total_vqs;
4135d45b897bSMichael S. Tsirkin bool *ctx;
413620e81d2cSZhu Yanjun u16 i;
4137986a4f4dSJason Wang
4138986a4f4dSJason Wang /* We expect 1 RX virtqueue followed by 1 TX virtqueue, followed by
4139986a4f4dSJason Wang * possible N-1 RX/TX queue pairs used in multiqueue mode, followed by
4140986a4f4dSJason Wang * possible control vq.
4141986a4f4dSJason Wang */
4142986a4f4dSJason Wang total_vqs = vi->max_queue_pairs * 2 +
4143986a4f4dSJason Wang virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ);
4144986a4f4dSJason Wang
4145986a4f4dSJason Wang /* Allocate space for find_vqs parameters */
41466396bb22SKees Cook vqs = kcalloc(total_vqs, sizeof(*vqs), GFP_KERNEL);
4147986a4f4dSJason Wang if (!vqs)
4148986a4f4dSJason Wang goto err_vq;
41496da2ec56SKees Cook callbacks = kmalloc_array(total_vqs, sizeof(*callbacks), GFP_KERNEL);
4150986a4f4dSJason Wang if (!callbacks)
4151986a4f4dSJason Wang goto err_callback;
41526da2ec56SKees Cook names = kmalloc_array(total_vqs, sizeof(*names), GFP_KERNEL);
4153986a4f4dSJason Wang if (!names)
4154986a4f4dSJason Wang goto err_names;
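	/* Per-buffer context is used on RX (the receive path recovers buffer
	 * geometry from it) unless we are in big-packets mode without
	 * mergeable buffers.
	 */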
4155192f68cfSJason Wang if (!vi->big_packets || vi->mergeable_rx_bufs) {
41566396bb22SKees Cook ctx = kcalloc(total_vqs, sizeof(*ctx), GFP_KERNEL);
4157d45b897bSMichael S. Tsirkin if (!ctx)
4158d45b897bSMichael S. Tsirkin goto err_ctx;
4159d45b897bSMichael S. Tsirkin } else {
4160d45b897bSMichael S. Tsirkin ctx = NULL;
4161d45b897bSMichael S. Tsirkin }
4162986a4f4dSJason Wang
4163986a4f4dSJason Wang /* Parameters for control virtqueue, if any */
4164986a4f4dSJason Wang if (vi->has_cvq) {
4165986a4f4dSJason Wang callbacks[total_vqs - 1] = NULL;
4166986a4f4dSJason Wang names[total_vqs - 1] = "control";
4167986a4f4dSJason Wang }
4168986a4f4dSJason Wang
4169986a4f4dSJason Wang /* Allocate/initialize parameters for send/receive virtqueues */
4170986a4f4dSJason Wang for (i = 0; i < vi->max_queue_pairs; i++) {
4171986a4f4dSJason Wang callbacks[rxq2vq(i)] = skb_recv_done;
4172986a4f4dSJason Wang callbacks[txq2vq(i)] = skb_xmit_done;
417320e81d2cSZhu Yanjun sprintf(vi->rq[i].name, "input.%u", i);
417420e81d2cSZhu Yanjun sprintf(vi->sq[i].name, "output.%u", i);
4175986a4f4dSJason Wang names[rxq2vq(i)] = vi->rq[i].name;
4176986a4f4dSJason Wang names[txq2vq(i)] = vi->sq[i].name;
4177d45b897bSMichael S. Tsirkin if (ctx)
4178d45b897bSMichael S. Tsirkin ctx[rxq2vq(i)] = true;
4179986a4f4dSJason Wang }
4180986a4f4dSJason Wang
41812e9ca760SMichael S. Tsirkin ret = virtio_find_vqs_ctx(vi->vdev, total_vqs, vqs, callbacks,
41822e9ca760SMichael S. Tsirkin names, ctx, NULL);
4183986a4f4dSJason Wang if (ret)
4184986a4f4dSJason Wang goto err_find;
4185986a4f4dSJason Wang
4186986a4f4dSJason Wang if (vi->has_cvq) {
4187986a4f4dSJason Wang vi->cvq = vqs[total_vqs - 1];
4188986a4f4dSJason Wang if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VLAN))
4189f646968fSPatrick McHardy vi->dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
4190986a4f4dSJason Wang }
4191986a4f4dSJason Wang
4192986a4f4dSJason Wang for (i = 0; i < vi->max_queue_pairs; i++) {
4193986a4f4dSJason Wang vi->rq[i].vq = vqs[rxq2vq(i)];
4194d85b758fSMichael S. Tsirkin vi->rq[i].min_buf_len = mergeable_min_buf_len(vi, vi->rq[i].vq);
4195986a4f4dSJason Wang vi->sq[i].vq = vqs[txq2vq(i)];
4196986a4f4dSJason Wang }
4197986a4f4dSJason Wang
41982fa3c8a8STonghao Zhang 	/* Reaching here means ret == 0; fall through to free the temporary arrays. */
4199986a4f4dSJason Wang
4200986a4f4dSJason Wang
4201986a4f4dSJason Wang err_find:
4202d45b897bSMichael S. Tsirkin kfree(ctx);
4203d45b897bSMichael S. Tsirkin err_ctx:
4204986a4f4dSJason Wang kfree(names);
4205986a4f4dSJason Wang err_names:
4206986a4f4dSJason Wang kfree(callbacks);
4207986a4f4dSJason Wang err_callback:
4208986a4f4dSJason Wang kfree(vqs);
4209986a4f4dSJason Wang err_vq:
4210986a4f4dSJason Wang return ret;
4211986a4f4dSJason Wang }
4212986a4f4dSJason Wang
4213986a4f4dSJason Wang static int virtnet_alloc_queues(struct virtnet_info *vi)
4214986a4f4dSJason Wang {
4215986a4f4dSJason Wang int i;
4216986a4f4dSJason Wang
4217122b84a1SMax Gurtovoy if (vi->has_cvq) {
421812e57169SMichael S. Tsirkin vi->ctrl = kzalloc(sizeof(*vi->ctrl), GFP_KERNEL);
421912e57169SMichael S. Tsirkin if (!vi->ctrl)
422012e57169SMichael S. Tsirkin goto err_ctrl;
4221122b84a1SMax Gurtovoy } else {
4222122b84a1SMax Gurtovoy vi->ctrl = NULL;
4223122b84a1SMax Gurtovoy }
42246396bb22SKees Cook vi->sq = kcalloc(vi->max_queue_pairs, sizeof(*vi->sq), GFP_KERNEL);
4225986a4f4dSJason Wang if (!vi->sq)
4226986a4f4dSJason Wang goto err_sq;
42276396bb22SKees Cook vi->rq = kcalloc(vi->max_queue_pairs, sizeof(*vi->rq), GFP_KERNEL);
4228008d4278SAmerigo Wang if (!vi->rq)
4229986a4f4dSJason Wang goto err_rq;
4230986a4f4dSJason Wang
4231986a4f4dSJason Wang INIT_DELAYED_WORK(&vi->refill, refill_work);
4232986a4f4dSJason Wang for (i = 0; i < vi->max_queue_pairs; i++) {
4233986a4f4dSJason Wang vi->rq[i].pages = NULL;
4234d484735dSJakub Kicinski netif_napi_add_weight(vi->dev, &vi->rq[i].napi, virtnet_poll,
4235986a4f4dSJason Wang napi_weight);
42368d602e1aSJakub Kicinski netif_napi_add_tx_weight(vi->dev, &vi->sq[i].napi,
42378d602e1aSJakub Kicinski virtnet_poll_tx,
4238b92f1e67SWillem de Bruijn napi_tx ? napi_weight : 0);
4239986a4f4dSJason Wang
4240986a4f4dSJason Wang sg_init_table(vi->rq[i].sg, ARRAY_SIZE(vi->rq[i].sg));
42415377d758SJohannes Berg ewma_pkt_len_init(&vi->rq[i].mrg_avg_pkt_len);
4242986a4f4dSJason Wang sg_init_table(vi->sq[i].sg, ARRAY_SIZE(vi->sq[i].sg));
4243d7dfc5cfSToshiaki Makita
4244d7dfc5cfSToshiaki Makita u64_stats_init(&vi->rq[i].stats.syncp);
4245d7dfc5cfSToshiaki Makita u64_stats_init(&vi->sq[i].stats.syncp);
4246986a4f4dSJason Wang }
4247986a4f4dSJason Wang
4248986a4f4dSJason Wang return 0;
4249986a4f4dSJason Wang
4250986a4f4dSJason Wang err_rq:
4251986a4f4dSJason Wang kfree(vi->sq);
4252986a4f4dSJason Wang err_sq:
425312e57169SMichael S. Tsirkin kfree(vi->ctrl);
425412e57169SMichael S. Tsirkin err_ctrl:
4255986a4f4dSJason Wang return -ENOMEM;
4256e9d7417bSJason Wang }
4257e9d7417bSJason Wang
42583f9c10b0SAmit Shah static int init_vqs(struct virtnet_info *vi)
42593f9c10b0SAmit Shah {
4260986a4f4dSJason Wang int ret;
42613f9c10b0SAmit Shah
4262986a4f4dSJason Wang /* Allocate send & receive queues */
4263986a4f4dSJason Wang ret = virtnet_alloc_queues(vi);
4264986a4f4dSJason Wang if (ret)
4265986a4f4dSJason Wang goto err;
42663f9c10b0SAmit Shah
4267986a4f4dSJason Wang ret = virtnet_find_vqs(vi);
4268986a4f4dSJason Wang if (ret)
4269986a4f4dSJason Wang goto err_free;
42703f9c10b0SAmit Shah
4271295525e2SXuan Zhuo virtnet_rq_set_premapped(vi);
4272295525e2SXuan Zhuo
4273a0d1d0f4SSebastian Andrzej Siewior cpus_read_lock();
42748898c21cSWanlong Gao virtnet_set_affinity(vi);
4275a0d1d0f4SSebastian Andrzej Siewior cpus_read_unlock();
427647be2479SWanlong Gao
42773f9c10b0SAmit Shah return 0;
4278986a4f4dSJason Wang
4279986a4f4dSJason Wang err_free:
4280986a4f4dSJason Wang virtnet_free_queues(vi);
4281986a4f4dSJason Wang err:
4282986a4f4dSJason Wang return ret;
42833f9c10b0SAmit Shah }
42843f9c10b0SAmit Shah
4285fbf28d78SMichael Dalton #ifdef CONFIG_SYSFS
4286fbf28d78SMichael Dalton static ssize_t mergeable_rx_buffer_size_show(struct netdev_rx_queue *queue,
4287718ad681Sstephen hemminger char *buf)
4288fbf28d78SMichael Dalton {
4289fbf28d78SMichael Dalton struct virtnet_info *vi = netdev_priv(queue->dev);
4290fbf28d78SMichael Dalton unsigned int queue_index = get_netdev_rx_queue_index(queue);
42913cc81a9aSJason Wang unsigned int headroom = virtnet_get_headroom(vi);
42923cc81a9aSJason Wang unsigned int tailroom = headroom ? sizeof(struct skb_shared_info) : 0;
42935377d758SJohannes Berg struct ewma_pkt_len *avg;
4294fbf28d78SMichael Dalton
4295fbf28d78SMichael Dalton BUG_ON(queue_index >= vi->max_queue_pairs);
4296fbf28d78SMichael Dalton avg = &vi->rq[queue_index].mrg_avg_pkt_len;
4297d85b758fSMichael S. Tsirkin return sprintf(buf, "%u\n",
42983cc81a9aSJason Wang get_mergeable_buf_len(&vi->rq[queue_index], avg,
42993cc81a9aSJason Wang SKB_DATA_ALIGN(headroom + tailroom)));
4300fbf28d78SMichael Dalton }
4301fbf28d78SMichael Dalton
4302fbf28d78SMichael Dalton static struct rx_queue_attribute mergeable_rx_buffer_size_attribute =
4303fbf28d78SMichael Dalton __ATTR_RO(mergeable_rx_buffer_size);
4304fbf28d78SMichael Dalton
4305fbf28d78SMichael Dalton static struct attribute *virtio_net_mrg_rx_attrs[] = {
4306fbf28d78SMichael Dalton &mergeable_rx_buffer_size_attribute.attr,
4307fbf28d78SMichael Dalton NULL
4308fbf28d78SMichael Dalton };
4309fbf28d78SMichael Dalton
4310fbf28d78SMichael Dalton static const struct attribute_group virtio_net_mrg_rx_group = {
4311fbf28d78SMichael Dalton .name = "virtio_net",
4312fbf28d78SMichael Dalton .attrs = virtio_net_mrg_rx_attrs
4313fbf28d78SMichael Dalton };
4314fbf28d78SMichael Dalton #endif
4315fbf28d78SMichael Dalton
4316892d6eb1SJason Wang static bool virtnet_fail_on_feature(struct virtio_device *vdev,
4317892d6eb1SJason Wang unsigned int fbit,
4318892d6eb1SJason Wang const char *fname, const char *dname)
4319892d6eb1SJason Wang {
4320892d6eb1SJason Wang if (!virtio_has_feature(vdev, fbit))
4321892d6eb1SJason Wang return false;
4322892d6eb1SJason Wang
4323892d6eb1SJason Wang dev_err(&vdev->dev, "device advertises feature %s but not %s",
4324892d6eb1SJason Wang fname, dname);
4325892d6eb1SJason Wang
4326892d6eb1SJason Wang return true;
4327892d6eb1SJason Wang }
4328892d6eb1SJason Wang
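/* Reject devices that advertise control-plane features without
 * VIRTIO_NET_F_CTRL_VQ; #fbit stringifies the feature name for the
 * error message.
 */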
4329892d6eb1SJason Wang #define VIRTNET_FAIL_ON(vdev, fbit, dbit) \
4330892d6eb1SJason Wang virtnet_fail_on_feature(vdev, fbit, #fbit, dbit)
4331892d6eb1SJason Wang
4332892d6eb1SJason Wang static bool virtnet_validate_features(struct virtio_device *vdev)
4333892d6eb1SJason Wang {
4334892d6eb1SJason Wang if (!virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ) &&
4335892d6eb1SJason Wang (VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_CTRL_RX,
4336892d6eb1SJason Wang "VIRTIO_NET_F_CTRL_VQ") ||
4337892d6eb1SJason Wang VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_CTRL_VLAN,
4338892d6eb1SJason Wang "VIRTIO_NET_F_CTRL_VQ") ||
4339892d6eb1SJason Wang VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_GUEST_ANNOUNCE,
4340892d6eb1SJason Wang "VIRTIO_NET_F_CTRL_VQ") ||
4341892d6eb1SJason Wang VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_MQ, "VIRTIO_NET_F_CTRL_VQ") ||
4342892d6eb1SJason Wang VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR,
4343c7114b12SAndrew Melnychenko "VIRTIO_NET_F_CTRL_VQ") ||
4344c7114b12SAndrew Melnychenko VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_RSS,
434591f41f01SAndrew Melnychenko "VIRTIO_NET_F_CTRL_VQ") ||
434691f41f01SAndrew Melnychenko VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_HASH_REPORT,
4347699b045aSAlvaro Karsz "VIRTIO_NET_F_CTRL_VQ") ||
4348699b045aSAlvaro Karsz VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_NOTF_COAL,
43498af3bf66SGavin Li "VIRTIO_NET_F_CTRL_VQ") ||
43508af3bf66SGavin Li VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_VQ_NOTF_COAL,
4351892d6eb1SJason Wang "VIRTIO_NET_F_CTRL_VQ"))) {
4352892d6eb1SJason Wang return false;
4353892d6eb1SJason Wang }
4354892d6eb1SJason Wang
4355892d6eb1SJason Wang return true;
4356892d6eb1SJason Wang }
4357892d6eb1SJason Wang
4358d0c2c997SJarod Wilson #define MIN_MTU ETH_MIN_MTU
4359d0c2c997SJarod Wilson #define MAX_MTU ETH_MAX_MTU
4360d0c2c997SJarod Wilson
4361fe36cbe0SMichael S. Tsirkin static int virtnet_validate(struct virtio_device *vdev)
4362296f96fcSRusty Russell {
43636ba42248SMichael S. Tsirkin if (!vdev->config->get) {
43646ba42248SMichael S. Tsirkin dev_err(&vdev->dev, "%s failure: config access disabled\n",
43656ba42248SMichael S. Tsirkin __func__);
43666ba42248SMichael S. Tsirkin return -EINVAL;
43676ba42248SMichael S. Tsirkin }
43686ba42248SMichael S. Tsirkin
4369892d6eb1SJason Wang if (!virtnet_validate_features(vdev))
4370892d6eb1SJason Wang return -EINVAL;
4371892d6eb1SJason Wang
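/* An advertised MTU below the legal minimum can't be honoured: clear
 * the feature bit so probing continues as if no MTU had been offered,
 * rather than failing outright.
 */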
4372fe36cbe0SMichael S. Tsirkin if (virtio_has_feature(vdev, VIRTIO_NET_F_MTU)) {
4373fe36cbe0SMichael S. Tsirkin int mtu = virtio_cread16(vdev,
4374fe36cbe0SMichael S. Tsirkin offsetof(struct virtio_net_config,
4375fe36cbe0SMichael S. Tsirkin mtu));
4376fe36cbe0SMichael S. Tsirkin if (mtu < MIN_MTU)
4377fe36cbe0SMichael S. Tsirkin __virtio_clear_bit(vdev, VIRTIO_NET_F_MTU);
4378fe36cbe0SMichael S. Tsirkin }
4379fe36cbe0SMichael S. Tsirkin
43807c06458cSLaurent Vivier if (virtio_has_feature(vdev, VIRTIO_NET_F_STANDBY) &&
43817c06458cSLaurent Vivier !virtio_has_feature(vdev, VIRTIO_NET_F_MAC)) {
43827c06458cSLaurent Vivier dev_warn(&vdev->dev, "device advertises feature VIRTIO_NET_F_STANDBY but not VIRTIO_NET_F_MAC, disabling standby");
43837c06458cSLaurent Vivier __virtio_clear_bit(vdev, VIRTIO_NET_F_STANDBY);
43847c06458cSLaurent Vivier }
43857c06458cSLaurent Vivier
4386fe36cbe0SMichael S. Tsirkin return 0;
4387fe36cbe0SMichael S. Tsirkin }
4388fe36cbe0SMichael S. Tsirkin
438946cd26f4SGavin Li static bool virtnet_check_guest_gso(const struct virtnet_info *vi)
439046cd26f4SGavin Li {
439146cd26f4SGavin Li return virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO4) ||
439246cd26f4SGavin Li virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO6) ||
439346cd26f4SGavin Li virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_ECN) ||
4394418044e1SAndrew Melnychenko virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_UFO) ||
4395418044e1SAndrew Melnychenko (virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_USO4) &&
4396418044e1SAndrew Melnychenko virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_USO6));
439746cd26f4SGavin Li }
439846cd26f4SGavin Li
43994959aebbSGavin Li static void virtnet_set_big_packets(struct virtnet_info *vi, const int mtu)
44004959aebbSGavin Li {
44014959aebbSGavin Li bool guest_gso = virtnet_check_guest_gso(vi);
44024959aebbSGavin Li
44034959aebbSGavin Li /* If the device can receive ANY guest GSO packets, regardless of mtu,
44044959aebbSGavin Li * allocate packets of maximum size; otherwise limit them to
44054959aebbSGavin Li * mtu-sized packets only.
44064959aebbSGavin Li */
44074959aebbSGavin Li if (mtu > ETH_DATA_LEN || guest_gso) {
44084959aebbSGavin Li vi->big_packets = true;
44094959aebbSGavin Li vi->big_packets_num_skbfrags = guest_gso ? MAX_SKB_FRAGS : DIV_ROUND_UP(mtu, PAGE_SIZE);
44104959aebbSGavin Li }
44114959aebbSGavin Li }
44124959aebbSGavin Li
4413fe36cbe0SMichael S. Tsirkin static int virtnet_probe(struct virtio_device *vdev)
4414fe36cbe0SMichael S. Tsirkin {
4415d7dfc5cfSToshiaki Makita int i, err = -ENOMEM;
4416fe36cbe0SMichael S. Tsirkin struct net_device *dev;
4417fe36cbe0SMichael S. Tsirkin struct virtnet_info *vi;
4418fe36cbe0SMichael S. Tsirkin u16 max_queue_pairs;
44194959aebbSGavin Li int mtu = 0;
4420fe36cbe0SMichael S. Tsirkin
4421c7114b12SAndrew Melnychenko /* Find out whether the host supports a multiqueue/RSS virtio_net device */
4422c7114b12SAndrew Melnychenko max_queue_pairs = 1;
4423c7114b12SAndrew Melnychenko if (virtio_has_feature(vdev, VIRTIO_NET_F_MQ) || virtio_has_feature(vdev, VIRTIO_NET_F_RSS))
4424c7114b12SAndrew Melnychenko max_queue_pairs =
4425c7114b12SAndrew Melnychenko virtio_cread16(vdev, offsetof(struct virtio_net_config, max_virtqueue_pairs));
4426986a4f4dSJason Wang
4427986a4f4dSJason Wang /* We need at least 2 queues */
4428c7114b12SAndrew Melnychenko if (max_queue_pairs < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN ||
4429986a4f4dSJason Wang max_queue_pairs > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX ||
4430986a4f4dSJason Wang !virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ))
4431986a4f4dSJason Wang max_queue_pairs = 1;
4432296f96fcSRusty Russell
4433296f96fcSRusty Russell /* Allocate ourselves a network device with room for our info */
4434986a4f4dSJason Wang dev = alloc_etherdev_mq(sizeof(struct virtnet_info), max_queue_pairs);
4435296f96fcSRusty Russell if (!dev)
4436296f96fcSRusty Russell return -ENOMEM;
4437296f96fcSRusty Russell
4438296f96fcSRusty Russell /* Set up network device as normal. */
4439ab5bd583SXuan Zhuo dev->priv_flags |= IFF_UNICAST_FLT | IFF_LIVE_ADDR_CHANGE |
4440ab5bd583SXuan Zhuo IFF_TX_SKB_NO_LINEAR;
444176288b4eSStephen Hemminger dev->netdev_ops = &virtnet_netdev;
4442296f96fcSRusty Russell dev->features = NETIF_F_HIGHDMA;
44433fa2a1dfSstephen hemminger
44447ad24ea4SWilfried Klaebe dev->ethtool_ops = &virtnet_ethtool_ops;
4445296f96fcSRusty Russell SET_NETDEV_DEV(dev, &vdev->dev);
4446296f96fcSRusty Russell
4447296f96fcSRusty Russell /* Do we support "hardware" checksums? */
444898e778c9SMichał Mirosław if (virtio_has_feature(vdev, VIRTIO_NET_F_CSUM)) {
4449296f96fcSRusty Russell /* This opens up the world of extra features. */
445048900cb6SJason Wang dev->hw_features |= NETIF_F_HW_CSUM | NETIF_F_SG;
445198e778c9SMichał Mirosław if (csum)
445248900cb6SJason Wang dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG;
445398e778c9SMichał Mirosław
445498e778c9SMichał Mirosław if (virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) {
4455e078de03SDavid S. Miller dev->hw_features |= NETIF_F_TSO
445634a48579SRusty Russell | NETIF_F_TSO_ECN | NETIF_F_TSO6;
445734a48579SRusty Russell }
44585539ae96SRusty Russell /* Individual feature bits: what can host handle? */
445998e778c9SMichał Mirosław if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO4))
446098e778c9SMichał Mirosław dev->hw_features |= NETIF_F_TSO;
446198e778c9SMichał Mirosław if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO6))
446298e778c9SMichał Mirosław dev->hw_features |= NETIF_F_TSO6;
446398e778c9SMichał Mirosław if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_ECN))
446498e778c9SMichał Mirosław dev->hw_features |= NETIF_F_TSO_ECN;
4465418044e1SAndrew Melnychenko if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_USO))
4466418044e1SAndrew Melnychenko dev->hw_features |= NETIF_F_GSO_UDP_L4;
446798e778c9SMichał Mirosław
446841f2f127SJason Wang dev->features |= NETIF_F_GSO_ROBUST;
446941f2f127SJason Wang
447098e778c9SMichał Mirosław if (gso)
4471e078de03SDavid S. Miller dev->features |= dev->hw_features & NETIF_F_ALL_TSO;
447298e778c9SMichał Mirosław /* (!csum && gso) case will be fixed by register_netdev() */
4473296f96fcSRusty Russell }
4474ef609fd7SHeng Qi
4475ef609fd7SHeng Qi /* 1. With VIRTIO_NET_F_GUEST_CSUM negotiation, the driver doesn't
4476ef609fd7SHeng Qi * need to calculate checksums for partially checksummed packets,
4477ef609fd7SHeng Qi * as they're considered valid by the upper layer.
4478ef609fd7SHeng Qi * 2. Without VIRTIO_NET_F_GUEST_CSUM negotiation, the driver only
4479ef609fd7SHeng Qi * receives fully checksummed packets. The device may assist in
4480ef609fd7SHeng Qi * validating these packets' checksums, so the driver won't have to.
4481ef609fd7SHeng Qi */
44824f49129bSThomas Huth dev->features |= NETIF_F_RXCSUM;
4483ef609fd7SHeng Qi
4484a02e8964SWillem de Bruijn if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) ||
4485a02e8964SWillem de Bruijn virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6))
4486dbcf24d1SJason Wang dev->features |= NETIF_F_GRO_HW;
4487cf8691cbSMichael S. Tsirkin if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS))
4488dbcf24d1SJason Wang dev->hw_features |= NETIF_F_GRO_HW;
4489296f96fcSRusty Russell
44904fda8302SJason Wang dev->vlan_features = dev->features;
449166c0e13aSMarek Majtyka dev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT;
44924fda8302SJason Wang
4493d0c2c997SJarod Wilson /* MTU range: 68 - 65535 */
4494d0c2c997SJarod Wilson dev->min_mtu = MIN_MTU;
4495d0c2c997SJarod Wilson dev->max_mtu = MAX_MTU;
4496d0c2c997SJarod Wilson
4497296f96fcSRusty Russell /* Configuration may specify what MAC to use. Otherwise random. */
4498f2edaa4aSJakub Kicinski if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC)) {
4499f2edaa4aSJakub Kicinski u8 addr[ETH_ALEN];
4500f2edaa4aSJakub Kicinski
4501855e0c52SRusty Russell virtio_cread_bytes(vdev,
4502a586d4f6SRusty Russell offsetof(struct virtio_net_config, mac),
4503f2edaa4aSJakub Kicinski addr, ETH_ALEN);
4504f2edaa4aSJakub Kicinski eth_hw_addr_set(dev, addr);
4505f2edaa4aSJakub Kicinski } else {
4506f2cedb63SDanny Kukawka eth_hw_addr_random(dev);
45079f62d221SLaurent Vivier dev_info(&vdev->dev, "Assigned random MAC address %pM\n",
45089f62d221SLaurent Vivier dev->dev_addr);
4509f2edaa4aSJakub Kicinski }
4510296f96fcSRusty Russell
4511296f96fcSRusty Russell /* Set up our device-specific information */
4512296f96fcSRusty Russell vi = netdev_priv(dev);
4513296f96fcSRusty Russell vi->dev = dev;
4514296f96fcSRusty Russell vi->vdev = vdev;
4515d9d5dcc8SChristian Borntraeger vdev->priv = vi;
4516827da44cSJohn Stultz
4517586d17c5SJason Wang INIT_WORK(&vi->config_work, virtnet_config_changed_work);
45185a159128SJason Wang spin_lock_init(&vi->refill_lock);
4519296f96fcSRusty Russell
452030bbf891SLorenzo Bianconi if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF)) {
45213f2c31d9SMark McLoughlin vi->mergeable_rx_bufs = true;
452230bbf891SLorenzo Bianconi dev->xdp_features |= NETDEV_XDP_ACT_RX_SG;
452330bbf891SLorenzo Bianconi }
45243f2c31d9SMark McLoughlin
4525699b045aSAlvaro Karsz if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_NOTF_COAL)) {
4526308d7982SGavin Li vi->intr_coal_rx.max_usecs = 0;
4527308d7982SGavin Li vi->intr_coal_tx.max_usecs = 0;
4528308d7982SGavin Li vi->intr_coal_tx.max_packets = 0;
4529308d7982SGavin Li vi->intr_coal_rx.max_packets = 0;
4530699b045aSAlvaro Karsz }
4531699b045aSAlvaro Karsz
453291f41f01SAndrew Melnychenko if (virtio_has_feature(vdev, VIRTIO_NET_F_HASH_REPORT))
453391f41f01SAndrew Melnychenko vi->has_rss_hash_report = true;
453491f41f01SAndrew Melnychenko
453543a71c1bSBreno Leitao if (virtio_has_feature(vdev, VIRTIO_NET_F_RSS)) {
4536c7114b12SAndrew Melnychenko vi->has_rss = true;
453791f41f01SAndrew Melnychenko
4538c7114b12SAndrew Melnychenko vi->rss_indir_table_size =
4539c7114b12SAndrew Melnychenko virtio_cread16(vdev, offsetof(struct virtio_net_config,
4540c7114b12SAndrew Melnychenko rss_max_indirection_table_length));
454143a71c1bSBreno Leitao }
454243a71c1bSBreno Leitao
454343a71c1bSBreno Leitao if (vi->has_rss || vi->has_rss_hash_report) {
4544c7114b12SAndrew Melnychenko vi->rss_key_size =
4545c7114b12SAndrew Melnychenko virtio_cread8(vdev, offsetof(struct virtio_net_config, rss_max_key_size));
4546c7114b12SAndrew Melnychenko
4547c7114b12SAndrew Melnychenko vi->rss_hash_types_supported =
4548c7114b12SAndrew Melnychenko virtio_cread32(vdev, offsetof(struct virtio_net_config, supported_hash_types));
4549c7114b12SAndrew Melnychenko vi->rss_hash_types_supported &=
4550c7114b12SAndrew Melnychenko ~(VIRTIO_NET_RSS_HASH_TYPE_IP_EX |
4551c7114b12SAndrew Melnychenko VIRTIO_NET_RSS_HASH_TYPE_TCP_EX |
4552c7114b12SAndrew Melnychenko VIRTIO_NET_RSS_HASH_TYPE_UDP_EX);
4553c7114b12SAndrew Melnychenko
4554c7114b12SAndrew Melnychenko dev->hw_features |= NETIF_F_RXHASH;
4555c7114b12SAndrew Melnychenko }
455691f41f01SAndrew Melnychenko
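/* Pick the header the device will prepend to packets: the v1 hash
 * header is the largest, the legacy virtio_net_hdr the smallest.
 */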
455791f41f01SAndrew Melnychenko if (vi->has_rss_hash_report)
455891f41f01SAndrew Melnychenko vi->hdr_len = sizeof(struct virtio_net_hdr_v1_hash);
455991f41f01SAndrew Melnychenko else if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF) ||
4560d04302b3SMichael S. Tsirkin virtio_has_feature(vdev, VIRTIO_F_VERSION_1))
4561012873d0SMichael S. Tsirkin vi->hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf);
4562012873d0SMichael S. Tsirkin else
4563012873d0SMichael S. Tsirkin vi->hdr_len = sizeof(struct virtio_net_hdr);
4564012873d0SMichael S. Tsirkin
456575993300SMichael S. Tsirkin if (virtio_has_feature(vdev, VIRTIO_F_ANY_LAYOUT) ||
456675993300SMichael S. Tsirkin virtio_has_feature(vdev, VIRTIO_F_VERSION_1))
4567e7428e95SMichael S. Tsirkin vi->any_header_sg = true;
4568e7428e95SMichael S. Tsirkin
4569986a4f4dSJason Wang if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ))
4570986a4f4dSJason Wang vi->has_cvq = true;
4571986a4f4dSJason Wang
457214de9d11SAaron Conole if (virtio_has_feature(vdev, VIRTIO_NET_F_MTU)) {
457314de9d11SAaron Conole mtu = virtio_cread16(vdev,
457414de9d11SAaron Conole offsetof(struct virtio_net_config,
457514de9d11SAaron Conole mtu));
457693a205eeSAaron Conole if (mtu < dev->min_mtu) {
4577fe36cbe0SMichael S. Tsirkin /* Should never trigger: MTU was previously validated
4578fe36cbe0SMichael S. Tsirkin * in virtnet_validate.
4579fe36cbe0SMichael S. Tsirkin */
45807934b481SYuval Shaia dev_err(&vdev->dev,
45817934b481SYuval Shaia "device MTU appears to have changed: it is now %d < %d",
45827934b481SYuval Shaia mtu, dev->min_mtu);
4583411ea23aSDan Carpenter err = -EINVAL;
4584d7dfc5cfSToshiaki Makita goto free;
4585fe36cbe0SMichael S. Tsirkin }
4586fe36cbe0SMichael S. Tsirkin
4587d0c2c997SJarod Wilson dev->mtu = mtu;
458893a205eeSAaron Conole dev->max_mtu = mtu;
458914de9d11SAaron Conole }
459014de9d11SAaron Conole
45914959aebbSGavin Li virtnet_set_big_packets(vi, mtu);
45924959aebbSGavin Li
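/* When the device accepts an arbitrary header layout, the virtio header
 * can live in the skb headroom instead of a separate descriptor, so
 * reserve room for it.
 */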
4593012873d0SMichael S. Tsirkin if (vi->any_header_sg)
4594012873d0SMichael S. Tsirkin dev->needed_headroom = vi->hdr_len;
45956ebbc1a6SZhangjie (HZ)
459644900010SJason Wang /* Enable multiqueue by default */
459744900010SJason Wang if (num_online_cpus() >= max_queue_pairs)
459844900010SJason Wang vi->curr_queue_pairs = max_queue_pairs;
459944900010SJason Wang else
460044900010SJason Wang vi->curr_queue_pairs = num_online_cpus();
4601986a4f4dSJason Wang vi->max_queue_pairs = max_queue_pairs;
4602986a4f4dSJason Wang
4603986a4f4dSJason Wang /* Allocate/initialize the rx/tx queues, and invoke find_vqs */
46043f9c10b0SAmit Shah err = init_vqs(vi);
4605d2a7dddaSMichael S. Tsirkin if (err)
4606d7dfc5cfSToshiaki Makita goto free;
4607d2a7dddaSMichael S. Tsirkin
4608fbf28d78SMichael Dalton #ifdef CONFIG_SYSFS
4609fbf28d78SMichael Dalton if (vi->mergeable_rx_bufs)
4610fbf28d78SMichael Dalton dev->sysfs_rx_queue_group = &virtio_net_mrg_rx_group;
4611fbf28d78SMichael Dalton #endif
46120f13b66bSZhi Yong Wu netif_set_real_num_tx_queues(dev, vi->curr_queue_pairs);
46130f13b66bSZhi Yong Wu netif_set_real_num_rx_queues(dev, vi->curr_queue_pairs);
4614986a4f4dSJason Wang
46152e9ca760SMichael S. Tsirkin virtnet_init_settings(dev);
46162e9ca760SMichael S. Tsirkin
4617ba5e4426SSridhar Samudrala if (virtio_has_feature(vdev, VIRTIO_NET_F_STANDBY)) {
4618ba5e4426SSridhar Samudrala vi->failover = net_failover_create(vi->dev);
46194b8e6ac4SWei Yongjun if (IS_ERR(vi->failover)) {
46204b8e6ac4SWei Yongjun err = PTR_ERR(vi->failover);
4621ba5e4426SSridhar Samudrala goto free_vqs;
4622ba5e4426SSridhar Samudrala }
46234b8e6ac4SWei Yongjun }
4624ba5e4426SSridhar Samudrala
462591f41f01SAndrew Melnychenko if (vi->has_rss || vi->has_rss_hash_report)
4626c7114b12SAndrew Melnychenko virtnet_init_default_rss(vi);
4627c7114b12SAndrew Melnychenko
462850c0ada6SJason Wang /* serialize netdev register + virtio_device_ready() with ndo_open() */
462950c0ada6SJason Wang rtnl_lock();
463050c0ada6SJason Wang
463150c0ada6SJason Wang err = register_netdevice(dev);
4632296f96fcSRusty Russell if (err) {
4633296f96fcSRusty Russell pr_debug("virtio_net: registering device failed\n");
463450c0ada6SJason Wang rtnl_unlock();
4635ba5e4426SSridhar Samudrala goto free_failover;
4636296f96fcSRusty Russell }
4637b3369c1fSRusty Russell
46384baf1e33SMichael S. Tsirkin virtio_device_ready(vdev);
46394baf1e33SMichael S. Tsirkin
464051b81317SJason Wang _virtnet_set_queues(vi, vi->curr_queue_pairs);
464151b81317SJason Wang
46429f62d221SLaurent Vivier /* A random MAC address has been assigned; notify the device.
46439f62d221SLaurent Vivier * We don't fail probe if VIRTIO_NET_F_CTRL_MAC_ADDR is absent
46449f62d221SLaurent Vivier * because many devices work fine without having the MAC set explicitly.
46459f62d221SLaurent Vivier */
46469f62d221SLaurent Vivier if (!virtio_has_feature(vdev, VIRTIO_NET_F_MAC) &&
46479f62d221SLaurent Vivier virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_MAC_ADDR)) {
46489f62d221SLaurent Vivier struct scatterlist sg;
46499f62d221SLaurent Vivier
46509f62d221SLaurent Vivier sg_init_one(&sg, dev->dev_addr, dev->addr_len);
46519f62d221SLaurent Vivier if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
46529f62d221SLaurent Vivier VIRTIO_NET_CTRL_MAC_ADDR_SET, &sg)) {
46539f62d221SLaurent Vivier pr_debug("virtio_net: setting MAC address failed\n");
46549f62d221SLaurent Vivier rtnl_unlock();
46559f62d221SLaurent Vivier err = -EINVAL;
46569f62d221SLaurent Vivier goto free_unregister_netdev;
46579f62d221SLaurent Vivier }
46589f62d221SLaurent Vivier }
46599f62d221SLaurent Vivier
466050c0ada6SJason Wang rtnl_unlock();
466150c0ada6SJason Wang
46628017c279SSebastian Andrzej Siewior err = virtnet_cpu_notif_add(vi);
46638de4b2f3SWanlong Gao if (err) {
46648de4b2f3SWanlong Gao pr_debug("virtio_net: registering cpu notifier failed\n");
4665f00e35e2Swangyunjian goto free_unregister_netdev;
46668de4b2f3SWanlong Gao }
46678de4b2f3SWanlong Gao
4668167c25e4SJason Wang /* Assume link up if device can't report link status,
4669167c25e4SJason Wang * otherwise get link status from config. */
4670167c25e4SJason Wang netif_carrier_off(dev);
4671bda7fab5SJay Vosburgh if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) {
46723b07e9caSTejun Heo schedule_work(&vi->config_work);
4673167c25e4SJason Wang } else {
4674167c25e4SJason Wang vi->status = VIRTIO_NET_S_LINK_UP;
4675faa9b39fSJason Baron virtnet_update_settings(vi);
46764783256eSPantelis Koukousoulas netif_carrier_on(dev);
4677167c25e4SJason Wang }
46789f4d26d0SMark McLoughlin
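/* Record the guest offloads actually negotiated so they can later be
 * toggled at runtime via VIRTIO_NET_F_CTRL_GUEST_OFFLOADS.
 */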
46793f93522fSJason Wang for (i = 0; i < ARRAY_SIZE(guest_offloads); i++)
46803f93522fSJason Wang if (virtio_has_feature(vi->vdev, guest_offloads[i]))
46813f93522fSJason Wang set_bit(guest_offloads[i], &vi->guest_offloads);
4682a02e8964SWillem de Bruijn vi->guest_offloads_capable = vi->guest_offloads;
46833f93522fSJason Wang
4684986a4f4dSJason Wang pr_debug("virtnet: registered device %s with %d RX and TX vqs\n",
4685986a4f4dSJason Wang dev->name, max_queue_pairs);
4686986a4f4dSJason Wang
4687296f96fcSRusty Russell return 0;
4688296f96fcSRusty Russell
4689f00e35e2Swangyunjian free_unregister_netdev:
4690b3369c1fSRusty Russell unregister_netdev(dev);
4691ba5e4426SSridhar Samudrala free_failover:
4692ba5e4426SSridhar Samudrala net_failover_destroy(vi->failover);
4693d2a7dddaSMichael S. Tsirkin free_vqs:
4694b0686565SLi Zetao virtio_reset_device(vdev);
4695986a4f4dSJason Wang cancel_delayed_work_sync(&vi->refill);
4696fb51879dSMichael Dalton free_receive_page_frags(vi);
4697e9d7417bSJason Wang virtnet_del_vqs(vi);
4698296f96fcSRusty Russell free:
4699296f96fcSRusty Russell free_netdev(dev);
4700296f96fcSRusty Russell return err;
4701296f96fcSRusty Russell }
4702296f96fcSRusty Russell
470304486ed0SAmit Shah static void remove_vq_common(struct virtnet_info *vi)
4704296f96fcSRusty Russell {
4705d9679d00SMichael S. Tsirkin virtio_reset_device(vi->vdev);
4706830a8a97SShirley Ma
4707830a8a97SShirley Ma /* Free unused buffers in both send and recv, if any. */
47089ab86bbcSShirley Ma free_unused_bufs(vi);
4709fb6813f4SRusty Russell
4710986a4f4dSJason Wang free_receive_bufs(vi);
4711d2a7dddaSMichael S. Tsirkin
4712fb51879dSMichael Dalton free_receive_page_frags(vi);
4713fb51879dSMichael Dalton
4714986a4f4dSJason Wang virtnet_del_vqs(vi);
471504486ed0SAmit Shah }
471604486ed0SAmit Shah
47178cc085d6SBill Pemberton static void virtnet_remove(struct virtio_device *vdev)
471804486ed0SAmit Shah {
471904486ed0SAmit Shah struct virtnet_info *vi = vdev->priv;
472004486ed0SAmit Shah
47218017c279SSebastian Andrzej Siewior virtnet_cpu_notif_remove(vi);
47228de4b2f3SWanlong Gao
4723102a2786SMichael S. Tsirkin /* Make sure no work handler is accessing the device. */
4724102a2786SMichael S. Tsirkin flush_work(&vi->config_work);
4725586d17c5SJason Wang
472604486ed0SAmit Shah unregister_netdev(vi->dev);
472704486ed0SAmit Shah
4728ba5e4426SSridhar Samudrala net_failover_destroy(vi->failover);
4729ba5e4426SSridhar Samudrala
473004486ed0SAmit Shah remove_vq_common(vi);
4731fb6813f4SRusty Russell
473274b2553fSRusty Russell free_netdev(vi->dev);
4733296f96fcSRusty Russell }
4734296f96fcSRusty Russell
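/* Power-management hooks: freeze tears down the virtqueues (and the CPU
 * hotplug notifier), restore rebuilds them and re-adds the notifier.
 */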
473567a75194SArnd Bergmann static __maybe_unused int virtnet_freeze(struct virtio_device *vdev)
47360741bcb5SAmit Shah {
47370741bcb5SAmit Shah struct virtnet_info *vi = vdev->priv;
47380741bcb5SAmit Shah
47398017c279SSebastian Andrzej Siewior virtnet_cpu_notif_remove(vi);
47409fe7bfceSJohn Fastabend virtnet_freeze_down(vdev);
47410741bcb5SAmit Shah remove_vq_common(vi);
47420741bcb5SAmit Shah
47430741bcb5SAmit Shah return 0;
47440741bcb5SAmit Shah }
47450741bcb5SAmit Shah
474667a75194SArnd Bergmann static __maybe_unused int virtnet_restore(struct virtio_device *vdev)
47470741bcb5SAmit Shah {
47480741bcb5SAmit Shah struct virtnet_info *vi = vdev->priv;
47499fe7bfceSJohn Fastabend int err;
47500741bcb5SAmit Shah
47519fe7bfceSJohn Fastabend err = virtnet_restore_up(vdev);
47520741bcb5SAmit Shah if (err)
47530741bcb5SAmit Shah return err;
4754986a4f4dSJason Wang virtnet_set_queues(vi, vi->curr_queue_pairs);
4755986a4f4dSJason Wang
47568017c279SSebastian Andrzej Siewior err = virtnet_cpu_notif_add(vi);
47573f2869caSXie Yongji if (err) {
47583f2869caSXie Yongji virtnet_freeze_down(vdev);
47593f2869caSXie Yongji remove_vq_common(vi);
4760ec9debbdSJason Wang return err;
47613f2869caSXie Yongji }
4762ec9debbdSJason Wang
47630741bcb5SAmit Shah return 0;
47640741bcb5SAmit Shah }
47650741bcb5SAmit Shah
4766296f96fcSRusty Russell static struct virtio_device_id id_table[] = {
4767296f96fcSRusty Russell { VIRTIO_ID_NET, VIRTIO_DEV_ANY_ID },
4768296f96fcSRusty Russell { 0 },
4769296f96fcSRusty Russell };
4770296f96fcSRusty Russell
4771f3358507SMichael S. Tsirkin #define VIRTNET_FEATURES \
4772f3358507SMichael S. Tsirkin VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GUEST_CSUM, \
4773f3358507SMichael S. Tsirkin VIRTIO_NET_F_MAC, \
4774f3358507SMichael S. Tsirkin VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6, \
4775f3358507SMichael S. Tsirkin VIRTIO_NET_F_HOST_ECN, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6, \
4776f3358507SMichael S. Tsirkin VIRTIO_NET_F_GUEST_ECN, VIRTIO_NET_F_GUEST_UFO, \
4777418044e1SAndrew Melnychenko VIRTIO_NET_F_HOST_USO, VIRTIO_NET_F_GUEST_USO4, VIRTIO_NET_F_GUEST_USO6, \
4778f3358507SMichael S. Tsirkin VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ, \
4779f3358507SMichael S. Tsirkin VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN, \
4780f3358507SMichael S. Tsirkin VIRTIO_NET_F_GUEST_ANNOUNCE, VIRTIO_NET_F_MQ, \
4781f3358507SMichael S. Tsirkin VIRTIO_NET_F_CTRL_MAC_ADDR, \
4782faa9b39fSJason Baron VIRTIO_NET_F_MTU, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS, \
4783c7114b12SAndrew Melnychenko VIRTIO_NET_F_SPEED_DUPLEX, VIRTIO_NET_F_STANDBY, \
4784be50da3eSJiri Pirko VIRTIO_NET_F_RSS, VIRTIO_NET_F_HASH_REPORT, VIRTIO_NET_F_NOTF_COAL, \
47858af3bf66SGavin Li VIRTIO_NET_F_VQ_NOTF_COAL, \
4786be50da3eSJiri Pirko VIRTIO_NET_F_GUEST_HDRLEN
4787f3358507SMichael S. Tsirkin
4788c45a6816SRusty Russell static unsigned int features[] = {
4789f3358507SMichael S. Tsirkin VIRTNET_FEATURES,
4790f3358507SMichael S. Tsirkin };
4791f3358507SMichael S. Tsirkin
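/* Transitional (legacy) devices may additionally negotiate
 * VIRTIO_NET_F_GSO and VIRTIO_F_ANY_LAYOUT; modern devices imply
 * any-layout through VIRTIO_F_VERSION_1.
 */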
4792f3358507SMichael S. Tsirkin static unsigned int features_legacy[] = {
4793f3358507SMichael S. Tsirkin VIRTNET_FEATURES,
4794f3358507SMichael S. Tsirkin VIRTIO_NET_F_GSO,
4795e7428e95SMichael S. Tsirkin VIRTIO_F_ANY_LAYOUT,
4796c45a6816SRusty Russell };
4797c45a6816SRusty Russell
479822402529SUwe Kleine-König static struct virtio_driver virtio_net_driver = {
4799c45a6816SRusty Russell .feature_table = features,
4800c45a6816SRusty Russell .feature_table_size = ARRAY_SIZE(features),
4801f3358507SMichael S. Tsirkin .feature_table_legacy = features_legacy,
4802f3358507SMichael S. Tsirkin .feature_table_size_legacy = ARRAY_SIZE(features_legacy),
4803296f96fcSRusty Russell .driver.name = KBUILD_MODNAME,
4804296f96fcSRusty Russell .driver.owner = THIS_MODULE,
4805296f96fcSRusty Russell .id_table = id_table,
4806fe36cbe0SMichael S. Tsirkin .validate = virtnet_validate,
4807296f96fcSRusty Russell .probe = virtnet_probe,
48088cc085d6SBill Pemberton .remove = virtnet_remove,
48099f4d26d0SMark McLoughlin .config_changed = virtnet_config_changed,
481089107000SAaron Lu #ifdef CONFIG_PM_SLEEP
48110741bcb5SAmit Shah .freeze = virtnet_freeze,
48120741bcb5SAmit Shah .restore = virtnet_restore,
48130741bcb5SAmit Shah #endif
4814296f96fcSRusty Russell };
4815296f96fcSRusty Russell
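/* Register the CPU hotplug states (used to keep virtqueue affinity up
 * to date) before the driver itself; unwind in reverse order on failure.
 */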
48168017c279SSebastian Andrzej Siewior static __init int virtio_net_driver_init(void)
48178017c279SSebastian Andrzej Siewior {
48188017c279SSebastian Andrzej Siewior int ret;
48198017c279SSebastian Andrzej Siewior
482073c1b41eSThomas Gleixner ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "virtio/net:online",
48218017c279SSebastian Andrzej Siewior virtnet_cpu_online,
48228017c279SSebastian Andrzej Siewior virtnet_cpu_down_prep);
48238017c279SSebastian Andrzej Siewior if (ret < 0)
48248017c279SSebastian Andrzej Siewior goto out;
48258017c279SSebastian Andrzej Siewior virtionet_online = ret;
482673c1b41eSThomas Gleixner ret = cpuhp_setup_state_multi(CPUHP_VIRT_NET_DEAD, "virtio/net:dead",
48278017c279SSebastian Andrzej Siewior NULL, virtnet_cpu_dead);
48288017c279SSebastian Andrzej Siewior if (ret)
48298017c279SSebastian Andrzej Siewior goto err_dead;
48308017c279SSebastian Andrzej Siewior ret = register_virtio_driver(&virtio_net_driver);
48318017c279SSebastian Andrzej Siewior if (ret)
48328017c279SSebastian Andrzej Siewior goto err_virtio;
48338017c279SSebastian Andrzej Siewior return 0;
48348017c279SSebastian Andrzej Siewior err_virtio:
48358017c279SSebastian Andrzej Siewior cpuhp_remove_multi_state(CPUHP_VIRT_NET_DEAD);
48368017c279SSebastian Andrzej Siewior err_dead:
48378017c279SSebastian Andrzej Siewior cpuhp_remove_multi_state(virtionet_online);
48388017c279SSebastian Andrzej Siewior out:
48398017c279SSebastian Andrzej Siewior return ret;
48408017c279SSebastian Andrzej Siewior }
48418017c279SSebastian Andrzej Siewior module_init(virtio_net_driver_init);
48428017c279SSebastian Andrzej Siewior
48438017c279SSebastian Andrzej Siewior static __exit void virtio_net_driver_exit(void)
48448017c279SSebastian Andrzej Siewior {
4845cfa0ebc9SAndrew Jones unregister_virtio_driver(&virtio_net_driver);
48468017c279SSebastian Andrzej Siewior cpuhp_remove_multi_state(CPUHP_VIRT_NET_DEAD);
48478017c279SSebastian Andrzej Siewior cpuhp_remove_multi_state(virtionet_online);
48488017c279SSebastian Andrzej Siewior }
48498017c279SSebastian Andrzej Siewior module_exit(virtio_net_driver_exit);
4850296f96fcSRusty Russell
4851296f96fcSRusty Russell MODULE_DEVICE_TABLE(virtio, id_table);
4852296f96fcSRusty Russell MODULE_DESCRIPTION("Virtio network driver");
4853296f96fcSRusty Russell MODULE_LICENSE("GPL");
4854