/* A network driver using virtio.
 *
 * Copyright 2007 Rusty Russell <rusty@rustcorp.com.au> IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
//#define DEBUG
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/module.h>
#include <linux/virtio.h>
#include <linux/virtio_net.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/scatterlist.h>
#include <linux/if_vlan.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/average.h>
#include <linux/filter.h>
#include <net/route.h>
#include <net/xdp.h>

static int napi_weight = NAPI_POLL_WEIGHT;
module_param(napi_weight, int, 0444);

static bool csum = true, gso = true, napi_tx;
module_param(csum, bool, 0444);
module_param(gso, bool, 0444);
module_param(napi_tx, bool, 0644);

/* FIXME: MTU in config. */
#define GOOD_PACKET_LEN (ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN)
#define GOOD_COPY_LEN	128

#define VIRTNET_RX_PAD (NET_IP_ALIGN + NET_SKB_PAD)

/* Amount of XDP headroom to prepend to packets for use by xdp_adjust_head */
#define VIRTIO_XDP_HEADROOM 256

/* RX packet size EWMA. The average packet size is used to determine the packet
 * buffer size when refilling RX rings. As the entire RX ring may be refilled
 * at once, the weight is chosen so that the EWMA will be insensitive to short-
 * term, transient changes in packet size.
 */
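
/* DECLARE_EWMA(pkt_len, 0, 64): per linux/average.h this expands to struct
 * ewma_pkt_len plus init/add/read helpers, with 0 extra fixed-point precision
 * bits and each new sample weighted by 1/64.
 */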
DECLARE_EWMA(pkt_len, 0, 64)

#define VIRTNET_DRIVER_VERSION "1.0.0"

static const unsigned long guest_offloads[] = {
	VIRTIO_NET_F_GUEST_TSO4,
	VIRTIO_NET_F_GUEST_TSO6,
	VIRTIO_NET_F_GUEST_ECN,
	VIRTIO_NET_F_GUEST_UFO
};

struct virtnet_stat_desc {
	char desc[ETH_GSTRING_LEN];
	size_t offset;
};

struct virtnet_sq_stats {
	struct u64_stats_sync syncp;
	u64 packets;
	u64 bytes;
};

struct virtnet_rq_stats {
	struct u64_stats_sync syncp;
	u64 packets;
	u64 bytes;
};

#define VIRTNET_SQ_STAT(m)	offsetof(struct virtnet_sq_stats, m)
#define VIRTNET_RQ_STAT(m)	offsetof(struct virtnet_rq_stats, m)

static const struct virtnet_stat_desc virtnet_sq_stats_desc[] = {
	{ "packets", VIRTNET_SQ_STAT(packets) },
	{ "bytes",   VIRTNET_SQ_STAT(bytes) },
};

static const struct virtnet_stat_desc virtnet_rq_stats_desc[] = {
	{ "packets", VIRTNET_RQ_STAT(packets) },
	{ "bytes",   VIRTNET_RQ_STAT(bytes) },
};

#define VIRTNET_SQ_STATS_LEN	ARRAY_SIZE(virtnet_sq_stats_desc)
#define VIRTNET_RQ_STATS_LEN	ARRAY_SIZE(virtnet_rq_stats_desc)
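
/* Per-queue counters are updated under the queue's u64_stats_sync, which
 * lets 32-bit readers retry instead of seeing torn 64-bit values; the desc
 * tables above supply the ethtool -S strings and offsets.
 */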

/* Internal representation of a send virtqueue */
struct send_queue {
	/* Virtqueue associated with this send queue */
	struct virtqueue *vq;

	/* TX: fragments + linear part + virtio header */
	struct scatterlist sg[MAX_SKB_FRAGS + 2];

	/* Name of the send queue: output.$index */
	char name[40];

	struct virtnet_sq_stats stats;

	struct napi_struct napi;
};

/* Internal representation of a receive virtqueue */
struct receive_queue {
	/* Virtqueue associated with this receive_queue */
	struct virtqueue *vq;

	struct napi_struct napi;

	struct bpf_prog __rcu *xdp_prog;

	struct virtnet_rq_stats stats;

	/* Chain pages by the private ptr. */
	struct page *pages;

	/* Average packet length for mergeable receive buffers. */
	struct ewma_pkt_len mrg_avg_pkt_len;

	/* Page frag for packet buffer allocation. */
	struct page_frag alloc_frag;

	/* RX: fragments + linear part + virtio header */
	struct scatterlist sg[MAX_SKB_FRAGS + 2];

	/* Min single buffer size for mergeable buffers case. */
	unsigned int min_buf_len;

	/* Name of this receive queue: input.$index */
	char name[40];

	struct xdp_rxq_info xdp_rxq;
};

/* Control VQ buffers: protected by the rtnl lock */
struct control_buf {
	struct virtio_net_ctrl_hdr hdr;
	virtio_net_ctrl_ack status;
	struct virtio_net_ctrl_mq mq;
	u8 promisc;
	u8 allmulti;
	__virtio16 vid;
	__virtio64 offloads;
};

struct virtnet_info {
	struct virtio_device *vdev;
	struct virtqueue *cvq;
	struct net_device *dev;
	struct send_queue *sq;
	struct receive_queue *rq;
	unsigned int status;

	/* Max # of queue pairs supported by the device */
	u16 max_queue_pairs;

	/* # of queue pairs currently used by the driver */
	u16 curr_queue_pairs;

	/* # of XDP queue pairs currently used by the driver */
	u16 xdp_queue_pairs;

	/* I like... big packets and I cannot lie! */
	bool big_packets;

	/* Host will merge rx buffers for big packets (shake it! shake it!) */
	bool mergeable_rx_bufs;

	/* Has control virtqueue */
	bool has_cvq;

	/* Host can handle any s/g split between our header and packet data */
	bool any_header_sg;

	/* Packet virtio header size */
	u8 hdr_len;

	/* Work struct for refilling if we run low on memory. */
	struct delayed_work refill;

	/* Work struct for config space updates */
	struct work_struct config_work;

	/* Is the affinity hint set for virtqueues? */
	bool affinity_hint_set;

	/* CPU hotplug instances for online & dead */
	struct hlist_node node;
	struct hlist_node node_dead;

	struct control_buf *ctrl;

	/* Ethtool settings */
	u8 duplex;
	u32 speed;

	unsigned long guest_offloads;
};

struct padded_vnet_hdr {
	struct virtio_net_hdr_mrg_rxbuf hdr;
	/*
	 * hdr is in a separate sg buffer, and data sg buffer shares same page
	 * with this header sg. This padding makes next sg 16 byte aligned
	 * after the header.
	 */
	char padding[4];
};

/* Converting between virtqueue no. and kernel tx/rx queue no.
 * 0:rx0 1:tx0 2:rx1 3:tx1 ... 2N:rxN 2N+1:txN 2N+2:cvq
 */
static int vq2txq(struct virtqueue *vq)
{
	return (vq->index - 1) / 2;
}

static int txq2vq(int txq)
{
	return txq * 2 + 1;
}

static int vq2rxq(struct virtqueue *vq)
{
	return vq->index / 2;
}

static int rxq2vq(int rxq)
{
	return rxq * 2;
}

static inline struct virtio_net_hdr_mrg_rxbuf *skb_vnet_hdr(struct sk_buff *skb)
{
	return (struct virtio_net_hdr_mrg_rxbuf *)skb->cb;
}
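
/* The vnet header is stashed in skb->cb: cb is 48 bytes, comfortably larger
 * than the 12-byte virtio_net_hdr_mrg_rxbuf kept there.
 */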

/*
 * private is used to chain pages for big packets; the whole
 * most-recently-used list is put at the front for reuse.
 */
static void give_pages(struct receive_queue *rq, struct page *page)
{
	struct page *end;

	/* Find end of list, sew whole thing into vi->rq.pages. */
	for (end = page; end->private; end = (struct page *)end->private);
	end->private = (unsigned long)rq->pages;
	rq->pages = page;
}

static struct page *get_a_page(struct receive_queue *rq, gfp_t gfp_mask)
{
	struct page *p = rq->pages;

	if (p) {
		rq->pages = (struct page *)p->private;
		/* clear private here, it is used to chain pages */
		p->private = 0;
	} else
		p = alloc_page(gfp_mask);
	return p;
}

static void virtqueue_napi_schedule(struct napi_struct *napi,
				    struct virtqueue *vq)
{
	if (napi_schedule_prep(napi)) {
		virtqueue_disable_cb(vq);
		__napi_schedule(napi);
	}
}

static void virtqueue_napi_complete(struct napi_struct *napi,
				    struct virtqueue *vq, int processed)
{
	int opaque;

	opaque = virtqueue_enable_cb_prepare(vq);
	if (napi_complete_done(napi, processed)) {
		if (unlikely(virtqueue_poll(vq, opaque)))
			virtqueue_napi_schedule(napi, vq);
	} else {
		virtqueue_disable_cb(vq);
	}
}

static void skb_xmit_done(struct virtqueue *vq)
{
	struct virtnet_info *vi = vq->vdev->priv;
	struct napi_struct *napi = &vi->sq[vq2txq(vq)].napi;

	/* Suppress further interrupts. */
	virtqueue_disable_cb(vq);

	if (napi->weight)
		virtqueue_napi_schedule(napi, vq);
	else
		/* We were probably waiting for more output buffers. */
		netif_wake_subqueue(vi->dev, vq2txq(vq));
}
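
/* A mergeable-buffer context is a pointer-sized scalar, not a real pointer:
 * the buffer's headroom sits in the bits above MRG_CTX_HEADER_SHIFT and its
 * truesize in the low 22 bits, encoded/decoded by the helpers below.
 */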
#define MRG_CTX_HEADER_SHIFT 22
static void *mergeable_len_to_ctx(unsigned int truesize,
				  unsigned int headroom)
{
	return (void *)(unsigned long)((headroom << MRG_CTX_HEADER_SHIFT) | truesize);
}

static unsigned int mergeable_ctx_to_headroom(void *mrg_ctx)
{
	return (unsigned long)mrg_ctx >> MRG_CTX_HEADER_SHIFT;
}

static unsigned int mergeable_ctx_to_truesize(void *mrg_ctx)
{
	return (unsigned long)mrg_ctx & ((1 << MRG_CTX_HEADER_SHIFT) - 1);
}
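
/* Copies the vnet header plus the first bytes of data (up to the skb's
 * tailroom, sized by GOOD_COPY_LEN) into a fresh skb's linear area and
 * attaches whatever remains as page fragments, so pages backing small
 * packets are released for reuse right away.
 */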
/* Called from bottom half context */
static struct sk_buff *page_to_skb(struct virtnet_info *vi,
				   struct receive_queue *rq,
				   struct page *page, unsigned int offset,
				   unsigned int len, unsigned int truesize)
{
	struct sk_buff *skb;
	struct virtio_net_hdr_mrg_rxbuf *hdr;
	unsigned int copy, hdr_len, hdr_padded_len;
	char *p;

	p = page_address(page) + offset;

	/* copy small packet so we can reuse these pages for small data */
	skb = napi_alloc_skb(&rq->napi, GOOD_COPY_LEN);
	if (unlikely(!skb))
		return NULL;

	hdr = skb_vnet_hdr(skb);

	hdr_len = vi->hdr_len;
	if (vi->mergeable_rx_bufs)
		hdr_padded_len = sizeof(*hdr);
	else
		hdr_padded_len = sizeof(struct padded_vnet_hdr);

	memcpy(hdr, p, hdr_len);

	len -= hdr_len;
	offset += hdr_padded_len;
	p += hdr_padded_len;

	copy = len;
	if (copy > skb_tailroom(skb))
		copy = skb_tailroom(skb);
	skb_put_data(skb, p, copy);

	len -= copy;
	offset += copy;

	if (vi->mergeable_rx_bufs) {
		if (len)
			skb_add_rx_frag(skb, 0, page, offset, len, truesize);
		else
			put_page(page);
		return skb;
	}

	/*
	 * Verify that we can indeed put this data into a skb.
	 * This is here to handle cases when the device erroneously
	 * tries to receive more than is possible. This is usually
	 * the case of a broken device.
	 */
	if (unlikely(len > MAX_SKB_FRAGS * PAGE_SIZE)) {
		net_dbg_ratelimited("%s: too much data\n", skb->dev->name);
		dev_kfree_skb(skb);
		return NULL;
	}
	BUG_ON(offset >= PAGE_SIZE);
	while (len) {
		unsigned int frag_size = min((unsigned)PAGE_SIZE - offset, len);
		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, offset,
				frag_size, truesize);
		len -= frag_size;
		page = (struct page *)page->private;
		offset = 0;
	}

	if (page)
		give_pages(rq, page);

	return skb;
}

static void virtnet_xdp_flush(struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);
	struct send_queue *sq;
	unsigned int qp;

	qp = vi->curr_queue_pairs - vi->xdp_queue_pairs + smp_processor_id();
	sq = &vi->sq[qp];

	virtqueue_kick(sq->vq);
}
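
/* Prepend a zeroed vnet header to the frame and post it on the send
 * virtqueue. On failure the frame is not consumed: -ENOSPC tells the
 * caller to free or recycle it.
 */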
static int __virtnet_xdp_xmit_one(struct virtnet_info *vi,
				  struct send_queue *sq,
				  struct xdp_frame *xdpf)
{
	struct virtio_net_hdr_mrg_rxbuf *hdr;
	int err;

	/* virtqueue wants to use the data area in front of the packet */
	if (unlikely(xdpf->metasize > 0))
		return -EOPNOTSUPP;

	if (unlikely(xdpf->headroom < vi->hdr_len))
		return -EOVERFLOW;

	/* Make room for virtqueue hdr (also change xdpf->headroom?) */
	xdpf->data -= vi->hdr_len;
	/* Zero header and leave csum up to XDP layers */
	hdr = xdpf->data;
	memset(hdr, 0, vi->hdr_len);
	xdpf->len += vi->hdr_len;

	sg_init_one(sq->sg, xdpf->data, xdpf->len);

	err = virtqueue_add_outbuf(sq->vq, sq->sg, 1, xdpf, GFP_ATOMIC);
	if (unlikely(err))
		return -ENOSPC; /* Caller handles free/refcnt */

	return 0;
}

static int __virtnet_xdp_tx_xmit(struct virtnet_info *vi,
				 struct xdp_frame *xdpf)
{
	struct xdp_frame *xdpf_sent;
	struct send_queue *sq;
	unsigned int len;
	unsigned int qp;

	qp = vi->curr_queue_pairs - vi->xdp_queue_pairs + smp_processor_id();
	sq = &vi->sq[qp];

	/* Free up any pending old buffers before queueing new ones. */
	while ((xdpf_sent = virtqueue_get_buf(sq->vq, &len)) != NULL)
		xdp_return_frame(xdpf_sent);

	return __virtnet_xdp_xmit_one(vi, sq, xdpf);
}
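
/* The XDP transmit queues are the last xdp_queue_pairs entries of vi->sq,
 * nominally one per CPU, so deriving qp from smp_processor_id() gives each
 * CPU its own ring and no lock is taken here.
 */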
static int virtnet_xdp_xmit(struct net_device *dev,
			    int n, struct xdp_frame **frames)
{
	struct virtnet_info *vi = netdev_priv(dev);
	struct receive_queue *rq = vi->rq;
	struct xdp_frame *xdpf_sent;
	struct bpf_prog *xdp_prog;
	struct send_queue *sq;
	unsigned int len;
	unsigned int qp;
	int drops = 0;
	int err;
	int i;

	qp = vi->curr_queue_pairs - vi->xdp_queue_pairs + smp_processor_id();
	sq = &vi->sq[qp];

	/* Only allow ndo_xdp_xmit if XDP is loaded on dev, as this
	 * indicates XDP resources have been successfully allocated.
	 */
	xdp_prog = rcu_dereference(rq->xdp_prog);
	if (!xdp_prog)
		return -ENXIO;

	/* Free up any pending old buffers before queueing new ones. */
	while ((xdpf_sent = virtqueue_get_buf(sq->vq, &len)) != NULL)
		xdp_return_frame(xdpf_sent);

	for (i = 0; i < n; i++) {
		struct xdp_frame *xdpf = frames[i];

		err = __virtnet_xdp_xmit_one(vi, sq, xdpf);
		if (err) {
			xdp_return_frame_rx_napi(xdpf);
			drops++;
		}
	}
	return n - drops;
}

static unsigned int virtnet_get_headroom(struct virtnet_info *vi)
{
	return vi->xdp_queue_pairs ? VIRTIO_XDP_HEADROOM : 0;
}
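
/* Headroom (VIRTIO_XDP_HEADROOM, 256 bytes) is only reserved in receive
 * buffers while an XDP program is attached (xdp_queue_pairs != 0);
 * otherwise xdp_adjust_head() would have no room to grow into.
 */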

/* We copy the packet for XDP in the following cases:
 *
 * 1) Packet is scattered across multiple rx buffers.
 * 2) Headroom space is insufficient.
 *
 * This is inefficient but it's a temporary condition that
 * we hit right after XDP is enabled and until the queue is refilled
 * with large buffers with sufficient headroom - so it should affect
 * at most queue size packets.
 * Afterwards, the conditions to enable
 * XDP should preclude the underlying device from sending packets
 * across multiple buffers (num_buf > 1), and we make sure buffers
 * have enough headroom.
 */
static struct page *xdp_linearize_page(struct receive_queue *rq,
				       u16 *num_buf,
				       struct page *p,
				       int offset,
				       int page_off,
				       unsigned int *len)
{
	struct page *page = alloc_page(GFP_ATOMIC);

	if (!page)
		return NULL;

	memcpy(page_address(page) + page_off, page_address(p) + offset, *len);
	page_off += *len;

	while (--*num_buf) {
		int tailroom = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
		unsigned int buflen;
		void *buf;
		int off;

		buf = virtqueue_get_buf(rq->vq, &buflen);
		if (unlikely(!buf))
			goto err_buf;

		p = virt_to_head_page(buf);
		off = buf - page_address(p);

		/* guard against a misconfigured or uncooperative backend that
		 * is sending packets larger than the MTU.
		 */
		if ((page_off + buflen + tailroom) > PAGE_SIZE) {
			put_page(p);
			goto err_buf;
		}

		memcpy(page_address(page) + page_off,
		       page_address(p) + off, buflen);
		page_off += buflen;
		put_page(p);
	}

	/* Headroom does not contribute to packet length */
	*len = page_off - VIRTIO_XDP_HEADROOM;
	return page;
err_buf:
	__free_pages(page, 0);
	return NULL;
}
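
/* Small-buffer receive: the buffer is a page-frag chunk sized for one
 * GOOD_PACKET_LEN packet plus headroom and shared_info, so the skb can be
 * built around it in place with build_skb(); XDP, if attached, runs first.
 */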
static struct sk_buff *receive_small(struct net_device *dev,
				     struct virtnet_info *vi,
				     struct receive_queue *rq,
				     void *buf, void *ctx,
				     unsigned int len,
				     bool *xdp_xmit)
{
	struct sk_buff *skb;
	struct bpf_prog *xdp_prog;
	unsigned int xdp_headroom = (unsigned long)ctx;
	unsigned int header_offset = VIRTNET_RX_PAD + xdp_headroom;
	unsigned int headroom = vi->hdr_len + header_offset;
	unsigned int buflen = SKB_DATA_ALIGN(GOOD_PACKET_LEN + headroom) +
			      SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	struct page *page = virt_to_head_page(buf);
	unsigned int delta = 0;
	struct page *xdp_page;
	int err;

	len -= vi->hdr_len;

	rcu_read_lock();
	xdp_prog = rcu_dereference(rq->xdp_prog);
	if (xdp_prog) {
		struct virtio_net_hdr_mrg_rxbuf *hdr = buf + header_offset;
		struct xdp_frame *xdpf;
		struct xdp_buff xdp;
		void *orig_data;
		u32 act;

		if (unlikely(hdr->hdr.gso_type))
			goto err_xdp;

		if (unlikely(xdp_headroom < virtnet_get_headroom(vi))) {
			int offset = buf - page_address(page) + header_offset;
			unsigned int tlen = len + vi->hdr_len;
			u16 num_buf = 1;

			xdp_headroom = virtnet_get_headroom(vi);
			header_offset = VIRTNET_RX_PAD + xdp_headroom;
			headroom = vi->hdr_len + header_offset;
			buflen = SKB_DATA_ALIGN(GOOD_PACKET_LEN + headroom) +
				 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
			xdp_page = xdp_linearize_page(rq, &num_buf, page,
						      offset, header_offset,
						      &tlen);
			if (!xdp_page)
				goto err_xdp;

			buf = page_address(xdp_page);
			put_page(page);
			page = xdp_page;
		}

		xdp.data_hard_start = buf + VIRTNET_RX_PAD + vi->hdr_len;
		xdp.data = xdp.data_hard_start + xdp_headroom;
		xdp_set_data_meta_invalid(&xdp);
		xdp.data_end = xdp.data + len;
		xdp.rxq = &rq->xdp_rxq;
		orig_data = xdp.data;
		act = bpf_prog_run_xdp(xdp_prog, &xdp);

		switch (act) {
		case XDP_PASS:
			/* Recalculate length in case bpf program changed it */
			delta = orig_data - xdp.data;
			len = xdp.data_end - xdp.data;
			break;
		case XDP_TX:
			xdpf = convert_to_xdp_frame(&xdp);
			if (unlikely(!xdpf))
				goto err_xdp;
			err = __virtnet_xdp_tx_xmit(vi, xdpf);
			if (unlikely(err)) {
				trace_xdp_exception(vi->dev, xdp_prog, act);
				goto err_xdp;
			}
			*xdp_xmit = true;
			rcu_read_unlock();
			goto xdp_xmit;
		case XDP_REDIRECT:
			err = xdp_do_redirect(dev, &xdp, xdp_prog);
			if (err)
				goto err_xdp;
			*xdp_xmit = true;
			rcu_read_unlock();
			goto xdp_xmit;
		default:
			bpf_warn_invalid_xdp_action(act);
		case XDP_ABORTED:
			trace_xdp_exception(vi->dev, xdp_prog, act);
		case XDP_DROP:
			goto err_xdp;
		}
	}
	rcu_read_unlock();
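
	/* delta is how far, if at all, the XDP program moved the packet start
	 * via xdp_adjust_head(); reserve that much less below so skb->data
	 * still lands on the (possibly moved) payload.
	 */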
	skb = build_skb(buf, buflen);
	if (!skb) {
		put_page(page);
		goto err;
	}
	skb_reserve(skb, headroom - delta);
	skb_put(skb, len);
	if (!delta) {
		buf += header_offset;
		memcpy(skb_vnet_hdr(skb), buf, vi->hdr_len);
	} /* keep zeroed vnet hdr since packet was changed by bpf */

err:
	return skb;

err_xdp:
	rcu_read_unlock();
	dev->stats.rx_dropped++;
	put_page(page);
xdp_xmit:
	return NULL;
}

static struct sk_buff *receive_big(struct net_device *dev,
				   struct virtnet_info *vi,
				   struct receive_queue *rq,
				   void *buf,
				   unsigned int len)
{
	struct page *page = buf;
	struct sk_buff *skb = page_to_skb(vi, rq, page, 0, len, PAGE_SIZE);

	if (unlikely(!skb))
		goto err;

	return skb;

err:
	dev->stats.rx_dropped++;
	give_pages(rq, page);
	return NULL;
}
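
/* Mergeable-buffer receive: the device may spread one packet across
 * hdr->num_buffers completions; the first is handled here (including XDP)
 * and the rest are stitched onto the skb's frag list further down.
 */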
static struct sk_buff *receive_mergeable(struct net_device *dev,
					 struct virtnet_info *vi,
					 struct receive_queue *rq,
					 void *buf,
					 void *ctx,
					 unsigned int len,
					 bool *xdp_xmit)
{
	struct virtio_net_hdr_mrg_rxbuf *hdr = buf;
	u16 num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers);
	struct page *page = virt_to_head_page(buf);
	int offset = buf - page_address(page);
	struct sk_buff *head_skb, *curr_skb;
	struct bpf_prog *xdp_prog;
	unsigned int truesize;
	unsigned int headroom = mergeable_ctx_to_headroom(ctx);
	int err;

	head_skb = NULL;

	rcu_read_lock();
	xdp_prog = rcu_dereference(rq->xdp_prog);
	if (xdp_prog) {
		struct xdp_frame *xdpf;
		struct page *xdp_page;
		struct xdp_buff xdp;
		void *data;
		u32 act;

		/* This happens when rx buffer size is underestimated
		 * or headroom is not enough because the buffer was
		 * refilled before XDP was set. This should only
		 * happen for the first several packets, so we don't
		 * care much about its performance.
		 */
		if (unlikely(num_buf > 1 ||
			     headroom < virtnet_get_headroom(vi))) {
			/* linearize data for XDP */
			xdp_page = xdp_linearize_page(rq, &num_buf,
						      page, offset,
						      VIRTIO_XDP_HEADROOM,
						      &len);
			if (!xdp_page)
				goto err_xdp;
			offset = VIRTIO_XDP_HEADROOM;
		} else {
			xdp_page = page;
		}

		/* Transient failure which in theory could occur if
		 * in-flight packets from before XDP was enabled reach
		 * the receive path after XDP is loaded. In practice I
		 * was not able to create this condition.
		 */
		if (unlikely(hdr->hdr.gso_type))
			goto err_xdp;

		/* Allow consuming headroom but reserve enough space to push
		 * the descriptor on if we get an XDP_TX return code.
		 */
		data = page_address(xdp_page) + offset;
		xdp.data_hard_start = data - VIRTIO_XDP_HEADROOM + vi->hdr_len;
		xdp.data = data + vi->hdr_len;
		xdp_set_data_meta_invalid(&xdp);
		xdp.data_end = xdp.data + (len - vi->hdr_len);
		xdp.rxq = &rq->xdp_rxq;

		act = bpf_prog_run_xdp(xdp_prog, &xdp);

		switch (act) {
		case XDP_PASS:
			/* recalculate offset to account for any header
			 * adjustments. Note other cases do not build an
			 * skb and avoid using offset
			 */
			offset = xdp.data -
					page_address(xdp_page) - vi->hdr_len;

			/* recalculate len if xdp.data or xdp.data_end were
			 * adjusted
			 */
			len = xdp.data_end - xdp.data + vi->hdr_len;
			/* We can only create skb based on xdp_page. */
			if (unlikely(xdp_page != page)) {
				rcu_read_unlock();
				put_page(page);
				head_skb = page_to_skb(vi, rq, xdp_page,
						       offset, len, PAGE_SIZE);
				return head_skb;
			}
			break;
		case XDP_TX:
			xdpf = convert_to_xdp_frame(&xdp);
			if (unlikely(!xdpf))
				goto err_xdp;
			err = __virtnet_xdp_tx_xmit(vi, xdpf);
			if (unlikely(err)) {
				trace_xdp_exception(vi->dev, xdp_prog, act);
				if (unlikely(xdp_page != page))
					put_page(xdp_page);
				goto err_xdp;
			}
			*xdp_xmit = true;
			if (unlikely(xdp_page != page))
				goto err_xdp;
			rcu_read_unlock();
			goto xdp_xmit;
		case XDP_REDIRECT:
			err = xdp_do_redirect(dev, &xdp, xdp_prog);
			if (err) {
				if (unlikely(xdp_page != page))
					put_page(xdp_page);
				goto err_xdp;
			}
			*xdp_xmit = true;
			if (unlikely(xdp_page != page))
				goto err_xdp;
			rcu_read_unlock();
			goto xdp_xmit;
		default:
			bpf_warn_invalid_xdp_action(act);
		case XDP_ABORTED:
			trace_xdp_exception(vi->dev, xdp_prog, act);
		case XDP_DROP:
			if (unlikely(xdp_page != page))
				__free_pages(xdp_page, 0);
			goto err_xdp;
		}
	}
	rcu_read_unlock();

	truesize = mergeable_ctx_to_truesize(ctx);
	if (unlikely(len > truesize)) {
		pr_debug("%s: rx error: len %u exceeds truesize %lu\n",
			 dev->name, len, (unsigned long)ctx);
		dev->stats.rx_length_errors++;
		goto err_skb;
	}

	head_skb = page_to_skb(vi, rq, page, offset, len, truesize);
	curr_skb = head_skb;

	if (unlikely(!curr_skb))
		goto err_skb;
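
	/* The remaining num_buf - 1 completions belong to this same packet:
	 * pull each off the ring and attach it as a fragment, chaining a new
	 * skb onto the frag list whenever MAX_SKB_FRAGS fills up.
	 */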
	while (--num_buf) {
		int num_skb_frags;

		buf = virtqueue_get_buf_ctx(rq->vq, &len, &ctx);
		if (unlikely(!buf)) {
			pr_debug("%s: rx error: %d buffers out of %d missing\n",
				 dev->name, num_buf,
				 virtio16_to_cpu(vi->vdev,
						 hdr->num_buffers));
			dev->stats.rx_length_errors++;
			goto err_buf;
		}

		page = virt_to_head_page(buf);

		truesize = mergeable_ctx_to_truesize(ctx);
		if (unlikely(len > truesize)) {
			pr_debug("%s: rx error: len %u exceeds truesize %lu\n",
				 dev->name, len, (unsigned long)ctx);
			dev->stats.rx_length_errors++;
			goto err_skb;
		}

		num_skb_frags = skb_shinfo(curr_skb)->nr_frags;
		if (unlikely(num_skb_frags == MAX_SKB_FRAGS)) {
			struct sk_buff *nskb = alloc_skb(0, GFP_ATOMIC);

			if (unlikely(!nskb))
				goto err_skb;
			if (curr_skb == head_skb)
				skb_shinfo(curr_skb)->frag_list = nskb;
			else
				curr_skb->next = nskb;
			curr_skb = nskb;
			head_skb->truesize += nskb->truesize;
			num_skb_frags = 0;
		}
		if (curr_skb != head_skb) {
			head_skb->data_len += len;
			head_skb->len += len;
			head_skb->truesize += truesize;
		}
		offset = buf - page_address(page);
		if (skb_can_coalesce(curr_skb, num_skb_frags, page, offset)) {
			put_page(page);
			skb_coalesce_rx_frag(curr_skb, num_skb_frags - 1,
					     len, truesize);
		} else {
			skb_add_rx_frag(curr_skb, num_skb_frags, page,
					offset, len, truesize);
		}
	}

	ewma_pkt_len_add(&rq->mrg_avg_pkt_len, head_skb->len);
	return head_skb;

err_xdp:
	rcu_read_unlock();
err_skb:
	put_page(page);
	while (--num_buf) {
		buf = virtqueue_get_buf(rq->vq, &len);
		if (unlikely(!buf)) {
			pr_debug("%s: rx error: %d buffers missing\n",
				 dev->name, num_buf);
			dev->stats.rx_length_errors++;
			break;
		}
		page = virt_to_head_page(buf);
		put_page(page);
	}
err_buf:
	dev->stats.rx_dropped++;
	dev_kfree_skb(head_skb);
xdp_xmit:
	return NULL;
}
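
/* Dispatch one completed receive buffer to the small/big/mergeable handler,
 * apply checksum/GSO metadata from the vnet header, and hand the skb to
 * GRO. Returns the packet length for stats, or 0 if the buffer is dropped.
 */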
static int receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
		       void *buf, unsigned int len, void **ctx, bool *xdp_xmit)
{
	struct net_device *dev = vi->dev;
	struct sk_buff *skb;
	struct virtio_net_hdr_mrg_rxbuf *hdr;
	int ret;

	if (unlikely(len < vi->hdr_len + ETH_HLEN)) {
		pr_debug("%s: short packet %i\n", dev->name, len);
		dev->stats.rx_length_errors++;
		if (vi->mergeable_rx_bufs) {
			put_page(virt_to_head_page(buf));
		} else if (vi->big_packets) {
			give_pages(rq, buf);
		} else {
			put_page(virt_to_head_page(buf));
		}
		return 0;
	}

	if (vi->mergeable_rx_bufs)
		skb = receive_mergeable(dev, vi, rq, buf, ctx, len, xdp_xmit);
	else if (vi->big_packets)
		skb = receive_big(dev, vi, rq, buf, len);
	else
		skb = receive_small(dev, vi, rq, buf, ctx, len, xdp_xmit);

	if (unlikely(!skb))
		return 0;

	hdr = skb_vnet_hdr(skb);

	ret = skb->len;

	if (hdr->hdr.flags & VIRTIO_NET_HDR_F_DATA_VALID)
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	if (virtio_net_hdr_to_skb(skb, &hdr->hdr,
				  virtio_is_little_endian(vi->vdev))) {
		net_warn_ratelimited("%s: bad gso: type: %u, size: %u\n",
				     dev->name, hdr->hdr.gso_type,
				     hdr->hdr.gso_size);
		goto frame_err;
	}

	skb->protocol = eth_type_trans(skb, dev);
	pr_debug("Receiving skb proto 0x%04x len %i type %i\n",
		 ntohs(skb->protocol), skb->len, skb->pkt_type);

	napi_gro_receive(&rq->napi, skb);
	return ret;

frame_err:
	dev->stats.rx_frame_errors++;
	dev_kfree_skb(skb);
	return 0;
}

/* Unlike mergeable buffers, all buffers are allocated to the
 * same size, except for the headroom. For this reason we do
 * not need to use mergeable_len_to_ctx here - it is enough
 * to store the headroom as the context ignoring the truesize.
 */
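/* Rough slot layout, as set up below (a sketch, not an exact byte map):
 * [ VIRTNET_RX_PAD | xdp headroom | vnet hdr | GOOD_PACKET_LEN data ]
 * rounded up so a SKB_DATA_ALIGN'd skb_shared_info still fits behind it,
 * as build_skb() expects.
 */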
static int add_recvbuf_small(struct virtnet_info *vi, struct receive_queue *rq,
			     gfp_t gfp)
{
	struct page_frag *alloc_frag = &rq->alloc_frag;
	char *buf;
	unsigned int xdp_headroom = virtnet_get_headroom(vi);
	void *ctx = (void *)(unsigned long)xdp_headroom;
	int len = vi->hdr_len + VIRTNET_RX_PAD + GOOD_PACKET_LEN + xdp_headroom;
	int err;

	len = SKB_DATA_ALIGN(len) +
	      SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	if (unlikely(!skb_page_frag_refill(len, alloc_frag, gfp)))
		return -ENOMEM;

	buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset;
	get_page(alloc_frag->page);
	alloc_frag->offset += len;
	sg_init_one(rq->sg, buf + VIRTNET_RX_PAD + xdp_headroom,
		    vi->hdr_len + GOOD_PACKET_LEN);
	err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp);
	if (err < 0)
		put_page(virt_to_head_page(buf));
	return err;
}

static int add_recvbuf_big(struct virtnet_info *vi, struct receive_queue *rq,
			   gfp_t gfp)
{
	struct page *first, *list = NULL;
	char *p;
	int i, err, offset;

	sg_init_table(rq->sg, MAX_SKB_FRAGS + 2);

	/* page in rq->sg[MAX_SKB_FRAGS + 1] is list tail */
	for (i = MAX_SKB_FRAGS + 1; i > 1; --i) {
		first = get_a_page(rq, gfp);
		if (!first) {
			if (list)
				give_pages(rq, list);
			return -ENOMEM;
		}
		sg_set_buf(&rq->sg[i], page_address(first), PAGE_SIZE);

		/* chain new page in list head to match sg */
		first->private = (unsigned long)list;
		list = first;
	}

	first = get_a_page(rq, gfp);
	if (!first) {
		give_pages(rq, list);
		return -ENOMEM;
	}
	p = page_address(first);

	/* rq->sg[0], rq->sg[1] share the same page */
	/* a separated rq->sg[0] for header - required in case !any_header_sg */
	sg_set_buf(&rq->sg[0], p, vi->hdr_len);

	/* rq->sg[1] for data packet, from offset */
	offset = sizeof(struct padded_vnet_hdr);
	sg_set_buf(&rq->sg[1], p + offset, PAGE_SIZE - offset);

	/* chain first in list head */
	first->private = (unsigned long)list;
	err = virtqueue_add_inbuf(rq->vq, rq->sg, MAX_SKB_FRAGS + 2,
				  first, gfp);
	if (err < 0)
		give_pages(rq, first);

	return err;
}
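
/* Pick a buffer length from the EWMA of recent packet sizes, clamped
 * between the device's minimum and a page (less the header) and aligned to
 * a cache line; when XDP reserves extra room, fall back to a fixed
 * page-sized buffer instead.
 */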
static unsigned int get_mergeable_buf_len(struct receive_queue *rq,
					  struct ewma_pkt_len *avg_pkt_len,
					  unsigned int room)
{
	const size_t hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf);
	unsigned int len;

	if (room)
		return PAGE_SIZE - room;

	len = hdr_len + clamp_t(unsigned int, ewma_pkt_len_read(avg_pkt_len),
				rq->min_buf_len, PAGE_SIZE - hdr_len);

	return ALIGN(len, L1_CACHE_BYTES);
}

static int add_recvbuf_mergeable(struct virtnet_info *vi,
				 struct receive_queue *rq, gfp_t gfp)
{
	struct page_frag *alloc_frag = &rq->alloc_frag;
	unsigned int headroom = virtnet_get_headroom(vi);
	unsigned int tailroom = headroom ? sizeof(struct skb_shared_info) : 0;
	unsigned int room = SKB_DATA_ALIGN(headroom + tailroom);
	char *buf;
	void *ctx;
	int err;
	unsigned int len, hole;

	/* Extra tailroom is needed to satisfy XDP's assumption. This
	 * means rx frags coalescing won't work, but since we've
	 * disabled GSO for XDP, it won't be a big issue.
	 */
	len = get_mergeable_buf_len(rq, &rq->mrg_avg_pkt_len, room);
	if (unlikely(!skb_page_frag_refill(len + room, alloc_frag, gfp)))
		return -ENOMEM;

	buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset;
	buf += headroom; /* advance address leaving hole at front of pkt */
	get_page(alloc_frag->page);
	alloc_frag->offset += len + room;
	hole = alloc_frag->size - alloc_frag->offset;
	if (hole < len + room) {
		/* To avoid internal fragmentation, if there is very likely not
		 * enough space for another buffer, add the remaining space to
		 * the current buffer.
		 */
		len += hole;
		alloc_frag->offset += hole;
	}

	sg_init_one(rq->sg, buf, len);
	ctx = mergeable_len_to_ctx(len, headroom);
	err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp);
	if (err < 0)
		put_page(virt_to_head_page(buf));

	return err;
}
Tsirkin err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp); 11339ab86bbcSShirley Ma if (err < 0) 11342613af0eSMichael Dalton put_page(virt_to_head_page(buf)); 11359ab86bbcSShirley Ma 11369ab86bbcSShirley Ma return err; 1137296f96fcSRusty Russell } 1138296f96fcSRusty Russell 1139b2baed69SRusty Russell /* 1140b2baed69SRusty Russell * Returns false if we couldn't fill entirely (OOM). 1141b2baed69SRusty Russell * 1142b2baed69SRusty Russell * Normally run in the receive path, but can also be run from ndo_open 1143b2baed69SRusty Russell * before we're receiving packets, or from refill_work which is 1144b2baed69SRusty Russell * careful to disable receiving (using napi_disable). 1145b2baed69SRusty Russell */ 1146946fa564SMichael S. Tsirkin static bool try_fill_recv(struct virtnet_info *vi, struct receive_queue *rq, 1147946fa564SMichael S. Tsirkin gfp_t gfp) 11483f2c31d9SMark McLoughlin { 11493f2c31d9SMark McLoughlin int err; 11501788f495SMichael S. Tsirkin bool oom; 11513f2c31d9SMark McLoughlin 11520aea51c3SAmit Shah do { 11539ab86bbcSShirley Ma if (vi->mergeable_rx_bufs) 11542de2f7f4SJohn Fastabend err = add_recvbuf_mergeable(vi, rq, gfp); 11559ab86bbcSShirley Ma else if (vi->big_packets) 1156012873d0SMichael S. Tsirkin err = add_recvbuf_big(vi, rq, gfp); 11579ab86bbcSShirley Ma else 1158946fa564SMichael S. Tsirkin err = add_recvbuf_small(vi, rq, gfp); 11593f2c31d9SMark McLoughlin 11601788f495SMichael S. Tsirkin oom = err == -ENOMEM; 11619ed4cb07SRusty Russell if (err) 11623f2c31d9SMark McLoughlin break; 1163b7dfde95SLinus Torvalds } while (rq->vq->num_free); 1164681daee2SJason Wang virtqueue_kick(rq->vq); 11653161e453SRusty Russell return !oom; 11663f2c31d9SMark McLoughlin } 11673f2c31d9SMark McLoughlin 116818445c4dSRusty Russell static void skb_recv_done(struct virtqueue *rvq) 1169296f96fcSRusty Russell { 1170296f96fcSRusty Russell struct virtnet_info *vi = rvq->vdev->priv; 1171986a4f4dSJason Wang struct receive_queue *rq = &vi->rq[vq2rxq(rvq)]; 1172e9d7417bSJason Wang 1173e4e8452aSWillem de Bruijn virtqueue_napi_schedule(&rq->napi, rvq); 1174296f96fcSRusty Russell } 1175296f96fcSRusty Russell 1176e4e8452aSWillem de Bruijn static void virtnet_napi_enable(struct virtqueue *vq, struct napi_struct *napi) 11773e9d08ecSBruce Rogers { 1178e4e8452aSWillem de Bruijn napi_enable(napi); 11793e9d08ecSBruce Rogers 11803e9d08ecSBruce Rogers /* If all buffers were filled by other side before we napi_enabled, we 1181e4e8452aSWillem de Bruijn * won't get another interrupt, so process any outstanding packets now. 1182e4e8452aSWillem de Bruijn * Call local_bh_enable after to trigger softIRQ processing. 1183e4e8452aSWillem de Bruijn */ 1184ec13ee80SMichael S. Tsirkin local_bh_disable(); 1185e4e8452aSWillem de Bruijn virtqueue_napi_schedule(napi, vq); 1186ec13ee80SMichael S. Tsirkin local_bh_enable(); 11873e9d08ecSBruce Rogers } 11883e9d08ecSBruce Rogers 1189b92f1e67SWillem de Bruijn static void virtnet_napi_tx_enable(struct virtnet_info *vi, 1190b92f1e67SWillem de Bruijn struct virtqueue *vq, 1191b92f1e67SWillem de Bruijn struct napi_struct *napi) 1192b92f1e67SWillem de Bruijn { 1193b92f1e67SWillem de Bruijn if (!napi->weight) 1194b92f1e67SWillem de Bruijn return; 1195b92f1e67SWillem de Bruijn 1196b92f1e67SWillem de Bruijn /* Tx napi touches cachelines on the cpu handling tx interrupts. Only 1197b92f1e67SWillem de Bruijn * enable the feature if this is likely affine with the transmit path. 
1198b92f1e67SWillem de Bruijn */ 1199b92f1e67SWillem de Bruijn if (!vi->affinity_hint_set) { 1200b92f1e67SWillem de Bruijn napi->weight = 0; 1201b92f1e67SWillem de Bruijn return; 1202b92f1e67SWillem de Bruijn } 1203b92f1e67SWillem de Bruijn 1204b92f1e67SWillem de Bruijn return virtnet_napi_enable(vq, napi); 1205b92f1e67SWillem de Bruijn } 1206b92f1e67SWillem de Bruijn 120778a57b48SWillem de Bruijn static void virtnet_napi_tx_disable(struct napi_struct *napi) 120878a57b48SWillem de Bruijn { 120978a57b48SWillem de Bruijn if (napi->weight) 121078a57b48SWillem de Bruijn napi_disable(napi); 121178a57b48SWillem de Bruijn } 121278a57b48SWillem de Bruijn 12133161e453SRusty Russell static void refill_work(struct work_struct *work) 12143161e453SRusty Russell { 1215e9d7417bSJason Wang struct virtnet_info *vi = 1216e9d7417bSJason Wang container_of(work, struct virtnet_info, refill.work); 12173161e453SRusty Russell bool still_empty; 1218986a4f4dSJason Wang int i; 12193161e453SRusty Russell 122055257d72SSasha Levin for (i = 0; i < vi->curr_queue_pairs; i++) { 1221986a4f4dSJason Wang struct receive_queue *rq = &vi->rq[i]; 1222986a4f4dSJason Wang 1223986a4f4dSJason Wang napi_disable(&rq->napi); 1224946fa564SMichael S. Tsirkin still_empty = !try_fill_recv(vi, rq, GFP_KERNEL); 1225e4e8452aSWillem de Bruijn virtnet_napi_enable(rq->vq, &rq->napi); 12263161e453SRusty Russell 12273161e453SRusty Russell /* In theory, this can happen: if we don't get any buffers in 1228986a4f4dSJason Wang * we will *never* try to fill again. 1229986a4f4dSJason Wang */ 12303161e453SRusty Russell if (still_empty) 12313b07e9caSTejun Heo schedule_delayed_work(&vi->refill, HZ/2); 12323161e453SRusty Russell } 1233986a4f4dSJason Wang } 12343161e453SRusty Russell 1235186b3c99SJason Wang static int virtnet_receive(struct receive_queue *rq, int budget, bool *xdp_xmit) 1236296f96fcSRusty Russell { 1237e9d7417bSJason Wang struct virtnet_info *vi = rq->vq->vdev->priv; 123861845d20SJason Wang unsigned int len, received = 0, bytes = 0; 12399ab86bbcSShirley Ma void *buf; 1240296f96fcSRusty Russell 1241192f68cfSJason Wang if (!vi->big_packets || vi->mergeable_rx_bufs) { 1242680557cfSMichael S. Tsirkin void *ctx; 1243680557cfSMichael S. Tsirkin 1244680557cfSMichael S. Tsirkin while (received < budget && 1245680557cfSMichael S. Tsirkin (buf = virtqueue_get_buf_ctx(rq->vq, &len, &ctx))) { 1246186b3c99SJason Wang bytes += receive_buf(vi, rq, buf, len, ctx, xdp_xmit); 1247680557cfSMichael S. Tsirkin received++; 1248680557cfSMichael S. Tsirkin } 1249680557cfSMichael S. Tsirkin } else { 1250296f96fcSRusty Russell while (received < budget && 1251e9d7417bSJason Wang (buf = virtqueue_get_buf(rq->vq, &len)) != NULL) { 1252186b3c99SJason Wang bytes += receive_buf(vi, rq, buf, len, NULL, xdp_xmit); 1253296f96fcSRusty Russell received++; 1254296f96fcSRusty Russell } 1255680557cfSMichael S. Tsirkin } 1256296f96fcSRusty Russell 1257be121f46SJason Wang if (rq->vq->num_free > virtqueue_get_vring_size(rq->vq) / 2) { 1258946fa564SMichael S. 
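/* Aside, not driver code: buffers are replenished only once more than
 * half of the ring slots sit empty, which amortizes allocation work, and
 * a failed GFP_ATOMIC refill below defers to refill_work for a
 * GFP_KERNEL retry in process context.  A minimal standalone model of
 * the trigger (names are illustrative, not driver API):
 *
 *	int want_refill_model(unsigned int num_free, unsigned int vring_size)
 *	{
 *		return num_free > vring_size / 2;
 *	}
 *
 * e.g. want_refill_model(129, 256) == 1 but want_refill_model(128, 256) == 0.
 */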
Tsirkin if (!try_fill_recv(vi, rq, GFP_ATOMIC)) 12593b07e9caSTejun Heo schedule_delayed_work(&vi->refill, 0); 12603161e453SRusty Russell } 1261296f96fcSRusty Russell 1262d7dfc5cfSToshiaki Makita u64_stats_update_begin(&rq->stats.syncp); 1263d7dfc5cfSToshiaki Makita rq->stats.bytes += bytes; 1264d7dfc5cfSToshiaki Makita rq->stats.packets += received; 1265d7dfc5cfSToshiaki Makita u64_stats_update_end(&rq->stats.syncp); 126661845d20SJason Wang 12672ffa7598SJason Wang return received; 12682ffa7598SJason Wang } 12692ffa7598SJason Wang 1270ea7735d9SWillem de Bruijn static void free_old_xmit_skbs(struct send_queue *sq) 1271ea7735d9SWillem de Bruijn { 1272ea7735d9SWillem de Bruijn struct sk_buff *skb; 1273ea7735d9SWillem de Bruijn unsigned int len; 1274ea7735d9SWillem de Bruijn unsigned int packets = 0; 1275ea7735d9SWillem de Bruijn unsigned int bytes = 0; 1276ea7735d9SWillem de Bruijn 1277ea7735d9SWillem de Bruijn while ((skb = virtqueue_get_buf(sq->vq, &len)) != NULL) { 1278ea7735d9SWillem de Bruijn pr_debug("Sent skb %p\n", skb); 1279ea7735d9SWillem de Bruijn 1280ea7735d9SWillem de Bruijn bytes += skb->len; 1281ea7735d9SWillem de Bruijn packets++; 1282ea7735d9SWillem de Bruijn 1283dadc0736SEric Dumazet dev_consume_skb_any(skb); 1284ea7735d9SWillem de Bruijn } 1285ea7735d9SWillem de Bruijn 1286ea7735d9SWillem de Bruijn /* Avoid overhead when no packets have been processed 1287ea7735d9SWillem de Bruijn * happens when called speculatively from start_xmit. 1288ea7735d9SWillem de Bruijn */ 1289ea7735d9SWillem de Bruijn if (!packets) 1290ea7735d9SWillem de Bruijn return; 1291ea7735d9SWillem de Bruijn 1292d7dfc5cfSToshiaki Makita u64_stats_update_begin(&sq->stats.syncp); 1293d7dfc5cfSToshiaki Makita sq->stats.bytes += bytes; 1294d7dfc5cfSToshiaki Makita sq->stats.packets += packets; 1295d7dfc5cfSToshiaki Makita u64_stats_update_end(&sq->stats.syncp); 1296ea7735d9SWillem de Bruijn } 1297ea7735d9SWillem de Bruijn 12987b0411efSWillem de Bruijn static void virtnet_poll_cleantx(struct receive_queue *rq) 12997b0411efSWillem de Bruijn { 13007b0411efSWillem de Bruijn struct virtnet_info *vi = rq->vq->vdev->priv; 13017b0411efSWillem de Bruijn unsigned int index = vq2rxq(rq->vq); 13027b0411efSWillem de Bruijn struct send_queue *sq = &vi->sq[index]; 13037b0411efSWillem de Bruijn struct netdev_queue *txq = netdev_get_tx_queue(vi->dev, index); 13047b0411efSWillem de Bruijn 13057b0411efSWillem de Bruijn if (!sq->napi.weight) 13067b0411efSWillem de Bruijn return; 13077b0411efSWillem de Bruijn 13087b0411efSWillem de Bruijn if (__netif_tx_trylock(txq)) { 13097b0411efSWillem de Bruijn free_old_xmit_skbs(sq); 13107b0411efSWillem de Bruijn __netif_tx_unlock(txq); 13117b0411efSWillem de Bruijn } 13127b0411efSWillem de Bruijn 13137b0411efSWillem de Bruijn if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS) 13147b0411efSWillem de Bruijn netif_tx_wake_queue(txq); 13157b0411efSWillem de Bruijn } 13167b0411efSWillem de Bruijn 13172ffa7598SJason Wang static int virtnet_poll(struct napi_struct *napi, int budget) 13182ffa7598SJason Wang { 13192ffa7598SJason Wang struct receive_queue *rq = 13202ffa7598SJason Wang container_of(napi, struct receive_queue, napi); 13219267c430SJason Wang struct virtnet_info *vi = rq->vq->vdev->priv; 13229267c430SJason Wang struct send_queue *sq; 13239267c430SJason Wang unsigned int received, qp; 1324186b3c99SJason Wang bool xdp_xmit = false; 13252ffa7598SJason Wang 13267b0411efSWillem de Bruijn virtnet_poll_cleantx(rq); 13277b0411efSWillem de Bruijn 1328186b3c99SJason Wang received = virtnet_receive(rq, 
budget, &xdp_xmit); 13292ffa7598SJason Wang 13308329d98eSRusty Russell /* Out of packets? */ 1331e4e8452aSWillem de Bruijn if (received < budget) 1332e4e8452aSWillem de Bruijn virtqueue_napi_complete(napi, rq->vq, received); 1333296f96fcSRusty Russell 13349267c430SJason Wang if (xdp_xmit) { 13359267c430SJason Wang qp = vi->curr_queue_pairs - vi->xdp_queue_pairs + 13369267c430SJason Wang smp_processor_id(); 13379267c430SJason Wang sq = &vi->sq[qp]; 13389267c430SJason Wang virtqueue_kick(sq->vq); 1339186b3c99SJason Wang xdp_do_flush_map(); 13409267c430SJason Wang } 1341186b3c99SJason Wang 1342296f96fcSRusty Russell return received; 1343296f96fcSRusty Russell } 1344296f96fcSRusty Russell 1345986a4f4dSJason Wang static int virtnet_open(struct net_device *dev) 1346986a4f4dSJason Wang { 1347986a4f4dSJason Wang struct virtnet_info *vi = netdev_priv(dev); 1348754b8a21SJesper Dangaard Brouer int i, err; 1349986a4f4dSJason Wang 1350e4166625SJason Wang for (i = 0; i < vi->max_queue_pairs; i++) { 1351e4166625SJason Wang if (i < vi->curr_queue_pairs) 1352986a4f4dSJason Wang /* Make sure we have some buffers: if oom use wq. */ 1353946fa564SMichael S. Tsirkin if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL)) 1354986a4f4dSJason Wang schedule_delayed_work(&vi->refill, 0); 1355754b8a21SJesper Dangaard Brouer 1356754b8a21SJesper Dangaard Brouer err = xdp_rxq_info_reg(&vi->rq[i].xdp_rxq, dev, i); 1357754b8a21SJesper Dangaard Brouer if (err < 0) 1358754b8a21SJesper Dangaard Brouer return err; 1359754b8a21SJesper Dangaard Brouer 13608d5d8852SJesper Dangaard Brouer err = xdp_rxq_info_reg_mem_model(&vi->rq[i].xdp_rxq, 13618d5d8852SJesper Dangaard Brouer MEM_TYPE_PAGE_SHARED, NULL); 13628d5d8852SJesper Dangaard Brouer if (err < 0) { 13638d5d8852SJesper Dangaard Brouer xdp_rxq_info_unreg(&vi->rq[i].xdp_rxq); 13648d5d8852SJesper Dangaard Brouer return err; 13658d5d8852SJesper Dangaard Brouer } 13668d5d8852SJesper Dangaard Brouer 1367e4e8452aSWillem de Bruijn virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi); 1368b92f1e67SWillem de Bruijn virtnet_napi_tx_enable(vi, vi->sq[i].vq, &vi->sq[i].napi); 1369986a4f4dSJason Wang } 1370986a4f4dSJason Wang 1371986a4f4dSJason Wang return 0; 1372986a4f4dSJason Wang } 1373986a4f4dSJason Wang 1374b92f1e67SWillem de Bruijn static int virtnet_poll_tx(struct napi_struct *napi, int budget) 1375b92f1e67SWillem de Bruijn { 1376b92f1e67SWillem de Bruijn struct send_queue *sq = container_of(napi, struct send_queue, napi); 1377b92f1e67SWillem de Bruijn struct virtnet_info *vi = sq->vq->vdev->priv; 1378b92f1e67SWillem de Bruijn struct netdev_queue *txq = netdev_get_tx_queue(vi->dev, vq2txq(sq->vq)); 1379b92f1e67SWillem de Bruijn 1380b92f1e67SWillem de Bruijn __netif_tx_lock(txq, raw_smp_processor_id()); 1381b92f1e67SWillem de Bruijn free_old_xmit_skbs(sq); 1382b92f1e67SWillem de Bruijn __netif_tx_unlock(txq); 1383b92f1e67SWillem de Bruijn 1384b92f1e67SWillem de Bruijn virtqueue_napi_complete(napi, sq->vq, 0); 1385b92f1e67SWillem de Bruijn 1386b92f1e67SWillem de Bruijn if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS) 1387b92f1e67SWillem de Bruijn netif_tx_wake_queue(txq); 1388b92f1e67SWillem de Bruijn 1389b92f1e67SWillem de Bruijn return 0; 1390b92f1e67SWillem de Bruijn } 1391b92f1e67SWillem de Bruijn 1392e9d7417bSJason Wang static int xmit_skb(struct send_queue *sq, struct sk_buff *skb) 1393296f96fcSRusty Russell { 1394012873d0SMichael S. 
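/* Aside, not driver code: the can_push test computed below decides
 * whether the virtio-net header may be written into the skb's own
 * headroom, letting header plus data travel as one contiguous
 * scatterlist entry.  A standalone model of the same predicate, with
 * illustrative parameter names standing in for the skb accessors:
 *
 *	int can_push_hdr_model(int any_header_sg, unsigned long data_addr,
 *			       unsigned int hdr_len, unsigned int hdr_align,
 *			       int hdr_cloned, unsigned int headroom)
 *	{
 *		return any_header_sg &&
 *		       !(data_addr & (unsigned long)(hdr_align - 1)) &&
 *		       !hdr_cloned &&
 *		       headroom >= hdr_len;
 *	}
 */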
Tsirkin struct virtio_net_hdr_mrg_rxbuf *hdr; 1395296f96fcSRusty Russell const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest; 1396e9d7417bSJason Wang struct virtnet_info *vi = sq->vq->vdev->priv; 1397e2fcad58SJason A. Donenfeld int num_sg; 1398012873d0SMichael S. Tsirkin unsigned hdr_len = vi->hdr_len; 1399e7428e95SMichael S. Tsirkin bool can_push; 1400296f96fcSRusty Russell 1401e174961cSJohannes Berg pr_debug("%s: xmit %p %pM\n", vi->dev->name, skb, dest); 1402e7428e95SMichael S. Tsirkin 1403e7428e95SMichael S. Tsirkin can_push = vi->any_header_sg && 1404e7428e95SMichael S. Tsirkin !((unsigned long)skb->data & (__alignof__(*hdr) - 1)) && 1405e7428e95SMichael S. Tsirkin !skb_header_cloned(skb) && skb_headroom(skb) >= hdr_len; 1406e7428e95SMichael S. Tsirkin /* Even if we can, don't push here yet as this would skew 1407e7428e95SMichael S. Tsirkin * csum_start offset below. */ 1408e7428e95SMichael S. Tsirkin if (can_push) 1409012873d0SMichael S. Tsirkin hdr = (struct virtio_net_hdr_mrg_rxbuf *)(skb->data - hdr_len); 1410e7428e95SMichael S. Tsirkin else 1411e7428e95SMichael S. Tsirkin hdr = skb_vnet_hdr(skb); 1412296f96fcSRusty Russell 1413e858fae2SMike Rapoport if (virtio_net_hdr_from_skb(skb, &hdr->hdr, 14146391a448SJason Wang virtio_is_little_endian(vi->vdev), false)) 1415296f96fcSRusty Russell BUG(); 1416296f96fcSRusty Russell 1417e7428e95SMichael S. Tsirkin if (vi->mergeable_rx_bufs) 1418012873d0SMichael S. Tsirkin hdr->num_buffers = 0; 14193f2c31d9SMark McLoughlin 1420547c890cSJason Wang sg_init_table(sq->sg, skb_shinfo(skb)->nr_frags + (can_push ? 1 : 2)); 1421e7428e95SMichael S. Tsirkin if (can_push) { 1422e7428e95SMichael S. Tsirkin __skb_push(skb, hdr_len); 1423e7428e95SMichael S. Tsirkin num_sg = skb_to_sgvec(skb, sq->sg, 0, skb->len); 1424e2fcad58SJason A. Donenfeld if (unlikely(num_sg < 0)) 1425e2fcad58SJason A. Donenfeld return num_sg; 1426e7428e95SMichael S. Tsirkin /* Pull header back to avoid skew in tx bytes calculations. */ 1427e7428e95SMichael S. Tsirkin __skb_pull(skb, hdr_len); 1428e7428e95SMichael S. Tsirkin } else { 1429e7428e95SMichael S. Tsirkin sg_set_buf(sq->sg, hdr, hdr_len); 1430e2fcad58SJason A. Donenfeld num_sg = skb_to_sgvec(skb, sq->sg + 1, 0, skb->len); 1431e2fcad58SJason A. Donenfeld if (unlikely(num_sg < 0)) 1432e2fcad58SJason A. Donenfeld return num_sg; 1433e2fcad58SJason A. Donenfeld num_sg++; 1434e7428e95SMichael S. Tsirkin } 14359dc7b9e4SRusty Russell return virtqueue_add_outbuf(sq->vq, sq->sg, num_sg, skb, GFP_ATOMIC); 143611a3a154SRusty Russell } 143711a3a154SRusty Russell 1438424efe9cSStephen Hemminger static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev) 143999ffc696SRusty Russell { 144099ffc696SRusty Russell struct virtnet_info *vi = netdev_priv(dev); 1441986a4f4dSJason Wang int qnum = skb_get_queue_mapping(skb); 1442986a4f4dSJason Wang struct send_queue *sq = &vi->sq[qnum]; 14439ed4cb07SRusty Russell int err; 14444b7fd2e6SMichael S. Tsirkin struct netdev_queue *txq = netdev_get_tx_queue(dev, qnum); 14454b7fd2e6SMichael S. Tsirkin bool kick = !skb->xmit_more; 1446b92f1e67SWillem de Bruijn bool use_napi = sq->napi.weight; 14472cb9c6baSRusty Russell 14482cb9c6baSRusty Russell /* Free up any pending old buffers before queueing new ones. 
*/ 1449e9d7417bSJason Wang free_old_xmit_skbs(sq); 145099ffc696SRusty Russell 1451bdb12e0dSWillem de Bruijn if (use_napi && kick) 1452bdb12e0dSWillem de Bruijn virtqueue_enable_cb_delayed(sq->vq); 1453bdb12e0dSWillem de Bruijn 1454074c3582SJacob Keller /* timestamp packet in software */ 1455074c3582SJacob Keller skb_tx_timestamp(skb); 1456074c3582SJacob Keller 145703f191baSMichael S. Tsirkin /* Try to transmit */ 1458b7dfde95SLinus Torvalds err = xmit_skb(sq, skb); 145999ffc696SRusty Russell 14609ed4cb07SRusty Russell /* This should not happen! */ 1461681daee2SJason Wang if (unlikely(err)) { 146258eba97dSRusty Russell dev->stats.tx_fifo_errors++; 14632e57b79cSRick Jones if (net_ratelimit()) 146458eba97dSRusty Russell dev_warn(&dev->dev, 1465b7dfde95SLinus Torvalds "Unexpected TXQ (%d) queue failure: %d\n", qnum, err); 146658eba97dSRusty Russell dev->stats.tx_dropped++; 146785e94525SEric W. Biederman dev_kfree_skb_any(skb); 146858eba97dSRusty Russell return NETDEV_TX_OK; 1469296f96fcSRusty Russell } 147003f191baSMichael S. Tsirkin 147148925e37SRusty Russell /* Don't wait up for transmitted skbs to be freed. */ 1472b92f1e67SWillem de Bruijn if (!use_napi) { 147348925e37SRusty Russell skb_orphan(skb); 147448925e37SRusty Russell nf_reset(skb); 1475b92f1e67SWillem de Bruijn } 147648925e37SRusty Russell 147760302ff6SMichael S. Tsirkin /* If running out of space, stop queue to avoid getting packets that we 147860302ff6SMichael S. Tsirkin * are then unable to transmit. 147960302ff6SMichael S. Tsirkin * An alternative would be to force queuing layer to requeue the skb by 148060302ff6SMichael S. Tsirkin * returning NETDEV_TX_BUSY. However, NETDEV_TX_BUSY should not be 148160302ff6SMichael S. Tsirkin * returned in a normal path of operation: it means that driver is not 148260302ff6SMichael S. Tsirkin * maintaining the TX queue stop/start state properly, and causes 148360302ff6SMichael S. Tsirkin * the stack to do a non-trivial amount of useless work. 148460302ff6SMichael S. Tsirkin * Since most packets only take 1 or 2 ring slots, stopping the queue 148560302ff6SMichael S. Tsirkin * early means 16 slots are typically wasted. 1486d631b94eSstephen hemminger */ 1487b7dfde95SLinus Torvalds if (sq->vq->num_free < 2+MAX_SKB_FRAGS) { 1488986a4f4dSJason Wang netif_stop_subqueue(dev, qnum); 1489b92f1e67SWillem de Bruijn if (!use_napi && 1490b92f1e67SWillem de Bruijn unlikely(!virtqueue_enable_cb_delayed(sq->vq))) { 149148925e37SRusty Russell /* More just got used, free them then recheck. */ 1492b7dfde95SLinus Torvalds free_old_xmit_skbs(sq); 1493b7dfde95SLinus Torvalds if (sq->vq->num_free >= 2+MAX_SKB_FRAGS) { 1494986a4f4dSJason Wang netif_start_subqueue(dev, qnum); 1495e9d7417bSJason Wang virtqueue_disable_cb(sq->vq); 149648925e37SRusty Russell } 149748925e37SRusty Russell } 149848925e37SRusty Russell } 149948925e37SRusty Russell 15004b7fd2e6SMichael S. Tsirkin if (kick || netif_xmit_stopped(txq)) 1501c223a078SDavid S. Miller virtqueue_kick(sq->vq); 15020b725a2cSDavid S. Miller 15030b725a2cSDavid S. Miller return NETDEV_TX_OK; 1504c223a078SDavid S. Miller } 1505c223a078SDavid S. Miller 150640cbfc37SAmos Kong /* 150740cbfc37SAmos Kong * Send command via the control virtqueue and check status. Commands 150840cbfc37SAmos Kong * supported by the hypervisor, as indicated by feature bits, should 1509788a8b6dSstephen hemminger * never fail unless improperly formatted. 
151040cbfc37SAmos Kong */ 151140cbfc37SAmos Kong static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd, 1512d24bae32Sstephen hemminger struct scatterlist *out) 151340cbfc37SAmos Kong { 1514f7bc9594SRusty Russell struct scatterlist *sgs[4], hdr, stat; 1515d24bae32Sstephen hemminger unsigned out_num = 0, tmp; 151640cbfc37SAmos Kong 151740cbfc37SAmos Kong /* Caller should know better */ 1518f7bc9594SRusty Russell BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ)); 151940cbfc37SAmos Kong 152012e57169SMichael S. Tsirkin vi->ctrl->status = ~0; 152112e57169SMichael S. Tsirkin vi->ctrl->hdr.class = class; 152212e57169SMichael S. Tsirkin vi->ctrl->hdr.cmd = cmd; 1523f7bc9594SRusty Russell /* Add header */ 152412e57169SMichael S. Tsirkin sg_init_one(&hdr, &vi->ctrl->hdr, sizeof(vi->ctrl->hdr)); 1525f7bc9594SRusty Russell sgs[out_num++] = &hdr; 152640cbfc37SAmos Kong 1527f7bc9594SRusty Russell if (out) 1528f7bc9594SRusty Russell sgs[out_num++] = out; 152940cbfc37SAmos Kong 1530f7bc9594SRusty Russell /* Add return status. */ 153112e57169SMichael S. Tsirkin sg_init_one(&stat, &vi->ctrl->status, sizeof(vi->ctrl->status)); 1532d24bae32Sstephen hemminger sgs[out_num] = &stat; 153340cbfc37SAmos Kong 1534d24bae32Sstephen hemminger BUG_ON(out_num + 1 > ARRAY_SIZE(sgs)); 1535a7c58146SRusty Russell virtqueue_add_sgs(vi->cvq, sgs, out_num, 1, vi, GFP_ATOMIC); 153640cbfc37SAmos Kong 153767975901SHeinz Graalfs if (unlikely(!virtqueue_kick(vi->cvq))) 153812e57169SMichael S. Tsirkin return vi->ctrl->status == VIRTIO_NET_OK; 153940cbfc37SAmos Kong 154040cbfc37SAmos Kong /* Spin for a response, the kick causes an ioport write, trapping 154140cbfc37SAmos Kong * into the hypervisor, so the request should be handled immediately. 154240cbfc37SAmos Kong */ 1543047b9b94SHeinz Graalfs while (!virtqueue_get_buf(vi->cvq, &tmp) && 1544047b9b94SHeinz Graalfs !virtqueue_is_broken(vi->cvq)) 154540cbfc37SAmos Kong cpu_relax(); 154640cbfc37SAmos Kong 154712e57169SMichael S. Tsirkin return vi->ctrl->status == VIRTIO_NET_OK; 154840cbfc37SAmos Kong } 154940cbfc37SAmos Kong 15509c46f6d4SAlex Williamson static int virtnet_set_mac_address(struct net_device *dev, void *p) 15519c46f6d4SAlex Williamson { 15529c46f6d4SAlex Williamson struct virtnet_info *vi = netdev_priv(dev); 15539c46f6d4SAlex Williamson struct virtio_device *vdev = vi->vdev; 1554f2f2c8b4SJiri Pirko int ret; 1555e37e2ff3SAndy Lutomirski struct sockaddr *addr; 15567e58d5aeSAmos Kong struct scatterlist sg; 15579c46f6d4SAlex Williamson 1558801822d1SShyam Saini addr = kmemdup(p, sizeof(*addr), GFP_KERNEL); 1559e37e2ff3SAndy Lutomirski if (!addr) 1560e37e2ff3SAndy Lutomirski return -ENOMEM; 1561e37e2ff3SAndy Lutomirski 1562e37e2ff3SAndy Lutomirski ret = eth_prepare_mac_addr_change(dev, addr); 1563f2f2c8b4SJiri Pirko if (ret) 1564e37e2ff3SAndy Lutomirski goto out; 15659c46f6d4SAlex Williamson 15667e58d5aeSAmos Kong if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR)) { 15677e58d5aeSAmos Kong sg_init_one(&sg, addr->sa_data, dev->addr_len); 15687e58d5aeSAmos Kong if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC, 1569d24bae32Sstephen hemminger VIRTIO_NET_CTRL_MAC_ADDR_SET, &sg)) { 15707e58d5aeSAmos Kong dev_warn(&vdev->dev, 15717e58d5aeSAmos Kong "Failed to set mac address by vq command.\n"); 1572e37e2ff3SAndy Lutomirski ret = -EINVAL; 1573e37e2ff3SAndy Lutomirski goto out; 15747e58d5aeSAmos Kong } 15757e93a02fSMichael S. Tsirkin } else if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC) && 15767e93a02fSMichael S. 
Tsirkin !virtio_has_feature(vdev, VIRTIO_F_VERSION_1)) { 1577855e0c52SRusty Russell unsigned int i; 1578855e0c52SRusty Russell 1579855e0c52SRusty Russell /* Naturally, this has an atomicity problem. */ 1580855e0c52SRusty Russell for (i = 0; i < dev->addr_len; i++) 1581855e0c52SRusty Russell virtio_cwrite8(vdev, 1582855e0c52SRusty Russell offsetof(struct virtio_net_config, mac) + 1583855e0c52SRusty Russell i, addr->sa_data[i]); 15847e58d5aeSAmos Kong } 15857e58d5aeSAmos Kong 15867e58d5aeSAmos Kong eth_commit_mac_addr_change(dev, p); 1587e37e2ff3SAndy Lutomirski ret = 0; 15889c46f6d4SAlex Williamson 1589e37e2ff3SAndy Lutomirski out: 1590e37e2ff3SAndy Lutomirski kfree(addr); 1591e37e2ff3SAndy Lutomirski return ret; 15929c46f6d4SAlex Williamson } 15939c46f6d4SAlex Williamson 1594bc1f4470Sstephen hemminger static void virtnet_stats(struct net_device *dev, 15953fa2a1dfSstephen hemminger struct rtnl_link_stats64 *tot) 15963fa2a1dfSstephen hemminger { 15973fa2a1dfSstephen hemminger struct virtnet_info *vi = netdev_priv(dev); 15983fa2a1dfSstephen hemminger unsigned int start; 1599d7dfc5cfSToshiaki Makita int i; 16003fa2a1dfSstephen hemminger 1601d7dfc5cfSToshiaki Makita for (i = 0; i < vi->max_queue_pairs; i++) { 16023fa2a1dfSstephen hemminger u64 tpackets, tbytes, rpackets, rbytes; 1603d7dfc5cfSToshiaki Makita struct receive_queue *rq = &vi->rq[i]; 1604d7dfc5cfSToshiaki Makita struct send_queue *sq = &vi->sq[i]; 16053fa2a1dfSstephen hemminger 16063fa2a1dfSstephen hemminger do { 1607d7dfc5cfSToshiaki Makita start = u64_stats_fetch_begin_irq(&sq->stats.syncp); 1608d7dfc5cfSToshiaki Makita tpackets = sq->stats.packets; 1609d7dfc5cfSToshiaki Makita tbytes = sq->stats.bytes; 1610d7dfc5cfSToshiaki Makita } while (u64_stats_fetch_retry_irq(&sq->stats.syncp, start)); 161183a27052SEric Dumazet 161283a27052SEric Dumazet do { 1613d7dfc5cfSToshiaki Makita start = u64_stats_fetch_begin_irq(&rq->stats.syncp); 1614d7dfc5cfSToshiaki Makita rpackets = rq->stats.packets; 1615d7dfc5cfSToshiaki Makita rbytes = rq->stats.bytes; 1616d7dfc5cfSToshiaki Makita } while (u64_stats_fetch_retry_irq(&rq->stats.syncp, start)); 16173fa2a1dfSstephen hemminger 16183fa2a1dfSstephen hemminger tot->rx_packets += rpackets; 16193fa2a1dfSstephen hemminger tot->tx_packets += tpackets; 16203fa2a1dfSstephen hemminger tot->rx_bytes += rbytes; 16213fa2a1dfSstephen hemminger tot->tx_bytes += tbytes; 16223fa2a1dfSstephen hemminger } 16233fa2a1dfSstephen hemminger 16243fa2a1dfSstephen hemminger tot->tx_dropped = dev->stats.tx_dropped; 1625021ac8d3SRick Jones tot->tx_fifo_errors = dev->stats.tx_fifo_errors; 16263fa2a1dfSstephen hemminger tot->rx_dropped = dev->stats.rx_dropped; 16273fa2a1dfSstephen hemminger tot->rx_length_errors = dev->stats.rx_length_errors; 16283fa2a1dfSstephen hemminger tot->rx_frame_errors = dev->stats.rx_frame_errors; 16293fa2a1dfSstephen hemminger } 16303fa2a1dfSstephen hemminger 1631da74e89dSAmit Shah #ifdef CONFIG_NET_POLL_CONTROLLER 1632da74e89dSAmit Shah static void virtnet_netpoll(struct net_device *dev) 1633da74e89dSAmit Shah { 1634da74e89dSAmit Shah struct virtnet_info *vi = netdev_priv(dev); 1635986a4f4dSJason Wang int i; 1636da74e89dSAmit Shah 1637986a4f4dSJason Wang for (i = 0; i < vi->curr_queue_pairs; i++) 1638986a4f4dSJason Wang napi_schedule(&vi->rq[i].napi); 1639da74e89dSAmit Shah } 1640da74e89dSAmit Shah #endif 1641da74e89dSAmit Shah 1642586d17c5SJason Wang static void virtnet_ack_link_announce(struct virtnet_info *vi) 1643586d17c5SJason Wang { 1644586d17c5SJason Wang rtnl_lock(); 1645586d17c5SJason Wang 
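/* Aside, not driver code: like every control-virtqueue request, the
 * announce ack sent below is framed by virtnet_send_command() above as a
 * two-byte class/command header, optional payload scatterlist entries,
 * and a single device-writable status byte that must come back as
 * VIRTIO_NET_OK.  A standalone model of that wire layout (the struct
 * name is illustrative):
 *
 *	struct ctrl_cmd_model {
 *		unsigned char class;
 *		unsigned char cmd;
 *		unsigned char ack;
 *	};
 *
 * class/cmd are e.g. VIRTIO_NET_CTRL_ANNOUNCE / VIRTIO_NET_CTRL_ANNOUNCE_ACK;
 * payload entries, when present, sit between cmd and ack, and the device
 * writes VIRTIO_NET_OK into ack on success.
 */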
if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_ANNOUNCE,
1646d24bae32Sstephen hemminger VIRTIO_NET_CTRL_ANNOUNCE_ACK, NULL))
1647586d17c5SJason Wang dev_warn(&vi->dev->dev, "Failed to ack link announce.\n");
1648586d17c5SJason Wang rtnl_unlock();
1649586d17c5SJason Wang }
1650586d17c5SJason Wang
165147315329SJohn Fastabend static int _virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
1652986a4f4dSJason Wang {
1653986a4f4dSJason Wang struct scatterlist sg;
1654986a4f4dSJason Wang struct net_device *dev = vi->dev;
1655986a4f4dSJason Wang
1656986a4f4dSJason Wang if (!vi->has_cvq || !virtio_has_feature(vi->vdev, VIRTIO_NET_F_MQ))
1657986a4f4dSJason Wang return 0;
1658986a4f4dSJason Wang
165912e57169SMichael S. Tsirkin vi->ctrl->mq.virtqueue_pairs = cpu_to_virtio16(vi->vdev, queue_pairs);
166012e57169SMichael S. Tsirkin sg_init_one(&sg, &vi->ctrl->mq, sizeof(vi->ctrl->mq));
1661986a4f4dSJason Wang
1662986a4f4dSJason Wang if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ,
1663d24bae32Sstephen hemminger VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET, &sg)) {
1664986a4f4dSJason Wang dev_warn(&dev->dev, "Failed to set the number of queue pairs to %d\n",
1665986a4f4dSJason Wang queue_pairs);
1666986a4f4dSJason Wang return -EINVAL;
166755257d72SSasha Levin } else {
1668986a4f4dSJason Wang vi->curr_queue_pairs = queue_pairs;
166935ed159bSJason Wang /* virtnet_open() will refill when the device goes up. */
167035ed159bSJason Wang if (dev->flags & IFF_UP)
16719b9cd802SJason Wang schedule_delayed_work(&vi->refill, 0);
167255257d72SSasha Levin }
1673986a4f4dSJason Wang
1674986a4f4dSJason Wang return 0;
1675986a4f4dSJason Wang }
1676986a4f4dSJason Wang
167747315329SJohn Fastabend static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
167847315329SJohn Fastabend {
167947315329SJohn Fastabend int err;
168047315329SJohn Fastabend
168147315329SJohn Fastabend rtnl_lock();
168247315329SJohn Fastabend err = _virtnet_set_queues(vi, queue_pairs);
168347315329SJohn Fastabend rtnl_unlock();
168447315329SJohn Fastabend return err;
168547315329SJohn Fastabend }
168647315329SJohn Fastabend
1687296f96fcSRusty Russell static int virtnet_close(struct net_device *dev)
1688296f96fcSRusty Russell {
1689296f96fcSRusty Russell struct virtnet_info *vi = netdev_priv(dev);
1690986a4f4dSJason Wang int i;
1691296f96fcSRusty Russell
1692b2baed69SRusty Russell /* Make sure refill_work doesn't re-enable napi!
*/ 1693b2baed69SRusty Russell cancel_delayed_work_sync(&vi->refill); 1694986a4f4dSJason Wang 1695b92f1e67SWillem de Bruijn for (i = 0; i < vi->max_queue_pairs; i++) { 1696754b8a21SJesper Dangaard Brouer xdp_rxq_info_unreg(&vi->rq[i].xdp_rxq); 1697986a4f4dSJason Wang napi_disable(&vi->rq[i].napi); 169878a57b48SWillem de Bruijn virtnet_napi_tx_disable(&vi->sq[i].napi); 1699b92f1e67SWillem de Bruijn } 1700296f96fcSRusty Russell 1701296f96fcSRusty Russell return 0; 1702296f96fcSRusty Russell } 1703296f96fcSRusty Russell 17042af7698eSAlex Williamson static void virtnet_set_rx_mode(struct net_device *dev) 17052af7698eSAlex Williamson { 17062af7698eSAlex Williamson struct virtnet_info *vi = netdev_priv(dev); 1707f565a7c2SAlex Williamson struct scatterlist sg[2]; 1708f565a7c2SAlex Williamson struct virtio_net_ctrl_mac *mac_data; 1709ccffad25SJiri Pirko struct netdev_hw_addr *ha; 171032e7bfc4SJiri Pirko int uc_count; 17114cd24eafSJiri Pirko int mc_count; 1712f565a7c2SAlex Williamson void *buf; 1713f565a7c2SAlex Williamson int i; 17142af7698eSAlex Williamson 1715788a8b6dSstephen hemminger /* We can't dynamically set ndo_set_rx_mode, so return gracefully */ 17162af7698eSAlex Williamson if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_RX)) 17172af7698eSAlex Williamson return; 17182af7698eSAlex Williamson 171912e57169SMichael S. Tsirkin vi->ctrl->promisc = ((dev->flags & IFF_PROMISC) != 0); 172012e57169SMichael S. Tsirkin vi->ctrl->allmulti = ((dev->flags & IFF_ALLMULTI) != 0); 17212af7698eSAlex Williamson 172212e57169SMichael S. Tsirkin sg_init_one(sg, &vi->ctrl->promisc, sizeof(vi->ctrl->promisc)); 17232af7698eSAlex Williamson 17242af7698eSAlex Williamson if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX, 1725d24bae32Sstephen hemminger VIRTIO_NET_CTRL_RX_PROMISC, sg)) 17262af7698eSAlex Williamson dev_warn(&dev->dev, "Failed to %sable promisc mode.\n", 172712e57169SMichael S. Tsirkin vi->ctrl->promisc ? "en" : "dis"); 17282af7698eSAlex Williamson 172912e57169SMichael S. Tsirkin sg_init_one(sg, &vi->ctrl->allmulti, sizeof(vi->ctrl->allmulti)); 17302af7698eSAlex Williamson 17312af7698eSAlex Williamson if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX, 1732d24bae32Sstephen hemminger VIRTIO_NET_CTRL_RX_ALLMULTI, sg)) 17332af7698eSAlex Williamson dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n", 173412e57169SMichael S. Tsirkin vi->ctrl->allmulti ? "en" : "dis"); 1735f565a7c2SAlex Williamson 173632e7bfc4SJiri Pirko uc_count = netdev_uc_count(dev); 17374cd24eafSJiri Pirko mc_count = netdev_mc_count(dev); 1738f565a7c2SAlex Williamson /* MAC filter - use one buffer for both lists */ 17394cd24eafSJiri Pirko buf = kzalloc(((uc_count + mc_count) * ETH_ALEN) + 1740f565a7c2SAlex Williamson (2 * sizeof(mac_data->entries)), GFP_ATOMIC); 17414cd24eafSJiri Pirko mac_data = buf; 1742e68ed8f0SJoe Perches if (!buf) 1743f565a7c2SAlex Williamson return; 1744f565a7c2SAlex Williamson 174523e258e1SAlex Williamson sg_init_table(sg, 2); 174623e258e1SAlex Williamson 1747f565a7c2SAlex Williamson /* Store the unicast list and count in the front of the buffer */ 1748fdd819b2SMichael S. 
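/* Aside, not driver code: the single allocation above carries two
 * back-to-back tables, unicast first, each a 32-bit (virtio-endian)
 * count followed by that many six-byte MAC addresses; the two
 * sg_set_buf() calls below then expose them as one
 * VIRTIO_NET_CTRL_MAC_TABLE_SET command.  Standalone sizing model
 * mirroring the kzalloc above (6 == ETH_ALEN, 4 == sizeof(entries)):
 *
 *	unsigned long mac_tables_bytes_model(unsigned long uc, unsigned long mc)
 *	{
 *		return (uc + mc) * 6 + 2 * 4;
 *	}
 */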
Tsirkin mac_data->entries = cpu_to_virtio32(vi->vdev, uc_count); 1749ccffad25SJiri Pirko i = 0; 175032e7bfc4SJiri Pirko netdev_for_each_uc_addr(ha, dev) 1751ccffad25SJiri Pirko memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN); 1752f565a7c2SAlex Williamson 1753f565a7c2SAlex Williamson sg_set_buf(&sg[0], mac_data, 175432e7bfc4SJiri Pirko sizeof(mac_data->entries) + (uc_count * ETH_ALEN)); 1755f565a7c2SAlex Williamson 1756f565a7c2SAlex Williamson /* multicast list and count fill the end */ 175732e7bfc4SJiri Pirko mac_data = (void *)&mac_data->macs[uc_count][0]; 1758f565a7c2SAlex Williamson 1759fdd819b2SMichael S. Tsirkin mac_data->entries = cpu_to_virtio32(vi->vdev, mc_count); 1760567ec874SJiri Pirko i = 0; 176122bedad3SJiri Pirko netdev_for_each_mc_addr(ha, dev) 176222bedad3SJiri Pirko memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN); 1763f565a7c2SAlex Williamson 1764f565a7c2SAlex Williamson sg_set_buf(&sg[1], mac_data, 17654cd24eafSJiri Pirko sizeof(mac_data->entries) + (mc_count * ETH_ALEN)); 1766f565a7c2SAlex Williamson 1767f565a7c2SAlex Williamson if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC, 1768d24bae32Sstephen hemminger VIRTIO_NET_CTRL_MAC_TABLE_SET, sg)) 176999e872aeSThomas Huth dev_warn(&dev->dev, "Failed to set MAC filter table.\n"); 1770f565a7c2SAlex Williamson 1771f565a7c2SAlex Williamson kfree(buf); 17722af7698eSAlex Williamson } 17732af7698eSAlex Williamson 177480d5c368SPatrick McHardy static int virtnet_vlan_rx_add_vid(struct net_device *dev, 177580d5c368SPatrick McHardy __be16 proto, u16 vid) 17760bde9569SAlex Williamson { 17770bde9569SAlex Williamson struct virtnet_info *vi = netdev_priv(dev); 17780bde9569SAlex Williamson struct scatterlist sg; 17790bde9569SAlex Williamson 1780d7fad4c8SMichael S. Tsirkin vi->ctrl->vid = cpu_to_virtio16(vi->vdev, vid); 178112e57169SMichael S. Tsirkin sg_init_one(&sg, &vi->ctrl->vid, sizeof(vi->ctrl->vid)); 17820bde9569SAlex Williamson 17830bde9569SAlex Williamson if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN, 1784d24bae32Sstephen hemminger VIRTIO_NET_CTRL_VLAN_ADD, &sg)) 17850bde9569SAlex Williamson dev_warn(&dev->dev, "Failed to add VLAN ID %d.\n", vid); 17868e586137SJiri Pirko return 0; 17870bde9569SAlex Williamson } 17880bde9569SAlex Williamson 178980d5c368SPatrick McHardy static int virtnet_vlan_rx_kill_vid(struct net_device *dev, 179080d5c368SPatrick McHardy __be16 proto, u16 vid) 17910bde9569SAlex Williamson { 17920bde9569SAlex Williamson struct virtnet_info *vi = netdev_priv(dev); 17930bde9569SAlex Williamson struct scatterlist sg; 17940bde9569SAlex Williamson 1795d7fad4c8SMichael S. Tsirkin vi->ctrl->vid = cpu_to_virtio16(vi->vdev, vid); 179612e57169SMichael S. 
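/* Aside, not driver code: the VLAN id just stored above crosses the
 * control queue in the device's byte order, hence cpu_to_virtio16():
 * little-endian for a VIRTIO_F_VERSION_1 device, guest-native for a
 * legacy one.  An endian-agnostic standalone model of the
 * little-endian case:
 *
 *	void put_le16_model(unsigned char *dst, unsigned int v)
 *	{
 *		dst[0] = v & 0xff;
 *		dst[1] = (v >> 8) & 0xff;
 *	}
 */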
Tsirkin sg_init_one(&sg, &vi->ctrl->vid, sizeof(vi->ctrl->vid));
17970bde9569SAlex Williamson
17980bde9569SAlex Williamson if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
1799d24bae32Sstephen hemminger VIRTIO_NET_CTRL_VLAN_DEL, &sg))
18000bde9569SAlex Williamson dev_warn(&dev->dev, "Failed to kill VLAN ID %d.\n", vid);
18018e586137SJiri Pirko return 0;
18020bde9569SAlex Williamson }
18030bde9569SAlex Williamson
18048898c21cSWanlong Gao static void virtnet_clean_affinity(struct virtnet_info *vi, long hcpu)
1805986a4f4dSJason Wang {
1806986a4f4dSJason Wang int i;
18078898c21cSWanlong Gao
18088898c21cSWanlong Gao if (vi->affinity_hint_set) {
18098898c21cSWanlong Gao for (i = 0; i < vi->max_queue_pairs; i++) {
18108898c21cSWanlong Gao virtqueue_set_affinity(vi->rq[i].vq, -1);
18118898c21cSWanlong Gao virtqueue_set_affinity(vi->sq[i].vq, -1);
18128898c21cSWanlong Gao }
18138898c21cSWanlong Gao
18148898c21cSWanlong Gao vi->affinity_hint_set = false;
18158898c21cSWanlong Gao }
18168898c21cSWanlong Gao }
18178898c21cSWanlong Gao
18188898c21cSWanlong Gao static void virtnet_set_affinity(struct virtnet_info *vi)
1819986a4f4dSJason Wang {
1820986a4f4dSJason Wang int i;
182147be2479SWanlong Gao int cpu;
1822986a4f4dSJason Wang
1823986a4f4dSJason Wang /* In multiqueue mode, when the number of CPUs equals the number of
1824986a4f4dSJason Wang * queue pairs, we make each queue pair private to one CPU by
1825986a4f4dSJason Wang * setting the affinity hint, eliminating the contention.
1826986a4f4dSJason Wang */
18278898c21cSWanlong Gao if (vi->curr_queue_pairs == 1 ||
18288898c21cSWanlong Gao vi->max_queue_pairs != num_online_cpus()) {
18298898c21cSWanlong Gao virtnet_clean_affinity(vi, -1);
1830986a4f4dSJason Wang return;
1831986a4f4dSJason Wang }
1832986a4f4dSJason Wang
183347be2479SWanlong Gao i = 0;
183447be2479SWanlong Gao for_each_online_cpu(cpu) {
1835986a4f4dSJason Wang virtqueue_set_affinity(vi->rq[i].vq, cpu);
1836986a4f4dSJason Wang virtqueue_set_affinity(vi->sq[i].vq, cpu);
18379bb8ca86SJason Wang netif_set_xps_queue(vi->dev, cpumask_of(cpu), i);
183847be2479SWanlong Gao i++;
1839986a4f4dSJason Wang }
1840986a4f4dSJason Wang
1841986a4f4dSJason Wang vi->affinity_hint_set = true;
184247be2479SWanlong Gao }
1843986a4f4dSJason Wang
18448017c279SSebastian Andrzej Siewior static int virtnet_cpu_online(unsigned int cpu, struct hlist_node *node)
18458de4b2f3SWanlong Gao {
18468017c279SSebastian Andrzej Siewior struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info,
18478017c279SSebastian Andrzej Siewior node);
18488de4b2f3SWanlong Gao virtnet_set_affinity(vi);
18498017c279SSebastian Andrzej Siewior return 0;
18508de4b2f3SWanlong Gao }
18513ab098dfSJason Wang
18528017c279SSebastian Andrzej Siewior static int virtnet_cpu_dead(unsigned int cpu, struct hlist_node *node)
18538017c279SSebastian Andrzej Siewior {
18548017c279SSebastian Andrzej Siewior struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info,
18558017c279SSebastian Andrzej Siewior node_dead);
18568017c279SSebastian Andrzej Siewior virtnet_set_affinity(vi);
18578017c279SSebastian Andrzej Siewior return 0;
18588017c279SSebastian Andrzej Siewior }
18598017c279SSebastian Andrzej Siewior
18608017c279SSebastian Andrzej Siewior static int virtnet_cpu_down_prep(unsigned int cpu, struct hlist_node *node)
18618017c279SSebastian Andrzej Siewior {
18628017c279SSebastian Andrzej Siewior struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info,
18638017c279SSebastian Andrzej Siewior node);
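/* Aside, not driver code: the policy implemented by
 * virtnet_set_affinity() above pins queue pair i to the i-th online CPU,
 * but only when the counts match exactly; otherwise the hints are
 * cleared and placement is left to irqbalance.  Standalone model, with
 * an array standing in for the online-cpu iterator:
 *
 *	int queue_cpu_model(unsigned int q, unsigned int nr_queue_pairs,
 *			    const int *online, unsigned int nr_online)
 *	{
 *		if (nr_queue_pairs == 1 || nr_queue_pairs != nr_online)
 *			return -1;
 *		return online[q];
 *	}
 */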
18648017c279SSebastian Andrzej Siewior 18658017c279SSebastian Andrzej Siewior virtnet_clean_affinity(vi, cpu); 18668017c279SSebastian Andrzej Siewior return 0; 18678017c279SSebastian Andrzej Siewior } 18688017c279SSebastian Andrzej Siewior 18698017c279SSebastian Andrzej Siewior static enum cpuhp_state virtionet_online; 18708017c279SSebastian Andrzej Siewior 18718017c279SSebastian Andrzej Siewior static int virtnet_cpu_notif_add(struct virtnet_info *vi) 18728017c279SSebastian Andrzej Siewior { 18738017c279SSebastian Andrzej Siewior int ret; 18748017c279SSebastian Andrzej Siewior 18758017c279SSebastian Andrzej Siewior ret = cpuhp_state_add_instance_nocalls(virtionet_online, &vi->node); 18768017c279SSebastian Andrzej Siewior if (ret) 18778017c279SSebastian Andrzej Siewior return ret; 18788017c279SSebastian Andrzej Siewior ret = cpuhp_state_add_instance_nocalls(CPUHP_VIRT_NET_DEAD, 18798017c279SSebastian Andrzej Siewior &vi->node_dead); 18808017c279SSebastian Andrzej Siewior if (!ret) 18818017c279SSebastian Andrzej Siewior return ret; 18828017c279SSebastian Andrzej Siewior cpuhp_state_remove_instance_nocalls(virtionet_online, &vi->node); 18838017c279SSebastian Andrzej Siewior return ret; 18848017c279SSebastian Andrzej Siewior } 18858017c279SSebastian Andrzej Siewior 18868017c279SSebastian Andrzej Siewior static void virtnet_cpu_notif_remove(struct virtnet_info *vi) 18878017c279SSebastian Andrzej Siewior { 18888017c279SSebastian Andrzej Siewior cpuhp_state_remove_instance_nocalls(virtionet_online, &vi->node); 18898017c279SSebastian Andrzej Siewior cpuhp_state_remove_instance_nocalls(CPUHP_VIRT_NET_DEAD, 18908017c279SSebastian Andrzej Siewior &vi->node_dead); 1891a9ea3fc6SHerbert Xu } 1892a9ea3fc6SHerbert Xu 18938f9f4668SRick Jones static void virtnet_get_ringparam(struct net_device *dev, 18948f9f4668SRick Jones struct ethtool_ringparam *ring) 18958f9f4668SRick Jones { 18968f9f4668SRick Jones struct virtnet_info *vi = netdev_priv(dev); 18978f9f4668SRick Jones 1898986a4f4dSJason Wang ring->rx_max_pending = virtqueue_get_vring_size(vi->rq[0].vq); 1899986a4f4dSJason Wang ring->tx_max_pending = virtqueue_get_vring_size(vi->sq[0].vq); 19008f9f4668SRick Jones ring->rx_pending = ring->rx_max_pending; 19018f9f4668SRick Jones ring->tx_pending = ring->tx_max_pending; 19028f9f4668SRick Jones } 19038f9f4668SRick Jones 190466846048SRick Jones 190566846048SRick Jones static void virtnet_get_drvinfo(struct net_device *dev, 190666846048SRick Jones struct ethtool_drvinfo *info) 190766846048SRick Jones { 190866846048SRick Jones struct virtnet_info *vi = netdev_priv(dev); 190966846048SRick Jones struct virtio_device *vdev = vi->vdev; 191066846048SRick Jones 191166846048SRick Jones strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver)); 191266846048SRick Jones strlcpy(info->version, VIRTNET_DRIVER_VERSION, sizeof(info->version)); 191366846048SRick Jones strlcpy(info->bus_info, virtio_bus_name(vdev), sizeof(info->bus_info)); 191466846048SRick Jones 191566846048SRick Jones } 191666846048SRick Jones 1917d73bcd2cSJason Wang /* TODO: Eliminate OOO packets during switching */ 1918d73bcd2cSJason Wang static int virtnet_set_channels(struct net_device *dev, 1919d73bcd2cSJason Wang struct ethtool_channels *channels) 1920d73bcd2cSJason Wang { 1921d73bcd2cSJason Wang struct virtnet_info *vi = netdev_priv(dev); 1922d73bcd2cSJason Wang u16 queue_pairs = channels->combined_count; 1923d73bcd2cSJason Wang int err; 1924d73bcd2cSJason Wang 1925d73bcd2cSJason Wang /* We don't support separate rx/tx channels. 
1926d73bcd2cSJason Wang * We don't allow setting 'other' channels. 1927d73bcd2cSJason Wang */ 1928d73bcd2cSJason Wang if (channels->rx_count || channels->tx_count || channels->other_count) 1929d73bcd2cSJason Wang return -EINVAL; 1930d73bcd2cSJason Wang 1931c18e9cd6SAmos Kong if (queue_pairs > vi->max_queue_pairs || queue_pairs == 0) 1932d73bcd2cSJason Wang return -EINVAL; 1933d73bcd2cSJason Wang 1934f600b690SJohn Fastabend /* For now we don't support modifying channels while XDP is loaded 1935f600b690SJohn Fastabend * also when XDP is loaded all RX queues have XDP programs so we only 1936f600b690SJohn Fastabend * need to check a single RX queue. 1937f600b690SJohn Fastabend */ 1938f600b690SJohn Fastabend if (vi->rq[0].xdp_prog) 1939f600b690SJohn Fastabend return -EINVAL; 1940f600b690SJohn Fastabend 194147be2479SWanlong Gao get_online_cpus(); 194247315329SJohn Fastabend err = _virtnet_set_queues(vi, queue_pairs); 1943d73bcd2cSJason Wang if (!err) { 1944d73bcd2cSJason Wang netif_set_real_num_tx_queues(dev, queue_pairs); 1945d73bcd2cSJason Wang netif_set_real_num_rx_queues(dev, queue_pairs); 1946d73bcd2cSJason Wang 19478898c21cSWanlong Gao virtnet_set_affinity(vi); 1948d73bcd2cSJason Wang } 194947be2479SWanlong Gao put_online_cpus(); 1950d73bcd2cSJason Wang 1951d73bcd2cSJason Wang return err; 1952d73bcd2cSJason Wang } 1953d73bcd2cSJason Wang 1954d7dfc5cfSToshiaki Makita static void virtnet_get_strings(struct net_device *dev, u32 stringset, u8 *data) 1955d7dfc5cfSToshiaki Makita { 1956d7dfc5cfSToshiaki Makita struct virtnet_info *vi = netdev_priv(dev); 1957d7dfc5cfSToshiaki Makita char *p = (char *)data; 1958d7dfc5cfSToshiaki Makita unsigned int i, j; 1959d7dfc5cfSToshiaki Makita 1960d7dfc5cfSToshiaki Makita switch (stringset) { 1961d7dfc5cfSToshiaki Makita case ETH_SS_STATS: 1962d7dfc5cfSToshiaki Makita for (i = 0; i < vi->curr_queue_pairs; i++) { 1963d7dfc5cfSToshiaki Makita for (j = 0; j < VIRTNET_RQ_STATS_LEN; j++) { 1964d7dfc5cfSToshiaki Makita snprintf(p, ETH_GSTRING_LEN, "rx_queue_%u_%s", 1965d7dfc5cfSToshiaki Makita i, virtnet_rq_stats_desc[j].desc); 1966d7dfc5cfSToshiaki Makita p += ETH_GSTRING_LEN; 1967d7dfc5cfSToshiaki Makita } 1968d7dfc5cfSToshiaki Makita } 1969d7dfc5cfSToshiaki Makita 1970d7dfc5cfSToshiaki Makita for (i = 0; i < vi->curr_queue_pairs; i++) { 1971d7dfc5cfSToshiaki Makita for (j = 0; j < VIRTNET_SQ_STATS_LEN; j++) { 1972d7dfc5cfSToshiaki Makita snprintf(p, ETH_GSTRING_LEN, "tx_queue_%u_%s", 1973d7dfc5cfSToshiaki Makita i, virtnet_sq_stats_desc[j].desc); 1974d7dfc5cfSToshiaki Makita p += ETH_GSTRING_LEN; 1975d7dfc5cfSToshiaki Makita } 1976d7dfc5cfSToshiaki Makita } 1977d7dfc5cfSToshiaki Makita break; 1978d7dfc5cfSToshiaki Makita } 1979d7dfc5cfSToshiaki Makita } 1980d7dfc5cfSToshiaki Makita 1981d7dfc5cfSToshiaki Makita static int virtnet_get_sset_count(struct net_device *dev, int sset) 1982d7dfc5cfSToshiaki Makita { 1983d7dfc5cfSToshiaki Makita struct virtnet_info *vi = netdev_priv(dev); 1984d7dfc5cfSToshiaki Makita 1985d7dfc5cfSToshiaki Makita switch (sset) { 1986d7dfc5cfSToshiaki Makita case ETH_SS_STATS: 1987d7dfc5cfSToshiaki Makita return vi->curr_queue_pairs * (VIRTNET_RQ_STATS_LEN + 1988d7dfc5cfSToshiaki Makita VIRTNET_SQ_STATS_LEN); 1989d7dfc5cfSToshiaki Makita default: 1990d7dfc5cfSToshiaki Makita return -EOPNOTSUPP; 1991d7dfc5cfSToshiaki Makita } 1992d7dfc5cfSToshiaki Makita } 1993d7dfc5cfSToshiaki Makita 1994d7dfc5cfSToshiaki Makita static void virtnet_get_ethtool_stats(struct net_device *dev, 1995d7dfc5cfSToshiaki Makita struct ethtool_stats *stats, u64 
*data) 1996d7dfc5cfSToshiaki Makita { 1997d7dfc5cfSToshiaki Makita struct virtnet_info *vi = netdev_priv(dev); 1998d7dfc5cfSToshiaki Makita unsigned int idx = 0, start, i, j; 1999d7dfc5cfSToshiaki Makita const u8 *stats_base; 2000d7dfc5cfSToshiaki Makita size_t offset; 2001d7dfc5cfSToshiaki Makita 2002d7dfc5cfSToshiaki Makita for (i = 0; i < vi->curr_queue_pairs; i++) { 2003d7dfc5cfSToshiaki Makita struct receive_queue *rq = &vi->rq[i]; 2004d7dfc5cfSToshiaki Makita 2005d7dfc5cfSToshiaki Makita stats_base = (u8 *)&rq->stats; 2006d7dfc5cfSToshiaki Makita do { 2007d7dfc5cfSToshiaki Makita start = u64_stats_fetch_begin_irq(&rq->stats.syncp); 2008d7dfc5cfSToshiaki Makita for (j = 0; j < VIRTNET_RQ_STATS_LEN; j++) { 2009d7dfc5cfSToshiaki Makita offset = virtnet_rq_stats_desc[j].offset; 2010d7dfc5cfSToshiaki Makita data[idx + j] = *(u64 *)(stats_base + offset); 2011d7dfc5cfSToshiaki Makita } 2012d7dfc5cfSToshiaki Makita } while (u64_stats_fetch_retry_irq(&rq->stats.syncp, start)); 2013d7dfc5cfSToshiaki Makita idx += VIRTNET_RQ_STATS_LEN; 2014d7dfc5cfSToshiaki Makita } 2015d7dfc5cfSToshiaki Makita 2016d7dfc5cfSToshiaki Makita for (i = 0; i < vi->curr_queue_pairs; i++) { 2017d7dfc5cfSToshiaki Makita struct send_queue *sq = &vi->sq[i]; 2018d7dfc5cfSToshiaki Makita 2019d7dfc5cfSToshiaki Makita stats_base = (u8 *)&sq->stats; 2020d7dfc5cfSToshiaki Makita do { 2021d7dfc5cfSToshiaki Makita start = u64_stats_fetch_begin_irq(&sq->stats.syncp); 2022d7dfc5cfSToshiaki Makita for (j = 0; j < VIRTNET_SQ_STATS_LEN; j++) { 2023d7dfc5cfSToshiaki Makita offset = virtnet_sq_stats_desc[j].offset; 2024d7dfc5cfSToshiaki Makita data[idx + j] = *(u64 *)(stats_base + offset); 2025d7dfc5cfSToshiaki Makita } 2026d7dfc5cfSToshiaki Makita } while (u64_stats_fetch_retry_irq(&sq->stats.syncp, start)); 2027d7dfc5cfSToshiaki Makita idx += VIRTNET_SQ_STATS_LEN; 2028d7dfc5cfSToshiaki Makita } 2029d7dfc5cfSToshiaki Makita } 2030d7dfc5cfSToshiaki Makita 2031d73bcd2cSJason Wang static void virtnet_get_channels(struct net_device *dev, 2032d73bcd2cSJason Wang struct ethtool_channels *channels) 2033d73bcd2cSJason Wang { 2034d73bcd2cSJason Wang struct virtnet_info *vi = netdev_priv(dev); 2035d73bcd2cSJason Wang 2036d73bcd2cSJason Wang channels->combined_count = vi->curr_queue_pairs; 2037d73bcd2cSJason Wang channels->max_combined = vi->max_queue_pairs; 2038d73bcd2cSJason Wang channels->max_other = 0; 2039d73bcd2cSJason Wang channels->rx_count = 0; 2040d73bcd2cSJason Wang channels->tx_count = 0; 2041d73bcd2cSJason Wang channels->other_count = 0; 2042d73bcd2cSJason Wang } 2043d73bcd2cSJason Wang 204416032be5SNikolay Aleksandrov /* Check if the user is trying to change anything besides speed/duplex */ 2045ebb6b4b1SPhilippe Reynes static bool 2046ebb6b4b1SPhilippe Reynes virtnet_validate_ethtool_cmd(const struct ethtool_link_ksettings *cmd) 204716032be5SNikolay Aleksandrov { 2048ebb6b4b1SPhilippe Reynes struct ethtool_link_ksettings diff1 = *cmd; 2049ebb6b4b1SPhilippe Reynes struct ethtool_link_ksettings diff2 = {}; 205016032be5SNikolay Aleksandrov 20510cf3ace9SNikolay Aleksandrov /* cmd is always set so we need to clear it, validate the port type 20520cf3ace9SNikolay Aleksandrov * and also without autonegotiation we can ignore advertising 20530cf3ace9SNikolay Aleksandrov */ 2054ebb6b4b1SPhilippe Reynes diff1.base.speed = 0; 2055ebb6b4b1SPhilippe Reynes diff2.base.port = PORT_OTHER; 2056ebb6b4b1SPhilippe Reynes ethtool_link_ksettings_zero_link_mode(&diff1, advertising); 2057ebb6b4b1SPhilippe Reynes diff1.base.duplex = 0; 2058ebb6b4b1SPhilippe 
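/* Aside, not driver code: the pattern in progress here clears, in diff1,
 * every field the caller may legitimately set, keeps diff2
 * default-initialized apart from the mandatory PORT_OTHER, and lets the
 * trailing memcmp/bitmap_empty checks reject any request that touched
 * something else.  A minimal standalone model of the same
 * reject-by-difference idea (99 is a stand-in port constant; memcmp
 * comes from <string.h>):
 *
 *	struct settings_model { int speed, duplex, port; };
 *
 *	int only_speed_duplex_model(struct settings_model req)
 *	{
 *		struct settings_model expect = { 0, 0, 99 };
 *		req.speed = 0;
 *		req.duplex = 0;
 *		return memcmp(&req, &expect, sizeof(req)) == 0;
 *	}
 */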
Reynes diff1.base.cmd = 0; 2059ebb6b4b1SPhilippe Reynes diff1.base.link_mode_masks_nwords = 0; 206016032be5SNikolay Aleksandrov 2061ebb6b4b1SPhilippe Reynes return !memcmp(&diff1.base, &diff2.base, sizeof(diff1.base)) && 2062ebb6b4b1SPhilippe Reynes bitmap_empty(diff1.link_modes.supported, 2063ebb6b4b1SPhilippe Reynes __ETHTOOL_LINK_MODE_MASK_NBITS) && 2064ebb6b4b1SPhilippe Reynes bitmap_empty(diff1.link_modes.advertising, 2065ebb6b4b1SPhilippe Reynes __ETHTOOL_LINK_MODE_MASK_NBITS) && 2066ebb6b4b1SPhilippe Reynes bitmap_empty(diff1.link_modes.lp_advertising, 2067ebb6b4b1SPhilippe Reynes __ETHTOOL_LINK_MODE_MASK_NBITS); 206816032be5SNikolay Aleksandrov } 206916032be5SNikolay Aleksandrov 2070ebb6b4b1SPhilippe Reynes static int virtnet_set_link_ksettings(struct net_device *dev, 2071ebb6b4b1SPhilippe Reynes const struct ethtool_link_ksettings *cmd) 207216032be5SNikolay Aleksandrov { 207316032be5SNikolay Aleksandrov struct virtnet_info *vi = netdev_priv(dev); 207416032be5SNikolay Aleksandrov u32 speed; 207516032be5SNikolay Aleksandrov 2076ebb6b4b1SPhilippe Reynes speed = cmd->base.speed; 207716032be5SNikolay Aleksandrov /* don't allow custom speed and duplex */ 207816032be5SNikolay Aleksandrov if (!ethtool_validate_speed(speed) || 2079ebb6b4b1SPhilippe Reynes !ethtool_validate_duplex(cmd->base.duplex) || 208016032be5SNikolay Aleksandrov !virtnet_validate_ethtool_cmd(cmd)) 208116032be5SNikolay Aleksandrov return -EINVAL; 208216032be5SNikolay Aleksandrov vi->speed = speed; 2083ebb6b4b1SPhilippe Reynes vi->duplex = cmd->base.duplex; 208416032be5SNikolay Aleksandrov 208516032be5SNikolay Aleksandrov return 0; 208616032be5SNikolay Aleksandrov } 208716032be5SNikolay Aleksandrov 2088ebb6b4b1SPhilippe Reynes static int virtnet_get_link_ksettings(struct net_device *dev, 2089ebb6b4b1SPhilippe Reynes struct ethtool_link_ksettings *cmd) 209016032be5SNikolay Aleksandrov { 209116032be5SNikolay Aleksandrov struct virtnet_info *vi = netdev_priv(dev); 209216032be5SNikolay Aleksandrov 2093ebb6b4b1SPhilippe Reynes cmd->base.speed = vi->speed; 2094ebb6b4b1SPhilippe Reynes cmd->base.duplex = vi->duplex; 2095ebb6b4b1SPhilippe Reynes cmd->base.port = PORT_OTHER; 209616032be5SNikolay Aleksandrov 209716032be5SNikolay Aleksandrov return 0; 209816032be5SNikolay Aleksandrov } 209916032be5SNikolay Aleksandrov 210016032be5SNikolay Aleksandrov static void virtnet_init_settings(struct net_device *dev) 210116032be5SNikolay Aleksandrov { 210216032be5SNikolay Aleksandrov struct virtnet_info *vi = netdev_priv(dev); 210316032be5SNikolay Aleksandrov 210416032be5SNikolay Aleksandrov vi->speed = SPEED_UNKNOWN; 210516032be5SNikolay Aleksandrov vi->duplex = DUPLEX_UNKNOWN; 210616032be5SNikolay Aleksandrov } 210716032be5SNikolay Aleksandrov 2108faa9b39fSJason Baron static void virtnet_update_settings(struct virtnet_info *vi) 2109faa9b39fSJason Baron { 2110faa9b39fSJason Baron u32 speed; 2111faa9b39fSJason Baron u8 duplex; 2112faa9b39fSJason Baron 2113faa9b39fSJason Baron if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_SPEED_DUPLEX)) 2114faa9b39fSJason Baron return; 2115faa9b39fSJason Baron 2116faa9b39fSJason Baron speed = virtio_cread32(vi->vdev, offsetof(struct virtio_net_config, 2117faa9b39fSJason Baron speed)); 2118faa9b39fSJason Baron if (ethtool_validate_speed(speed)) 2119faa9b39fSJason Baron vi->speed = speed; 2120faa9b39fSJason Baron duplex = virtio_cread8(vi->vdev, offsetof(struct virtio_net_config, 2121faa9b39fSJason Baron duplex)); 2122faa9b39fSJason Baron if (ethtool_validate_duplex(duplex)) 2123faa9b39fSJason Baron vi->duplex 
= duplex; 2124faa9b39fSJason Baron } 2125faa9b39fSJason Baron 21260fc0b732SStephen Hemminger static const struct ethtool_ops virtnet_ethtool_ops = { 212766846048SRick Jones .get_drvinfo = virtnet_get_drvinfo, 21289f4d26d0SMark McLoughlin .get_link = ethtool_op_get_link, 21298f9f4668SRick Jones .get_ringparam = virtnet_get_ringparam, 2130d7dfc5cfSToshiaki Makita .get_strings = virtnet_get_strings, 2131d7dfc5cfSToshiaki Makita .get_sset_count = virtnet_get_sset_count, 2132d7dfc5cfSToshiaki Makita .get_ethtool_stats = virtnet_get_ethtool_stats, 2133d73bcd2cSJason Wang .set_channels = virtnet_set_channels, 2134d73bcd2cSJason Wang .get_channels = virtnet_get_channels, 2135074c3582SJacob Keller .get_ts_info = ethtool_op_get_ts_info, 2136ebb6b4b1SPhilippe Reynes .get_link_ksettings = virtnet_get_link_ksettings, 2137ebb6b4b1SPhilippe Reynes .set_link_ksettings = virtnet_set_link_ksettings, 2138a9ea3fc6SHerbert Xu }; 2139a9ea3fc6SHerbert Xu 21409fe7bfceSJohn Fastabend static void virtnet_freeze_down(struct virtio_device *vdev) 21419fe7bfceSJohn Fastabend { 21429fe7bfceSJohn Fastabend struct virtnet_info *vi = vdev->priv; 21439fe7bfceSJohn Fastabend int i; 21449fe7bfceSJohn Fastabend 21459fe7bfceSJohn Fastabend /* Make sure no work handler is accessing the device */ 21469fe7bfceSJohn Fastabend flush_work(&vi->config_work); 21479fe7bfceSJohn Fastabend 21489fe7bfceSJohn Fastabend netif_device_detach(vi->dev); 2149713a98d9SJason Wang netif_tx_disable(vi->dev); 21509fe7bfceSJohn Fastabend cancel_delayed_work_sync(&vi->refill); 21519fe7bfceSJohn Fastabend 21529fe7bfceSJohn Fastabend if (netif_running(vi->dev)) { 2153b92f1e67SWillem de Bruijn for (i = 0; i < vi->max_queue_pairs; i++) { 21549fe7bfceSJohn Fastabend napi_disable(&vi->rq[i].napi); 215578a57b48SWillem de Bruijn virtnet_napi_tx_disable(&vi->sq[i].napi); 2156b92f1e67SWillem de Bruijn } 21579fe7bfceSJohn Fastabend } 21589fe7bfceSJohn Fastabend } 21599fe7bfceSJohn Fastabend 21609fe7bfceSJohn Fastabend static int init_vqs(struct virtnet_info *vi); 21619fe7bfceSJohn Fastabend 21629fe7bfceSJohn Fastabend static int virtnet_restore_up(struct virtio_device *vdev) 21639fe7bfceSJohn Fastabend { 21649fe7bfceSJohn Fastabend struct virtnet_info *vi = vdev->priv; 21659fe7bfceSJohn Fastabend int err, i; 21669fe7bfceSJohn Fastabend 21679fe7bfceSJohn Fastabend err = init_vqs(vi); 21689fe7bfceSJohn Fastabend if (err) 21699fe7bfceSJohn Fastabend return err; 21709fe7bfceSJohn Fastabend 21719fe7bfceSJohn Fastabend virtio_device_ready(vdev); 21729fe7bfceSJohn Fastabend 21739fe7bfceSJohn Fastabend if (netif_running(vi->dev)) { 21749fe7bfceSJohn Fastabend for (i = 0; i < vi->curr_queue_pairs; i++) 21759fe7bfceSJohn Fastabend if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL)) 21769fe7bfceSJohn Fastabend schedule_delayed_work(&vi->refill, 0); 21779fe7bfceSJohn Fastabend 2178b92f1e67SWillem de Bruijn for (i = 0; i < vi->max_queue_pairs; i++) { 2179e4e8452aSWillem de Bruijn virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi); 2180b92f1e67SWillem de Bruijn virtnet_napi_tx_enable(vi, vi->sq[i].vq, 2181b92f1e67SWillem de Bruijn &vi->sq[i].napi); 2182b92f1e67SWillem de Bruijn } 21839fe7bfceSJohn Fastabend } 21849fe7bfceSJohn Fastabend 21859fe7bfceSJohn Fastabend netif_device_attach(vi->dev); 21869fe7bfceSJohn Fastabend return err; 21879fe7bfceSJohn Fastabend } 21889fe7bfceSJohn Fastabend 21893f93522fSJason Wang static int virtnet_set_guest_offloads(struct virtnet_info *vi, u64 offloads) 21903f93522fSJason Wang { 21913f93522fSJason Wang struct scatterlist sg; 219212e57169SMichael 
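/* Aside, not driver code: the offloads word sent by this function is a
 * bitmap indexed by virtio feature-bit number, which is why
 * virtnet_clear_guest_offloads() below can keep just GUEST_CSUM while
 * dropping the GSO bits for XDP.  Standalone model; the bit numbers are
 * taken from the virtio spec (GUEST_CSUM=1, TSO4=7, TSO6=8, UFO=10),
 * not from this excerpt:
 *
 *	unsigned long long offload_mask_model(int csum, int tso4,
 *					      int tso6, int ufo)
 *	{
 *		unsigned long long m = 0;
 *		if (csum)
 *			m |= 1ULL << 1;
 *		if (tso4)
 *			m |= 1ULL << 7;
 *		if (tso6)
 *			m |= 1ULL << 8;
 *		if (ufo)
 *			m |= 1ULL << 10;
 *		return m;
 *	}
 */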
S. Tsirkin vi->ctrl->offloads = cpu_to_virtio64(vi->vdev, offloads);
21933f93522fSJason Wang
219412e57169SMichael S. Tsirkin sg_init_one(&sg, &vi->ctrl->offloads, sizeof(vi->ctrl->offloads));
21953f93522fSJason Wang
21963f93522fSJason Wang if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_GUEST_OFFLOADS,
21973f93522fSJason Wang VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET, &sg)) {
21983f93522fSJason Wang dev_warn(&vi->dev->dev, "Failed to set guest offloads.\n");
21993f93522fSJason Wang return -EINVAL;
22003f93522fSJason Wang }
22013f93522fSJason Wang
22023f93522fSJason Wang return 0;
22033f93522fSJason Wang }
22043f93522fSJason Wang
22053f93522fSJason Wang static int virtnet_clear_guest_offloads(struct virtnet_info *vi)
22063f93522fSJason Wang {
22073f93522fSJason Wang u64 offloads = 0;
22083f93522fSJason Wang
22093f93522fSJason Wang if (!vi->guest_offloads)
22103f93522fSJason Wang return 0;
22113f93522fSJason Wang
22123f93522fSJason Wang if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_CSUM))
22133f93522fSJason Wang offloads = 1ULL << VIRTIO_NET_F_GUEST_CSUM;
22143f93522fSJason Wang
22153f93522fSJason Wang return virtnet_set_guest_offloads(vi, offloads);
22163f93522fSJason Wang }
22173f93522fSJason Wang
22183f93522fSJason Wang static int virtnet_restore_guest_offloads(struct virtnet_info *vi)
22193f93522fSJason Wang {
22203f93522fSJason Wang u64 offloads = vi->guest_offloads;
22213f93522fSJason Wang
22223f93522fSJason Wang if (!vi->guest_offloads)
22233f93522fSJason Wang return 0;
22243f93522fSJason Wang if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_CSUM))
22253f93522fSJason Wang offloads |= 1ULL << VIRTIO_NET_F_GUEST_CSUM;
22263f93522fSJason Wang
22273f93522fSJason Wang return virtnet_set_guest_offloads(vi, offloads);
22283f93522fSJason Wang }
22293f93522fSJason Wang
22309861ce03SJakub Kicinski static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
22319861ce03SJakub Kicinski struct netlink_ext_ack *extack)
2232f600b690SJohn Fastabend {
2233f600b690SJohn Fastabend unsigned long int max_sz = PAGE_SIZE - sizeof(struct padded_vnet_hdr);
2234f600b690SJohn Fastabend struct virtnet_info *vi = netdev_priv(dev);
2235f600b690SJohn Fastabend struct bpf_prog *old_prog;
2236017b29c3SJason Wang u16 xdp_qp = 0, curr_qp;
2237672aafd5SJohn Fastabend int i, err;
2238f600b690SJohn Fastabend
22393f93522fSJason Wang if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS)
22403f93522fSJason Wang && (virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO4) ||
224192502fe8SJason Wang virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO6) ||
224292502fe8SJason Wang virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_ECN) ||
22433f93522fSJason Wang virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_UFO))) {
22444d463c4dSDaniel Borkmann NL_SET_ERR_MSG_MOD(extack, "Can't set XDP while host is implementing LRO, disable LRO first");
2245f600b690SJohn Fastabend return -EOPNOTSUPP;
2246f600b690SJohn Fastabend }
2247f600b690SJohn Fastabend
2248f600b690SJohn Fastabend if (vi->mergeable_rx_bufs && !vi->any_header_sg) {
22494d463c4dSDaniel Borkmann NL_SET_ERR_MSG_MOD(extack, "XDP expects header/data in single page, any_header_sg required");
2250f600b690SJohn Fastabend return -EINVAL;
2251f600b690SJohn Fastabend }
2252f600b690SJohn Fastabend
2253f600b690SJohn Fastabend if (dev->mtu > max_sz) {
22544d463c4dSDaniel Borkmann NL_SET_ERR_MSG_MOD(extack, "MTU too large to enable XDP");
2255f600b690SJohn Fastabend netdev_warn(dev, "XDP requires MTU less than %lu\n", max_sz);
2256f600b690SJohn Fastabend return
-EINVAL; 2257f600b690SJohn Fastabend } 2258f600b690SJohn Fastabend 2259672aafd5SJohn Fastabend curr_qp = vi->curr_queue_pairs - vi->xdp_queue_pairs; 2260672aafd5SJohn Fastabend if (prog) 2261672aafd5SJohn Fastabend xdp_qp = nr_cpu_ids; 2262672aafd5SJohn Fastabend 2263672aafd5SJohn Fastabend /* XDP requires extra queues for XDP_TX */ 2264672aafd5SJohn Fastabend if (curr_qp + xdp_qp > vi->max_queue_pairs) { 22654d463c4dSDaniel Borkmann NL_SET_ERR_MSG_MOD(extack, "Too few free TX rings available"); 2266672aafd5SJohn Fastabend netdev_warn(dev, "request %i queues but max is %i\n", 2267672aafd5SJohn Fastabend curr_qp + xdp_qp, vi->max_queue_pairs); 2268672aafd5SJohn Fastabend return -ENOMEM; 2269672aafd5SJohn Fastabend } 2270672aafd5SJohn Fastabend 22712de2f7f4SJohn Fastabend if (prog) { 22722de2f7f4SJohn Fastabend prog = bpf_prog_add(prog, vi->max_queue_pairs - 1); 22732de2f7f4SJohn Fastabend if (IS_ERR(prog)) 22742de2f7f4SJohn Fastabend return PTR_ERR(prog); 22752de2f7f4SJohn Fastabend } 22762de2f7f4SJohn Fastabend 22774941d472SJason Wang /* Make sure NAPI is not using any XDP TX queues for RX. */ 22784e09ff53SJason Wang if (netif_running(dev)) 22794941d472SJason Wang for (i = 0; i < vi->max_queue_pairs; i++) 22804941d472SJason Wang napi_disable(&vi->rq[i].napi); 22812de2f7f4SJohn Fastabend 2282672aafd5SJohn Fastabend netif_set_real_num_rx_queues(dev, curr_qp + xdp_qp); 22834941d472SJason Wang err = _virtnet_set_queues(vi, curr_qp + xdp_qp); 22844941d472SJason Wang if (err) 22854941d472SJason Wang goto err; 22864941d472SJason Wang vi->xdp_queue_pairs = xdp_qp; 2287f600b690SJohn Fastabend 2288f600b690SJohn Fastabend for (i = 0; i < vi->max_queue_pairs; i++) { 2289f600b690SJohn Fastabend old_prog = rtnl_dereference(vi->rq[i].xdp_prog); 2290f600b690SJohn Fastabend rcu_assign_pointer(vi->rq[i].xdp_prog, prog); 22913f93522fSJason Wang if (i == 0) { 22923f93522fSJason Wang if (!old_prog) 22933f93522fSJason Wang virtnet_clear_guest_offloads(vi); 22943f93522fSJason Wang if (!prog) 22953f93522fSJason Wang virtnet_restore_guest_offloads(vi); 22963f93522fSJason Wang } 2297f600b690SJohn Fastabend if (old_prog) 2298f600b690SJohn Fastabend bpf_prog_put(old_prog); 22994e09ff53SJason Wang if (netif_running(dev)) 23004941d472SJason Wang virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi); 2301f600b690SJohn Fastabend } 2302f600b690SJohn Fastabend 2303f600b690SJohn Fastabend return 0; 23042de2f7f4SJohn Fastabend 23054941d472SJason Wang err: 23064941d472SJason Wang for (i = 0; i < vi->max_queue_pairs; i++) 23074941d472SJason Wang virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi); 23082de2f7f4SJohn Fastabend if (prog) 23092de2f7f4SJohn Fastabend bpf_prog_sub(prog, vi->max_queue_pairs - 1); 23102de2f7f4SJohn Fastabend return err; 2311f600b690SJohn Fastabend } 2312f600b690SJohn Fastabend 23135b0e6629SMartin KaFai Lau static u32 virtnet_xdp_query(struct net_device *dev) 2314f600b690SJohn Fastabend { 2315f600b690SJohn Fastabend struct virtnet_info *vi = netdev_priv(dev); 23165b0e6629SMartin KaFai Lau const struct bpf_prog *xdp_prog; 2317f600b690SJohn Fastabend int i; 2318f600b690SJohn Fastabend 2319f600b690SJohn Fastabend for (i = 0; i < vi->max_queue_pairs; i++) { 23205b0e6629SMartin KaFai Lau xdp_prog = rtnl_dereference(vi->rq[i].xdp_prog); 23215b0e6629SMartin KaFai Lau if (xdp_prog) 23225b0e6629SMartin KaFai Lau return xdp_prog->aux->id; 2323f600b690SJohn Fastabend } 23245b0e6629SMartin KaFai Lau return 0; 2325f600b690SJohn Fastabend } 2326f600b690SJohn Fastabend 2327f4e63525SJakub Kicinski static int 
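/* .ndo_bpf entry point. XDP_SETUP_PROG installs or removes a program;
 * XDP_QUERY_PROG reports the id of the installed program, if any.
 * Attaching reserves one extra TX queue per possible CPU so XDP_TX
 * can transmit without contending with the stack's own TX queues.
 */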
virtnet_xdp(struct net_device *dev, struct netdev_bpf *xdp) 2328f600b690SJohn Fastabend { 2329f600b690SJohn Fastabend switch (xdp->command) { 2330f600b690SJohn Fastabend case XDP_SETUP_PROG: 23319861ce03SJakub Kicinski return virtnet_xdp_set(dev, xdp->prog, xdp->extack); 2332f600b690SJohn Fastabend case XDP_QUERY_PROG: 23335b0e6629SMartin KaFai Lau xdp->prog_id = virtnet_xdp_query(dev); 23345b0e6629SMartin KaFai Lau xdp->prog_attached = !!xdp->prog_id; 2335f600b690SJohn Fastabend return 0; 2336f600b690SJohn Fastabend default: 2337f600b690SJohn Fastabend return -EINVAL; 2338f600b690SJohn Fastabend } 2339f600b690SJohn Fastabend } 2340f600b690SJohn Fastabend 234176288b4eSStephen Hemminger static const struct net_device_ops virtnet_netdev = { 234276288b4eSStephen Hemminger .ndo_open = virtnet_open, 234376288b4eSStephen Hemminger .ndo_stop = virtnet_close, 234476288b4eSStephen Hemminger .ndo_start_xmit = start_xmit, 234576288b4eSStephen Hemminger .ndo_validate_addr = eth_validate_addr, 23469c46f6d4SAlex Williamson .ndo_set_mac_address = virtnet_set_mac_address, 23472af7698eSAlex Williamson .ndo_set_rx_mode = virtnet_set_rx_mode, 23483fa2a1dfSstephen hemminger .ndo_get_stats64 = virtnet_stats, 23491824a989SAlex Williamson .ndo_vlan_rx_add_vid = virtnet_vlan_rx_add_vid, 23501824a989SAlex Williamson .ndo_vlan_rx_kill_vid = virtnet_vlan_rx_kill_vid, 235176288b4eSStephen Hemminger #ifdef CONFIG_NET_POLL_CONTROLLER 235276288b4eSStephen Hemminger .ndo_poll_controller = virtnet_netpoll, 235376288b4eSStephen Hemminger #endif 2354f4e63525SJakub Kicinski .ndo_bpf = virtnet_xdp, 2355186b3c99SJason Wang .ndo_xdp_xmit = virtnet_xdp_xmit, 2356186b3c99SJason Wang .ndo_xdp_flush = virtnet_xdp_flush, 23572836b4f2SVlad Yasevich .ndo_features_check = passthru_features_check, 235876288b4eSStephen Hemminger }; 235976288b4eSStephen Hemminger 2360586d17c5SJason Wang static void virtnet_config_changed_work(struct work_struct *work) 23619f4d26d0SMark McLoughlin { 2362586d17c5SJason Wang struct virtnet_info *vi = 2363586d17c5SJason Wang container_of(work, struct virtnet_info, config_work); 23649f4d26d0SMark McLoughlin u16 v; 23659f4d26d0SMark McLoughlin 2366855e0c52SRusty Russell if (virtio_cread_feature(vi->vdev, VIRTIO_NET_F_STATUS, 2367855e0c52SRusty Russell struct virtio_net_config, status, &v) < 0) 2368507613bfSMichael S. Tsirkin return; 2369586d17c5SJason Wang 2370586d17c5SJason Wang if (v & VIRTIO_NET_S_ANNOUNCE) { 2371ee89bab1SAmerigo Wang netdev_notify_peers(vi->dev); 2372586d17c5SJason Wang virtnet_ack_link_announce(vi); 2373586d17c5SJason Wang } 23749f4d26d0SMark McLoughlin 23759f4d26d0SMark McLoughlin /* Ignore unknown (future) status bits */ 23769f4d26d0SMark McLoughlin v &= VIRTIO_NET_S_LINK_UP; 23779f4d26d0SMark McLoughlin 23789f4d26d0SMark McLoughlin if (vi->status == v) 2379507613bfSMichael S. 
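/* Link state unchanged; nothing to propagate to the stack. */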
Tsirkin return; 23809f4d26d0SMark McLoughlin 23819f4d26d0SMark McLoughlin vi->status = v; 23829f4d26d0SMark McLoughlin 23839f4d26d0SMark McLoughlin if (vi->status & VIRTIO_NET_S_LINK_UP) { 2384faa9b39fSJason Baron virtnet_update_settings(vi); 23859f4d26d0SMark McLoughlin netif_carrier_on(vi->dev); 2386986a4f4dSJason Wang netif_tx_wake_all_queues(vi->dev); 23879f4d26d0SMark McLoughlin } else { 23889f4d26d0SMark McLoughlin netif_carrier_off(vi->dev); 2389986a4f4dSJason Wang netif_tx_stop_all_queues(vi->dev); 23909f4d26d0SMark McLoughlin } 23919f4d26d0SMark McLoughlin } 23929f4d26d0SMark McLoughlin 23939f4d26d0SMark McLoughlin static void virtnet_config_changed(struct virtio_device *vdev) 23949f4d26d0SMark McLoughlin { 23959f4d26d0SMark McLoughlin struct virtnet_info *vi = vdev->priv; 23969f4d26d0SMark McLoughlin 23973b07e9caSTejun Heo schedule_work(&vi->config_work); 23989f4d26d0SMark McLoughlin } 23999f4d26d0SMark McLoughlin 2400986a4f4dSJason Wang static void virtnet_free_queues(struct virtnet_info *vi) 2401986a4f4dSJason Wang { 2402d4fb84eeSAndrey Vagin int i; 2403d4fb84eeSAndrey Vagin 2404ab3971b1SJason Wang for (i = 0; i < vi->max_queue_pairs; i++) { 2405ab3971b1SJason Wang napi_hash_del(&vi->rq[i].napi); 2406d4fb84eeSAndrey Vagin netif_napi_del(&vi->rq[i].napi); 2407b92f1e67SWillem de Bruijn netif_napi_del(&vi->sq[i].napi); 2408ab3971b1SJason Wang } 2409d4fb84eeSAndrey Vagin 2410963abe5cSEric Dumazet /* We called napi_hash_del() before netif_napi_del(), 2411963abe5cSEric Dumazet * we need to respect an RCU grace period before freeing vi->rq 2412963abe5cSEric Dumazet */ 2413963abe5cSEric Dumazet synchronize_net(); 2414963abe5cSEric Dumazet 2415986a4f4dSJason Wang kfree(vi->rq); 2416986a4f4dSJason Wang kfree(vi->sq); 241712e57169SMichael S. Tsirkin kfree(vi->ctrl); 2418986a4f4dSJason Wang } 2419986a4f4dSJason Wang 242047315329SJohn Fastabend static void _free_receive_bufs(struct virtnet_info *vi) 2421986a4f4dSJason Wang { 2422f600b690SJohn Fastabend struct bpf_prog *old_prog; 2423986a4f4dSJason Wang int i; 2424986a4f4dSJason Wang 2425986a4f4dSJason Wang for (i = 0; i < vi->max_queue_pairs; i++) { 2426986a4f4dSJason Wang while (vi->rq[i].pages) 2427986a4f4dSJason Wang __free_pages(get_a_page(&vi->rq[i], GFP_KERNEL), 0); 2428f600b690SJohn Fastabend 2429f600b690SJohn Fastabend old_prog = rtnl_dereference(vi->rq[i].xdp_prog); 2430f600b690SJohn Fastabend RCU_INIT_POINTER(vi->rq[i].xdp_prog, NULL); 2431f600b690SJohn Fastabend if (old_prog) 2432f600b690SJohn Fastabend bpf_prog_put(old_prog); 2433986a4f4dSJason Wang } 243447315329SJohn Fastabend } 243547315329SJohn Fastabend 243647315329SJohn Fastabend static void free_receive_bufs(struct virtnet_info *vi) 243747315329SJohn Fastabend { 243847315329SJohn Fastabend rtnl_lock(); 243947315329SJohn Fastabend _free_receive_bufs(vi); 2440f600b690SJohn Fastabend rtnl_unlock(); 2441986a4f4dSJason Wang } 2442986a4f4dSJason Wang 2443fb51879dSMichael Dalton static void free_receive_page_frags(struct virtnet_info *vi) 2444fb51879dSMichael Dalton { 2445fb51879dSMichael Dalton int i; 2446fb51879dSMichael Dalton for (i = 0; i < vi->max_queue_pairs; i++) 2447fb51879dSMichael Dalton if (vi->rq[i].alloc_frag.page) 2448fb51879dSMichael Dalton put_page(vi->rq[i].alloc_frag.page); 2449fb51879dSMichael Dalton } 2450fb51879dSMichael Dalton 2451b68df015SJohn Fastabend static bool is_xdp_raw_buffer_queue(struct virtnet_info *vi, int q) 245256434a01SJohn Fastabend { 245356434a01SJohn Fastabend if (q < (vi->curr_queue_pairs - vi->xdp_queue_pairs)) 245456434a01SJohn 
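/* Queues below curr_queue_pairs - xdp_queue_pairs carry ordinary skbs;
 * the rest, up to curr_queue_pairs, are reserved for XDP_TX and hold
 * raw page buffers rather than skbs.
 */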
Fastabend return false; 245556434a01SJohn Fastabend else if (q < vi->curr_queue_pairs) 245656434a01SJohn Fastabend return true; 245756434a01SJohn Fastabend else 245856434a01SJohn Fastabend return false; 245956434a01SJohn Fastabend } 246056434a01SJohn Fastabend 2461986a4f4dSJason Wang static void free_unused_bufs(struct virtnet_info *vi) 2462986a4f4dSJason Wang { 2463986a4f4dSJason Wang void *buf; 2464986a4f4dSJason Wang int i; 2465986a4f4dSJason Wang 2466986a4f4dSJason Wang for (i = 0; i < vi->max_queue_pairs; i++) { 2467986a4f4dSJason Wang struct virtqueue *vq = vi->sq[i].vq; 246856434a01SJohn Fastabend while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) { 2469b68df015SJohn Fastabend if (!is_xdp_raw_buffer_queue(vi, i)) 2470986a4f4dSJason Wang dev_kfree_skb(buf); 247156434a01SJohn Fastabend else 247256434a01SJohn Fastabend put_page(virt_to_head_page(buf)); 247356434a01SJohn Fastabend } 2474986a4f4dSJason Wang } 2475986a4f4dSJason Wang 2476986a4f4dSJason Wang for (i = 0; i < vi->max_queue_pairs; i++) { 2477986a4f4dSJason Wang struct virtqueue *vq = vi->rq[i].vq; 2478986a4f4dSJason Wang 2479986a4f4dSJason Wang while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) { 2480ab7db917SMichael Dalton if (vi->mergeable_rx_bufs) { 2481680557cfSMichael S. Tsirkin put_page(virt_to_head_page(buf)); 2482ab7db917SMichael Dalton } else if (vi->big_packets) { 2483fa9fac17SAndrey Vagin give_pages(&vi->rq[i], buf); 2484ab7db917SMichael Dalton } else { 2485f6b10209SJason Wang put_page(virt_to_head_page(buf)); 2486986a4f4dSJason Wang } 2487986a4f4dSJason Wang } 2488986a4f4dSJason Wang } 2489ab7db917SMichael Dalton } 2490986a4f4dSJason Wang 2491e9d7417bSJason Wang static void virtnet_del_vqs(struct virtnet_info *vi) 2492e9d7417bSJason Wang { 2493e9d7417bSJason Wang struct virtio_device *vdev = vi->vdev; 2494e9d7417bSJason Wang 24958898c21cSWanlong Gao virtnet_clean_affinity(vi, -1); 2496986a4f4dSJason Wang 2497e9d7417bSJason Wang vdev->config->del_vqs(vdev); 2498986a4f4dSJason Wang 2499986a4f4dSJason Wang virtnet_free_queues(vi); 2500986a4f4dSJason Wang } 2501986a4f4dSJason Wang 2502d85b758fSMichael S. Tsirkin /* How large should a single buffer be so a queue full of these can fit at 2503d85b758fSMichael S. Tsirkin * least one full packet? 2504d85b758fSMichael S. Tsirkin * Logic below assumes the mergeable buffer header is used. 2505d85b758fSMichael S. Tsirkin */ 2506d85b758fSMichael S. Tsirkin static unsigned int mergeable_min_buf_len(struct virtnet_info *vi, struct virtqueue *vq) 2507d85b758fSMichael S. Tsirkin { 2508d85b758fSMichael S. Tsirkin const unsigned int hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf); 2509d85b758fSMichael S. Tsirkin unsigned int rq_size = virtqueue_get_vring_size(vq); 2510d85b758fSMichael S. Tsirkin unsigned int packet_len = vi->big_packets ? IP_MAX_MTU : vi->dev->max_mtu; 2511d85b758fSMichael S. Tsirkin unsigned int buf_len = hdr_len + ETH_HLEN + VLAN_HLEN + packet_len; 2512d85b758fSMichael S. Tsirkin unsigned int min_buf_len = DIV_ROUND_UP(buf_len, rq_size); 2513d85b758fSMichael S. Tsirkin 2514f0c3192cSMichael S. Tsirkin return max(max(min_buf_len, hdr_len) - hdr_len, 2515f0c3192cSMichael S. Tsirkin (unsigned int)GOOD_PACKET_LEN); 2516d85b758fSMichael S. Tsirkin } 2517d85b758fSMichael S. 
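/* Worked example, assuming a 256-entry ring and a device-advertised
 * MTU of 1500: buf_len = 12 + 14 + 4 + 1500 = 1530, so min_buf_len =
 * DIV_ROUND_UP(1530, 256) = 6 and the GOOD_PACKET_LEN floor (1518)
 * wins, leaving every buffer large enough for a full Ethernet frame.
 */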
Tsirkin 2518986a4f4dSJason Wang static int virtnet_find_vqs(struct virtnet_info *vi) 2519986a4f4dSJason Wang { 2520986a4f4dSJason Wang vq_callback_t **callbacks; 2521986a4f4dSJason Wang struct virtqueue **vqs; 2522986a4f4dSJason Wang int ret = -ENOMEM; 2523986a4f4dSJason Wang int i, total_vqs; 2524986a4f4dSJason Wang const char **names; 2525d45b897bSMichael S. Tsirkin bool *ctx; 2526986a4f4dSJason Wang 2527986a4f4dSJason Wang /* We expect 1 RX virtqueue followed by 1 TX virtqueue, followed by 2528986a4f4dSJason Wang * possible N-1 RX/TX queue pairs used in multiqueue mode, followed by 2529986a4f4dSJason Wang * possible control vq. 2530986a4f4dSJason Wang */ 2531986a4f4dSJason Wang total_vqs = vi->max_queue_pairs * 2 + 2532986a4f4dSJason Wang virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ); 2533986a4f4dSJason Wang 2534986a4f4dSJason Wang /* Allocate space for find_vqs parameters */ 2535986a4f4dSJason Wang vqs = kzalloc(total_vqs * sizeof(*vqs), GFP_KERNEL); 2536986a4f4dSJason Wang if (!vqs) 2537986a4f4dSJason Wang goto err_vq; 2538986a4f4dSJason Wang callbacks = kmalloc(total_vqs * sizeof(*callbacks), GFP_KERNEL); 2539986a4f4dSJason Wang if (!callbacks) 2540986a4f4dSJason Wang goto err_callback; 2541986a4f4dSJason Wang names = kmalloc(total_vqs * sizeof(*names), GFP_KERNEL); 2542986a4f4dSJason Wang if (!names) 2543986a4f4dSJason Wang goto err_names; 2544192f68cfSJason Wang if (!vi->big_packets || vi->mergeable_rx_bufs) { 2545d45b897bSMichael S. Tsirkin ctx = kzalloc(total_vqs * sizeof(*ctx), GFP_KERNEL); 2546d45b897bSMichael S. Tsirkin if (!ctx) 2547d45b897bSMichael S. Tsirkin goto err_ctx; 2548d45b897bSMichael S. Tsirkin } else { 2549d45b897bSMichael S. Tsirkin ctx = NULL; 2550d45b897bSMichael S. Tsirkin } 2551986a4f4dSJason Wang 2552986a4f4dSJason Wang /* Parameters for control virtqueue, if any */ 2553986a4f4dSJason Wang if (vi->has_cvq) { 2554986a4f4dSJason Wang callbacks[total_vqs - 1] = NULL; 2555986a4f4dSJason Wang names[total_vqs - 1] = "control"; 2556986a4f4dSJason Wang } 2557986a4f4dSJason Wang 2558986a4f4dSJason Wang /* Allocate/initialize parameters for send/receive virtqueues */ 2559986a4f4dSJason Wang for (i = 0; i < vi->max_queue_pairs; i++) { 2560986a4f4dSJason Wang callbacks[rxq2vq(i)] = skb_recv_done; 2561986a4f4dSJason Wang callbacks[txq2vq(i)] = skb_xmit_done; 2562986a4f4dSJason Wang sprintf(vi->rq[i].name, "input.%d", i); 2563986a4f4dSJason Wang sprintf(vi->sq[i].name, "output.%d", i); 2564986a4f4dSJason Wang names[rxq2vq(i)] = vi->rq[i].name; 2565986a4f4dSJason Wang names[txq2vq(i)] = vi->sq[i].name; 2566d45b897bSMichael S. Tsirkin if (ctx) 2567d45b897bSMichael S. Tsirkin ctx[rxq2vq(i)] = true; 2568986a4f4dSJason Wang } 2569986a4f4dSJason Wang 2570986a4f4dSJason Wang ret = vi->vdev->config->find_vqs(vi->vdev, total_vqs, vqs, callbacks, 2571d45b897bSMichael S. Tsirkin names, ctx, NULL); 2572986a4f4dSJason Wang if (ret) 2573986a4f4dSJason Wang goto err_find; 2574986a4f4dSJason Wang 2575986a4f4dSJason Wang if (vi->has_cvq) { 2576986a4f4dSJason Wang vi->cvq = vqs[total_vqs - 1]; 2577986a4f4dSJason Wang if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VLAN)) 2578f646968fSPatrick McHardy vi->dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; 2579986a4f4dSJason Wang } 2580986a4f4dSJason Wang 2581986a4f4dSJason Wang for (i = 0; i < vi->max_queue_pairs; i++) { 2582986a4f4dSJason Wang vi->rq[i].vq = vqs[rxq2vq(i)]; 2583d85b758fSMichael S. 
Tsirkin vi->rq[i].min_buf_len = mergeable_min_buf_len(vi, vi->rq[i].vq); 2584986a4f4dSJason Wang vi->sq[i].vq = vqs[txq2vq(i)]; 2585986a4f4dSJason Wang } 2586986a4f4dSJason Wang 2587986a4f4dSJason Wang kfree(names); 2588986a4f4dSJason Wang kfree(callbacks); 2589986a4f4dSJason Wang kfree(vqs); 259055281621SJason Wang kfree(ctx); 2591986a4f4dSJason Wang 2592986a4f4dSJason Wang return 0; 2593986a4f4dSJason Wang 2594986a4f4dSJason Wang err_find: 2595d45b897bSMichael S. Tsirkin kfree(ctx); 2596d45b897bSMichael S. Tsirkin err_ctx: 2597986a4f4dSJason Wang kfree(names); 2598986a4f4dSJason Wang err_names: 2599986a4f4dSJason Wang kfree(callbacks); 2600986a4f4dSJason Wang err_callback: 2601986a4f4dSJason Wang kfree(vqs); 2602986a4f4dSJason Wang err_vq: 2603986a4f4dSJason Wang return ret; 2604986a4f4dSJason Wang } 2605986a4f4dSJason Wang 2606986a4f4dSJason Wang static int virtnet_alloc_queues(struct virtnet_info *vi) 2607986a4f4dSJason Wang { 2608986a4f4dSJason Wang int i; 2609986a4f4dSJason Wang 261012e57169SMichael S. Tsirkin vi->ctrl = kzalloc(sizeof(*vi->ctrl), GFP_KERNEL); 261112e57169SMichael S. Tsirkin if (!vi->ctrl) 261212e57169SMichael S. Tsirkin goto err_ctrl; 2613986a4f4dSJason Wang vi->sq = kzalloc(sizeof(*vi->sq) * vi->max_queue_pairs, GFP_KERNEL); 2614986a4f4dSJason Wang if (!vi->sq) 2615986a4f4dSJason Wang goto err_sq; 2616986a4f4dSJason Wang vi->rq = kzalloc(sizeof(*vi->rq) * vi->max_queue_pairs, GFP_KERNEL); 2617008d4278SAmerigo Wang if (!vi->rq) 2618986a4f4dSJason Wang goto err_rq; 2619986a4f4dSJason Wang 2620986a4f4dSJason Wang INIT_DELAYED_WORK(&vi->refill, refill_work); 2621986a4f4dSJason Wang for (i = 0; i < vi->max_queue_pairs; i++) { 2622986a4f4dSJason Wang vi->rq[i].pages = NULL; 2623986a4f4dSJason Wang netif_napi_add(vi->dev, &vi->rq[i].napi, virtnet_poll, 2624986a4f4dSJason Wang napi_weight); 26251d11e732SWillem de Bruijn netif_tx_napi_add(vi->dev, &vi->sq[i].napi, virtnet_poll_tx, 2626b92f1e67SWillem de Bruijn napi_tx ? napi_weight : 0); 2627986a4f4dSJason Wang 2628986a4f4dSJason Wang sg_init_table(vi->rq[i].sg, ARRAY_SIZE(vi->rq[i].sg)); 26295377d758SJohannes Berg ewma_pkt_len_init(&vi->rq[i].mrg_avg_pkt_len); 2630986a4f4dSJason Wang sg_init_table(vi->sq[i].sg, ARRAY_SIZE(vi->sq[i].sg)); 2631d7dfc5cfSToshiaki Makita 2632d7dfc5cfSToshiaki Makita u64_stats_init(&vi->rq[i].stats.syncp); 2633d7dfc5cfSToshiaki Makita u64_stats_init(&vi->sq[i].stats.syncp); 2634986a4f4dSJason Wang } 2635986a4f4dSJason Wang 2636986a4f4dSJason Wang return 0; 2637986a4f4dSJason Wang 2638986a4f4dSJason Wang err_rq: 2639986a4f4dSJason Wang kfree(vi->sq); 2640986a4f4dSJason Wang err_sq: 264112e57169SMichael S. Tsirkin kfree(vi->ctrl); 264212e57169SMichael S. 
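/* Unwind mirrors allocation order: a failed rq allocation frees sq,
 * a failed sq allocation frees ctrl, and every path returns -ENOMEM.
 */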
Tsirkin err_ctrl: 2643986a4f4dSJason Wang return -ENOMEM; 2644e9d7417bSJason Wang } 2645e9d7417bSJason Wang 26463f9c10b0SAmit Shah static int init_vqs(struct virtnet_info *vi) 26473f9c10b0SAmit Shah { 2648986a4f4dSJason Wang int ret; 26493f9c10b0SAmit Shah 2650986a4f4dSJason Wang /* Allocate send & receive queues */ 2651986a4f4dSJason Wang ret = virtnet_alloc_queues(vi); 2652986a4f4dSJason Wang if (ret) 2653986a4f4dSJason Wang goto err; 26543f9c10b0SAmit Shah 2655986a4f4dSJason Wang ret = virtnet_find_vqs(vi); 2656986a4f4dSJason Wang if (ret) 2657986a4f4dSJason Wang goto err_free; 26583f9c10b0SAmit Shah 265947be2479SWanlong Gao get_online_cpus(); 26608898c21cSWanlong Gao virtnet_set_affinity(vi); 266147be2479SWanlong Gao put_online_cpus(); 266247be2479SWanlong Gao 26633f9c10b0SAmit Shah return 0; 2664986a4f4dSJason Wang 2665986a4f4dSJason Wang err_free: 2666986a4f4dSJason Wang virtnet_free_queues(vi); 2667986a4f4dSJason Wang err: 2668986a4f4dSJason Wang return ret; 26693f9c10b0SAmit Shah } 26703f9c10b0SAmit Shah 2671fbf28d78SMichael Dalton #ifdef CONFIG_SYSFS 2672fbf28d78SMichael Dalton static ssize_t mergeable_rx_buffer_size_show(struct netdev_rx_queue *queue, 2673718ad681Sstephen hemminger char *buf) 2674fbf28d78SMichael Dalton { 2675fbf28d78SMichael Dalton struct virtnet_info *vi = netdev_priv(queue->dev); 2676fbf28d78SMichael Dalton unsigned int queue_index = get_netdev_rx_queue_index(queue); 26773cc81a9aSJason Wang unsigned int headroom = virtnet_get_headroom(vi); 26783cc81a9aSJason Wang unsigned int tailroom = headroom ? sizeof(struct skb_shared_info) : 0; 26795377d758SJohannes Berg struct ewma_pkt_len *avg; 2680fbf28d78SMichael Dalton 2681fbf28d78SMichael Dalton BUG_ON(queue_index >= vi->max_queue_pairs); 2682fbf28d78SMichael Dalton avg = &vi->rq[queue_index].mrg_avg_pkt_len; 2683d85b758fSMichael S. 
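/* Report the buffer size this RX queue would allocate right now,
 * derived from the packet-length EWMA plus any XDP headroom and
 * tailroom.
 */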
Tsirkin return sprintf(buf, "%u\n", 26843cc81a9aSJason Wang get_mergeable_buf_len(&vi->rq[queue_index], avg, 26853cc81a9aSJason Wang SKB_DATA_ALIGN(headroom + tailroom))); 2686fbf28d78SMichael Dalton } 2687fbf28d78SMichael Dalton 2688fbf28d78SMichael Dalton static struct rx_queue_attribute mergeable_rx_buffer_size_attribute = 2689fbf28d78SMichael Dalton __ATTR_RO(mergeable_rx_buffer_size); 2690fbf28d78SMichael Dalton 2691fbf28d78SMichael Dalton static struct attribute *virtio_net_mrg_rx_attrs[] = { 2692fbf28d78SMichael Dalton &mergeable_rx_buffer_size_attribute.attr, 2693fbf28d78SMichael Dalton NULL 2694fbf28d78SMichael Dalton }; 2695fbf28d78SMichael Dalton 2696fbf28d78SMichael Dalton static const struct attribute_group virtio_net_mrg_rx_group = { 2697fbf28d78SMichael Dalton .name = "virtio_net", 2698fbf28d78SMichael Dalton .attrs = virtio_net_mrg_rx_attrs 2699fbf28d78SMichael Dalton }; 2700fbf28d78SMichael Dalton #endif 2701fbf28d78SMichael Dalton 2702892d6eb1SJason Wang static bool virtnet_fail_on_feature(struct virtio_device *vdev, 2703892d6eb1SJason Wang unsigned int fbit, 2704892d6eb1SJason Wang const char *fname, const char *dname) 2705892d6eb1SJason Wang { 2706892d6eb1SJason Wang if (!virtio_has_feature(vdev, fbit)) 2707892d6eb1SJason Wang return false; 2708892d6eb1SJason Wang 2709892d6eb1SJason Wang dev_err(&vdev->dev, "device advertises feature %s but not %s", 2710892d6eb1SJason Wang fname, dname); 2711892d6eb1SJason Wang 2712892d6eb1SJason Wang return true; 2713892d6eb1SJason Wang } 2714892d6eb1SJason Wang 2715892d6eb1SJason Wang #define VIRTNET_FAIL_ON(vdev, fbit, dbit) \ 2716892d6eb1SJason Wang virtnet_fail_on_feature(vdev, fbit, #fbit, dbit) 2717892d6eb1SJason Wang 2718892d6eb1SJason Wang static bool virtnet_validate_features(struct virtio_device *vdev) 2719892d6eb1SJason Wang { 2720892d6eb1SJason Wang if (!virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ) && 2721892d6eb1SJason Wang (VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_CTRL_RX, 2722892d6eb1SJason Wang "VIRTIO_NET_F_CTRL_VQ") || 2723892d6eb1SJason Wang VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_CTRL_VLAN, 2724892d6eb1SJason Wang "VIRTIO_NET_F_CTRL_VQ") || 2725892d6eb1SJason Wang VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_GUEST_ANNOUNCE, 2726892d6eb1SJason Wang "VIRTIO_NET_F_CTRL_VQ") || 2727892d6eb1SJason Wang VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_MQ, "VIRTIO_NET_F_CTRL_VQ") || 2728892d6eb1SJason Wang VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR, 2729892d6eb1SJason Wang "VIRTIO_NET_F_CTRL_VQ"))) { 2730892d6eb1SJason Wang return false; 2731892d6eb1SJason Wang } 2732892d6eb1SJason Wang 2733892d6eb1SJason Wang return true; 2734892d6eb1SJason Wang } 2735892d6eb1SJason Wang 2736d0c2c997SJarod Wilson #define MIN_MTU ETH_MIN_MTU 2737d0c2c997SJarod Wilson #define MAX_MTU ETH_MAX_MTU 2738d0c2c997SJarod Wilson 2739fe36cbe0SMichael S. Tsirkin static int virtnet_validate(struct virtio_device *vdev) 2740296f96fcSRusty Russell { 27416ba42248SMichael S. Tsirkin if (!vdev->config->get) { 27426ba42248SMichael S. Tsirkin dev_err(&vdev->dev, "%s failure: config access disabled\n", 27436ba42248SMichael S. Tsirkin __func__); 27446ba42248SMichael S. Tsirkin return -EINVAL; 27456ba42248SMichael S. Tsirkin } 27466ba42248SMichael S. Tsirkin 2747892d6eb1SJason Wang if (!virtnet_validate_features(vdev)) 2748892d6eb1SJason Wang return -EINVAL; 2749892d6eb1SJason Wang 2750fe36cbe0SMichael S. Tsirkin if (virtio_has_feature(vdev, VIRTIO_NET_F_MTU)) { 2751fe36cbe0SMichael S. Tsirkin int mtu = virtio_cread16(vdev, 2752fe36cbe0SMichael S. 
Tsirkin offsetof(struct virtio_net_config, 2753fe36cbe0SMichael S. Tsirkin mtu)); 2754fe36cbe0SMichael S. Tsirkin if (mtu < MIN_MTU) 2755fe36cbe0SMichael S. Tsirkin __virtio_clear_bit(vdev, VIRTIO_NET_F_MTU); 2756fe36cbe0SMichael S. Tsirkin } 2757fe36cbe0SMichael S. Tsirkin 2758fe36cbe0SMichael S. Tsirkin return 0; 2759fe36cbe0SMichael S. Tsirkin } 2760fe36cbe0SMichael S. Tsirkin 2761fe36cbe0SMichael S. Tsirkin static int virtnet_probe(struct virtio_device *vdev) 2762fe36cbe0SMichael S. Tsirkin { 2763d7dfc5cfSToshiaki Makita int i, err = -ENOMEM; 2764fe36cbe0SMichael S. Tsirkin struct net_device *dev; 2765fe36cbe0SMichael S. Tsirkin struct virtnet_info *vi; 2766fe36cbe0SMichael S. Tsirkin u16 max_queue_pairs; 2767fe36cbe0SMichael S. Tsirkin int mtu; 2768fe36cbe0SMichael S. Tsirkin 2769986a4f4dSJason Wang /* Find if host supports multiqueue virtio_net device */ 2770855e0c52SRusty Russell err = virtio_cread_feature(vdev, VIRTIO_NET_F_MQ, 2771855e0c52SRusty Russell struct virtio_net_config, 2772855e0c52SRusty Russell max_virtqueue_pairs, &max_queue_pairs); 2773986a4f4dSJason Wang 2774986a4f4dSJason Wang /* We need at least 2 queues */ 2775986a4f4dSJason Wang if (err || max_queue_pairs < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN || 2776986a4f4dSJason Wang max_queue_pairs > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX || 2777986a4f4dSJason Wang !virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ)) 2778986a4f4dSJason Wang max_queue_pairs = 1; 2779296f96fcSRusty Russell 2780296f96fcSRusty Russell /* Allocate ourselves a network device with room for our info */ 2781986a4f4dSJason Wang dev = alloc_etherdev_mq(sizeof(struct virtnet_info), max_queue_pairs); 2782296f96fcSRusty Russell if (!dev) 2783296f96fcSRusty Russell return -ENOMEM; 2784296f96fcSRusty Russell 2785296f96fcSRusty Russell /* Set up network device as normal. */ 2786f2f2c8b4SJiri Pirko dev->priv_flags |= IFF_UNICAST_FLT | IFF_LIVE_ADDR_CHANGE; 278776288b4eSStephen Hemminger dev->netdev_ops = &virtnet_netdev; 2788296f96fcSRusty Russell dev->features = NETIF_F_HIGHDMA; 27893fa2a1dfSstephen hemminger 27907ad24ea4SWilfried Klaebe dev->ethtool_ops = &virtnet_ethtool_ops; 2791296f96fcSRusty Russell SET_NETDEV_DEV(dev, &vdev->dev); 2792296f96fcSRusty Russell 2793296f96fcSRusty Russell /* Do we support "hardware" checksums? */ 279498e778c9SMichał Mirosław if (virtio_has_feature(vdev, VIRTIO_NET_F_CSUM)) { 2795296f96fcSRusty Russell /* This opens up the world of extra features. */ 279648900cb6SJason Wang dev->hw_features |= NETIF_F_HW_CSUM | NETIF_F_SG; 279798e778c9SMichał Mirosław if (csum) 279848900cb6SJason Wang dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG; 279998e778c9SMichał Mirosław 280098e778c9SMichał Mirosław if (virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) { 2801e078de03SDavid S. Miller dev->hw_features |= NETIF_F_TSO 280234a48579SRusty Russell | NETIF_F_TSO_ECN | NETIF_F_TSO6; 280334a48579SRusty Russell } 28045539ae96SRusty Russell /* Individual feature bits: what can host handle?
*/ 280598e778c9SMichał Mirosław if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO4)) 280698e778c9SMichał Mirosław dev->hw_features |= NETIF_F_TSO; 280798e778c9SMichał Mirosław if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO6)) 280898e778c9SMichał Mirosław dev->hw_features |= NETIF_F_TSO6; 280998e778c9SMichał Mirosław if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_ECN)) 281098e778c9SMichał Mirosław dev->hw_features |= NETIF_F_TSO_ECN; 281198e778c9SMichał Mirosław 281241f2f127SJason Wang dev->features |= NETIF_F_GSO_ROBUST; 281341f2f127SJason Wang 281498e778c9SMichał Mirosław if (gso) 2815e078de03SDavid S. Miller dev->features |= dev->hw_features & NETIF_F_ALL_TSO; 281698e778c9SMichał Mirosław /* (!csum && gso) case will be fixed by register_netdev() */ 2817296f96fcSRusty Russell } 28184f49129bSThomas Huth if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_CSUM)) 28194f49129bSThomas Huth dev->features |= NETIF_F_RXCSUM; 2820296f96fcSRusty Russell 28214fda8302SJason Wang dev->vlan_features = dev->features; 28224fda8302SJason Wang 2823d0c2c997SJarod Wilson /* MTU range: 68 - 65535 */ 2824d0c2c997SJarod Wilson dev->min_mtu = MIN_MTU; 2825d0c2c997SJarod Wilson dev->max_mtu = MAX_MTU; 2826d0c2c997SJarod Wilson 2827296f96fcSRusty Russell /* Configuration may specify what MAC to use. Otherwise random. */ 2828855e0c52SRusty Russell if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC)) 2829855e0c52SRusty Russell virtio_cread_bytes(vdev, 2830a586d4f6SRusty Russell offsetof(struct virtio_net_config, mac), 2831855e0c52SRusty Russell dev->dev_addr, dev->addr_len); 2832855e0c52SRusty Russell else 2833f2cedb63SDanny Kukawka eth_hw_addr_random(dev); 2834296f96fcSRusty Russell 2835296f96fcSRusty Russell /* Set up our device-specific information */ 2836296f96fcSRusty Russell vi = netdev_priv(dev); 2837296f96fcSRusty Russell vi->dev = dev; 2838296f96fcSRusty Russell vi->vdev = vdev; 2839d9d5dcc8SChristian Borntraeger vdev->priv = vi; 2840827da44cSJohn Stultz 2841586d17c5SJason Wang INIT_WORK(&vi->config_work, virtnet_config_changed_work); 2842296f96fcSRusty Russell 284397402b96SHerbert Xu /* If we can receive ANY GSO packets, we must allocate large ones. */ 28448e95a202SJoe Perches if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) || 28458e95a202SJoe Perches virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6) || 2846e3e3c423SVlad Yasevich virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ECN) || 2847e3e3c423SVlad Yasevich virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_UFO)) 284897402b96SHerbert Xu vi->big_packets = true; 284997402b96SHerbert Xu 28503f2c31d9SMark McLoughlin if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF)) 28513f2c31d9SMark McLoughlin vi->mergeable_rx_bufs = true; 28523f2c31d9SMark McLoughlin 2853d04302b3SMichael S. Tsirkin if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF) || 2854d04302b3SMichael S. Tsirkin virtio_has_feature(vdev, VIRTIO_F_VERSION_1)) 2855012873d0SMichael S. Tsirkin vi->hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf); 2856012873d0SMichael S. Tsirkin else 2857012873d0SMichael S. Tsirkin vi->hdr_len = sizeof(struct virtio_net_hdr); 2858012873d0SMichael S. Tsirkin 285975993300SMichael S. Tsirkin if (virtio_has_feature(vdev, VIRTIO_F_ANY_LAYOUT) || 286075993300SMichael S. Tsirkin virtio_has_feature(vdev, VIRTIO_F_VERSION_1)) 2861e7428e95SMichael S. Tsirkin vi->any_header_sg = true; 2862e7428e95SMichael S. 
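/* Remaining capability probing: a control virtqueue enables runtime
 * reconfiguration (queue count, MAC, VLAN filtering), and a
 * device-supplied MTU both seeds dev->mtu and caps dev->max_mtu.
 */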
Tsirkin 2863986a4f4dSJason Wang if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ)) 2864986a4f4dSJason Wang vi->has_cvq = true; 2865986a4f4dSJason Wang 286614de9d11SAaron Conole if (virtio_has_feature(vdev, VIRTIO_NET_F_MTU)) { 286714de9d11SAaron Conole mtu = virtio_cread16(vdev, 286814de9d11SAaron Conole offsetof(struct virtio_net_config, 286914de9d11SAaron Conole mtu)); 287093a205eeSAaron Conole if (mtu < dev->min_mtu) { 2871fe36cbe0SMichael S. Tsirkin /* Should never trigger: MTU was previously validated 2872fe36cbe0SMichael S. Tsirkin * in virtnet_validate. 2873fe36cbe0SMichael S. Tsirkin */ 2874fe36cbe0SMichael S. Tsirkin dev_err(&vdev->dev, "device MTU appears to have changed, " 2875fe36cbe0SMichael S. Tsirkin "it is now %d < %d", mtu, dev->min_mtu); 2876d7dfc5cfSToshiaki Makita goto free; 2877fe36cbe0SMichael S. Tsirkin } 2878fe36cbe0SMichael S. Tsirkin 2879d0c2c997SJarod Wilson dev->mtu = mtu; 288093a205eeSAaron Conole dev->max_mtu = mtu; 28812e123b44SMichael S. Tsirkin 28822e123b44SMichael S. Tsirkin /* TODO: size buffers correctly in this case. */ 28832e123b44SMichael S. Tsirkin if (dev->mtu > ETH_DATA_LEN) 28842e123b44SMichael S. Tsirkin vi->big_packets = true; 288514de9d11SAaron Conole } 288614de9d11SAaron Conole 2887012873d0SMichael S. Tsirkin if (vi->any_header_sg) 2888012873d0SMichael S. Tsirkin dev->needed_headroom = vi->hdr_len; 28896ebbc1a6SZhangjie (HZ) 289044900010SJason Wang /* Enable multiqueue by default */ 289144900010SJason Wang if (num_online_cpus() >= max_queue_pairs) 289244900010SJason Wang vi->curr_queue_pairs = max_queue_pairs; 289344900010SJason Wang else 289444900010SJason Wang vi->curr_queue_pairs = num_online_cpus(); 2895986a4f4dSJason Wang vi->max_queue_pairs = max_queue_pairs; 2896986a4f4dSJason Wang 2897986a4f4dSJason Wang /* Allocate/initialize the rx/tx queues, and invoke find_vqs */ 28983f9c10b0SAmit Shah err = init_vqs(vi); 2899d2a7dddaSMichael S. Tsirkin if (err) 2900d7dfc5cfSToshiaki Makita goto free; 2901d2a7dddaSMichael S. Tsirkin 2902fbf28d78SMichael Dalton #ifdef CONFIG_SYSFS 2903fbf28d78SMichael Dalton if (vi->mergeable_rx_bufs) 2904fbf28d78SMichael Dalton dev->sysfs_rx_queue_group = &virtio_net_mrg_rx_group; 2905fbf28d78SMichael Dalton #endif 29060f13b66bSZhi Yong Wu netif_set_real_num_tx_queues(dev, vi->curr_queue_pairs); 29070f13b66bSZhi Yong Wu netif_set_real_num_rx_queues(dev, vi->curr_queue_pairs); 2908986a4f4dSJason Wang 290916032be5SNikolay Aleksandrov virtnet_init_settings(dev); 291016032be5SNikolay Aleksandrov 2911296f96fcSRusty Russell err = register_netdev(dev); 2912296f96fcSRusty Russell if (err) { 2913296f96fcSRusty Russell pr_debug("virtio_net: registering device failed\n"); 2914d2a7dddaSMichael S. Tsirkin goto free_vqs; 2915296f96fcSRusty Russell } 2916b3369c1fSRusty Russell 29174baf1e33SMichael S. Tsirkin virtio_device_ready(vdev); 29184baf1e33SMichael S. Tsirkin 29198017c279SSebastian Andrzej Siewior err = virtnet_cpu_notif_add(vi); 29208de4b2f3SWanlong Gao if (err) { 29218de4b2f3SWanlong Gao pr_debug("virtio_net: registering cpu notifier failed\n"); 2922f00e35e2Swangyunjian goto free_unregister_netdev; 29238de4b2f3SWanlong Gao } 29248de4b2f3SWanlong Gao 2925a220871bSJason Wang virtnet_set_queues(vi, vi->curr_queue_pairs); 292644900010SJason Wang 2927167c25e4SJason Wang /* Assume link up if device can't report link status, 2928167c25e4SJason Wang otherwise get link status from config.
*/ 2929167c25e4SJason Wang netif_carrier_off(dev); 2930bda7fab5SJay Vosburgh if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) { 29313b07e9caSTejun Heo schedule_work(&vi->config_work); 2932167c25e4SJason Wang } else { 2933167c25e4SJason Wang vi->status = VIRTIO_NET_S_LINK_UP; 2934faa9b39fSJason Baron virtnet_update_settings(vi); 29354783256eSPantelis Koukousoulas netif_carrier_on(dev); 2936167c25e4SJason Wang } 29379f4d26d0SMark McLoughlin 29383f93522fSJason Wang for (i = 0; i < ARRAY_SIZE(guest_offloads); i++) 29393f93522fSJason Wang if (virtio_has_feature(vi->vdev, guest_offloads[i])) 29403f93522fSJason Wang set_bit(guest_offloads[i], &vi->guest_offloads); 29413f93522fSJason Wang 2942986a4f4dSJason Wang pr_debug("virtnet: registered device %s with %d RX and TX vq's\n", 2943986a4f4dSJason Wang dev->name, max_queue_pairs); 2944986a4f4dSJason Wang 2945296f96fcSRusty Russell return 0; 2946296f96fcSRusty Russell 2947f00e35e2Swangyunjian free_unregister_netdev: 294802465555SMichael S. Tsirkin vi->vdev->config->reset(vdev); 294902465555SMichael S. Tsirkin 2950b3369c1fSRusty Russell unregister_netdev(dev); 2951d2a7dddaSMichael S. Tsirkin free_vqs: 2952986a4f4dSJason Wang cancel_delayed_work_sync(&vi->refill); 2953fb51879dSMichael Dalton free_receive_page_frags(vi); 2954e9d7417bSJason Wang virtnet_del_vqs(vi); 2955296f96fcSRusty Russell free: 2956296f96fcSRusty Russell free_netdev(dev); 2957296f96fcSRusty Russell return err; 2958296f96fcSRusty Russell } 2959296f96fcSRusty Russell 296004486ed0SAmit Shah static void remove_vq_common(struct virtnet_info *vi) 2961296f96fcSRusty Russell { 296204486ed0SAmit Shah vi->vdev->config->reset(vi->vdev); 2963830a8a97SShirley Ma 2964830a8a97SShirley Ma /* Free unused buffers in both send and recv, if any. */ 29659ab86bbcSShirley Ma free_unused_bufs(vi); 2966fb6813f4SRusty Russell 2967986a4f4dSJason Wang free_receive_bufs(vi); 2968d2a7dddaSMichael S. Tsirkin 2969fb51879dSMichael Dalton free_receive_page_frags(vi); 2970fb51879dSMichael Dalton 2971986a4f4dSJason Wang virtnet_del_vqs(vi); 297204486ed0SAmit Shah } 297304486ed0SAmit Shah 29748cc085d6SBill Pemberton static void virtnet_remove(struct virtio_device *vdev) 297504486ed0SAmit Shah { 297604486ed0SAmit Shah struct virtnet_info *vi = vdev->priv; 297704486ed0SAmit Shah 29788017c279SSebastian Andrzej Siewior virtnet_cpu_notif_remove(vi); 29798de4b2f3SWanlong Gao 2980102a2786SMichael S. Tsirkin /* Make sure no work handler is accessing the device. */ 2981102a2786SMichael S. 
Tsirkin flush_work(&vi->config_work); 2982586d17c5SJason Wang 298304486ed0SAmit Shah unregister_netdev(vi->dev); 298404486ed0SAmit Shah 298504486ed0SAmit Shah remove_vq_common(vi); 2986fb6813f4SRusty Russell 298774b2553fSRusty Russell free_netdev(vi->dev); 2988296f96fcSRusty Russell } 2989296f96fcSRusty Russell 299067a75194SArnd Bergmann static __maybe_unused int virtnet_freeze(struct virtio_device *vdev) 29910741bcb5SAmit Shah { 29920741bcb5SAmit Shah struct virtnet_info *vi = vdev->priv; 29930741bcb5SAmit Shah 29948017c279SSebastian Andrzej Siewior virtnet_cpu_notif_remove(vi); 29959fe7bfceSJohn Fastabend virtnet_freeze_down(vdev); 29960741bcb5SAmit Shah remove_vq_common(vi); 29970741bcb5SAmit Shah 29980741bcb5SAmit Shah return 0; 29990741bcb5SAmit Shah } 30000741bcb5SAmit Shah 300167a75194SArnd Bergmann static __maybe_unused int virtnet_restore(struct virtio_device *vdev) 30020741bcb5SAmit Shah { 30030741bcb5SAmit Shah struct virtnet_info *vi = vdev->priv; 30049fe7bfceSJohn Fastabend int err; 30050741bcb5SAmit Shah 30069fe7bfceSJohn Fastabend err = virtnet_restore_up(vdev); 30070741bcb5SAmit Shah if (err) 30080741bcb5SAmit Shah return err; 3009986a4f4dSJason Wang virtnet_set_queues(vi, vi->curr_queue_pairs); 3010986a4f4dSJason Wang 30118017c279SSebastian Andrzej Siewior err = virtnet_cpu_notif_add(vi); 3012ec9debbdSJason Wang if (err) 3013ec9debbdSJason Wang return err; 3014ec9debbdSJason Wang 30150741bcb5SAmit Shah return 0; 30160741bcb5SAmit Shah } 30170741bcb5SAmit Shah 3018296f96fcSRusty Russell static struct virtio_device_id id_table[] = { 3019296f96fcSRusty Russell { VIRTIO_ID_NET, VIRTIO_DEV_ANY_ID }, 3020296f96fcSRusty Russell { 0 }, 3021296f96fcSRusty Russell }; 3022296f96fcSRusty Russell 3023f3358507SMichael S. Tsirkin #define VIRTNET_FEATURES \ 3024f3358507SMichael S. Tsirkin VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GUEST_CSUM, \ 3025f3358507SMichael S. Tsirkin VIRTIO_NET_F_MAC, \ 3026f3358507SMichael S. Tsirkin VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6, \ 3027f3358507SMichael S. Tsirkin VIRTIO_NET_F_HOST_ECN, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6, \ 3028f3358507SMichael S. Tsirkin VIRTIO_NET_F_GUEST_ECN, VIRTIO_NET_F_GUEST_UFO, \ 3029f3358507SMichael S. Tsirkin VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ, \ 3030f3358507SMichael S. Tsirkin VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN, \ 3031f3358507SMichael S. Tsirkin VIRTIO_NET_F_GUEST_ANNOUNCE, VIRTIO_NET_F_MQ, \ 3032f3358507SMichael S. Tsirkin VIRTIO_NET_F_CTRL_MAC_ADDR, \ 3033faa9b39fSJason Baron VIRTIO_NET_F_MTU, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS, \ 3034faa9b39fSJason Baron VIRTIO_NET_F_SPEED_DUPLEX 3035f3358507SMichael S. Tsirkin 3036c45a6816SRusty Russell static unsigned int features[] = { 3037f3358507SMichael S. Tsirkin VIRTNET_FEATURES, 3038f3358507SMichael S. Tsirkin }; 3039f3358507SMichael S. Tsirkin 3040f3358507SMichael S. Tsirkin static unsigned int features_legacy[] = { 3041f3358507SMichael S. Tsirkin VIRTNET_FEATURES, 3042f3358507SMichael S. Tsirkin VIRTIO_NET_F_GSO, 3043e7428e95SMichael S. Tsirkin VIRTIO_F_ANY_LAYOUT, 3044c45a6816SRusty Russell }; 3045c45a6816SRusty Russell 304622402529SUwe Kleine-König static struct virtio_driver virtio_net_driver = { 3047c45a6816SRusty Russell .feature_table = features, 3048c45a6816SRusty Russell .feature_table_size = ARRAY_SIZE(features), 3049f3358507SMichael S. Tsirkin .feature_table_legacy = features_legacy, 3050f3358507SMichael S. 
Tsirkin .feature_table_size_legacy = ARRAY_SIZE(features_legacy), 3051296f96fcSRusty Russell .driver.name = KBUILD_MODNAME, 3052296f96fcSRusty Russell .driver.owner = THIS_MODULE, 3053296f96fcSRusty Russell .id_table = id_table, 3054fe36cbe0SMichael S. Tsirkin .validate = virtnet_validate, 3055296f96fcSRusty Russell .probe = virtnet_probe, 30568cc085d6SBill Pemberton .remove = virtnet_remove, 30579f4d26d0SMark McLoughlin .config_changed = virtnet_config_changed, 305889107000SAaron Lu #ifdef CONFIG_PM_SLEEP 30590741bcb5SAmit Shah .freeze = virtnet_freeze, 30600741bcb5SAmit Shah .restore = virtnet_restore, 30610741bcb5SAmit Shah #endif 3062296f96fcSRusty Russell }; 3063296f96fcSRusty Russell 30648017c279SSebastian Andrzej Siewior static __init int virtio_net_driver_init(void) 30658017c279SSebastian Andrzej Siewior { 30668017c279SSebastian Andrzej Siewior int ret; 30678017c279SSebastian Andrzej Siewior 306873c1b41eSThomas Gleixner ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "virtio/net:online", 30698017c279SSebastian Andrzej Siewior virtnet_cpu_online, 30708017c279SSebastian Andrzej Siewior virtnet_cpu_down_prep); 30718017c279SSebastian Andrzej Siewior if (ret < 0) 30728017c279SSebastian Andrzej Siewior goto out; 30738017c279SSebastian Andrzej Siewior virtionet_online = ret; 307473c1b41eSThomas Gleixner ret = cpuhp_setup_state_multi(CPUHP_VIRT_NET_DEAD, "virtio/net:dead", 30758017c279SSebastian Andrzej Siewior NULL, virtnet_cpu_dead); 30768017c279SSebastian Andrzej Siewior if (ret) 30778017c279SSebastian Andrzej Siewior goto err_dead; 30788017c279SSebastian Andrzej Siewior 30798017c279SSebastian Andrzej Siewior ret = register_virtio_driver(&virtio_net_driver); 30808017c279SSebastian Andrzej Siewior if (ret) 30818017c279SSebastian Andrzej Siewior goto err_virtio; 30828017c279SSebastian Andrzej Siewior return 0; 30838017c279SSebastian Andrzej Siewior err_virtio: 30848017c279SSebastian Andrzej Siewior cpuhp_remove_multi_state(CPUHP_VIRT_NET_DEAD); 30858017c279SSebastian Andrzej Siewior err_dead: 30868017c279SSebastian Andrzej Siewior cpuhp_remove_multi_state(virtionet_online); 30878017c279SSebastian Andrzej Siewior out: 30888017c279SSebastian Andrzej Siewior return ret; 30898017c279SSebastian Andrzej Siewior } 30908017c279SSebastian Andrzej Siewior module_init(virtio_net_driver_init); 30918017c279SSebastian Andrzej Siewior 30928017c279SSebastian Andrzej Siewior static __exit void virtio_net_driver_exit(void) 30938017c279SSebastian Andrzej Siewior { 3094cfa0ebc9SAndrew Jones unregister_virtio_driver(&virtio_net_driver); 30958017c279SSebastian Andrzej Siewior cpuhp_remove_multi_state(CPUHP_VIRT_NET_DEAD); 30968017c279SSebastian Andrzej Siewior cpuhp_remove_multi_state(virtionet_online); 30978017c279SSebastian Andrzej Siewior } 30988017c279SSebastian Andrzej Siewior module_exit(virtio_net_driver_exit); 3099296f96fcSRusty Russell 3100296f96fcSRusty Russell MODULE_DEVICE_TABLE(virtio, id_table); 3101296f96fcSRusty Russell MODULE_DESCRIPTION("Virtio network driver"); 3102296f96fcSRusty Russell MODULE_LICENSE("GPL"); 3103
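/* Lifecycle sketch: module init registers the CPU hotplug states and
 * then the virtio driver; virtnet_validate() sanity-checks config
 * space before features are finalized; virtnet_probe() negotiates
 * features, allocates queue pairs and registers the netdev; freeze
 * and restore reuse the same teardown and bring-up paths as remove
 * and probe.
 */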