148925e37SRusty Russell /* A network driver using virtio. 2296f96fcSRusty Russell * 3296f96fcSRusty Russell * Copyright 2007 Rusty Russell <rusty@rustcorp.com.au> IBM Corporation 4296f96fcSRusty Russell * 5296f96fcSRusty Russell * This program is free software; you can redistribute it and/or modify 6296f96fcSRusty Russell * it under the terms of the GNU General Public License as published by 7296f96fcSRusty Russell * the Free Software Foundation; either version 2 of the License, or 8296f96fcSRusty Russell * (at your option) any later version. 9296f96fcSRusty Russell * 10296f96fcSRusty Russell * This program is distributed in the hope that it will be useful, 11296f96fcSRusty Russell * but WITHOUT ANY WARRANTY; without even the implied warranty of 12296f96fcSRusty Russell * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13296f96fcSRusty Russell * GNU General Public License for more details. 14296f96fcSRusty Russell * 15296f96fcSRusty Russell * You should have received a copy of the GNU General Public License 16adf8d3ffSJeff Kirsher * along with this program; if not, see <http://www.gnu.org/licenses/>. 17296f96fcSRusty Russell */ 18296f96fcSRusty Russell //#define DEBUG 19296f96fcSRusty Russell #include <linux/netdevice.h> 20296f96fcSRusty Russell #include <linux/etherdevice.h> 21a9ea3fc6SHerbert Xu #include <linux/ethtool.h> 22296f96fcSRusty Russell #include <linux/module.h> 23296f96fcSRusty Russell #include <linux/virtio.h> 24296f96fcSRusty Russell #include <linux/virtio_net.h> 25f600b690SJohn Fastabend #include <linux/bpf.h> 26a67edbf4SDaniel Borkmann #include <linux/bpf_trace.h> 27296f96fcSRusty Russell #include <linux/scatterlist.h> 28e918085aSAlex Williamson #include <linux/if_vlan.h> 295a0e3ad6STejun Heo #include <linux/slab.h> 308de4b2f3SWanlong Gao #include <linux/cpu.h> 31ab7db917SMichael Dalton #include <linux/average.h> 32296f96fcSRusty Russell 33d34710e3SAmerigo Wang static int napi_weight = NAPI_POLL_WEIGHT; 346c0cd7c0SDor Laor module_param(napi_weight, int, 0444); 356c0cd7c0SDor Laor 36eb939922SRusty Russell static bool csum = true, gso = true; 3734a48579SRusty Russell module_param(csum, bool, 0444); 3834a48579SRusty Russell module_param(gso, bool, 0444); 3934a48579SRusty Russell 40296f96fcSRusty Russell /* FIXME: MTU in config. */ 415061de36SMichael Dalton #define GOOD_PACKET_LEN (ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN) 423f2c31d9SMark McLoughlin #define GOOD_COPY_LEN 128 43296f96fcSRusty Russell 445377d758SJohannes Berg /* RX packet size EWMA. The average packet size is used to determine the packet 455377d758SJohannes Berg * buffer size when refilling RX rings. As the entire RX ring may be refilled 465377d758SJohannes Berg * at once, the weight is chosen so that the EWMA will be insensitive to short- 475377d758SJohannes Berg * term, transient changes in packet size. 48ab7db917SMichael Dalton */ 495377d758SJohannes Berg DECLARE_EWMA(pkt_len, 1, 64) 50ab7db917SMichael Dalton 51d0fa28f0SMichael S. Tsirkin /* With mergeable buffers we align buffer address and use the low bits to 52d0fa28f0SMichael S. Tsirkin * encode its true size. Buffer size is up to 1 page so we need to align to 53d0fa28f0SMichael S. Tsirkin * square root of page size to ensure we reserve enough bits to encode the true 54d0fa28f0SMichael S. Tsirkin * size. 55d0fa28f0SMichael S. Tsirkin */ 56d0fa28f0SMichael S. Tsirkin #define MERGEABLE_BUFFER_MIN_ALIGN_SHIFT ((PAGE_SHIFT + 1) / 2) 57d0fa28f0SMichael S. Tsirkin 58ab7db917SMichael Dalton /* Minimum alignment for mergeable packet buffers. */ 59d0fa28f0SMichael S. Tsirkin #define MERGEABLE_BUFFER_ALIGN max(L1_CACHE_BYTES, \ 60d0fa28f0SMichael S. Tsirkin 1 << MERGEABLE_BUFFER_MIN_ALIGN_SHIFT) 61ab7db917SMichael Dalton 6266846048SRick Jones #define VIRTNET_DRIVER_VERSION "1.0.0" 632a41f71dSAlex Williamson 643fa2a1dfSstephen hemminger struct virtnet_stats { 6583a27052SEric Dumazet struct u64_stats_sync tx_syncp; 6683a27052SEric Dumazet struct u64_stats_sync rx_syncp; 673fa2a1dfSstephen hemminger u64 tx_bytes; 683fa2a1dfSstephen hemminger u64 tx_packets; 693fa2a1dfSstephen hemminger 703fa2a1dfSstephen hemminger u64 rx_bytes; 713fa2a1dfSstephen hemminger u64 rx_packets; 723fa2a1dfSstephen hemminger }; 733fa2a1dfSstephen hemminger 74e9d7417bSJason Wang /* Internal representation of a send virtqueue */ 75e9d7417bSJason Wang struct send_queue { 76e9d7417bSJason Wang /* Virtqueue associated with this send _queue */ 77e9d7417bSJason Wang struct virtqueue *vq; 78e9d7417bSJason Wang 79e9d7417bSJason Wang /* TX: fragments + linear part + virtio header */ 80e9d7417bSJason Wang struct scatterlist sg[MAX_SKB_FRAGS + 2]; 81986a4f4dSJason Wang 82986a4f4dSJason Wang /* Name of the send queue: output.$index */ 83986a4f4dSJason Wang char name[40]; 84e9d7417bSJason Wang }; 85e9d7417bSJason Wang 86e9d7417bSJason Wang /* Internal representation of a receive virtqueue */ 87e9d7417bSJason Wang struct receive_queue { 88e9d7417bSJason Wang /* Virtqueue associated with this receive_queue */ 89e9d7417bSJason Wang struct virtqueue *vq; 90e9d7417bSJason Wang 91296f96fcSRusty Russell struct napi_struct napi; 92296f96fcSRusty Russell 93f600b690SJohn Fastabend struct bpf_prog __rcu *xdp_prog; 94f600b690SJohn Fastabend 95e9d7417bSJason Wang /* Chain pages by the private ptr. */ 96e9d7417bSJason Wang struct page *pages; 97e9d7417bSJason Wang 98ab7db917SMichael Dalton /* Average packet length for mergeable receive buffers. */ 995377d758SJohannes Berg struct ewma_pkt_len mrg_avg_pkt_len; 100ab7db917SMichael Dalton 101fb51879dSMichael Dalton /* Page frag for packet buffer allocation. */ 102fb51879dSMichael Dalton struct page_frag alloc_frag; 103fb51879dSMichael Dalton 104e9d7417bSJason Wang /* RX: fragments + linear part + virtio header */ 105e9d7417bSJason Wang struct scatterlist sg[MAX_SKB_FRAGS + 2]; 106986a4f4dSJason Wang 107986a4f4dSJason Wang /* Name of this receive queue: input.$index */ 108986a4f4dSJason Wang char name[40]; 109e9d7417bSJason Wang }; 110e9d7417bSJason Wang 111e9d7417bSJason Wang struct virtnet_info { 112e9d7417bSJason Wang struct virtio_device *vdev; 113e9d7417bSJason Wang struct virtqueue *cvq; 114e9d7417bSJason Wang struct net_device *dev; 115986a4f4dSJason Wang struct send_queue *sq; 116986a4f4dSJason Wang struct receive_queue *rq; 117e9d7417bSJason Wang unsigned int status; 118e9d7417bSJason Wang 119986a4f4dSJason Wang /* Max # of queue pairs supported by the device */ 120986a4f4dSJason Wang u16 max_queue_pairs; 121986a4f4dSJason Wang 122986a4f4dSJason Wang /* # of queue pairs currently used by the driver */ 123986a4f4dSJason Wang u16 curr_queue_pairs; 124986a4f4dSJason Wang 125672aafd5SJohn Fastabend /* # of XDP queue pairs currently used by the driver */ 126672aafd5SJohn Fastabend u16 xdp_queue_pairs; 127672aafd5SJohn Fastabend 12897402b96SHerbert Xu /* I like... big packets and I cannot lie! */ 12997402b96SHerbert Xu bool big_packets; 13097402b96SHerbert Xu 1313f2c31d9SMark McLoughlin /* Host will merge rx buffers for big packets (shake it! shake it!) */ 1323f2c31d9SMark McLoughlin bool mergeable_rx_bufs; 1333f2c31d9SMark McLoughlin 134986a4f4dSJason Wang /* Has control virtqueue */ 135986a4f4dSJason Wang bool has_cvq; 136986a4f4dSJason Wang 137e7428e95SMichael S. Tsirkin /* Host can handle any s/g split between our header and packet data */ 138e7428e95SMichael S. Tsirkin bool any_header_sg; 139e7428e95SMichael S. Tsirkin 140012873d0SMichael S. Tsirkin /* Packet virtio header size */ 141012873d0SMichael S. Tsirkin u8 hdr_len; 142012873d0SMichael S. Tsirkin 1433fa2a1dfSstephen hemminger /* Active statistics */ 1443fa2a1dfSstephen hemminger struct virtnet_stats __percpu *stats; 1453fa2a1dfSstephen hemminger 1463161e453SRusty Russell /* Work struct for refilling if we run low on memory. */ 1473161e453SRusty Russell struct delayed_work refill; 1483161e453SRusty Russell 149586d17c5SJason Wang /* Work struct for config space updates */ 150586d17c5SJason Wang struct work_struct config_work; 151586d17c5SJason Wang 152986a4f4dSJason Wang /* Does the affinity hint is set for virtqueues? */ 153986a4f4dSJason Wang bool affinity_hint_set; 15447be2479SWanlong Gao 1558017c279SSebastian Andrzej Siewior /* CPU hotplug instances for online & dead */ 1568017c279SSebastian Andrzej Siewior struct hlist_node node; 1578017c279SSebastian Andrzej Siewior struct hlist_node node_dead; 1582ac46030SMichael S. Tsirkin 1592ac46030SMichael S. Tsirkin /* Control VQ buffers: protected by the rtnl lock */ 1602ac46030SMichael S. Tsirkin struct virtio_net_ctrl_hdr ctrl_hdr; 1612ac46030SMichael S. Tsirkin virtio_net_ctrl_ack ctrl_status; 162a725ee3eSAndy Lutomirski struct virtio_net_ctrl_mq ctrl_mq; 1632ac46030SMichael S. Tsirkin u8 ctrl_promisc; 1642ac46030SMichael S. Tsirkin u8 ctrl_allmulti; 165a725ee3eSAndy Lutomirski u16 ctrl_vid; 16616032be5SNikolay Aleksandrov 16716032be5SNikolay Aleksandrov /* Ethtool settings */ 16816032be5SNikolay Aleksandrov u8 duplex; 16916032be5SNikolay Aleksandrov u32 speed; 170296f96fcSRusty Russell }; 171296f96fcSRusty Russell 1729ab86bbcSShirley Ma struct padded_vnet_hdr { 173012873d0SMichael S. Tsirkin struct virtio_net_hdr_mrg_rxbuf hdr; 1749ab86bbcSShirley Ma /* 175012873d0SMichael S. Tsirkin * hdr is in a separate sg buffer, and data sg buffer shares same page 176012873d0SMichael S. Tsirkin * with this header sg. This padding makes next sg 16 byte aligned 177012873d0SMichael S. Tsirkin * after the header. 1789ab86bbcSShirley Ma */ 179012873d0SMichael S. Tsirkin char padding[4]; 1809ab86bbcSShirley Ma }; 1819ab86bbcSShirley Ma 182986a4f4dSJason Wang /* Converting between virtqueue no. and kernel tx/rx queue no. 183986a4f4dSJason Wang * 0:rx0 1:tx0 2:rx1 3:tx1 ... 2N:rxN 2N+1:txN 2N+2:cvq 184986a4f4dSJason Wang */ 185986a4f4dSJason Wang static int vq2txq(struct virtqueue *vq) 186986a4f4dSJason Wang { 1879d0ca6edSRusty Russell return (vq->index - 1) / 2; 188986a4f4dSJason Wang } 189986a4f4dSJason Wang 190986a4f4dSJason Wang static int txq2vq(int txq) 191986a4f4dSJason Wang { 192986a4f4dSJason Wang return txq * 2 + 1; 193986a4f4dSJason Wang } 194986a4f4dSJason Wang 195986a4f4dSJason Wang static int vq2rxq(struct virtqueue *vq) 196986a4f4dSJason Wang { 1979d0ca6edSRusty Russell return vq->index / 2; 198986a4f4dSJason Wang } 199986a4f4dSJason Wang 200986a4f4dSJason Wang static int rxq2vq(int rxq) 201986a4f4dSJason Wang { 202986a4f4dSJason Wang return rxq * 2; 203986a4f4dSJason Wang } 204986a4f4dSJason Wang 205012873d0SMichael S. Tsirkin static inline struct virtio_net_hdr_mrg_rxbuf *skb_vnet_hdr(struct sk_buff *skb) 206296f96fcSRusty Russell { 207012873d0SMichael S. Tsirkin return (struct virtio_net_hdr_mrg_rxbuf *)skb->cb; 208296f96fcSRusty Russell } 209296f96fcSRusty Russell 2109ab86bbcSShirley Ma /* 2119ab86bbcSShirley Ma * private is used to chain pages for big packets, put the whole 2129ab86bbcSShirley Ma * most recent used list in the beginning for reuse 2139ab86bbcSShirley Ma */ 214e9d7417bSJason Wang static void give_pages(struct receive_queue *rq, struct page *page) 215fb6813f4SRusty Russell { 2169ab86bbcSShirley Ma struct page *end; 2179ab86bbcSShirley Ma 218e9d7417bSJason Wang /* Find end of list, sew whole thing into vi->rq.pages. */ 2199ab86bbcSShirley Ma for (end = page; end->private; end = (struct page *)end->private); 220e9d7417bSJason Wang end->private = (unsigned long)rq->pages; 221e9d7417bSJason Wang rq->pages = page; 222fb6813f4SRusty Russell } 223fb6813f4SRusty Russell 224e9d7417bSJason Wang static struct page *get_a_page(struct receive_queue *rq, gfp_t gfp_mask) 225fb6813f4SRusty Russell { 226e9d7417bSJason Wang struct page *p = rq->pages; 227fb6813f4SRusty Russell 2289ab86bbcSShirley Ma if (p) { 229e9d7417bSJason Wang rq->pages = (struct page *)p->private; 2309ab86bbcSShirley Ma /* clear private here, it is used to chain pages */ 2319ab86bbcSShirley Ma p->private = 0; 2329ab86bbcSShirley Ma } else 233fb6813f4SRusty Russell p = alloc_page(gfp_mask); 234fb6813f4SRusty Russell return p; 235fb6813f4SRusty Russell } 236fb6813f4SRusty Russell 237e9d7417bSJason Wang static void skb_xmit_done(struct virtqueue *vq) 238296f96fcSRusty Russell { 239e9d7417bSJason Wang struct virtnet_info *vi = vq->vdev->priv; 240296f96fcSRusty Russell 2412cb9c6baSRusty Russell /* Suppress further interrupts. */ 242e9d7417bSJason Wang virtqueue_disable_cb(vq); 24311a3a154SRusty Russell 244363f1514SRusty Russell /* We were probably waiting for more output buffers. */ 245986a4f4dSJason Wang netif_wake_subqueue(vi->dev, vq2txq(vq)); 246296f96fcSRusty Russell } 247296f96fcSRusty Russell 248ab7db917SMichael Dalton static unsigned int mergeable_ctx_to_buf_truesize(unsigned long mrg_ctx) 249ab7db917SMichael Dalton { 250ab7db917SMichael Dalton unsigned int truesize = mrg_ctx & (MERGEABLE_BUFFER_ALIGN - 1); 251ab7db917SMichael Dalton return (truesize + 1) * MERGEABLE_BUFFER_ALIGN; 252ab7db917SMichael Dalton } 253ab7db917SMichael Dalton 254ab7db917SMichael Dalton static void *mergeable_ctx_to_buf_address(unsigned long mrg_ctx) 255ab7db917SMichael Dalton { 256ab7db917SMichael Dalton return (void *)(mrg_ctx & -MERGEABLE_BUFFER_ALIGN); 257ab7db917SMichael Dalton 258ab7db917SMichael Dalton } 259ab7db917SMichael Dalton 260ab7db917SMichael Dalton static unsigned long mergeable_buf_to_ctx(void *buf, unsigned int truesize) 261ab7db917SMichael Dalton { 262ab7db917SMichael Dalton unsigned int size = truesize / MERGEABLE_BUFFER_ALIGN; 263ab7db917SMichael Dalton return (unsigned long)buf | (size - 1); 264ab7db917SMichael Dalton } 265ab7db917SMichael Dalton 2663464645aSMike Waychison /* Called from bottom half context */ 267946fa564SMichael S. Tsirkin static struct sk_buff *page_to_skb(struct virtnet_info *vi, 268946fa564SMichael S. Tsirkin struct receive_queue *rq, 2692613af0eSMichael Dalton struct page *page, unsigned int offset, 2702613af0eSMichael Dalton unsigned int len, unsigned int truesize) 2719ab86bbcSShirley Ma { 2729ab86bbcSShirley Ma struct sk_buff *skb; 273012873d0SMichael S. Tsirkin struct virtio_net_hdr_mrg_rxbuf *hdr; 2742613af0eSMichael Dalton unsigned int copy, hdr_len, hdr_padded_len; 2759ab86bbcSShirley Ma char *p; 2769ab86bbcSShirley Ma 2772613af0eSMichael Dalton p = page_address(page) + offset; 2789ab86bbcSShirley Ma 2799ab86bbcSShirley Ma /* copy small packet so we can reuse these pages for small data */ 280c67f5db8SPaolo Abeni skb = napi_alloc_skb(&rq->napi, GOOD_COPY_LEN); 2819ab86bbcSShirley Ma if (unlikely(!skb)) 2829ab86bbcSShirley Ma return NULL; 2839ab86bbcSShirley Ma 2849ab86bbcSShirley Ma hdr = skb_vnet_hdr(skb); 2859ab86bbcSShirley Ma 286012873d0SMichael S. Tsirkin hdr_len = vi->hdr_len; 287012873d0SMichael S. Tsirkin if (vi->mergeable_rx_bufs) 288012873d0SMichael S. Tsirkin hdr_padded_len = sizeof *hdr; 289012873d0SMichael S. Tsirkin else 2902613af0eSMichael Dalton hdr_padded_len = sizeof(struct padded_vnet_hdr); 2913f2c31d9SMark McLoughlin 2929ab86bbcSShirley Ma memcpy(hdr, p, hdr_len); 2933f2c31d9SMark McLoughlin 2949ab86bbcSShirley Ma len -= hdr_len; 2952613af0eSMichael Dalton offset += hdr_padded_len; 2962613af0eSMichael Dalton p += hdr_padded_len; 2973f2c31d9SMark McLoughlin 2983f2c31d9SMark McLoughlin copy = len; 2993f2c31d9SMark McLoughlin if (copy > skb_tailroom(skb)) 3003f2c31d9SMark McLoughlin copy = skb_tailroom(skb); 3013f2c31d9SMark McLoughlin memcpy(skb_put(skb, copy), p, copy); 3023f2c31d9SMark McLoughlin 3033f2c31d9SMark McLoughlin len -= copy; 3049ab86bbcSShirley Ma offset += copy; 3053f2c31d9SMark McLoughlin 3062613af0eSMichael Dalton if (vi->mergeable_rx_bufs) { 3072613af0eSMichael Dalton if (len) 3082613af0eSMichael Dalton skb_add_rx_frag(skb, 0, page, offset, len, truesize); 3092613af0eSMichael Dalton else 3102613af0eSMichael Dalton put_page(page); 3112613af0eSMichael Dalton return skb; 3122613af0eSMichael Dalton } 3132613af0eSMichael Dalton 314e878d78bSSasha Levin /* 315e878d78bSSasha Levin * Verify that we can indeed put this data into a skb. 316e878d78bSSasha Levin * This is here to handle cases when the device erroneously 317e878d78bSSasha Levin * tries to receive more than is possible. This is usually 318e878d78bSSasha Levin * the case of a broken device. 319e878d78bSSasha Levin */ 320e878d78bSSasha Levin if (unlikely(len > MAX_SKB_FRAGS * PAGE_SIZE)) { 321be443899SAmerigo Wang net_dbg_ratelimited("%s: too much data\n", skb->dev->name); 322e878d78bSSasha Levin dev_kfree_skb(skb); 323e878d78bSSasha Levin return NULL; 324e878d78bSSasha Levin } 3252613af0eSMichael Dalton BUG_ON(offset >= PAGE_SIZE); 3269ab86bbcSShirley Ma while (len) { 3272613af0eSMichael Dalton unsigned int frag_size = min((unsigned)PAGE_SIZE - offset, len); 3282613af0eSMichael Dalton skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, offset, 3292613af0eSMichael Dalton frag_size, truesize); 3302613af0eSMichael Dalton len -= frag_size; 3319ab86bbcSShirley Ma page = (struct page *)page->private; 3329ab86bbcSShirley Ma offset = 0; 3333f2c31d9SMark McLoughlin } 3343f2c31d9SMark McLoughlin 3359ab86bbcSShirley Ma if (page) 336e9d7417bSJason Wang give_pages(rq, page); 3373f2c31d9SMark McLoughlin 3389ab86bbcSShirley Ma return skb; 3399ab86bbcSShirley Ma } 3409ab86bbcSShirley Ma 341a67edbf4SDaniel Borkmann static bool virtnet_xdp_xmit(struct virtnet_info *vi, 34256434a01SJohn Fastabend struct receive_queue *rq, 34356434a01SJohn Fastabend struct send_queue *sq, 344bb91accfSJason Wang struct xdp_buff *xdp, 345bb91accfSJason Wang void *data) 34656434a01SJohn Fastabend { 34756434a01SJohn Fastabend struct virtio_net_hdr_mrg_rxbuf *hdr; 34856434a01SJohn Fastabend unsigned int num_sg, len; 34956434a01SJohn Fastabend void *xdp_sent; 35056434a01SJohn Fastabend int err; 35156434a01SJohn Fastabend 35256434a01SJohn Fastabend /* Free up any pending old buffers before queueing new ones. */ 35356434a01SJohn Fastabend while ((xdp_sent = virtqueue_get_buf(sq->vq, &len)) != NULL) { 354bb91accfSJason Wang if (vi->mergeable_rx_bufs) { 35556434a01SJohn Fastabend struct page *sent_page = virt_to_head_page(xdp_sent); 356bb91accfSJason Wang 35756434a01SJohn Fastabend put_page(sent_page); 358bb91accfSJason Wang } else { /* small buffer */ 359bb91accfSJason Wang struct sk_buff *skb = xdp_sent; 360bb91accfSJason Wang 361bb91accfSJason Wang kfree_skb(skb); 362bb91accfSJason Wang } 36356434a01SJohn Fastabend } 36456434a01SJohn Fastabend 365bb91accfSJason Wang if (vi->mergeable_rx_bufs) { 36656434a01SJohn Fastabend /* Zero header and leave csum up to XDP layers */ 36756434a01SJohn Fastabend hdr = xdp->data; 36856434a01SJohn Fastabend memset(hdr, 0, vi->hdr_len); 36956434a01SJohn Fastabend 37056434a01SJohn Fastabend num_sg = 1; 37156434a01SJohn Fastabend sg_init_one(sq->sg, xdp->data, xdp->data_end - xdp->data); 372bb91accfSJason Wang } else { /* small buffer */ 373bb91accfSJason Wang struct sk_buff *skb = data; 374bb91accfSJason Wang 375bb91accfSJason Wang /* Zero header and leave csum up to XDP layers */ 376bb91accfSJason Wang hdr = skb_vnet_hdr(skb); 377bb91accfSJason Wang memset(hdr, 0, vi->hdr_len); 378bb91accfSJason Wang 379bb91accfSJason Wang num_sg = 2; 380bb91accfSJason Wang sg_init_table(sq->sg, 2); 381bb91accfSJason Wang sg_set_buf(sq->sg, hdr, vi->hdr_len); 382bb91accfSJason Wang skb_to_sgvec(skb, sq->sg + 1, 0, skb->len); 383bb91accfSJason Wang } 38456434a01SJohn Fastabend err = virtqueue_add_outbuf(sq->vq, sq->sg, num_sg, 385bb91accfSJason Wang data, GFP_ATOMIC); 38656434a01SJohn Fastabend if (unlikely(err)) { 387bb91accfSJason Wang if (vi->mergeable_rx_bufs) { 388bb91accfSJason Wang struct page *page = virt_to_head_page(xdp->data); 389bb91accfSJason Wang 39056434a01SJohn Fastabend put_page(page); 391bb91accfSJason Wang } else /* small buffer */ 392bb91accfSJason Wang kfree_skb(data); 393a67edbf4SDaniel Borkmann /* On error abort to avoid unnecessary kick */ 394a67edbf4SDaniel Borkmann return false; 39556434a01SJohn Fastabend } 39656434a01SJohn Fastabend 39756434a01SJohn Fastabend virtqueue_kick(sq->vq); 398a67edbf4SDaniel Borkmann return true; 39956434a01SJohn Fastabend } 40056434a01SJohn Fastabend 401f600b690SJohn Fastabend static u32 do_xdp_prog(struct virtnet_info *vi, 40256434a01SJohn Fastabend struct receive_queue *rq, 403f600b690SJohn Fastabend struct bpf_prog *xdp_prog, 404bb91accfSJason Wang void *data, int len) 405f600b690SJohn Fastabend { 406f600b690SJohn Fastabend int hdr_padded_len; 407f600b690SJohn Fastabend struct xdp_buff xdp; 408bb91accfSJason Wang void *buf; 40956434a01SJohn Fastabend unsigned int qp; 410f600b690SJohn Fastabend u32 act; 411f600b690SJohn Fastabend 412bb91accfSJason Wang if (vi->mergeable_rx_bufs) { 413f600b690SJohn Fastabend hdr_padded_len = sizeof(struct virtio_net_hdr_mrg_rxbuf); 414bb91accfSJason Wang xdp.data = data + hdr_padded_len; 415f600b690SJohn Fastabend xdp.data_end = xdp.data + (len - vi->hdr_len); 416bb91accfSJason Wang buf = data; 417bb91accfSJason Wang } else { /* small buffers */ 418bb91accfSJason Wang struct sk_buff *skb = data; 419bb91accfSJason Wang 420bb91accfSJason Wang xdp.data = skb->data; 421bb91accfSJason Wang xdp.data_end = xdp.data + len; 422bb91accfSJason Wang buf = skb->data; 423bb91accfSJason Wang } 424f600b690SJohn Fastabend 425f600b690SJohn Fastabend act = bpf_prog_run_xdp(xdp_prog, &xdp); 426f600b690SJohn Fastabend switch (act) { 427f600b690SJohn Fastabend case XDP_PASS: 428f600b690SJohn Fastabend return XDP_PASS; 42956434a01SJohn Fastabend case XDP_TX: 43056434a01SJohn Fastabend qp = vi->curr_queue_pairs - 43156434a01SJohn Fastabend vi->xdp_queue_pairs + 43256434a01SJohn Fastabend smp_processor_id(); 433bb91accfSJason Wang xdp.data = buf; 434a67edbf4SDaniel Borkmann if (unlikely(!virtnet_xdp_xmit(vi, rq, &vi->sq[qp], &xdp, 435a67edbf4SDaniel Borkmann data))) 436a67edbf4SDaniel Borkmann trace_xdp_exception(vi->dev, xdp_prog, act); 43756434a01SJohn Fastabend return XDP_TX; 438f600b690SJohn Fastabend default: 439f600b690SJohn Fastabend bpf_warn_invalid_xdp_action(act); 440f600b690SJohn Fastabend case XDP_ABORTED: 441a67edbf4SDaniel Borkmann trace_xdp_exception(vi->dev, xdp_prog, act); 442f600b690SJohn Fastabend case XDP_DROP: 443f600b690SJohn Fastabend return XDP_DROP; 444f600b690SJohn Fastabend } 445f600b690SJohn Fastabend } 446f600b690SJohn Fastabend 447bb91accfSJason Wang static struct sk_buff *receive_small(struct net_device *dev, 448bb91accfSJason Wang struct virtnet_info *vi, 449bb91accfSJason Wang struct receive_queue *rq, 450bb91accfSJason Wang void *buf, unsigned int len) 451f121159dSMichael S. Tsirkin { 452f121159dSMichael S. Tsirkin struct sk_buff * skb = buf; 453bb91accfSJason Wang struct bpf_prog *xdp_prog; 454f121159dSMichael S. Tsirkin 455012873d0SMichael S. Tsirkin len -= vi->hdr_len; 456f121159dSMichael S. Tsirkin skb_trim(skb, len); 457f121159dSMichael S. Tsirkin 458bb91accfSJason Wang rcu_read_lock(); 459bb91accfSJason Wang xdp_prog = rcu_dereference(rq->xdp_prog); 460bb91accfSJason Wang if (xdp_prog) { 461bb91accfSJason Wang struct virtio_net_hdr_mrg_rxbuf *hdr = buf; 462bb91accfSJason Wang u32 act; 463bb91accfSJason Wang 464bb91accfSJason Wang if (unlikely(hdr->hdr.gso_type || hdr->hdr.flags)) 465bb91accfSJason Wang goto err_xdp; 466bb91accfSJason Wang act = do_xdp_prog(vi, rq, xdp_prog, skb, len); 467bb91accfSJason Wang switch (act) { 468bb91accfSJason Wang case XDP_PASS: 469bb91accfSJason Wang break; 470bb91accfSJason Wang case XDP_TX: 471bb91accfSJason Wang rcu_read_unlock(); 472bb91accfSJason Wang goto xdp_xmit; 473bb91accfSJason Wang case XDP_DROP: 474bb91accfSJason Wang default: 475bb91accfSJason Wang goto err_xdp; 476bb91accfSJason Wang } 477bb91accfSJason Wang } 478bb91accfSJason Wang rcu_read_unlock(); 479bb91accfSJason Wang 480f121159dSMichael S. Tsirkin return skb; 481bb91accfSJason Wang 482bb91accfSJason Wang err_xdp: 483bb91accfSJason Wang rcu_read_unlock(); 484bb91accfSJason Wang dev->stats.rx_dropped++; 485bb91accfSJason Wang kfree_skb(skb); 486bb91accfSJason Wang xdp_xmit: 487bb91accfSJason Wang return NULL; 488f121159dSMichael S. Tsirkin } 489f121159dSMichael S. Tsirkin 490f121159dSMichael S. Tsirkin static struct sk_buff *receive_big(struct net_device *dev, 491946fa564SMichael S. Tsirkin struct virtnet_info *vi, 492f121159dSMichael S. Tsirkin struct receive_queue *rq, 493f121159dSMichael S. Tsirkin void *buf, 494f121159dSMichael S. Tsirkin unsigned int len) 495f121159dSMichael S. Tsirkin { 496f121159dSMichael S. Tsirkin struct page *page = buf; 497c47a43d3SJason Wang struct sk_buff *skb = page_to_skb(vi, rq, page, 0, len, PAGE_SIZE); 498f121159dSMichael S. Tsirkin 499f121159dSMichael S. Tsirkin if (unlikely(!skb)) 500f121159dSMichael S. Tsirkin goto err; 501f121159dSMichael S. Tsirkin 502f121159dSMichael S. Tsirkin return skb; 503f121159dSMichael S. Tsirkin 504f121159dSMichael S. Tsirkin err: 505f121159dSMichael S. Tsirkin dev->stats.rx_dropped++; 506f121159dSMichael S. Tsirkin give_pages(rq, page); 507f121159dSMichael S. Tsirkin return NULL; 508f121159dSMichael S. Tsirkin } 509f121159dSMichael S. Tsirkin 51072979a6cSJohn Fastabend /* The conditions to enable XDP should preclude the underlying device from 51172979a6cSJohn Fastabend * sending packets across multiple buffers (num_buf > 1). However per spec 51272979a6cSJohn Fastabend * it does not appear to be illegal to do so but rather just against convention. 51372979a6cSJohn Fastabend * So in order to avoid making a system unresponsive the packets are pushed 51472979a6cSJohn Fastabend * into a page and the XDP program is run. This will be extremely slow and we 51572979a6cSJohn Fastabend * push a warning to the user to fix this as soon as possible. Fixing this may 51672979a6cSJohn Fastabend * require resolving the underlying hardware to determine why multiple buffers 51772979a6cSJohn Fastabend * are being received or simply loading the XDP program in the ingress stack 51872979a6cSJohn Fastabend * after the skb is built because there is no advantage to running it here 51972979a6cSJohn Fastabend * anymore. 52072979a6cSJohn Fastabend */ 52172979a6cSJohn Fastabend static struct page *xdp_linearize_page(struct receive_queue *rq, 52256a86f84SJason Wang u16 *num_buf, 52372979a6cSJohn Fastabend struct page *p, 52472979a6cSJohn Fastabend int offset, 52572979a6cSJohn Fastabend unsigned int *len) 52672979a6cSJohn Fastabend { 52772979a6cSJohn Fastabend struct page *page = alloc_page(GFP_ATOMIC); 52872979a6cSJohn Fastabend unsigned int page_off = 0; 52972979a6cSJohn Fastabend 53072979a6cSJohn Fastabend if (!page) 53172979a6cSJohn Fastabend return NULL; 53272979a6cSJohn Fastabend 53372979a6cSJohn Fastabend memcpy(page_address(page) + page_off, page_address(p) + offset, *len); 53472979a6cSJohn Fastabend page_off += *len; 53572979a6cSJohn Fastabend 53656a86f84SJason Wang while (--*num_buf) { 53772979a6cSJohn Fastabend unsigned int buflen; 53872979a6cSJohn Fastabend unsigned long ctx; 53972979a6cSJohn Fastabend void *buf; 54072979a6cSJohn Fastabend int off; 54172979a6cSJohn Fastabend 54272979a6cSJohn Fastabend ctx = (unsigned long)virtqueue_get_buf(rq->vq, &buflen); 54372979a6cSJohn Fastabend if (unlikely(!ctx)) 54472979a6cSJohn Fastabend goto err_buf; 54572979a6cSJohn Fastabend 54672979a6cSJohn Fastabend buf = mergeable_ctx_to_buf_address(ctx); 54772979a6cSJohn Fastabend p = virt_to_head_page(buf); 54872979a6cSJohn Fastabend off = buf - page_address(p); 54972979a6cSJohn Fastabend 55056a86f84SJason Wang /* guard against a misconfigured or uncooperative backend that 55156a86f84SJason Wang * is sending packet larger than the MTU. 55256a86f84SJason Wang */ 55356a86f84SJason Wang if ((page_off + buflen) > PAGE_SIZE) { 55456a86f84SJason Wang put_page(p); 55556a86f84SJason Wang goto err_buf; 55656a86f84SJason Wang } 55756a86f84SJason Wang 55872979a6cSJohn Fastabend memcpy(page_address(page) + page_off, 55972979a6cSJohn Fastabend page_address(p) + off, buflen); 56072979a6cSJohn Fastabend page_off += buflen; 56156a86f84SJason Wang put_page(p); 56272979a6cSJohn Fastabend } 56372979a6cSJohn Fastabend 56472979a6cSJohn Fastabend *len = page_off; 56572979a6cSJohn Fastabend return page; 56672979a6cSJohn Fastabend err_buf: 56772979a6cSJohn Fastabend __free_pages(page, 0); 56872979a6cSJohn Fastabend return NULL; 56972979a6cSJohn Fastabend } 57072979a6cSJohn Fastabend 5718fc3b9e9SMichael S. Tsirkin static struct sk_buff *receive_mergeable(struct net_device *dev, 572fdd819b2SMichael S. Tsirkin struct virtnet_info *vi, 5738fc3b9e9SMichael S. Tsirkin struct receive_queue *rq, 574ab7db917SMichael Dalton unsigned long ctx, 5758fc3b9e9SMichael S. Tsirkin unsigned int len) 5769ab86bbcSShirley Ma { 577ab7db917SMichael Dalton void *buf = mergeable_ctx_to_buf_address(ctx); 578012873d0SMichael S. Tsirkin struct virtio_net_hdr_mrg_rxbuf *hdr = buf; 579012873d0SMichael S. Tsirkin u16 num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers); 5808fc3b9e9SMichael S. Tsirkin struct page *page = virt_to_head_page(buf); 5818fc3b9e9SMichael S. Tsirkin int offset = buf - page_address(page); 582f600b690SJohn Fastabend struct sk_buff *head_skb, *curr_skb; 583f600b690SJohn Fastabend struct bpf_prog *xdp_prog; 584f600b690SJohn Fastabend unsigned int truesize; 585ab7db917SMichael Dalton 58656434a01SJohn Fastabend head_skb = NULL; 58756434a01SJohn Fastabend 588f600b690SJohn Fastabend rcu_read_lock(); 589f600b690SJohn Fastabend xdp_prog = rcu_dereference(rq->xdp_prog); 590f600b690SJohn Fastabend if (xdp_prog) { 59172979a6cSJohn Fastabend struct page *xdp_page; 592f600b690SJohn Fastabend u32 act; 593f600b690SJohn Fastabend 59473b62bd0SJason Wang /* This happens when rx buffer size is underestimated */ 595f600b690SJohn Fastabend if (unlikely(num_buf > 1)) { 59672979a6cSJohn Fastabend /* linearize data for XDP */ 59756a86f84SJason Wang xdp_page = xdp_linearize_page(rq, &num_buf, 59872979a6cSJohn Fastabend page, offset, &len); 59972979a6cSJohn Fastabend if (!xdp_page) 600f600b690SJohn Fastabend goto err_xdp; 60172979a6cSJohn Fastabend offset = 0; 60272979a6cSJohn Fastabend } else { 60372979a6cSJohn Fastabend xdp_page = page; 604f600b690SJohn Fastabend } 605f600b690SJohn Fastabend 606f600b690SJohn Fastabend /* Transient failure which in theory could occur if 607f600b690SJohn Fastabend * in-flight packets from before XDP was enabled reach 608f600b690SJohn Fastabend * the receive path after XDP is loaded. In practice I 609f600b690SJohn Fastabend * was not able to create this condition. 610f600b690SJohn Fastabend */ 611b00f70b0SJason Wang if (unlikely(hdr->hdr.gso_type)) 612f600b690SJohn Fastabend goto err_xdp; 613f600b690SJohn Fastabend 614bb91accfSJason Wang act = do_xdp_prog(vi, rq, xdp_prog, 615bb91accfSJason Wang page_address(xdp_page) + offset, len); 61656434a01SJohn Fastabend switch (act) { 61756434a01SJohn Fastabend case XDP_PASS: 6181830f893SJason Wang /* We can only create skb based on xdp_page. */ 6191830f893SJason Wang if (unlikely(xdp_page != page)) { 6201830f893SJason Wang rcu_read_unlock(); 6211830f893SJason Wang put_page(page); 6221830f893SJason Wang head_skb = page_to_skb(vi, rq, xdp_page, 6231830f893SJason Wang 0, len, PAGE_SIZE); 6245c33474dSJason Wang ewma_pkt_len_add(&rq->mrg_avg_pkt_len, len); 6251830f893SJason Wang return head_skb; 6261830f893SJason Wang } 62756434a01SJohn Fastabend break; 62856434a01SJohn Fastabend case XDP_TX: 6295c33474dSJason Wang ewma_pkt_len_add(&rq->mrg_avg_pkt_len, len); 63072979a6cSJohn Fastabend if (unlikely(xdp_page != page)) 63172979a6cSJohn Fastabend goto err_xdp; 63256434a01SJohn Fastabend rcu_read_unlock(); 63356434a01SJohn Fastabend goto xdp_xmit; 63456434a01SJohn Fastabend case XDP_DROP: 63556434a01SJohn Fastabend default: 63672979a6cSJohn Fastabend if (unlikely(xdp_page != page)) 63772979a6cSJohn Fastabend __free_pages(xdp_page, 0); 6385c33474dSJason Wang ewma_pkt_len_add(&rq->mrg_avg_pkt_len, len); 639f600b690SJohn Fastabend goto err_xdp; 640f600b690SJohn Fastabend } 64156434a01SJohn Fastabend } 642f600b690SJohn Fastabend rcu_read_unlock(); 643f600b690SJohn Fastabend 644f600b690SJohn Fastabend truesize = max(len, mergeable_ctx_to_buf_truesize(ctx)); 645f600b690SJohn Fastabend head_skb = page_to_skb(vi, rq, page, offset, len, truesize); 646f600b690SJohn Fastabend curr_skb = head_skb; 6479ab86bbcSShirley Ma 6488fc3b9e9SMichael S. Tsirkin if (unlikely(!curr_skb)) 6498fc3b9e9SMichael S. Tsirkin goto err_skb; 6509ab86bbcSShirley Ma while (--num_buf) { 6518fc3b9e9SMichael S. Tsirkin int num_skb_frags; 6528fc3b9e9SMichael S. Tsirkin 653ab7db917SMichael Dalton ctx = (unsigned long)virtqueue_get_buf(rq->vq, &len); 654ab7db917SMichael Dalton if (unlikely(!ctx)) { 6558fc3b9e9SMichael S. Tsirkin pr_debug("%s: rx error: %d buffers out of %d missing\n", 656fdd819b2SMichael S. Tsirkin dev->name, num_buf, 657012873d0SMichael S. Tsirkin virtio16_to_cpu(vi->vdev, 658012873d0SMichael S. Tsirkin hdr->num_buffers)); 6598fc3b9e9SMichael S. Tsirkin dev->stats.rx_length_errors++; 6608fc3b9e9SMichael S. Tsirkin goto err_buf; 6613f2c31d9SMark McLoughlin } 6628fc3b9e9SMichael S. Tsirkin 663ab7db917SMichael Dalton buf = mergeable_ctx_to_buf_address(ctx); 6648fc3b9e9SMichael S. Tsirkin page = virt_to_head_page(buf); 6658fc3b9e9SMichael S. Tsirkin 6668fc3b9e9SMichael S. Tsirkin num_skb_frags = skb_shinfo(curr_skb)->nr_frags; 6672613af0eSMichael Dalton if (unlikely(num_skb_frags == MAX_SKB_FRAGS)) { 6682613af0eSMichael Dalton struct sk_buff *nskb = alloc_skb(0, GFP_ATOMIC); 6698fc3b9e9SMichael S. Tsirkin 6708fc3b9e9SMichael S. Tsirkin if (unlikely(!nskb)) 6718fc3b9e9SMichael S. Tsirkin goto err_skb; 6722613af0eSMichael Dalton if (curr_skb == head_skb) 6732613af0eSMichael Dalton skb_shinfo(curr_skb)->frag_list = nskb; 6742613af0eSMichael Dalton else 6752613af0eSMichael Dalton curr_skb->next = nskb; 6762613af0eSMichael Dalton curr_skb = nskb; 6772613af0eSMichael Dalton head_skb->truesize += nskb->truesize; 6782613af0eSMichael Dalton num_skb_frags = 0; 6792613af0eSMichael Dalton } 680ab7db917SMichael Dalton truesize = max(len, mergeable_ctx_to_buf_truesize(ctx)); 6812613af0eSMichael Dalton if (curr_skb != head_skb) { 6822613af0eSMichael Dalton head_skb->data_len += len; 6832613af0eSMichael Dalton head_skb->len += len; 684fb51879dSMichael Dalton head_skb->truesize += truesize; 6852613af0eSMichael Dalton } 6868fc3b9e9SMichael S. Tsirkin offset = buf - page_address(page); 687ba275241SJason Wang if (skb_can_coalesce(curr_skb, num_skb_frags, page, offset)) { 688ba275241SJason Wang put_page(page); 689ba275241SJason Wang skb_coalesce_rx_frag(curr_skb, num_skb_frags - 1, 690fb51879dSMichael Dalton len, truesize); 691ba275241SJason Wang } else { 6922613af0eSMichael Dalton skb_add_rx_frag(curr_skb, num_skb_frags, page, 693fb51879dSMichael Dalton offset, len, truesize); 694ba275241SJason Wang } 6958fc3b9e9SMichael S. Tsirkin } 6968fc3b9e9SMichael S. Tsirkin 6975377d758SJohannes Berg ewma_pkt_len_add(&rq->mrg_avg_pkt_len, head_skb->len); 6988fc3b9e9SMichael S. Tsirkin return head_skb; 6998fc3b9e9SMichael S. Tsirkin 700f600b690SJohn Fastabend err_xdp: 701f600b690SJohn Fastabend rcu_read_unlock(); 7028fc3b9e9SMichael S. Tsirkin err_skb: 7038fc3b9e9SMichael S. Tsirkin put_page(page); 7048fc3b9e9SMichael S. Tsirkin while (--num_buf) { 705ab7db917SMichael Dalton ctx = (unsigned long)virtqueue_get_buf(rq->vq, &len); 706ab7db917SMichael Dalton if (unlikely(!ctx)) { 7078fc3b9e9SMichael S. Tsirkin pr_debug("%s: rx error: %d buffers missing\n", 7088fc3b9e9SMichael S. Tsirkin dev->name, num_buf); 7098fc3b9e9SMichael S. Tsirkin dev->stats.rx_length_errors++; 7108fc3b9e9SMichael S. Tsirkin break; 7118fc3b9e9SMichael S. Tsirkin } 712ab7db917SMichael Dalton page = virt_to_head_page(mergeable_ctx_to_buf_address(ctx)); 7138fc3b9e9SMichael S. Tsirkin put_page(page); 7143f2c31d9SMark McLoughlin } 7158fc3b9e9SMichael S. Tsirkin err_buf: 7168fc3b9e9SMichael S. Tsirkin dev->stats.rx_dropped++; 7178fc3b9e9SMichael S. Tsirkin dev_kfree_skb(head_skb); 71856434a01SJohn Fastabend xdp_xmit: 7198fc3b9e9SMichael S. Tsirkin return NULL; 7209ab86bbcSShirley Ma } 7219ab86bbcSShirley Ma 722946fa564SMichael S. Tsirkin static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq, 723946fa564SMichael S. Tsirkin void *buf, unsigned int len) 7249ab86bbcSShirley Ma { 725e9d7417bSJason Wang struct net_device *dev = vi->dev; 72658472a76SEric Dumazet struct virtnet_stats *stats = this_cpu_ptr(vi->stats); 7279ab86bbcSShirley Ma struct sk_buff *skb; 728012873d0SMichael S. Tsirkin struct virtio_net_hdr_mrg_rxbuf *hdr; 7299ab86bbcSShirley Ma 730bcff3162SMichael S. Tsirkin if (unlikely(len < vi->hdr_len + ETH_HLEN)) { 7319ab86bbcSShirley Ma pr_debug("%s: short packet %i\n", dev->name, len); 7329ab86bbcSShirley Ma dev->stats.rx_length_errors++; 733ab7db917SMichael Dalton if (vi->mergeable_rx_bufs) { 734ab7db917SMichael Dalton unsigned long ctx = (unsigned long)buf; 735ab7db917SMichael Dalton void *base = mergeable_ctx_to_buf_address(ctx); 736ab7db917SMichael Dalton put_page(virt_to_head_page(base)); 737ab7db917SMichael Dalton } else if (vi->big_packets) { 73898bfd23cSMichael Dalton give_pages(rq, buf); 739ab7db917SMichael Dalton } else { 7409ab86bbcSShirley Ma dev_kfree_skb(buf); 741ab7db917SMichael Dalton } 7429ab86bbcSShirley Ma return; 7439ab86bbcSShirley Ma } 7449ab86bbcSShirley Ma 745f121159dSMichael S. Tsirkin if (vi->mergeable_rx_bufs) 746fdd819b2SMichael S. Tsirkin skb = receive_mergeable(dev, vi, rq, (unsigned long)buf, len); 747f121159dSMichael S. Tsirkin else if (vi->big_packets) 748946fa564SMichael S. Tsirkin skb = receive_big(dev, vi, rq, buf, len); 749f121159dSMichael S. Tsirkin else 750bb91accfSJason Wang skb = receive_small(dev, vi, rq, buf, len); 751f121159dSMichael S. Tsirkin 7528fc3b9e9SMichael S. Tsirkin if (unlikely(!skb)) 7532613af0eSMichael Dalton return; 7543f2c31d9SMark McLoughlin 7559ab86bbcSShirley Ma hdr = skb_vnet_hdr(skb); 7563fa2a1dfSstephen hemminger 75783a27052SEric Dumazet u64_stats_update_begin(&stats->rx_syncp); 7583fa2a1dfSstephen hemminger stats->rx_bytes += skb->len; 7593fa2a1dfSstephen hemminger stats->rx_packets++; 76083a27052SEric Dumazet u64_stats_update_end(&stats->rx_syncp); 761296f96fcSRusty Russell 762e858fae2SMike Rapoport if (hdr->hdr.flags & VIRTIO_NET_HDR_F_DATA_VALID) 76310a8d94aSJason Wang skb->ip_summed = CHECKSUM_UNNECESSARY; 764296f96fcSRusty Russell 765e858fae2SMike Rapoport if (virtio_net_hdr_to_skb(skb, &hdr->hdr, 766e858fae2SMike Rapoport virtio_is_little_endian(vi->vdev))) { 767e858fae2SMike Rapoport net_warn_ratelimited("%s: bad gso: type: %u, size: %u\n", 768e858fae2SMike Rapoport dev->name, hdr->hdr.gso_type, 769fdd819b2SMichael S. Tsirkin hdr->hdr.gso_size); 770296f96fcSRusty Russell goto frame_err; 771296f96fcSRusty Russell } 772296f96fcSRusty Russell 773d1dc06dcSMike Rapoport skb->protocol = eth_type_trans(skb, dev); 774d1dc06dcSMike Rapoport pr_debug("Receiving skb proto 0x%04x len %i type %i\n", 775d1dc06dcSMike Rapoport ntohs(skb->protocol), skb->len, skb->pkt_type); 776d1dc06dcSMike Rapoport 7770fbd050aSEric Dumazet napi_gro_receive(&rq->napi, skb); 778296f96fcSRusty Russell return; 779296f96fcSRusty Russell 780296f96fcSRusty Russell frame_err: 781296f96fcSRusty Russell dev->stats.rx_frame_errors++; 782296f96fcSRusty Russell dev_kfree_skb(skb); 783296f96fcSRusty Russell } 784296f96fcSRusty Russell 785946fa564SMichael S. Tsirkin static int add_recvbuf_small(struct virtnet_info *vi, struct receive_queue *rq, 786946fa564SMichael S. Tsirkin gfp_t gfp) 787296f96fcSRusty Russell { 788296f96fcSRusty Russell struct sk_buff *skb; 789012873d0SMichael S. Tsirkin struct virtio_net_hdr_mrg_rxbuf *hdr; 7909ab86bbcSShirley Ma int err; 7913f2c31d9SMark McLoughlin 7925061de36SMichael Dalton skb = __netdev_alloc_skb_ip_align(vi->dev, GOOD_PACKET_LEN, gfp); 7939ab86bbcSShirley Ma if (unlikely(!skb)) 7949ab86bbcSShirley Ma return -ENOMEM; 795296f96fcSRusty Russell 7965061de36SMichael Dalton skb_put(skb, GOOD_PACKET_LEN); 7973f2c31d9SMark McLoughlin 7983f2c31d9SMark McLoughlin hdr = skb_vnet_hdr(skb); 799547c890cSJason Wang sg_init_table(rq->sg, 2); 800012873d0SMichael S. Tsirkin sg_set_buf(rq->sg, hdr, vi->hdr_len); 801e9d7417bSJason Wang skb_to_sgvec(skb, rq->sg + 1, 0, skb->len); 80297402b96SHerbert Xu 8039dc7b9e4SRusty Russell err = virtqueue_add_inbuf(rq->vq, rq->sg, 2, skb, gfp); 8049ab86bbcSShirley Ma if (err < 0) 8059ab86bbcSShirley Ma dev_kfree_skb(skb); 80697402b96SHerbert Xu 8079ab86bbcSShirley Ma return err; 80897402b96SHerbert Xu } 80997402b96SHerbert Xu 810012873d0SMichael S. Tsirkin static int add_recvbuf_big(struct virtnet_info *vi, struct receive_queue *rq, 811012873d0SMichael S. Tsirkin gfp_t gfp) 8129ab86bbcSShirley Ma { 8139ab86bbcSShirley Ma struct page *first, *list = NULL; 8149ab86bbcSShirley Ma char *p; 8159ab86bbcSShirley Ma int i, err, offset; 816296f96fcSRusty Russell 817a5835440SRusty Russell sg_init_table(rq->sg, MAX_SKB_FRAGS + 2); 818a5835440SRusty Russell 819e9d7417bSJason Wang /* page in rq->sg[MAX_SKB_FRAGS + 1] is list tail */ 8209ab86bbcSShirley Ma for (i = MAX_SKB_FRAGS + 1; i > 1; --i) { 821e9d7417bSJason Wang first = get_a_page(rq, gfp); 8229ab86bbcSShirley Ma if (!first) { 8239ab86bbcSShirley Ma if (list) 824e9d7417bSJason Wang give_pages(rq, list); 8259ab86bbcSShirley Ma return -ENOMEM; 826296f96fcSRusty Russell } 827e9d7417bSJason Wang sg_set_buf(&rq->sg[i], page_address(first), PAGE_SIZE); 8289ab86bbcSShirley Ma 8299ab86bbcSShirley Ma /* chain new page in list head to match sg */ 8309ab86bbcSShirley Ma first->private = (unsigned long)list; 8319ab86bbcSShirley Ma list = first; 8329ab86bbcSShirley Ma } 8339ab86bbcSShirley Ma 834e9d7417bSJason Wang first = get_a_page(rq, gfp); 8359ab86bbcSShirley Ma if (!first) { 836e9d7417bSJason Wang give_pages(rq, list); 8379ab86bbcSShirley Ma return -ENOMEM; 8389ab86bbcSShirley Ma } 8399ab86bbcSShirley Ma p = page_address(first); 8409ab86bbcSShirley Ma 841e9d7417bSJason Wang /* rq->sg[0], rq->sg[1] share the same page */ 842012873d0SMichael S. Tsirkin /* a separated rq->sg[0] for header - required in case !any_header_sg */ 843012873d0SMichael S. Tsirkin sg_set_buf(&rq->sg[0], p, vi->hdr_len); 8449ab86bbcSShirley Ma 845e9d7417bSJason Wang /* rq->sg[1] for data packet, from offset */ 8469ab86bbcSShirley Ma offset = sizeof(struct padded_vnet_hdr); 847e9d7417bSJason Wang sg_set_buf(&rq->sg[1], p + offset, PAGE_SIZE - offset); 8489ab86bbcSShirley Ma 8499ab86bbcSShirley Ma /* chain first in list head */ 8509ab86bbcSShirley Ma first->private = (unsigned long)list; 8519dc7b9e4SRusty Russell err = virtqueue_add_inbuf(rq->vq, rq->sg, MAX_SKB_FRAGS + 2, 852aa989f5eSMichael S. Tsirkin first, gfp); 8539ab86bbcSShirley Ma if (err < 0) 854e9d7417bSJason Wang give_pages(rq, first); 8559ab86bbcSShirley Ma 8569ab86bbcSShirley Ma return err; 8579ab86bbcSShirley Ma } 8589ab86bbcSShirley Ma 8595377d758SJohannes Berg static unsigned int get_mergeable_buf_len(struct ewma_pkt_len *avg_pkt_len) 8609ab86bbcSShirley Ma { 861ab7db917SMichael Dalton const size_t hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf); 862fbf28d78SMichael Dalton unsigned int len; 863fbf28d78SMichael Dalton 8645377d758SJohannes Berg len = hdr_len + clamp_t(unsigned int, ewma_pkt_len_read(avg_pkt_len), 865fbf28d78SMichael Dalton GOOD_PACKET_LEN, PAGE_SIZE - hdr_len); 866fbf28d78SMichael Dalton return ALIGN(len, MERGEABLE_BUFFER_ALIGN); 867fbf28d78SMichael Dalton } 868fbf28d78SMichael Dalton 869fbf28d78SMichael Dalton static int add_recvbuf_mergeable(struct receive_queue *rq, gfp_t gfp) 870fbf28d78SMichael Dalton { 871fb51879dSMichael Dalton struct page_frag *alloc_frag = &rq->alloc_frag; 872fb51879dSMichael Dalton char *buf; 873ab7db917SMichael Dalton unsigned long ctx; 8749ab86bbcSShirley Ma int err; 875fb51879dSMichael Dalton unsigned int len, hole; 8769ab86bbcSShirley Ma 877fbf28d78SMichael Dalton len = get_mergeable_buf_len(&rq->mrg_avg_pkt_len); 878ab7db917SMichael Dalton if (unlikely(!skb_page_frag_refill(len, alloc_frag, gfp))) 8799ab86bbcSShirley Ma return -ENOMEM; 880ab7db917SMichael Dalton 881fb51879dSMichael Dalton buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset; 882ab7db917SMichael Dalton ctx = mergeable_buf_to_ctx(buf, len); 883fb51879dSMichael Dalton get_page(alloc_frag->page); 884fb51879dSMichael Dalton alloc_frag->offset += len; 885fb51879dSMichael Dalton hole = alloc_frag->size - alloc_frag->offset; 886ab7db917SMichael Dalton if (hole < len) { 887ab7db917SMichael Dalton /* To avoid internal fragmentation, if there is very likely not 888ab7db917SMichael Dalton * enough space for another buffer, add the remaining space to 889ab7db917SMichael Dalton * the current buffer. This extra space is not included in 890ab7db917SMichael Dalton * the truesize stored in ctx. 891ab7db917SMichael Dalton */ 892fb51879dSMichael Dalton len += hole; 893fb51879dSMichael Dalton alloc_frag->offset += hole; 894fb51879dSMichael Dalton } 8959ab86bbcSShirley Ma 896fb51879dSMichael Dalton sg_init_one(rq->sg, buf, len); 897ab7db917SMichael Dalton err = virtqueue_add_inbuf(rq->vq, rq->sg, 1, (void *)ctx, gfp); 8989ab86bbcSShirley Ma if (err < 0) 8992613af0eSMichael Dalton put_page(virt_to_head_page(buf)); 9009ab86bbcSShirley Ma 9019ab86bbcSShirley Ma return err; 902296f96fcSRusty Russell } 903296f96fcSRusty Russell 904b2baed69SRusty Russell /* 905b2baed69SRusty Russell * Returns false if we couldn't fill entirely (OOM). 906b2baed69SRusty Russell * 907b2baed69SRusty Russell * Normally run in the receive path, but can also be run from ndo_open 908b2baed69SRusty Russell * before we're receiving packets, or from refill_work which is 909b2baed69SRusty Russell * careful to disable receiving (using napi_disable). 910b2baed69SRusty Russell */ 911946fa564SMichael S. Tsirkin static bool try_fill_recv(struct virtnet_info *vi, struct receive_queue *rq, 912946fa564SMichael S. Tsirkin gfp_t gfp) 9133f2c31d9SMark McLoughlin { 9143f2c31d9SMark McLoughlin int err; 9151788f495SMichael S. Tsirkin bool oom; 9163f2c31d9SMark McLoughlin 917fb51879dSMichael Dalton gfp |= __GFP_COLD; 9180aea51c3SAmit Shah do { 9199ab86bbcSShirley Ma if (vi->mergeable_rx_bufs) 920e9d7417bSJason Wang err = add_recvbuf_mergeable(rq, gfp); 9219ab86bbcSShirley Ma else if (vi->big_packets) 922012873d0SMichael S. Tsirkin err = add_recvbuf_big(vi, rq, gfp); 9239ab86bbcSShirley Ma else 924946fa564SMichael S. Tsirkin err = add_recvbuf_small(vi, rq, gfp); 9253f2c31d9SMark McLoughlin 9261788f495SMichael S. Tsirkin oom = err == -ENOMEM; 9279ed4cb07SRusty Russell if (err) 9283f2c31d9SMark McLoughlin break; 929b7dfde95SLinus Torvalds } while (rq->vq->num_free); 930681daee2SJason Wang virtqueue_kick(rq->vq); 9313161e453SRusty Russell return !oom; 9323f2c31d9SMark McLoughlin } 9333f2c31d9SMark McLoughlin 93418445c4dSRusty Russell static void skb_recv_done(struct virtqueue *rvq) 935296f96fcSRusty Russell { 936296f96fcSRusty Russell struct virtnet_info *vi = rvq->vdev->priv; 937986a4f4dSJason Wang struct receive_queue *rq = &vi->rq[vq2rxq(rvq)]; 938e9d7417bSJason Wang 93918445c4dSRusty Russell /* Schedule NAPI, Suppress further interrupts if successful. */ 940e9d7417bSJason Wang if (napi_schedule_prep(&rq->napi)) { 9411915a712SMichael S. Tsirkin virtqueue_disable_cb(rvq); 942e9d7417bSJason Wang __napi_schedule(&rq->napi); 94318445c4dSRusty Russell } 944296f96fcSRusty Russell } 945296f96fcSRusty Russell 946e9d7417bSJason Wang static void virtnet_napi_enable(struct receive_queue *rq) 9473e9d08ecSBruce Rogers { 948e9d7417bSJason Wang napi_enable(&rq->napi); 9493e9d08ecSBruce Rogers 9503e9d08ecSBruce Rogers /* If all buffers were filled by other side before we napi_enabled, we 9513e9d08ecSBruce Rogers * won't get another interrupt, so process any outstanding packets 9523e9d08ecSBruce Rogers * now. virtnet_poll wants re-enable the queue, so we disable here. 9533e9d08ecSBruce Rogers * We synchronize against interrupts via NAPI_STATE_SCHED */ 954e9d7417bSJason Wang if (napi_schedule_prep(&rq->napi)) { 955e9d7417bSJason Wang virtqueue_disable_cb(rq->vq); 956ec13ee80SMichael S. Tsirkin local_bh_disable(); 957e9d7417bSJason Wang __napi_schedule(&rq->napi); 958ec13ee80SMichael S. Tsirkin local_bh_enable(); 9593e9d08ecSBruce Rogers } 9603e9d08ecSBruce Rogers } 9613e9d08ecSBruce Rogers 9623161e453SRusty Russell static void refill_work(struct work_struct *work) 9633161e453SRusty Russell { 964e9d7417bSJason Wang struct virtnet_info *vi = 965e9d7417bSJason Wang container_of(work, struct virtnet_info, refill.work); 9663161e453SRusty Russell bool still_empty; 967986a4f4dSJason Wang int i; 9683161e453SRusty Russell 96955257d72SSasha Levin for (i = 0; i < vi->curr_queue_pairs; i++) { 970986a4f4dSJason Wang struct receive_queue *rq = &vi->rq[i]; 971986a4f4dSJason Wang 972986a4f4dSJason Wang napi_disable(&rq->napi); 973946fa564SMichael S. Tsirkin still_empty = !try_fill_recv(vi, rq, GFP_KERNEL); 974986a4f4dSJason Wang virtnet_napi_enable(rq); 9753161e453SRusty Russell 9763161e453SRusty Russell /* In theory, this can happen: if we don't get any buffers in 977986a4f4dSJason Wang * we will *never* try to fill again. 978986a4f4dSJason Wang */ 9793161e453SRusty Russell if (still_empty) 9803b07e9caSTejun Heo schedule_delayed_work(&vi->refill, HZ/2); 9813161e453SRusty Russell } 982986a4f4dSJason Wang } 9833161e453SRusty Russell 9842ffa7598SJason Wang static int virtnet_receive(struct receive_queue *rq, int budget) 985296f96fcSRusty Russell { 986e9d7417bSJason Wang struct virtnet_info *vi = rq->vq->vdev->priv; 9872ffa7598SJason Wang unsigned int len, received = 0; 9889ab86bbcSShirley Ma void *buf; 989296f96fcSRusty Russell 990296f96fcSRusty Russell while (received < budget && 991e9d7417bSJason Wang (buf = virtqueue_get_buf(rq->vq, &len)) != NULL) { 992946fa564SMichael S. Tsirkin receive_buf(vi, rq, buf, len); 993296f96fcSRusty Russell received++; 994296f96fcSRusty Russell } 995296f96fcSRusty Russell 996be121f46SJason Wang if (rq->vq->num_free > virtqueue_get_vring_size(rq->vq) / 2) { 997946fa564SMichael S. Tsirkin if (!try_fill_recv(vi, rq, GFP_ATOMIC)) 9983b07e9caSTejun Heo schedule_delayed_work(&vi->refill, 0); 9993161e453SRusty Russell } 1000296f96fcSRusty Russell 10012ffa7598SJason Wang return received; 10022ffa7598SJason Wang } 10032ffa7598SJason Wang 10042ffa7598SJason Wang static int virtnet_poll(struct napi_struct *napi, int budget) 10052ffa7598SJason Wang { 10062ffa7598SJason Wang struct receive_queue *rq = 10072ffa7598SJason Wang container_of(napi, struct receive_queue, napi); 1008faadb05fSLi RongQing unsigned int r, received; 10092ffa7598SJason Wang 1010faadb05fSLi RongQing received = virtnet_receive(rq, budget); 10112ffa7598SJason Wang 10128329d98eSRusty Russell /* Out of packets? */ 10138329d98eSRusty Russell if (received < budget) { 1014cbdadbbfSMichael S. Tsirkin r = virtqueue_enable_cb_prepare(rq->vq); 10154d6308aaSEric Dumazet if (napi_complete_done(napi, received)) { 1016cbdadbbfSMichael S. Tsirkin if (unlikely(virtqueue_poll(rq->vq, r)) && 10178e95a202SJoe Perches napi_schedule_prep(napi)) { 1018e9d7417bSJason Wang virtqueue_disable_cb(rq->vq); 1019288379f0SBen Hutchings __napi_schedule(napi); 1020296f96fcSRusty Russell } 10214265f161SChristian Borntraeger } 10224d6308aaSEric Dumazet } 1023296f96fcSRusty Russell 1024296f96fcSRusty Russell return received; 1025296f96fcSRusty Russell } 1026296f96fcSRusty Russell 1027986a4f4dSJason Wang static int virtnet_open(struct net_device *dev) 1028986a4f4dSJason Wang { 1029986a4f4dSJason Wang struct virtnet_info *vi = netdev_priv(dev); 1030986a4f4dSJason Wang int i; 1031986a4f4dSJason Wang 1032e4166625SJason Wang for (i = 0; i < vi->max_queue_pairs; i++) { 1033e4166625SJason Wang if (i < vi->curr_queue_pairs) 1034986a4f4dSJason Wang /* Make sure we have some buffers: if oom use wq. */ 1035946fa564SMichael S. Tsirkin if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL)) 1036986a4f4dSJason Wang schedule_delayed_work(&vi->refill, 0); 1037986a4f4dSJason Wang virtnet_napi_enable(&vi->rq[i]); 1038986a4f4dSJason Wang } 1039986a4f4dSJason Wang 1040986a4f4dSJason Wang return 0; 1041986a4f4dSJason Wang } 1042986a4f4dSJason Wang 1043b7dfde95SLinus Torvalds static void free_old_xmit_skbs(struct send_queue *sq) 1044296f96fcSRusty Russell { 1045296f96fcSRusty Russell struct sk_buff *skb; 10466ee57bccSMichael S. Tsirkin unsigned int len; 1047e9d7417bSJason Wang struct virtnet_info *vi = sq->vq->vdev->priv; 104858472a76SEric Dumazet struct virtnet_stats *stats = this_cpu_ptr(vi->stats); 1049296f96fcSRusty Russell 1050e9d7417bSJason Wang while ((skb = virtqueue_get_buf(sq->vq, &len)) != NULL) { 1051296f96fcSRusty Russell pr_debug("Sent skb %p\n", skb); 10523fa2a1dfSstephen hemminger 105383a27052SEric Dumazet u64_stats_update_begin(&stats->tx_syncp); 10543fa2a1dfSstephen hemminger stats->tx_bytes += skb->len; 10553fa2a1dfSstephen hemminger stats->tx_packets++; 105683a27052SEric Dumazet u64_stats_update_end(&stats->tx_syncp); 10573fa2a1dfSstephen hemminger 1058ed79bab8SEric Dumazet dev_kfree_skb_any(skb); 1059296f96fcSRusty Russell } 1060296f96fcSRusty Russell } 1061296f96fcSRusty Russell 1062e9d7417bSJason Wang static int xmit_skb(struct send_queue *sq, struct sk_buff *skb) 1063296f96fcSRusty Russell { 1064012873d0SMichael S. Tsirkin struct virtio_net_hdr_mrg_rxbuf *hdr; 1065296f96fcSRusty Russell const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest; 1066e9d7417bSJason Wang struct virtnet_info *vi = sq->vq->vdev->priv; 10677bedc7dcSMichael S. Tsirkin unsigned num_sg; 1068012873d0SMichael S. Tsirkin unsigned hdr_len = vi->hdr_len; 1069e7428e95SMichael S. Tsirkin bool can_push; 1070296f96fcSRusty Russell 1071e174961cSJohannes Berg pr_debug("%s: xmit %p %pM\n", vi->dev->name, skb, dest); 1072e7428e95SMichael S. Tsirkin 1073e7428e95SMichael S. Tsirkin can_push = vi->any_header_sg && 1074e7428e95SMichael S. Tsirkin !((unsigned long)skb->data & (__alignof__(*hdr) - 1)) && 1075e7428e95SMichael S. Tsirkin !skb_header_cloned(skb) && skb_headroom(skb) >= hdr_len; 1076e7428e95SMichael S. Tsirkin /* Even if we can, don't push here yet as this would skew 1077e7428e95SMichael S. Tsirkin * csum_start offset below. */ 1078e7428e95SMichael S. Tsirkin if (can_push) 1079012873d0SMichael S. Tsirkin hdr = (struct virtio_net_hdr_mrg_rxbuf *)(skb->data - hdr_len); 1080e7428e95SMichael S. Tsirkin else 1081e7428e95SMichael S. Tsirkin hdr = skb_vnet_hdr(skb); 1082296f96fcSRusty Russell 1083e858fae2SMike Rapoport if (virtio_net_hdr_from_skb(skb, &hdr->hdr, 10846391a448SJason Wang virtio_is_little_endian(vi->vdev), false)) 1085296f96fcSRusty Russell BUG(); 1086296f96fcSRusty Russell 1087e7428e95SMichael S. Tsirkin if (vi->mergeable_rx_bufs) 1088012873d0SMichael S. Tsirkin hdr->num_buffers = 0; 10893f2c31d9SMark McLoughlin 1090547c890cSJason Wang sg_init_table(sq->sg, skb_shinfo(skb)->nr_frags + (can_push ? 1 : 2)); 1091e7428e95SMichael S. Tsirkin if (can_push) { 1092e7428e95SMichael S. Tsirkin __skb_push(skb, hdr_len); 1093e7428e95SMichael S. Tsirkin num_sg = skb_to_sgvec(skb, sq->sg, 0, skb->len); 1094e7428e95SMichael S. Tsirkin /* Pull header back to avoid skew in tx bytes calculations. */ 1095e7428e95SMichael S. Tsirkin __skb_pull(skb, hdr_len); 1096e7428e95SMichael S. Tsirkin } else { 1097e7428e95SMichael S. Tsirkin sg_set_buf(sq->sg, hdr, hdr_len); 1098b7dfde95SLinus Torvalds num_sg = skb_to_sgvec(skb, sq->sg + 1, 0, skb->len) + 1; 1099e7428e95SMichael S. Tsirkin } 11009dc7b9e4SRusty Russell return virtqueue_add_outbuf(sq->vq, sq->sg, num_sg, skb, GFP_ATOMIC); 110111a3a154SRusty Russell } 110211a3a154SRusty Russell 1103424efe9cSStephen Hemminger static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev) 110499ffc696SRusty Russell { 110599ffc696SRusty Russell struct virtnet_info *vi = netdev_priv(dev); 1106986a4f4dSJason Wang int qnum = skb_get_queue_mapping(skb); 1107986a4f4dSJason Wang struct send_queue *sq = &vi->sq[qnum]; 11089ed4cb07SRusty Russell int err; 11094b7fd2e6SMichael S. Tsirkin struct netdev_queue *txq = netdev_get_tx_queue(dev, qnum); 11104b7fd2e6SMichael S. Tsirkin bool kick = !skb->xmit_more; 11112cb9c6baSRusty Russell 11122cb9c6baSRusty Russell /* Free up any pending old buffers before queueing new ones. */ 1113e9d7417bSJason Wang free_old_xmit_skbs(sq); 111499ffc696SRusty Russell 1115074c3582SJacob Keller /* timestamp packet in software */ 1116074c3582SJacob Keller skb_tx_timestamp(skb); 1117074c3582SJacob Keller 111803f191baSMichael S. Tsirkin /* Try to transmit */ 1119b7dfde95SLinus Torvalds err = xmit_skb(sq, skb); 112099ffc696SRusty Russell 11219ed4cb07SRusty Russell /* This should not happen! */ 1122681daee2SJason Wang if (unlikely(err)) { 112358eba97dSRusty Russell dev->stats.tx_fifo_errors++; 11242e57b79cSRick Jones if (net_ratelimit()) 112558eba97dSRusty Russell dev_warn(&dev->dev, 1126b7dfde95SLinus Torvalds "Unexpected TXQ (%d) queue failure: %d\n", qnum, err); 112758eba97dSRusty Russell dev->stats.tx_dropped++; 112885e94525SEric W. Biederman dev_kfree_skb_any(skb); 112958eba97dSRusty Russell return NETDEV_TX_OK; 1130296f96fcSRusty Russell } 113103f191baSMichael S. Tsirkin 113248925e37SRusty Russell /* Don't wait up for transmitted skbs to be freed. */ 113348925e37SRusty Russell skb_orphan(skb); 113448925e37SRusty Russell nf_reset(skb); 113548925e37SRusty Russell 113660302ff6SMichael S. Tsirkin /* If running out of space, stop queue to avoid getting packets that we 113760302ff6SMichael S. Tsirkin * are then unable to transmit. 113860302ff6SMichael S. Tsirkin * An alternative would be to force queuing layer to requeue the skb by 113960302ff6SMichael S. Tsirkin * returning NETDEV_TX_BUSY. However, NETDEV_TX_BUSY should not be 114060302ff6SMichael S. Tsirkin * returned in a normal path of operation: it means that driver is not 114160302ff6SMichael S. Tsirkin * maintaining the TX queue stop/start state properly, and causes 114260302ff6SMichael S. Tsirkin * the stack to do a non-trivial amount of useless work. 114360302ff6SMichael S. Tsirkin * Since most packets only take 1 or 2 ring slots, stopping the queue 114460302ff6SMichael S. Tsirkin * early means 16 slots are typically wasted. 1145d631b94eSstephen hemminger */ 1146b7dfde95SLinus Torvalds if (sq->vq->num_free < 2+MAX_SKB_FRAGS) { 1147986a4f4dSJason Wang netif_stop_subqueue(dev, qnum); 1148e9d7417bSJason Wang if (unlikely(!virtqueue_enable_cb_delayed(sq->vq))) { 114948925e37SRusty Russell /* More just got used, free them then recheck. */ 1150b7dfde95SLinus Torvalds free_old_xmit_skbs(sq); 1151b7dfde95SLinus Torvalds if (sq->vq->num_free >= 2+MAX_SKB_FRAGS) { 1152986a4f4dSJason Wang netif_start_subqueue(dev, qnum); 1153e9d7417bSJason Wang virtqueue_disable_cb(sq->vq); 115448925e37SRusty Russell } 115548925e37SRusty Russell } 115648925e37SRusty Russell } 115748925e37SRusty Russell 11584b7fd2e6SMichael S. Tsirkin if (kick || netif_xmit_stopped(txq)) 1159c223a078SDavid S. Miller virtqueue_kick(sq->vq); 11600b725a2cSDavid S. Miller 11610b725a2cSDavid S. Miller return NETDEV_TX_OK; 1162c223a078SDavid S. Miller } 1163c223a078SDavid S. Miller 116440cbfc37SAmos Kong /* 116540cbfc37SAmos Kong * Send command via the control virtqueue and check status. Commands 116640cbfc37SAmos Kong * supported by the hypervisor, as indicated by feature bits, should 1167788a8b6dSstephen hemminger * never fail unless improperly formatted. 116840cbfc37SAmos Kong */ 116940cbfc37SAmos Kong static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd, 1170d24bae32Sstephen hemminger struct scatterlist *out) 117140cbfc37SAmos Kong { 1172f7bc9594SRusty Russell struct scatterlist *sgs[4], hdr, stat; 1173d24bae32Sstephen hemminger unsigned out_num = 0, tmp; 117440cbfc37SAmos Kong 117540cbfc37SAmos Kong /* Caller should know better */ 1176f7bc9594SRusty Russell BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ)); 117740cbfc37SAmos Kong 11782ac46030SMichael S. Tsirkin vi->ctrl_status = ~0; 11792ac46030SMichael S. Tsirkin vi->ctrl_hdr.class = class; 11802ac46030SMichael S. Tsirkin vi->ctrl_hdr.cmd = cmd; 1181f7bc9594SRusty Russell /* Add header */ 11822ac46030SMichael S. Tsirkin sg_init_one(&hdr, &vi->ctrl_hdr, sizeof(vi->ctrl_hdr)); 1183f7bc9594SRusty Russell sgs[out_num++] = &hdr; 118440cbfc37SAmos Kong 1185f7bc9594SRusty Russell if (out) 1186f7bc9594SRusty Russell sgs[out_num++] = out; 118740cbfc37SAmos Kong 1188f7bc9594SRusty Russell /* Add return status. */ 11892ac46030SMichael S. Tsirkin sg_init_one(&stat, &vi->ctrl_status, sizeof(vi->ctrl_status)); 1190d24bae32Sstephen hemminger sgs[out_num] = &stat; 119140cbfc37SAmos Kong 1192d24bae32Sstephen hemminger BUG_ON(out_num + 1 > ARRAY_SIZE(sgs)); 1193a7c58146SRusty Russell virtqueue_add_sgs(vi->cvq, sgs, out_num, 1, vi, GFP_ATOMIC); 119440cbfc37SAmos Kong 119567975901SHeinz Graalfs if (unlikely(!virtqueue_kick(vi->cvq))) 11962ac46030SMichael S. Tsirkin return vi->ctrl_status == VIRTIO_NET_OK; 119740cbfc37SAmos Kong 119840cbfc37SAmos Kong /* Spin for a response, the kick causes an ioport write, trapping 119940cbfc37SAmos Kong * into the hypervisor, so the request should be handled immediately. 120040cbfc37SAmos Kong */ 1201047b9b94SHeinz Graalfs while (!virtqueue_get_buf(vi->cvq, &tmp) && 1202047b9b94SHeinz Graalfs !virtqueue_is_broken(vi->cvq)) 120340cbfc37SAmos Kong cpu_relax(); 120440cbfc37SAmos Kong 12052ac46030SMichael S. Tsirkin return vi->ctrl_status == VIRTIO_NET_OK; 120640cbfc37SAmos Kong } 120740cbfc37SAmos Kong 12089c46f6d4SAlex Williamson static int virtnet_set_mac_address(struct net_device *dev, void *p) 12099c46f6d4SAlex Williamson { 12109c46f6d4SAlex Williamson struct virtnet_info *vi = netdev_priv(dev); 12119c46f6d4SAlex Williamson struct virtio_device *vdev = vi->vdev; 1212f2f2c8b4SJiri Pirko int ret; 1213e37e2ff3SAndy Lutomirski struct sockaddr *addr; 12147e58d5aeSAmos Kong struct scatterlist sg; 12159c46f6d4SAlex Williamson 1216801822d1SShyam Saini addr = kmemdup(p, sizeof(*addr), GFP_KERNEL); 1217e37e2ff3SAndy Lutomirski if (!addr) 1218e37e2ff3SAndy Lutomirski return -ENOMEM; 1219e37e2ff3SAndy Lutomirski 1220e37e2ff3SAndy Lutomirski ret = eth_prepare_mac_addr_change(dev, addr); 1221f2f2c8b4SJiri Pirko if (ret) 1222e37e2ff3SAndy Lutomirski goto out; 12239c46f6d4SAlex Williamson 12247e58d5aeSAmos Kong if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR)) { 12257e58d5aeSAmos Kong sg_init_one(&sg, addr->sa_data, dev->addr_len); 12267e58d5aeSAmos Kong if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC, 1227d24bae32Sstephen hemminger VIRTIO_NET_CTRL_MAC_ADDR_SET, &sg)) { 12287e58d5aeSAmos Kong dev_warn(&vdev->dev, 12297e58d5aeSAmos Kong "Failed to set mac address by vq command.\n"); 1230e37e2ff3SAndy Lutomirski ret = -EINVAL; 1231e37e2ff3SAndy Lutomirski goto out; 12327e58d5aeSAmos Kong } 12337e93a02fSMichael S. Tsirkin } else if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC) && 12347e93a02fSMichael S. Tsirkin !virtio_has_feature(vdev, VIRTIO_F_VERSION_1)) { 1235855e0c52SRusty Russell unsigned int i; 1236855e0c52SRusty Russell 1237855e0c52SRusty Russell /* Naturally, this has an atomicity problem. */ 1238855e0c52SRusty Russell for (i = 0; i < dev->addr_len; i++) 1239855e0c52SRusty Russell virtio_cwrite8(vdev, 1240855e0c52SRusty Russell offsetof(struct virtio_net_config, mac) + 1241855e0c52SRusty Russell i, addr->sa_data[i]); 12427e58d5aeSAmos Kong } 12437e58d5aeSAmos Kong 12447e58d5aeSAmos Kong eth_commit_mac_addr_change(dev, p); 1245e37e2ff3SAndy Lutomirski ret = 0; 12469c46f6d4SAlex Williamson 1247e37e2ff3SAndy Lutomirski out: 1248e37e2ff3SAndy Lutomirski kfree(addr); 1249e37e2ff3SAndy Lutomirski return ret; 12509c46f6d4SAlex Williamson } 12519c46f6d4SAlex Williamson 1252bc1f4470Sstephen hemminger static void virtnet_stats(struct net_device *dev, 12533fa2a1dfSstephen hemminger struct rtnl_link_stats64 *tot) 12543fa2a1dfSstephen hemminger { 12553fa2a1dfSstephen hemminger struct virtnet_info *vi = netdev_priv(dev); 12563fa2a1dfSstephen hemminger int cpu; 12573fa2a1dfSstephen hemminger unsigned int start; 12583fa2a1dfSstephen hemminger 12593fa2a1dfSstephen hemminger for_each_possible_cpu(cpu) { 126058472a76SEric Dumazet struct virtnet_stats *stats = per_cpu_ptr(vi->stats, cpu); 12613fa2a1dfSstephen hemminger u64 tpackets, tbytes, rpackets, rbytes; 12623fa2a1dfSstephen hemminger 12633fa2a1dfSstephen hemminger do { 126457a7744eSEric W. Biederman start = u64_stats_fetch_begin_irq(&stats->tx_syncp); 12653fa2a1dfSstephen hemminger tpackets = stats->tx_packets; 12663fa2a1dfSstephen hemminger tbytes = stats->tx_bytes; 126757a7744eSEric W. Biederman } while (u64_stats_fetch_retry_irq(&stats->tx_syncp, start)); 126883a27052SEric Dumazet 126983a27052SEric Dumazet do { 127057a7744eSEric W. Biederman start = u64_stats_fetch_begin_irq(&stats->rx_syncp); 12713fa2a1dfSstephen hemminger rpackets = stats->rx_packets; 12723fa2a1dfSstephen hemminger rbytes = stats->rx_bytes; 127357a7744eSEric W. Biederman } while (u64_stats_fetch_retry_irq(&stats->rx_syncp, start)); 12743fa2a1dfSstephen hemminger 12753fa2a1dfSstephen hemminger tot->rx_packets += rpackets; 12763fa2a1dfSstephen hemminger tot->tx_packets += tpackets; 12773fa2a1dfSstephen hemminger tot->rx_bytes += rbytes; 12783fa2a1dfSstephen hemminger tot->tx_bytes += tbytes; 12793fa2a1dfSstephen hemminger } 12803fa2a1dfSstephen hemminger 12813fa2a1dfSstephen hemminger tot->tx_dropped = dev->stats.tx_dropped; 1282021ac8d3SRick Jones tot->tx_fifo_errors = dev->stats.tx_fifo_errors; 12833fa2a1dfSstephen hemminger tot->rx_dropped = dev->stats.rx_dropped; 12843fa2a1dfSstephen hemminger tot->rx_length_errors = dev->stats.rx_length_errors; 12853fa2a1dfSstephen hemminger tot->rx_frame_errors = dev->stats.rx_frame_errors; 12863fa2a1dfSstephen hemminger } 12873fa2a1dfSstephen hemminger 1288da74e89dSAmit Shah #ifdef CONFIG_NET_POLL_CONTROLLER 1289da74e89dSAmit Shah static void virtnet_netpoll(struct net_device *dev) 1290da74e89dSAmit Shah { 1291da74e89dSAmit Shah struct virtnet_info *vi = netdev_priv(dev); 1292986a4f4dSJason Wang int i; 1293da74e89dSAmit Shah 1294986a4f4dSJason Wang for (i = 0; i < vi->curr_queue_pairs; i++) 1295986a4f4dSJason Wang napi_schedule(&vi->rq[i].napi); 1296da74e89dSAmit Shah } 1297da74e89dSAmit Shah #endif 1298da74e89dSAmit Shah 1299586d17c5SJason Wang static void virtnet_ack_link_announce(struct virtnet_info *vi) 1300586d17c5SJason Wang { 1301586d17c5SJason Wang rtnl_lock(); 1302586d17c5SJason Wang if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_ANNOUNCE, 1303d24bae32Sstephen hemminger VIRTIO_NET_CTRL_ANNOUNCE_ACK, NULL)) 1304586d17c5SJason Wang dev_warn(&vi->dev->dev, "Failed to ack link announce.\n"); 1305586d17c5SJason Wang rtnl_unlock(); 1306586d17c5SJason Wang } 1307586d17c5SJason Wang 1308*47315329SJohn Fastabend static int _virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs) 1309986a4f4dSJason Wang { 1310986a4f4dSJason Wang struct scatterlist sg; 1311986a4f4dSJason Wang struct net_device *dev = vi->dev; 1312986a4f4dSJason Wang 1313986a4f4dSJason Wang if (!vi->has_cvq || !virtio_has_feature(vi->vdev, VIRTIO_NET_F_MQ)) 1314986a4f4dSJason Wang return 0; 1315986a4f4dSJason Wang 1316a725ee3eSAndy Lutomirski vi->ctrl_mq.virtqueue_pairs = cpu_to_virtio16(vi->vdev, queue_pairs); 1317a725ee3eSAndy Lutomirski sg_init_one(&sg, &vi->ctrl_mq, sizeof(vi->ctrl_mq)); 1318986a4f4dSJason Wang 1319986a4f4dSJason Wang if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ, 1320d24bae32Sstephen hemminger VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET, &sg)) { 1321986a4f4dSJason Wang dev_warn(&dev->dev, "Fail to set num of queue pairs to %d\n", 1322986a4f4dSJason Wang queue_pairs); 1323986a4f4dSJason Wang return -EINVAL; 132455257d72SSasha Levin } else { 1325986a4f4dSJason Wang vi->curr_queue_pairs = queue_pairs; 132635ed159bSJason Wang /* virtnet_open() will refill when device is going to up. */ 132735ed159bSJason Wang if (dev->flags & IFF_UP) 13289b9cd802SJason Wang schedule_delayed_work(&vi->refill, 0); 132955257d72SSasha Levin } 1330986a4f4dSJason Wang 1331986a4f4dSJason Wang return 0; 1332986a4f4dSJason Wang } 1333986a4f4dSJason Wang 1334*47315329SJohn Fastabend static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs) 1335*47315329SJohn Fastabend { 1336*47315329SJohn Fastabend int err; 1337*47315329SJohn Fastabend 1338*47315329SJohn Fastabend rtnl_lock(); 1339*47315329SJohn Fastabend err = _virtnet_set_queues(vi, queue_pairs); 1340*47315329SJohn Fastabend rtnl_unlock(); 1341*47315329SJohn Fastabend return err; 1342*47315329SJohn Fastabend } 1343*47315329SJohn Fastabend 1344296f96fcSRusty Russell static int virtnet_close(struct net_device *dev) 1345296f96fcSRusty Russell { 1346296f96fcSRusty Russell struct virtnet_info *vi = netdev_priv(dev); 1347986a4f4dSJason Wang int i; 1348296f96fcSRusty Russell 1349b2baed69SRusty Russell /* Make sure refill_work doesn't re-enable napi! */ 1350b2baed69SRusty Russell cancel_delayed_work_sync(&vi->refill); 1351986a4f4dSJason Wang 1352986a4f4dSJason Wang for (i = 0; i < vi->max_queue_pairs; i++) 1353986a4f4dSJason Wang napi_disable(&vi->rq[i].napi); 1354296f96fcSRusty Russell 1355296f96fcSRusty Russell return 0; 1356296f96fcSRusty Russell } 1357296f96fcSRusty Russell 13582af7698eSAlex Williamson static void virtnet_set_rx_mode(struct net_device *dev) 13592af7698eSAlex Williamson { 13602af7698eSAlex Williamson struct virtnet_info *vi = netdev_priv(dev); 1361f565a7c2SAlex Williamson struct scatterlist sg[2]; 1362f565a7c2SAlex Williamson struct virtio_net_ctrl_mac *mac_data; 1363ccffad25SJiri Pirko struct netdev_hw_addr *ha; 136432e7bfc4SJiri Pirko int uc_count; 13654cd24eafSJiri Pirko int mc_count; 1366f565a7c2SAlex Williamson void *buf; 1367f565a7c2SAlex Williamson int i; 13682af7698eSAlex Williamson 1369788a8b6dSstephen hemminger /* We can't dynamically set ndo_set_rx_mode, so return gracefully */ 13702af7698eSAlex Williamson if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_RX)) 13712af7698eSAlex Williamson return; 13722af7698eSAlex Williamson 13732ac46030SMichael S. Tsirkin vi->ctrl_promisc = ((dev->flags & IFF_PROMISC) != 0); 13742ac46030SMichael S. Tsirkin vi->ctrl_allmulti = ((dev->flags & IFF_ALLMULTI) != 0); 13752af7698eSAlex Williamson 13762ac46030SMichael S. Tsirkin sg_init_one(sg, &vi->ctrl_promisc, sizeof(vi->ctrl_promisc)); 13772af7698eSAlex Williamson 13782af7698eSAlex Williamson if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX, 1379d24bae32Sstephen hemminger VIRTIO_NET_CTRL_RX_PROMISC, sg)) 13802af7698eSAlex Williamson dev_warn(&dev->dev, "Failed to %sable promisc mode.\n", 13812ac46030SMichael S. Tsirkin vi->ctrl_promisc ? "en" : "dis"); 13822af7698eSAlex Williamson 13832ac46030SMichael S. Tsirkin sg_init_one(sg, &vi->ctrl_allmulti, sizeof(vi->ctrl_allmulti)); 13842af7698eSAlex Williamson 13852af7698eSAlex Williamson if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX, 1386d24bae32Sstephen hemminger VIRTIO_NET_CTRL_RX_ALLMULTI, sg)) 13872af7698eSAlex Williamson dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n", 13882ac46030SMichael S. Tsirkin vi->ctrl_allmulti ? "en" : "dis"); 1389f565a7c2SAlex Williamson 139032e7bfc4SJiri Pirko uc_count = netdev_uc_count(dev); 13914cd24eafSJiri Pirko mc_count = netdev_mc_count(dev); 1392f565a7c2SAlex Williamson /* MAC filter - use one buffer for both lists */ 13934cd24eafSJiri Pirko buf = kzalloc(((uc_count + mc_count) * ETH_ALEN) + 1394f565a7c2SAlex Williamson (2 * sizeof(mac_data->entries)), GFP_ATOMIC); 13954cd24eafSJiri Pirko mac_data = buf; 1396e68ed8f0SJoe Perches if (!buf) 1397f565a7c2SAlex Williamson return; 1398f565a7c2SAlex Williamson 139923e258e1SAlex Williamson sg_init_table(sg, 2); 140023e258e1SAlex Williamson 1401f565a7c2SAlex Williamson /* Store the unicast list and count in the front of the buffer */ 1402fdd819b2SMichael S. Tsirkin mac_data->entries = cpu_to_virtio32(vi->vdev, uc_count); 1403ccffad25SJiri Pirko i = 0; 140432e7bfc4SJiri Pirko netdev_for_each_uc_addr(ha, dev) 1405ccffad25SJiri Pirko memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN); 1406f565a7c2SAlex Williamson 1407f565a7c2SAlex Williamson sg_set_buf(&sg[0], mac_data, 140832e7bfc4SJiri Pirko sizeof(mac_data->entries) + (uc_count * ETH_ALEN)); 1409f565a7c2SAlex Williamson 1410f565a7c2SAlex Williamson /* multicast list and count fill the end */ 141132e7bfc4SJiri Pirko mac_data = (void *)&mac_data->macs[uc_count][0]; 1412f565a7c2SAlex Williamson 1413fdd819b2SMichael S. Tsirkin mac_data->entries = cpu_to_virtio32(vi->vdev, mc_count); 1414567ec874SJiri Pirko i = 0; 141522bedad3SJiri Pirko netdev_for_each_mc_addr(ha, dev) 141622bedad3SJiri Pirko memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN); 1417f565a7c2SAlex Williamson 1418f565a7c2SAlex Williamson sg_set_buf(&sg[1], mac_data, 14194cd24eafSJiri Pirko sizeof(mac_data->entries) + (mc_count * ETH_ALEN)); 1420f565a7c2SAlex Williamson 1421f565a7c2SAlex Williamson if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC, 1422d24bae32Sstephen hemminger VIRTIO_NET_CTRL_MAC_TABLE_SET, sg)) 142399e872aeSThomas Huth dev_warn(&dev->dev, "Failed to set MAC filter table.\n"); 1424f565a7c2SAlex Williamson 1425f565a7c2SAlex Williamson kfree(buf); 14262af7698eSAlex Williamson } 14272af7698eSAlex Williamson 142880d5c368SPatrick McHardy static int virtnet_vlan_rx_add_vid(struct net_device *dev, 142980d5c368SPatrick McHardy __be16 proto, u16 vid) 14300bde9569SAlex Williamson { 14310bde9569SAlex Williamson struct virtnet_info *vi = netdev_priv(dev); 14320bde9569SAlex Williamson struct scatterlist sg; 14330bde9569SAlex Williamson 1434a725ee3eSAndy Lutomirski vi->ctrl_vid = vid; 1435a725ee3eSAndy Lutomirski sg_init_one(&sg, &vi->ctrl_vid, sizeof(vi->ctrl_vid)); 14360bde9569SAlex Williamson 14370bde9569SAlex Williamson if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN, 1438d24bae32Sstephen hemminger VIRTIO_NET_CTRL_VLAN_ADD, &sg)) 14390bde9569SAlex Williamson dev_warn(&dev->dev, "Failed to add VLAN ID %d.\n", vid); 14408e586137SJiri Pirko return 0; 14410bde9569SAlex Williamson } 14420bde9569SAlex Williamson 144380d5c368SPatrick McHardy static int virtnet_vlan_rx_kill_vid(struct net_device *dev, 144480d5c368SPatrick McHardy __be16 proto, u16 vid) 14450bde9569SAlex Williamson { 14460bde9569SAlex Williamson struct virtnet_info *vi = netdev_priv(dev); 14470bde9569SAlex Williamson struct scatterlist sg; 14480bde9569SAlex Williamson 1449a725ee3eSAndy Lutomirski vi->ctrl_vid = vid; 1450a725ee3eSAndy Lutomirski sg_init_one(&sg, &vi->ctrl_vid, sizeof(vi->ctrl_vid)); 14510bde9569SAlex Williamson 14520bde9569SAlex Williamson if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN, 1453d24bae32Sstephen hemminger VIRTIO_NET_CTRL_VLAN_DEL, &sg)) 14540bde9569SAlex Williamson dev_warn(&dev->dev, "Failed to kill VLAN ID %d.\n", vid); 14558e586137SJiri Pirko return 0; 14560bde9569SAlex Williamson } 14570bde9569SAlex Williamson 14588898c21cSWanlong Gao static void virtnet_clean_affinity(struct virtnet_info *vi, long hcpu) 1459986a4f4dSJason Wang { 1460986a4f4dSJason Wang int i; 14618898c21cSWanlong Gao 14628898c21cSWanlong Gao if (vi->affinity_hint_set) { 14638898c21cSWanlong Gao for (i = 0; i < vi->max_queue_pairs; i++) { 14648898c21cSWanlong Gao virtqueue_set_affinity(vi->rq[i].vq, -1); 14658898c21cSWanlong Gao virtqueue_set_affinity(vi->sq[i].vq, -1); 14668898c21cSWanlong Gao } 14678898c21cSWanlong Gao 14688898c21cSWanlong Gao vi->affinity_hint_set = false; 14698898c21cSWanlong Gao } 14708898c21cSWanlong Gao } 14718898c21cSWanlong Gao 14728898c21cSWanlong Gao static void virtnet_set_affinity(struct virtnet_info *vi) 1473986a4f4dSJason Wang { 1474986a4f4dSJason Wang int i; 147547be2479SWanlong Gao int cpu; 1476986a4f4dSJason Wang 1477986a4f4dSJason Wang /* In multiqueue mode, when the number of cpu is equal to the number of 1478986a4f4dSJason Wang * queue pairs, we let the queue pairs to be private to one cpu by 1479986a4f4dSJason Wang * setting the affinity hint to eliminate the contention. 1480986a4f4dSJason Wang */ 14818898c21cSWanlong Gao if (vi->curr_queue_pairs == 1 || 14828898c21cSWanlong Gao vi->max_queue_pairs != num_online_cpus()) { 14838898c21cSWanlong Gao virtnet_clean_affinity(vi, -1); 1484986a4f4dSJason Wang return; 1485986a4f4dSJason Wang } 1486986a4f4dSJason Wang 148747be2479SWanlong Gao i = 0; 148847be2479SWanlong Gao for_each_online_cpu(cpu) { 1489986a4f4dSJason Wang virtqueue_set_affinity(vi->rq[i].vq, cpu); 1490986a4f4dSJason Wang virtqueue_set_affinity(vi->sq[i].vq, cpu); 14919bb8ca86SJason Wang netif_set_xps_queue(vi->dev, cpumask_of(cpu), i); 149247be2479SWanlong Gao i++; 1493986a4f4dSJason Wang } 1494986a4f4dSJason Wang 1495986a4f4dSJason Wang vi->affinity_hint_set = true; 149647be2479SWanlong Gao } 1497986a4f4dSJason Wang 14988017c279SSebastian Andrzej Siewior static int virtnet_cpu_online(unsigned int cpu, struct hlist_node *node) 14998de4b2f3SWanlong Gao { 15008017c279SSebastian Andrzej Siewior struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info, 15018017c279SSebastian Andrzej Siewior node); 15028de4b2f3SWanlong Gao virtnet_set_affinity(vi); 15038017c279SSebastian Andrzej Siewior return 0; 15048de4b2f3SWanlong Gao } 15053ab098dfSJason Wang 15068017c279SSebastian Andrzej Siewior static int virtnet_cpu_dead(unsigned int cpu, struct hlist_node *node) 15078017c279SSebastian Andrzej Siewior { 15088017c279SSebastian Andrzej Siewior struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info, 15098017c279SSebastian Andrzej Siewior node_dead); 15108017c279SSebastian Andrzej Siewior virtnet_set_affinity(vi); 15118017c279SSebastian Andrzej Siewior return 0; 15128017c279SSebastian Andrzej Siewior } 15138017c279SSebastian Andrzej Siewior 15148017c279SSebastian Andrzej Siewior static int virtnet_cpu_down_prep(unsigned int cpu, struct hlist_node *node) 15158017c279SSebastian Andrzej Siewior { 15168017c279SSebastian Andrzej Siewior struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info, 15178017c279SSebastian Andrzej Siewior node); 15188017c279SSebastian Andrzej Siewior 15198017c279SSebastian Andrzej Siewior virtnet_clean_affinity(vi, cpu); 15208017c279SSebastian Andrzej Siewior return 0; 15218017c279SSebastian Andrzej Siewior } 15228017c279SSebastian Andrzej Siewior 15238017c279SSebastian Andrzej Siewior static enum cpuhp_state virtionet_online; 15248017c279SSebastian Andrzej Siewior 15258017c279SSebastian Andrzej Siewior static int virtnet_cpu_notif_add(struct virtnet_info *vi) 15268017c279SSebastian Andrzej Siewior { 15278017c279SSebastian Andrzej Siewior int ret; 15288017c279SSebastian Andrzej Siewior 15298017c279SSebastian Andrzej Siewior ret = cpuhp_state_add_instance_nocalls(virtionet_online, &vi->node); 15308017c279SSebastian Andrzej Siewior if (ret) 15318017c279SSebastian Andrzej Siewior return ret; 15328017c279SSebastian Andrzej Siewior ret = cpuhp_state_add_instance_nocalls(CPUHP_VIRT_NET_DEAD, 15338017c279SSebastian Andrzej Siewior &vi->node_dead); 15348017c279SSebastian Andrzej Siewior if (!ret) 15358017c279SSebastian Andrzej Siewior return ret; 15368017c279SSebastian Andrzej Siewior cpuhp_state_remove_instance_nocalls(virtionet_online, &vi->node); 15378017c279SSebastian Andrzej Siewior return ret; 15388017c279SSebastian Andrzej Siewior } 15398017c279SSebastian Andrzej Siewior 15408017c279SSebastian Andrzej Siewior static void virtnet_cpu_notif_remove(struct virtnet_info *vi) 15418017c279SSebastian Andrzej Siewior { 15428017c279SSebastian Andrzej Siewior cpuhp_state_remove_instance_nocalls(virtionet_online, &vi->node); 15438017c279SSebastian Andrzej Siewior cpuhp_state_remove_instance_nocalls(CPUHP_VIRT_NET_DEAD, 15448017c279SSebastian Andrzej Siewior &vi->node_dead); 1545a9ea3fc6SHerbert Xu } 1546a9ea3fc6SHerbert Xu 15478f9f4668SRick Jones static void virtnet_get_ringparam(struct net_device *dev, 15488f9f4668SRick Jones struct ethtool_ringparam *ring) 15498f9f4668SRick Jones { 15508f9f4668SRick Jones struct virtnet_info *vi = netdev_priv(dev); 15518f9f4668SRick Jones 1552986a4f4dSJason Wang ring->rx_max_pending = virtqueue_get_vring_size(vi->rq[0].vq); 1553986a4f4dSJason Wang ring->tx_max_pending = virtqueue_get_vring_size(vi->sq[0].vq); 15548f9f4668SRick Jones ring->rx_pending = ring->rx_max_pending; 15558f9f4668SRick Jones ring->tx_pending = ring->tx_max_pending; 15568f9f4668SRick Jones } 15578f9f4668SRick Jones 155866846048SRick Jones 155966846048SRick Jones static void virtnet_get_drvinfo(struct net_device *dev, 156066846048SRick Jones struct ethtool_drvinfo *info) 156166846048SRick Jones { 156266846048SRick Jones struct virtnet_info *vi = netdev_priv(dev); 156366846048SRick Jones struct virtio_device *vdev = vi->vdev; 156466846048SRick Jones 156566846048SRick Jones strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver)); 156666846048SRick Jones strlcpy(info->version, VIRTNET_DRIVER_VERSION, sizeof(info->version)); 156766846048SRick Jones strlcpy(info->bus_info, virtio_bus_name(vdev), sizeof(info->bus_info)); 156866846048SRick Jones 156966846048SRick Jones } 157066846048SRick Jones 1571d73bcd2cSJason Wang /* TODO: Eliminate OOO packets during switching */ 1572d73bcd2cSJason Wang static int virtnet_set_channels(struct net_device *dev, 1573d73bcd2cSJason Wang struct ethtool_channels *channels) 1574d73bcd2cSJason Wang { 1575d73bcd2cSJason Wang struct virtnet_info *vi = netdev_priv(dev); 1576d73bcd2cSJason Wang u16 queue_pairs = channels->combined_count; 1577d73bcd2cSJason Wang int err; 1578d73bcd2cSJason Wang 1579d73bcd2cSJason Wang /* We don't support separate rx/tx channels. 1580d73bcd2cSJason Wang * We don't allow setting 'other' channels. 1581d73bcd2cSJason Wang */ 1582d73bcd2cSJason Wang if (channels->rx_count || channels->tx_count || channels->other_count) 1583d73bcd2cSJason Wang return -EINVAL; 1584d73bcd2cSJason Wang 1585c18e9cd6SAmos Kong if (queue_pairs > vi->max_queue_pairs || queue_pairs == 0) 1586d73bcd2cSJason Wang return -EINVAL; 1587d73bcd2cSJason Wang 1588f600b690SJohn Fastabend /* For now we don't support modifying channels while XDP is loaded 1589f600b690SJohn Fastabend * also when XDP is loaded all RX queues have XDP programs so we only 1590f600b690SJohn Fastabend * need to check a single RX queue. 1591f600b690SJohn Fastabend */ 1592f600b690SJohn Fastabend if (vi->rq[0].xdp_prog) 1593f600b690SJohn Fastabend return -EINVAL; 1594f600b690SJohn Fastabend 159547be2479SWanlong Gao get_online_cpus(); 1596*47315329SJohn Fastabend err = _virtnet_set_queues(vi, queue_pairs); 1597d73bcd2cSJason Wang if (!err) { 1598d73bcd2cSJason Wang netif_set_real_num_tx_queues(dev, queue_pairs); 1599d73bcd2cSJason Wang netif_set_real_num_rx_queues(dev, queue_pairs); 1600d73bcd2cSJason Wang 16018898c21cSWanlong Gao virtnet_set_affinity(vi); 1602d73bcd2cSJason Wang } 160347be2479SWanlong Gao put_online_cpus(); 1604d73bcd2cSJason Wang 1605d73bcd2cSJason Wang return err; 1606d73bcd2cSJason Wang } 1607d73bcd2cSJason Wang 1608d73bcd2cSJason Wang static void virtnet_get_channels(struct net_device *dev, 1609d73bcd2cSJason Wang struct ethtool_channels *channels) 1610d73bcd2cSJason Wang { 1611d73bcd2cSJason Wang struct virtnet_info *vi = netdev_priv(dev); 1612d73bcd2cSJason Wang 1613d73bcd2cSJason Wang channels->combined_count = vi->curr_queue_pairs; 1614d73bcd2cSJason Wang channels->max_combined = vi->max_queue_pairs; 1615d73bcd2cSJason Wang channels->max_other = 0; 1616d73bcd2cSJason Wang channels->rx_count = 0; 1617d73bcd2cSJason Wang channels->tx_count = 0; 1618d73bcd2cSJason Wang channels->other_count = 0; 1619d73bcd2cSJason Wang } 1620d73bcd2cSJason Wang 162116032be5SNikolay Aleksandrov /* Check if the user is trying to change anything besides speed/duplex */ 162216032be5SNikolay Aleksandrov static bool virtnet_validate_ethtool_cmd(const struct ethtool_cmd *cmd) 162316032be5SNikolay Aleksandrov { 162416032be5SNikolay Aleksandrov struct ethtool_cmd diff1 = *cmd; 162516032be5SNikolay Aleksandrov struct ethtool_cmd diff2 = {}; 162616032be5SNikolay Aleksandrov 16270cf3ace9SNikolay Aleksandrov /* cmd is always set so we need to clear it, validate the port type 16280cf3ace9SNikolay Aleksandrov * and also without autonegotiation we can ignore advertising 16290cf3ace9SNikolay Aleksandrov */ 163016032be5SNikolay Aleksandrov ethtool_cmd_speed_set(&diff1, 0); 16310cf3ace9SNikolay Aleksandrov diff2.port = PORT_OTHER; 163216032be5SNikolay Aleksandrov diff1.advertising = 0; 163316032be5SNikolay Aleksandrov diff1.duplex = 0; 163416032be5SNikolay Aleksandrov diff1.cmd = 0; 163516032be5SNikolay Aleksandrov 163616032be5SNikolay Aleksandrov return !memcmp(&diff1, &diff2, sizeof(diff1)); 163716032be5SNikolay Aleksandrov } 163816032be5SNikolay Aleksandrov 163916032be5SNikolay Aleksandrov static int virtnet_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) 164016032be5SNikolay Aleksandrov { 164116032be5SNikolay Aleksandrov struct virtnet_info *vi = netdev_priv(dev); 164216032be5SNikolay Aleksandrov u32 speed; 164316032be5SNikolay Aleksandrov 164416032be5SNikolay Aleksandrov speed = ethtool_cmd_speed(cmd); 164516032be5SNikolay Aleksandrov /* don't allow custom speed and duplex */ 164616032be5SNikolay Aleksandrov if (!ethtool_validate_speed(speed) || 164716032be5SNikolay Aleksandrov !ethtool_validate_duplex(cmd->duplex) || 164816032be5SNikolay Aleksandrov !virtnet_validate_ethtool_cmd(cmd)) 164916032be5SNikolay Aleksandrov return -EINVAL; 165016032be5SNikolay Aleksandrov vi->speed = speed; 165116032be5SNikolay Aleksandrov vi->duplex = cmd->duplex; 165216032be5SNikolay Aleksandrov 165316032be5SNikolay Aleksandrov return 0; 165416032be5SNikolay Aleksandrov } 165516032be5SNikolay Aleksandrov 165616032be5SNikolay Aleksandrov static int virtnet_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) 165716032be5SNikolay Aleksandrov { 165816032be5SNikolay Aleksandrov struct virtnet_info *vi = netdev_priv(dev); 165916032be5SNikolay Aleksandrov 166016032be5SNikolay Aleksandrov ethtool_cmd_speed_set(cmd, vi->speed); 166116032be5SNikolay Aleksandrov cmd->duplex = vi->duplex; 166216032be5SNikolay Aleksandrov cmd->port = PORT_OTHER; 166316032be5SNikolay Aleksandrov 166416032be5SNikolay Aleksandrov return 0; 166516032be5SNikolay Aleksandrov } 166616032be5SNikolay Aleksandrov 166716032be5SNikolay Aleksandrov static void virtnet_init_settings(struct net_device *dev) 166816032be5SNikolay Aleksandrov { 166916032be5SNikolay Aleksandrov struct virtnet_info *vi = netdev_priv(dev); 167016032be5SNikolay Aleksandrov 167116032be5SNikolay Aleksandrov vi->speed = SPEED_UNKNOWN; 167216032be5SNikolay Aleksandrov vi->duplex = DUPLEX_UNKNOWN; 167316032be5SNikolay Aleksandrov } 167416032be5SNikolay Aleksandrov 16750fc0b732SStephen Hemminger static const struct ethtool_ops virtnet_ethtool_ops = { 167666846048SRick Jones .get_drvinfo = virtnet_get_drvinfo, 16779f4d26d0SMark McLoughlin .get_link = ethtool_op_get_link, 16788f9f4668SRick Jones .get_ringparam = virtnet_get_ringparam, 1679d73bcd2cSJason Wang .set_channels = virtnet_set_channels, 1680d73bcd2cSJason Wang .get_channels = virtnet_get_channels, 1681074c3582SJacob Keller .get_ts_info = ethtool_op_get_ts_info, 168216032be5SNikolay Aleksandrov .get_settings = virtnet_get_settings, 168316032be5SNikolay Aleksandrov .set_settings = virtnet_set_settings, 1684a9ea3fc6SHerbert Xu }; 1685a9ea3fc6SHerbert Xu 1686f600b690SJohn Fastabend static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog) 1687f600b690SJohn Fastabend { 1688f600b690SJohn Fastabend unsigned long int max_sz = PAGE_SIZE - sizeof(struct padded_vnet_hdr); 1689f600b690SJohn Fastabend struct virtnet_info *vi = netdev_priv(dev); 1690f600b690SJohn Fastabend struct bpf_prog *old_prog; 1691672aafd5SJohn Fastabend u16 xdp_qp = 0, curr_qp; 1692672aafd5SJohn Fastabend int i, err; 1693f600b690SJohn Fastabend 1694529ec6acSJakub Kicinski if (prog && prog->xdp_adjust_head) { 1695529ec6acSJakub Kicinski netdev_warn(dev, "Does not support bpf_xdp_adjust_head()\n"); 1696529ec6acSJakub Kicinski return -EOPNOTSUPP; 1697529ec6acSJakub Kicinski } 1698529ec6acSJakub Kicinski 1699f600b690SJohn Fastabend if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO4) || 170092502fe8SJason Wang virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO6) || 170192502fe8SJason Wang virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_ECN) || 170292502fe8SJason Wang virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_UFO)) { 1703f600b690SJohn Fastabend netdev_warn(dev, "can't set XDP while host is implementing LRO, disable LRO first\n"); 1704f600b690SJohn Fastabend return -EOPNOTSUPP; 1705f600b690SJohn Fastabend } 1706f600b690SJohn Fastabend 1707f600b690SJohn Fastabend if (vi->mergeable_rx_bufs && !vi->any_header_sg) { 1708f600b690SJohn Fastabend netdev_warn(dev, "XDP expects header/data in single page, any_header_sg required\n"); 1709f600b690SJohn Fastabend return -EINVAL; 1710f600b690SJohn Fastabend } 1711f600b690SJohn Fastabend 1712f600b690SJohn Fastabend if (dev->mtu > max_sz) { 1713f600b690SJohn Fastabend netdev_warn(dev, "XDP requires MTU less than %lu\n", max_sz); 1714f600b690SJohn Fastabend return -EINVAL; 1715f600b690SJohn Fastabend } 1716f600b690SJohn Fastabend 1717672aafd5SJohn Fastabend curr_qp = vi->curr_queue_pairs - vi->xdp_queue_pairs; 1718672aafd5SJohn Fastabend if (prog) 1719672aafd5SJohn Fastabend xdp_qp = nr_cpu_ids; 1720672aafd5SJohn Fastabend 1721672aafd5SJohn Fastabend /* XDP requires extra queues for XDP_TX */ 1722672aafd5SJohn Fastabend if (curr_qp + xdp_qp > vi->max_queue_pairs) { 1723672aafd5SJohn Fastabend netdev_warn(dev, "request %i queues but max is %i\n", 1724672aafd5SJohn Fastabend curr_qp + xdp_qp, vi->max_queue_pairs); 1725672aafd5SJohn Fastabend return -ENOMEM; 1726672aafd5SJohn Fastabend } 1727672aafd5SJohn Fastabend 1728*47315329SJohn Fastabend err = _virtnet_set_queues(vi, curr_qp + xdp_qp); 1729672aafd5SJohn Fastabend if (err) { 1730672aafd5SJohn Fastabend dev_warn(&dev->dev, "XDP Device queue allocation failure.\n"); 1731672aafd5SJohn Fastabend return err; 1732672aafd5SJohn Fastabend } 1733672aafd5SJohn Fastabend 1734f600b690SJohn Fastabend if (prog) { 1735f600b690SJohn Fastabend prog = bpf_prog_add(prog, vi->max_queue_pairs - 1); 1736672aafd5SJohn Fastabend if (IS_ERR(prog)) { 1737*47315329SJohn Fastabend _virtnet_set_queues(vi, curr_qp); 1738f600b690SJohn Fastabend return PTR_ERR(prog); 1739f600b690SJohn Fastabend } 1740672aafd5SJohn Fastabend } 1741672aafd5SJohn Fastabend 1742672aafd5SJohn Fastabend vi->xdp_queue_pairs = xdp_qp; 1743672aafd5SJohn Fastabend netif_set_real_num_rx_queues(dev, curr_qp + xdp_qp); 1744f600b690SJohn Fastabend 1745f600b690SJohn Fastabend for (i = 0; i < vi->max_queue_pairs; i++) { 1746f600b690SJohn Fastabend old_prog = rtnl_dereference(vi->rq[i].xdp_prog); 1747f600b690SJohn Fastabend rcu_assign_pointer(vi->rq[i].xdp_prog, prog); 1748f600b690SJohn Fastabend if (old_prog) 1749f600b690SJohn Fastabend bpf_prog_put(old_prog); 1750f600b690SJohn Fastabend } 1751f600b690SJohn Fastabend 1752f600b690SJohn Fastabend return 0; 1753f600b690SJohn Fastabend } 1754f600b690SJohn Fastabend 1755f600b690SJohn Fastabend static bool virtnet_xdp_query(struct net_device *dev) 1756f600b690SJohn Fastabend { 1757f600b690SJohn Fastabend struct virtnet_info *vi = netdev_priv(dev); 1758f600b690SJohn Fastabend int i; 1759f600b690SJohn Fastabend 1760f600b690SJohn Fastabend for (i = 0; i < vi->max_queue_pairs; i++) { 1761f600b690SJohn Fastabend if (vi->rq[i].xdp_prog) 1762f600b690SJohn Fastabend return true; 1763f600b690SJohn Fastabend } 1764f600b690SJohn Fastabend return false; 1765f600b690SJohn Fastabend } 1766f600b690SJohn Fastabend 1767f600b690SJohn Fastabend static int virtnet_xdp(struct net_device *dev, struct netdev_xdp *xdp) 1768f600b690SJohn Fastabend { 1769f600b690SJohn Fastabend switch (xdp->command) { 1770f600b690SJohn Fastabend case XDP_SETUP_PROG: 1771f600b690SJohn Fastabend return virtnet_xdp_set(dev, xdp->prog); 1772f600b690SJohn Fastabend case XDP_QUERY_PROG: 1773f600b690SJohn Fastabend xdp->prog_attached = virtnet_xdp_query(dev); 1774f600b690SJohn Fastabend return 0; 1775f600b690SJohn Fastabend default: 1776f600b690SJohn Fastabend return -EINVAL; 1777f600b690SJohn Fastabend } 1778f600b690SJohn Fastabend } 1779f600b690SJohn Fastabend 178076288b4eSStephen Hemminger static const struct net_device_ops virtnet_netdev = { 178176288b4eSStephen Hemminger .ndo_open = virtnet_open, 178276288b4eSStephen Hemminger .ndo_stop = virtnet_close, 178376288b4eSStephen Hemminger .ndo_start_xmit = start_xmit, 178476288b4eSStephen Hemminger .ndo_validate_addr = eth_validate_addr, 17859c46f6d4SAlex Williamson .ndo_set_mac_address = virtnet_set_mac_address, 17862af7698eSAlex Williamson .ndo_set_rx_mode = virtnet_set_rx_mode, 17873fa2a1dfSstephen hemminger .ndo_get_stats64 = virtnet_stats, 17881824a989SAlex Williamson .ndo_vlan_rx_add_vid = virtnet_vlan_rx_add_vid, 17891824a989SAlex Williamson .ndo_vlan_rx_kill_vid = virtnet_vlan_rx_kill_vid, 179076288b4eSStephen Hemminger #ifdef CONFIG_NET_POLL_CONTROLLER 179176288b4eSStephen Hemminger .ndo_poll_controller = virtnet_netpoll, 179276288b4eSStephen Hemminger #endif 1793f600b690SJohn Fastabend .ndo_xdp = virtnet_xdp, 179476288b4eSStephen Hemminger }; 179576288b4eSStephen Hemminger 1796586d17c5SJason Wang static void virtnet_config_changed_work(struct work_struct *work) 17979f4d26d0SMark McLoughlin { 1798586d17c5SJason Wang struct virtnet_info *vi = 1799586d17c5SJason Wang container_of(work, struct virtnet_info, config_work); 18009f4d26d0SMark McLoughlin u16 v; 18019f4d26d0SMark McLoughlin 1802855e0c52SRusty Russell if (virtio_cread_feature(vi->vdev, VIRTIO_NET_F_STATUS, 1803855e0c52SRusty Russell struct virtio_net_config, status, &v) < 0) 1804507613bfSMichael S. Tsirkin return; 1805586d17c5SJason Wang 1806586d17c5SJason Wang if (v & VIRTIO_NET_S_ANNOUNCE) { 1807ee89bab1SAmerigo Wang netdev_notify_peers(vi->dev); 1808586d17c5SJason Wang virtnet_ack_link_announce(vi); 1809586d17c5SJason Wang } 18109f4d26d0SMark McLoughlin 18119f4d26d0SMark McLoughlin /* Ignore unknown (future) status bits */ 18129f4d26d0SMark McLoughlin v &= VIRTIO_NET_S_LINK_UP; 18139f4d26d0SMark McLoughlin 18149f4d26d0SMark McLoughlin if (vi->status == v) 1815507613bfSMichael S. Tsirkin return; 18169f4d26d0SMark McLoughlin 18179f4d26d0SMark McLoughlin vi->status = v; 18189f4d26d0SMark McLoughlin 18199f4d26d0SMark McLoughlin if (vi->status & VIRTIO_NET_S_LINK_UP) { 18209f4d26d0SMark McLoughlin netif_carrier_on(vi->dev); 1821986a4f4dSJason Wang netif_tx_wake_all_queues(vi->dev); 18229f4d26d0SMark McLoughlin } else { 18239f4d26d0SMark McLoughlin netif_carrier_off(vi->dev); 1824986a4f4dSJason Wang netif_tx_stop_all_queues(vi->dev); 18259f4d26d0SMark McLoughlin } 18269f4d26d0SMark McLoughlin } 18279f4d26d0SMark McLoughlin 18289f4d26d0SMark McLoughlin static void virtnet_config_changed(struct virtio_device *vdev) 18299f4d26d0SMark McLoughlin { 18309f4d26d0SMark McLoughlin struct virtnet_info *vi = vdev->priv; 18319f4d26d0SMark McLoughlin 18323b07e9caSTejun Heo schedule_work(&vi->config_work); 18339f4d26d0SMark McLoughlin } 18349f4d26d0SMark McLoughlin 1835986a4f4dSJason Wang static void virtnet_free_queues(struct virtnet_info *vi) 1836986a4f4dSJason Wang { 1837d4fb84eeSAndrey Vagin int i; 1838d4fb84eeSAndrey Vagin 1839ab3971b1SJason Wang for (i = 0; i < vi->max_queue_pairs; i++) { 1840ab3971b1SJason Wang napi_hash_del(&vi->rq[i].napi); 1841d4fb84eeSAndrey Vagin netif_napi_del(&vi->rq[i].napi); 1842ab3971b1SJason Wang } 1843d4fb84eeSAndrey Vagin 1844963abe5cSEric Dumazet /* We called napi_hash_del() before netif_napi_del(), 1845963abe5cSEric Dumazet * we need to respect an RCU grace period before freeing vi->rq 1846963abe5cSEric Dumazet */ 1847963abe5cSEric Dumazet synchronize_net(); 1848963abe5cSEric Dumazet 1849986a4f4dSJason Wang kfree(vi->rq); 1850986a4f4dSJason Wang kfree(vi->sq); 1851986a4f4dSJason Wang } 1852986a4f4dSJason Wang 1853*47315329SJohn Fastabend static void _free_receive_bufs(struct virtnet_info *vi) 1854986a4f4dSJason Wang { 1855f600b690SJohn Fastabend struct bpf_prog *old_prog; 1856986a4f4dSJason Wang int i; 1857986a4f4dSJason Wang 1858986a4f4dSJason Wang for (i = 0; i < vi->max_queue_pairs; i++) { 1859986a4f4dSJason Wang while (vi->rq[i].pages) 1860986a4f4dSJason Wang __free_pages(get_a_page(&vi->rq[i], GFP_KERNEL), 0); 1861f600b690SJohn Fastabend 1862f600b690SJohn Fastabend old_prog = rtnl_dereference(vi->rq[i].xdp_prog); 1863f600b690SJohn Fastabend RCU_INIT_POINTER(vi->rq[i].xdp_prog, NULL); 1864f600b690SJohn Fastabend if (old_prog) 1865f600b690SJohn Fastabend bpf_prog_put(old_prog); 1866986a4f4dSJason Wang } 1867*47315329SJohn Fastabend } 1868*47315329SJohn Fastabend 1869*47315329SJohn Fastabend static void free_receive_bufs(struct virtnet_info *vi) 1870*47315329SJohn Fastabend { 1871*47315329SJohn Fastabend rtnl_lock(); 1872*47315329SJohn Fastabend _free_receive_bufs(vi); 1873f600b690SJohn Fastabend rtnl_unlock(); 1874986a4f4dSJason Wang } 1875986a4f4dSJason Wang 1876fb51879dSMichael Dalton static void free_receive_page_frags(struct virtnet_info *vi) 1877fb51879dSMichael Dalton { 1878fb51879dSMichael Dalton int i; 1879fb51879dSMichael Dalton for (i = 0; i < vi->max_queue_pairs; i++) 1880fb51879dSMichael Dalton if (vi->rq[i].alloc_frag.page) 1881fb51879dSMichael Dalton put_page(vi->rq[i].alloc_frag.page); 1882fb51879dSMichael Dalton } 1883fb51879dSMichael Dalton 1884b68df015SJohn Fastabend static bool is_xdp_raw_buffer_queue(struct virtnet_info *vi, int q) 188556434a01SJohn Fastabend { 1886b68df015SJohn Fastabend /* For small receive mode always use kfree_skb variants */ 1887b68df015SJohn Fastabend if (!vi->mergeable_rx_bufs) 1888b68df015SJohn Fastabend return false; 1889b68df015SJohn Fastabend 189056434a01SJohn Fastabend if (q < (vi->curr_queue_pairs - vi->xdp_queue_pairs)) 189156434a01SJohn Fastabend return false; 189256434a01SJohn Fastabend else if (q < vi->curr_queue_pairs) 189356434a01SJohn Fastabend return true; 189456434a01SJohn Fastabend else 189556434a01SJohn Fastabend return false; 189656434a01SJohn Fastabend } 189756434a01SJohn Fastabend 1898986a4f4dSJason Wang static void free_unused_bufs(struct virtnet_info *vi) 1899986a4f4dSJason Wang { 1900986a4f4dSJason Wang void *buf; 1901986a4f4dSJason Wang int i; 1902986a4f4dSJason Wang 1903986a4f4dSJason Wang for (i = 0; i < vi->max_queue_pairs; i++) { 1904986a4f4dSJason Wang struct virtqueue *vq = vi->sq[i].vq; 190556434a01SJohn Fastabend while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) { 1906b68df015SJohn Fastabend if (!is_xdp_raw_buffer_queue(vi, i)) 1907986a4f4dSJason Wang dev_kfree_skb(buf); 190856434a01SJohn Fastabend else 190956434a01SJohn Fastabend put_page(virt_to_head_page(buf)); 191056434a01SJohn Fastabend } 1911986a4f4dSJason Wang } 1912986a4f4dSJason Wang 1913986a4f4dSJason Wang for (i = 0; i < vi->max_queue_pairs; i++) { 1914986a4f4dSJason Wang struct virtqueue *vq = vi->rq[i].vq; 1915986a4f4dSJason Wang 1916986a4f4dSJason Wang while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) { 1917ab7db917SMichael Dalton if (vi->mergeable_rx_bufs) { 1918ab7db917SMichael Dalton unsigned long ctx = (unsigned long)buf; 1919ab7db917SMichael Dalton void *base = mergeable_ctx_to_buf_address(ctx); 1920ab7db917SMichael Dalton put_page(virt_to_head_page(base)); 1921ab7db917SMichael Dalton } else if (vi->big_packets) { 1922fa9fac17SAndrey Vagin give_pages(&vi->rq[i], buf); 1923ab7db917SMichael Dalton } else { 1924986a4f4dSJason Wang dev_kfree_skb(buf); 1925986a4f4dSJason Wang } 1926986a4f4dSJason Wang } 1927986a4f4dSJason Wang } 1928ab7db917SMichael Dalton } 1929986a4f4dSJason Wang 1930e9d7417bSJason Wang static void virtnet_del_vqs(struct virtnet_info *vi) 1931e9d7417bSJason Wang { 1932e9d7417bSJason Wang struct virtio_device *vdev = vi->vdev; 1933e9d7417bSJason Wang 19348898c21cSWanlong Gao virtnet_clean_affinity(vi, -1); 1935986a4f4dSJason Wang 1936e9d7417bSJason Wang vdev->config->del_vqs(vdev); 1937986a4f4dSJason Wang 1938986a4f4dSJason Wang virtnet_free_queues(vi); 1939986a4f4dSJason Wang } 1940986a4f4dSJason Wang 1941986a4f4dSJason Wang static int virtnet_find_vqs(struct virtnet_info *vi) 1942986a4f4dSJason Wang { 1943986a4f4dSJason Wang vq_callback_t **callbacks; 1944986a4f4dSJason Wang struct virtqueue **vqs; 1945986a4f4dSJason Wang int ret = -ENOMEM; 1946986a4f4dSJason Wang int i, total_vqs; 1947986a4f4dSJason Wang const char **names; 1948986a4f4dSJason Wang 1949986a4f4dSJason Wang /* We expect 1 RX virtqueue followed by 1 TX virtqueue, followed by 1950986a4f4dSJason Wang * possible N-1 RX/TX queue pairs used in multiqueue mode, followed by 1951986a4f4dSJason Wang * possible control vq. 1952986a4f4dSJason Wang */ 1953986a4f4dSJason Wang total_vqs = vi->max_queue_pairs * 2 + 1954986a4f4dSJason Wang virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ); 1955986a4f4dSJason Wang 1956986a4f4dSJason Wang /* Allocate space for find_vqs parameters */ 1957986a4f4dSJason Wang vqs = kzalloc(total_vqs * sizeof(*vqs), GFP_KERNEL); 1958986a4f4dSJason Wang if (!vqs) 1959986a4f4dSJason Wang goto err_vq; 1960986a4f4dSJason Wang callbacks = kmalloc(total_vqs * sizeof(*callbacks), GFP_KERNEL); 1961986a4f4dSJason Wang if (!callbacks) 1962986a4f4dSJason Wang goto err_callback; 1963986a4f4dSJason Wang names = kmalloc(total_vqs * sizeof(*names), GFP_KERNEL); 1964986a4f4dSJason Wang if (!names) 1965986a4f4dSJason Wang goto err_names; 1966986a4f4dSJason Wang 1967986a4f4dSJason Wang /* Parameters for control virtqueue, if any */ 1968986a4f4dSJason Wang if (vi->has_cvq) { 1969986a4f4dSJason Wang callbacks[total_vqs - 1] = NULL; 1970986a4f4dSJason Wang names[total_vqs - 1] = "control"; 1971986a4f4dSJason Wang } 1972986a4f4dSJason Wang 1973986a4f4dSJason Wang /* Allocate/initialize parameters for send/receive virtqueues */ 1974986a4f4dSJason Wang for (i = 0; i < vi->max_queue_pairs; i++) { 1975986a4f4dSJason Wang callbacks[rxq2vq(i)] = skb_recv_done; 1976986a4f4dSJason Wang callbacks[txq2vq(i)] = skb_xmit_done; 1977986a4f4dSJason Wang sprintf(vi->rq[i].name, "input.%d", i); 1978986a4f4dSJason Wang sprintf(vi->sq[i].name, "output.%d", i); 1979986a4f4dSJason Wang names[rxq2vq(i)] = vi->rq[i].name; 1980986a4f4dSJason Wang names[txq2vq(i)] = vi->sq[i].name; 1981986a4f4dSJason Wang } 1982986a4f4dSJason Wang 1983986a4f4dSJason Wang ret = vi->vdev->config->find_vqs(vi->vdev, total_vqs, vqs, callbacks, 1984986a4f4dSJason Wang names); 1985986a4f4dSJason Wang if (ret) 1986986a4f4dSJason Wang goto err_find; 1987986a4f4dSJason Wang 1988986a4f4dSJason Wang if (vi->has_cvq) { 1989986a4f4dSJason Wang vi->cvq = vqs[total_vqs - 1]; 1990986a4f4dSJason Wang if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VLAN)) 1991f646968fSPatrick McHardy vi->dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; 1992986a4f4dSJason Wang } 1993986a4f4dSJason Wang 1994986a4f4dSJason Wang for (i = 0; i < vi->max_queue_pairs; i++) { 1995986a4f4dSJason Wang vi->rq[i].vq = vqs[rxq2vq(i)]; 1996986a4f4dSJason Wang vi->sq[i].vq = vqs[txq2vq(i)]; 1997986a4f4dSJason Wang } 1998986a4f4dSJason Wang 1999986a4f4dSJason Wang kfree(names); 2000986a4f4dSJason Wang kfree(callbacks); 2001986a4f4dSJason Wang kfree(vqs); 2002986a4f4dSJason Wang 2003986a4f4dSJason Wang return 0; 2004986a4f4dSJason Wang 2005986a4f4dSJason Wang err_find: 2006986a4f4dSJason Wang kfree(names); 2007986a4f4dSJason Wang err_names: 2008986a4f4dSJason Wang kfree(callbacks); 2009986a4f4dSJason Wang err_callback: 2010986a4f4dSJason Wang kfree(vqs); 2011986a4f4dSJason Wang err_vq: 2012986a4f4dSJason Wang return ret; 2013986a4f4dSJason Wang } 2014986a4f4dSJason Wang 2015986a4f4dSJason Wang static int virtnet_alloc_queues(struct virtnet_info *vi) 2016986a4f4dSJason Wang { 2017986a4f4dSJason Wang int i; 2018986a4f4dSJason Wang 2019986a4f4dSJason Wang vi->sq = kzalloc(sizeof(*vi->sq) * vi->max_queue_pairs, GFP_KERNEL); 2020986a4f4dSJason Wang if (!vi->sq) 2021986a4f4dSJason Wang goto err_sq; 2022986a4f4dSJason Wang vi->rq = kzalloc(sizeof(*vi->rq) * vi->max_queue_pairs, GFP_KERNEL); 2023008d4278SAmerigo Wang if (!vi->rq) 2024986a4f4dSJason Wang goto err_rq; 2025986a4f4dSJason Wang 2026986a4f4dSJason Wang INIT_DELAYED_WORK(&vi->refill, refill_work); 2027986a4f4dSJason Wang for (i = 0; i < vi->max_queue_pairs; i++) { 2028986a4f4dSJason Wang vi->rq[i].pages = NULL; 2029986a4f4dSJason Wang netif_napi_add(vi->dev, &vi->rq[i].napi, virtnet_poll, 2030986a4f4dSJason Wang napi_weight); 2031986a4f4dSJason Wang 2032986a4f4dSJason Wang sg_init_table(vi->rq[i].sg, ARRAY_SIZE(vi->rq[i].sg)); 20335377d758SJohannes Berg ewma_pkt_len_init(&vi->rq[i].mrg_avg_pkt_len); 2034986a4f4dSJason Wang sg_init_table(vi->sq[i].sg, ARRAY_SIZE(vi->sq[i].sg)); 2035986a4f4dSJason Wang } 2036986a4f4dSJason Wang 2037986a4f4dSJason Wang return 0; 2038986a4f4dSJason Wang 2039986a4f4dSJason Wang err_rq: 2040986a4f4dSJason Wang kfree(vi->sq); 2041986a4f4dSJason Wang err_sq: 2042986a4f4dSJason Wang return -ENOMEM; 2043e9d7417bSJason Wang } 2044e9d7417bSJason Wang 20453f9c10b0SAmit Shah static int init_vqs(struct virtnet_info *vi) 20463f9c10b0SAmit Shah { 2047986a4f4dSJason Wang int ret; 20483f9c10b0SAmit Shah 2049986a4f4dSJason Wang /* Allocate send & receive queues */ 2050986a4f4dSJason Wang ret = virtnet_alloc_queues(vi); 2051986a4f4dSJason Wang if (ret) 2052986a4f4dSJason Wang goto err; 20533f9c10b0SAmit Shah 2054986a4f4dSJason Wang ret = virtnet_find_vqs(vi); 2055986a4f4dSJason Wang if (ret) 2056986a4f4dSJason Wang goto err_free; 20573f9c10b0SAmit Shah 205847be2479SWanlong Gao get_online_cpus(); 20598898c21cSWanlong Gao virtnet_set_affinity(vi); 206047be2479SWanlong Gao put_online_cpus(); 206147be2479SWanlong Gao 20623f9c10b0SAmit Shah return 0; 2063986a4f4dSJason Wang 2064986a4f4dSJason Wang err_free: 2065986a4f4dSJason Wang virtnet_free_queues(vi); 2066986a4f4dSJason Wang err: 2067986a4f4dSJason Wang return ret; 20683f9c10b0SAmit Shah } 20693f9c10b0SAmit Shah 2070fbf28d78SMichael Dalton #ifdef CONFIG_SYSFS 2071fbf28d78SMichael Dalton static ssize_t mergeable_rx_buffer_size_show(struct netdev_rx_queue *queue, 2072fbf28d78SMichael Dalton struct rx_queue_attribute *attribute, char *buf) 2073fbf28d78SMichael Dalton { 2074fbf28d78SMichael Dalton struct virtnet_info *vi = netdev_priv(queue->dev); 2075fbf28d78SMichael Dalton unsigned int queue_index = get_netdev_rx_queue_index(queue); 20765377d758SJohannes Berg struct ewma_pkt_len *avg; 2077fbf28d78SMichael Dalton 2078fbf28d78SMichael Dalton BUG_ON(queue_index >= vi->max_queue_pairs); 2079fbf28d78SMichael Dalton avg = &vi->rq[queue_index].mrg_avg_pkt_len; 2080fbf28d78SMichael Dalton return sprintf(buf, "%u\n", get_mergeable_buf_len(avg)); 2081fbf28d78SMichael Dalton } 2082fbf28d78SMichael Dalton 2083fbf28d78SMichael Dalton static struct rx_queue_attribute mergeable_rx_buffer_size_attribute = 2084fbf28d78SMichael Dalton __ATTR_RO(mergeable_rx_buffer_size); 2085fbf28d78SMichael Dalton 2086fbf28d78SMichael Dalton static struct attribute *virtio_net_mrg_rx_attrs[] = { 2087fbf28d78SMichael Dalton &mergeable_rx_buffer_size_attribute.attr, 2088fbf28d78SMichael Dalton NULL 2089fbf28d78SMichael Dalton }; 2090fbf28d78SMichael Dalton 2091fbf28d78SMichael Dalton static const struct attribute_group virtio_net_mrg_rx_group = { 2092fbf28d78SMichael Dalton .name = "virtio_net", 2093fbf28d78SMichael Dalton .attrs = virtio_net_mrg_rx_attrs 2094fbf28d78SMichael Dalton }; 2095fbf28d78SMichael Dalton #endif 2096fbf28d78SMichael Dalton 2097892d6eb1SJason Wang static bool virtnet_fail_on_feature(struct virtio_device *vdev, 2098892d6eb1SJason Wang unsigned int fbit, 2099892d6eb1SJason Wang const char *fname, const char *dname) 2100892d6eb1SJason Wang { 2101892d6eb1SJason Wang if (!virtio_has_feature(vdev, fbit)) 2102892d6eb1SJason Wang return false; 2103892d6eb1SJason Wang 2104892d6eb1SJason Wang dev_err(&vdev->dev, "device advertises feature %s but not %s", 2105892d6eb1SJason Wang fname, dname); 2106892d6eb1SJason Wang 2107892d6eb1SJason Wang return true; 2108892d6eb1SJason Wang } 2109892d6eb1SJason Wang 2110892d6eb1SJason Wang #define VIRTNET_FAIL_ON(vdev, fbit, dbit) \ 2111892d6eb1SJason Wang virtnet_fail_on_feature(vdev, fbit, #fbit, dbit) 2112892d6eb1SJason Wang 2113892d6eb1SJason Wang static bool virtnet_validate_features(struct virtio_device *vdev) 2114892d6eb1SJason Wang { 2115892d6eb1SJason Wang if (!virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ) && 2116892d6eb1SJason Wang (VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_CTRL_RX, 2117892d6eb1SJason Wang "VIRTIO_NET_F_CTRL_VQ") || 2118892d6eb1SJason Wang VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_CTRL_VLAN, 2119892d6eb1SJason Wang "VIRTIO_NET_F_CTRL_VQ") || 2120892d6eb1SJason Wang VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_GUEST_ANNOUNCE, 2121892d6eb1SJason Wang "VIRTIO_NET_F_CTRL_VQ") || 2122892d6eb1SJason Wang VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_MQ, "VIRTIO_NET_F_CTRL_VQ") || 2123892d6eb1SJason Wang VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR, 2124892d6eb1SJason Wang "VIRTIO_NET_F_CTRL_VQ"))) { 2125892d6eb1SJason Wang return false; 2126892d6eb1SJason Wang } 2127892d6eb1SJason Wang 2128892d6eb1SJason Wang return true; 2129892d6eb1SJason Wang } 2130892d6eb1SJason Wang 2131d0c2c997SJarod Wilson #define MIN_MTU ETH_MIN_MTU 2132d0c2c997SJarod Wilson #define MAX_MTU ETH_MAX_MTU 2133d0c2c997SJarod Wilson 2134296f96fcSRusty Russell static int virtnet_probe(struct virtio_device *vdev) 2135296f96fcSRusty Russell { 2136986a4f4dSJason Wang int i, err; 2137296f96fcSRusty Russell struct net_device *dev; 2138296f96fcSRusty Russell struct virtnet_info *vi; 2139986a4f4dSJason Wang u16 max_queue_pairs; 214014de9d11SAaron Conole int mtu; 2141986a4f4dSJason Wang 21426ba42248SMichael S. Tsirkin if (!vdev->config->get) { 21436ba42248SMichael S. Tsirkin dev_err(&vdev->dev, "%s failure: config access disabled\n", 21446ba42248SMichael S. Tsirkin __func__); 21456ba42248SMichael S. Tsirkin return -EINVAL; 21466ba42248SMichael S. Tsirkin } 21476ba42248SMichael S. Tsirkin 2148892d6eb1SJason Wang if (!virtnet_validate_features(vdev)) 2149892d6eb1SJason Wang return -EINVAL; 2150892d6eb1SJason Wang 2151986a4f4dSJason Wang /* Find if host supports multiqueue virtio_net device */ 2152855e0c52SRusty Russell err = virtio_cread_feature(vdev, VIRTIO_NET_F_MQ, 2153855e0c52SRusty Russell struct virtio_net_config, 2154855e0c52SRusty Russell max_virtqueue_pairs, &max_queue_pairs); 2155986a4f4dSJason Wang 2156986a4f4dSJason Wang /* We need at least 2 queue's */ 2157986a4f4dSJason Wang if (err || max_queue_pairs < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN || 2158986a4f4dSJason Wang max_queue_pairs > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX || 2159986a4f4dSJason Wang !virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ)) 2160986a4f4dSJason Wang max_queue_pairs = 1; 2161296f96fcSRusty Russell 2162296f96fcSRusty Russell /* Allocate ourselves a network device with room for our info */ 2163986a4f4dSJason Wang dev = alloc_etherdev_mq(sizeof(struct virtnet_info), max_queue_pairs); 2164296f96fcSRusty Russell if (!dev) 2165296f96fcSRusty Russell return -ENOMEM; 2166296f96fcSRusty Russell 2167296f96fcSRusty Russell /* Set up network device as normal. */ 2168f2f2c8b4SJiri Pirko dev->priv_flags |= IFF_UNICAST_FLT | IFF_LIVE_ADDR_CHANGE; 216976288b4eSStephen Hemminger dev->netdev_ops = &virtnet_netdev; 2170296f96fcSRusty Russell dev->features = NETIF_F_HIGHDMA; 21713fa2a1dfSstephen hemminger 21727ad24ea4SWilfried Klaebe dev->ethtool_ops = &virtnet_ethtool_ops; 2173296f96fcSRusty Russell SET_NETDEV_DEV(dev, &vdev->dev); 2174296f96fcSRusty Russell 2175296f96fcSRusty Russell /* Do we support "hardware" checksums? */ 217698e778c9SMichał Mirosław if (virtio_has_feature(vdev, VIRTIO_NET_F_CSUM)) { 2177296f96fcSRusty Russell /* This opens up the world of extra features. */ 217848900cb6SJason Wang dev->hw_features |= NETIF_F_HW_CSUM | NETIF_F_SG; 217998e778c9SMichał Mirosław if (csum) 218048900cb6SJason Wang dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG; 218198e778c9SMichał Mirosław 218298e778c9SMichał Mirosław if (virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) { 2183e3e3c423SVlad Yasevich dev->hw_features |= NETIF_F_TSO | NETIF_F_UFO 218434a48579SRusty Russell | NETIF_F_TSO_ECN | NETIF_F_TSO6; 218534a48579SRusty Russell } 21865539ae96SRusty Russell /* Individual feature bits: what can host handle? */ 218798e778c9SMichał Mirosław if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO4)) 218898e778c9SMichał Mirosław dev->hw_features |= NETIF_F_TSO; 218998e778c9SMichał Mirosław if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO6)) 219098e778c9SMichał Mirosław dev->hw_features |= NETIF_F_TSO6; 219198e778c9SMichał Mirosław if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_ECN)) 219298e778c9SMichał Mirosław dev->hw_features |= NETIF_F_TSO_ECN; 2193e3e3c423SVlad Yasevich if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_UFO)) 2194e3e3c423SVlad Yasevich dev->hw_features |= NETIF_F_UFO; 219598e778c9SMichał Mirosław 219641f2f127SJason Wang dev->features |= NETIF_F_GSO_ROBUST; 219741f2f127SJason Wang 219898e778c9SMichał Mirosław if (gso) 2199e3e3c423SVlad Yasevich dev->features |= dev->hw_features & (NETIF_F_ALL_TSO|NETIF_F_UFO); 220098e778c9SMichał Mirosław /* (!csum && gso) case will be fixed by register_netdev() */ 2201296f96fcSRusty Russell } 22024f49129bSThomas Huth if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_CSUM)) 22034f49129bSThomas Huth dev->features |= NETIF_F_RXCSUM; 2204296f96fcSRusty Russell 22054fda8302SJason Wang dev->vlan_features = dev->features; 22064fda8302SJason Wang 2207d0c2c997SJarod Wilson /* MTU range: 68 - 65535 */ 2208d0c2c997SJarod Wilson dev->min_mtu = MIN_MTU; 2209d0c2c997SJarod Wilson dev->max_mtu = MAX_MTU; 2210d0c2c997SJarod Wilson 2211296f96fcSRusty Russell /* Configuration may specify what MAC to use. Otherwise random. */ 2212855e0c52SRusty Russell if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC)) 2213855e0c52SRusty Russell virtio_cread_bytes(vdev, 2214a586d4f6SRusty Russell offsetof(struct virtio_net_config, mac), 2215855e0c52SRusty Russell dev->dev_addr, dev->addr_len); 2216855e0c52SRusty Russell else 2217f2cedb63SDanny Kukawka eth_hw_addr_random(dev); 2218296f96fcSRusty Russell 2219296f96fcSRusty Russell /* Set up our device-specific information */ 2220296f96fcSRusty Russell vi = netdev_priv(dev); 2221296f96fcSRusty Russell vi->dev = dev; 2222296f96fcSRusty Russell vi->vdev = vdev; 2223d9d5dcc8SChristian Borntraeger vdev->priv = vi; 22243fa2a1dfSstephen hemminger vi->stats = alloc_percpu(struct virtnet_stats); 22253fa2a1dfSstephen hemminger err = -ENOMEM; 22263fa2a1dfSstephen hemminger if (vi->stats == NULL) 22273fa2a1dfSstephen hemminger goto free; 22283fa2a1dfSstephen hemminger 2229827da44cSJohn Stultz for_each_possible_cpu(i) { 2230827da44cSJohn Stultz struct virtnet_stats *virtnet_stats; 2231827da44cSJohn Stultz virtnet_stats = per_cpu_ptr(vi->stats, i); 2232827da44cSJohn Stultz u64_stats_init(&virtnet_stats->tx_syncp); 2233827da44cSJohn Stultz u64_stats_init(&virtnet_stats->rx_syncp); 2234827da44cSJohn Stultz } 2235827da44cSJohn Stultz 2236586d17c5SJason Wang INIT_WORK(&vi->config_work, virtnet_config_changed_work); 2237296f96fcSRusty Russell 223897402b96SHerbert Xu /* If we can receive ANY GSO packets, we must allocate large ones. */ 22398e95a202SJoe Perches if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) || 22408e95a202SJoe Perches virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6) || 2241e3e3c423SVlad Yasevich virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ECN) || 2242e3e3c423SVlad Yasevich virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_UFO)) 224397402b96SHerbert Xu vi->big_packets = true; 224497402b96SHerbert Xu 22453f2c31d9SMark McLoughlin if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF)) 22463f2c31d9SMark McLoughlin vi->mergeable_rx_bufs = true; 22473f2c31d9SMark McLoughlin 2248d04302b3SMichael S. Tsirkin if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF) || 2249d04302b3SMichael S. Tsirkin virtio_has_feature(vdev, VIRTIO_F_VERSION_1)) 2250012873d0SMichael S. Tsirkin vi->hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf); 2251012873d0SMichael S. Tsirkin else 2252012873d0SMichael S. Tsirkin vi->hdr_len = sizeof(struct virtio_net_hdr); 2253012873d0SMichael S. Tsirkin 225475993300SMichael S. Tsirkin if (virtio_has_feature(vdev, VIRTIO_F_ANY_LAYOUT) || 225575993300SMichael S. Tsirkin virtio_has_feature(vdev, VIRTIO_F_VERSION_1)) 2256e7428e95SMichael S. Tsirkin vi->any_header_sg = true; 2257e7428e95SMichael S. Tsirkin 2258986a4f4dSJason Wang if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ)) 2259986a4f4dSJason Wang vi->has_cvq = true; 2260986a4f4dSJason Wang 226114de9d11SAaron Conole if (virtio_has_feature(vdev, VIRTIO_NET_F_MTU)) { 226214de9d11SAaron Conole mtu = virtio_cread16(vdev, 226314de9d11SAaron Conole offsetof(struct virtio_net_config, 226414de9d11SAaron Conole mtu)); 226593a205eeSAaron Conole if (mtu < dev->min_mtu) { 226614de9d11SAaron Conole __virtio_clear_bit(vdev, VIRTIO_NET_F_MTU); 226793a205eeSAaron Conole } else { 2268d0c2c997SJarod Wilson dev->mtu = mtu; 226993a205eeSAaron Conole dev->max_mtu = mtu; 227093a205eeSAaron Conole } 227114de9d11SAaron Conole } 227214de9d11SAaron Conole 2273012873d0SMichael S. Tsirkin if (vi->any_header_sg) 2274012873d0SMichael S. Tsirkin dev->needed_headroom = vi->hdr_len; 22756ebbc1a6SZhangjie \(HZ\) 227644900010SJason Wang /* Enable multiqueue by default */ 227744900010SJason Wang if (num_online_cpus() >= max_queue_pairs) 227844900010SJason Wang vi->curr_queue_pairs = max_queue_pairs; 227944900010SJason Wang else 228044900010SJason Wang vi->curr_queue_pairs = num_online_cpus(); 2281986a4f4dSJason Wang vi->max_queue_pairs = max_queue_pairs; 2282986a4f4dSJason Wang 2283986a4f4dSJason Wang /* Allocate/initialize the rx/tx queues, and invoke find_vqs */ 22843f9c10b0SAmit Shah err = init_vqs(vi); 2285d2a7dddaSMichael S. Tsirkin if (err) 22869bb8ca86SJason Wang goto free_stats; 2287d2a7dddaSMichael S. Tsirkin 2288fbf28d78SMichael Dalton #ifdef CONFIG_SYSFS 2289fbf28d78SMichael Dalton if (vi->mergeable_rx_bufs) 2290fbf28d78SMichael Dalton dev->sysfs_rx_queue_group = &virtio_net_mrg_rx_group; 2291fbf28d78SMichael Dalton #endif 22920f13b66bSZhi Yong Wu netif_set_real_num_tx_queues(dev, vi->curr_queue_pairs); 22930f13b66bSZhi Yong Wu netif_set_real_num_rx_queues(dev, vi->curr_queue_pairs); 2294986a4f4dSJason Wang 229516032be5SNikolay Aleksandrov virtnet_init_settings(dev); 229616032be5SNikolay Aleksandrov 2297296f96fcSRusty Russell err = register_netdev(dev); 2298296f96fcSRusty Russell if (err) { 2299296f96fcSRusty Russell pr_debug("virtio_net: registering device failed\n"); 2300d2a7dddaSMichael S. Tsirkin goto free_vqs; 2301296f96fcSRusty Russell } 2302b3369c1fSRusty Russell 23034baf1e33SMichael S. Tsirkin virtio_device_ready(vdev); 23044baf1e33SMichael S. Tsirkin 23058017c279SSebastian Andrzej Siewior err = virtnet_cpu_notif_add(vi); 23068de4b2f3SWanlong Gao if (err) { 23078de4b2f3SWanlong Gao pr_debug("virtio_net: registering cpu notifier failed\n"); 2308f00e35e2Swangyunjian goto free_unregister_netdev; 23098de4b2f3SWanlong Gao } 23108de4b2f3SWanlong Gao 2311a220871bSJason Wang virtnet_set_queues(vi, vi->curr_queue_pairs); 231244900010SJason Wang 2313167c25e4SJason Wang /* Assume link up if device can't report link status, 2314167c25e4SJason Wang otherwise get link status from config. */ 2315167c25e4SJason Wang if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) { 2316167c25e4SJason Wang netif_carrier_off(dev); 23173b07e9caSTejun Heo schedule_work(&vi->config_work); 2318167c25e4SJason Wang } else { 2319167c25e4SJason Wang vi->status = VIRTIO_NET_S_LINK_UP; 23204783256eSPantelis Koukousoulas netif_carrier_on(dev); 2321167c25e4SJason Wang } 23229f4d26d0SMark McLoughlin 2323986a4f4dSJason Wang pr_debug("virtnet: registered device %s with %d RX and TX vq's\n", 2324986a4f4dSJason Wang dev->name, max_queue_pairs); 2325986a4f4dSJason Wang 2326296f96fcSRusty Russell return 0; 2327296f96fcSRusty Russell 2328f00e35e2Swangyunjian free_unregister_netdev: 232902465555SMichael S. Tsirkin vi->vdev->config->reset(vdev); 233002465555SMichael S. Tsirkin 2331b3369c1fSRusty Russell unregister_netdev(dev); 2332d2a7dddaSMichael S. Tsirkin free_vqs: 2333986a4f4dSJason Wang cancel_delayed_work_sync(&vi->refill); 2334fb51879dSMichael Dalton free_receive_page_frags(vi); 2335e9d7417bSJason Wang virtnet_del_vqs(vi); 23363fa2a1dfSstephen hemminger free_stats: 23373fa2a1dfSstephen hemminger free_percpu(vi->stats); 2338296f96fcSRusty Russell free: 2339296f96fcSRusty Russell free_netdev(dev); 2340296f96fcSRusty Russell return err; 2341296f96fcSRusty Russell } 2342296f96fcSRusty Russell 234304486ed0SAmit Shah static void remove_vq_common(struct virtnet_info *vi) 2344296f96fcSRusty Russell { 234504486ed0SAmit Shah vi->vdev->config->reset(vi->vdev); 2346830a8a97SShirley Ma 2347830a8a97SShirley Ma /* Free unused buffers in both send and recv, if any. */ 23489ab86bbcSShirley Ma free_unused_bufs(vi); 2349fb6813f4SRusty Russell 2350986a4f4dSJason Wang free_receive_bufs(vi); 2351d2a7dddaSMichael S. Tsirkin 2352fb51879dSMichael Dalton free_receive_page_frags(vi); 2353fb51879dSMichael Dalton 2354986a4f4dSJason Wang virtnet_del_vqs(vi); 235504486ed0SAmit Shah } 235604486ed0SAmit Shah 23578cc085d6SBill Pemberton static void virtnet_remove(struct virtio_device *vdev) 235804486ed0SAmit Shah { 235904486ed0SAmit Shah struct virtnet_info *vi = vdev->priv; 236004486ed0SAmit Shah 23618017c279SSebastian Andrzej Siewior virtnet_cpu_notif_remove(vi); 23628de4b2f3SWanlong Gao 2363102a2786SMichael S. Tsirkin /* Make sure no work handler is accessing the device. */ 2364102a2786SMichael S. Tsirkin flush_work(&vi->config_work); 2365586d17c5SJason Wang 236604486ed0SAmit Shah unregister_netdev(vi->dev); 236704486ed0SAmit Shah 236804486ed0SAmit Shah remove_vq_common(vi); 2369fb6813f4SRusty Russell 23702e66f55bSKrishna Kumar free_percpu(vi->stats); 237174b2553fSRusty Russell free_netdev(vi->dev); 2372296f96fcSRusty Russell } 2373296f96fcSRusty Russell 237489107000SAaron Lu #ifdef CONFIG_PM_SLEEP 23750741bcb5SAmit Shah static int virtnet_freeze(struct virtio_device *vdev) 23760741bcb5SAmit Shah { 23770741bcb5SAmit Shah struct virtnet_info *vi = vdev->priv; 2378986a4f4dSJason Wang int i; 23790741bcb5SAmit Shah 23808017c279SSebastian Andrzej Siewior virtnet_cpu_notif_remove(vi); 2381ec9debbdSJason Wang 2382102a2786SMichael S. Tsirkin /* Make sure no work handler is accessing the device */ 2383102a2786SMichael S. Tsirkin flush_work(&vi->config_work); 2384586d17c5SJason Wang 23850741bcb5SAmit Shah netif_device_detach(vi->dev); 23860741bcb5SAmit Shah cancel_delayed_work_sync(&vi->refill); 23870741bcb5SAmit Shah 238891815639SJason Wang if (netif_running(vi->dev)) { 2389ab3971b1SJason Wang for (i = 0; i < vi->max_queue_pairs; i++) 2390986a4f4dSJason Wang napi_disable(&vi->rq[i].napi); 239191815639SJason Wang } 23920741bcb5SAmit Shah 23930741bcb5SAmit Shah remove_vq_common(vi); 23940741bcb5SAmit Shah 23950741bcb5SAmit Shah return 0; 23960741bcb5SAmit Shah } 23970741bcb5SAmit Shah 23980741bcb5SAmit Shah static int virtnet_restore(struct virtio_device *vdev) 23990741bcb5SAmit Shah { 24000741bcb5SAmit Shah struct virtnet_info *vi = vdev->priv; 2401986a4f4dSJason Wang int err, i; 24020741bcb5SAmit Shah 24030741bcb5SAmit Shah err = init_vqs(vi); 24040741bcb5SAmit Shah if (err) 24050741bcb5SAmit Shah return err; 24060741bcb5SAmit Shah 2407e53fbd11SMichael S. Tsirkin virtio_device_ready(vdev); 2408e53fbd11SMichael S. Tsirkin 24096cd4ce00SJason Wang if (netif_running(vi->dev)) { 241055257d72SSasha Levin for (i = 0; i < vi->curr_queue_pairs; i++) 2411946fa564SMichael S. Tsirkin if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL)) 24123b07e9caSTejun Heo schedule_delayed_work(&vi->refill, 0); 24130741bcb5SAmit Shah 24146cd4ce00SJason Wang for (i = 0; i < vi->max_queue_pairs; i++) 24156cd4ce00SJason Wang virtnet_napi_enable(&vi->rq[i]); 24166cd4ce00SJason Wang } 24176cd4ce00SJason Wang 24186cd4ce00SJason Wang netif_device_attach(vi->dev); 24196cd4ce00SJason Wang 2420986a4f4dSJason Wang virtnet_set_queues(vi, vi->curr_queue_pairs); 2421986a4f4dSJason Wang 24228017c279SSebastian Andrzej Siewior err = virtnet_cpu_notif_add(vi); 2423ec9debbdSJason Wang if (err) 2424ec9debbdSJason Wang return err; 2425ec9debbdSJason Wang 24260741bcb5SAmit Shah return 0; 24270741bcb5SAmit Shah } 24280741bcb5SAmit Shah #endif 24290741bcb5SAmit Shah 2430296f96fcSRusty Russell static struct virtio_device_id id_table[] = { 2431296f96fcSRusty Russell { VIRTIO_ID_NET, VIRTIO_DEV_ANY_ID }, 2432296f96fcSRusty Russell { 0 }, 2433296f96fcSRusty Russell }; 2434296f96fcSRusty Russell 2435f3358507SMichael S. Tsirkin #define VIRTNET_FEATURES \ 2436f3358507SMichael S. Tsirkin VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GUEST_CSUM, \ 2437f3358507SMichael S. Tsirkin VIRTIO_NET_F_MAC, \ 2438f3358507SMichael S. Tsirkin VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6, \ 2439f3358507SMichael S. Tsirkin VIRTIO_NET_F_HOST_ECN, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6, \ 2440f3358507SMichael S. Tsirkin VIRTIO_NET_F_GUEST_ECN, VIRTIO_NET_F_GUEST_UFO, \ 2441f3358507SMichael S. Tsirkin VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ, \ 2442f3358507SMichael S. Tsirkin VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN, \ 2443f3358507SMichael S. Tsirkin VIRTIO_NET_F_GUEST_ANNOUNCE, VIRTIO_NET_F_MQ, \ 2444f3358507SMichael S. Tsirkin VIRTIO_NET_F_CTRL_MAC_ADDR, \ 2445f3358507SMichael S. Tsirkin VIRTIO_NET_F_MTU 2446f3358507SMichael S. Tsirkin 2447c45a6816SRusty Russell static unsigned int features[] = { 2448f3358507SMichael S. Tsirkin VIRTNET_FEATURES, 2449f3358507SMichael S. Tsirkin }; 2450f3358507SMichael S. Tsirkin 2451f3358507SMichael S. Tsirkin static unsigned int features_legacy[] = { 2452f3358507SMichael S. Tsirkin VIRTNET_FEATURES, 2453f3358507SMichael S. Tsirkin VIRTIO_NET_F_GSO, 2454e7428e95SMichael S. Tsirkin VIRTIO_F_ANY_LAYOUT, 2455c45a6816SRusty Russell }; 2456c45a6816SRusty Russell 245722402529SUwe Kleine-König static struct virtio_driver virtio_net_driver = { 2458c45a6816SRusty Russell .feature_table = features, 2459c45a6816SRusty Russell .feature_table_size = ARRAY_SIZE(features), 2460f3358507SMichael S. Tsirkin .feature_table_legacy = features_legacy, 2461f3358507SMichael S. Tsirkin .feature_table_size_legacy = ARRAY_SIZE(features_legacy), 2462296f96fcSRusty Russell .driver.name = KBUILD_MODNAME, 2463296f96fcSRusty Russell .driver.owner = THIS_MODULE, 2464296f96fcSRusty Russell .id_table = id_table, 2465296f96fcSRusty Russell .probe = virtnet_probe, 24668cc085d6SBill Pemberton .remove = virtnet_remove, 24679f4d26d0SMark McLoughlin .config_changed = virtnet_config_changed, 246889107000SAaron Lu #ifdef CONFIG_PM_SLEEP 24690741bcb5SAmit Shah .freeze = virtnet_freeze, 24700741bcb5SAmit Shah .restore = virtnet_restore, 24710741bcb5SAmit Shah #endif 2472296f96fcSRusty Russell }; 2473296f96fcSRusty Russell 24748017c279SSebastian Andrzej Siewior static __init int virtio_net_driver_init(void) 24758017c279SSebastian Andrzej Siewior { 24768017c279SSebastian Andrzej Siewior int ret; 24778017c279SSebastian Andrzej Siewior 247873c1b41eSThomas Gleixner ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "virtio/net:online", 24798017c279SSebastian Andrzej Siewior virtnet_cpu_online, 24808017c279SSebastian Andrzej Siewior virtnet_cpu_down_prep); 24818017c279SSebastian Andrzej Siewior if (ret < 0) 24828017c279SSebastian Andrzej Siewior goto out; 24838017c279SSebastian Andrzej Siewior virtionet_online = ret; 248473c1b41eSThomas Gleixner ret = cpuhp_setup_state_multi(CPUHP_VIRT_NET_DEAD, "virtio/net:dead", 24858017c279SSebastian Andrzej Siewior NULL, virtnet_cpu_dead); 24868017c279SSebastian Andrzej Siewior if (ret) 24878017c279SSebastian Andrzej Siewior goto err_dead; 24888017c279SSebastian Andrzej Siewior 24898017c279SSebastian Andrzej Siewior ret = register_virtio_driver(&virtio_net_driver); 24908017c279SSebastian Andrzej Siewior if (ret) 24918017c279SSebastian Andrzej Siewior goto err_virtio; 24928017c279SSebastian Andrzej Siewior return 0; 24938017c279SSebastian Andrzej Siewior err_virtio: 24948017c279SSebastian Andrzej Siewior cpuhp_remove_multi_state(CPUHP_VIRT_NET_DEAD); 24958017c279SSebastian Andrzej Siewior err_dead: 24968017c279SSebastian Andrzej Siewior cpuhp_remove_multi_state(virtionet_online); 24978017c279SSebastian Andrzej Siewior out: 24988017c279SSebastian Andrzej Siewior return ret; 24998017c279SSebastian Andrzej Siewior } 25008017c279SSebastian Andrzej Siewior module_init(virtio_net_driver_init); 25018017c279SSebastian Andrzej Siewior 25028017c279SSebastian Andrzej Siewior static __exit void virtio_net_driver_exit(void) 25038017c279SSebastian Andrzej Siewior { 25048017c279SSebastian Andrzej Siewior cpuhp_remove_multi_state(CPUHP_VIRT_NET_DEAD); 25058017c279SSebastian Andrzej Siewior cpuhp_remove_multi_state(virtionet_online); 25068017c279SSebastian Andrzej Siewior unregister_virtio_driver(&virtio_net_driver); 25078017c279SSebastian Andrzej Siewior } 25088017c279SSebastian Andrzej Siewior module_exit(virtio_net_driver_exit); 2509296f96fcSRusty Russell 2510296f96fcSRusty Russell MODULE_DEVICE_TABLE(virtio, id_table); 2511296f96fcSRusty Russell MODULE_DESCRIPTION("Virtio network driver"); 2512296f96fcSRusty Russell MODULE_LICENSE("GPL"); 2513