/* A network driver using virtio.
 *
 * Copyright 2007 Rusty Russell <rusty@rustcorp.com.au> IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
//#define DEBUG
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/module.h>
#include <linux/virtio.h>
#include <linux/virtio_net.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/scatterlist.h>
#include <linux/if_vlan.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/average.h>

static int napi_weight = NAPI_POLL_WEIGHT;
module_param(napi_weight, int, 0444);

static bool csum = true, gso = true;
module_param(csum, bool, 0444);
module_param(gso, bool, 0444);

/* FIXME: MTU in config. */
#define GOOD_PACKET_LEN (ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN)
#define GOOD_COPY_LEN	128

/* RX packet size EWMA. The average packet size is used to determine the packet
 * buffer size when refilling RX rings. As the entire RX ring may be refilled
 * at once, the weight is chosen so that the EWMA will be insensitive to short-
 * term, transient changes in packet size.
 */
DECLARE_EWMA(pkt_len, 1, 64)

/* With mergeable buffers we align buffer address and use the low bits to
 * encode its true size. Buffer size is up to 1 page so we need to align to
 * square root of page size to ensure we reserve enough bits to encode the true
 * size.
 */
#define MERGEABLE_BUFFER_MIN_ALIGN_SHIFT ((PAGE_SHIFT + 1) / 2)

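/* For example, with 4 KiB pages PAGE_SHIFT is 12, so the shift above is
 * (12 + 1) / 2 = 6: buffers are aligned to at least 64 bytes (or to
 * L1_CACHE_BYTES if that is larger), leaving the low bits of a buffer
 * address free to carry its truesize in MERGEABLE_BUFFER_ALIGN units.
 */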
/* Minimum alignment for mergeable packet buffers. */
#define MERGEABLE_BUFFER_ALIGN max(L1_CACHE_BYTES, \
				   1 << MERGEABLE_BUFFER_MIN_ALIGN_SHIFT)

#define VIRTNET_DRIVER_VERSION "1.0.0"

struct virtnet_stats {
	struct u64_stats_sync tx_syncp;
	struct u64_stats_sync rx_syncp;
	u64 tx_bytes;
	u64 tx_packets;

	u64 rx_bytes;
	u64 rx_packets;
};

/* Internal representation of a send virtqueue */
struct send_queue {
	/* Virtqueue associated with this send queue */
	struct virtqueue *vq;

	/* TX: fragments + linear part + virtio header */
	struct scatterlist sg[MAX_SKB_FRAGS + 2];

	/* Name of the send queue: output.$index */
	char name[40];
};

/* Internal representation of a receive virtqueue */
struct receive_queue {
	/* Virtqueue associated with this receive queue */
	struct virtqueue *vq;

	struct napi_struct napi;

	struct bpf_prog __rcu *xdp_prog;

	/* Chain pages by the private ptr. */
	struct page *pages;

	/* Average packet length for mergeable receive buffers. */
	struct ewma_pkt_len mrg_avg_pkt_len;

	/* Page frag for packet buffer allocation. */
	struct page_frag alloc_frag;

	/* RX: fragments + linear part + virtio header */
	struct scatterlist sg[MAX_SKB_FRAGS + 2];

	/* Name of this receive queue: input.$index */
	char name[40];
};

struct virtnet_info {
	struct virtio_device *vdev;
	struct virtqueue *cvq;
	struct net_device *dev;
	struct send_queue *sq;
	struct receive_queue *rq;
	unsigned int status;

	/* Max # of queue pairs supported by the device */
	u16 max_queue_pairs;

	/* # of queue pairs currently used by the driver */
	u16 curr_queue_pairs;

	/* # of XDP queue pairs currently used by the driver */
	u16 xdp_queue_pairs;

	/* I like... big packets and I cannot lie! */
	bool big_packets;

	/* Host will merge rx buffers for big packets (shake it! shake it!) */
	bool mergeable_rx_bufs;

	/* Has control virtqueue */
	bool has_cvq;

	/* Host can handle any s/g split between our header and packet data */
	bool any_header_sg;

	/* Packet virtio header size */
	u8 hdr_len;

	/* Active statistics */
	struct virtnet_stats __percpu *stats;

	/* Work struct for refilling if we run low on memory. */
	struct delayed_work refill;

	/* Work struct for config space updates */
	struct work_struct config_work;

	/* Is the affinity hint set for the virtqueues? */
	bool affinity_hint_set;

	/* CPU hotplug instances for online & dead */
	struct hlist_node node;
	struct hlist_node node_dead;

	/* Control VQ buffers: protected by the rtnl lock */
	struct virtio_net_ctrl_hdr ctrl_hdr;
	virtio_net_ctrl_ack ctrl_status;
	struct virtio_net_ctrl_mq ctrl_mq;
	u8 ctrl_promisc;
	u8 ctrl_allmulti;
	u16 ctrl_vid;

	/* Ethtool settings */
	u8 duplex;
	u32 speed;
};

struct padded_vnet_hdr {
	struct virtio_net_hdr_mrg_rxbuf hdr;
	/*
	 * hdr is in a separate sg buffer, and data sg buffer shares same page
	 * with this header sg. This padding makes next sg 16 byte aligned
	 * after the header.
	 */
	char padding[4];
};

/* Converting between virtqueue no. and kernel tx/rx queue no.
 * 0:rx0 1:tx0 2:rx1 3:tx1 ... 2N:rxN 2N+1:txN 2N+2:cvq
 */
static int vq2txq(struct virtqueue *vq)
{
	return (vq->index - 1) / 2;
}

static int txq2vq(int txq)
{
	return txq * 2 + 1;
}

static int vq2rxq(struct virtqueue *vq)
{
	return vq->index / 2;
}

static int rxq2vq(int rxq)
{
	return rxq * 2;
}

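/* The virtio-net header for a buffer in flight is stashed in the skb
 * control buffer (skb->cb), which is large enough to hold the biggest
 * header variant, struct virtio_net_hdr_mrg_rxbuf.
 */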
static inline struct virtio_net_hdr_mrg_rxbuf *skb_vnet_hdr(struct sk_buff *skb)
{
	return (struct virtio_net_hdr_mrg_rxbuf *)skb->cb;
}

/*
 * private is used to chain pages for big packets, put the whole
 * most recently used list in the beginning for reuse
 */
static void give_pages(struct receive_queue *rq, struct page *page)
{
	struct page *end;

	/* Find end of list, sew whole thing into vi->rq.pages. */
	for (end = page; end->private; end = (struct page *)end->private);
	end->private = (unsigned long)rq->pages;
	rq->pages = page;
}

static struct page *get_a_page(struct receive_queue *rq, gfp_t gfp_mask)
{
	struct page *p = rq->pages;

	if (p) {
		rq->pages = (struct page *)p->private;
		/* clear private here, it is used to chain pages */
		p->private = 0;
	} else
		p = alloc_page(gfp_mask);
	return p;
}

static void skb_xmit_done(struct virtqueue *vq)
{
	struct virtnet_info *vi = vq->vdev->priv;

	/* Suppress further interrupts. */
	virtqueue_disable_cb(vq);

	/* We were probably waiting for more output buffers. */
	netif_wake_subqueue(vi->dev, vq2txq(vq));
}

static unsigned int mergeable_ctx_to_buf_truesize(unsigned long mrg_ctx)
{
	unsigned int truesize = mrg_ctx & (MERGEABLE_BUFFER_ALIGN - 1);
	return (truesize + 1) * MERGEABLE_BUFFER_ALIGN;
}

static void *mergeable_ctx_to_buf_address(unsigned long mrg_ctx)
{
	return (void *)(mrg_ctx & -MERGEABLE_BUFFER_ALIGN);
}

static unsigned long mergeable_buf_to_ctx(void *buf, unsigned int truesize)
{
	unsigned int size = truesize / MERGEABLE_BUFFER_ALIGN;
	return (unsigned long)buf | (size - 1);
}

/* Called from bottom half context */
static struct sk_buff *page_to_skb(struct virtnet_info *vi,
				   struct receive_queue *rq,
				   struct page *page, unsigned int offset,
				   unsigned int len, unsigned int truesize)
{
	struct sk_buff *skb;
	struct virtio_net_hdr_mrg_rxbuf *hdr;
	unsigned int copy, hdr_len, hdr_padded_len;
	char *p;

	p = page_address(page) + offset;

	/* copy small packet so we can reuse these pages for small data */
	skb = napi_alloc_skb(&rq->napi, GOOD_COPY_LEN);
	if (unlikely(!skb))
		return NULL;

	hdr = skb_vnet_hdr(skb);

	hdr_len = vi->hdr_len;
	if (vi->mergeable_rx_bufs)
		hdr_padded_len = sizeof *hdr;
	else
		hdr_padded_len = sizeof(struct padded_vnet_hdr);

	memcpy(hdr, p, hdr_len);

	len -= hdr_len;
	offset += hdr_padded_len;
	p += hdr_padded_len;

	copy = len;
	if (copy > skb_tailroom(skb))
		copy = skb_tailroom(skb);
	memcpy(skb_put(skb, copy), p, copy);

	len -= copy;
	offset += copy;

	if (vi->mergeable_rx_bufs) {
		if (len)
			skb_add_rx_frag(skb, 0, page, offset, len, truesize);
		else
			put_page(page);
		return skb;
	}

319e878d78bSSasha Levin */ 320e878d78bSSasha Levin if (unlikely(len > MAX_SKB_FRAGS * PAGE_SIZE)) { 321be443899SAmerigo Wang net_dbg_ratelimited("%s: too much data\n", skb->dev->name); 322e878d78bSSasha Levin dev_kfree_skb(skb); 323e878d78bSSasha Levin return NULL; 324e878d78bSSasha Levin } 3252613af0eSMichael Dalton BUG_ON(offset >= PAGE_SIZE); 3269ab86bbcSShirley Ma while (len) { 3272613af0eSMichael Dalton unsigned int frag_size = min((unsigned)PAGE_SIZE - offset, len); 3282613af0eSMichael Dalton skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, offset, 3292613af0eSMichael Dalton frag_size, truesize); 3302613af0eSMichael Dalton len -= frag_size; 3319ab86bbcSShirley Ma page = (struct page *)page->private; 3329ab86bbcSShirley Ma offset = 0; 3333f2c31d9SMark McLoughlin } 3343f2c31d9SMark McLoughlin 3359ab86bbcSShirley Ma if (page) 336e9d7417bSJason Wang give_pages(rq, page); 3373f2c31d9SMark McLoughlin 3389ab86bbcSShirley Ma return skb; 3399ab86bbcSShirley Ma } 3409ab86bbcSShirley Ma 341a67edbf4SDaniel Borkmann static bool virtnet_xdp_xmit(struct virtnet_info *vi, 34256434a01SJohn Fastabend struct receive_queue *rq, 34356434a01SJohn Fastabend struct send_queue *sq, 344bb91accfSJason Wang struct xdp_buff *xdp, 345bb91accfSJason Wang void *data) 34656434a01SJohn Fastabend { 34756434a01SJohn Fastabend struct virtio_net_hdr_mrg_rxbuf *hdr; 34856434a01SJohn Fastabend unsigned int num_sg, len; 34956434a01SJohn Fastabend void *xdp_sent; 35056434a01SJohn Fastabend int err; 35156434a01SJohn Fastabend 35256434a01SJohn Fastabend /* Free up any pending old buffers before queueing new ones. */ 35356434a01SJohn Fastabend while ((xdp_sent = virtqueue_get_buf(sq->vq, &len)) != NULL) { 354bb91accfSJason Wang if (vi->mergeable_rx_bufs) { 35556434a01SJohn Fastabend struct page *sent_page = virt_to_head_page(xdp_sent); 356bb91accfSJason Wang 35756434a01SJohn Fastabend put_page(sent_page); 358bb91accfSJason Wang } else { /* small buffer */ 359bb91accfSJason Wang struct sk_buff *skb = xdp_sent; 360bb91accfSJason Wang 361bb91accfSJason Wang kfree_skb(skb); 362bb91accfSJason Wang } 36356434a01SJohn Fastabend } 36456434a01SJohn Fastabend 365bb91accfSJason Wang if (vi->mergeable_rx_bufs) { 36656434a01SJohn Fastabend /* Zero header and leave csum up to XDP layers */ 36756434a01SJohn Fastabend hdr = xdp->data; 36856434a01SJohn Fastabend memset(hdr, 0, vi->hdr_len); 36956434a01SJohn Fastabend 37056434a01SJohn Fastabend num_sg = 1; 37156434a01SJohn Fastabend sg_init_one(sq->sg, xdp->data, xdp->data_end - xdp->data); 372bb91accfSJason Wang } else { /* small buffer */ 373bb91accfSJason Wang struct sk_buff *skb = data; 374bb91accfSJason Wang 375bb91accfSJason Wang /* Zero header and leave csum up to XDP layers */ 376bb91accfSJason Wang hdr = skb_vnet_hdr(skb); 377bb91accfSJason Wang memset(hdr, 0, vi->hdr_len); 378bb91accfSJason Wang 379bb91accfSJason Wang num_sg = 2; 380bb91accfSJason Wang sg_init_table(sq->sg, 2); 381bb91accfSJason Wang sg_set_buf(sq->sg, hdr, vi->hdr_len); 382bb91accfSJason Wang skb_to_sgvec(skb, sq->sg + 1, 0, skb->len); 383bb91accfSJason Wang } 38456434a01SJohn Fastabend err = virtqueue_add_outbuf(sq->vq, sq->sg, num_sg, 385bb91accfSJason Wang data, GFP_ATOMIC); 38656434a01SJohn Fastabend if (unlikely(err)) { 387bb91accfSJason Wang if (vi->mergeable_rx_bufs) { 388bb91accfSJason Wang struct page *page = virt_to_head_page(xdp->data); 389bb91accfSJason Wang 39056434a01SJohn Fastabend put_page(page); 391bb91accfSJason Wang } else /* small buffer */ 392bb91accfSJason Wang kfree_skb(data); 
static bool virtnet_xdp_xmit(struct virtnet_info *vi,
			     struct receive_queue *rq,
			     struct send_queue *sq,
			     struct xdp_buff *xdp,
			     void *data)
{
	struct virtio_net_hdr_mrg_rxbuf *hdr;
	unsigned int num_sg, len;
	void *xdp_sent;
	int err;

	/* Free up any pending old buffers before queueing new ones. */
	while ((xdp_sent = virtqueue_get_buf(sq->vq, &len)) != NULL) {
		if (vi->mergeable_rx_bufs) {
			struct page *sent_page = virt_to_head_page(xdp_sent);

			put_page(sent_page);
		} else { /* small buffer */
			struct sk_buff *skb = xdp_sent;

			kfree_skb(skb);
		}
	}

	if (vi->mergeable_rx_bufs) {
		/* Zero header and leave csum up to XDP layers */
		hdr = xdp->data;
		memset(hdr, 0, vi->hdr_len);

		num_sg = 1;
		sg_init_one(sq->sg, xdp->data, xdp->data_end - xdp->data);
	} else { /* small buffer */
		struct sk_buff *skb = data;

		/* Zero header and leave csum up to XDP layers */
		hdr = skb_vnet_hdr(skb);
		memset(hdr, 0, vi->hdr_len);

		num_sg = 2;
		sg_init_table(sq->sg, 2);
		sg_set_buf(sq->sg, hdr, vi->hdr_len);
		skb_to_sgvec(skb, sq->sg + 1, 0, skb->len);
	}
	err = virtqueue_add_outbuf(sq->vq, sq->sg, num_sg,
				   data, GFP_ATOMIC);
	if (unlikely(err)) {
		if (vi->mergeable_rx_bufs) {
			struct page *page = virt_to_head_page(xdp->data);

			put_page(page);
		} else /* small buffer */
			kfree_skb(data);
		/* On error abort to avoid unnecessary kick */
		return false;
	}

	virtqueue_kick(sq->vq);
	return true;
}

static struct sk_buff *receive_small(struct net_device *dev,
				     struct virtnet_info *vi,
				     struct receive_queue *rq,
				     void *buf, unsigned int len)
{
	struct sk_buff *skb = buf;
	struct bpf_prog *xdp_prog;

	len -= vi->hdr_len;
	skb_trim(skb, len);

	rcu_read_lock();
	xdp_prog = rcu_dereference(rq->xdp_prog);
	if (xdp_prog) {
		struct virtio_net_hdr_mrg_rxbuf *hdr = buf;
		struct xdp_buff xdp;
		unsigned int qp;
		u32 act;

		if (unlikely(hdr->hdr.gso_type || hdr->hdr.flags))
			goto err_xdp;

		xdp.data = skb->data;
		xdp.data_end = xdp.data + len;
		act = bpf_prog_run_xdp(xdp_prog, &xdp);

		switch (act) {
		case XDP_PASS:
			break;
		case XDP_TX:
			qp = vi->curr_queue_pairs -
				vi->xdp_queue_pairs +
				smp_processor_id();
			if (unlikely(!virtnet_xdp_xmit(vi, rq, &vi->sq[qp],
						       &xdp, skb)))
				trace_xdp_exception(vi->dev, xdp_prog, act);
			rcu_read_unlock();
			goto xdp_xmit;
		default:
			bpf_warn_invalid_xdp_action(act);
		case XDP_ABORTED:
			trace_xdp_exception(vi->dev, xdp_prog, act);
		case XDP_DROP:
			goto err_xdp;
		}
	}
	rcu_read_unlock();

	return skb;

err_xdp:
	rcu_read_unlock();
	dev->stats.rx_dropped++;
	kfree_skb(skb);
xdp_xmit:
	return NULL;
}

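/* Big-packet path: the buffer is a chain of pages linked through
 * page->private; page_to_skb() copies the header out and hangs the rest of
 * the chain off the skb as fragments.
 */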
static struct sk_buff *receive_big(struct net_device *dev,
				   struct virtnet_info *vi,
				   struct receive_queue *rq,
				   void *buf,
				   unsigned int len)
{
	struct page *page = buf;
	struct sk_buff *skb = page_to_skb(vi, rq, page, 0, len, PAGE_SIZE);

	if (unlikely(!skb))
		goto err;

	return skb;

err:
	dev->stats.rx_dropped++;
	give_pages(rq, page);
	return NULL;
}

/* The conditions to enable XDP should preclude the underlying device from
 * sending packets across multiple buffers (num_buf > 1). However per spec
 * it does not appear to be illegal to do so but rather just against convention.
 * So in order to avoid making a system unresponsive the packets are pushed
 * into a page and the XDP program is run. This will be extremely slow and we
 * push a warning to the user to fix this as soon as possible. Fixing this may
 * require resolving the underlying hardware to determine why multiple buffers
 * are being received or simply loading the XDP program in the ingress stack
 * after the skb is built because there is no advantage to running it here
 * anymore.
 */
static struct page *xdp_linearize_page(struct receive_queue *rq,
				       u16 *num_buf,
				       struct page *p,
				       int offset,
				       unsigned int *len)
{
	struct page *page = alloc_page(GFP_ATOMIC);
	unsigned int page_off = 0;

	if (!page)
		return NULL;

	memcpy(page_address(page) + page_off, page_address(p) + offset, *len);
	page_off += *len;

	while (--*num_buf) {
		unsigned int buflen;
		unsigned long ctx;
		void *buf;
		int off;

		ctx = (unsigned long)virtqueue_get_buf(rq->vq, &buflen);
		if (unlikely(!ctx))
			goto err_buf;

		buf = mergeable_ctx_to_buf_address(ctx);
		p = virt_to_head_page(buf);
		off = buf - page_address(p);

		/* guard against a misconfigured or uncooperative backend that
		 * is sending packets larger than the MTU.
		 */
		if ((page_off + buflen) > PAGE_SIZE) {
			put_page(p);
			goto err_buf;
		}

		memcpy(page_address(page) + page_off,
		       page_address(p) + off, buflen);
		page_off += buflen;
		put_page(p);
	}

	*len = page_off;
	return page;
err_buf:
	__free_pages(page, 0);
	return NULL;
}

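/* Mergeable-buffer path: the first buffer carries the header with
 * num_buffers; the remaining num_buffers - 1 buffers are pulled off the
 * ring below and chained onto the head skb as fragments.
 */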
static struct sk_buff *receive_mergeable(struct net_device *dev,
					 struct virtnet_info *vi,
					 struct receive_queue *rq,
					 unsigned long ctx,
					 unsigned int len)
{
	void *buf = mergeable_ctx_to_buf_address(ctx);
	struct virtio_net_hdr_mrg_rxbuf *hdr = buf;
	u16 num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers);
	struct page *page = virt_to_head_page(buf);
	int offset = buf - page_address(page);
	struct sk_buff *head_skb, *curr_skb;
	struct bpf_prog *xdp_prog;
	unsigned int truesize;

	head_skb = NULL;

	rcu_read_lock();
	xdp_prog = rcu_dereference(rq->xdp_prog);
	if (xdp_prog) {
		struct page *xdp_page;
		struct xdp_buff xdp;
		unsigned int qp;
		void *data;
		u32 act;

		/* This happens when rx buffer size is underestimated */
		if (unlikely(num_buf > 1)) {
			/* linearize data for XDP */
			xdp_page = xdp_linearize_page(rq, &num_buf,
						      page, offset, &len);
			if (!xdp_page)
				goto err_xdp;
			offset = 0;
		} else {
			xdp_page = page;
		}

		/* Transient failure which in theory could occur if
		 * in-flight packets from before XDP was enabled reach
		 * the receive path after XDP is loaded. In practice I
		 * was not able to create this condition.
		 */
		if (unlikely(hdr->hdr.gso_type))
			goto err_xdp;

		data = page_address(xdp_page) + offset;
		xdp.data = data + vi->hdr_len;
		xdp.data_end = xdp.data + (len - vi->hdr_len);
		act = bpf_prog_run_xdp(xdp_prog, &xdp);

		switch (act) {
		case XDP_PASS:
			/* We can only create skb based on xdp_page. */
			if (unlikely(xdp_page != page)) {
				rcu_read_unlock();
				put_page(page);
				head_skb = page_to_skb(vi, rq, xdp_page,
						       0, len, PAGE_SIZE);
				ewma_pkt_len_add(&rq->mrg_avg_pkt_len, len);
				return head_skb;
			}
			break;
		case XDP_TX:
			qp = vi->curr_queue_pairs -
				vi->xdp_queue_pairs +
				smp_processor_id();
			if (unlikely(!virtnet_xdp_xmit(vi, rq, &vi->sq[qp],
						       &xdp, data)))
				trace_xdp_exception(vi->dev, xdp_prog, act);
			ewma_pkt_len_add(&rq->mrg_avg_pkt_len, len);
			if (unlikely(xdp_page != page))
				goto err_xdp;
			rcu_read_unlock();
			goto xdp_xmit;
		default:
			bpf_warn_invalid_xdp_action(act);
		case XDP_ABORTED:
			trace_xdp_exception(vi->dev, xdp_prog, act);
		case XDP_DROP:
			if (unlikely(xdp_page != page))
				__free_pages(xdp_page, 0);
			ewma_pkt_len_add(&rq->mrg_avg_pkt_len, len);
			goto err_xdp;
		}
	}
	rcu_read_unlock();

	truesize = max(len, mergeable_ctx_to_buf_truesize(ctx));
	head_skb = page_to_skb(vi, rq, page, offset, len, truesize);
	curr_skb = head_skb;

	if (unlikely(!curr_skb))
		goto err_skb;
	while (--num_buf) {
		int num_skb_frags;

		ctx = (unsigned long)virtqueue_get_buf(rq->vq, &len);
		if (unlikely(!ctx)) {
			pr_debug("%s: rx error: %d buffers out of %d missing\n",
				 dev->name, num_buf,
				 virtio16_to_cpu(vi->vdev,
						 hdr->num_buffers));
			dev->stats.rx_length_errors++;
			goto err_buf;
		}

		buf = mergeable_ctx_to_buf_address(ctx);
		page = virt_to_head_page(buf);

		num_skb_frags = skb_shinfo(curr_skb)->nr_frags;
		if (unlikely(num_skb_frags == MAX_SKB_FRAGS)) {
			struct sk_buff *nskb = alloc_skb(0, GFP_ATOMIC);

			if (unlikely(!nskb))
				goto err_skb;
			if (curr_skb == head_skb)
				skb_shinfo(curr_skb)->frag_list = nskb;
			else
				curr_skb->next = nskb;
			curr_skb = nskb;
			head_skb->truesize += nskb->truesize;
			num_skb_frags = 0;
		}
		truesize = max(len, mergeable_ctx_to_buf_truesize(ctx));
		if (curr_skb != head_skb) {
			head_skb->data_len += len;
			head_skb->len += len;
			head_skb->truesize += truesize;
		}
		offset = buf - page_address(page);
		if (skb_can_coalesce(curr_skb, num_skb_frags, page, offset)) {
			put_page(page);
			skb_coalesce_rx_frag(curr_skb, num_skb_frags - 1,
					     len, truesize);
		} else {
			skb_add_rx_frag(curr_skb, num_skb_frags, page,
					offset, len, truesize);
		}
	}

	ewma_pkt_len_add(&rq->mrg_avg_pkt_len, head_skb->len);
	return head_skb;

err_xdp:
	rcu_read_unlock();
err_skb:
	put_page(page);
	while (--num_buf) {
		ctx = (unsigned long)virtqueue_get_buf(rq->vq, &len);
		if (unlikely(!ctx)) {
			pr_debug("%s: rx error: %d buffers missing\n",
				 dev->name, num_buf);
			dev->stats.rx_length_errors++;
			break;
		}
		page = virt_to_head_page(mergeable_ctx_to_buf_address(ctx));
		put_page(page);
	}
err_buf:
	dev->stats.rx_dropped++;
	dev_kfree_skb(head_skb);
xdp_xmit:
	return NULL;
}

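/* Entry point for a completed receive buffer: dispatch to the small-buffer,
 * big-packet or mergeable handler, then fix up checksum/GSO metadata from
 * the virtio-net header and hand the skb to NAPI.
 */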
static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
			void *buf, unsigned int len)
{
	struct net_device *dev = vi->dev;
	struct virtnet_stats *stats = this_cpu_ptr(vi->stats);
	struct sk_buff *skb;
	struct virtio_net_hdr_mrg_rxbuf *hdr;

	if (unlikely(len < vi->hdr_len + ETH_HLEN)) {
		pr_debug("%s: short packet %i\n", dev->name, len);
		dev->stats.rx_length_errors++;
		if (vi->mergeable_rx_bufs) {
			unsigned long ctx = (unsigned long)buf;
			void *base = mergeable_ctx_to_buf_address(ctx);
			put_page(virt_to_head_page(base));
		} else if (vi->big_packets) {
			give_pages(rq, buf);
		} else {
			dev_kfree_skb(buf);
		}
		return;
	}

	if (vi->mergeable_rx_bufs)
		skb = receive_mergeable(dev, vi, rq, (unsigned long)buf, len);
	else if (vi->big_packets)
		skb = receive_big(dev, vi, rq, buf, len);
	else
		skb = receive_small(dev, vi, rq, buf, len);

	if (unlikely(!skb))
		return;

	hdr = skb_vnet_hdr(skb);

	u64_stats_update_begin(&stats->rx_syncp);
	stats->rx_bytes += skb->len;
	stats->rx_packets++;
	u64_stats_update_end(&stats->rx_syncp);

	if (hdr->hdr.flags & VIRTIO_NET_HDR_F_DATA_VALID)
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	if (virtio_net_hdr_to_skb(skb, &hdr->hdr,
				  virtio_is_little_endian(vi->vdev))) {
		net_warn_ratelimited("%s: bad gso: type: %u, size: %u\n",
				     dev->name, hdr->hdr.gso_type,
				     hdr->hdr.gso_size);
		goto frame_err;
	}

	skb->protocol = eth_type_trans(skb, dev);
	pr_debug("Receiving skb proto 0x%04x len %i type %i\n",
		 ntohs(skb->protocol), skb->len, skb->pkt_type);

	napi_gro_receive(&rq->napi, skb);
	return;

frame_err:
	dev->stats.rx_frame_errors++;
	dev_kfree_skb(skb);
}

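/* Post a single skb-backed receive buffer: one sg entry for the virtio-net
 * header (kept in skb->cb) and one for GOOD_PACKET_LEN bytes of data.
 */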
static int add_recvbuf_small(struct virtnet_info *vi, struct receive_queue *rq,
			     gfp_t gfp)
{
	struct sk_buff *skb;
	struct virtio_net_hdr_mrg_rxbuf *hdr;
	int err;

	skb = __netdev_alloc_skb_ip_align(vi->dev, GOOD_PACKET_LEN, gfp);
	if (unlikely(!skb))
		return -ENOMEM;

	skb_put(skb, GOOD_PACKET_LEN);

	hdr = skb_vnet_hdr(skb);
	sg_init_table(rq->sg, 2);
	sg_set_buf(rq->sg, hdr, vi->hdr_len);
	skb_to_sgvec(skb, rq->sg + 1, 0, skb->len);

	err = virtqueue_add_inbuf(rq->vq, rq->sg, 2, skb, gfp);
	if (err < 0)
		dev_kfree_skb(skb);

	return err;
}

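/* Post a big-packet receive buffer: a chain of MAX_SKB_FRAGS + 1 pages, with
 * the first page split so that rq->sg[0] covers the header and rq->sg[1] the
 * start of the data. The pages are linked through page->private so the whole
 * chain can be reclaimed as one unit.
 */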
static int add_recvbuf_big(struct virtnet_info *vi, struct receive_queue *rq,
			   gfp_t gfp)
{
	struct page *first, *list = NULL;
	char *p;
	int i, err, offset;

	sg_init_table(rq->sg, MAX_SKB_FRAGS + 2);

	/* page in rq->sg[MAX_SKB_FRAGS + 1] is list tail */
	for (i = MAX_SKB_FRAGS + 1; i > 1; --i) {
		first = get_a_page(rq, gfp);
		if (!first) {
			if (list)
				give_pages(rq, list);
			return -ENOMEM;
		}
		sg_set_buf(&rq->sg[i], page_address(first), PAGE_SIZE);

		/* chain new page in list head to match sg */
		first->private = (unsigned long)list;
		list = first;
	}

	first = get_a_page(rq, gfp);
	if (!first) {
		give_pages(rq, list);
		return -ENOMEM;
	}
	p = page_address(first);

	/* rq->sg[0], rq->sg[1] share the same page */
	/* a separated rq->sg[0] for header - required in case !any_header_sg */
	sg_set_buf(&rq->sg[0], p, vi->hdr_len);

	/* rq->sg[1] for data packet, from offset */
	offset = sizeof(struct padded_vnet_hdr);
	sg_set_buf(&rq->sg[1], p + offset, PAGE_SIZE - offset);

	/* chain first in list head */
	first->private = (unsigned long)list;
	err = virtqueue_add_inbuf(rq->vq, rq->sg, MAX_SKB_FRAGS + 2,
				  first, gfp);
	if (err < 0)
		give_pages(rq, first);

	return err;
}

static unsigned int get_mergeable_buf_len(struct ewma_pkt_len *avg_pkt_len)
{
	const size_t hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf);
	unsigned int len;

	len = hdr_len + clamp_t(unsigned int, ewma_pkt_len_read(avg_pkt_len),
				GOOD_PACKET_LEN, PAGE_SIZE - hdr_len);
	return ALIGN(len, MERGEABLE_BUFFER_ALIGN);
}

static int add_recvbuf_mergeable(struct receive_queue *rq, gfp_t gfp)
{
	struct page_frag *alloc_frag = &rq->alloc_frag;
	char *buf;
	unsigned long ctx;
	int err;
	unsigned int len, hole;

	len = get_mergeable_buf_len(&rq->mrg_avg_pkt_len);
	if (unlikely(!skb_page_frag_refill(len, alloc_frag, gfp)))
		return -ENOMEM;

	buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset;
	ctx = mergeable_buf_to_ctx(buf, len);
	get_page(alloc_frag->page);
	alloc_frag->offset += len;
	hole = alloc_frag->size - alloc_frag->offset;
	if (hole < len) {
		/* To avoid internal fragmentation, if there is very likely not
		 * enough space for another buffer, add the remaining space to
		 * the current buffer. This extra space is not included in
		 * the truesize stored in ctx.
		 */
		len += hole;
		alloc_frag->offset += hole;
	}

	sg_init_one(rq->sg, buf, len);
	err = virtqueue_add_inbuf(rq->vq, rq->sg, 1, (void *)ctx, gfp);
	if (err < 0)
		put_page(virt_to_head_page(buf));

	return err;
}

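/* Note on sizing: get_mergeable_buf_len() clamps the EWMA of recent packet
 * sizes between GOOD_PACKET_LEN and a page, so steady small-packet traffic
 * packs many buffers into each page while large flows get page-sized ones.
 */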
/*
 * Returns false if we couldn't fill entirely (OOM).
 *
 * Normally run in the receive path, but can also be run from ndo_open
 * before we're receiving packets, or from refill_work which is
 * careful to disable receiving (using napi_disable).
 */
static bool try_fill_recv(struct virtnet_info *vi, struct receive_queue *rq,
			  gfp_t gfp)
{
	int err;
	bool oom;

	gfp |= __GFP_COLD;
	do {
		if (vi->mergeable_rx_bufs)
			err = add_recvbuf_mergeable(rq, gfp);
		else if (vi->big_packets)
			err = add_recvbuf_big(vi, rq, gfp);
		else
			err = add_recvbuf_small(vi, rq, gfp);

		oom = err == -ENOMEM;
		if (err)
			break;
	} while (rq->vq->num_free);
	virtqueue_kick(rq->vq);
	return !oom;
}

static void skb_recv_done(struct virtqueue *rvq)
{
	struct virtnet_info *vi = rvq->vdev->priv;
	struct receive_queue *rq = &vi->rq[vq2rxq(rvq)];

	/* Schedule NAPI, suppress further interrupts if successful. */
	if (napi_schedule_prep(&rq->napi)) {
		virtqueue_disable_cb(rvq);
		__napi_schedule(&rq->napi);
	}
}

static void virtnet_napi_enable(struct receive_queue *rq)
{
	napi_enable(&rq->napi);

	/* If all buffers were filled by other side before we napi_enabled, we
	 * won't get another interrupt, so process any outstanding packets
	 * now. virtnet_poll wants to re-enable the queue, so we disable here.
	 * We synchronize against interrupts via NAPI_STATE_SCHED */
	if (napi_schedule_prep(&rq->napi)) {
		virtqueue_disable_cb(rq->vq);
		local_bh_disable();
		__napi_schedule(&rq->napi);
		local_bh_enable();
	}
}

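/* Deferred refill, scheduled when an atomic refill fails: runs in process
 * context where GFP_KERNEL allocation can sleep, with each queue's NAPI
 * disabled while its ring is refilled.
 */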
static void refill_work(struct work_struct *work)
{
	struct virtnet_info *vi =
		container_of(work, struct virtnet_info, refill.work);
	bool still_empty;
	int i;

	for (i = 0; i < vi->curr_queue_pairs; i++) {
		struct receive_queue *rq = &vi->rq[i];

		napi_disable(&rq->napi);
		still_empty = !try_fill_recv(vi, rq, GFP_KERNEL);
		virtnet_napi_enable(rq);

		/* In theory, this can happen: if we don't get any buffers in,
		 * we will *never* try to fill again.
		 */
		if (still_empty)
			schedule_delayed_work(&vi->refill, HZ/2);
	}
}

static int virtnet_receive(struct receive_queue *rq, int budget)
{
	struct virtnet_info *vi = rq->vq->vdev->priv;
	unsigned int len, received = 0;
	void *buf;

	while (received < budget &&
	       (buf = virtqueue_get_buf(rq->vq, &len)) != NULL) {
		receive_buf(vi, rq, buf, len);
		received++;
	}

	if (rq->vq->num_free > virtqueue_get_vring_size(rq->vq) / 2) {
		if (!try_fill_recv(vi, rq, GFP_ATOMIC))
			schedule_delayed_work(&vi->refill, 0);
	}

	return received;
}

static int virtnet_poll(struct napi_struct *napi, int budget)
{
	struct receive_queue *rq =
		container_of(napi, struct receive_queue, napi);
	unsigned int r, received;

	received = virtnet_receive(rq, budget);

	/* Out of packets? */
	if (received < budget) {
		r = virtqueue_enable_cb_prepare(rq->vq);
		if (napi_complete_done(napi, received)) {
			if (unlikely(virtqueue_poll(rq->vq, r)) &&
			    napi_schedule_prep(napi)) {
				virtqueue_disable_cb(rq->vq);
				__napi_schedule(napi);
			}
		}
	}

	return received;
}

static int virtnet_open(struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);
	int i;

	for (i = 0; i < vi->max_queue_pairs; i++) {
		if (i < vi->curr_queue_pairs)
			/* Make sure we have some buffers: if oom use wq. */
			if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL))
				schedule_delayed_work(&vi->refill, 0);
		virtnet_napi_enable(&vi->rq[i]);
	}

	return 0;
}

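/* Reclaim skbs whose transmission the device has completed, crediting their
 * bytes and packets to the per-cpu stats before freeing them.
 */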
static void free_old_xmit_skbs(struct send_queue *sq)
{
	struct sk_buff *skb;
	unsigned int len;
	struct virtnet_info *vi = sq->vq->vdev->priv;
	struct virtnet_stats *stats = this_cpu_ptr(vi->stats);

	while ((skb = virtqueue_get_buf(sq->vq, &len)) != NULL) {
		pr_debug("Sent skb %p\n", skb);

		u64_stats_update_begin(&stats->tx_syncp);
		stats->tx_bytes += skb->len;
		stats->tx_packets++;
		u64_stats_update_end(&stats->tx_syncp);

		dev_kfree_skb_any(skb);
	}
}

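/* Map an skb onto the send queue's scatterlist. When the device accepts any
 * header/data layout (any_header_sg) and the skb has aligned headroom, the
 * virtio-net header is pushed in front of the packet data so the linear part
 * needs only one sg entry; otherwise the header gets its own entry from
 * skb->cb.
 */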
static int xmit_skb(struct send_queue *sq, struct sk_buff *skb)
{
	struct virtio_net_hdr_mrg_rxbuf *hdr;
	const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest;
	struct virtnet_info *vi = sq->vq->vdev->priv;
	unsigned num_sg;
	unsigned hdr_len = vi->hdr_len;
	bool can_push;

	pr_debug("%s: xmit %p %pM\n", vi->dev->name, skb, dest);

	can_push = vi->any_header_sg &&
		!((unsigned long)skb->data & (__alignof__(*hdr) - 1)) &&
		!skb_header_cloned(skb) && skb_headroom(skb) >= hdr_len;
	/* Even if we can, don't push here yet as this would skew
	 * csum_start offset below. */
	if (can_push)
		hdr = (struct virtio_net_hdr_mrg_rxbuf *)(skb->data - hdr_len);
	else
		hdr = skb_vnet_hdr(skb);

	if (virtio_net_hdr_from_skb(skb, &hdr->hdr,
				    virtio_is_little_endian(vi->vdev), false))
		BUG();

	if (vi->mergeable_rx_bufs)
		hdr->num_buffers = 0;

	sg_init_table(sq->sg, skb_shinfo(skb)->nr_frags + (can_push ? 1 : 2));
	if (can_push) {
		__skb_push(skb, hdr_len);
		num_sg = skb_to_sgvec(skb, sq->sg, 0, skb->len);
		/* Pull header back to avoid skew in tx bytes calculations. */
		__skb_pull(skb, hdr_len);
	} else {
		sg_set_buf(sq->sg, hdr, hdr_len);
		num_sg = skb_to_sgvec(skb, sq->sg + 1, 0, skb->len) + 1;
	}
	return virtqueue_add_outbuf(sq->vq, sq->sg, num_sg, skb, GFP_ATOMIC);
}

static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);
	int qnum = skb_get_queue_mapping(skb);
	struct send_queue *sq = &vi->sq[qnum];
	int err;
	struct netdev_queue *txq = netdev_get_tx_queue(dev, qnum);
	bool kick = !skb->xmit_more;

	/* Free up any pending old buffers before queueing new ones. */
	free_old_xmit_skbs(sq);

	/* timestamp packet in software */
	skb_tx_timestamp(skb);

	/* Try to transmit */
	err = xmit_skb(sq, skb);

	/* This should not happen! */
	if (unlikely(err)) {
		dev->stats.tx_fifo_errors++;
		if (net_ratelimit())
			dev_warn(&dev->dev,
				 "Unexpected TXQ (%d) queue failure: %d\n", qnum, err);
		dev->stats.tx_dropped++;
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/* Don't wait up for transmitted skbs to be freed. */
	skb_orphan(skb);
	nf_reset(skb);

	/* If running out of space, stop queue to avoid getting packets that we
	 * are then unable to transmit.
	 * An alternative would be to force queuing layer to requeue the skb by
	 * returning NETDEV_TX_BUSY. However, NETDEV_TX_BUSY should not be
	 * returned in a normal path of operation: it means that driver is not
	 * maintaining the TX queue stop/start state properly, and causes
	 * the stack to do a non-trivial amount of useless work.
	 * Since most packets only take 1 or 2 ring slots, stopping the queue
	 * early means 16 slots are typically wasted.
	 */
	if (sq->vq->num_free < 2+MAX_SKB_FRAGS) {
		netif_stop_subqueue(dev, qnum);
		if (unlikely(!virtqueue_enable_cb_delayed(sq->vq))) {
			/* More just got used, free them then recheck. */
			free_old_xmit_skbs(sq);
			if (sq->vq->num_free >= 2+MAX_SKB_FRAGS) {
				netif_start_subqueue(dev, qnum);
				virtqueue_disable_cb(sq->vq);
			}
		}
	}

	if (kick || netif_xmit_stopped(txq))
		virtqueue_kick(sq->vq);

	return NETDEV_TX_OK;
}

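/* Control virtqueue protocol, as used below: each command consists of one
 * out sg with a class/cmd header, an optional out sg of command-specific
 * data, and one in sg for the single status byte written by the device.
 */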
Miller 11450b725a2cSDavid S. Miller return NETDEV_TX_OK; 1146c223a078SDavid S. Miller } 1147c223a078SDavid S. Miller 114840cbfc37SAmos Kong /* 114940cbfc37SAmos Kong * Send command via the control virtqueue and check status. Commands 115040cbfc37SAmos Kong * supported by the hypervisor, as indicated by feature bits, should 1151788a8b6dSstephen hemminger * never fail unless improperly formatted. 115240cbfc37SAmos Kong */ 115340cbfc37SAmos Kong static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd, 1154d24bae32Sstephen hemminger struct scatterlist *out) 115540cbfc37SAmos Kong { 1156f7bc9594SRusty Russell struct scatterlist *sgs[4], hdr, stat; 1157d24bae32Sstephen hemminger unsigned out_num = 0, tmp; 115840cbfc37SAmos Kong 115940cbfc37SAmos Kong /* Caller should know better */ 1160f7bc9594SRusty Russell BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ)); 116140cbfc37SAmos Kong 11622ac46030SMichael S. Tsirkin vi->ctrl_status = ~0; 11632ac46030SMichael S. Tsirkin vi->ctrl_hdr.class = class; 11642ac46030SMichael S. Tsirkin vi->ctrl_hdr.cmd = cmd; 1165f7bc9594SRusty Russell /* Add header */ 11662ac46030SMichael S. Tsirkin sg_init_one(&hdr, &vi->ctrl_hdr, sizeof(vi->ctrl_hdr)); 1167f7bc9594SRusty Russell sgs[out_num++] = &hdr; 116840cbfc37SAmos Kong 1169f7bc9594SRusty Russell if (out) 1170f7bc9594SRusty Russell sgs[out_num++] = out; 117140cbfc37SAmos Kong 1172f7bc9594SRusty Russell /* Add return status. */ 11732ac46030SMichael S. Tsirkin sg_init_one(&stat, &vi->ctrl_status, sizeof(vi->ctrl_status)); 1174d24bae32Sstephen hemminger sgs[out_num] = &stat; 117540cbfc37SAmos Kong 1176d24bae32Sstephen hemminger BUG_ON(out_num + 1 > ARRAY_SIZE(sgs)); 1177a7c58146SRusty Russell virtqueue_add_sgs(vi->cvq, sgs, out_num, 1, vi, GFP_ATOMIC); 117840cbfc37SAmos Kong 117967975901SHeinz Graalfs if (unlikely(!virtqueue_kick(vi->cvq))) 11802ac46030SMichael S. Tsirkin return vi->ctrl_status == VIRTIO_NET_OK; 118140cbfc37SAmos Kong 118240cbfc37SAmos Kong /* Spin for a response, the kick causes an ioport write, trapping 118340cbfc37SAmos Kong * into the hypervisor, so the request should be handled immediately. 118440cbfc37SAmos Kong */ 1185047b9b94SHeinz Graalfs while (!virtqueue_get_buf(vi->cvq, &tmp) && 1186047b9b94SHeinz Graalfs !virtqueue_is_broken(vi->cvq)) 118740cbfc37SAmos Kong cpu_relax(); 118840cbfc37SAmos Kong 11892ac46030SMichael S. 
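/* Annotation (editor's sketch, not upstream code): a typical caller
 * mirrors virtnet_set_rx_mode() below. Command payloads live in struct
 * virtnet_info (the vi->ctrl_* fields) rather than on the stack, since
 * sg_init_one() needs an address that can be DMA-mapped:
 *
 *	struct scatterlist sg;
 *
 *	vi->ctrl_promisc = 1;
 *	sg_init_one(&sg, &vi->ctrl_promisc, sizeof(vi->ctrl_promisc));
 *	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
 *				  VIRTIO_NET_CTRL_RX_PROMISC, &sg))
 *		dev_warn(&vi->dev->dev, "Failed to enable promisc mode.\n");
 */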
Tsirkin return vi->ctrl_status == VIRTIO_NET_OK; 119040cbfc37SAmos Kong } 119140cbfc37SAmos Kong 11929c46f6d4SAlex Williamson static int virtnet_set_mac_address(struct net_device *dev, void *p) 11939c46f6d4SAlex Williamson { 11949c46f6d4SAlex Williamson struct virtnet_info *vi = netdev_priv(dev); 11959c46f6d4SAlex Williamson struct virtio_device *vdev = vi->vdev; 1196f2f2c8b4SJiri Pirko int ret; 1197e37e2ff3SAndy Lutomirski struct sockaddr *addr; 11987e58d5aeSAmos Kong struct scatterlist sg; 11999c46f6d4SAlex Williamson 1200801822d1SShyam Saini addr = kmemdup(p, sizeof(*addr), GFP_KERNEL); 1201e37e2ff3SAndy Lutomirski if (!addr) 1202e37e2ff3SAndy Lutomirski return -ENOMEM; 1203e37e2ff3SAndy Lutomirski 1204e37e2ff3SAndy Lutomirski ret = eth_prepare_mac_addr_change(dev, addr); 1205f2f2c8b4SJiri Pirko if (ret) 1206e37e2ff3SAndy Lutomirski goto out; 12079c46f6d4SAlex Williamson 12087e58d5aeSAmos Kong if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR)) { 12097e58d5aeSAmos Kong sg_init_one(&sg, addr->sa_data, dev->addr_len); 12107e58d5aeSAmos Kong if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC, 1211d24bae32Sstephen hemminger VIRTIO_NET_CTRL_MAC_ADDR_SET, &sg)) { 12127e58d5aeSAmos Kong dev_warn(&vdev->dev, 12137e58d5aeSAmos Kong "Failed to set mac address by vq command.\n"); 1214e37e2ff3SAndy Lutomirski ret = -EINVAL; 1215e37e2ff3SAndy Lutomirski goto out; 12167e58d5aeSAmos Kong } 12177e93a02fSMichael S. Tsirkin } else if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC) && 12187e93a02fSMichael S. Tsirkin !virtio_has_feature(vdev, VIRTIO_F_VERSION_1)) { 1219855e0c52SRusty Russell unsigned int i; 1220855e0c52SRusty Russell 1221855e0c52SRusty Russell /* Naturally, this has an atomicity problem. */ 1222855e0c52SRusty Russell for (i = 0; i < dev->addr_len; i++) 1223855e0c52SRusty Russell virtio_cwrite8(vdev, 1224855e0c52SRusty Russell offsetof(struct virtio_net_config, mac) + 1225855e0c52SRusty Russell i, addr->sa_data[i]); 12267e58d5aeSAmos Kong } 12277e58d5aeSAmos Kong 12287e58d5aeSAmos Kong eth_commit_mac_addr_change(dev, p); 1229e37e2ff3SAndy Lutomirski ret = 0; 12309c46f6d4SAlex Williamson 1231e37e2ff3SAndy Lutomirski out: 1232e37e2ff3SAndy Lutomirski kfree(addr); 1233e37e2ff3SAndy Lutomirski return ret; 12349c46f6d4SAlex Williamson } 12359c46f6d4SAlex Williamson 1236bc1f4470Sstephen hemminger static void virtnet_stats(struct net_device *dev, 12373fa2a1dfSstephen hemminger struct rtnl_link_stats64 *tot) 12383fa2a1dfSstephen hemminger { 12393fa2a1dfSstephen hemminger struct virtnet_info *vi = netdev_priv(dev); 12403fa2a1dfSstephen hemminger int cpu; 12413fa2a1dfSstephen hemminger unsigned int start; 12423fa2a1dfSstephen hemminger 12433fa2a1dfSstephen hemminger for_each_possible_cpu(cpu) { 124458472a76SEric Dumazet struct virtnet_stats *stats = per_cpu_ptr(vi->stats, cpu); 12453fa2a1dfSstephen hemminger u64 tpackets, tbytes, rpackets, rbytes; 12463fa2a1dfSstephen hemminger 12473fa2a1dfSstephen hemminger do { 124857a7744eSEric W. Biederman start = u64_stats_fetch_begin_irq(&stats->tx_syncp); 12493fa2a1dfSstephen hemminger tpackets = stats->tx_packets; 12503fa2a1dfSstephen hemminger tbytes = stats->tx_bytes; 125157a7744eSEric W. Biederman } while (u64_stats_fetch_retry_irq(&stats->tx_syncp, start)); 125283a27052SEric Dumazet 125383a27052SEric Dumazet do { 125457a7744eSEric W. Biederman start = u64_stats_fetch_begin_irq(&stats->rx_syncp); 12553fa2a1dfSstephen hemminger rpackets = stats->rx_packets; 12563fa2a1dfSstephen hemminger rbytes = stats->rx_bytes; 125757a7744eSEric W. 
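/* Annotation (editor's note, not upstream code): the u64_stats
 * begin/retry pair is a seqcount. On 64-bit hosts it compiles away; on
 * 32-bit hosts the two 64-bit counters cannot be read atomically, so if
 * the retry check below observes that a writer raced with this read,
 * the snapshot is discarded and taken again.
 */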
Biederman } while (u64_stats_fetch_retry_irq(&stats->rx_syncp, start)); 12583fa2a1dfSstephen hemminger 12593fa2a1dfSstephen hemminger tot->rx_packets += rpackets; 12603fa2a1dfSstephen hemminger tot->tx_packets += tpackets; 12613fa2a1dfSstephen hemminger tot->rx_bytes += rbytes; 12623fa2a1dfSstephen hemminger tot->tx_bytes += tbytes; 12633fa2a1dfSstephen hemminger } 12643fa2a1dfSstephen hemminger 12653fa2a1dfSstephen hemminger tot->tx_dropped = dev->stats.tx_dropped; 1266021ac8d3SRick Jones tot->tx_fifo_errors = dev->stats.tx_fifo_errors; 12673fa2a1dfSstephen hemminger tot->rx_dropped = dev->stats.rx_dropped; 12683fa2a1dfSstephen hemminger tot->rx_length_errors = dev->stats.rx_length_errors; 12693fa2a1dfSstephen hemminger tot->rx_frame_errors = dev->stats.rx_frame_errors; 12703fa2a1dfSstephen hemminger } 12713fa2a1dfSstephen hemminger 1272da74e89dSAmit Shah #ifdef CONFIG_NET_POLL_CONTROLLER 1273da74e89dSAmit Shah static void virtnet_netpoll(struct net_device *dev) 1274da74e89dSAmit Shah { 1275da74e89dSAmit Shah struct virtnet_info *vi = netdev_priv(dev); 1276986a4f4dSJason Wang int i; 1277da74e89dSAmit Shah 1278986a4f4dSJason Wang for (i = 0; i < vi->curr_queue_pairs; i++) 1279986a4f4dSJason Wang napi_schedule(&vi->rq[i].napi); 1280da74e89dSAmit Shah } 1281da74e89dSAmit Shah #endif 1282da74e89dSAmit Shah 1283586d17c5SJason Wang static void virtnet_ack_link_announce(struct virtnet_info *vi) 1284586d17c5SJason Wang { 1285586d17c5SJason Wang rtnl_lock(); 1286586d17c5SJason Wang if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_ANNOUNCE, 1287d24bae32Sstephen hemminger VIRTIO_NET_CTRL_ANNOUNCE_ACK, NULL)) 1288586d17c5SJason Wang dev_warn(&vi->dev->dev, "Failed to ack link announce.\n"); 1289586d17c5SJason Wang rtnl_unlock(); 1290586d17c5SJason Wang } 1291586d17c5SJason Wang 129247315329SJohn Fastabend static int _virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs) 1293986a4f4dSJason Wang { 1294986a4f4dSJason Wang struct scatterlist sg; 1295986a4f4dSJason Wang struct net_device *dev = vi->dev; 1296986a4f4dSJason Wang 1297986a4f4dSJason Wang if (!vi->has_cvq || !virtio_has_feature(vi->vdev, VIRTIO_NET_F_MQ)) 1298986a4f4dSJason Wang return 0; 1299986a4f4dSJason Wang 1300a725ee3eSAndy Lutomirski vi->ctrl_mq.virtqueue_pairs = cpu_to_virtio16(vi->vdev, queue_pairs); 1301a725ee3eSAndy Lutomirski sg_init_one(&sg, &vi->ctrl_mq, sizeof(vi->ctrl_mq)); 1302986a4f4dSJason Wang 1303986a4f4dSJason Wang if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ, 1304d24bae32Sstephen hemminger VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET, &sg)) { 1305986a4f4dSJason Wang dev_warn(&dev->dev, "Failed to set the number of queue pairs to %d\n", 1306986a4f4dSJason Wang queue_pairs); 1307986a4f4dSJason Wang return -EINVAL; 130855257d72SSasha Levin } else { 1309986a4f4dSJason Wang vi->curr_queue_pairs = queue_pairs; 131035ed159bSJason Wang /* virtnet_open() will refill when the device is brought up.
*/ 131135ed159bSJason Wang if (dev->flags & IFF_UP) 13129b9cd802SJason Wang schedule_delayed_work(&vi->refill, 0); 131355257d72SSasha Levin } 1314986a4f4dSJason Wang 1315986a4f4dSJason Wang return 0; 1316986a4f4dSJason Wang } 1317986a4f4dSJason Wang 131847315329SJohn Fastabend static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs) 131947315329SJohn Fastabend { 132047315329SJohn Fastabend int err; 132147315329SJohn Fastabend 132247315329SJohn Fastabend rtnl_lock(); 132347315329SJohn Fastabend err = _virtnet_set_queues(vi, queue_pairs); 132447315329SJohn Fastabend rtnl_unlock(); 132547315329SJohn Fastabend return err; 132647315329SJohn Fastabend } 132747315329SJohn Fastabend 1328296f96fcSRusty Russell static int virtnet_close(struct net_device *dev) 1329296f96fcSRusty Russell { 1330296f96fcSRusty Russell struct virtnet_info *vi = netdev_priv(dev); 1331986a4f4dSJason Wang int i; 1332296f96fcSRusty Russell 1333b2baed69SRusty Russell /* Make sure refill_work doesn't re-enable napi! */ 1334b2baed69SRusty Russell cancel_delayed_work_sync(&vi->refill); 1335986a4f4dSJason Wang 1336986a4f4dSJason Wang for (i = 0; i < vi->max_queue_pairs; i++) 1337986a4f4dSJason Wang napi_disable(&vi->rq[i].napi); 1338296f96fcSRusty Russell 1339296f96fcSRusty Russell return 0; 1340296f96fcSRusty Russell } 1341296f96fcSRusty Russell 13422af7698eSAlex Williamson static void virtnet_set_rx_mode(struct net_device *dev) 13432af7698eSAlex Williamson { 13442af7698eSAlex Williamson struct virtnet_info *vi = netdev_priv(dev); 1345f565a7c2SAlex Williamson struct scatterlist sg[2]; 1346f565a7c2SAlex Williamson struct virtio_net_ctrl_mac *mac_data; 1347ccffad25SJiri Pirko struct netdev_hw_addr *ha; 134832e7bfc4SJiri Pirko int uc_count; 13494cd24eafSJiri Pirko int mc_count; 1350f565a7c2SAlex Williamson void *buf; 1351f565a7c2SAlex Williamson int i; 13522af7698eSAlex Williamson 1353788a8b6dSstephen hemminger /* We can't dynamically set ndo_set_rx_mode, so return gracefully */ 13542af7698eSAlex Williamson if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_RX)) 13552af7698eSAlex Williamson return; 13562af7698eSAlex Williamson 13572ac46030SMichael S. Tsirkin vi->ctrl_promisc = ((dev->flags & IFF_PROMISC) != 0); 13582ac46030SMichael S. Tsirkin vi->ctrl_allmulti = ((dev->flags & IFF_ALLMULTI) != 0); 13592af7698eSAlex Williamson 13602ac46030SMichael S. Tsirkin sg_init_one(sg, &vi->ctrl_promisc, sizeof(vi->ctrl_promisc)); 13612af7698eSAlex Williamson 13622af7698eSAlex Williamson if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX, 1363d24bae32Sstephen hemminger VIRTIO_NET_CTRL_RX_PROMISC, sg)) 13642af7698eSAlex Williamson dev_warn(&dev->dev, "Failed to %sable promisc mode.\n", 13652ac46030SMichael S. Tsirkin vi->ctrl_promisc ? "en" : "dis"); 13662af7698eSAlex Williamson 13672ac46030SMichael S. Tsirkin sg_init_one(sg, &vi->ctrl_allmulti, sizeof(vi->ctrl_allmulti)); 13682af7698eSAlex Williamson 13692af7698eSAlex Williamson if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX, 1370d24bae32Sstephen hemminger VIRTIO_NET_CTRL_RX_ALLMULTI, sg)) 13712af7698eSAlex Williamson dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n", 13722ac46030SMichael S. Tsirkin vi->ctrl_allmulti ? 
"en" : "dis"); 1373f565a7c2SAlex Williamson 137432e7bfc4SJiri Pirko uc_count = netdev_uc_count(dev); 13754cd24eafSJiri Pirko mc_count = netdev_mc_count(dev); 1376f565a7c2SAlex Williamson /* MAC filter - use one buffer for both lists */ 13774cd24eafSJiri Pirko buf = kzalloc(((uc_count + mc_count) * ETH_ALEN) + 1378f565a7c2SAlex Williamson (2 * sizeof(mac_data->entries)), GFP_ATOMIC); 13794cd24eafSJiri Pirko mac_data = buf; 1380e68ed8f0SJoe Perches if (!buf) 1381f565a7c2SAlex Williamson return; 1382f565a7c2SAlex Williamson 138323e258e1SAlex Williamson sg_init_table(sg, 2); 138423e258e1SAlex Williamson 1385f565a7c2SAlex Williamson /* Store the unicast list and count in the front of the buffer */ 1386fdd819b2SMichael S. Tsirkin mac_data->entries = cpu_to_virtio32(vi->vdev, uc_count); 1387ccffad25SJiri Pirko i = 0; 138832e7bfc4SJiri Pirko netdev_for_each_uc_addr(ha, dev) 1389ccffad25SJiri Pirko memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN); 1390f565a7c2SAlex Williamson 1391f565a7c2SAlex Williamson sg_set_buf(&sg[0], mac_data, 139232e7bfc4SJiri Pirko sizeof(mac_data->entries) + (uc_count * ETH_ALEN)); 1393f565a7c2SAlex Williamson 1394f565a7c2SAlex Williamson /* multicast list and count fill the end */ 139532e7bfc4SJiri Pirko mac_data = (void *)&mac_data->macs[uc_count][0]; 1396f565a7c2SAlex Williamson 1397fdd819b2SMichael S. Tsirkin mac_data->entries = cpu_to_virtio32(vi->vdev, mc_count); 1398567ec874SJiri Pirko i = 0; 139922bedad3SJiri Pirko netdev_for_each_mc_addr(ha, dev) 140022bedad3SJiri Pirko memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN); 1401f565a7c2SAlex Williamson 1402f565a7c2SAlex Williamson sg_set_buf(&sg[1], mac_data, 14034cd24eafSJiri Pirko sizeof(mac_data->entries) + (mc_count * ETH_ALEN)); 1404f565a7c2SAlex Williamson 1405f565a7c2SAlex Williamson if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC, 1406d24bae32Sstephen hemminger VIRTIO_NET_CTRL_MAC_TABLE_SET, sg)) 140799e872aeSThomas Huth dev_warn(&dev->dev, "Failed to set MAC filter table.\n"); 1408f565a7c2SAlex Williamson 1409f565a7c2SAlex Williamson kfree(buf); 14102af7698eSAlex Williamson } 14112af7698eSAlex Williamson 141280d5c368SPatrick McHardy static int virtnet_vlan_rx_add_vid(struct net_device *dev, 141380d5c368SPatrick McHardy __be16 proto, u16 vid) 14140bde9569SAlex Williamson { 14150bde9569SAlex Williamson struct virtnet_info *vi = netdev_priv(dev); 14160bde9569SAlex Williamson struct scatterlist sg; 14170bde9569SAlex Williamson 1418a725ee3eSAndy Lutomirski vi->ctrl_vid = vid; 1419a725ee3eSAndy Lutomirski sg_init_one(&sg, &vi->ctrl_vid, sizeof(vi->ctrl_vid)); 14200bde9569SAlex Williamson 14210bde9569SAlex Williamson if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN, 1422d24bae32Sstephen hemminger VIRTIO_NET_CTRL_VLAN_ADD, &sg)) 14230bde9569SAlex Williamson dev_warn(&dev->dev, "Failed to add VLAN ID %d.\n", vid); 14248e586137SJiri Pirko return 0; 14250bde9569SAlex Williamson } 14260bde9569SAlex Williamson 142780d5c368SPatrick McHardy static int virtnet_vlan_rx_kill_vid(struct net_device *dev, 142880d5c368SPatrick McHardy __be16 proto, u16 vid) 14290bde9569SAlex Williamson { 14300bde9569SAlex Williamson struct virtnet_info *vi = netdev_priv(dev); 14310bde9569SAlex Williamson struct scatterlist sg; 14320bde9569SAlex Williamson 1433a725ee3eSAndy Lutomirski vi->ctrl_vid = vid; 1434a725ee3eSAndy Lutomirski sg_init_one(&sg, &vi->ctrl_vid, sizeof(vi->ctrl_vid)); 14350bde9569SAlex Williamson 14360bde9569SAlex Williamson if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN, 1437d24bae32Sstephen hemminger 
VIRTIO_NET_CTRL_VLAN_DEL, &sg)) 14380bde9569SAlex Williamson dev_warn(&dev->dev, "Failed to kill VLAN ID %d.\n", vid); 14398e586137SJiri Pirko return 0; 14400bde9569SAlex Williamson } 14410bde9569SAlex Williamson 14428898c21cSWanlong Gao static void virtnet_clean_affinity(struct virtnet_info *vi, long hcpu) 1443986a4f4dSJason Wang { 1444986a4f4dSJason Wang int i; 14458898c21cSWanlong Gao 14468898c21cSWanlong Gao if (vi->affinity_hint_set) { 14478898c21cSWanlong Gao for (i = 0; i < vi->max_queue_pairs; i++) { 14488898c21cSWanlong Gao virtqueue_set_affinity(vi->rq[i].vq, -1); 14498898c21cSWanlong Gao virtqueue_set_affinity(vi->sq[i].vq, -1); 14508898c21cSWanlong Gao } 14518898c21cSWanlong Gao 14528898c21cSWanlong Gao vi->affinity_hint_set = false; 14538898c21cSWanlong Gao } 14548898c21cSWanlong Gao } 14558898c21cSWanlong Gao 14568898c21cSWanlong Gao static void virtnet_set_affinity(struct virtnet_info *vi) 1457986a4f4dSJason Wang { 1458986a4f4dSJason Wang int i; 145947be2479SWanlong Gao int cpu; 1460986a4f4dSJason Wang 1461986a4f4dSJason Wang /* In multiqueue mode, when the number of CPUs equals the number of 1462986a4f4dSJason Wang * queue pairs, we let each queue pair be private to one CPU by 1463986a4f4dSJason Wang * setting the affinity hint, eliminating contention. 1464986a4f4dSJason Wang */ 14658898c21cSWanlong Gao if (vi->curr_queue_pairs == 1 || 14668898c21cSWanlong Gao vi->max_queue_pairs != num_online_cpus()) { 14678898c21cSWanlong Gao virtnet_clean_affinity(vi, -1); 1468986a4f4dSJason Wang return; 1469986a4f4dSJason Wang } 1470986a4f4dSJason Wang 147147be2479SWanlong Gao i = 0; 147247be2479SWanlong Gao for_each_online_cpu(cpu) { 1473986a4f4dSJason Wang virtqueue_set_affinity(vi->rq[i].vq, cpu); 1474986a4f4dSJason Wang virtqueue_set_affinity(vi->sq[i].vq, cpu); 14759bb8ca86SJason Wang netif_set_xps_queue(vi->dev, cpumask_of(cpu), i); 147647be2479SWanlong Gao i++; 1477986a4f4dSJason Wang } 1478986a4f4dSJason Wang 1479986a4f4dSJason Wang vi->affinity_hint_set = true; 148047be2479SWanlong Gao } 1481986a4f4dSJason Wang 14828017c279SSebastian Andrzej Siewior static int virtnet_cpu_online(unsigned int cpu, struct hlist_node *node) 14838de4b2f3SWanlong Gao { 14848017c279SSebastian Andrzej Siewior struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info, 14858017c279SSebastian Andrzej Siewior node); 14868de4b2f3SWanlong Gao virtnet_set_affinity(vi); 14878017c279SSebastian Andrzej Siewior return 0; 14888de4b2f3SWanlong Gao } 14893ab098dfSJason Wang 14908017c279SSebastian Andrzej Siewior static int virtnet_cpu_dead(unsigned int cpu, struct hlist_node *node) 14918017c279SSebastian Andrzej Siewior { 14928017c279SSebastian Andrzej Siewior struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info, 14938017c279SSebastian Andrzej Siewior node_dead); 14948017c279SSebastian Andrzej Siewior virtnet_set_affinity(vi); 14958017c279SSebastian Andrzej Siewior return 0; 14968017c279SSebastian Andrzej Siewior } 14978017c279SSebastian Andrzej Siewior 14988017c279SSebastian Andrzej Siewior static int virtnet_cpu_down_prep(unsigned int cpu, struct hlist_node *node) 14998017c279SSebastian Andrzej Siewior { 15008017c279SSebastian Andrzej Siewior struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info, 15018017c279SSebastian Andrzej Siewior node); 15028017c279SSebastian Andrzej Siewior 15038017c279SSebastian Andrzej Siewior virtnet_clean_affinity(vi, cpu); 15048017c279SSebastian Andrzej Siewior return 0; 15058017c279SSebastian Andrzej Siewior } 15068017c279SSebastian
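/* Annotation (editor's sketch, not upstream code): the callbacks above
 * follow the multi-instance CPU hotplug pattern; each device registers
 * its hlist nodes against a dynamically allocated online state plus the
 * static CPUHP_VIRT_NET_DEAD state, so affinity hints are recomputed as
 * CPUs come and go. The state itself would be set up at module init
 * roughly like this (name string assumed for illustration):
 *
 *	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
 *				      "virtio/net:online",
 *				      virtnet_cpu_online,
 *				      virtnet_cpu_down_prep);
 *	if (ret >= 0)
 *		virtionet_online = ret;
 */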
Andrzej Siewior 15078017c279SSebastian Andrzej Siewior static enum cpuhp_state virtionet_online; 15088017c279SSebastian Andrzej Siewior 15098017c279SSebastian Andrzej Siewior static int virtnet_cpu_notif_add(struct virtnet_info *vi) 15108017c279SSebastian Andrzej Siewior { 15118017c279SSebastian Andrzej Siewior int ret; 15128017c279SSebastian Andrzej Siewior 15138017c279SSebastian Andrzej Siewior ret = cpuhp_state_add_instance_nocalls(virtionet_online, &vi->node); 15148017c279SSebastian Andrzej Siewior if (ret) 15158017c279SSebastian Andrzej Siewior return ret; 15168017c279SSebastian Andrzej Siewior ret = cpuhp_state_add_instance_nocalls(CPUHP_VIRT_NET_DEAD, 15178017c279SSebastian Andrzej Siewior &vi->node_dead); 15188017c279SSebastian Andrzej Siewior if (!ret) 15198017c279SSebastian Andrzej Siewior return ret; 15208017c279SSebastian Andrzej Siewior cpuhp_state_remove_instance_nocalls(virtionet_online, &vi->node); 15218017c279SSebastian Andrzej Siewior return ret; 15228017c279SSebastian Andrzej Siewior } 15238017c279SSebastian Andrzej Siewior 15248017c279SSebastian Andrzej Siewior static void virtnet_cpu_notif_remove(struct virtnet_info *vi) 15258017c279SSebastian Andrzej Siewior { 15268017c279SSebastian Andrzej Siewior cpuhp_state_remove_instance_nocalls(virtionet_online, &vi->node); 15278017c279SSebastian Andrzej Siewior cpuhp_state_remove_instance_nocalls(CPUHP_VIRT_NET_DEAD, 15288017c279SSebastian Andrzej Siewior &vi->node_dead); 1529a9ea3fc6SHerbert Xu } 1530a9ea3fc6SHerbert Xu 15318f9f4668SRick Jones static void virtnet_get_ringparam(struct net_device *dev, 15328f9f4668SRick Jones struct ethtool_ringparam *ring) 15338f9f4668SRick Jones { 15348f9f4668SRick Jones struct virtnet_info *vi = netdev_priv(dev); 15358f9f4668SRick Jones 1536986a4f4dSJason Wang ring->rx_max_pending = virtqueue_get_vring_size(vi->rq[0].vq); 1537986a4f4dSJason Wang ring->tx_max_pending = virtqueue_get_vring_size(vi->sq[0].vq); 15388f9f4668SRick Jones ring->rx_pending = ring->rx_max_pending; 15398f9f4668SRick Jones ring->tx_pending = ring->tx_max_pending; 15408f9f4668SRick Jones } 15418f9f4668SRick Jones 154266846048SRick Jones 154366846048SRick Jones static void virtnet_get_drvinfo(struct net_device *dev, 154466846048SRick Jones struct ethtool_drvinfo *info) 154566846048SRick Jones { 154666846048SRick Jones struct virtnet_info *vi = netdev_priv(dev); 154766846048SRick Jones struct virtio_device *vdev = vi->vdev; 154866846048SRick Jones 154966846048SRick Jones strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver)); 155066846048SRick Jones strlcpy(info->version, VIRTNET_DRIVER_VERSION, sizeof(info->version)); 155166846048SRick Jones strlcpy(info->bus_info, virtio_bus_name(vdev), sizeof(info->bus_info)); 155266846048SRick Jones 155366846048SRick Jones } 155466846048SRick Jones 1555d73bcd2cSJason Wang /* TODO: Eliminate OOO packets during switching */ 1556d73bcd2cSJason Wang static int virtnet_set_channels(struct net_device *dev, 1557d73bcd2cSJason Wang struct ethtool_channels *channels) 1558d73bcd2cSJason Wang { 1559d73bcd2cSJason Wang struct virtnet_info *vi = netdev_priv(dev); 1560d73bcd2cSJason Wang u16 queue_pairs = channels->combined_count; 1561d73bcd2cSJason Wang int err; 1562d73bcd2cSJason Wang 1563d73bcd2cSJason Wang /* We don't support separate rx/tx channels. 1564d73bcd2cSJason Wang * We don't allow setting 'other' channels. 
1565d73bcd2cSJason Wang */ 1566d73bcd2cSJason Wang if (channels->rx_count || channels->tx_count || channels->other_count) 1567d73bcd2cSJason Wang return -EINVAL; 1568d73bcd2cSJason Wang 1569c18e9cd6SAmos Kong if (queue_pairs > vi->max_queue_pairs || queue_pairs == 0) 1570d73bcd2cSJason Wang return -EINVAL; 1571d73bcd2cSJason Wang 1572f600b690SJohn Fastabend /* For now we don't support modifying channels while XDP is loaded. 1573f600b690SJohn Fastabend * Also, when XDP is loaded all RX queues have XDP programs, so we only 1574f600b690SJohn Fastabend * need to check a single RX queue. 1575f600b690SJohn Fastabend */ 1576f600b690SJohn Fastabend if (vi->rq[0].xdp_prog) 1577f600b690SJohn Fastabend return -EINVAL; 1578f600b690SJohn Fastabend 157947be2479SWanlong Gao get_online_cpus(); 158047315329SJohn Fastabend err = _virtnet_set_queues(vi, queue_pairs); 1581d73bcd2cSJason Wang if (!err) { 1582d73bcd2cSJason Wang netif_set_real_num_tx_queues(dev, queue_pairs); 1583d73bcd2cSJason Wang netif_set_real_num_rx_queues(dev, queue_pairs); 1584d73bcd2cSJason Wang 15858898c21cSWanlong Gao virtnet_set_affinity(vi); 1586d73bcd2cSJason Wang } 158747be2479SWanlong Gao put_online_cpus(); 1588d73bcd2cSJason Wang 1589d73bcd2cSJason Wang return err; 1590d73bcd2cSJason Wang } 1591d73bcd2cSJason Wang 1592d73bcd2cSJason Wang static void virtnet_get_channels(struct net_device *dev, 1593d73bcd2cSJason Wang struct ethtool_channels *channels) 1594d73bcd2cSJason Wang { 1595d73bcd2cSJason Wang struct virtnet_info *vi = netdev_priv(dev); 1596d73bcd2cSJason Wang 1597d73bcd2cSJason Wang channels->combined_count = vi->curr_queue_pairs; 1598d73bcd2cSJason Wang channels->max_combined = vi->max_queue_pairs; 1599d73bcd2cSJason Wang channels->max_other = 0; 1600d73bcd2cSJason Wang channels->rx_count = 0; 1601d73bcd2cSJason Wang channels->tx_count = 0; 1602d73bcd2cSJason Wang channels->other_count = 0; 1603d73bcd2cSJason Wang } 1604d73bcd2cSJason Wang 160516032be5SNikolay Aleksandrov /* Check if the user is trying to change anything besides speed/duplex */ 160616032be5SNikolay Aleksandrov static bool virtnet_validate_ethtool_cmd(const struct ethtool_cmd *cmd) 160716032be5SNikolay Aleksandrov { 160816032be5SNikolay Aleksandrov struct ethtool_cmd diff1 = *cmd; 160916032be5SNikolay Aleksandrov struct ethtool_cmd diff2 = {}; 161016032be5SNikolay Aleksandrov 16110cf3ace9SNikolay Aleksandrov /* cmd is always set, so we need to clear it; validate the port type, 16120cf3ace9SNikolay Aleksandrov * and without autonegotiation we can ignore the advertising field. 16130cf3ace9SNikolay Aleksandrov */ 161416032be5SNikolay Aleksandrov ethtool_cmd_speed_set(&diff1, 0); 16150cf3ace9SNikolay Aleksandrov diff2.port = PORT_OTHER; 161616032be5SNikolay Aleksandrov diff1.advertising = 0; 161716032be5SNikolay Aleksandrov diff1.duplex = 0; 161816032be5SNikolay Aleksandrov diff1.cmd = 0; 161916032be5SNikolay Aleksandrov 162016032be5SNikolay Aleksandrov return !memcmp(&diff1, &diff2, sizeof(diff1)); 162116032be5SNikolay Aleksandrov } 162216032be5SNikolay Aleksandrov 162316032be5SNikolay Aleksandrov static int virtnet_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) 162416032be5SNikolay Aleksandrov { 162516032be5SNikolay Aleksandrov struct virtnet_info *vi = netdev_priv(dev); 162616032be5SNikolay Aleksandrov u32 speed; 162716032be5SNikolay Aleksandrov 162816032be5SNikolay Aleksandrov speed = ethtool_cmd_speed(cmd); 162916032be5SNikolay Aleksandrov /* don't allow custom speed and duplex */ 163016032be5SNikolay Aleksandrov if (!ethtool_validate_speed(speed)
|| 163116032be5SNikolay Aleksandrov !ethtool_validate_duplex(cmd->duplex) || 163216032be5SNikolay Aleksandrov !virtnet_validate_ethtool_cmd(cmd)) 163316032be5SNikolay Aleksandrov return -EINVAL; 163416032be5SNikolay Aleksandrov vi->speed = speed; 163516032be5SNikolay Aleksandrov vi->duplex = cmd->duplex; 163616032be5SNikolay Aleksandrov 163716032be5SNikolay Aleksandrov return 0; 163816032be5SNikolay Aleksandrov } 163916032be5SNikolay Aleksandrov 164016032be5SNikolay Aleksandrov static int virtnet_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) 164116032be5SNikolay Aleksandrov { 164216032be5SNikolay Aleksandrov struct virtnet_info *vi = netdev_priv(dev); 164316032be5SNikolay Aleksandrov 164416032be5SNikolay Aleksandrov ethtool_cmd_speed_set(cmd, vi->speed); 164516032be5SNikolay Aleksandrov cmd->duplex = vi->duplex; 164616032be5SNikolay Aleksandrov cmd->port = PORT_OTHER; 164716032be5SNikolay Aleksandrov 164816032be5SNikolay Aleksandrov return 0; 164916032be5SNikolay Aleksandrov } 165016032be5SNikolay Aleksandrov 165116032be5SNikolay Aleksandrov static void virtnet_init_settings(struct net_device *dev) 165216032be5SNikolay Aleksandrov { 165316032be5SNikolay Aleksandrov struct virtnet_info *vi = netdev_priv(dev); 165416032be5SNikolay Aleksandrov 165516032be5SNikolay Aleksandrov vi->speed = SPEED_UNKNOWN; 165616032be5SNikolay Aleksandrov vi->duplex = DUPLEX_UNKNOWN; 165716032be5SNikolay Aleksandrov } 165816032be5SNikolay Aleksandrov 16590fc0b732SStephen Hemminger static const struct ethtool_ops virtnet_ethtool_ops = { 166066846048SRick Jones .get_drvinfo = virtnet_get_drvinfo, 16619f4d26d0SMark McLoughlin .get_link = ethtool_op_get_link, 16628f9f4668SRick Jones .get_ringparam = virtnet_get_ringparam, 1663d73bcd2cSJason Wang .set_channels = virtnet_set_channels, 1664d73bcd2cSJason Wang .get_channels = virtnet_get_channels, 1665074c3582SJacob Keller .get_ts_info = ethtool_op_get_ts_info, 166616032be5SNikolay Aleksandrov .get_settings = virtnet_get_settings, 166716032be5SNikolay Aleksandrov .set_settings = virtnet_set_settings, 1668a9ea3fc6SHerbert Xu }; 1669a9ea3fc6SHerbert Xu 1670f600b690SJohn Fastabend static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog) 1671f600b690SJohn Fastabend { 1672f600b690SJohn Fastabend unsigned long int max_sz = PAGE_SIZE - sizeof(struct padded_vnet_hdr); 1673f600b690SJohn Fastabend struct virtnet_info *vi = netdev_priv(dev); 1674f600b690SJohn Fastabend struct bpf_prog *old_prog; 1675672aafd5SJohn Fastabend u16 xdp_qp = 0, curr_qp; 1676672aafd5SJohn Fastabend int i, err; 1677f600b690SJohn Fastabend 1678529ec6acSJakub Kicinski if (prog && prog->xdp_adjust_head) { 1679529ec6acSJakub Kicinski netdev_warn(dev, "Does not support bpf_xdp_adjust_head()\n"); 1680529ec6acSJakub Kicinski return -EOPNOTSUPP; 1681529ec6acSJakub Kicinski } 1682529ec6acSJakub Kicinski 1683f600b690SJohn Fastabend if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO4) || 168492502fe8SJason Wang virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO6) || 168592502fe8SJason Wang virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_ECN) || 168692502fe8SJason Wang virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_UFO)) { 1687f600b690SJohn Fastabend netdev_warn(dev, "can't set XDP while host is implementing LRO, disable LRO first\n"); 1688f600b690SJohn Fastabend return -EOPNOTSUPP; 1689f600b690SJohn Fastabend } 1690f600b690SJohn Fastabend 1691f600b690SJohn Fastabend if (vi->mergeable_rx_bufs && !vi->any_header_sg) { 1692f600b690SJohn Fastabend netdev_warn(dev, "XDP expects 
header/data in single page, any_header_sg required\n"); 1693f600b690SJohn Fastabend return -EINVAL; 1694f600b690SJohn Fastabend } 1695f600b690SJohn Fastabend 1696f600b690SJohn Fastabend if (dev->mtu > max_sz) { 1697f600b690SJohn Fastabend netdev_warn(dev, "XDP requires MTU less than %lu\n", max_sz); 1698f600b690SJohn Fastabend return -EINVAL; 1699f600b690SJohn Fastabend } 1700f600b690SJohn Fastabend 1701672aafd5SJohn Fastabend curr_qp = vi->curr_queue_pairs - vi->xdp_queue_pairs; 1702672aafd5SJohn Fastabend if (prog) 1703672aafd5SJohn Fastabend xdp_qp = nr_cpu_ids; 1704672aafd5SJohn Fastabend 1705672aafd5SJohn Fastabend /* XDP requires extra queues for XDP_TX */ 1706672aafd5SJohn Fastabend if (curr_qp + xdp_qp > vi->max_queue_pairs) { 1707672aafd5SJohn Fastabend netdev_warn(dev, "request %i queues but max is %i\n", 1708672aafd5SJohn Fastabend curr_qp + xdp_qp, vi->max_queue_pairs); 1709672aafd5SJohn Fastabend return -ENOMEM; 1710672aafd5SJohn Fastabend } 1711672aafd5SJohn Fastabend 171247315329SJohn Fastabend err = _virtnet_set_queues(vi, curr_qp + xdp_qp); 1713672aafd5SJohn Fastabend if (err) { 1714672aafd5SJohn Fastabend dev_warn(&dev->dev, "XDP Device queue allocation failure.\n"); 1715672aafd5SJohn Fastabend return err; 1716672aafd5SJohn Fastabend } 1717672aafd5SJohn Fastabend 1718f600b690SJohn Fastabend if (prog) { 1719f600b690SJohn Fastabend prog = bpf_prog_add(prog, vi->max_queue_pairs - 1); 1720672aafd5SJohn Fastabend if (IS_ERR(prog)) { 172147315329SJohn Fastabend _virtnet_set_queues(vi, curr_qp); 1722f600b690SJohn Fastabend return PTR_ERR(prog); 1723f600b690SJohn Fastabend } 1724672aafd5SJohn Fastabend } 1725672aafd5SJohn Fastabend 1726672aafd5SJohn Fastabend vi->xdp_queue_pairs = xdp_qp; 1727672aafd5SJohn Fastabend netif_set_real_num_rx_queues(dev, curr_qp + xdp_qp); 1728f600b690SJohn Fastabend 1729f600b690SJohn Fastabend for (i = 0; i < vi->max_queue_pairs; i++) { 1730f600b690SJohn Fastabend old_prog = rtnl_dereference(vi->rq[i].xdp_prog); 1731f600b690SJohn Fastabend rcu_assign_pointer(vi->rq[i].xdp_prog, prog); 1732f600b690SJohn Fastabend if (old_prog) 1733f600b690SJohn Fastabend bpf_prog_put(old_prog); 1734f600b690SJohn Fastabend } 1735f600b690SJohn Fastabend 1736f600b690SJohn Fastabend return 0; 1737f600b690SJohn Fastabend } 1738f600b690SJohn Fastabend 1739f600b690SJohn Fastabend static bool virtnet_xdp_query(struct net_device *dev) 1740f600b690SJohn Fastabend { 1741f600b690SJohn Fastabend struct virtnet_info *vi = netdev_priv(dev); 1742f600b690SJohn Fastabend int i; 1743f600b690SJohn Fastabend 1744f600b690SJohn Fastabend for (i = 0; i < vi->max_queue_pairs; i++) { 1745f600b690SJohn Fastabend if (vi->rq[i].xdp_prog) 1746f600b690SJohn Fastabend return true; 1747f600b690SJohn Fastabend } 1748f600b690SJohn Fastabend return false; 1749f600b690SJohn Fastabend } 1750f600b690SJohn Fastabend 1751f600b690SJohn Fastabend static int virtnet_xdp(struct net_device *dev, struct netdev_xdp *xdp) 1752f600b690SJohn Fastabend { 1753f600b690SJohn Fastabend switch (xdp->command) { 1754f600b690SJohn Fastabend case XDP_SETUP_PROG: 1755f600b690SJohn Fastabend return virtnet_xdp_set(dev, xdp->prog); 1756f600b690SJohn Fastabend case XDP_QUERY_PROG: 1757f600b690SJohn Fastabend xdp->prog_attached = virtnet_xdp_query(dev); 1758f600b690SJohn Fastabend return 0; 1759f600b690SJohn Fastabend default: 1760f600b690SJohn Fastabend return -EINVAL; 1761f600b690SJohn Fastabend } 1762f600b690SJohn Fastabend } 1763f600b690SJohn Fastabend 176476288b4eSStephen Hemminger static const struct net_device_ops 
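/* Annotation (editor's note, not upstream code): XDP_TX transmits from
 * the XDP receive path itself, so virtnet_xdp_set() above reserves one
 * extra send queue per possible CPU (nr_cpu_ids) to keep those
 * transmissions from contending with the regular stack, and rejects the
 * program with -ENOMEM when curr_qp + xdp_qp would exceed the queue
 * pairs the device advertised. The ndo_xdp hook in the ops table below
 * dispatches both program attach and attach-state queries.
 */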
virtnet_netdev = { 176576288b4eSStephen Hemminger .ndo_open = virtnet_open, 176676288b4eSStephen Hemminger .ndo_stop = virtnet_close, 176776288b4eSStephen Hemminger .ndo_start_xmit = start_xmit, 176876288b4eSStephen Hemminger .ndo_validate_addr = eth_validate_addr, 17699c46f6d4SAlex Williamson .ndo_set_mac_address = virtnet_set_mac_address, 17702af7698eSAlex Williamson .ndo_set_rx_mode = virtnet_set_rx_mode, 17713fa2a1dfSstephen hemminger .ndo_get_stats64 = virtnet_stats, 17721824a989SAlex Williamson .ndo_vlan_rx_add_vid = virtnet_vlan_rx_add_vid, 17731824a989SAlex Williamson .ndo_vlan_rx_kill_vid = virtnet_vlan_rx_kill_vid, 177476288b4eSStephen Hemminger #ifdef CONFIG_NET_POLL_CONTROLLER 177576288b4eSStephen Hemminger .ndo_poll_controller = virtnet_netpoll, 177676288b4eSStephen Hemminger #endif 1777f600b690SJohn Fastabend .ndo_xdp = virtnet_xdp, 177876288b4eSStephen Hemminger }; 177976288b4eSStephen Hemminger 1780586d17c5SJason Wang static void virtnet_config_changed_work(struct work_struct *work) 17819f4d26d0SMark McLoughlin { 1782586d17c5SJason Wang struct virtnet_info *vi = 1783586d17c5SJason Wang container_of(work, struct virtnet_info, config_work); 17849f4d26d0SMark McLoughlin u16 v; 17859f4d26d0SMark McLoughlin 1786855e0c52SRusty Russell if (virtio_cread_feature(vi->vdev, VIRTIO_NET_F_STATUS, 1787855e0c52SRusty Russell struct virtio_net_config, status, &v) < 0) 1788507613bfSMichael S. Tsirkin return; 1789586d17c5SJason Wang 1790586d17c5SJason Wang if (v & VIRTIO_NET_S_ANNOUNCE) { 1791ee89bab1SAmerigo Wang netdev_notify_peers(vi->dev); 1792586d17c5SJason Wang virtnet_ack_link_announce(vi); 1793586d17c5SJason Wang } 17949f4d26d0SMark McLoughlin 17959f4d26d0SMark McLoughlin /* Ignore unknown (future) status bits */ 17969f4d26d0SMark McLoughlin v &= VIRTIO_NET_S_LINK_UP; 17979f4d26d0SMark McLoughlin 17989f4d26d0SMark McLoughlin if (vi->status == v) 1799507613bfSMichael S. 
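/* Link state is unchanged; nothing to update. */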
Tsirkin return; 18009f4d26d0SMark McLoughlin 18019f4d26d0SMark McLoughlin vi->status = v; 18029f4d26d0SMark McLoughlin 18039f4d26d0SMark McLoughlin if (vi->status & VIRTIO_NET_S_LINK_UP) { 18049f4d26d0SMark McLoughlin netif_carrier_on(vi->dev); 1805986a4f4dSJason Wang netif_tx_wake_all_queues(vi->dev); 18069f4d26d0SMark McLoughlin } else { 18079f4d26d0SMark McLoughlin netif_carrier_off(vi->dev); 1808986a4f4dSJason Wang netif_tx_stop_all_queues(vi->dev); 18099f4d26d0SMark McLoughlin } 18109f4d26d0SMark McLoughlin } 18119f4d26d0SMark McLoughlin 18129f4d26d0SMark McLoughlin static void virtnet_config_changed(struct virtio_device *vdev) 18139f4d26d0SMark McLoughlin { 18149f4d26d0SMark McLoughlin struct virtnet_info *vi = vdev->priv; 18159f4d26d0SMark McLoughlin 18163b07e9caSTejun Heo schedule_work(&vi->config_work); 18179f4d26d0SMark McLoughlin } 18189f4d26d0SMark McLoughlin 1819986a4f4dSJason Wang static void virtnet_free_queues(struct virtnet_info *vi) 1820986a4f4dSJason Wang { 1821d4fb84eeSAndrey Vagin int i; 1822d4fb84eeSAndrey Vagin 1823ab3971b1SJason Wang for (i = 0; i < vi->max_queue_pairs; i++) { 1824ab3971b1SJason Wang napi_hash_del(&vi->rq[i].napi); 1825d4fb84eeSAndrey Vagin netif_napi_del(&vi->rq[i].napi); 1826ab3971b1SJason Wang } 1827d4fb84eeSAndrey Vagin 1828963abe5cSEric Dumazet /* We called napi_hash_del() before netif_napi_del(), 1829963abe5cSEric Dumazet * we need to respect an RCU grace period before freeing vi->rq 1830963abe5cSEric Dumazet */ 1831963abe5cSEric Dumazet synchronize_net(); 1832963abe5cSEric Dumazet 1833986a4f4dSJason Wang kfree(vi->rq); 1834986a4f4dSJason Wang kfree(vi->sq); 1835986a4f4dSJason Wang } 1836986a4f4dSJason Wang 183747315329SJohn Fastabend static void _free_receive_bufs(struct virtnet_info *vi) 1838986a4f4dSJason Wang { 1839f600b690SJohn Fastabend struct bpf_prog *old_prog; 1840986a4f4dSJason Wang int i; 1841986a4f4dSJason Wang 1842986a4f4dSJason Wang for (i = 0; i < vi->max_queue_pairs; i++) { 1843986a4f4dSJason Wang while (vi->rq[i].pages) 1844986a4f4dSJason Wang __free_pages(get_a_page(&vi->rq[i], GFP_KERNEL), 0); 1845f600b690SJohn Fastabend 1846f600b690SJohn Fastabend old_prog = rtnl_dereference(vi->rq[i].xdp_prog); 1847f600b690SJohn Fastabend RCU_INIT_POINTER(vi->rq[i].xdp_prog, NULL); 1848f600b690SJohn Fastabend if (old_prog) 1849f600b690SJohn Fastabend bpf_prog_put(old_prog); 1850986a4f4dSJason Wang } 185147315329SJohn Fastabend } 185247315329SJohn Fastabend 185347315329SJohn Fastabend static void free_receive_bufs(struct virtnet_info *vi) 185447315329SJohn Fastabend { 185547315329SJohn Fastabend rtnl_lock(); 185647315329SJohn Fastabend _free_receive_bufs(vi); 1857f600b690SJohn Fastabend rtnl_unlock(); 1858986a4f4dSJason Wang } 1859986a4f4dSJason Wang 1860fb51879dSMichael Dalton static void free_receive_page_frags(struct virtnet_info *vi) 1861fb51879dSMichael Dalton { 1862fb51879dSMichael Dalton int i; 1863fb51879dSMichael Dalton for (i = 0; i < vi->max_queue_pairs; i++) 1864fb51879dSMichael Dalton if (vi->rq[i].alloc_frag.page) 1865fb51879dSMichael Dalton put_page(vi->rq[i].alloc_frag.page); 1866fb51879dSMichael Dalton } 1867fb51879dSMichael Dalton 1868b68df015SJohn Fastabend static bool is_xdp_raw_buffer_queue(struct virtnet_info *vi, int q) 186956434a01SJohn Fastabend { 1870b68df015SJohn Fastabend /* For small receive mode always use kfree_skb variants */ 1871b68df015SJohn Fastabend if (!vi->mergeable_rx_bufs) 1872b68df015SJohn Fastabend return false; 1873b68df015SJohn Fastabend 187456434a01SJohn Fastabend if (q < (vi->curr_queue_pairs - 
vi->xdp_queue_pairs)) 187556434a01SJohn Fastabend return false; 187656434a01SJohn Fastabend else if (q < vi->curr_queue_pairs) 187756434a01SJohn Fastabend return true; 187856434a01SJohn Fastabend else 187956434a01SJohn Fastabend return false; 188056434a01SJohn Fastabend } 188156434a01SJohn Fastabend 1882986a4f4dSJason Wang static void free_unused_bufs(struct virtnet_info *vi) 1883986a4f4dSJason Wang { 1884986a4f4dSJason Wang void *buf; 1885986a4f4dSJason Wang int i; 1886986a4f4dSJason Wang 1887986a4f4dSJason Wang for (i = 0; i < vi->max_queue_pairs; i++) { 1888986a4f4dSJason Wang struct virtqueue *vq = vi->sq[i].vq; 188956434a01SJohn Fastabend while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) { 1890b68df015SJohn Fastabend if (!is_xdp_raw_buffer_queue(vi, i)) 1891986a4f4dSJason Wang dev_kfree_skb(buf); 189256434a01SJohn Fastabend else 189356434a01SJohn Fastabend put_page(virt_to_head_page(buf)); 189456434a01SJohn Fastabend } 1895986a4f4dSJason Wang } 1896986a4f4dSJason Wang 1897986a4f4dSJason Wang for (i = 0; i < vi->max_queue_pairs; i++) { 1898986a4f4dSJason Wang struct virtqueue *vq = vi->rq[i].vq; 1899986a4f4dSJason Wang 1900986a4f4dSJason Wang while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) { 1901ab7db917SMichael Dalton if (vi->mergeable_rx_bufs) { 1902ab7db917SMichael Dalton unsigned long ctx = (unsigned long)buf; 1903ab7db917SMichael Dalton void *base = mergeable_ctx_to_buf_address(ctx); 1904ab7db917SMichael Dalton put_page(virt_to_head_page(base)); 1905ab7db917SMichael Dalton } else if (vi->big_packets) { 1906fa9fac17SAndrey Vagin give_pages(&vi->rq[i], buf); 1907ab7db917SMichael Dalton } else { 1908986a4f4dSJason Wang dev_kfree_skb(buf); 1909986a4f4dSJason Wang } 1910986a4f4dSJason Wang } 1911986a4f4dSJason Wang } 1912ab7db917SMichael Dalton } 1913986a4f4dSJason Wang 1914e9d7417bSJason Wang static void virtnet_del_vqs(struct virtnet_info *vi) 1915e9d7417bSJason Wang { 1916e9d7417bSJason Wang struct virtio_device *vdev = vi->vdev; 1917e9d7417bSJason Wang 19188898c21cSWanlong Gao virtnet_clean_affinity(vi, -1); 1919986a4f4dSJason Wang 1920e9d7417bSJason Wang vdev->config->del_vqs(vdev); 1921986a4f4dSJason Wang 1922986a4f4dSJason Wang virtnet_free_queues(vi); 1923986a4f4dSJason Wang } 1924986a4f4dSJason Wang 1925986a4f4dSJason Wang static int virtnet_find_vqs(struct virtnet_info *vi) 1926986a4f4dSJason Wang { 1927986a4f4dSJason Wang vq_callback_t **callbacks; 1928986a4f4dSJason Wang struct virtqueue **vqs; 1929986a4f4dSJason Wang int ret = -ENOMEM; 1930986a4f4dSJason Wang int i, total_vqs; 1931986a4f4dSJason Wang const char **names; 1932986a4f4dSJason Wang 1933986a4f4dSJason Wang /* We expect 1 RX virtqueue followed by 1 TX virtqueue, followed by 1934986a4f4dSJason Wang * possible N-1 RX/TX queue pairs used in multiqueue mode, followed by 1935986a4f4dSJason Wang * possible control vq. 
1936986a4f4dSJason Wang */ 1937986a4f4dSJason Wang total_vqs = vi->max_queue_pairs * 2 + 1938986a4f4dSJason Wang virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ); 1939986a4f4dSJason Wang 1940986a4f4dSJason Wang /* Allocate space for find_vqs parameters */ 1941986a4f4dSJason Wang vqs = kzalloc(total_vqs * sizeof(*vqs), GFP_KERNEL); 1942986a4f4dSJason Wang if (!vqs) 1943986a4f4dSJason Wang goto err_vq; 1944986a4f4dSJason Wang callbacks = kmalloc(total_vqs * sizeof(*callbacks), GFP_KERNEL); 1945986a4f4dSJason Wang if (!callbacks) 1946986a4f4dSJason Wang goto err_callback; 1947986a4f4dSJason Wang names = kmalloc(total_vqs * sizeof(*names), GFP_KERNEL); 1948986a4f4dSJason Wang if (!names) 1949986a4f4dSJason Wang goto err_names; 1950986a4f4dSJason Wang 1951986a4f4dSJason Wang /* Parameters for control virtqueue, if any */ 1952986a4f4dSJason Wang if (vi->has_cvq) { 1953986a4f4dSJason Wang callbacks[total_vqs - 1] = NULL; 1954986a4f4dSJason Wang names[total_vqs - 1] = "control"; 1955986a4f4dSJason Wang } 1956986a4f4dSJason Wang 1957986a4f4dSJason Wang /* Allocate/initialize parameters for send/receive virtqueues */ 1958986a4f4dSJason Wang for (i = 0; i < vi->max_queue_pairs; i++) { 1959986a4f4dSJason Wang callbacks[rxq2vq(i)] = skb_recv_done; 1960986a4f4dSJason Wang callbacks[txq2vq(i)] = skb_xmit_done; 1961986a4f4dSJason Wang sprintf(vi->rq[i].name, "input.%d", i); 1962986a4f4dSJason Wang sprintf(vi->sq[i].name, "output.%d", i); 1963986a4f4dSJason Wang names[rxq2vq(i)] = vi->rq[i].name; 1964986a4f4dSJason Wang names[txq2vq(i)] = vi->sq[i].name; 1965986a4f4dSJason Wang } 1966986a4f4dSJason Wang 1967986a4f4dSJason Wang ret = vi->vdev->config->find_vqs(vi->vdev, total_vqs, vqs, callbacks, 1968986a4f4dSJason Wang names); 1969986a4f4dSJason Wang if (ret) 1970986a4f4dSJason Wang goto err_find; 1971986a4f4dSJason Wang 1972986a4f4dSJason Wang if (vi->has_cvq) { 1973986a4f4dSJason Wang vi->cvq = vqs[total_vqs - 1]; 1974986a4f4dSJason Wang if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VLAN)) 1975f646968fSPatrick McHardy vi->dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; 1976986a4f4dSJason Wang } 1977986a4f4dSJason Wang 1978986a4f4dSJason Wang for (i = 0; i < vi->max_queue_pairs; i++) { 1979986a4f4dSJason Wang vi->rq[i].vq = vqs[rxq2vq(i)]; 1980986a4f4dSJason Wang vi->sq[i].vq = vqs[txq2vq(i)]; 1981986a4f4dSJason Wang } 1982986a4f4dSJason Wang 1983986a4f4dSJason Wang kfree(names); 1984986a4f4dSJason Wang kfree(callbacks); 1985986a4f4dSJason Wang kfree(vqs); 1986986a4f4dSJason Wang 1987986a4f4dSJason Wang return 0; 1988986a4f4dSJason Wang 1989986a4f4dSJason Wang err_find: 1990986a4f4dSJason Wang kfree(names); 1991986a4f4dSJason Wang err_names: 1992986a4f4dSJason Wang kfree(callbacks); 1993986a4f4dSJason Wang err_callback: 1994986a4f4dSJason Wang kfree(vqs); 1995986a4f4dSJason Wang err_vq: 1996986a4f4dSJason Wang return ret; 1997986a4f4dSJason Wang } 1998986a4f4dSJason Wang 1999986a4f4dSJason Wang static int virtnet_alloc_queues(struct virtnet_info *vi) 2000986a4f4dSJason Wang { 2001986a4f4dSJason Wang int i; 2002986a4f4dSJason Wang 2003986a4f4dSJason Wang vi->sq = kzalloc(sizeof(*vi->sq) * vi->max_queue_pairs, GFP_KERNEL); 2004986a4f4dSJason Wang if (!vi->sq) 2005986a4f4dSJason Wang goto err_sq; 2006986a4f4dSJason Wang vi->rq = kzalloc(sizeof(*vi->rq) * vi->max_queue_pairs, GFP_KERNEL); 2007008d4278SAmerigo Wang if (!vi->rq) 2008986a4f4dSJason Wang goto err_rq; 2009986a4f4dSJason Wang 2010986a4f4dSJason Wang INIT_DELAYED_WORK(&vi->refill, refill_work); 2011986a4f4dSJason Wang for (i = 0; i < 
vi->max_queue_pairs; i++) { 2012986a4f4dSJason Wang vi->rq[i].pages = NULL; 2013986a4f4dSJason Wang netif_napi_add(vi->dev, &vi->rq[i].napi, virtnet_poll, 2014986a4f4dSJason Wang napi_weight); 2015986a4f4dSJason Wang 2016986a4f4dSJason Wang sg_init_table(vi->rq[i].sg, ARRAY_SIZE(vi->rq[i].sg)); 20175377d758SJohannes Berg ewma_pkt_len_init(&vi->rq[i].mrg_avg_pkt_len); 2018986a4f4dSJason Wang sg_init_table(vi->sq[i].sg, ARRAY_SIZE(vi->sq[i].sg)); 2019986a4f4dSJason Wang } 2020986a4f4dSJason Wang 2021986a4f4dSJason Wang return 0; 2022986a4f4dSJason Wang 2023986a4f4dSJason Wang err_rq: 2024986a4f4dSJason Wang kfree(vi->sq); 2025986a4f4dSJason Wang err_sq: 2026986a4f4dSJason Wang return -ENOMEM; 2027e9d7417bSJason Wang } 2028e9d7417bSJason Wang 20293f9c10b0SAmit Shah static int init_vqs(struct virtnet_info *vi) 20303f9c10b0SAmit Shah { 2031986a4f4dSJason Wang int ret; 20323f9c10b0SAmit Shah 2033986a4f4dSJason Wang /* Allocate send & receive queues */ 2034986a4f4dSJason Wang ret = virtnet_alloc_queues(vi); 2035986a4f4dSJason Wang if (ret) 2036986a4f4dSJason Wang goto err; 20373f9c10b0SAmit Shah 2038986a4f4dSJason Wang ret = virtnet_find_vqs(vi); 2039986a4f4dSJason Wang if (ret) 2040986a4f4dSJason Wang goto err_free; 20413f9c10b0SAmit Shah 204247be2479SWanlong Gao get_online_cpus(); 20438898c21cSWanlong Gao virtnet_set_affinity(vi); 204447be2479SWanlong Gao put_online_cpus(); 204547be2479SWanlong Gao 20463f9c10b0SAmit Shah return 0; 2047986a4f4dSJason Wang 2048986a4f4dSJason Wang err_free: 2049986a4f4dSJason Wang virtnet_free_queues(vi); 2050986a4f4dSJason Wang err: 2051986a4f4dSJason Wang return ret; 20523f9c10b0SAmit Shah } 20533f9c10b0SAmit Shah 2054fbf28d78SMichael Dalton #ifdef CONFIG_SYSFS 2055fbf28d78SMichael Dalton static ssize_t mergeable_rx_buffer_size_show(struct netdev_rx_queue *queue, 2056fbf28d78SMichael Dalton struct rx_queue_attribute *attribute, char *buf) 2057fbf28d78SMichael Dalton { 2058fbf28d78SMichael Dalton struct virtnet_info *vi = netdev_priv(queue->dev); 2059fbf28d78SMichael Dalton unsigned int queue_index = get_netdev_rx_queue_index(queue); 20605377d758SJohannes Berg struct ewma_pkt_len *avg; 2061fbf28d78SMichael Dalton 2062fbf28d78SMichael Dalton BUG_ON(queue_index >= vi->max_queue_pairs); 2063fbf28d78SMichael Dalton avg = &vi->rq[queue_index].mrg_avg_pkt_len; 2064fbf28d78SMichael Dalton return sprintf(buf, "%u\n", get_mergeable_buf_len(avg)); 2065fbf28d78SMichael Dalton } 2066fbf28d78SMichael Dalton 2067fbf28d78SMichael Dalton static struct rx_queue_attribute mergeable_rx_buffer_size_attribute = 2068fbf28d78SMichael Dalton __ATTR_RO(mergeable_rx_buffer_size); 2069fbf28d78SMichael Dalton 2070fbf28d78SMichael Dalton static struct attribute *virtio_net_mrg_rx_attrs[] = { 2071fbf28d78SMichael Dalton &mergeable_rx_buffer_size_attribute.attr, 2072fbf28d78SMichael Dalton NULL 2073fbf28d78SMichael Dalton }; 2074fbf28d78SMichael Dalton 2075fbf28d78SMichael Dalton static const struct attribute_group virtio_net_mrg_rx_group = { 2076fbf28d78SMichael Dalton .name = "virtio_net", 2077fbf28d78SMichael Dalton .attrs = virtio_net_mrg_rx_attrs 2078fbf28d78SMichael Dalton }; 2079fbf28d78SMichael Dalton #endif 2080fbf28d78SMichael Dalton 2081892d6eb1SJason Wang static bool virtnet_fail_on_feature(struct virtio_device *vdev, 2082892d6eb1SJason Wang unsigned int fbit, 2083892d6eb1SJason Wang const char *fname, const char *dname) 2084892d6eb1SJason Wang { 2085892d6eb1SJason Wang if (!virtio_has_feature(vdev, fbit)) 2086892d6eb1SJason Wang return false; 2087892d6eb1SJason Wang 
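/* The feature is advertised although the capability it depends on
 * (named by dname) is missing; report the inconsistent device config.
 */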
2088892d6eb1SJason Wang dev_err(&vdev->dev, "device advertises feature %s but not %s", 2089892d6eb1SJason Wang fname, dname); 2090892d6eb1SJason Wang 2091892d6eb1SJason Wang return true; 2092892d6eb1SJason Wang } 2093892d6eb1SJason Wang 2094892d6eb1SJason Wang #define VIRTNET_FAIL_ON(vdev, fbit, dbit) \ 2095892d6eb1SJason Wang virtnet_fail_on_feature(vdev, fbit, #fbit, dbit) 2096892d6eb1SJason Wang 2097892d6eb1SJason Wang static bool virtnet_validate_features(struct virtio_device *vdev) 2098892d6eb1SJason Wang { 2099892d6eb1SJason Wang if (!virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ) && 2100892d6eb1SJason Wang (VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_CTRL_RX, 2101892d6eb1SJason Wang "VIRTIO_NET_F_CTRL_VQ") || 2102892d6eb1SJason Wang VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_CTRL_VLAN, 2103892d6eb1SJason Wang "VIRTIO_NET_F_CTRL_VQ") || 2104892d6eb1SJason Wang VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_GUEST_ANNOUNCE, 2105892d6eb1SJason Wang "VIRTIO_NET_F_CTRL_VQ") || 2106892d6eb1SJason Wang VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_MQ, "VIRTIO_NET_F_CTRL_VQ") || 2107892d6eb1SJason Wang VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR, 2108892d6eb1SJason Wang "VIRTIO_NET_F_CTRL_VQ"))) { 2109892d6eb1SJason Wang return false; 2110892d6eb1SJason Wang } 2111892d6eb1SJason Wang 2112892d6eb1SJason Wang return true; 2113892d6eb1SJason Wang } 2114892d6eb1SJason Wang 2115d0c2c997SJarod Wilson #define MIN_MTU ETH_MIN_MTU 2116d0c2c997SJarod Wilson #define MAX_MTU ETH_MAX_MTU 2117d0c2c997SJarod Wilson 2118296f96fcSRusty Russell static int virtnet_probe(struct virtio_device *vdev) 2119296f96fcSRusty Russell { 2120986a4f4dSJason Wang int i, err; 2121296f96fcSRusty Russell struct net_device *dev; 2122296f96fcSRusty Russell struct virtnet_info *vi; 2123986a4f4dSJason Wang u16 max_queue_pairs; 212414de9d11SAaron Conole int mtu; 2125986a4f4dSJason Wang 21266ba42248SMichael S. Tsirkin if (!vdev->config->get) { 21276ba42248SMichael S. Tsirkin dev_err(&vdev->dev, "%s failure: config access disabled\n", 21286ba42248SMichael S. Tsirkin __func__); 21296ba42248SMichael S. Tsirkin return -EINVAL; 21306ba42248SMichael S. Tsirkin } 21316ba42248SMichael S. Tsirkin 2132892d6eb1SJason Wang if (!virtnet_validate_features(vdev)) 2133892d6eb1SJason Wang return -EINVAL; 2134892d6eb1SJason Wang 2135986a4f4dSJason Wang /* Find out whether the host supports a multiqueue virtio_net device */ 2136855e0c52SRusty Russell err = virtio_cread_feature(vdev, VIRTIO_NET_F_MQ, 2137855e0c52SRusty Russell struct virtio_net_config, 2138855e0c52SRusty Russell max_virtqueue_pairs, &max_queue_pairs); 2139986a4f4dSJason Wang 2140986a4f4dSJason Wang /* We need at least 2 queues */ 2141986a4f4dSJason Wang if (err || max_queue_pairs < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN || 2142986a4f4dSJason Wang max_queue_pairs > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX || 2143986a4f4dSJason Wang !virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ)) 2144986a4f4dSJason Wang max_queue_pairs = 1; 2145296f96fcSRusty Russell 2146296f96fcSRusty Russell /* Allocate ourselves a network device with room for our info */ 2147986a4f4dSJason Wang dev = alloc_etherdev_mq(sizeof(struct virtnet_info), max_queue_pairs); 2148296f96fcSRusty Russell if (!dev) 2149296f96fcSRusty Russell return -ENOMEM; 2150296f96fcSRusty Russell 2151296f96fcSRusty Russell /* Set up network device as normal.
*/ 2152f2f2c8b4SJiri Pirko dev->priv_flags |= IFF_UNICAST_FLT | IFF_LIVE_ADDR_CHANGE; 215376288b4eSStephen Hemminger dev->netdev_ops = &virtnet_netdev; 2154296f96fcSRusty Russell dev->features = NETIF_F_HIGHDMA; 21553fa2a1dfSstephen hemminger 21567ad24ea4SWilfried Klaebe dev->ethtool_ops = &virtnet_ethtool_ops; 2157296f96fcSRusty Russell SET_NETDEV_DEV(dev, &vdev->dev); 2158296f96fcSRusty Russell 2159296f96fcSRusty Russell /* Do we support "hardware" checksums? */ 216098e778c9SMichał Mirosław if (virtio_has_feature(vdev, VIRTIO_NET_F_CSUM)) { 2161296f96fcSRusty Russell /* This opens up the world of extra features. */ 216248900cb6SJason Wang dev->hw_features |= NETIF_F_HW_CSUM | NETIF_F_SG; 216398e778c9SMichał Mirosław if (csum) 216448900cb6SJason Wang dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG; 216598e778c9SMichał Mirosław 216698e778c9SMichał Mirosław if (virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) { 2167e3e3c423SVlad Yasevich dev->hw_features |= NETIF_F_TSO | NETIF_F_UFO 216834a48579SRusty Russell | NETIF_F_TSO_ECN | NETIF_F_TSO6; 216934a48579SRusty Russell } 21705539ae96SRusty Russell /* Individual feature bits: what can host handle? */ 217198e778c9SMichał Mirosław if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO4)) 217298e778c9SMichał Mirosław dev->hw_features |= NETIF_F_TSO; 217398e778c9SMichał Mirosław if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO6)) 217498e778c9SMichał Mirosław dev->hw_features |= NETIF_F_TSO6; 217598e778c9SMichał Mirosław if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_ECN)) 217698e778c9SMichał Mirosław dev->hw_features |= NETIF_F_TSO_ECN; 2177e3e3c423SVlad Yasevich if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_UFO)) 2178e3e3c423SVlad Yasevich dev->hw_features |= NETIF_F_UFO; 217998e778c9SMichał Mirosław 218041f2f127SJason Wang dev->features |= NETIF_F_GSO_ROBUST; 218141f2f127SJason Wang 218298e778c9SMichał Mirosław if (gso) 2183e3e3c423SVlad Yasevich dev->features |= dev->hw_features & (NETIF_F_ALL_TSO|NETIF_F_UFO); 218498e778c9SMichał Mirosław /* (!csum && gso) case will be fixed by register_netdev() */ 2185296f96fcSRusty Russell } 21864f49129bSThomas Huth if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_CSUM)) 21874f49129bSThomas Huth dev->features |= NETIF_F_RXCSUM; 2188296f96fcSRusty Russell 21894fda8302SJason Wang dev->vlan_features = dev->features; 21904fda8302SJason Wang 2191d0c2c997SJarod Wilson /* MTU range: 68 - 65535 */ 2192d0c2c997SJarod Wilson dev->min_mtu = MIN_MTU; 2193d0c2c997SJarod Wilson dev->max_mtu = MAX_MTU; 2194d0c2c997SJarod Wilson 2195296f96fcSRusty Russell /* Configuration may specify what MAC to use. Otherwise random. 
*/ 2196855e0c52SRusty Russell if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC)) 2197855e0c52SRusty Russell virtio_cread_bytes(vdev, 2198a586d4f6SRusty Russell offsetof(struct virtio_net_config, mac), 2199855e0c52SRusty Russell dev->dev_addr, dev->addr_len); 2200855e0c52SRusty Russell else 2201f2cedb63SDanny Kukawka eth_hw_addr_random(dev); 2202296f96fcSRusty Russell 2203296f96fcSRusty Russell /* Set up our device-specific information */ 2204296f96fcSRusty Russell vi = netdev_priv(dev); 2205296f96fcSRusty Russell vi->dev = dev; 2206296f96fcSRusty Russell vi->vdev = vdev; 2207d9d5dcc8SChristian Borntraeger vdev->priv = vi; 22083fa2a1dfSstephen hemminger vi->stats = alloc_percpu(struct virtnet_stats); 22093fa2a1dfSstephen hemminger err = -ENOMEM; 22103fa2a1dfSstephen hemminger if (vi->stats == NULL) 22113fa2a1dfSstephen hemminger goto free; 22123fa2a1dfSstephen hemminger 2213827da44cSJohn Stultz for_each_possible_cpu(i) { 2214827da44cSJohn Stultz struct virtnet_stats *virtnet_stats; 2215827da44cSJohn Stultz virtnet_stats = per_cpu_ptr(vi->stats, i); 2216827da44cSJohn Stultz u64_stats_init(&virtnet_stats->tx_syncp); 2217827da44cSJohn Stultz u64_stats_init(&virtnet_stats->rx_syncp); 2218827da44cSJohn Stultz } 2219827da44cSJohn Stultz 2220586d17c5SJason Wang INIT_WORK(&vi->config_work, virtnet_config_changed_work); 2221296f96fcSRusty Russell 222297402b96SHerbert Xu /* If we can receive ANY GSO packets, we must allocate large ones. */ 22238e95a202SJoe Perches if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) || 22248e95a202SJoe Perches virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6) || 2225e3e3c423SVlad Yasevich virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ECN) || 2226e3e3c423SVlad Yasevich virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_UFO)) 222797402b96SHerbert Xu vi->big_packets = true; 222897402b96SHerbert Xu 22293f2c31d9SMark McLoughlin if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF)) 22303f2c31d9SMark McLoughlin vi->mergeable_rx_bufs = true; 22313f2c31d9SMark McLoughlin 2232d04302b3SMichael S. Tsirkin if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF) || 2233d04302b3SMichael S. Tsirkin virtio_has_feature(vdev, VIRTIO_F_VERSION_1)) 2234012873d0SMichael S. Tsirkin vi->hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf); 2235012873d0SMichael S. Tsirkin else 2236012873d0SMichael S. Tsirkin vi->hdr_len = sizeof(struct virtio_net_hdr); 2237012873d0SMichael S. Tsirkin 223875993300SMichael S. Tsirkin if (virtio_has_feature(vdev, VIRTIO_F_ANY_LAYOUT) || 223975993300SMichael S. Tsirkin virtio_has_feature(vdev, VIRTIO_F_VERSION_1)) 2240e7428e95SMichael S. Tsirkin vi->any_header_sg = true; 2241e7428e95SMichael S. Tsirkin 2242986a4f4dSJason Wang if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ)) 2243986a4f4dSJason Wang vi->has_cvq = true; 2244986a4f4dSJason Wang 224514de9d11SAaron Conole if (virtio_has_feature(vdev, VIRTIO_NET_F_MTU)) { 224614de9d11SAaron Conole mtu = virtio_cread16(vdev, 224714de9d11SAaron Conole offsetof(struct virtio_net_config, 224814de9d11SAaron Conole mtu)); 224993a205eeSAaron Conole if (mtu < dev->min_mtu) { 225014de9d11SAaron Conole __virtio_clear_bit(vdev, VIRTIO_NET_F_MTU); 225193a205eeSAaron Conole } else { 2252d0c2c997SJarod Wilson dev->mtu = mtu; 225393a205eeSAaron Conole dev->max_mtu = mtu; 225493a205eeSAaron Conole } 225514de9d11SAaron Conole } 225614de9d11SAaron Conole 2257012873d0SMichael S. Tsirkin if (vi->any_header_sg) 2258012873d0SMichael S. 
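/* Reserve headroom so xmit_skb() can prepend the vnet header
 * in place (the can_push fast path) instead of spending a
 * separate descriptor on it.
 */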
22596ebbc1a6SZhangjie (HZ) 
226044900010SJason Wang /* Enable multiqueue by default */
226144900010SJason Wang if (num_online_cpus() >= max_queue_pairs)
226244900010SJason Wang vi->curr_queue_pairs = max_queue_pairs;
226344900010SJason Wang else
226444900010SJason Wang vi->curr_queue_pairs = num_online_cpus();
2265986a4f4dSJason Wang vi->max_queue_pairs = max_queue_pairs;
2266986a4f4dSJason Wang 
2267986a4f4dSJason Wang /* Allocate/initialize the rx/tx queues, and invoke find_vqs */
22683f9c10b0SAmit Shah err = init_vqs(vi);
2269d2a7dddaSMichael S. Tsirkin if (err)
22709bb8ca86SJason Wang goto free_stats;
2271d2a7dddaSMichael S. Tsirkin 
2272fbf28d78SMichael Dalton #ifdef CONFIG_SYSFS
2273fbf28d78SMichael Dalton if (vi->mergeable_rx_bufs)
2274fbf28d78SMichael Dalton dev->sysfs_rx_queue_group = &virtio_net_mrg_rx_group;
2275fbf28d78SMichael Dalton #endif
22760f13b66bSZhi Yong Wu netif_set_real_num_tx_queues(dev, vi->curr_queue_pairs);
22770f13b66bSZhi Yong Wu netif_set_real_num_rx_queues(dev, vi->curr_queue_pairs);
2278986a4f4dSJason Wang 
227916032be5SNikolay Aleksandrov virtnet_init_settings(dev);
228016032be5SNikolay Aleksandrov 
2281296f96fcSRusty Russell err = register_netdev(dev);
2282296f96fcSRusty Russell if (err) {
2283296f96fcSRusty Russell pr_debug("virtio_net: registering device failed\n");
2284d2a7dddaSMichael S. Tsirkin goto free_vqs;
2285296f96fcSRusty Russell }
2286b3369c1fSRusty Russell 
22874baf1e33SMichael S. Tsirkin virtio_device_ready(vdev);
22884baf1e33SMichael S. Tsirkin 
22898017c279SSebastian Andrzej Siewior err = virtnet_cpu_notif_add(vi);
22908de4b2f3SWanlong Gao if (err) {
22918de4b2f3SWanlong Gao pr_debug("virtio_net: registering cpu notifier failed\n");
2292f00e35e2Swangyunjian goto free_unregister_netdev;
22938de4b2f3SWanlong Gao }
22948de4b2f3SWanlong Gao 
2295a220871bSJason Wang virtnet_set_queues(vi, vi->curr_queue_pairs);
229644900010SJason Wang 
2297167c25e4SJason Wang /* Assume link up if device can't report link status,
2298167c25e4SJason Wang otherwise get link status from config. */
2299167c25e4SJason Wang if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) {
2300167c25e4SJason Wang netif_carrier_off(dev);
23013b07e9caSTejun Heo schedule_work(&vi->config_work);
2302167c25e4SJason Wang } else {
2303167c25e4SJason Wang vi->status = VIRTIO_NET_S_LINK_UP;
23044783256eSPantelis Koukousoulas netif_carrier_on(dev);
2305167c25e4SJason Wang }
23069f4d26d0SMark McLoughlin 
2307986a4f4dSJason Wang pr_debug("virtnet: registered device %s with %d RX and TX vq's\n",
2308986a4f4dSJason Wang dev->name, max_queue_pairs);
2309986a4f4dSJason Wang 
2310296f96fcSRusty Russell return 0;
2311296f96fcSRusty Russell 
2312f00e35e2Swangyunjian free_unregister_netdev:
231302465555SMichael S. Tsirkin vi->vdev->config->reset(vdev);
231402465555SMichael S. Tsirkin 
2315b3369c1fSRusty Russell unregister_netdev(dev);
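/*
 * Error unwind runs in reverse order of setup: the device is reset above
 * before unregister_netdev() so no vq callback can race with teardown, then
 * the labels below release the vqs, the per-cpu stats and finally the netdev
 * itself.
 */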
2316d2a7dddaSMichael S. Tsirkin free_vqs:
2317986a4f4dSJason Wang cancel_delayed_work_sync(&vi->refill);
2318fb51879dSMichael Dalton free_receive_page_frags(vi);
2319e9d7417bSJason Wang virtnet_del_vqs(vi);
23203fa2a1dfSstephen hemminger free_stats:
23213fa2a1dfSstephen hemminger free_percpu(vi->stats);
2322296f96fcSRusty Russell free:
2323296f96fcSRusty Russell free_netdev(dev);
2324296f96fcSRusty Russell return err;
2325296f96fcSRusty Russell }
2326296f96fcSRusty Russell 
232704486ed0SAmit Shah static void remove_vq_common(struct virtnet_info *vi)
2328296f96fcSRusty Russell {
232904486ed0SAmit Shah vi->vdev->config->reset(vi->vdev);
2330830a8a97SShirley Ma 
2331830a8a97SShirley Ma /* Free unused buffers in both send and recv, if any. */
23329ab86bbcSShirley Ma free_unused_bufs(vi);
2333fb6813f4SRusty Russell 
2334986a4f4dSJason Wang free_receive_bufs(vi);
2335d2a7dddaSMichael S. Tsirkin 
2336fb51879dSMichael Dalton free_receive_page_frags(vi);
2337fb51879dSMichael Dalton 
2338986a4f4dSJason Wang virtnet_del_vqs(vi);
233904486ed0SAmit Shah }
234004486ed0SAmit Shah 
23418cc085d6SBill Pemberton static void virtnet_remove(struct virtio_device *vdev)
234204486ed0SAmit Shah {
234304486ed0SAmit Shah struct virtnet_info *vi = vdev->priv;
234404486ed0SAmit Shah 
23458017c279SSebastian Andrzej Siewior virtnet_cpu_notif_remove(vi);
23468de4b2f3SWanlong Gao 
2347102a2786SMichael S. Tsirkin /* Make sure no work handler is accessing the device. */
2348102a2786SMichael S. Tsirkin flush_work(&vi->config_work);
2349586d17c5SJason Wang 
235004486ed0SAmit Shah unregister_netdev(vi->dev);
235104486ed0SAmit Shah 
235204486ed0SAmit Shah remove_vq_common(vi);
2353fb6813f4SRusty Russell 
23542e66f55bSKrishna Kumar free_percpu(vi->stats);
235574b2553fSRusty Russell free_netdev(vi->dev);
2356296f96fcSRusty Russell }
2357296f96fcSRusty Russell 
235889107000SAaron Lu #ifdef CONFIG_PM_SLEEP
23590741bcb5SAmit Shah static int virtnet_freeze(struct virtio_device *vdev)
23600741bcb5SAmit Shah {
23610741bcb5SAmit Shah struct virtnet_info *vi = vdev->priv;
2362986a4f4dSJason Wang int i;
23630741bcb5SAmit Shah 
23648017c279SSebastian Andrzej Siewior virtnet_cpu_notif_remove(vi);
2365ec9debbdSJason Wang 
2366102a2786SMichael S. Tsirkin /* Make sure no work handler is accessing the device */
2367102a2786SMichael S. Tsirkin flush_work(&vi->config_work);
2368586d17c5SJason Wang 
23690741bcb5SAmit Shah netif_device_detach(vi->dev);
23700741bcb5SAmit Shah cancel_delayed_work_sync(&vi->refill);
23710741bcb5SAmit Shah 
237291815639SJason Wang if (netif_running(vi->dev)) {
2373ab3971b1SJason Wang for (i = 0; i < vi->max_queue_pairs; i++)
2374986a4f4dSJason Wang napi_disable(&vi->rq[i].napi);
237591815639SJason Wang }
23760741bcb5SAmit Shah 
23770741bcb5SAmit Shah remove_vq_common(vi);
23780741bcb5SAmit Shah 
23790741bcb5SAmit Shah return 0;
23800741bcb5SAmit Shah }
23810741bcb5SAmit Shah 
23820741bcb5SAmit Shah static int virtnet_restore(struct virtio_device *vdev)
23830741bcb5SAmit Shah {
23840741bcb5SAmit Shah struct virtnet_info *vi = vdev->priv;
2385986a4f4dSJason Wang int err, i;
23860741bcb5SAmit Shah 
23870741bcb5SAmit Shah err = init_vqs(vi);
23880741bcb5SAmit Shah if (err)
23890741bcb5SAmit Shah return err;
23900741bcb5SAmit Shah 
2391e53fbd11SMichael S. Tsirkin virtio_device_ready(vdev);
2392e53fbd11SMichael S. Tsirkin 
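/*
 * Restore mirrors freeze: the vqs were torn down in virtnet_freeze() via
 * remove_vq_common(), so they are re-created here and the rx rings refilled
 * before NAPI is re-enabled and the netdev re-attached.
 */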
23936cd4ce00SJason Wang if (netif_running(vi->dev)) {
239455257d72SSasha Levin for (i = 0; i < vi->curr_queue_pairs; i++)
2395946fa564SMichael S. Tsirkin if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL))
23963b07e9caSTejun Heo schedule_delayed_work(&vi->refill, 0);
23970741bcb5SAmit Shah 
23986cd4ce00SJason Wang for (i = 0; i < vi->max_queue_pairs; i++)
23996cd4ce00SJason Wang virtnet_napi_enable(&vi->rq[i]);
24006cd4ce00SJason Wang }
24016cd4ce00SJason Wang 
24026cd4ce00SJason Wang netif_device_attach(vi->dev);
24036cd4ce00SJason Wang 
2404986a4f4dSJason Wang virtnet_set_queues(vi, vi->curr_queue_pairs);
2405986a4f4dSJason Wang 
24068017c279SSebastian Andrzej Siewior err = virtnet_cpu_notif_add(vi);
2407ec9debbdSJason Wang if (err)
2408ec9debbdSJason Wang return err;
2409ec9debbdSJason Wang 
24100741bcb5SAmit Shah return 0;
24110741bcb5SAmit Shah }
24120741bcb5SAmit Shah #endif
24130741bcb5SAmit Shah 
2414296f96fcSRusty Russell static struct virtio_device_id id_table[] = {
2415296f96fcSRusty Russell { VIRTIO_ID_NET, VIRTIO_DEV_ANY_ID },
2416296f96fcSRusty Russell { 0 },
2417296f96fcSRusty Russell };
2418296f96fcSRusty Russell 
2419f3358507SMichael S. Tsirkin #define VIRTNET_FEATURES \
2420f3358507SMichael S. Tsirkin VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GUEST_CSUM, \
2421f3358507SMichael S. Tsirkin VIRTIO_NET_F_MAC, \
2422f3358507SMichael S. Tsirkin VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6, \
2423f3358507SMichael S. Tsirkin VIRTIO_NET_F_HOST_ECN, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6, \
2424f3358507SMichael S. Tsirkin VIRTIO_NET_F_GUEST_ECN, VIRTIO_NET_F_GUEST_UFO, \
2425f3358507SMichael S. Tsirkin VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ, \
2426f3358507SMichael S. Tsirkin VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN, \
2427f3358507SMichael S. Tsirkin VIRTIO_NET_F_GUEST_ANNOUNCE, VIRTIO_NET_F_MQ, \
2428f3358507SMichael S. Tsirkin VIRTIO_NET_F_CTRL_MAC_ADDR, \
2429f3358507SMichael S. Tsirkin VIRTIO_NET_F_MTU
2430f3358507SMichael S. Tsirkin 
2431c45a6816SRusty Russell static unsigned int features[] = {
2432f3358507SMichael S. Tsirkin VIRTNET_FEATURES,
2433f3358507SMichael S. Tsirkin };
2434f3358507SMichael S. Tsirkin 
2435f3358507SMichael S. Tsirkin static unsigned int features_legacy[] = {
2436f3358507SMichael S. Tsirkin VIRTNET_FEATURES,
2437f3358507SMichael S. Tsirkin VIRTIO_NET_F_GSO,
2438e7428e95SMichael S. Tsirkin VIRTIO_F_ANY_LAYOUT,
2439c45a6816SRusty Russell };
2440c45a6816SRusty Russell 
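/*
 * The legacy table differs only in additionally offering the deprecated
 * VIRTIO_NET_F_GSO bit and VIRTIO_F_ANY_LAYOUT; modern (VERSION_1) devices
 * get the any-layout behaviour implicitly, so neither bit is negotiated
 * there.
 */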
244122402529SUwe Kleine-König static struct virtio_driver virtio_net_driver = {
2442c45a6816SRusty Russell .feature_table = features,
2443c45a6816SRusty Russell .feature_table_size = ARRAY_SIZE(features),
2444f3358507SMichael S. Tsirkin .feature_table_legacy = features_legacy,
2445f3358507SMichael S. Tsirkin .feature_table_size_legacy = ARRAY_SIZE(features_legacy),
2446296f96fcSRusty Russell .driver.name = KBUILD_MODNAME,
2447296f96fcSRusty Russell .driver.owner = THIS_MODULE,
2448296f96fcSRusty Russell .id_table = id_table,
2449296f96fcSRusty Russell .probe = virtnet_probe,
24508cc085d6SBill Pemberton .remove = virtnet_remove,
24519f4d26d0SMark McLoughlin .config_changed = virtnet_config_changed,
245289107000SAaron Lu #ifdef CONFIG_PM_SLEEP
24530741bcb5SAmit Shah .freeze = virtnet_freeze,
24540741bcb5SAmit Shah .restore = virtnet_restore,
24550741bcb5SAmit Shah #endif
2456296f96fcSRusty Russell };
2457296f96fcSRusty Russell 
24588017c279SSebastian Andrzej Siewior static __init int virtio_net_driver_init(void)
24598017c279SSebastian Andrzej Siewior {
24608017c279SSebastian Andrzej Siewior int ret;
24618017c279SSebastian Andrzej Siewior 
246273c1b41eSThomas Gleixner ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "virtio/net:online",
24638017c279SSebastian Andrzej Siewior virtnet_cpu_online,
24648017c279SSebastian Andrzej Siewior virtnet_cpu_down_prep);
24658017c279SSebastian Andrzej Siewior if (ret < 0)
24668017c279SSebastian Andrzej Siewior goto out;
24678017c279SSebastian Andrzej Siewior virtionet_online = ret;
246873c1b41eSThomas Gleixner ret = cpuhp_setup_state_multi(CPUHP_VIRT_NET_DEAD, "virtio/net:dead",
24698017c279SSebastian Andrzej Siewior NULL, virtnet_cpu_dead);
24708017c279SSebastian Andrzej Siewior if (ret)
24718017c279SSebastian Andrzej Siewior goto err_dead;
24728017c279SSebastian Andrzej Siewior 
24738017c279SSebastian Andrzej Siewior ret = register_virtio_driver(&virtio_net_driver);
24748017c279SSebastian Andrzej Siewior if (ret)
24758017c279SSebastian Andrzej Siewior goto err_virtio;
24768017c279SSebastian Andrzej Siewior return 0;
24778017c279SSebastian Andrzej Siewior err_virtio:
24788017c279SSebastian Andrzej Siewior cpuhp_remove_multi_state(CPUHP_VIRT_NET_DEAD);
24798017c279SSebastian Andrzej Siewior err_dead:
24808017c279SSebastian Andrzej Siewior cpuhp_remove_multi_state(virtionet_online);
24818017c279SSebastian Andrzej Siewior out:
24828017c279SSebastian Andrzej Siewior return ret;
24838017c279SSebastian Andrzej Siewior }
24848017c279SSebastian Andrzej Siewior module_init(virtio_net_driver_init);
24858017c279SSebastian Andrzej Siewior 
24868017c279SSebastian Andrzej Siewior static __exit void virtio_net_driver_exit(void)
24878017c279SSebastian Andrzej Siewior {
24888017c279SSebastian Andrzej Siewior cpuhp_remove_multi_state(CPUHP_VIRT_NET_DEAD);
24898017c279SSebastian Andrzej Siewior cpuhp_remove_multi_state(virtionet_online);
24908017c279SSebastian Andrzej Siewior unregister_virtio_driver(&virtio_net_driver);
24918017c279SSebastian Andrzej Siewior }
24928017c279SSebastian Andrzej Siewior module_exit(virtio_net_driver_exit);
2493296f96fcSRusty Russell 
2494296f96fcSRusty Russell MODULE_DEVICE_TABLE(virtio, id_table);
2495296f96fcSRusty Russell MODULE_DESCRIPTION("Virtio network driver");
2496296f96fcSRusty Russell MODULE_LICENSE("GPL");