/* A network driver using virtio.
 *
 * Copyright 2007 Rusty Russell <rusty@rustcorp.com.au> IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
//#define DEBUG
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/module.h>
#include <linux/virtio.h>
#include <linux/virtio_net.h>
#include <linux/scatterlist.h>
#include <linux/if_vlan.h>
#include <linux/slab.h>

static int napi_weight = 128;
module_param(napi_weight, int, 0444);

static bool csum = true, gso = true;
module_param(csum, bool, 0444);
module_param(gso, bool, 0444);
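/* All three parameters are read-only once loaded (perm 0444), so they can
 * only be chosen at load time, e.g. (editor's illustration):
 *
 *	modprobe virtio_net napi_weight=64 csum=0 gso=0
 */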
/* FIXME: MTU in config. */
#define MAX_PACKET_LEN (ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN)
#define GOOD_COPY_LEN	128

#define VIRTNET_SEND_COMMAND_SG_MAX	2
#define VIRTNET_DRIVER_VERSION "1.0.0"

struct virtnet_stats {
	struct u64_stats_sync tx_syncp;
	struct u64_stats_sync rx_syncp;
	u64 tx_bytes;
	u64 tx_packets;

	u64 rx_bytes;
	u64 rx_packets;
};

/* Internal representation of a send virtqueue */
struct send_queue {
	/* Virtqueue associated with this send queue */
	struct virtqueue *vq;

	/* TX: fragments + linear part + virtio header */
	struct scatterlist sg[MAX_SKB_FRAGS + 2];

	/* Name of the send queue: output.$index */
	char name[40];
};

/* Internal representation of a receive virtqueue */
struct receive_queue {
	/* Virtqueue associated with this receive queue */
	struct virtqueue *vq;

	struct napi_struct napi;

	/* Number of input buffers, and max we've ever had. */
	unsigned int num, max;

	/* Chain pages by the private ptr. */
	struct page *pages;

	/* RX: fragments + linear part + virtio header */
	struct scatterlist sg[MAX_SKB_FRAGS + 2];

	/* Name of this receive queue: input.$index */
	char name[40];
};
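/* Both sg[] tables hold MAX_SKB_FRAGS + 2 entries: one for the virtio
 * header, one for the linear part of the skb, and up to MAX_SKB_FRAGS
 * page fragments.  For example (editor's illustration): with 4 KiB pages
 * MAX_SKB_FRAGS is typically 18, giving a 20-entry table per queue.
 */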
struct virtnet_info {
	struct virtio_device *vdev;
	struct virtqueue *cvq;
	struct net_device *dev;
	struct send_queue *sq;
	struct receive_queue *rq;
	unsigned int status;

	/* Max # of queue pairs supported by the device */
	u16 max_queue_pairs;

	/* # of queue pairs currently used by the driver */
	u16 curr_queue_pairs;

	/* I like... big packets and I cannot lie! */
	bool big_packets;

	/* Host will merge rx buffers for big packets (shake it! shake it!) */
	bool mergeable_rx_bufs;

	/* Has control virtqueue */
	bool has_cvq;

	/* Enable config space updates */
	bool config_enable;

	/* Active statistics */
	struct virtnet_stats __percpu *stats;

	/* Work struct for refilling if we run low on memory. */
	struct delayed_work refill;

	/* Work struct for config space updates */
	struct work_struct config_work;

	/* Lock for config space updates */
	struct mutex config_lock;

	/* Is the affinity hint set for the virtqueues? */
	bool affinity_hint_set;
};

struct skb_vnet_hdr {
	union {
		struct virtio_net_hdr hdr;
		struct virtio_net_hdr_mrg_rxbuf mhdr;
	};
	unsigned int num_sg;
};

struct padded_vnet_hdr {
	struct virtio_net_hdr hdr;
	/*
	 * virtio_net_hdr should be in a separate sg buffer because of a
	 * QEMU bug, and the data sg buffer shares the same page with this
	 * header sg.  This padding makes the next sg 16-byte aligned after
	 * the virtio_net_hdr.
	 */
	char padding[6];
};

/* Converting between virtqueue no. and kernel tx/rx queue no.
 * 0:rx0 1:tx0 2:rx1 3:tx1 ... 2N:rxN 2N+1:txN 2N+2:cvq
 */
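/* Worked example (editor's illustration): with max_queue_pairs == 2 the
 * virtqueues are 0:rx0 1:tx0 2:rx1 3:tx1 4:cvq, so vq2txq() maps vq 3 to
 * txq 1, txq2vq(1) == 3, vq2rxq() maps vq 2 to rxq 1, and rxq2vq(1) == 2.
 */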
static int vq2txq(struct virtqueue *vq)
{
	return (virtqueue_get_queue_index(vq) - 1) / 2;
}

static int txq2vq(int txq)
{
	return txq * 2 + 1;
}

static int vq2rxq(struct virtqueue *vq)
{
	return virtqueue_get_queue_index(vq) / 2;
}

static int rxq2vq(int rxq)
{
	return rxq * 2;
}

static inline struct skb_vnet_hdr *skb_vnet_hdr(struct sk_buff *skb)
{
	return (struct skb_vnet_hdr *)skb->cb;
}

/*
 * private is used to chain pages for big packets; keep the most
 * recently used list at the beginning for reuse.
 */
static void give_pages(struct receive_queue *rq, struct page *page)
{
	struct page *end;

	/* Find end of list, sew whole thing into rq->pages. */
	for (end = page; end->private; end = (struct page *)end->private);
	end->private = (unsigned long)rq->pages;
	rq->pages = page;
}

static struct page *get_a_page(struct receive_queue *rq, gfp_t gfp_mask)
{
	struct page *p = rq->pages;

	if (p) {
		rq->pages = (struct page *)p->private;
		/* clear private here, it is used to chain pages */
		p->private = 0;
	} else
		p = alloc_page(gfp_mask);
	return p;
}
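/* Example of the chaining (editor's illustration): if the free list is
 * currently page C and give_pages() is called with the chain A->B, the
 * last page's ->private is pointed at C, giving rq->pages = A->B->C;
 * get_a_page() then pops A first.
 */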
static void skb_xmit_done(struct virtqueue *vq)
{
	struct virtnet_info *vi = vq->vdev->priv;

	/* Suppress further interrupts. */
	virtqueue_disable_cb(vq);

	/* We were probably waiting for more output buffers. */
	netif_wake_subqueue(vi->dev, vq2txq(vq));
}

static void set_skb_frag(struct sk_buff *skb, struct page *page,
			 unsigned int offset, unsigned int *len)
{
	int size = min((unsigned)PAGE_SIZE - offset, *len);
	int i = skb_shinfo(skb)->nr_frags;

	__skb_fill_page_desc(skb, i, page, offset, size);

	skb->data_len += size;
	skb->len += size;
	skb->truesize += PAGE_SIZE;
	skb_shinfo(skb)->nr_frags++;
	*len -= size;
}

/* Called from bottom half context */
static struct sk_buff *page_to_skb(struct receive_queue *rq,
				   struct page *page, unsigned int len)
{
	struct virtnet_info *vi = rq->vq->vdev->priv;
	struct sk_buff *skb;
	struct skb_vnet_hdr *hdr;
	unsigned int copy, hdr_len, offset;
	char *p;

	p = page_address(page);

	/* copy small packet so we can reuse these pages for small data */
	skb = netdev_alloc_skb_ip_align(vi->dev, GOOD_COPY_LEN);
	if (unlikely(!skb))
		return NULL;

	hdr = skb_vnet_hdr(skb);

	if (vi->mergeable_rx_bufs) {
		hdr_len = sizeof hdr->mhdr;
		offset = hdr_len;
	} else {
		hdr_len = sizeof hdr->hdr;
		offset = sizeof(struct padded_vnet_hdr);
	}

	memcpy(hdr, p, hdr_len);

	len -= hdr_len;
	p += offset;

	copy = len;
	if (copy > skb_tailroom(skb))
		copy = skb_tailroom(skb);
	memcpy(skb_put(skb, copy), p, copy);

	len -= copy;
	offset += copy;

	/*
	 * Verify that we can indeed put this data into an skb.  This is
	 * here to handle cases when the device erroneously tries to
	 * receive more than is possible, which is usually the sign of a
	 * broken device.
	 */
	if (unlikely(len > MAX_SKB_FRAGS * PAGE_SIZE)) {
		net_dbg_ratelimited("%s: too much data\n", skb->dev->name);
		dev_kfree_skb(skb);
		return NULL;
	}

	while (len) {
		set_skb_frag(skb, page, offset, &len);
		page = (struct page *)page->private;
		offset = 0;
	}

	if (page)
		give_pages(rq, page);

	return skb;
}

static int receive_mergeable(struct receive_queue *rq, struct sk_buff *skb)
{
	struct skb_vnet_hdr *hdr = skb_vnet_hdr(skb);
	struct page *page;
	int num_buf, i, len;

	num_buf = hdr->mhdr.num_buffers;
	while (--num_buf) {
		i = skb_shinfo(skb)->nr_frags;
		if (i >= MAX_SKB_FRAGS) {
			pr_debug("%s: packet too long\n", skb->dev->name);
			skb->dev->stats.rx_length_errors++;
			return -EINVAL;
		}
		page = virtqueue_get_buf(rq->vq, &len);
		if (!page) {
			pr_debug("%s: rx error: %d buffers missing\n",
				 skb->dev->name, hdr->mhdr.num_buffers);
			skb->dev->stats.rx_length_errors++;
			return -EINVAL;
		}

		if (len > PAGE_SIZE)
			len = PAGE_SIZE;

		set_skb_frag(skb, page, 0, &len);

		--rq->num;
	}
	return 0;
}

static void receive_buf(struct receive_queue *rq, void *buf, unsigned int len)
{
	struct virtnet_info *vi = rq->vq->vdev->priv;
	struct net_device *dev = vi->dev;
	struct virtnet_stats *stats = this_cpu_ptr(vi->stats);
	struct sk_buff *skb;
	struct page *page;
	struct skb_vnet_hdr *hdr;

	if (unlikely(len < sizeof(struct virtio_net_hdr) + ETH_HLEN)) {
		pr_debug("%s: short packet %i\n", dev->name, len);
		dev->stats.rx_length_errors++;
		if (vi->mergeable_rx_bufs || vi->big_packets)
			give_pages(rq, buf);
		else
			dev_kfree_skb(buf);
		return;
	}

	if (!vi->mergeable_rx_bufs && !vi->big_packets) {
		skb = buf;
		len -= sizeof(struct virtio_net_hdr);
		skb_trim(skb, len);
	} else {
		page = buf;
		skb = page_to_skb(rq, page, len);
		if (unlikely(!skb)) {
			dev->stats.rx_dropped++;
			give_pages(rq, page);
			return;
		}
		if (vi->mergeable_rx_bufs)
			if (receive_mergeable(rq, skb)) {
				dev_kfree_skb(skb);
				return;
			}
	}

	hdr = skb_vnet_hdr(skb);

	u64_stats_update_begin(&stats->rx_syncp);
	stats->rx_bytes += skb->len;
	stats->rx_packets++;
	u64_stats_update_end(&stats->rx_syncp);

	if (hdr->hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
		pr_debug("Needs csum!\n");
		if (!skb_partial_csum_set(skb,
					  hdr->hdr.csum_start,
					  hdr->hdr.csum_offset))
			goto frame_err;
	} else if (hdr->hdr.flags & VIRTIO_NET_HDR_F_DATA_VALID) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	}

	skb->protocol = eth_type_trans(skb, dev);
	pr_debug("Receiving skb proto 0x%04x len %i type %i\n",
		 ntohs(skb->protocol), skb->len, skb->pkt_type);

	if (hdr->hdr.gso_type != VIRTIO_NET_HDR_GSO_NONE) {
		pr_debug("GSO!\n");
		switch (hdr->hdr.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
		case VIRTIO_NET_HDR_GSO_TCPV4:
			skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
			break;
		case VIRTIO_NET_HDR_GSO_UDP:
			skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
			break;
		case VIRTIO_NET_HDR_GSO_TCPV6:
			skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
			break;
		default:
			net_warn_ratelimited("%s: bad gso type %u.\n",
					     dev->name, hdr->hdr.gso_type);
			goto frame_err;
		}

		if (hdr->hdr.gso_type & VIRTIO_NET_HDR_GSO_ECN)
			skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;

		skb_shinfo(skb)->gso_size = hdr->hdr.gso_size;
		if (skb_shinfo(skb)->gso_size == 0) {
			net_warn_ratelimited("%s: zero gso size.\n",
					     dev->name);
			goto frame_err;
		}

		/* Header must be checked, and gso_segs computed. */
		skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
		skb_shinfo(skb)->gso_segs = 0;
	}

	netif_receive_skb(skb);
	return;

frame_err:
	dev->stats.rx_frame_errors++;
	dev_kfree_skb(skb);
}

static int add_recvbuf_small(struct receive_queue *rq, gfp_t gfp)
{
	struct virtnet_info *vi = rq->vq->vdev->priv;
	struct sk_buff *skb;
	struct skb_vnet_hdr *hdr;
	int err;

	skb = __netdev_alloc_skb_ip_align(vi->dev, MAX_PACKET_LEN, gfp);
	if (unlikely(!skb))
		return -ENOMEM;

	skb_put(skb, MAX_PACKET_LEN);

	hdr = skb_vnet_hdr(skb);
	sg_set_buf(rq->sg, &hdr->hdr, sizeof hdr->hdr);

	skb_to_sgvec(skb, rq->sg + 1, 0, skb->len);

	err = virtqueue_add_buf(rq->vq, rq->sg, 0, 2, skb, gfp);
	if (err < 0)
		dev_kfree_skb(skb);

	return err;
}
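/* Layout of a "big packets" receive buffer, as assembled below (editor's
 * summary of the code): sg[0] points at a bare virtio_net_hdr in its own
 * sg entry (see padded_vnet_hdr), sg[1] at the remainder of that same
 * first page, and sg[2]..sg[MAX_SKB_FRAGS + 1] at one full page each.
 * The pages are chained through ->private so the whole buffer can be
 * handed back with give_pages() on failure.
 */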
static int add_recvbuf_big(struct receive_queue *rq, gfp_t gfp)
{
	struct page *first, *list = NULL;
	char *p;
	int i, err, offset;

	/* page in rq->sg[MAX_SKB_FRAGS + 1] is list tail */
	for (i = MAX_SKB_FRAGS + 1; i > 1; --i) {
		first = get_a_page(rq, gfp);
		if (!first) {
			if (list)
				give_pages(rq, list);
			return -ENOMEM;
		}
		sg_set_buf(&rq->sg[i], page_address(first), PAGE_SIZE);

		/* chain new page in list head to match sg */
		first->private = (unsigned long)list;
		list = first;
	}

	first = get_a_page(rq, gfp);
	if (!first) {
		give_pages(rq, list);
		return -ENOMEM;
	}
	p = page_address(first);

	/* rq->sg[0], rq->sg[1] share the same page */
	/* a separate rq->sg[0] for the virtio_net_hdr only, due to a QEMU bug */
	sg_set_buf(&rq->sg[0], p, sizeof(struct virtio_net_hdr));

	/* rq->sg[1] for the data packet, starting at offset */
	offset = sizeof(struct padded_vnet_hdr);
	sg_set_buf(&rq->sg[1], p + offset, PAGE_SIZE - offset);

	/* chain first in list head */
	first->private = (unsigned long)list;
	err = virtqueue_add_buf(rq->vq, rq->sg, 0, MAX_SKB_FRAGS + 2,
				first, gfp);
	if (err < 0)
		give_pages(rq, first);

	return err;
}

static int add_recvbuf_mergeable(struct receive_queue *rq, gfp_t gfp)
{
	struct page *page;
	int err;

	page = get_a_page(rq, gfp);
	if (!page)
		return -ENOMEM;

	sg_init_one(rq->sg, page_address(page), PAGE_SIZE);

	err = virtqueue_add_buf(rq->vq, rq->sg, 0, 1, page, gfp);
	if (err < 0)
		give_pages(rq, page);

	return err;
}

/*
 * Returns false if we couldn't fill entirely (OOM).
 *
 * Normally run in the receive path, but can also be run from ndo_open
 * before we're receiving packets, or from refill_work which is
 * careful to disable receiving (using napi_disable).
 */
static bool try_fill_recv(struct receive_queue *rq, gfp_t gfp)
{
	struct virtnet_info *vi = rq->vq->vdev->priv;
	int err;
	bool oom;

	do {
		if (vi->mergeable_rx_bufs)
			err = add_recvbuf_mergeable(rq, gfp);
		else if (vi->big_packets)
			err = add_recvbuf_big(rq, gfp);
		else
			err = add_recvbuf_small(rq, gfp);

		oom = err == -ENOMEM;
		if (err < 0)
			break;
		++rq->num;
	} while (err > 0);
	if (unlikely(rq->num > rq->max))
		rq->max = rq->num;
	virtqueue_kick(rq->vq);
	return !oom;
}

static void skb_recv_done(struct virtqueue *rvq)
{
	struct virtnet_info *vi = rvq->vdev->priv;
	struct receive_queue *rq = &vi->rq[vq2rxq(rvq)];

	/* Schedule NAPI; suppress further interrupts if successful. */
	if (napi_schedule_prep(&rq->napi)) {
		virtqueue_disable_cb(rvq);
		__napi_schedule(&rq->napi);
	}
}
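/* This is the usual NAPI hand-off: napi_schedule_prep() succeeds for at
 * most one caller until the poll completes, and only that winner disables
 * the virtqueue callback.  virtnet_poll() re-enables the callback once it
 * has drained the ring below budget.
 */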
static void virtnet_napi_enable(struct receive_queue *rq)
{
	napi_enable(&rq->napi);

	/* If all buffers were filled by the other side before we enabled
	 * napi, we won't get another interrupt, so process any outstanding
	 * packets now.  virtnet_poll wants to re-enable the queue, so we
	 * disable it here.  We synchronize against interrupts via
	 * NAPI_STATE_SCHED. */
	if (napi_schedule_prep(&rq->napi)) {
		virtqueue_disable_cb(rq->vq);
		local_bh_disable();
		__napi_schedule(&rq->napi);
		local_bh_enable();
	}
}

static void refill_work(struct work_struct *work)
{
	struct virtnet_info *vi =
		container_of(work, struct virtnet_info, refill.work);
	bool still_empty;
	int i;

	for (i = 0; i < vi->max_queue_pairs; i++) {
		struct receive_queue *rq = &vi->rq[i];

		napi_disable(&rq->napi);
		still_empty = !try_fill_recv(rq, GFP_KERNEL);
		virtnet_napi_enable(rq);

		/* In theory, this can happen: if we don't get any buffers
		 * in, we will *never* try to fill again.
		 */
		if (still_empty)
			schedule_delayed_work(&vi->refill, HZ/2);
	}
}

static int virtnet_poll(struct napi_struct *napi, int budget)
{
	struct receive_queue *rq =
		container_of(napi, struct receive_queue, napi);
	struct virtnet_info *vi = rq->vq->vdev->priv;
	void *buf;
	unsigned int len, received = 0;

again:
	while (received < budget &&
	       (buf = virtqueue_get_buf(rq->vq, &len)) != NULL) {
		receive_buf(rq, buf, len);
		--rq->num;
		received++;
	}

	if (rq->num < rq->max / 2) {
		if (!try_fill_recv(rq, GFP_ATOMIC))
			schedule_delayed_work(&vi->refill, 0);
	}

	/* Out of packets? */
	if (received < budget) {
		napi_complete(napi);
		if (unlikely(!virtqueue_enable_cb(rq->vq)) &&
		    napi_schedule_prep(napi)) {
			virtqueue_disable_cb(rq->vq);
			__napi_schedule(napi);
			goto again;
		}
	}

	return received;
}
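/* Note (editor's summary): an allocation failure in the receive path is
 * never fatal.  Both virtnet_poll() above and virtnet_open() below fall
 * back to refill_work, which retries from process context where
 * GFP_KERNEL allocations may sleep and are far more likely to succeed.
 */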
static int virtnet_open(struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);
	int i;

	for (i = 0; i < vi->max_queue_pairs; i++) {
		/* Make sure we have some buffers: if oom use wq. */
		if (!try_fill_recv(&vi->rq[i], GFP_KERNEL))
			schedule_delayed_work(&vi->refill, 0);
		virtnet_napi_enable(&vi->rq[i]);
	}

	return 0;
}

static unsigned int free_old_xmit_skbs(struct send_queue *sq)
{
	struct sk_buff *skb;
	unsigned int len, tot_sgs = 0;
	struct virtnet_info *vi = sq->vq->vdev->priv;
	struct virtnet_stats *stats = this_cpu_ptr(vi->stats);

	while ((skb = virtqueue_get_buf(sq->vq, &len)) != NULL) {
		pr_debug("Sent skb %p\n", skb);

		u64_stats_update_begin(&stats->tx_syncp);
		stats->tx_bytes += skb->len;
		stats->tx_packets++;
		u64_stats_update_end(&stats->tx_syncp);

		tot_sgs += skb_vnet_hdr(skb)->num_sg;
		dev_kfree_skb_any(skb);
	}
	return tot_sgs;
}

static int xmit_skb(struct send_queue *sq, struct sk_buff *skb)
{
	struct skb_vnet_hdr *hdr = skb_vnet_hdr(skb);
	const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest;
	struct virtnet_info *vi = sq->vq->vdev->priv;

	pr_debug("%s: xmit %p %pM\n", vi->dev->name, skb, dest);

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		hdr->hdr.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
		hdr->hdr.csum_start = skb_checksum_start_offset(skb);
		hdr->hdr.csum_offset = skb->csum_offset;
	} else {
		hdr->hdr.flags = 0;
		hdr->hdr.csum_offset = hdr->hdr.csum_start = 0;
	}

	if (skb_is_gso(skb)) {
		hdr->hdr.hdr_len = skb_headlen(skb);
		hdr->hdr.gso_size = skb_shinfo(skb)->gso_size;
		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
			hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
		else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
			hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
		else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
			hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_UDP;
		else
			BUG();
		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCP_ECN)
			hdr->hdr.gso_type |= VIRTIO_NET_HDR_GSO_ECN;
	} else {
		hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_NONE;
		hdr->hdr.gso_size = hdr->hdr.hdr_len = 0;
	}

	hdr->mhdr.num_buffers = 0;
	/* Encode metadata header at front. */
	if (vi->mergeable_rx_bufs)
		sg_set_buf(sq->sg, &hdr->mhdr, sizeof hdr->mhdr);
	else
		sg_set_buf(sq->sg, &hdr->hdr, sizeof hdr->hdr);

	hdr->num_sg = skb_to_sgvec(skb, sq->sg + 1, 0, skb->len) + 1;
	return virtqueue_add_buf(sq->vq, sq->sg, hdr->num_sg,
				 0, skb, GFP_ATOMIC);
}

static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);
	int qnum = skb_get_queue_mapping(skb);
	struct send_queue *sq = &vi->sq[qnum];
	int capacity;

	/* Free up any pending old buffers before queueing new ones. */
	free_old_xmit_skbs(sq);

	/* Try to transmit */
	capacity = xmit_skb(sq, skb);

	/* This can happen with OOM and indirect buffers. */
	if (unlikely(capacity < 0)) {
		if (likely(capacity == -ENOMEM)) {
			if (net_ratelimit())
				dev_warn(&dev->dev,
					 "TXQ (%d) failure: out of memory\n",
					 qnum);
		} else {
			dev->stats.tx_fifo_errors++;
			if (net_ratelimit())
				dev_warn(&dev->dev,
					 "Unexpected TXQ (%d) failure: %d\n",
					 qnum, capacity);
		}
		dev->stats.tx_dropped++;
		kfree_skb(skb);
		return NETDEV_TX_OK;
	}
	virtqueue_kick(sq->vq);

	/* Don't wait up for transmitted skbs to be freed. */
	skb_orphan(skb);
	nf_reset(skb);

	/* Apparently nice girls don't return TX_BUSY; stop the queue
	 * before it gets out of hand.  Naturally, this wastes entries. */
	if (capacity < 2+MAX_SKB_FRAGS) {
		netif_stop_subqueue(dev, qnum);
		if (unlikely(!virtqueue_enable_cb_delayed(sq->vq))) {
			/* More just got used; free them, then recheck. */
			capacity += free_old_xmit_skbs(sq);
			if (capacity >= 2+MAX_SKB_FRAGS) {
				netif_start_subqueue(dev, qnum);
				virtqueue_disable_cb(sq->vq);
			}
		}
	}

	return NETDEV_TX_OK;
}

static int virtnet_set_mac_address(struct net_device *dev, void *p)
{
	struct virtnet_info *vi = netdev_priv(dev);
	struct virtio_device *vdev = vi->vdev;
	int ret;

	ret = eth_mac_addr(dev, p);
	if (ret)
		return ret;

	if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC))
		vdev->config->set(vdev, offsetof(struct virtio_net_config, mac),
				  dev->dev_addr, dev->addr_len);

	return 0;
}

static struct rtnl_link_stats64 *virtnet_stats(struct net_device *dev,
					       struct rtnl_link_stats64 *tot)
{
	struct virtnet_info *vi = netdev_priv(dev);
	int cpu;
	unsigned int start;

	for_each_possible_cpu(cpu) {
		struct virtnet_stats *stats = per_cpu_ptr(vi->stats, cpu);
		u64 tpackets, tbytes, rpackets, rbytes;

		do {
			start = u64_stats_fetch_begin_bh(&stats->tx_syncp);
			tpackets = stats->tx_packets;
			tbytes = stats->tx_bytes;
		} while (u64_stats_fetch_retry_bh(&stats->tx_syncp, start));

		do {
			start = u64_stats_fetch_begin_bh(&stats->rx_syncp);
			rpackets = stats->rx_packets;
			rbytes = stats->rx_bytes;
		} while (u64_stats_fetch_retry_bh(&stats->rx_syncp, start));

		tot->rx_packets += rpackets;
		tot->tx_packets += tpackets;
		tot->rx_bytes += rbytes;
		tot->tx_bytes += tbytes;
	}

	tot->tx_dropped = dev->stats.tx_dropped;
	tot->tx_fifo_errors = dev->stats.tx_fifo_errors;
	tot->rx_dropped = dev->stats.rx_dropped;
	tot->rx_length_errors = dev->stats.rx_length_errors;
	tot->rx_frame_errors = dev->stats.rx_frame_errors;

	return tot;
}
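/* The tx and rx counters sit behind separate u64_stats_sync instances, so
 * the retry loops above guarantee that each packet/byte pair is internally
 * consistent even on 32-bit hosts, though the tx and rx snapshots for one
 * cpu may come from slightly different instants.
 */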
#ifdef CONFIG_NET_POLL_CONTROLLER
static void virtnet_netpoll(struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);
	int i;

	for (i = 0; i < vi->curr_queue_pairs; i++)
		napi_schedule(&vi->rq[i].napi);
}
#endif

/*
 * Send a command via the control virtqueue and check its status.  Commands
 * supported by the hypervisor, as indicated by feature bits, should
 * never fail unless improperly formatted.
 */
static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
				 struct scatterlist *data, int out, int in)
{
	struct scatterlist *s, sg[VIRTNET_SEND_COMMAND_SG_MAX + 2];
	struct virtio_net_ctrl_hdr ctrl;
	virtio_net_ctrl_ack status = ~0;
	unsigned int tmp;
	int i;

	/* Caller should know better */
	BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ) ||
	       (out + in > VIRTNET_SEND_COMMAND_SG_MAX));

	out++; /* Add header */
	in++; /* Add return status */

	ctrl.class = class;
	ctrl.cmd = cmd;

	sg_init_table(sg, out + in);

	sg_set_buf(&sg[0], &ctrl, sizeof(ctrl));
	for_each_sg(data, s, out + in - 2, i)
		sg_set_buf(&sg[i + 1], sg_virt(s), s->length);
	sg_set_buf(&sg[out + in - 1], &status, sizeof(status));

	BUG_ON(virtqueue_add_buf(vi->cvq, sg, out, in, vi, GFP_ATOMIC) < 0);

	virtqueue_kick(vi->cvq);

	/*
	 * Spin for a response; the kick causes an ioport write, trapping
	 * into the hypervisor, so the request should be handled immediately.
	 */
	while (!virtqueue_get_buf(vi->cvq, &tmp))
		cpu_relax();

	return status == VIRTIO_NET_OK;
}

static void virtnet_ack_link_announce(struct virtnet_info *vi)
{
	rtnl_lock();
	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_ANNOUNCE,
				  VIRTIO_NET_CTRL_ANNOUNCE_ACK, NULL,
				  0, 0))
		dev_warn(&vi->dev->dev, "Failed to ack link announce.\n");
	rtnl_unlock();
}

static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
{
	struct scatterlist sg;
	struct virtio_net_ctrl_mq s;
	struct net_device *dev = vi->dev;

	if (!vi->has_cvq || !virtio_has_feature(vi->vdev, VIRTIO_NET_F_MQ))
		return 0;

	s.virtqueue_pairs = queue_pairs;
	sg_init_one(&sg, &s, sizeof(s));

	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ,
				  VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET, &sg, 1, 0)) {
		dev_warn(&dev->dev, "Failed to set the number of queue pairs to %d\n",
			 queue_pairs);
		return -EINVAL;
	} else
		vi->curr_queue_pairs = queue_pairs;

	return 0;
}
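/* On the control virtqueue, every command built by virtnet_send_command()
 * occupies three kinds of sg entries, e.g. (editor's illustration) for
 * VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET above:
 *
 *	sg[0]: struct virtio_net_ctrl_hdr { class, cmd }      (driver -> device)
 *	sg[1]: struct virtio_net_ctrl_mq { virtqueue_pairs }  (driver -> device)
 *	sg[2]: virtio_net_ctrl_ack status byte                (device -> driver)
 */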
static int virtnet_close(struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);
	int i;

	/* Make sure refill_work doesn't re-enable napi! */
	cancel_delayed_work_sync(&vi->refill);

	for (i = 0; i < vi->max_queue_pairs; i++)
		napi_disable(&vi->rq[i].napi);

	return 0;
}

static void virtnet_set_rx_mode(struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);
	struct scatterlist sg[2];
	u8 promisc, allmulti;
	struct virtio_net_ctrl_mac *mac_data;
	struct netdev_hw_addr *ha;
	int uc_count;
	int mc_count;
	void *buf;
	int i;

	/* We can't dynamically set ndo_set_rx_mode, so return gracefully */
	if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_RX))
		return;

	promisc = ((dev->flags & IFF_PROMISC) != 0);
	allmulti = ((dev->flags & IFF_ALLMULTI) != 0);

	sg_init_one(sg, &promisc, sizeof(promisc));

	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
				  VIRTIO_NET_CTRL_RX_PROMISC,
				  sg, 1, 0))
		dev_warn(&dev->dev, "Failed to %sable promisc mode.\n",
			 promisc ? "en" : "dis");

	sg_init_one(sg, &allmulti, sizeof(allmulti));

	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
				  VIRTIO_NET_CTRL_RX_ALLMULTI,
				  sg, 1, 0))
		dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n",
			 allmulti ? "en" : "dis");
"en" : "dis"); 958f565a7c2SAlex Williamson 95932e7bfc4SJiri Pirko uc_count = netdev_uc_count(dev); 9604cd24eafSJiri Pirko mc_count = netdev_mc_count(dev); 961f565a7c2SAlex Williamson /* MAC filter - use one buffer for both lists */ 9624cd24eafSJiri Pirko buf = kzalloc(((uc_count + mc_count) * ETH_ALEN) + 963f565a7c2SAlex Williamson (2 * sizeof(mac_data->entries)), GFP_ATOMIC); 9644cd24eafSJiri Pirko mac_data = buf; 965f565a7c2SAlex Williamson if (!buf) { 966f565a7c2SAlex Williamson dev_warn(&dev->dev, "No memory for MAC address buffer\n"); 967f565a7c2SAlex Williamson return; 968f565a7c2SAlex Williamson } 969f565a7c2SAlex Williamson 97023e258e1SAlex Williamson sg_init_table(sg, 2); 97123e258e1SAlex Williamson 972f565a7c2SAlex Williamson /* Store the unicast list and count in the front of the buffer */ 97332e7bfc4SJiri Pirko mac_data->entries = uc_count; 974ccffad25SJiri Pirko i = 0; 97532e7bfc4SJiri Pirko netdev_for_each_uc_addr(ha, dev) 976ccffad25SJiri Pirko memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN); 977f565a7c2SAlex Williamson 978f565a7c2SAlex Williamson sg_set_buf(&sg[0], mac_data, 97932e7bfc4SJiri Pirko sizeof(mac_data->entries) + (uc_count * ETH_ALEN)); 980f565a7c2SAlex Williamson 981f565a7c2SAlex Williamson /* multicast list and count fill the end */ 98232e7bfc4SJiri Pirko mac_data = (void *)&mac_data->macs[uc_count][0]; 983f565a7c2SAlex Williamson 9844cd24eafSJiri Pirko mac_data->entries = mc_count; 985567ec874SJiri Pirko i = 0; 98622bedad3SJiri Pirko netdev_for_each_mc_addr(ha, dev) 98722bedad3SJiri Pirko memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN); 988f565a7c2SAlex Williamson 989f565a7c2SAlex Williamson sg_set_buf(&sg[1], mac_data, 9904cd24eafSJiri Pirko sizeof(mac_data->entries) + (mc_count * ETH_ALEN)); 991f565a7c2SAlex Williamson 992f565a7c2SAlex Williamson if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC, 993f565a7c2SAlex Williamson VIRTIO_NET_CTRL_MAC_TABLE_SET, 994f565a7c2SAlex Williamson sg, 2, 0)) 995f565a7c2SAlex Williamson dev_warn(&dev->dev, "Failed to set MAC fitler table.\n"); 996f565a7c2SAlex Williamson 997f565a7c2SAlex Williamson kfree(buf); 9982af7698eSAlex Williamson } 9992af7698eSAlex Williamson 10008e586137SJiri Pirko static int virtnet_vlan_rx_add_vid(struct net_device *dev, u16 vid) 10010bde9569SAlex Williamson { 10020bde9569SAlex Williamson struct virtnet_info *vi = netdev_priv(dev); 10030bde9569SAlex Williamson struct scatterlist sg; 10040bde9569SAlex Williamson 100523e258e1SAlex Williamson sg_init_one(&sg, &vid, sizeof(vid)); 10060bde9569SAlex Williamson 10070bde9569SAlex Williamson if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN, 10080bde9569SAlex Williamson VIRTIO_NET_CTRL_VLAN_ADD, &sg, 1, 0)) 10090bde9569SAlex Williamson dev_warn(&dev->dev, "Failed to add VLAN ID %d.\n", vid); 10108e586137SJiri Pirko return 0; 10110bde9569SAlex Williamson } 10120bde9569SAlex Williamson 10138e586137SJiri Pirko static int virtnet_vlan_rx_kill_vid(struct net_device *dev, u16 vid) 10140bde9569SAlex Williamson { 10150bde9569SAlex Williamson struct virtnet_info *vi = netdev_priv(dev); 10160bde9569SAlex Williamson struct scatterlist sg; 10170bde9569SAlex Williamson 101823e258e1SAlex Williamson sg_init_one(&sg, &vid, sizeof(vid)); 10190bde9569SAlex Williamson 10200bde9569SAlex Williamson if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN, 10210bde9569SAlex Williamson VIRTIO_NET_CTRL_VLAN_DEL, &sg, 1, 0)) 10220bde9569SAlex Williamson dev_warn(&dev->dev, "Failed to kill VLAN ID %d.\n", vid); 10238e586137SJiri Pirko return 0; 10240bde9569SAlex Williamson } 
static void virtnet_set_affinity(struct virtnet_info *vi, bool set)
{
	int i;

	/* In multiqueue mode, when the number of cpus equals the number of
	 * queue pairs, we make each queue pair private to one cpu by
	 * setting the affinity hint, which eliminates the contention.
	 */
	if ((vi->curr_queue_pairs == 1 ||
	     vi->max_queue_pairs != num_online_cpus()) && set) {
		if (vi->affinity_hint_set)
			set = false;
		else
			return;
	}

	for (i = 0; i < vi->max_queue_pairs; i++) {
		int cpu = set ? i : -1;
		virtqueue_set_affinity(vi->rq[i].vq, cpu);
		virtqueue_set_affinity(vi->sq[i].vq, cpu);
	}

	if (set)
		vi->affinity_hint_set = true;
	else
		vi->affinity_hint_set = false;
}

static void virtnet_get_ringparam(struct net_device *dev,
				  struct ethtool_ringparam *ring)
{
	struct virtnet_info *vi = netdev_priv(dev);

	ring->rx_max_pending = virtqueue_get_vring_size(vi->rq[0].vq);
	ring->tx_max_pending = virtqueue_get_vring_size(vi->sq[0].vq);
	ring->rx_pending = ring->rx_max_pending;
	ring->tx_pending = ring->tx_max_pending;
}

static void virtnet_get_drvinfo(struct net_device *dev,
				struct ethtool_drvinfo *info)
{
	struct virtnet_info *vi = netdev_priv(dev);
	struct virtio_device *vdev = vi->vdev;

	strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
	strlcpy(info->version, VIRTNET_DRIVER_VERSION, sizeof(info->version));
	strlcpy(info->bus_info, virtio_bus_name(vdev), sizeof(info->bus_info));
}
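/* The channel ops below are reached via ethtool, e.g. (illustrative):
 *
 *	ethtool -L eth0 combined 4	# virtnet_set_channels()
 *	ethtool -l eth0			# virtnet_get_channels()
 *
 * Only the 'combined' count may change; rx/tx/other counts are rejected.
 */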
/* TODO: Eliminate OOO packets during switching */
static int virtnet_set_channels(struct net_device *dev,
				struct ethtool_channels *channels)
{
	struct virtnet_info *vi = netdev_priv(dev);
	u16 queue_pairs = channels->combined_count;
	int err;

	/* We don't support separate rx/tx channels.
	 * We don't allow setting 'other' channels.
	 */
	if (channels->rx_count || channels->tx_count || channels->other_count)
		return -EINVAL;

	if (queue_pairs > vi->max_queue_pairs)
		return -EINVAL;

	err = virtnet_set_queues(vi, queue_pairs);
	if (!err) {
		netif_set_real_num_tx_queues(dev, queue_pairs);
		netif_set_real_num_rx_queues(dev, queue_pairs);

		virtnet_set_affinity(vi, true);
	}

	return err;
}

static void virtnet_get_channels(struct net_device *dev,
				 struct ethtool_channels *channels)
{
	struct virtnet_info *vi = netdev_priv(dev);

	channels->combined_count = vi->curr_queue_pairs;
	channels->max_combined = vi->max_queue_pairs;
	channels->max_other = 0;
	channels->rx_count = 0;
	channels->tx_count = 0;
	channels->other_count = 0;
}

static const struct ethtool_ops virtnet_ethtool_ops = {
	.get_drvinfo = virtnet_get_drvinfo,
	.get_link = ethtool_op_get_link,
	.get_ringparam = virtnet_get_ringparam,
	.set_channels = virtnet_set_channels,
	.get_channels = virtnet_get_channels,
};

#define MIN_MTU 68
#define MAX_MTU 65535

static int virtnet_change_mtu(struct net_device *dev, int new_mtu)
{
	if (new_mtu < MIN_MTU || new_mtu > MAX_MTU)
		return -EINVAL;
	dev->mtu = new_mtu;
	return 0;
}

/* To avoid contending a lock held by a vcpu that may exit to the host,
 * select the txq based on the processor id.
 * TODO: handle cpu hotplug.
 */
static u16 virtnet_select_queue(struct net_device *dev, struct sk_buff *skb)
{
	int txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) :
		  smp_processor_id();

	while (unlikely(txq >= dev->real_num_tx_queues))
		txq -= dev->real_num_tx_queues;

	return txq;
}

static const struct net_device_ops virtnet_netdev = {
	.ndo_open            = virtnet_open,
	.ndo_stop            = virtnet_close,
	.ndo_start_xmit      = start_xmit,
	.ndo_validate_addr   = eth_validate_addr,
	.ndo_set_mac_address = virtnet_set_mac_address,
	.ndo_set_rx_mode     = virtnet_set_rx_mode,
	.ndo_change_mtu      = virtnet_change_mtu,
	.ndo_get_stats64     = virtnet_stats,
	.ndo_vlan_rx_add_vid = virtnet_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid = virtnet_vlan_rx_kill_vid,
	.ndo_select_queue    = virtnet_select_queue,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = virtnet_netpoll,
#endif
};

static void virtnet_config_changed_work(struct work_struct *work)
{
	struct virtnet_info *vi =
		container_of(work, struct virtnet_info, config_work);
	u16 v;

	mutex_lock(&vi->config_lock);
	if (!vi->config_enable)
		goto done;

	if (virtio_config_val(vi->vdev, VIRTIO_NET_F_STATUS,
			      offsetof(struct virtio_net_config, status),
			      &v) < 0)
		goto done;

	if (v & VIRTIO_NET_S_ANNOUNCE) {
		netdev_notify_peers(vi->dev);
		virtnet_ack_link_announce(vi);
	}

	/* Ignore unknown (future) status bits */
	v &= VIRTIO_NET_S_LINK_UP;

	if (vi->status == v)
		goto done;

	vi->status = v;

	if (vi->status & VIRTIO_NET_S_LINK_UP) {
		netif_carrier_on(vi->dev);
		netif_tx_wake_all_queues(vi->dev);
	} else {
		netif_carrier_off(vi->dev);
		netif_tx_stop_all_queues(vi->dev);
	}
done:
	mutex_unlock(&vi->config_lock);
}

static void virtnet_config_changed(struct virtio_device *vdev)
{
	struct virtnet_info *vi = vdev->priv;

	schedule_work(&vi->config_work);
}
1216986a4f4dSJason Wang static void virtnet_free_queues(struct virtnet_info *vi)
1217986a4f4dSJason Wang {
1218986a4f4dSJason Wang 	kfree(vi->rq);
1219986a4f4dSJason Wang 	kfree(vi->sq);
1220986a4f4dSJason Wang }
1221986a4f4dSJason Wang 
1222986a4f4dSJason Wang static void free_receive_bufs(struct virtnet_info *vi)
1223986a4f4dSJason Wang {
1224986a4f4dSJason Wang 	int i;
1225986a4f4dSJason Wang 
1226986a4f4dSJason Wang 	for (i = 0; i < vi->max_queue_pairs; i++) {
1227986a4f4dSJason Wang 		while (vi->rq[i].pages)
1228986a4f4dSJason Wang 			__free_pages(get_a_page(&vi->rq[i], GFP_KERNEL), 0);
1229986a4f4dSJason Wang 	}
1230986a4f4dSJason Wang }
1231986a4f4dSJason Wang 
1232986a4f4dSJason Wang static void free_unused_bufs(struct virtnet_info *vi)
1233986a4f4dSJason Wang {
1234986a4f4dSJason Wang 	void *buf;
1235986a4f4dSJason Wang 	int i;
1236986a4f4dSJason Wang 
1237986a4f4dSJason Wang 	for (i = 0; i < vi->max_queue_pairs; i++) {
1238986a4f4dSJason Wang 		struct virtqueue *vq = vi->sq[i].vq;
1239986a4f4dSJason Wang 		while ((buf = virtqueue_detach_unused_buf(vq)) != NULL)
1240986a4f4dSJason Wang 			dev_kfree_skb(buf);
1241986a4f4dSJason Wang 	}
1242986a4f4dSJason Wang 
1243986a4f4dSJason Wang 	for (i = 0; i < vi->max_queue_pairs; i++) {
1244986a4f4dSJason Wang 		struct virtqueue *vq = vi->rq[i].vq;
1245986a4f4dSJason Wang 
1246986a4f4dSJason Wang 		while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) {
1247986a4f4dSJason Wang 			if (vi->mergeable_rx_bufs || vi->big_packets)
1248986a4f4dSJason Wang 				give_pages(&vi->rq[i], buf);
1249986a4f4dSJason Wang 			else
1250986a4f4dSJason Wang 				dev_kfree_skb(buf);
1251986a4f4dSJason Wang 			--vi->rq[i].num;
1252986a4f4dSJason Wang 		}
1253986a4f4dSJason Wang 		BUG_ON(vi->rq[i].num != 0);
1254986a4f4dSJason Wang 	}
1255986a4f4dSJason Wang }
1256986a4f4dSJason Wang 
1257e9d7417bSJason Wang static void virtnet_del_vqs(struct virtnet_info *vi)
1258e9d7417bSJason Wang {
1259e9d7417bSJason Wang 	struct virtio_device *vdev = vi->vdev;
1260e9d7417bSJason Wang 
1261986a4f4dSJason Wang 	virtnet_set_affinity(vi, false);
1262986a4f4dSJason Wang 
1263e9d7417bSJason Wang 	vdev->config->del_vqs(vdev);
1264986a4f4dSJason Wang 
1265986a4f4dSJason Wang 	virtnet_free_queues(vi);
1266986a4f4dSJason Wang }
1267986a4f4dSJason Wang 
1268986a4f4dSJason Wang static int virtnet_find_vqs(struct virtnet_info *vi)
1269986a4f4dSJason Wang {
1270986a4f4dSJason Wang 	vq_callback_t **callbacks;
1271986a4f4dSJason Wang 	struct virtqueue **vqs;
1272986a4f4dSJason Wang 	int ret = -ENOMEM;
1273986a4f4dSJason Wang 	int i, total_vqs;
1274986a4f4dSJason Wang 	const char **names;
1275986a4f4dSJason Wang 
1276986a4f4dSJason Wang 	/* We expect 1 RX virtqueue followed by 1 TX virtqueue, followed by
1277986a4f4dSJason Wang 	 * possibly N-1 RX/TX queue pairs used in multiqueue mode, followed by
1278986a4f4dSJason Wang 	 * a possible control vq.
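	 *
	 * For example, with max_queue_pairs == 2 and a control vq the layout
	 * is: input.0, output.0, input.1, output.1, control (always last).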
1279986a4f4dSJason Wang 	 */
1280986a4f4dSJason Wang 	total_vqs = vi->max_queue_pairs * 2 +
1281986a4f4dSJason Wang 		    virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ);
1282986a4f4dSJason Wang 
1283986a4f4dSJason Wang 	/* Allocate space for find_vqs parameters */
1284986a4f4dSJason Wang 	vqs = kzalloc(total_vqs * sizeof(*vqs), GFP_KERNEL);
1285986a4f4dSJason Wang 	if (!vqs)
1286986a4f4dSJason Wang 		goto err_vq;
1287986a4f4dSJason Wang 	callbacks = kmalloc(total_vqs * sizeof(*callbacks), GFP_KERNEL);
1288986a4f4dSJason Wang 	if (!callbacks)
1289986a4f4dSJason Wang 		goto err_callback;
1290986a4f4dSJason Wang 	names = kmalloc(total_vqs * sizeof(*names), GFP_KERNEL);
1291986a4f4dSJason Wang 	if (!names)
1292986a4f4dSJason Wang 		goto err_names;
1293986a4f4dSJason Wang 
1294986a4f4dSJason Wang 	/* Parameters for control virtqueue, if any */
1295986a4f4dSJason Wang 	if (vi->has_cvq) {
1296986a4f4dSJason Wang 		callbacks[total_vqs - 1] = NULL;
1297986a4f4dSJason Wang 		names[total_vqs - 1] = "control";
1298986a4f4dSJason Wang 	}
1299986a4f4dSJason Wang 
1300986a4f4dSJason Wang 	/* Allocate/initialize parameters for send/receive virtqueues */
1301986a4f4dSJason Wang 	for (i = 0; i < vi->max_queue_pairs; i++) {
1302986a4f4dSJason Wang 		callbacks[rxq2vq(i)] = skb_recv_done;
1303986a4f4dSJason Wang 		callbacks[txq2vq(i)] = skb_xmit_done;
1304986a4f4dSJason Wang 		sprintf(vi->rq[i].name, "input.%d", i);
1305986a4f4dSJason Wang 		sprintf(vi->sq[i].name, "output.%d", i);
1306986a4f4dSJason Wang 		names[rxq2vq(i)] = vi->rq[i].name;
1307986a4f4dSJason Wang 		names[txq2vq(i)] = vi->sq[i].name;
1308986a4f4dSJason Wang 	}
1309986a4f4dSJason Wang 
1310986a4f4dSJason Wang 	ret = vi->vdev->config->find_vqs(vi->vdev, total_vqs, vqs, callbacks,
1311986a4f4dSJason Wang 					 names);
1312986a4f4dSJason Wang 	if (ret)
1313986a4f4dSJason Wang 		goto err_find;
1314986a4f4dSJason Wang 
1315986a4f4dSJason Wang 	if (vi->has_cvq) {
1316986a4f4dSJason Wang 		vi->cvq = vqs[total_vqs - 1];
1317986a4f4dSJason Wang 		if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VLAN))
1318986a4f4dSJason Wang 			vi->dev->features |= NETIF_F_HW_VLAN_FILTER;
1319986a4f4dSJason Wang 	}
1320986a4f4dSJason Wang 
1321986a4f4dSJason Wang 	for (i = 0; i < vi->max_queue_pairs; i++) {
1322986a4f4dSJason Wang 		vi->rq[i].vq = vqs[rxq2vq(i)];
1323986a4f4dSJason Wang 		vi->sq[i].vq = vqs[txq2vq(i)];
1324986a4f4dSJason Wang 	}
1325986a4f4dSJason Wang 
1326986a4f4dSJason Wang 	kfree(names);
1327986a4f4dSJason Wang 	kfree(callbacks);
1328986a4f4dSJason Wang 	kfree(vqs);
1329986a4f4dSJason Wang 
1330986a4f4dSJason Wang 	return 0;
1331986a4f4dSJason Wang 
1332986a4f4dSJason Wang err_find:
1333986a4f4dSJason Wang 	kfree(names);
1334986a4f4dSJason Wang err_names:
1335986a4f4dSJason Wang 	kfree(callbacks);
1336986a4f4dSJason Wang err_callback:
1337986a4f4dSJason Wang 	kfree(vqs);
1338986a4f4dSJason Wang err_vq:
1339986a4f4dSJason Wang 	return ret;
1340986a4f4dSJason Wang }
1341986a4f4dSJason Wang 
1342986a4f4dSJason Wang static int virtnet_alloc_queues(struct virtnet_info *vi)
1343986a4f4dSJason Wang {
1344986a4f4dSJason Wang 	int i;
1345986a4f4dSJason Wang 
1346986a4f4dSJason Wang 	vi->sq = kzalloc(sizeof(*vi->sq) * vi->max_queue_pairs, GFP_KERNEL);
1347986a4f4dSJason Wang 	if (!vi->sq)
1348986a4f4dSJason Wang 		goto err_sq;
1349986a4f4dSJason Wang 	vi->rq = kzalloc(sizeof(*vi->rq) * vi->max_queue_pairs, GFP_KERNEL);
1350986a4f4dSJason Wang 	if (!vi->rq)
1351986a4f4dSJason Wang 		goto err_rq;
1352986a4f4dSJason Wang 
1353986a4f4dSJason Wang 	INIT_DELAYED_WORK(&vi->refill, refill_work);
1354986a4f4dSJason Wang 	for (i = 0; i < vi->max_queue_pairs; i++) {
1355986a4f4dSJason Wang 		vi->rq[i].pages = NULL;
1356986a4f4dSJason Wang 		netif_napi_add(vi->dev, &vi->rq[i].napi, virtnet_poll,
1357986a4f4dSJason Wang 			       napi_weight);
1358986a4f4dSJason Wang 
1359986a4f4dSJason Wang 		sg_init_table(vi->rq[i].sg, ARRAY_SIZE(vi->rq[i].sg));
1360986a4f4dSJason Wang 		sg_init_table(vi->sq[i].sg, ARRAY_SIZE(vi->sq[i].sg));
1361986a4f4dSJason Wang 	}
1362986a4f4dSJason Wang 
1363986a4f4dSJason Wang 	return 0;
1364986a4f4dSJason Wang 
1365986a4f4dSJason Wang err_rq:
1366986a4f4dSJason Wang 	kfree(vi->sq);
1367986a4f4dSJason Wang err_sq:
1368986a4f4dSJason Wang 	return -ENOMEM;
1369e9d7417bSJason Wang }
1370e9d7417bSJason Wang 
13713f9c10b0SAmit Shah static int init_vqs(struct virtnet_info *vi)
13723f9c10b0SAmit Shah {
1373986a4f4dSJason Wang 	int ret;
13743f9c10b0SAmit Shah 
1375986a4f4dSJason Wang 	/* Allocate send & receive queues */
1376986a4f4dSJason Wang 	ret = virtnet_alloc_queues(vi);
1377986a4f4dSJason Wang 	if (ret)
1378986a4f4dSJason Wang 		goto err;
13793f9c10b0SAmit Shah 
1380986a4f4dSJason Wang 	ret = virtnet_find_vqs(vi);
1381986a4f4dSJason Wang 	if (ret)
1382986a4f4dSJason Wang 		goto err_free;
13833f9c10b0SAmit Shah 
1384986a4f4dSJason Wang 	virtnet_set_affinity(vi, true);
13853f9c10b0SAmit Shah 	return 0;
1386986a4f4dSJason Wang 
1387986a4f4dSJason Wang err_free:
1388986a4f4dSJason Wang 	virtnet_free_queues(vi);
1389986a4f4dSJason Wang err:
1390986a4f4dSJason Wang 	return ret;
13913f9c10b0SAmit Shah }
13923f9c10b0SAmit Shah 
1393296f96fcSRusty Russell static int virtnet_probe(struct virtio_device *vdev)
1394296f96fcSRusty Russell {
1395986a4f4dSJason Wang 	int i, err;
1396296f96fcSRusty Russell 	struct net_device *dev;
1397296f96fcSRusty Russell 	struct virtnet_info *vi;
1398986a4f4dSJason Wang 	u16 max_queue_pairs;
1399986a4f4dSJason Wang 
1400986a4f4dSJason Wang 	/* Find out whether the host supports a multiqueue virtio_net device */
1401986a4f4dSJason Wang 	err = virtio_config_val(vdev, VIRTIO_NET_F_MQ,
1402986a4f4dSJason Wang 				offsetof(struct virtio_net_config,
1403986a4f4dSJason Wang 					 max_virtqueue_pairs), &max_queue_pairs);
1404986a4f4dSJason Wang 
1405986a4f4dSJason Wang 	/* We need at least 2 queues */
1406986a4f4dSJason Wang 	if (err || max_queue_pairs < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN ||
1407986a4f4dSJason Wang 	    max_queue_pairs > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX ||
1408986a4f4dSJason Wang 	    !virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ))
1409986a4f4dSJason Wang 		max_queue_pairs = 1;
1410296f96fcSRusty Russell 
1411296f96fcSRusty Russell 	/* Allocate ourselves a network device with room for our info */
1412986a4f4dSJason Wang 	dev = alloc_etherdev_mq(sizeof(struct virtnet_info), max_queue_pairs);
1413296f96fcSRusty Russell 	if (!dev)
1414296f96fcSRusty Russell 		return -ENOMEM;
1415296f96fcSRusty Russell 
1416296f96fcSRusty Russell 	/* Set up network device as normal. */
1417f2f2c8b4SJiri Pirko 	dev->priv_flags |= IFF_UNICAST_FLT | IFF_LIVE_ADDR_CHANGE;
141876288b4eSStephen Hemminger 	dev->netdev_ops = &virtnet_netdev;
1419296f96fcSRusty Russell 	dev->features = NETIF_F_HIGHDMA;
14203fa2a1dfSstephen hemminger 
1421a9ea3fc6SHerbert Xu 	SET_ETHTOOL_OPS(dev, &virtnet_ethtool_ops);
1422296f96fcSRusty Russell 	SET_NETDEV_DEV(dev, &vdev->dev);
1423296f96fcSRusty Russell 
1424296f96fcSRusty Russell 	/* Do we support "hardware" checksums? */
142598e778c9SMichał Mirosław 	if (virtio_has_feature(vdev, VIRTIO_NET_F_CSUM)) {
1426296f96fcSRusty Russell 		/* This opens up the world of extra features. */
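		/* Checksum offload is a prerequisite for the segmentation
		 * offloads requested below; they are therefore only enabled
		 * inside this branch.
		 */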
142798e778c9SMichał Mirosław 		dev->hw_features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST;
142898e778c9SMichał Mirosław 		if (csum)
1429296f96fcSRusty Russell 			dev->features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST;
143098e778c9SMichał Mirosław 
143198e778c9SMichał Mirosław 		if (virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) {
143298e778c9SMichał Mirosław 			dev->hw_features |= NETIF_F_TSO | NETIF_F_UFO
143334a48579SRusty Russell 				| NETIF_F_TSO_ECN | NETIF_F_TSO6;
143434a48579SRusty Russell 		}
14355539ae96SRusty Russell 		/* Individual feature bits: what can host handle? */
143698e778c9SMichał Mirosław 		if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO4))
143798e778c9SMichał Mirosław 			dev->hw_features |= NETIF_F_TSO;
143898e778c9SMichał Mirosław 		if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO6))
143998e778c9SMichał Mirosław 			dev->hw_features |= NETIF_F_TSO6;
144098e778c9SMichał Mirosław 		if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_ECN))
144198e778c9SMichał Mirosław 			dev->hw_features |= NETIF_F_TSO_ECN;
144298e778c9SMichał Mirosław 		if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_UFO))
144398e778c9SMichał Mirosław 			dev->hw_features |= NETIF_F_UFO;
144498e778c9SMichał Mirosław 
144598e778c9SMichał Mirosław 		if (gso)
144698e778c9SMichał Mirosław 			dev->features |= dev->hw_features & (NETIF_F_ALL_TSO|NETIF_F_UFO);
144798e778c9SMichał Mirosław 		/* (!csum && gso) case will be fixed by register_netdev() */
1448296f96fcSRusty Russell 	}
1449296f96fcSRusty Russell 
1450296f96fcSRusty Russell 	/* Configuration may specify what MAC to use. Otherwise random. */
145177dd7693SSasha Levin 	if (virtio_config_val_len(vdev, VIRTIO_NET_F_MAC,
1452a586d4f6SRusty Russell 				  offsetof(struct virtio_net_config, mac),
145377dd7693SSasha Levin 				  dev->dev_addr, dev->addr_len) < 0)
1454f2cedb63SDanny Kukawka 		eth_hw_addr_random(dev);
1455296f96fcSRusty Russell 
1456296f96fcSRusty Russell 	/* Set up our device-specific information */
1457296f96fcSRusty Russell 	vi = netdev_priv(dev);
1458296f96fcSRusty Russell 	vi->dev = dev;
1459296f96fcSRusty Russell 	vi->vdev = vdev;
1460d9d5dcc8SChristian Borntraeger 	vdev->priv = vi;
14613fa2a1dfSstephen hemminger 	vi->stats = alloc_percpu(struct virtnet_stats);
14623fa2a1dfSstephen hemminger 	err = -ENOMEM;
14633fa2a1dfSstephen hemminger 	if (vi->stats == NULL)
14643fa2a1dfSstephen hemminger 		goto free;
14653fa2a1dfSstephen hemminger 
1466586d17c5SJason Wang 	mutex_init(&vi->config_lock);
1467586d17c5SJason Wang 	vi->config_enable = true;
1468586d17c5SJason Wang 	INIT_WORK(&vi->config_work, virtnet_config_changed_work);
1469296f96fcSRusty Russell 
147097402b96SHerbert Xu 	/* If we can receive ANY GSO packets, we must allocate large ones. */
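	/* (A single GSO packet may be up to 64KB, far beyond MAX_PACKET_LEN,
	 * hence the page-chained "big" receive buffers.)
	 */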
14718e95a202SJoe Perches 	if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) ||
14728e95a202SJoe Perches 	    virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6) ||
14738e95a202SJoe Perches 	    virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ECN))
147497402b96SHerbert Xu 		vi->big_packets = true;
147597402b96SHerbert Xu 
14763f2c31d9SMark McLoughlin 	if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF))
14773f2c31d9SMark McLoughlin 		vi->mergeable_rx_bufs = true;
14783f2c31d9SMark McLoughlin 
1479986a4f4dSJason Wang 	if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ))
1480986a4f4dSJason Wang 		vi->has_cvq = true;
1481986a4f4dSJason Wang 
1482986a4f4dSJason Wang 	/* Use single tx/rx queue pair as default */
1483986a4f4dSJason Wang 	vi->curr_queue_pairs = 1;
1484986a4f4dSJason Wang 	vi->max_queue_pairs = max_queue_pairs;
1485986a4f4dSJason Wang 
1486986a4f4dSJason Wang 	/* Allocate/initialize the rx/tx queues, and invoke find_vqs */
14873f9c10b0SAmit Shah 	err = init_vqs(vi);
1488d2a7dddaSMichael S. Tsirkin 	if (err)
14893fa2a1dfSstephen hemminger 		goto free_stats;
1490d2a7dddaSMichael S. Tsirkin 
1491986a4f4dSJason Wang 	netif_set_real_num_tx_queues(dev, 1);
1492986a4f4dSJason Wang 	netif_set_real_num_rx_queues(dev, 1);
1493986a4f4dSJason Wang 
1494296f96fcSRusty Russell 	err = register_netdev(dev);
1495296f96fcSRusty Russell 	if (err) {
1496296f96fcSRusty Russell 		pr_debug("virtio_net: registering device failed\n");
1497d2a7dddaSMichael S. Tsirkin 		goto free_vqs;
1498296f96fcSRusty Russell 	}
1499b3369c1fSRusty Russell 
1500b3369c1fSRusty Russell 	/* Last of all, set up some receive buffers. */
1501986a4f4dSJason Wang 	for (i = 0; i < vi->max_queue_pairs; i++) {
1502986a4f4dSJason Wang 		try_fill_recv(&vi->rq[i], GFP_KERNEL);
1503b3369c1fSRusty Russell 
1504b3369c1fSRusty Russell 		/* If we didn't even get one input buffer, we're useless. */
1505986a4f4dSJason Wang 		if (vi->rq[i].num == 0) {
1506986a4f4dSJason Wang 			free_unused_bufs(vi);
1507b3369c1fSRusty Russell 			err = -ENOMEM;
1508986a4f4dSJason Wang 			goto free_recv_bufs;
1509986a4f4dSJason Wang 		}
1510b3369c1fSRusty Russell 	}
1511b3369c1fSRusty Russell 
1512167c25e4SJason Wang 	/* Assume link up if device can't report link status,
1513167c25e4SJason Wang 	   otherwise get link status from config. */
1514167c25e4SJason Wang 	if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) {
1515167c25e4SJason Wang 		netif_carrier_off(dev);
15163b07e9caSTejun Heo 		schedule_work(&vi->config_work);
1517167c25e4SJason Wang 	} else {
1518167c25e4SJason Wang 		vi->status = VIRTIO_NET_S_LINK_UP;
15194783256eSPantelis Koukousoulas 		netif_carrier_on(dev);
1520167c25e4SJason Wang 	}
15219f4d26d0SMark McLoughlin 
1522986a4f4dSJason Wang 	pr_debug("virtnet: registered device %s with %d RX and TX vq's\n",
1523986a4f4dSJason Wang 		 dev->name, max_queue_pairs);
1524986a4f4dSJason Wang 
1525296f96fcSRusty Russell 	return 0;
1526296f96fcSRusty Russell 
1527986a4f4dSJason Wang free_recv_bufs:
1528986a4f4dSJason Wang 	free_receive_bufs(vi);
1529b3369c1fSRusty Russell 	unregister_netdev(dev);
1530d2a7dddaSMichael S. Tsirkin free_vqs:
1531986a4f4dSJason Wang 	cancel_delayed_work_sync(&vi->refill);
1532e9d7417bSJason Wang 	virtnet_del_vqs(vi);
15333fa2a1dfSstephen hemminger free_stats:
15343fa2a1dfSstephen hemminger 	free_percpu(vi->stats);
1535296f96fcSRusty Russell free:
1536296f96fcSRusty Russell 	free_netdev(dev);
1537296f96fcSRusty Russell 	return err;
1538296f96fcSRusty Russell }
1539296f96fcSRusty Russell 
154004486ed0SAmit Shah static void remove_vq_common(struct virtnet_info *vi)
1541296f96fcSRusty Russell {
154204486ed0SAmit Shah 	vi->vdev->config->reset(vi->vdev);
1543830a8a97SShirley Ma 
1544830a8a97SShirley Ma 	/* Free unused buffers in both send and recv, if any. */
15459ab86bbcSShirley Ma 	free_unused_bufs(vi);
1546fb6813f4SRusty Russell 
1547986a4f4dSJason Wang 	free_receive_bufs(vi);
1548d2a7dddaSMichael S. Tsirkin 
1549986a4f4dSJason Wang 	virtnet_del_vqs(vi);
155004486ed0SAmit Shah }
155104486ed0SAmit Shah 
15528cc085d6SBill Pemberton static void virtnet_remove(struct virtio_device *vdev)
155304486ed0SAmit Shah {
155404486ed0SAmit Shah 	struct virtnet_info *vi = vdev->priv;
155504486ed0SAmit Shah 
1556586d17c5SJason Wang 	/* Prevent config work handler from accessing the device. */
1557586d17c5SJason Wang 	mutex_lock(&vi->config_lock);
1558586d17c5SJason Wang 	vi->config_enable = false;
1559586d17c5SJason Wang 	mutex_unlock(&vi->config_lock);
1560586d17c5SJason Wang 
156104486ed0SAmit Shah 	unregister_netdev(vi->dev);
156204486ed0SAmit Shah 
156304486ed0SAmit Shah 	remove_vq_common(vi);
1564fb6813f4SRusty Russell 
1565586d17c5SJason Wang 	flush_work(&vi->config_work);
1566586d17c5SJason Wang 
15672e66f55bSKrishna Kumar 	free_percpu(vi->stats);
156874b2553fSRusty Russell 	free_netdev(vi->dev);
1569296f96fcSRusty Russell }
1570296f96fcSRusty Russell 
15710741bcb5SAmit Shah #ifdef CONFIG_PM
15720741bcb5SAmit Shah static int virtnet_freeze(struct virtio_device *vdev)
15730741bcb5SAmit Shah {
15740741bcb5SAmit Shah 	struct virtnet_info *vi = vdev->priv;
1575986a4f4dSJason Wang 	int i;
15760741bcb5SAmit Shah 
1577586d17c5SJason Wang 	/* Prevent config work handler from accessing the device */
1578586d17c5SJason Wang 	mutex_lock(&vi->config_lock);
1579586d17c5SJason Wang 	vi->config_enable = false;
1580586d17c5SJason Wang 	mutex_unlock(&vi->config_lock);
1581586d17c5SJason Wang 
15820741bcb5SAmit Shah 	netif_device_detach(vi->dev);
15830741bcb5SAmit Shah 	cancel_delayed_work_sync(&vi->refill);
15840741bcb5SAmit Shah 
15850741bcb5SAmit Shah 	if (netif_running(vi->dev))
1586986a4f4dSJason Wang 		for (i = 0; i < vi->max_queue_pairs; i++) {
1587986a4f4dSJason Wang 			napi_disable(&vi->rq[i].napi);
1588986a4f4dSJason Wang 			netif_napi_del(&vi->rq[i].napi);
1589986a4f4dSJason Wang 		}
15900741bcb5SAmit Shah 
15910741bcb5SAmit Shah 	remove_vq_common(vi);
15920741bcb5SAmit Shah 
1593586d17c5SJason Wang 	flush_work(&vi->config_work);
1594586d17c5SJason Wang 
15950741bcb5SAmit Shah 	return 0;
15960741bcb5SAmit Shah }
15970741bcb5SAmit Shah 
15980741bcb5SAmit Shah static int virtnet_restore(struct virtio_device *vdev)
15990741bcb5SAmit Shah {
16000741bcb5SAmit Shah 	struct virtnet_info *vi = vdev->priv;
1601986a4f4dSJason Wang 	int err, i;
16020741bcb5SAmit Shah 
16030741bcb5SAmit Shah 	err = init_vqs(vi);
16040741bcb5SAmit Shah 	if (err)
16050741bcb5SAmit Shah 		return err;
16060741bcb5SAmit Shah 
16070741bcb5SAmit Shah 	if (netif_running(vi->dev))
1608986a4f4dSJason Wang 		for (i = 0; i < vi->max_queue_pairs; i++)
1609986a4f4dSJason Wang 			virtnet_napi_enable(&vi->rq[i]);
16100741bcb5SAmit Shah 
16110741bcb5SAmit Shah 	netif_device_attach(vi->dev);
16120741bcb5SAmit Shah 
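	/* Refill the receive queues; any queue that cannot be filled
	 * synchronously here is handed to the refill worker.
	 */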
1613986a4f4dSJason Wang 	for (i = 0; i < vi->max_queue_pairs; i++)
1614986a4f4dSJason Wang 		if (!try_fill_recv(&vi->rq[i], GFP_KERNEL))
16153b07e9caSTejun Heo 			schedule_delayed_work(&vi->refill, 0);
16160741bcb5SAmit Shah 
1617586d17c5SJason Wang 	mutex_lock(&vi->config_lock);
1618586d17c5SJason Wang 	vi->config_enable = true;
1619586d17c5SJason Wang 	mutex_unlock(&vi->config_lock);
1620586d17c5SJason Wang 
1621986a4f4dSJason Wang 	virtnet_set_queues(vi, vi->curr_queue_pairs);
1622986a4f4dSJason Wang 
16230741bcb5SAmit Shah 	return 0;
16240741bcb5SAmit Shah }
16250741bcb5SAmit Shah #endif
16260741bcb5SAmit Shah 
1627296f96fcSRusty Russell static struct virtio_device_id id_table[] = {
1628296f96fcSRusty Russell 	{ VIRTIO_ID_NET, VIRTIO_DEV_ANY_ID },
1629296f96fcSRusty Russell 	{ 0 },
1630296f96fcSRusty Russell };
1631296f96fcSRusty Russell 
1632c45a6816SRusty Russell static unsigned int features[] = {
16335e4fe5c4SMark McLoughlin 	VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GUEST_CSUM,
16345e4fe5c4SMark McLoughlin 	VIRTIO_NET_F_GSO, VIRTIO_NET_F_MAC,
1635c45a6816SRusty Russell 	VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6,
163697402b96SHerbert Xu 	VIRTIO_NET_F_HOST_ECN, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6,
16375c516751SSridhar Samudrala 	VIRTIO_NET_F_GUEST_ECN, VIRTIO_NET_F_GUEST_UFO,
16382a41f71dSAlex Williamson 	VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ,
16390bde9569SAlex Williamson 	VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN,
1640986a4f4dSJason Wang 	VIRTIO_NET_F_GUEST_ANNOUNCE, VIRTIO_NET_F_MQ,
1641c45a6816SRusty Russell };
1642c45a6816SRusty Russell 
164322402529SUwe Kleine-König static struct virtio_driver virtio_net_driver = {
1644c45a6816SRusty Russell 	.feature_table = features,
1645c45a6816SRusty Russell 	.feature_table_size = ARRAY_SIZE(features),
1646296f96fcSRusty Russell 	.driver.name = KBUILD_MODNAME,
1647296f96fcSRusty Russell 	.driver.owner = THIS_MODULE,
1648296f96fcSRusty Russell 	.id_table = id_table,
1649296f96fcSRusty Russell 	.probe = virtnet_probe,
16508cc085d6SBill Pemberton 	.remove = virtnet_remove,
16519f4d26d0SMark McLoughlin 	.config_changed = virtnet_config_changed,
16520741bcb5SAmit Shah #ifdef CONFIG_PM
16530741bcb5SAmit Shah 	.freeze = virtnet_freeze,
16540741bcb5SAmit Shah 	.restore = virtnet_restore,
16550741bcb5SAmit Shah #endif
1656296f96fcSRusty Russell };
1657296f96fcSRusty Russell 
1658296f96fcSRusty Russell static int __init init(void)
1659296f96fcSRusty Russell {
166022402529SUwe Kleine-König 	return register_virtio_driver(&virtio_net_driver);
1661296f96fcSRusty Russell }
1662296f96fcSRusty Russell 
1663296f96fcSRusty Russell static void __exit fini(void)
1664296f96fcSRusty Russell {
166522402529SUwe Kleine-König 	unregister_virtio_driver(&virtio_net_driver);
1666296f96fcSRusty Russell }
1667296f96fcSRusty Russell module_init(init);
1668296f96fcSRusty Russell module_exit(fini);
1669296f96fcSRusty Russell 
1670296f96fcSRusty Russell MODULE_DEVICE_TABLE(virtio, id_table);
1671296f96fcSRusty Russell MODULE_DESCRIPTION("Virtio network driver");
1672296f96fcSRusty Russell MODULE_LICENSE("GPL");
1673