148925e37SRusty Russell /* A network driver using virtio. 2296f96fcSRusty Russell * 3296f96fcSRusty Russell * Copyright 2007 Rusty Russell <rusty@rustcorp.com.au> IBM Corporation 4296f96fcSRusty Russell * 5296f96fcSRusty Russell * This program is free software; you can redistribute it and/or modify 6296f96fcSRusty Russell * it under the terms of the GNU General Public License as published by 7296f96fcSRusty Russell * the Free Software Foundation; either version 2 of the License, or 8296f96fcSRusty Russell * (at your option) any later version. 9296f96fcSRusty Russell * 10296f96fcSRusty Russell * This program is distributed in the hope that it will be useful, 11296f96fcSRusty Russell * but WITHOUT ANY WARRANTY; without even the implied warranty of 12296f96fcSRusty Russell * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13296f96fcSRusty Russell * GNU General Public License for more details. 14296f96fcSRusty Russell * 15296f96fcSRusty Russell * You should have received a copy of the GNU General Public License 16296f96fcSRusty Russell * along with this program; if not, write to the Free Software 17296f96fcSRusty Russell * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 18296f96fcSRusty Russell */ 19296f96fcSRusty Russell //#define DEBUG 20296f96fcSRusty Russell #include <linux/netdevice.h> 21296f96fcSRusty Russell #include <linux/etherdevice.h> 22a9ea3fc6SHerbert Xu #include <linux/ethtool.h> 23296f96fcSRusty Russell #include <linux/module.h> 24296f96fcSRusty Russell #include <linux/virtio.h> 25296f96fcSRusty Russell #include <linux/virtio_net.h> 26296f96fcSRusty Russell #include <linux/scatterlist.h> 27e918085aSAlex Williamson #include <linux/if_vlan.h> 285a0e3ad6STejun Heo #include <linux/slab.h> 29296f96fcSRusty Russell 306c0cd7c0SDor Laor static int napi_weight = 128; 316c0cd7c0SDor Laor module_param(napi_weight, int, 0444); 326c0cd7c0SDor Laor 33eb939922SRusty Russell static bool csum = true, gso = true; 3434a48579SRusty Russell module_param(csum, bool, 0444); 3534a48579SRusty Russell module_param(gso, bool, 0444); 3634a48579SRusty Russell 37296f96fcSRusty Russell /* FIXME: MTU in config. 
*/ 38e918085aSAlex Williamson #define MAX_PACKET_LEN (ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN) 393f2c31d9SMark McLoughlin #define GOOD_COPY_LEN 128 40296f96fcSRusty Russell 41f565a7c2SAlex Williamson #define VIRTNET_SEND_COMMAND_SG_MAX 2 4266846048SRick Jones #define VIRTNET_DRIVER_VERSION "1.0.0" 432a41f71dSAlex Williamson 443fa2a1dfSstephen hemminger struct virtnet_stats { 4583a27052SEric Dumazet struct u64_stats_sync tx_syncp; 4683a27052SEric Dumazet struct u64_stats_sync rx_syncp; 473fa2a1dfSstephen hemminger u64 tx_bytes; 483fa2a1dfSstephen hemminger u64 tx_packets; 493fa2a1dfSstephen hemminger 503fa2a1dfSstephen hemminger u64 rx_bytes; 513fa2a1dfSstephen hemminger u64 rx_packets; 523fa2a1dfSstephen hemminger }; 533fa2a1dfSstephen hemminger 54*e9d7417bSJason Wang /* Internal representation of a send virtqueue */ 55*e9d7417bSJason Wang struct send_queue { 56*e9d7417bSJason Wang /* Virtqueue associated with this send _queue */ 57*e9d7417bSJason Wang struct virtqueue *vq; 58*e9d7417bSJason Wang 59*e9d7417bSJason Wang /* TX: fragments + linear part + virtio header */ 60*e9d7417bSJason Wang struct scatterlist sg[MAX_SKB_FRAGS + 2]; 61*e9d7417bSJason Wang }; 62*e9d7417bSJason Wang 63*e9d7417bSJason Wang /* Internal representation of a receive virtqueue */ 64*e9d7417bSJason Wang struct receive_queue { 65*e9d7417bSJason Wang /* Virtqueue associated with this receive_queue */ 66*e9d7417bSJason Wang struct virtqueue *vq; 67*e9d7417bSJason Wang 68296f96fcSRusty Russell struct napi_struct napi; 69296f96fcSRusty Russell 70296f96fcSRusty Russell /* Number of input buffers, and max we've ever had. */ 71296f96fcSRusty Russell unsigned int num, max; 72296f96fcSRusty Russell 73*e9d7417bSJason Wang /* Chain pages by the private ptr. */ 74*e9d7417bSJason Wang struct page *pages; 75*e9d7417bSJason Wang 76*e9d7417bSJason Wang /* RX: fragments + linear part + virtio header */ 77*e9d7417bSJason Wang struct scatterlist sg[MAX_SKB_FRAGS + 2]; 78*e9d7417bSJason Wang }; 79*e9d7417bSJason Wang 80*e9d7417bSJason Wang struct virtnet_info { 81*e9d7417bSJason Wang struct virtio_device *vdev; 82*e9d7417bSJason Wang struct virtqueue *cvq; 83*e9d7417bSJason Wang struct net_device *dev; 84*e9d7417bSJason Wang struct send_queue sq; 85*e9d7417bSJason Wang struct receive_queue rq; 86*e9d7417bSJason Wang unsigned int status; 87*e9d7417bSJason Wang 8897402b96SHerbert Xu /* I like... big packets and I cannot lie! */ 8997402b96SHerbert Xu bool big_packets; 9097402b96SHerbert Xu 913f2c31d9SMark McLoughlin /* Host will merge rx buffers for big packets (shake it! shake it!) */ 923f2c31d9SMark McLoughlin bool mergeable_rx_bufs; 933f2c31d9SMark McLoughlin 94586d17c5SJason Wang /* enable config space updates */ 95586d17c5SJason Wang bool config_enable; 96586d17c5SJason Wang 973fa2a1dfSstephen hemminger /* Active statistics */ 983fa2a1dfSstephen hemminger struct virtnet_stats __percpu *stats; 993fa2a1dfSstephen hemminger 1003161e453SRusty Russell /* Work struct for refilling if we run low on memory. 
*/ 1013161e453SRusty Russell struct delayed_work refill; 1023161e453SRusty Russell 103586d17c5SJason Wang /* Work struct for config space updates */ 104586d17c5SJason Wang struct work_struct config_work; 105586d17c5SJason Wang 106586d17c5SJason Wang /* Lock for config space updates */ 107586d17c5SJason Wang struct mutex config_lock; 108296f96fcSRusty Russell }; 109296f96fcSRusty Russell 110b3f24698SRusty Russell struct skb_vnet_hdr { 111b3f24698SRusty Russell union { 112b3f24698SRusty Russell struct virtio_net_hdr hdr; 113b3f24698SRusty Russell struct virtio_net_hdr_mrg_rxbuf mhdr; 114b3f24698SRusty Russell }; 11548925e37SRusty Russell unsigned int num_sg; 116b3f24698SRusty Russell }; 117b3f24698SRusty Russell 1189ab86bbcSShirley Ma struct padded_vnet_hdr { 1199ab86bbcSShirley Ma struct virtio_net_hdr hdr; 1209ab86bbcSShirley Ma /* 1219ab86bbcSShirley Ma * virtio_net_hdr should be in a separated sg buffer because of a 1229ab86bbcSShirley Ma * QEMU bug, and data sg buffer shares same page with this header sg. 1239ab86bbcSShirley Ma * This padding makes next sg 16 byte aligned after virtio_net_hdr. 1249ab86bbcSShirley Ma */ 1259ab86bbcSShirley Ma char padding[6]; 1269ab86bbcSShirley Ma }; 1279ab86bbcSShirley Ma 128b3f24698SRusty Russell static inline struct skb_vnet_hdr *skb_vnet_hdr(struct sk_buff *skb) 129296f96fcSRusty Russell { 130b3f24698SRusty Russell return (struct skb_vnet_hdr *)skb->cb; 131296f96fcSRusty Russell } 132296f96fcSRusty Russell 1339ab86bbcSShirley Ma /* 1349ab86bbcSShirley Ma * private is used to chain pages for big packets, put the whole 1359ab86bbcSShirley Ma * most recent used list in the beginning for reuse 1369ab86bbcSShirley Ma */ 137*e9d7417bSJason Wang static void give_pages(struct receive_queue *rq, struct page *page) 138fb6813f4SRusty Russell { 1399ab86bbcSShirley Ma struct page *end; 1409ab86bbcSShirley Ma 141*e9d7417bSJason Wang /* Find end of list, sew whole thing into vi->rq.pages. */ 1429ab86bbcSShirley Ma for (end = page; end->private; end = (struct page *)end->private); 143*e9d7417bSJason Wang end->private = (unsigned long)rq->pages; 144*e9d7417bSJason Wang rq->pages = page; 145fb6813f4SRusty Russell } 146fb6813f4SRusty Russell 147*e9d7417bSJason Wang static struct page *get_a_page(struct receive_queue *rq, gfp_t gfp_mask) 148fb6813f4SRusty Russell { 149*e9d7417bSJason Wang struct page *p = rq->pages; 150fb6813f4SRusty Russell 1519ab86bbcSShirley Ma if (p) { 152*e9d7417bSJason Wang rq->pages = (struct page *)p->private; 1539ab86bbcSShirley Ma /* clear private here, it is used to chain pages */ 1549ab86bbcSShirley Ma p->private = 0; 1559ab86bbcSShirley Ma } else 156fb6813f4SRusty Russell p = alloc_page(gfp_mask); 157fb6813f4SRusty Russell return p; 158fb6813f4SRusty Russell } 159fb6813f4SRusty Russell 160*e9d7417bSJason Wang static void skb_xmit_done(struct virtqueue *vq) 161296f96fcSRusty Russell { 162*e9d7417bSJason Wang struct virtnet_info *vi = vq->vdev->priv; 163296f96fcSRusty Russell 1642cb9c6baSRusty Russell /* Suppress further interrupts. */ 165*e9d7417bSJason Wang virtqueue_disable_cb(vq); 16611a3a154SRusty Russell 167363f1514SRusty Russell /* We were probably waiting for more output buffers. 
*/ 168296f96fcSRusty Russell netif_wake_queue(vi->dev); 169296f96fcSRusty Russell } 170296f96fcSRusty Russell 1719ab86bbcSShirley Ma static void set_skb_frag(struct sk_buff *skb, struct page *page, 1729ab86bbcSShirley Ma unsigned int offset, unsigned int *len) 173296f96fcSRusty Russell { 1748a59a7b9SKrishna Kumar int size = min((unsigned)PAGE_SIZE - offset, *len); 1759ab86bbcSShirley Ma int i = skb_shinfo(skb)->nr_frags; 176296f96fcSRusty Russell 1778a59a7b9SKrishna Kumar __skb_fill_page_desc(skb, i, page, offset, size); 1789ab86bbcSShirley Ma 1798a59a7b9SKrishna Kumar skb->data_len += size; 1808a59a7b9SKrishna Kumar skb->len += size; 1814b727361SEric Dumazet skb->truesize += PAGE_SIZE; 1829ab86bbcSShirley Ma skb_shinfo(skb)->nr_frags++; 1838a59a7b9SKrishna Kumar *len -= size; 184296f96fcSRusty Russell } 1853f2c31d9SMark McLoughlin 1863464645aSMike Waychison /* Called from bottom half context */ 187*e9d7417bSJason Wang static struct sk_buff *page_to_skb(struct receive_queue *rq, 1889ab86bbcSShirley Ma struct page *page, unsigned int len) 1899ab86bbcSShirley Ma { 190*e9d7417bSJason Wang struct virtnet_info *vi = rq->vq->vdev->priv; 1919ab86bbcSShirley Ma struct sk_buff *skb; 1929ab86bbcSShirley Ma struct skb_vnet_hdr *hdr; 1939ab86bbcSShirley Ma unsigned int copy, hdr_len, offset; 1949ab86bbcSShirley Ma char *p; 1959ab86bbcSShirley Ma 1969ab86bbcSShirley Ma p = page_address(page); 1979ab86bbcSShirley Ma 1989ab86bbcSShirley Ma /* copy small packet so we can reuse these pages for small data */ 1999ab86bbcSShirley Ma skb = netdev_alloc_skb_ip_align(vi->dev, GOOD_COPY_LEN); 2009ab86bbcSShirley Ma if (unlikely(!skb)) 2019ab86bbcSShirley Ma return NULL; 2029ab86bbcSShirley Ma 2039ab86bbcSShirley Ma hdr = skb_vnet_hdr(skb); 2049ab86bbcSShirley Ma 2053f2c31d9SMark McLoughlin if (vi->mergeable_rx_bufs) { 2069ab86bbcSShirley Ma hdr_len = sizeof hdr->mhdr; 2079ab86bbcSShirley Ma offset = hdr_len; 2089ab86bbcSShirley Ma } else { 2099ab86bbcSShirley Ma hdr_len = sizeof hdr->hdr; 2109ab86bbcSShirley Ma offset = sizeof(struct padded_vnet_hdr); 2119ab86bbcSShirley Ma } 2123f2c31d9SMark McLoughlin 2139ab86bbcSShirley Ma memcpy(hdr, p, hdr_len); 2143f2c31d9SMark McLoughlin 2159ab86bbcSShirley Ma len -= hdr_len; 2169ab86bbcSShirley Ma p += offset; 2173f2c31d9SMark McLoughlin 2183f2c31d9SMark McLoughlin copy = len; 2193f2c31d9SMark McLoughlin if (copy > skb_tailroom(skb)) 2203f2c31d9SMark McLoughlin copy = skb_tailroom(skb); 2213f2c31d9SMark McLoughlin memcpy(skb_put(skb, copy), p, copy); 2223f2c31d9SMark McLoughlin 2233f2c31d9SMark McLoughlin len -= copy; 2249ab86bbcSShirley Ma offset += copy; 2253f2c31d9SMark McLoughlin 226e878d78bSSasha Levin /* 227e878d78bSSasha Levin * Verify that we can indeed put this data into a skb. 228e878d78bSSasha Levin * This is here to handle cases when the device erroneously 229e878d78bSSasha Levin * tries to receive more than is possible. This is usually 230e878d78bSSasha Levin * the case of a broken device. 
231e878d78bSSasha Levin */ 232e878d78bSSasha Levin if (unlikely(len > MAX_SKB_FRAGS * PAGE_SIZE)) { 233be443899SAmerigo Wang net_dbg_ratelimited("%s: too much data\n", skb->dev->name); 234e878d78bSSasha Levin dev_kfree_skb(skb); 235e878d78bSSasha Levin return NULL; 236e878d78bSSasha Levin } 237e878d78bSSasha Levin 2389ab86bbcSShirley Ma while (len) { 2399ab86bbcSShirley Ma set_skb_frag(skb, page, offset, &len); 2409ab86bbcSShirley Ma page = (struct page *)page->private; 2419ab86bbcSShirley Ma offset = 0; 2423f2c31d9SMark McLoughlin } 2433f2c31d9SMark McLoughlin 2449ab86bbcSShirley Ma if (page) 245*e9d7417bSJason Wang give_pages(rq, page); 2463f2c31d9SMark McLoughlin 2479ab86bbcSShirley Ma return skb; 2489ab86bbcSShirley Ma } 2499ab86bbcSShirley Ma 250*e9d7417bSJason Wang static int receive_mergeable(struct receive_queue *rq, struct sk_buff *skb) 2519ab86bbcSShirley Ma { 2529ab86bbcSShirley Ma struct skb_vnet_hdr *hdr = skb_vnet_hdr(skb); 2539ab86bbcSShirley Ma struct page *page; 2549ab86bbcSShirley Ma int num_buf, i, len; 2559ab86bbcSShirley Ma 2569ab86bbcSShirley Ma num_buf = hdr->mhdr.num_buffers; 2579ab86bbcSShirley Ma while (--num_buf) { 2583f2c31d9SMark McLoughlin i = skb_shinfo(skb)->nr_frags; 2593f2c31d9SMark McLoughlin if (i >= MAX_SKB_FRAGS) { 2609ab86bbcSShirley Ma pr_debug("%s: packet too long\n", skb->dev->name); 2619ab86bbcSShirley Ma skb->dev->stats.rx_length_errors++; 2629ab86bbcSShirley Ma return -EINVAL; 2633f2c31d9SMark McLoughlin } 264*e9d7417bSJason Wang page = virtqueue_get_buf(rq->vq, &len); 2659ab86bbcSShirley Ma if (!page) { 2663f2c31d9SMark McLoughlin pr_debug("%s: rx error: %d buffers missing\n", 2679ab86bbcSShirley Ma skb->dev->name, hdr->mhdr.num_buffers); 2689ab86bbcSShirley Ma skb->dev->stats.rx_length_errors++; 2699ab86bbcSShirley Ma return -EINVAL; 2703f2c31d9SMark McLoughlin } 2713fa2a1dfSstephen hemminger 2723f2c31d9SMark McLoughlin if (len > PAGE_SIZE) 2733f2c31d9SMark McLoughlin len = PAGE_SIZE; 2743f2c31d9SMark McLoughlin 2759ab86bbcSShirley Ma set_skb_frag(skb, page, 0, &len); 2769ab86bbcSShirley Ma 277*e9d7417bSJason Wang --rq->num; 2783f2c31d9SMark McLoughlin } 2799ab86bbcSShirley Ma return 0; 2809ab86bbcSShirley Ma } 2819ab86bbcSShirley Ma 282*e9d7417bSJason Wang static void receive_buf(struct receive_queue *rq, void *buf, unsigned int len) 2839ab86bbcSShirley Ma { 284*e9d7417bSJason Wang struct virtnet_info *vi = rq->vq->vdev->priv; 285*e9d7417bSJason Wang struct net_device *dev = vi->dev; 28658472a76SEric Dumazet struct virtnet_stats *stats = this_cpu_ptr(vi->stats); 2879ab86bbcSShirley Ma struct sk_buff *skb; 2889ab86bbcSShirley Ma struct page *page; 2899ab86bbcSShirley Ma struct skb_vnet_hdr *hdr; 2909ab86bbcSShirley Ma 2919ab86bbcSShirley Ma if (unlikely(len < sizeof(struct virtio_net_hdr) + ETH_HLEN)) { 2929ab86bbcSShirley Ma pr_debug("%s: short packet %i\n", dev->name, len); 2939ab86bbcSShirley Ma dev->stats.rx_length_errors++; 2949ab86bbcSShirley Ma if (vi->mergeable_rx_bufs || vi->big_packets) 295*e9d7417bSJason Wang give_pages(rq, buf); 2969ab86bbcSShirley Ma else 2979ab86bbcSShirley Ma dev_kfree_skb(buf); 2989ab86bbcSShirley Ma return; 2999ab86bbcSShirley Ma } 3009ab86bbcSShirley Ma 3019ab86bbcSShirley Ma if (!vi->mergeable_rx_bufs && !vi->big_packets) { 3029ab86bbcSShirley Ma skb = buf; 3039ab86bbcSShirley Ma len -= sizeof(struct virtio_net_hdr); 3049ab86bbcSShirley Ma skb_trim(skb, len); 3053f2c31d9SMark McLoughlin } else { 3069ab86bbcSShirley Ma page = buf; 307*e9d7417bSJason Wang skb = page_to_skb(rq, page, len); 3089ab86bbcSShirley 
Ma if (unlikely(!skb)) { 30997402b96SHerbert Xu dev->stats.rx_dropped++; 310*e9d7417bSJason Wang give_pages(rq, page); 3119ab86bbcSShirley Ma return; 3129ab86bbcSShirley Ma } 3139ab86bbcSShirley Ma if (vi->mergeable_rx_bufs) 314*e9d7417bSJason Wang if (receive_mergeable(rq, skb)) { 3159ab86bbcSShirley Ma dev_kfree_skb(skb); 3169ab86bbcSShirley Ma return; 31797402b96SHerbert Xu } 3183f2c31d9SMark McLoughlin } 3193f2c31d9SMark McLoughlin 3209ab86bbcSShirley Ma hdr = skb_vnet_hdr(skb); 3213fa2a1dfSstephen hemminger 32283a27052SEric Dumazet u64_stats_update_begin(&stats->rx_syncp); 3233fa2a1dfSstephen hemminger stats->rx_bytes += skb->len; 3243fa2a1dfSstephen hemminger stats->rx_packets++; 32583a27052SEric Dumazet u64_stats_update_end(&stats->rx_syncp); 326296f96fcSRusty Russell 327b3f24698SRusty Russell if (hdr->hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) { 328296f96fcSRusty Russell pr_debug("Needs csum!\n"); 329b3f24698SRusty Russell if (!skb_partial_csum_set(skb, 330b3f24698SRusty Russell hdr->hdr.csum_start, 331b3f24698SRusty Russell hdr->hdr.csum_offset)) 332296f96fcSRusty Russell goto frame_err; 33310a8d94aSJason Wang } else if (hdr->hdr.flags & VIRTIO_NET_HDR_F_DATA_VALID) { 33410a8d94aSJason Wang skb->ip_summed = CHECKSUM_UNNECESSARY; 335296f96fcSRusty Russell } 336296f96fcSRusty Russell 33723cde76dSMark McLoughlin skb->protocol = eth_type_trans(skb, dev); 33823cde76dSMark McLoughlin pr_debug("Receiving skb proto 0x%04x len %i type %i\n", 33923cde76dSMark McLoughlin ntohs(skb->protocol), skb->len, skb->pkt_type); 34023cde76dSMark McLoughlin 341b3f24698SRusty Russell if (hdr->hdr.gso_type != VIRTIO_NET_HDR_GSO_NONE) { 342296f96fcSRusty Russell pr_debug("GSO!\n"); 343b3f24698SRusty Russell switch (hdr->hdr.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) { 344296f96fcSRusty Russell case VIRTIO_NET_HDR_GSO_TCPV4: 345296f96fcSRusty Russell skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4; 346296f96fcSRusty Russell break; 347296f96fcSRusty Russell case VIRTIO_NET_HDR_GSO_UDP: 348296f96fcSRusty Russell skb_shinfo(skb)->gso_type = SKB_GSO_UDP; 349296f96fcSRusty Russell break; 350296f96fcSRusty Russell case VIRTIO_NET_HDR_GSO_TCPV6: 351296f96fcSRusty Russell skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6; 352296f96fcSRusty Russell break; 353296f96fcSRusty Russell default: 354be443899SAmerigo Wang net_warn_ratelimited("%s: bad gso type %u.\n", 355b3f24698SRusty Russell dev->name, hdr->hdr.gso_type); 356296f96fcSRusty Russell goto frame_err; 357296f96fcSRusty Russell } 358296f96fcSRusty Russell 359b3f24698SRusty Russell if (hdr->hdr.gso_type & VIRTIO_NET_HDR_GSO_ECN) 36034a48579SRusty Russell skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN; 36134a48579SRusty Russell 362b3f24698SRusty Russell skb_shinfo(skb)->gso_size = hdr->hdr.gso_size; 363296f96fcSRusty Russell if (skb_shinfo(skb)->gso_size == 0) { 364be443899SAmerigo Wang net_warn_ratelimited("%s: zero gso size.\n", dev->name); 365296f96fcSRusty Russell goto frame_err; 366296f96fcSRusty Russell } 367296f96fcSRusty Russell 368296f96fcSRusty Russell /* Header must be checked, and gso_segs computed. 
*/ 369296f96fcSRusty Russell skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY; 370296f96fcSRusty Russell skb_shinfo(skb)->gso_segs = 0; 371296f96fcSRusty Russell } 372296f96fcSRusty Russell 373296f96fcSRusty Russell netif_receive_skb(skb); 374296f96fcSRusty Russell return; 375296f96fcSRusty Russell 376296f96fcSRusty Russell frame_err: 377296f96fcSRusty Russell dev->stats.rx_frame_errors++; 378296f96fcSRusty Russell dev_kfree_skb(skb); 379296f96fcSRusty Russell } 380296f96fcSRusty Russell 381*e9d7417bSJason Wang static int add_recvbuf_small(struct receive_queue *rq, gfp_t gfp) 382296f96fcSRusty Russell { 383*e9d7417bSJason Wang struct virtnet_info *vi = rq->vq->vdev->priv; 384296f96fcSRusty Russell struct sk_buff *skb; 385b3f24698SRusty Russell struct skb_vnet_hdr *hdr; 3869ab86bbcSShirley Ma int err; 3873f2c31d9SMark McLoughlin 3883464645aSMike Waychison skb = __netdev_alloc_skb_ip_align(vi->dev, MAX_PACKET_LEN, gfp); 3899ab86bbcSShirley Ma if (unlikely(!skb)) 3909ab86bbcSShirley Ma return -ENOMEM; 391296f96fcSRusty Russell 392296f96fcSRusty Russell skb_put(skb, MAX_PACKET_LEN); 3933f2c31d9SMark McLoughlin 3943f2c31d9SMark McLoughlin hdr = skb_vnet_hdr(skb); 395*e9d7417bSJason Wang sg_set_buf(rq->sg, &hdr->hdr, sizeof hdr->hdr); 39697402b96SHerbert Xu 397*e9d7417bSJason Wang skb_to_sgvec(skb, rq->sg + 1, 0, skb->len); 39897402b96SHerbert Xu 399*e9d7417bSJason Wang err = virtqueue_add_buf(rq->vq, rq->sg, 0, 2, skb, gfp); 4009ab86bbcSShirley Ma if (err < 0) 4019ab86bbcSShirley Ma dev_kfree_skb(skb); 40297402b96SHerbert Xu 4039ab86bbcSShirley Ma return err; 40497402b96SHerbert Xu } 40597402b96SHerbert Xu 406*e9d7417bSJason Wang static int add_recvbuf_big(struct receive_queue *rq, gfp_t gfp) 4079ab86bbcSShirley Ma { 4089ab86bbcSShirley Ma struct page *first, *list = NULL; 4099ab86bbcSShirley Ma char *p; 4109ab86bbcSShirley Ma int i, err, offset; 411296f96fcSRusty Russell 412*e9d7417bSJason Wang /* page in rq->sg[MAX_SKB_FRAGS + 1] is list tail */ 4139ab86bbcSShirley Ma for (i = MAX_SKB_FRAGS + 1; i > 1; --i) { 414*e9d7417bSJason Wang first = get_a_page(rq, gfp); 4159ab86bbcSShirley Ma if (!first) { 4169ab86bbcSShirley Ma if (list) 417*e9d7417bSJason Wang give_pages(rq, list); 4189ab86bbcSShirley Ma return -ENOMEM; 419296f96fcSRusty Russell } 420*e9d7417bSJason Wang sg_set_buf(&rq->sg[i], page_address(first), PAGE_SIZE); 4219ab86bbcSShirley Ma 4229ab86bbcSShirley Ma /* chain new page in list head to match sg */ 4239ab86bbcSShirley Ma first->private = (unsigned long)list; 4249ab86bbcSShirley Ma list = first; 4259ab86bbcSShirley Ma } 4269ab86bbcSShirley Ma 427*e9d7417bSJason Wang first = get_a_page(rq, gfp); 4289ab86bbcSShirley Ma if (!first) { 429*e9d7417bSJason Wang give_pages(rq, list); 4309ab86bbcSShirley Ma return -ENOMEM; 4319ab86bbcSShirley Ma } 4329ab86bbcSShirley Ma p = page_address(first); 4339ab86bbcSShirley Ma 434*e9d7417bSJason Wang /* rq->sg[0], rq->sg[1] share the same page */ 435*e9d7417bSJason Wang /* a separated rq->sg[0] for virtio_net_hdr only due to QEMU bug */ 436*e9d7417bSJason Wang sg_set_buf(&rq->sg[0], p, sizeof(struct virtio_net_hdr)); 4379ab86bbcSShirley Ma 438*e9d7417bSJason Wang /* rq->sg[1] for data packet, from offset */ 4399ab86bbcSShirley Ma offset = sizeof(struct padded_vnet_hdr); 440*e9d7417bSJason Wang sg_set_buf(&rq->sg[1], p + offset, PAGE_SIZE - offset); 4419ab86bbcSShirley Ma 4429ab86bbcSShirley Ma /* chain first in list head */ 4439ab86bbcSShirley Ma first->private = (unsigned long)list; 444*e9d7417bSJason Wang err = virtqueue_add_buf(rq->vq, rq->sg, 0, 
MAX_SKB_FRAGS + 2, 445aa989f5eSMichael S. Tsirkin first, gfp); 4469ab86bbcSShirley Ma if (err < 0) 447*e9d7417bSJason Wang give_pages(rq, first); 4489ab86bbcSShirley Ma 4499ab86bbcSShirley Ma return err; 4509ab86bbcSShirley Ma } 4519ab86bbcSShirley Ma 452*e9d7417bSJason Wang static int add_recvbuf_mergeable(struct receive_queue *rq, gfp_t gfp) 4539ab86bbcSShirley Ma { 4549ab86bbcSShirley Ma struct page *page; 4559ab86bbcSShirley Ma int err; 4569ab86bbcSShirley Ma 457*e9d7417bSJason Wang page = get_a_page(rq, gfp); 4589ab86bbcSShirley Ma if (!page) 4599ab86bbcSShirley Ma return -ENOMEM; 4609ab86bbcSShirley Ma 461*e9d7417bSJason Wang sg_init_one(rq->sg, page_address(page), PAGE_SIZE); 4629ab86bbcSShirley Ma 463*e9d7417bSJason Wang err = virtqueue_add_buf(rq->vq, rq->sg, 0, 1, page, gfp); 4649ab86bbcSShirley Ma if (err < 0) 465*e9d7417bSJason Wang give_pages(rq, page); 4669ab86bbcSShirley Ma 4679ab86bbcSShirley Ma return err; 468296f96fcSRusty Russell } 469296f96fcSRusty Russell 470b2baed69SRusty Russell /* 471b2baed69SRusty Russell * Returns false if we couldn't fill entirely (OOM). 472b2baed69SRusty Russell * 473b2baed69SRusty Russell * Normally run in the receive path, but can also be run from ndo_open 474b2baed69SRusty Russell * before we're receiving packets, or from refill_work which is 475b2baed69SRusty Russell * careful to disable receiving (using napi_disable). 476b2baed69SRusty Russell */ 477*e9d7417bSJason Wang static bool try_fill_recv(struct receive_queue *rq, gfp_t gfp) 4783f2c31d9SMark McLoughlin { 479*e9d7417bSJason Wang struct virtnet_info *vi = rq->vq->vdev->priv; 4803f2c31d9SMark McLoughlin int err; 4811788f495SMichael S. Tsirkin bool oom; 4823f2c31d9SMark McLoughlin 4830aea51c3SAmit Shah do { 4849ab86bbcSShirley Ma if (vi->mergeable_rx_bufs) 485*e9d7417bSJason Wang err = add_recvbuf_mergeable(rq, gfp); 4869ab86bbcSShirley Ma else if (vi->big_packets) 487*e9d7417bSJason Wang err = add_recvbuf_big(rq, gfp); 4889ab86bbcSShirley Ma else 489*e9d7417bSJason Wang err = add_recvbuf_small(rq, gfp); 4903f2c31d9SMark McLoughlin 4911788f495SMichael S. Tsirkin oom = err == -ENOMEM; 4921788f495SMichael S. Tsirkin if (err < 0) 4933f2c31d9SMark McLoughlin break; 494*e9d7417bSJason Wang ++rq->num; 4950aea51c3SAmit Shah } while (err > 0); 496*e9d7417bSJason Wang if (unlikely(rq->num > rq->max)) 497*e9d7417bSJason Wang rq->max = rq->num; 498*e9d7417bSJason Wang virtqueue_kick(rq->vq); 4993161e453SRusty Russell return !oom; 5003f2c31d9SMark McLoughlin } 5013f2c31d9SMark McLoughlin 50218445c4dSRusty Russell static void skb_recv_done(struct virtqueue *rvq) 503296f96fcSRusty Russell { 504296f96fcSRusty Russell struct virtnet_info *vi = rvq->vdev->priv; 505*e9d7417bSJason Wang struct receive_queue *rq = &vi->rq; 506*e9d7417bSJason Wang 50718445c4dSRusty Russell /* Schedule NAPI, Suppress further interrupts if successful. */ 508*e9d7417bSJason Wang if (napi_schedule_prep(&rq->napi)) { 5091915a712SMichael S. Tsirkin virtqueue_disable_cb(rvq); 510*e9d7417bSJason Wang __napi_schedule(&rq->napi); 51118445c4dSRusty Russell } 512296f96fcSRusty Russell } 513296f96fcSRusty Russell 514*e9d7417bSJason Wang static void virtnet_napi_enable(struct receive_queue *rq) 5153e9d08ecSBruce Rogers { 516*e9d7417bSJason Wang napi_enable(&rq->napi); 5173e9d08ecSBruce Rogers 5183e9d08ecSBruce Rogers /* If all buffers were filled by other side before we napi_enabled, we 5193e9d08ecSBruce Rogers * won't get another interrupt, so process any outstanding packets 5203e9d08ecSBruce Rogers * now. 
virtnet_poll wants to re-enable the queue, so we disable here.
	 * We synchronize against interrupts via NAPI_STATE_SCHED */
	if (napi_schedule_prep(&rq->napi)) {
		virtqueue_disable_cb(rq->vq);
		local_bh_disable();
		__napi_schedule(&rq->napi);
		local_bh_enable();
	}
}

static void refill_work(struct work_struct *work)
{
	struct virtnet_info *vi =
		container_of(work, struct virtnet_info, refill.work);
	bool still_empty;

	napi_disable(&vi->rq.napi);
	still_empty = !try_fill_recv(&vi->rq, GFP_KERNEL);
	virtnet_napi_enable(&vi->rq);

	/* In theory, this can happen: if we don't get any buffers in
	 * we will *never* try to fill again. */
	if (still_empty)
		schedule_delayed_work(&vi->refill, HZ/2);
}

static int virtnet_poll(struct napi_struct *napi, int budget)
{
	struct receive_queue *rq =
		container_of(napi, struct receive_queue, napi);
	struct virtnet_info *vi = rq->vq->vdev->priv;
	void *buf;
	unsigned int len, received = 0;

again:
	while (received < budget &&
	       (buf = virtqueue_get_buf(rq->vq, &len)) != NULL) {
		receive_buf(rq, buf, len);
		--rq->num;
		received++;
	}

	if (rq->num < rq->max / 2) {
		if (!try_fill_recv(rq, GFP_ATOMIC))
			schedule_delayed_work(&vi->refill, 0);
	}

	/* Out of packets?
*/ 5688329d98eSRusty Russell if (received < budget) { 569288379f0SBen Hutchings napi_complete(napi); 570*e9d7417bSJason Wang if (unlikely(!virtqueue_enable_cb(rq->vq)) && 5718e95a202SJoe Perches napi_schedule_prep(napi)) { 572*e9d7417bSJason Wang virtqueue_disable_cb(rq->vq); 573288379f0SBen Hutchings __napi_schedule(napi); 574296f96fcSRusty Russell goto again; 575296f96fcSRusty Russell } 5764265f161SChristian Borntraeger } 577296f96fcSRusty Russell 578296f96fcSRusty Russell return received; 579296f96fcSRusty Russell } 580296f96fcSRusty Russell 581*e9d7417bSJason Wang static unsigned int free_old_xmit_skbs(struct send_queue *sq) 582296f96fcSRusty Russell { 583296f96fcSRusty Russell struct sk_buff *skb; 58448925e37SRusty Russell unsigned int len, tot_sgs = 0; 585*e9d7417bSJason Wang struct virtnet_info *vi = sq->vq->vdev->priv; 58658472a76SEric Dumazet struct virtnet_stats *stats = this_cpu_ptr(vi->stats); 587296f96fcSRusty Russell 588*e9d7417bSJason Wang while ((skb = virtqueue_get_buf(sq->vq, &len)) != NULL) { 589296f96fcSRusty Russell pr_debug("Sent skb %p\n", skb); 5903fa2a1dfSstephen hemminger 59183a27052SEric Dumazet u64_stats_update_begin(&stats->tx_syncp); 5923fa2a1dfSstephen hemminger stats->tx_bytes += skb->len; 5933fa2a1dfSstephen hemminger stats->tx_packets++; 59483a27052SEric Dumazet u64_stats_update_end(&stats->tx_syncp); 5953fa2a1dfSstephen hemminger 59648925e37SRusty Russell tot_sgs += skb_vnet_hdr(skb)->num_sg; 597ed79bab8SEric Dumazet dev_kfree_skb_any(skb); 598296f96fcSRusty Russell } 59948925e37SRusty Russell return tot_sgs; 600296f96fcSRusty Russell } 601296f96fcSRusty Russell 602*e9d7417bSJason Wang static int xmit_skb(struct send_queue *sq, struct sk_buff *skb) 603296f96fcSRusty Russell { 604b3f24698SRusty Russell struct skb_vnet_hdr *hdr = skb_vnet_hdr(skb); 605296f96fcSRusty Russell const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest; 606*e9d7417bSJason Wang struct virtnet_info *vi = sq->vq->vdev->priv; 607296f96fcSRusty Russell 608e174961cSJohannes Berg pr_debug("%s: xmit %p %pM\n", vi->dev->name, skb, dest); 609296f96fcSRusty Russell 610296f96fcSRusty Russell if (skb->ip_summed == CHECKSUM_PARTIAL) { 611b3f24698SRusty Russell hdr->hdr.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM; 61255508d60SMichał Mirosław hdr->hdr.csum_start = skb_checksum_start_offset(skb); 613b3f24698SRusty Russell hdr->hdr.csum_offset = skb->csum_offset; 614296f96fcSRusty Russell } else { 615b3f24698SRusty Russell hdr->hdr.flags = 0; 616b3f24698SRusty Russell hdr->hdr.csum_offset = hdr->hdr.csum_start = 0; 617296f96fcSRusty Russell } 618296f96fcSRusty Russell 619296f96fcSRusty Russell if (skb_is_gso(skb)) { 620b3f24698SRusty Russell hdr->hdr.hdr_len = skb_headlen(skb); 621b3f24698SRusty Russell hdr->hdr.gso_size = skb_shinfo(skb)->gso_size; 62234a48579SRusty Russell if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) 623b3f24698SRusty Russell hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV4; 624296f96fcSRusty Russell else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) 625b3f24698SRusty Russell hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV6; 626296f96fcSRusty Russell else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP) 627b3f24698SRusty Russell hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_UDP; 628296f96fcSRusty Russell else 629296f96fcSRusty Russell BUG(); 63034a48579SRusty Russell if (skb_shinfo(skb)->gso_type & SKB_GSO_TCP_ECN) 631b3f24698SRusty Russell hdr->hdr.gso_type |= VIRTIO_NET_HDR_GSO_ECN; 632296f96fcSRusty Russell } else { 633b3f24698SRusty Russell hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_NONE; 
634b3f24698SRusty Russell hdr->hdr.gso_size = hdr->hdr.hdr_len = 0; 635296f96fcSRusty Russell } 636296f96fcSRusty Russell 637b3f24698SRusty Russell hdr->mhdr.num_buffers = 0; 6383f2c31d9SMark McLoughlin 6393f2c31d9SMark McLoughlin /* Encode metadata header at front. */ 6403f2c31d9SMark McLoughlin if (vi->mergeable_rx_bufs) 641*e9d7417bSJason Wang sg_set_buf(sq->sg, &hdr->mhdr, sizeof hdr->mhdr); 6423f2c31d9SMark McLoughlin else 643*e9d7417bSJason Wang sg_set_buf(sq->sg, &hdr->hdr, sizeof hdr->hdr); 6443f2c31d9SMark McLoughlin 645*e9d7417bSJason Wang hdr->num_sg = skb_to_sgvec(skb, sq->sg + 1, 0, skb->len) + 1; 646*e9d7417bSJason Wang return virtqueue_add_buf(sq->vq, sq->sg, hdr->num_sg, 647f96fde41SRusty Russell 0, skb, GFP_ATOMIC); 64811a3a154SRusty Russell } 64911a3a154SRusty Russell 650424efe9cSStephen Hemminger static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev) 65199ffc696SRusty Russell { 65299ffc696SRusty Russell struct virtnet_info *vi = netdev_priv(dev); 653*e9d7417bSJason Wang struct send_queue *sq = &vi->sq; 65448925e37SRusty Russell int capacity; 6552cb9c6baSRusty Russell 6562cb9c6baSRusty Russell /* Free up any pending old buffers before queueing new ones. */ 657*e9d7417bSJason Wang free_old_xmit_skbs(sq); 65899ffc696SRusty Russell 65903f191baSMichael S. Tsirkin /* Try to transmit */ 660*e9d7417bSJason Wang capacity = xmit_skb(sq, skb); 66199ffc696SRusty Russell 66248925e37SRusty Russell /* This can happen with OOM and indirect buffers. */ 66348925e37SRusty Russell if (unlikely(capacity < 0)) { 66458eba97dSRusty Russell if (likely(capacity == -ENOMEM)) { 66531304165STorsten Kaiser if (net_ratelimit()) 66658eba97dSRusty Russell dev_warn(&dev->dev, 66758eba97dSRusty Russell "TX queue failure: out of memory\n"); 66858eba97dSRusty Russell } else { 66958eba97dSRusty Russell dev->stats.tx_fifo_errors++; 6702e57b79cSRick Jones if (net_ratelimit()) 67158eba97dSRusty Russell dev_warn(&dev->dev, 67258eba97dSRusty Russell "Unexpected TX queue failure: %d\n", 67358eba97dSRusty Russell capacity); 6742cb9c6baSRusty Russell } 67558eba97dSRusty Russell dev->stats.tx_dropped++; 67658eba97dSRusty Russell kfree_skb(skb); 67758eba97dSRusty Russell return NETDEV_TX_OK; 678296f96fcSRusty Russell } 679*e9d7417bSJason Wang virtqueue_kick(sq->vq); 68003f191baSMichael S. Tsirkin 68148925e37SRusty Russell /* Don't wait up for transmitted skbs to be freed. */ 68248925e37SRusty Russell skb_orphan(skb); 68348925e37SRusty Russell nf_reset(skb); 68448925e37SRusty Russell 68548925e37SRusty Russell /* Apparently nice girls don't return TX_BUSY; stop the queue 68648925e37SRusty Russell * before it gets out of hand. Naturally, this wastes entries. */ 68748925e37SRusty Russell if (capacity < 2+MAX_SKB_FRAGS) { 68848925e37SRusty Russell netif_stop_queue(dev); 689*e9d7417bSJason Wang if (unlikely(!virtqueue_enable_cb_delayed(sq->vq))) { 69048925e37SRusty Russell /* More just got used, free them then recheck. 
*/ 691*e9d7417bSJason Wang capacity += free_old_xmit_skbs(sq); 69248925e37SRusty Russell if (capacity >= 2+MAX_SKB_FRAGS) { 69348925e37SRusty Russell netif_start_queue(dev); 694*e9d7417bSJason Wang virtqueue_disable_cb(sq->vq); 69548925e37SRusty Russell } 69648925e37SRusty Russell } 69748925e37SRusty Russell } 69848925e37SRusty Russell 69948925e37SRusty Russell return NETDEV_TX_OK; 70048925e37SRusty Russell } 70148925e37SRusty Russell 7029c46f6d4SAlex Williamson static int virtnet_set_mac_address(struct net_device *dev, void *p) 7039c46f6d4SAlex Williamson { 7049c46f6d4SAlex Williamson struct virtnet_info *vi = netdev_priv(dev); 7059c46f6d4SAlex Williamson struct virtio_device *vdev = vi->vdev; 706f2f2c8b4SJiri Pirko int ret; 7079c46f6d4SAlex Williamson 708f2f2c8b4SJiri Pirko ret = eth_mac_addr(dev, p); 709f2f2c8b4SJiri Pirko if (ret) 710f2f2c8b4SJiri Pirko return ret; 7119c46f6d4SAlex Williamson 71262994b2dSAlex Williamson if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC)) 7139c46f6d4SAlex Williamson vdev->config->set(vdev, offsetof(struct virtio_net_config, mac), 7149c46f6d4SAlex Williamson dev->dev_addr, dev->addr_len); 7159c46f6d4SAlex Williamson 7169c46f6d4SAlex Williamson return 0; 7179c46f6d4SAlex Williamson } 7189c46f6d4SAlex Williamson 7193fa2a1dfSstephen hemminger static struct rtnl_link_stats64 *virtnet_stats(struct net_device *dev, 7203fa2a1dfSstephen hemminger struct rtnl_link_stats64 *tot) 7213fa2a1dfSstephen hemminger { 7223fa2a1dfSstephen hemminger struct virtnet_info *vi = netdev_priv(dev); 7233fa2a1dfSstephen hemminger int cpu; 7243fa2a1dfSstephen hemminger unsigned int start; 7253fa2a1dfSstephen hemminger 7263fa2a1dfSstephen hemminger for_each_possible_cpu(cpu) { 72758472a76SEric Dumazet struct virtnet_stats *stats = per_cpu_ptr(vi->stats, cpu); 7283fa2a1dfSstephen hemminger u64 tpackets, tbytes, rpackets, rbytes; 7293fa2a1dfSstephen hemminger 7303fa2a1dfSstephen hemminger do { 731e3906486SKevin Groeneveld start = u64_stats_fetch_begin_bh(&stats->tx_syncp); 7323fa2a1dfSstephen hemminger tpackets = stats->tx_packets; 7333fa2a1dfSstephen hemminger tbytes = stats->tx_bytes; 734e3906486SKevin Groeneveld } while (u64_stats_fetch_retry_bh(&stats->tx_syncp, start)); 73583a27052SEric Dumazet 73683a27052SEric Dumazet do { 737e3906486SKevin Groeneveld start = u64_stats_fetch_begin_bh(&stats->rx_syncp); 7383fa2a1dfSstephen hemminger rpackets = stats->rx_packets; 7393fa2a1dfSstephen hemminger rbytes = stats->rx_bytes; 740e3906486SKevin Groeneveld } while (u64_stats_fetch_retry_bh(&stats->rx_syncp, start)); 7413fa2a1dfSstephen hemminger 7423fa2a1dfSstephen hemminger tot->rx_packets += rpackets; 7433fa2a1dfSstephen hemminger tot->tx_packets += tpackets; 7443fa2a1dfSstephen hemminger tot->rx_bytes += rbytes; 7453fa2a1dfSstephen hemminger tot->tx_bytes += tbytes; 7463fa2a1dfSstephen hemminger } 7473fa2a1dfSstephen hemminger 7483fa2a1dfSstephen hemminger tot->tx_dropped = dev->stats.tx_dropped; 749021ac8d3SRick Jones tot->tx_fifo_errors = dev->stats.tx_fifo_errors; 7503fa2a1dfSstephen hemminger tot->rx_dropped = dev->stats.rx_dropped; 7513fa2a1dfSstephen hemminger tot->rx_length_errors = dev->stats.rx_length_errors; 7523fa2a1dfSstephen hemminger tot->rx_frame_errors = dev->stats.rx_frame_errors; 7533fa2a1dfSstephen hemminger 7543fa2a1dfSstephen hemminger return tot; 7553fa2a1dfSstephen hemminger } 7563fa2a1dfSstephen hemminger 757da74e89dSAmit Shah #ifdef CONFIG_NET_POLL_CONTROLLER 758da74e89dSAmit Shah static void virtnet_netpoll(struct net_device *dev) 759da74e89dSAmit Shah { 
	struct virtnet_info *vi = netdev_priv(dev);

	napi_schedule(&vi->rq.napi);
}
#endif

static int virtnet_open(struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);

	/* Make sure we have some buffers: if oom use wq. */
	if (!try_fill_recv(&vi->rq, GFP_KERNEL))
		schedule_delayed_work(&vi->refill, 0);

	virtnet_napi_enable(&vi->rq);
	return 0;
}

/*
 * Send command via the control virtqueue and check status. Commands
 * supported by the hypervisor, as indicated by feature bits, should
 * never fail unless improperly formatted.
 */
static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
				 struct scatterlist *data, int out, int in)
{
	struct scatterlist *s, sg[VIRTNET_SEND_COMMAND_SG_MAX + 2];
	struct virtio_net_ctrl_hdr ctrl;
	virtio_net_ctrl_ack status = ~0;
	unsigned int tmp;
	int i;

	/* Caller should know better */
	BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ) ||
	       (out + in > VIRTNET_SEND_COMMAND_SG_MAX));

	out++; /* Add header */
	in++; /* Add return status */

	ctrl.class = class;
	ctrl.cmd = cmd;

	sg_init_table(sg, out + in);

	sg_set_buf(&sg[0], &ctrl, sizeof(ctrl));
	for_each_sg(data, s, out + in - 2, i)
		sg_set_buf(&sg[i + 1], sg_virt(s), s->length);
	sg_set_buf(&sg[out + in - 1], &status, sizeof(status));

	BUG_ON(virtqueue_add_buf(vi->cvq, sg, out, in, vi, GFP_ATOMIC) < 0);

	virtqueue_kick(vi->cvq);

	/*
	 * Spin for a response, the kick causes an ioport write, trapping
	 * into the hypervisor, so the request should be handled immediately.
	 */
	while (!virtqueue_get_buf(vi->cvq, &tmp))
		cpu_relax();

	return status == VIRTIO_NET_OK;
}

static void virtnet_ack_link_announce(struct virtnet_info *vi)
{
	rtnl_lock();
	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_ANNOUNCE,
				  VIRTIO_NET_CTRL_ANNOUNCE_ACK, NULL,
				  0, 0))
		dev_warn(&vi->dev->dev, "Failed to ack link announce.\n");
	rtnl_unlock();
}

static int virtnet_close(struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);

	/* Make sure refill_work doesn't re-enable napi! */
	cancel_delayed_work_sync(&vi->refill);
	napi_disable(&vi->rq.napi);

	return 0;
}

static void virtnet_set_rx_mode(struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);
	struct scatterlist sg[2];
	u8 promisc, allmulti;
	struct virtio_net_ctrl_mac *mac_data;
	struct netdev_hw_addr *ha;
	int uc_count;
	int mc_count;
	void *buf;
	int i;

	/* We can't dynamically set ndo_set_rx_mode, so return gracefully */
	if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_RX))
		return;

	promisc = ((dev->flags & IFF_PROMISC) != 0);
	allmulti = ((dev->flags & IFF_ALLMULTI) != 0);

	sg_init_one(sg, &promisc, sizeof(promisc));

	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
				  VIRTIO_NET_CTRL_RX_PROMISC,
				  sg, 1, 0))
		dev_warn(&dev->dev, "Failed to %sable promisc mode.\n",
			 promisc ? "en" : "dis");

	sg_init_one(sg, &allmulti, sizeof(allmulti));

	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
				  VIRTIO_NET_CTRL_RX_ALLMULTI,
				  sg, 1, 0))
		dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n",
			 allmulti ? "en" : "dis");

	uc_count = netdev_uc_count(dev);
	mc_count = netdev_mc_count(dev);
	/* MAC filter - use one buffer for both lists */
	buf = kzalloc(((uc_count + mc_count) * ETH_ALEN) +
		      (2 * sizeof(mac_data->entries)), GFP_ATOMIC);
	mac_data = buf;
	if (!buf) {
		dev_warn(&dev->dev, "No memory for MAC address buffer\n");
		return;
	}

	sg_init_table(sg, 2);

	/* Store the unicast list and count in the front of the buffer */
	mac_data->entries = uc_count;
	i = 0;
	netdev_for_each_uc_addr(ha, dev)
		memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN);

	sg_set_buf(&sg[0], mac_data,
		   sizeof(mac_data->entries) + (uc_count * ETH_ALEN));

	/* multicast list and count fill the end */
	mac_data = (void *)&mac_data->macs[uc_count][0];

	mac_data->entries = mc_count;
	i = 0;
	netdev_for_each_mc_addr(ha, dev)
		memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN);

	sg_set_buf(&sg[1], mac_data,
		   sizeof(mac_data->entries) + (mc_count * ETH_ALEN));

	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
				  VIRTIO_NET_CTRL_MAC_TABLE_SET,
				  sg, 2, 0))
		dev_warn(&dev->dev, "Failed to set MAC filter table.\n");

	kfree(buf);
}

static int virtnet_vlan_rx_add_vid(struct net_device *dev, u16 vid)
{
	struct virtnet_info *vi = netdev_priv(dev);
	struct scatterlist sg;

	sg_init_one(&sg, &vid, sizeof(vid));

	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
				  VIRTIO_NET_CTRL_VLAN_ADD, &sg, 1, 0))
		dev_warn(&dev->dev, "Failed to add VLAN ID %d.\n", vid);
	return 0;
}

static int virtnet_vlan_rx_kill_vid(struct net_device *dev, u16 vid)
{
	struct virtnet_info *vi = netdev_priv(dev);
	struct scatterlist sg;

	sg_init_one(&sg, &vid, sizeof(vid));

	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
				  VIRTIO_NET_CTRL_VLAN_DEL, &sg, 1, 0))
		dev_warn(&dev->dev, "Failed to kill VLAN ID %d.\n", vid);
	return 0;
}

9468f9f4668SRick Jones static void virtnet_get_ringparam(struct net_device *dev, 9478f9f4668SRick Jones struct ethtool_ringparam *ring) 9488f9f4668SRick Jones { 9498f9f4668SRick Jones struct virtnet_info *vi = netdev_priv(dev); 9508f9f4668SRick Jones 951*e9d7417bSJason Wang ring->rx_max_pending = virtqueue_get_vring_size(vi->rq.vq); 952*e9d7417bSJason Wang ring->tx_max_pending = virtqueue_get_vring_size(vi->sq.vq); 9538f9f4668SRick Jones ring->rx_pending = ring->rx_max_pending; 9548f9f4668SRick Jones ring->tx_pending = ring->tx_max_pending; 9558f9f4668SRick Jones } 9568f9f4668SRick Jones 95766846048SRick Jones 95866846048SRick Jones static void virtnet_get_drvinfo(struct net_device *dev, 95966846048SRick Jones struct ethtool_drvinfo *info) 96066846048SRick Jones { 96166846048SRick Jones struct virtnet_info *vi = netdev_priv(dev); 96266846048SRick Jones struct virtio_device *vdev = vi->vdev; 96366846048SRick Jones 96466846048SRick Jones strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver)); 96566846048SRick Jones strlcpy(info->version, VIRTNET_DRIVER_VERSION, sizeof(info->version)); 96666846048SRick Jones strlcpy(info->bus_info, virtio_bus_name(vdev), sizeof(info->bus_info)); 96766846048SRick Jones 96866846048SRick Jones } 96966846048SRick Jones 9700fc0b732SStephen Hemminger static const struct ethtool_ops virtnet_ethtool_ops = { 97166846048SRick Jones .get_drvinfo = virtnet_get_drvinfo, 9729f4d26d0SMark McLoughlin .get_link = ethtool_op_get_link, 9738f9f4668SRick Jones .get_ringparam = virtnet_get_ringparam, 974a9ea3fc6SHerbert Xu }; 975a9ea3fc6SHerbert Xu 97639da5814SMark McLoughlin #define MIN_MTU 68 97739da5814SMark McLoughlin #define MAX_MTU 65535 97839da5814SMark McLoughlin 97939da5814SMark McLoughlin static int virtnet_change_mtu(struct net_device *dev, int new_mtu) 98039da5814SMark McLoughlin { 98139da5814SMark McLoughlin if (new_mtu < MIN_MTU || new_mtu > MAX_MTU) 98239da5814SMark McLoughlin return -EINVAL; 98339da5814SMark McLoughlin dev->mtu = new_mtu; 98439da5814SMark McLoughlin return 0; 98539da5814SMark McLoughlin } 98639da5814SMark McLoughlin 98776288b4eSStephen Hemminger static const struct net_device_ops virtnet_netdev = { 98876288b4eSStephen Hemminger .ndo_open = virtnet_open, 98976288b4eSStephen Hemminger .ndo_stop = virtnet_close, 99076288b4eSStephen Hemminger .ndo_start_xmit = start_xmit, 99176288b4eSStephen Hemminger .ndo_validate_addr = eth_validate_addr, 9929c46f6d4SAlex Williamson .ndo_set_mac_address = virtnet_set_mac_address, 9932af7698eSAlex Williamson .ndo_set_rx_mode = virtnet_set_rx_mode, 99476288b4eSStephen Hemminger .ndo_change_mtu = virtnet_change_mtu, 9953fa2a1dfSstephen hemminger .ndo_get_stats64 = virtnet_stats, 9961824a989SAlex Williamson .ndo_vlan_rx_add_vid = virtnet_vlan_rx_add_vid, 9971824a989SAlex Williamson .ndo_vlan_rx_kill_vid = virtnet_vlan_rx_kill_vid, 99876288b4eSStephen Hemminger #ifdef CONFIG_NET_POLL_CONTROLLER 99976288b4eSStephen Hemminger .ndo_poll_controller = virtnet_netpoll, 100076288b4eSStephen Hemminger #endif 100176288b4eSStephen Hemminger }; 100276288b4eSStephen Hemminger 1003586d17c5SJason Wang static void virtnet_config_changed_work(struct work_struct *work) 10049f4d26d0SMark McLoughlin { 1005586d17c5SJason Wang struct virtnet_info *vi = 1006586d17c5SJason Wang container_of(work, struct virtnet_info, config_work); 10079f4d26d0SMark McLoughlin u16 v; 10089f4d26d0SMark McLoughlin 1009586d17c5SJason Wang mutex_lock(&vi->config_lock); 1010586d17c5SJason Wang if (!vi->config_enable) 1011586d17c5SJason Wang goto done; 
1012586d17c5SJason Wang 101377dd7693SSasha Levin if (virtio_config_val(vi->vdev, VIRTIO_NET_F_STATUS, 10149f4d26d0SMark McLoughlin offsetof(struct virtio_net_config, status), 101577dd7693SSasha Levin &v) < 0) 1016586d17c5SJason Wang goto done; 1017586d17c5SJason Wang 1018586d17c5SJason Wang if (v & VIRTIO_NET_S_ANNOUNCE) { 1019ee89bab1SAmerigo Wang netdev_notify_peers(vi->dev); 1020586d17c5SJason Wang virtnet_ack_link_announce(vi); 1021586d17c5SJason Wang } 10229f4d26d0SMark McLoughlin 10239f4d26d0SMark McLoughlin /* Ignore unknown (future) status bits */ 10249f4d26d0SMark McLoughlin v &= VIRTIO_NET_S_LINK_UP; 10259f4d26d0SMark McLoughlin 10269f4d26d0SMark McLoughlin if (vi->status == v) 1027586d17c5SJason Wang goto done; 10289f4d26d0SMark McLoughlin 10299f4d26d0SMark McLoughlin vi->status = v; 10309f4d26d0SMark McLoughlin 10319f4d26d0SMark McLoughlin if (vi->status & VIRTIO_NET_S_LINK_UP) { 10329f4d26d0SMark McLoughlin netif_carrier_on(vi->dev); 10339f4d26d0SMark McLoughlin netif_wake_queue(vi->dev); 10349f4d26d0SMark McLoughlin } else { 10359f4d26d0SMark McLoughlin netif_carrier_off(vi->dev); 10369f4d26d0SMark McLoughlin netif_stop_queue(vi->dev); 10379f4d26d0SMark McLoughlin } 1038586d17c5SJason Wang done: 1039586d17c5SJason Wang mutex_unlock(&vi->config_lock); 10409f4d26d0SMark McLoughlin } 10419f4d26d0SMark McLoughlin 10429f4d26d0SMark McLoughlin static void virtnet_config_changed(struct virtio_device *vdev) 10439f4d26d0SMark McLoughlin { 10449f4d26d0SMark McLoughlin struct virtnet_info *vi = vdev->priv; 10459f4d26d0SMark McLoughlin 10463b07e9caSTejun Heo schedule_work(&vi->config_work); 10479f4d26d0SMark McLoughlin } 10489f4d26d0SMark McLoughlin 1049*e9d7417bSJason Wang static void virtnet_del_vqs(struct virtnet_info *vi) 1050*e9d7417bSJason Wang { 1051*e9d7417bSJason Wang struct virtio_device *vdev = vi->vdev; 1052*e9d7417bSJason Wang 1053*e9d7417bSJason Wang vdev->config->del_vqs(vdev); 1054*e9d7417bSJason Wang } 1055*e9d7417bSJason Wang 10563f9c10b0SAmit Shah static int init_vqs(struct virtnet_info *vi) 10573f9c10b0SAmit Shah { 10583f9c10b0SAmit Shah struct virtqueue *vqs[3]; 10593f9c10b0SAmit Shah vq_callback_t *callbacks[] = { skb_recv_done, skb_xmit_done, NULL}; 10603f9c10b0SAmit Shah const char *names[] = { "input", "output", "control" }; 10613f9c10b0SAmit Shah int nvqs, err; 10623f9c10b0SAmit Shah 10633f9c10b0SAmit Shah /* We expect two virtqueues, receive then send, 10643f9c10b0SAmit Shah * and optionally control. */ 10653f9c10b0SAmit Shah nvqs = virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ) ? 
3 : 2; 10663f9c10b0SAmit Shah 10673f9c10b0SAmit Shah err = vi->vdev->config->find_vqs(vi->vdev, nvqs, vqs, callbacks, names); 10683f9c10b0SAmit Shah if (err) 10693f9c10b0SAmit Shah return err; 10703f9c10b0SAmit Shah 1071*e9d7417bSJason Wang vi->rq.vq = vqs[0]; 1072*e9d7417bSJason Wang vi->sq.vq = vqs[1]; 10733f9c10b0SAmit Shah 10743f9c10b0SAmit Shah if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ)) { 10753f9c10b0SAmit Shah vi->cvq = vqs[2]; 10763f9c10b0SAmit Shah 10773f9c10b0SAmit Shah if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VLAN)) 10783f9c10b0SAmit Shah vi->dev->features |= NETIF_F_HW_VLAN_FILTER; 10793f9c10b0SAmit Shah } 10803f9c10b0SAmit Shah return 0; 10813f9c10b0SAmit Shah } 10823f9c10b0SAmit Shah 1083296f96fcSRusty Russell static int virtnet_probe(struct virtio_device *vdev) 1084296f96fcSRusty Russell { 1085296f96fcSRusty Russell int err; 1086296f96fcSRusty Russell struct net_device *dev; 1087296f96fcSRusty Russell struct virtnet_info *vi; 1088296f96fcSRusty Russell 1089296f96fcSRusty Russell /* Allocate ourselves a network device with room for our info */ 1090296f96fcSRusty Russell dev = alloc_etherdev(sizeof(struct virtnet_info)); 1091296f96fcSRusty Russell if (!dev) 1092296f96fcSRusty Russell return -ENOMEM; 1093296f96fcSRusty Russell 1094296f96fcSRusty Russell /* Set up network device as normal. */ 1095f2f2c8b4SJiri Pirko dev->priv_flags |= IFF_UNICAST_FLT | IFF_LIVE_ADDR_CHANGE; 109676288b4eSStephen Hemminger dev->netdev_ops = &virtnet_netdev; 1097296f96fcSRusty Russell dev->features = NETIF_F_HIGHDMA; 10983fa2a1dfSstephen hemminger 1099a9ea3fc6SHerbert Xu SET_ETHTOOL_OPS(dev, &virtnet_ethtool_ops); 1100296f96fcSRusty Russell SET_NETDEV_DEV(dev, &vdev->dev); 1101296f96fcSRusty Russell 1102296f96fcSRusty Russell /* Do we support "hardware" checksums? */ 110398e778c9SMichał Mirosław if (virtio_has_feature(vdev, VIRTIO_NET_F_CSUM)) { 1104296f96fcSRusty Russell /* This opens up the world of extra features. */ 110598e778c9SMichał Mirosław dev->hw_features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST; 110698e778c9SMichał Mirosław if (csum) 1107296f96fcSRusty Russell dev->features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST; 110898e778c9SMichał Mirosław 110998e778c9SMichał Mirosław if (virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) { 111098e778c9SMichał Mirosław dev->hw_features |= NETIF_F_TSO | NETIF_F_UFO 111134a48579SRusty Russell | NETIF_F_TSO_ECN | NETIF_F_TSO6; 111234a48579SRusty Russell } 11135539ae96SRusty Russell /* Individual feature bits: what can host handle? */ 111498e778c9SMichał Mirosław if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO4)) 111598e778c9SMichał Mirosław dev->hw_features |= NETIF_F_TSO; 111698e778c9SMichał Mirosław if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO6)) 111798e778c9SMichał Mirosław dev->hw_features |= NETIF_F_TSO6; 111898e778c9SMichał Mirosław if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_ECN)) 111998e778c9SMichał Mirosław dev->hw_features |= NETIF_F_TSO_ECN; 112098e778c9SMichał Mirosław if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_UFO)) 112198e778c9SMichał Mirosław dev->hw_features |= NETIF_F_UFO; 112298e778c9SMichał Mirosław 112398e778c9SMichał Mirosław if (gso) 112498e778c9SMichał Mirosław dev->features |= dev->hw_features & (NETIF_F_ALL_TSO|NETIF_F_UFO); 112598e778c9SMichał Mirosław /* (!csum && gso) case will be fixed by register_netdev() */ 1126296f96fcSRusty Russell } 1127296f96fcSRusty Russell 1128296f96fcSRusty Russell /* Configuration may specify what MAC to use. Otherwise random. 
*/ 112977dd7693SSasha Levin if (virtio_config_val_len(vdev, VIRTIO_NET_F_MAC, 1130a586d4f6SRusty Russell offsetof(struct virtio_net_config, mac), 113177dd7693SSasha Levin dev->dev_addr, dev->addr_len) < 0) 1132f2cedb63SDanny Kukawka eth_hw_addr_random(dev); 1133296f96fcSRusty Russell 1134296f96fcSRusty Russell /* Set up our device-specific information */ 1135296f96fcSRusty Russell vi = netdev_priv(dev); 1136*e9d7417bSJason Wang netif_napi_add(dev, &vi->rq.napi, virtnet_poll, napi_weight); 1137296f96fcSRusty Russell vi->dev = dev; 1138296f96fcSRusty Russell vi->vdev = vdev; 1139d9d5dcc8SChristian Borntraeger vdev->priv = vi; 1140*e9d7417bSJason Wang vi->rq.pages = NULL; 11413fa2a1dfSstephen hemminger vi->stats = alloc_percpu(struct virtnet_stats); 11423fa2a1dfSstephen hemminger err = -ENOMEM; 11433fa2a1dfSstephen hemminger if (vi->stats == NULL) 11443fa2a1dfSstephen hemminger goto free; 11453fa2a1dfSstephen hemminger 11463161e453SRusty Russell INIT_DELAYED_WORK(&vi->refill, refill_work); 1147586d17c5SJason Wang mutex_init(&vi->config_lock); 1148586d17c5SJason Wang vi->config_enable = true; 1149586d17c5SJason Wang INIT_WORK(&vi->config_work, virtnet_config_changed_work); 1150*e9d7417bSJason Wang sg_init_table(vi->rq.sg, ARRAY_SIZE(vi->rq.sg)); 1151*e9d7417bSJason Wang sg_init_table(vi->sq.sg, ARRAY_SIZE(vi->sq.sg)); 1152296f96fcSRusty Russell 115397402b96SHerbert Xu /* If we can receive ANY GSO packets, we must allocate large ones. */ 11548e95a202SJoe Perches if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) || 11558e95a202SJoe Perches virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6) || 11568e95a202SJoe Perches virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ECN)) 115797402b96SHerbert Xu vi->big_packets = true; 115897402b96SHerbert Xu 11593f2c31d9SMark McLoughlin if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF)) 11603f2c31d9SMark McLoughlin vi->mergeable_rx_bufs = true; 11613f2c31d9SMark McLoughlin 11623f9c10b0SAmit Shah err = init_vqs(vi); 1163d2a7dddaSMichael S. Tsirkin if (err) 11643fa2a1dfSstephen hemminger goto free_stats; 1165d2a7dddaSMichael S. Tsirkin 1166296f96fcSRusty Russell err = register_netdev(dev); 1167296f96fcSRusty Russell if (err) { 1168296f96fcSRusty Russell pr_debug("virtio_net: registering device failed\n"); 1169d2a7dddaSMichael S. Tsirkin goto free_vqs; 1170296f96fcSRusty Russell } 1171b3369c1fSRusty Russell 1172b3369c1fSRusty Russell /* Last of all, set up some receive buffers. */ 1173*e9d7417bSJason Wang try_fill_recv(&vi->rq, GFP_KERNEL); 1174b3369c1fSRusty Russell 1175b3369c1fSRusty Russell /* If we didn't even get one input buffer, we're useless. */ 1176*e9d7417bSJason Wang if (vi->rq.num == 0) { 1177b3369c1fSRusty Russell err = -ENOMEM; 1178b3369c1fSRusty Russell goto unregister; 1179b3369c1fSRusty Russell } 1180b3369c1fSRusty Russell 1181167c25e4SJason Wang /* Assume link up if device can't report link status, 1182167c25e4SJason Wang otherwise get link status from config. 
1181167c25e4SJason Wang 	/* Assume link up if device can't report link status,
1182167c25e4SJason Wang 	   otherwise get link status from config. */
1183167c25e4SJason Wang 	if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) {
1184167c25e4SJason Wang 		netif_carrier_off(dev);
11853b07e9caSTejun Heo 		schedule_work(&vi->config_work);
1186167c25e4SJason Wang 	} else {
1187167c25e4SJason Wang 		vi->status = VIRTIO_NET_S_LINK_UP;
11884783256eSPantelis Koukousoulas 		netif_carrier_on(dev);
1189167c25e4SJason Wang 	}
11909f4d26d0SMark McLoughlin 
1191296f96fcSRusty Russell 	pr_debug("virtnet: registered device %s\n", dev->name);
1192296f96fcSRusty Russell 	return 0;
1193296f96fcSRusty Russell 
1194b3369c1fSRusty Russell unregister:
1195b3369c1fSRusty Russell 	unregister_netdev(dev);
1196d2a7dddaSMichael S. Tsirkin free_vqs:
1197*e9d7417bSJason Wang 	virtnet_del_vqs(vi);
11983fa2a1dfSstephen hemminger free_stats:
11993fa2a1dfSstephen hemminger 	free_percpu(vi->stats);
1200296f96fcSRusty Russell free:
1201296f96fcSRusty Russell 	free_netdev(dev);
1202296f96fcSRusty Russell 	return err;
1203296f96fcSRusty Russell }
1204296f96fcSRusty Russell 
12059ab86bbcSShirley Ma static void free_unused_bufs(struct virtnet_info *vi)
12069ab86bbcSShirley Ma {
12079ab86bbcSShirley Ma 	void *buf;
12089ab86bbcSShirley Ma 	while (1) {
1209*e9d7417bSJason Wang 		buf = virtqueue_detach_unused_buf(vi->sq.vq);
1210830a8a97SShirley Ma 		if (!buf)
1211830a8a97SShirley Ma 			break;
1212830a8a97SShirley Ma 		dev_kfree_skb(buf);
1213830a8a97SShirley Ma 	}
1214830a8a97SShirley Ma 	while (1) {
1215*e9d7417bSJason Wang 		buf = virtqueue_detach_unused_buf(vi->rq.vq);
12169ab86bbcSShirley Ma 		if (!buf)
12179ab86bbcSShirley Ma 			break;
12189ab86bbcSShirley Ma 		if (vi->mergeable_rx_bufs || vi->big_packets)
1219*e9d7417bSJason Wang 			give_pages(&vi->rq, buf);
12209ab86bbcSShirley Ma 		else
12219ab86bbcSShirley Ma 			dev_kfree_skb(buf);
1222*e9d7417bSJason Wang 		--vi->rq.num;
12239ab86bbcSShirley Ma 	}
1224*e9d7417bSJason Wang 	BUG_ON(vi->rq.num != 0);
12259ab86bbcSShirley Ma }
12269ab86bbcSShirley Ma 
122704486ed0SAmit Shah static void remove_vq_common(struct virtnet_info *vi)
1228296f96fcSRusty Russell {
122904486ed0SAmit Shah 	vi->vdev->config->reset(vi->vdev);
1230830a8a97SShirley Ma 
1231830a8a97SShirley Ma 	/* Free unused buffers in both send and recv, if any. */
12329ab86bbcSShirley Ma 	free_unused_bufs(vi);
1233fb6813f4SRusty Russell 
1234*e9d7417bSJason Wang 	virtnet_del_vqs(vi);
1235d2a7dddaSMichael S. Tsirkin 
1236*e9d7417bSJason Wang 	while (vi->rq.pages)
1237*e9d7417bSJason Wang 		__free_pages(get_a_page(&vi->rq, GFP_KERNEL), 0);
123804486ed0SAmit Shah }
123904486ed0SAmit Shah 
12408cc085d6SBill Pemberton static void virtnet_remove(struct virtio_device *vdev)
124104486ed0SAmit Shah {
124204486ed0SAmit Shah 	struct virtnet_info *vi = vdev->priv;
124304486ed0SAmit Shah 
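	/* Teardown order below: block further config-space update handling,
	 * unregister the netdev, reset the device and free its buffers via
	 * remove_vq_common(), then flush any config work that was already
	 * queued before freeing the stats and the netdev itself.
	 */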
1244586d17c5SJason Wang 	/* Prevent config work handler from accessing the device. */
1245586d17c5SJason Wang 	mutex_lock(&vi->config_lock);
1246586d17c5SJason Wang 	vi->config_enable = false;
1247586d17c5SJason Wang 	mutex_unlock(&vi->config_lock);
1248586d17c5SJason Wang 
124904486ed0SAmit Shah 	unregister_netdev(vi->dev);
125004486ed0SAmit Shah 
125104486ed0SAmit Shah 	remove_vq_common(vi);
1252fb6813f4SRusty Russell 
1253586d17c5SJason Wang 	flush_work(&vi->config_work);
1254586d17c5SJason Wang 
12552e66f55bSKrishna Kumar 	free_percpu(vi->stats);
125674b2553fSRusty Russell 	free_netdev(vi->dev);
1257296f96fcSRusty Russell }
1258296f96fcSRusty Russell 
12590741bcb5SAmit Shah #ifdef CONFIG_PM
12600741bcb5SAmit Shah static int virtnet_freeze(struct virtio_device *vdev)
12610741bcb5SAmit Shah {
12620741bcb5SAmit Shah 	struct virtnet_info *vi = vdev->priv;
12630741bcb5SAmit Shah 
1264586d17c5SJason Wang 	/* Prevent config work handler from accessing the device */
1265586d17c5SJason Wang 	mutex_lock(&vi->config_lock);
1266586d17c5SJason Wang 	vi->config_enable = false;
1267586d17c5SJason Wang 	mutex_unlock(&vi->config_lock);
1268586d17c5SJason Wang 
12690741bcb5SAmit Shah 	netif_device_detach(vi->dev);
12700741bcb5SAmit Shah 	cancel_delayed_work_sync(&vi->refill);
12710741bcb5SAmit Shah 
12720741bcb5SAmit Shah 	if (netif_running(vi->dev))
1273*e9d7417bSJason Wang 		napi_disable(&vi->rq.napi);
12740741bcb5SAmit Shah 
12750741bcb5SAmit Shah 	remove_vq_common(vi);
12760741bcb5SAmit Shah 
1277586d17c5SJason Wang 	flush_work(&vi->config_work);
1278586d17c5SJason Wang 
12790741bcb5SAmit Shah 	return 0;
12800741bcb5SAmit Shah }
12810741bcb5SAmit Shah 
12820741bcb5SAmit Shah static int virtnet_restore(struct virtio_device *vdev)
12830741bcb5SAmit Shah {
12840741bcb5SAmit Shah 	struct virtnet_info *vi = vdev->priv;
12850741bcb5SAmit Shah 	int err;
12860741bcb5SAmit Shah 
12870741bcb5SAmit Shah 	err = init_vqs(vi);
12880741bcb5SAmit Shah 	if (err)
12890741bcb5SAmit Shah 		return err;
12900741bcb5SAmit Shah 
12910741bcb5SAmit Shah 	if (netif_running(vi->dev))
1292*e9d7417bSJason Wang 		virtnet_napi_enable(&vi->rq);
12930741bcb5SAmit Shah 
12940741bcb5SAmit Shah 	netif_device_attach(vi->dev);
12950741bcb5SAmit Shah 
1296*e9d7417bSJason Wang 	if (!try_fill_recv(&vi->rq, GFP_KERNEL))
12973b07e9caSTejun Heo 		schedule_delayed_work(&vi->refill, 0);
12980741bcb5SAmit Shah 
1299586d17c5SJason Wang 	mutex_lock(&vi->config_lock);
1300586d17c5SJason Wang 	vi->config_enable = true;
1301586d17c5SJason Wang 	mutex_unlock(&vi->config_lock);
1302586d17c5SJason Wang 
13030741bcb5SAmit Shah 	return 0;
13040741bcb5SAmit Shah }
13050741bcb5SAmit Shah #endif
13060741bcb5SAmit Shah 
1307296f96fcSRusty Russell static struct virtio_device_id id_table[] = {
1308296f96fcSRusty Russell 	{ VIRTIO_ID_NET, VIRTIO_DEV_ANY_ID },
1309296f96fcSRusty Russell 	{ 0 },
1310296f96fcSRusty Russell };
1311296f96fcSRusty Russell 
1312c45a6816SRusty Russell static unsigned int features[] = {
13135e4fe5c4SMark McLoughlin 	VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GUEST_CSUM,
13145e4fe5c4SMark McLoughlin 	VIRTIO_NET_F_GSO, VIRTIO_NET_F_MAC,
1315c45a6816SRusty Russell 	VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6,
131697402b96SHerbert Xu 	VIRTIO_NET_F_HOST_ECN, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6,
13175c516751SSridhar Samudrala 	VIRTIO_NET_F_GUEST_ECN, VIRTIO_NET_F_GUEST_UFO,
13182a41f71dSAlex Williamson 	VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ,
13190bde9569SAlex Williamson 	VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN,
1320586d17c5SJason Wang 	VIRTIO_NET_F_GUEST_ANNOUNCE,
1321c45a6816SRusty Russell };
1322c45a6816SRusty Russell 
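/* The features[] table above only declares what this driver can handle; at
 * probe time the virtio core accepts the intersection of these bits and the
 * bits the device offers, and the driver then checks the negotiated result
 * with virtio_has_feature().  A minimal sketch of that pattern (illustrative
 * only; virtnet_probe() above already contains the real checks):
 *
 *	if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF))
 *		vi->mergeable_rx_bufs = true;
 */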
132322402529SUwe Kleine-König static struct virtio_driver virtio_net_driver = {
1324c45a6816SRusty Russell 	.feature_table = features,
1325c45a6816SRusty Russell 	.feature_table_size = ARRAY_SIZE(features),
1326296f96fcSRusty Russell 	.driver.name = KBUILD_MODNAME,
1327296f96fcSRusty Russell 	.driver.owner = THIS_MODULE,
1328296f96fcSRusty Russell 	.id_table = id_table,
1329296f96fcSRusty Russell 	.probe = virtnet_probe,
13308cc085d6SBill Pemberton 	.remove = virtnet_remove,
13319f4d26d0SMark McLoughlin 	.config_changed = virtnet_config_changed,
13320741bcb5SAmit Shah #ifdef CONFIG_PM
13330741bcb5SAmit Shah 	.freeze = virtnet_freeze,
13340741bcb5SAmit Shah 	.restore = virtnet_restore,
13350741bcb5SAmit Shah #endif
1336296f96fcSRusty Russell };
1337296f96fcSRusty Russell 
1338296f96fcSRusty Russell static int __init init(void)
1339296f96fcSRusty Russell {
134022402529SUwe Kleine-König 	return register_virtio_driver(&virtio_net_driver);
1341296f96fcSRusty Russell }
1342296f96fcSRusty Russell 
1343296f96fcSRusty Russell static void __exit fini(void)
1344296f96fcSRusty Russell {
134522402529SUwe Kleine-König 	unregister_virtio_driver(&virtio_net_driver);
1346296f96fcSRusty Russell }
1347296f96fcSRusty Russell module_init(init);
1348296f96fcSRusty Russell module_exit(fini);
1349296f96fcSRusty Russell 
1350296f96fcSRusty Russell MODULE_DEVICE_TABLE(virtio, id_table);
1351296f96fcSRusty Russell MODULE_DESCRIPTION("Virtio network driver");
1352296f96fcSRusty Russell MODULE_LICENSE("GPL");
1353
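/* A minimal sketch, assuming the tree provides the module_virtio_driver()
 * helper from <linux/virtio.h>: where it is available, the init()/fini()
 * wrappers plus the module_init()/module_exit() lines above collapse to a
 * single line.  Kept as a comment because the open-coded form is what this
 * file actually uses:
 *
 *	module_virtio_driver(virtio_net_driver);
 */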